diff --git a/massifs/go.mod b/massifs/go.mod index a6dc395..669cee0 100644 --- a/massifs/go.mod +++ b/massifs/go.mod @@ -35,6 +35,7 @@ require ( github.com/Azure/go-autorest/tracing v0.6.0 // indirect github.com/davecgh/go-spew v1.1.1 // indirect github.com/dimchansky/utfbom v1.1.1 // indirect + github.com/fxamacker/cbor v1.5.1 github.com/fxamacker/cbor/v2 v2.6.0 // indirect github.com/gabriel-vasile/mimetype v1.4.4 // indirect github.com/golang-jwt/jwt/v4 v4.5.0 // indirect diff --git a/massifs/go.sum b/massifs/go.sum index cfbef84..e687e21 100644 --- a/massifs/go.sum +++ b/massifs/go.sum @@ -51,6 +51,8 @@ github.com/dimchansky/utfbom v1.1.1 h1:vV6w1AhK4VMnhBno/TPVCoK9U/LP0PkLCS9tbxHdi github.com/dimchansky/utfbom v1.1.1/go.mod h1:SxdoEBH5qIqFocHMyGOXVAybYJdr71b1Q/j0mACtrfE= github.com/fortytw2/leaktest v1.3.0 h1:u8491cBMTQ8ft8aeV+adlcytMZylmA5nnwwkRZjI8vw= github.com/fortytw2/leaktest v1.3.0/go.mod h1:jDsjWgpAGjm2CA7WthBh/CdZYEPF31XHquHwclZch5g= +github.com/fxamacker/cbor v1.5.1 h1:XjQWBgdmQyqimslUh5r4tUGmoqzHmBFQOImkWGi2awg= +github.com/fxamacker/cbor v1.5.1/go.mod h1:3aPGItF174ni7dDzd6JZ206H8cmr4GDNBGpPa971zsU= github.com/fxamacker/cbor/v2 v2.6.0 h1:sU6J2usfADwWlYDAFhZBQ6TnLFBHxgesMrQfQgk1tWA= github.com/fxamacker/cbor/v2 v2.6.0/go.mod h1:pxXPTn3joSm21Gbwsv0w9OSA2y1HFR9qXEeXQVeNoDQ= github.com/gabriel-vasile/mimetype v1.4.4 h1:QjV6pZ7/XZ7ryI2KuyeEDE8wnh7fHP9YnQy+R0LnH8I= diff --git a/massifs/localmassifreader.go b/massifs/localmassifreader.go index deac5e9..2d86dbe 100644 --- a/massifs/localmassifreader.go +++ b/massifs/localmassifreader.go @@ -216,7 +216,7 @@ func (r *LocalReader) ReplaceVerifiedContext( // Note: ensure that the root is *never* written to disc or available in the // cached copy of the seal, so that it always has to be recomputed. state := vc.MMRState - state.Rootx = nil + state.LegacySealRoot = nil state.Peaks = nil return r.cache.ReplaceSeal(sealFilename, vc.Start.MassifIndex, &SealedState{ diff --git a/massifs/massifcontext.go b/massifs/massifcontext.go index 132c5e8..4a0ee13 100644 --- a/massifs/massifcontext.go +++ b/massifs/massifcontext.go @@ -110,7 +110,7 @@ func (mc *MassifContext) CopyPeakStack() map[uint64]int { // with how GetRoot accesses the store. The default configuration works only for // how leaf addition accesses the stack. func (mc *MassifContext) CreatePeakStackMap() error { - mc.peakStackMap = PeakStackMap(mc.Start.MassifHeight, mc.Start.FirstIndex+1) + mc.peakStackMap = PeakStackMap(mc.Start.MassifHeight, mc.Start.FirstIndex) if mc.peakStackMap == nil { return fmt.Errorf("invalid massif height or first index in start record") } @@ -438,6 +438,7 @@ func (mc *MassifContext) CheckConsistency( return nil, ErrStateRootMissing } + // Note: this can never be 0, because we always create a new massif with at least one node mmrSizeCurrent := mc.RangeCount() if mmrSizeCurrent < baseState.MMRSize { @@ -633,4 +634,4 @@ func (mc MassifContext) RangeCount() uint64 { // added. func (mc MassifContext) LastLeafMMRIndex() uint64 { return RangeLastLeafIndex(mc.Start.FirstIndex, mc.Start.MassifHeight) -} \ No newline at end of file +} diff --git a/massifs/massifcontextverified.go b/massifs/massifcontextverified.go index 2e48672..b672737 100644 --- a/massifs/massifcontextverified.go +++ b/massifs/massifcontextverified.go @@ -174,7 +174,7 @@ func (mc *MassifContext) verifyContext( // get the peaks from the local store, we are checking the store against the // latest additions. as we verify the signature below, any changes to the // store will be caught. 
-	state.Peaks, err = mmr.PeakHashes(mc, state.MMRSize)
+	state.Peaks, err = mmr.PeakHashes(mc, state.MMRSize-1)
 	if err != nil {
 		return nil, err
 	}
@@ -200,7 +200,7 @@ func (mc *MassifContext) verifyContext(
	// Otherwise we can get caught out by the store being tampered with after the seal was
	// created. Of course the seal itself could have been replaced, but at that
	// point the only defense is an independent replica.
-	err = VerifySignedRoot(
+	err = VerifySignedCheckPoint(
		*options.codec, pubKeyProvider, msg, state, nil,
	)
	if err != nil {
diff --git a/massifs/massifpeakstack_test.go b/massifs/massifpeakstack_test.go
index cdaaae7..f30d9b1 100644
--- a/massifs/massifpeakstack_test.go
+++ b/massifs/massifpeakstack_test.go
@@ -595,7 +595,7 @@ func TestPeakStack_Height4Massif2to3Size63(t *testing.T) {
	assert.Equal(t, mc3.peakStackMap[iPeakNode30], iStack30)
	assert.Equal(t, mc3.peakStackMap[iPeakNode45], iStack45)

-	proof, err := mmr.IndexProofBagged(mmrSizeB, &mc3, sha256.New(), iPeakNode30)
+	proof, err := mmr.InclusionProofBagged(mmrSizeB, &mc3, sha256.New(), iPeakNode30)
	require.NoError(t, err)

	peakHash, err := mc3.Get(iPeakNode30)
diff --git a/massifs/mmriver.go b/massifs/mmriver.go
new file mode 100644
index 0000000..ad7e1e7
--- /dev/null
+++ b/massifs/mmriver.go
@@ -0,0 +1,234 @@
+package massifs
+
+import (
+	"bytes"
+	"context"
+	"crypto/sha256"
+	"fmt"
+
+	"github.com/datatrails/go-datatrails-common/azblob"
+	commoncbor "github.com/datatrails/go-datatrails-common/cbor"
+	"github.com/fxamacker/cbor/v2"
+
+	commoncose "github.com/datatrails/go-datatrails-common/cose"
+	"github.com/datatrails/go-datatrails-common/logger"
+	"github.com/datatrails/go-datatrails-merklelog/mmr"
+)
+
+// MMRIVER COSE Receipts to accompany our COSE MMRIVER seals
+
+type MMRiverInclusionProof struct {
+	Index         uint64   `cbor:"1,keyasint"`
+	InclusionPath [][]byte `cbor:"2,keyasint"`
+}
+
+type MMRiverConsistencyProof struct {
+	TreeSize1        uint64   `cbor:"1,keyasint"`
+	TreeSize2        uint64   `cbor:"2,keyasint"`
+	ConsistencyPaths [][]byte `cbor:"3,keyasint"`
+	RightPeaks       [][]byte `cbor:"4,keyasint"`
+}
+
+type MMRiverVerifiableProofs struct {
+	InclusionProofs   []MMRiverInclusionProof   `cbor:"-1,keyasint,omitempty"`
+	ConsistencyProofs []MMRiverConsistencyProof `cbor:"-2,keyasint,omitempty"`
+}
+
+// MMRiverVerifiableProofsHeader provides for encoding, and deferred decoding, of
+// COSE_Sign1 message headers for MMRIVER receipts
+type MMRiverVerifiableProofsHeader struct {
+	VerifiableProofs MMRiverVerifiableProofs `cbor:"396,keyasint"`
+}
+
+/*
+func SetMMRiverInclusionProofsHeader(
+	msg *commoncose.CoseSign1Message, massif MassifReader, mmrSize, mmrIndex uint64) error {
+	msg.Headers.Unprotected[VDSCoseReceiptProofsTag] = proofs
+}*/
+
+// VerifySignedInclusionReceipts verifies a signed COSE receipt encoded according to the MMRIVER VDS.
+// On success the produced root is returned.
+// Signature verification failure is not an error, but the returned root will be nil and the result will be false.
+// All other unexpected issues are returned as errors, with a false result and nil root.
+// Note that MMRIVER receipts allow for multiple inclusion proofs to be attached to the receipt.
+// This function returns true only if ALL the supplied candidates verify.
+func VerifySignedInclusionReceipts(
+	ctx context.Context,
+	receipt *commoncose.CoseSign1Message,
+	candidates [][]byte,
+) (bool, []byte, error) {
+
+	var err error
+
+	// ignore any existing payload
+	receipt.Payload = nil
+
+	// We must return false if there are no candidates
+	if len(candidates) == 0 {
+		return false, nil, fmt.Errorf("no candidates provided")
+	}
+
+	var header MMRiverVerifiableProofsHeader
+	err = cbor.Unmarshal(receipt.Headers.RawUnprotected, &header)
+	if err != nil {
+		return false, nil, fmt.Errorf("MMRIVER receipt proofs malformed")
+	}
+	verifiableProofs := header.VerifiableProofs
+	if len(verifiableProofs.InclusionProofs) == 0 {
+		return false, nil, fmt.Errorf("MMRIVER receipt inclusion proofs not present")
+	}
+
+	// permit *fewer* candidates than proofs, but not more
+	if len(candidates) > len(verifiableProofs.InclusionProofs) {
+		return false, nil, fmt.Errorf("MMRIVER receipt more candidates than proofs")
+	}
+
+	var proof MMRiverInclusionProof
+
+	proof = verifiableProofs.InclusionProofs[0]
+	receipt.Payload = mmr.IncludedRoot(
+		sha256.New(),
+		proof.Index, candidates[0],
+		proof.InclusionPath)
+
+	err = receipt.VerifyWithCWTPublicKey(nil)
+	if err != nil {
+		return false, nil, fmt.Errorf(
+			"MMRIVER receipt VERIFY FAILED for: mmrIndex %d, candidate %d, err %v", proof.Index, 0, err)
+	}
+	// verify the first proof then just compare the produced roots. The loop is
+	// bounded by len(candidates) because fewer candidates than proofs are permitted.
+	for i := 1; i < len(candidates); i++ {
+
+		proof = verifiableProofs.InclusionProofs[i]
+		proven := mmr.IncludedRoot(sha256.New(), proof.Index, candidates[i], proof.InclusionPath)
+		if !bytes.Equal(receipt.Payload, proven) {
+			return false, nil, fmt.Errorf(
+				"MMRIVER receipt VERIFY FAILED for: mmrIndex %d, candidate %d: proven root mismatch", proof.Index, i)
+		}
+	}
+	return true, receipt.Payload, nil
+}
+
+// VerifySignedInclusionReceipt verifies a receipt comprising a single inclusion proof.
+// On signature verification failure the result is false and the returned root is nil.
+func VerifySignedInclusionReceipt(
+	ctx context.Context,
+	receipt *commoncose.CoseSign1Message,
+	candidate []byte,
+) (bool, []byte, error) {
+
+	ok, root, err := VerifySignedInclusionReceipts(ctx, receipt, [][]byte{candidate})
+	if err != nil {
+		return false, nil, err
+	}
+	if !ok {
+		return false, nil, nil
+	}
+	return true, root, nil
+}
+
+type ReceiptBuilder struct {
+	log          logger.Logger
+	massifReader MassifReader
+	cborCodec    commoncbor.CBORCodec
+	sealReader   SignedRootReader
+
+	massifHeight uint8
+}
+
+// NewReceiptBuilder creates a new ReceiptBuilder configured with all the necessary readers and information required to build a receipt.
+// Note that errors are logged on the assumption that the calling context is retrieving a receipt,
+// and all returned errors are StatusErrors that can be returned to the client, or nil.
+func NewReceiptBuilder(log logger.Logger, reader azblob.Reader, massifHeight uint8) (ReceiptBuilder, error) {
+
+	var err error
+
+	b := ReceiptBuilder{
+		log:          log,
+		massifHeight: massifHeight,
+	}
+
+	b.massifReader = NewMassifReader(log, reader)
+	if b.cborCodec, err = NewRootSignerCodec(); err != nil {
+		return ReceiptBuilder{}, err
+	}
+	b.sealReader = NewSignedRootReader(log, reader, b.cborCodec)
+	b.massifHeight = massifHeight
+
+	return b, nil
+}
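+
+// BuildReceipt returns an MMRIVER receipt for the log entry at mmrIndex. It
+// attaches a freshly generated inclusion proof to the pre-signed peak receipt
+// held in the massif seal.
+//
+// A minimal usage sketch (the log, reader, tenant and leaf hash values are
+// illustrative assumptions, and error handling is elided):
+//
+//	builder, _ := NewReceiptBuilder(log, reader, massifHeight)
+//	receipt, _ := builder.BuildReceipt(ctx, tenantIdentity, mmrIndex)
+//	// verify against a leaf hash obtained independently, eg from a replicated massif
+//	ok, root, _ := VerifySignedInclusionReceipt(ctx, receipt, leafHash)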
+func (b *ReceiptBuilder) BuildReceipt(
+	ctx context.Context, tenantIdentity string, mmrIndex uint64,
+) (*commoncose.CoseSign1Message, error) {
+
+	log := b.log.FromContext(ctx)
+	defer log.Close()
+
+	massifIndex := MassifIndexFromMMRIndex(b.massifHeight, mmrIndex)
+
+	// Get the seal with the latest peak for this event
+	massif, err := b.massifReader.GetMassif(ctx, tenantIdentity, massifIndex)
+	if err != nil {
+		return nil, fmt.Errorf(
+			"%w: failed to read massif %d for %s", err, massifIndex, tenantIdentity)
+	}
+
+	sealContext := LogBlobContext{
+		BlobPath: TenantMassifSignedRootPath(tenantIdentity, uint32(massifIndex)),
+	}
+
+	msg, state, err := b.sealReader.ReadLogicalContext(ctx, sealContext)
+	if err != nil {
+		return nil, fmt.Errorf("failed to read seal: %s, %v", sealContext.BlobPath, err)
+	}
+
+	proof, err := mmr.InclusionProof(&massif, state.MMRSize, mmrIndex)
+	if err != nil {
+		return nil, fmt.Errorf(
+			"failed to generate inclusion proof: %d in MMR(%d), %v", mmrIndex, state.MMRSize, err)
+	}
+
+	peakIndex := mmr.PeakIndex(mmr.LeafCount(state.MMRSize), len(proof))
+
+	// NOTE: The old-accumulator compatibility property, from
+	// https://eprint.iacr.org/2015/718.pdf, along with the COSE protected &
+	// unprotected buckets, is why we can just pre-sign the receipts.
+	// As long as the receipt consumer is convinced of the log's consistency (not split view),
+	// it does not matter which accumulator state the receipt is signed against.
+
+	var peaksHeader MMRStateReceipts
+	err = cbor.Unmarshal(msg.Headers.RawUnprotected, &peaksHeader)
+	if err != nil {
+		return nil, fmt.Errorf(
+			"%w: failed decoding peaks header: for tenant %s, seal %d", err, tenantIdentity, massifIndex)
+	}
+	if peakIndex >= len(peaksHeader.PeakReceipts) {
+		return nil, fmt.Errorf(
+			"peaks header contains too few peak receipts: for tenant %s, seal %d", tenantIdentity, massifIndex)
+	}
+
+	// This is an array of marshaled COSE_Sign1's
+	receiptMsg := peaksHeader.PeakReceipts[peakIndex]
+	signed, err := commoncose.NewCoseSign1MessageFromCBOR(
+		receiptMsg, commoncose.WithDecOptions(CheckpointDecOptions()))
+	if err != nil {
+		return nil, fmt.Errorf(
+			"%w: failed to decode pre-signed receipt for: %d in MMR(%d)",
+			err, mmrIndex, state.MMRSize)
+	}
+
+	// signed.Headers.RawProtected = nil
+	signed.Headers.RawUnprotected = nil
+
+	verifiableProofs := MMRiverVerifiableProofs{
+		InclusionProofs: []MMRiverInclusionProof{{
+			Index:         mmrIndex,
+			InclusionPath: proof}},
+	}
+
+	signed.Headers.Unprotected[VDSCoseReceiptProofsTag] = verifiableProofs
+
+	return signed, nil
+}
diff --git a/massifs/peakstack.go b/massifs/peakstack.go
index decc8fb..1169c71 100644
--- a/massifs/peakstack.go
+++ b/massifs/peakstack.go
@@ -6,7 +6,7 @@ import "github.com/datatrails/go-datatrails-merklelog/mmr"

 // PeakStackMap builds a map from mmr indices to peak stack entries
 // massifHeight is the 1 based height (not the height index)
-func PeakStackMap(massifHeight uint8, mmrSize uint64) map[uint64]int {
+func PeakStackMap(massifHeight uint8, mmrIndex uint64) map[uint64]int {

	if massifHeight == 0 {
		return nil
@@ -15,12 +15,12 @@ func PeakStackMap(massifHeight uint8, mmrIndex uint64) map[uint64]int {

	// XXX:TODO there is likely a more efficient way to do this using
	// PeaksBitmap or a variation of it, but this isn't a terribly hot path.
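+	// For example (a sketch of the result, values taken from peakstack_test.go):
+	// PeakStackMap(2, 9) yields map[uint64]int{6: 0, 9: 1} - the peaks of
+	// MMR(10) whose height index is at least massifHeight-1, mapped to their
+	// positions in the peak stack.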
	stackMap := map[uint64]int{}
-	iPeaks := mmr.PosPeaks(mmrSize)
+	iPeaks := mmr.Peaks(mmrIndex)
	for i, ip := range iPeaks {
-		if mmr.PosHeight(ip) < uint64(massifHeight-1) {
+		if mmr.IndexHeight(ip) < uint64(massifHeight-1) {
			continue
		}
-		stackMap[ip-1] = i
+		stackMap[ip] = i
	}

	return stackMap
diff --git a/massifs/peakstack_test.go b/massifs/peakstack_test.go
index e139f07..19f2cf0 100644
--- a/massifs/peakstack_test.go
+++ b/massifs/peakstack_test.go
@@ -8,7 +8,7 @@ import (
 func TestPeakStackMap(t *testing.T) {
	type args struct {
		massifHeight uint8
-		mmrSize      uint64
+		mmrIndex     uint64
	}
	tests := []struct {
		name string
@@ -17,25 +17,25 @@ func TestPeakStackMap(t *testing.T) {
	}{
-		// Note that the mmrSize used here, is also the FirstLeaf + 1 of the
-		// massif containing the peak stack.
-		{"massifpeakstack_test:0", args{2, 1}, map[uint64]int{}},
-		{"massifpeakstack_test:1", args{2, 4}, map[uint64]int{
+		// Note that the mmrIndex used here is also the FirstIndex of the
+		// massif containing the peak stack.
+		{"massifpeakstack_test:0", args{2, 0}, map[uint64]int{}},
+		{"massifpeakstack_test:1", args{2, 3}, map[uint64]int{
			2: 0,
		}},
-		{"massifpeakstack_test:2", args{2, 7}, map[uint64]int{
+		{"massifpeakstack_test:2", args{2, 6}, map[uint64]int{
			6: 0,
		}},
-		{"massifpeakstack_test:3", args{2, 10}, map[uint64]int{
+		{"massifpeakstack_test:3", args{2, 9}, map[uint64]int{
			6: 0,
			9: 1,
		}},
-		{"massifpeakstack_test:4", args{2, 15}, map[uint64]int{
+		{"massifpeakstack_test:4", args{2, 14}, map[uint64]int{
			14: 0,
		}},
	}
	for _, tt := range tests {
		t.Run(tt.name, func(t *testing.T) {
-			if got := PeakStackMap(tt.args.massifHeight, tt.args.mmrSize); !reflect.DeepEqual(got, tt.want) {
+			if got := PeakStackMap(tt.args.massifHeight, tt.args.mmrIndex); !reflect.DeepEqual(got, tt.want) {
				t.Errorf("PeakStackMap() = %v, want %v", got, tt.want)
			}
		})
diff --git a/massifs/rootsigner.go b/massifs/rootsigner.go
index ff364cc..72cdcea 100644
--- a/massifs/rootsigner.go
+++ b/massifs/rootsigner.go
@@ -3,22 +3,44 @@ package massifs

 import (
	"crypto/ecdsa"
	"crypto/rand"
+	"errors"
+	"fmt"

-	dtcbor "github.com/datatrails/go-datatrails-common/cbor"
-	dtcose "github.com/datatrails/go-datatrails-common/cose"
+	commoncbor "github.com/datatrails/go-datatrails-common/cbor"
+	commoncose "github.com/datatrails/go-datatrails-common/cose"
+	"github.com/fxamacker/cbor/v2"
	"github.com/veraison/go-cose"
 )

+var (
+	ErrNodeSize = errors.New("node value sizes must match the hash size")
+)
+
 type MMRStateVersion int

 const (
	MMRStateVersion0 MMRStateVersion = iota // Implicit initial release version
	MMRStateVersion1                        // Version 1
	// Note: new versions must be monotonically assigned.
+
 )

 const (
-	MMRStateVersionCurrent = MMRStateVersion1
+	MMRStateVersionCurrent  = MMRStateVersion1
+	VDSCoseReceiptsTag      = 395
+	VDSCoseReceiptProofsTag = 396
+	VDSMMRiver              = 2
+	VDSInclusionProof       = -1
+	InclusionProofIndex     = 1
+	InclusionProofProof     = 2
+
+	// The numbers < -65535 are reserved for private use.
+	COSEPrivateStart = int64(-65535)
+	// Numbers in the private use space are organisation / implementation specific.
+	// Allocation in this range MUST be co-ordinated datatrails wide.
+	// Remembering that the range is *negative*, we allocate the tag by
+	// subtracting the IANA registered tag for marking COSE Receipts proof data.
+	SealPeakReceiptsLabel = int64(COSEPrivateStart - VDSCoseReceiptProofsTag)
 )

 // MMRState defines the details we include in our signed commitment to the head log state.
@@ -32,8 +54,8 @@ type MMRState struct {
	// than this, can also (efficiently) reproduce this particular root, and
	// hence can be used to verify 'old' receipts.
	// This property is due to the
	// strict append only structure of the tree.
-	MMRSize uint64 `cbor:"1,keyasint"`
-	Rootx   []byte `cbor:"2,keyasint"` // Valid in Version 0 only.
+	MMRSize        uint64 `cbor:"1,keyasint"`
+	LegacySealRoot []byte `cbor:"2,keyasint"` // Valid in Version 0 only.
	// The peak hashes for the mmr identified by MMRSize, this is also the packed accumulator for the tree state.
	// All inclusion proofs for any node under MMRSize will lead directly to one
	// of these peaks, or can be extended to do so.
@@ -63,16 +85,23 @@ type MMRState struct {
	CommitmentEpoch uint32 `cbor:"6,keyasint"`
 }

+type MMRStateReceipts struct {
+	// A pre-signed COSE Receipts MMRIVER COSE_Sign1 message for each peak in the MMR identified by MMRSize.
+	// To create a receipt, simply attach the inclusion proof to the unprotected header for the appropriate PeakIndex.
+	// PeakReceipts []cbor.RawMessage `cbor:"-65931,keyasint"`
+	PeakReceipts [][]byte `cbor:"-65931,keyasint"`
+}
+
 // RootSigner is used to produce a signature over an mmr log state. This
 // signature commits to a log state, and should only be created and published
 // after checking the consistency between the last signed state and the new one.
 // See merklelog/mmrblobs/logconfirmer.go:LogConfirmer for expected use.
 type RootSigner struct {
	issuer    string
-	cborCodec dtcbor.CBORCodec
+	cborCodec commoncbor.CBORCodec
 }

-func NewRootSigner(issuer string, cborCodec dtcbor.CBORCodec) RootSigner {
+func NewRootSigner(issuer string, cborCodec commoncbor.CBORCodec) RootSigner {
	rs := RootSigner{
		issuer:    issuer,
		cborCodec: cborCodec,
@@ -85,18 +114,39 @@ func NewRootSigner(issuer string, cborCodec commoncbor.CBORCodec) RootSigner {
 // datatrails signature.
 func (rs RootSigner) Sign1(
	coseSigner cose.Signer,
-	keyIdentifier string, publicKey *ecdsa.PublicKey, subject string,
+	keyIdentifier string,
+	publicKey *ecdsa.PublicKey,
+	subject string,
	state MMRState, external []byte) ([]byte, error) {
-	payload, err := rs.cborCodec.MarshalCBOR(state)
+
+	receipts, err := rs.signEmptyPeakReceipts(coseSigner, publicKey, keyIdentifier, rs.issuer, subject, state.Peaks)
	if err != nil {
		return nil, err
	}
+	if len(receipts) != len(state.Peaks) {
+		return nil, fmt.Errorf("receipt vs peak count mismatch: %d vs %d", len(receipts), len(state.Peaks))
+	}

	coseHeaders := cose.Headers{
		Protected: cose.ProtectedHeader{
-			dtcose.HeaderLabelCWTClaims: dtcose.NewCNFClaim(
+			commoncose.HeaderLabelCWTClaims: commoncose.NewCNFClaim(
				rs.issuer, subject, keyIdentifier, coseSigner.Algorithm(), *publicKey),
		},
+		// One receipt is present for each peak identified by tree-size-2 in
+		// the protected header. Each receipt is an individually signed
+		// COSE_Sign1 message over that specific peak. All receipts of
+		// inclusion for individual leaves are created by attaching proofs
+		// to the unprotected header of the peak receipt.
+		// SealPeakReceiptsLabel: receipts,
+		// RawUnprotected: rawunprotected,
+		Unprotected: cose.UnprotectedHeader{
+			SealPeakReceiptsLabel: receipts,
+		},
+	}
+
+	payload, err := rs.cborCodec.MarshalCBOR(state)
+	if err != nil {
+		return nil, err
	}

	msg := cose.Sign1Message{
@@ -108,30 +158,159 @@ func (rs RootSigner) Sign1(
		return nil, err
	}

-	// We purposefully detach the root so that verifiers are forced to obtain it
+	// We purposefully detach the peaks so that verifiers are forced to obtain them
	// from the log.
-	state.Rootx = nil
+	state.LegacySealRoot = nil
	state.Peaks = nil
+
	payload, err = rs.cborCodec.MarshalCBOR(state)
	if err != nil {
		return nil, err
	}
+
	msg.Payload = payload

-	return msg.MarshalCBOR()
+	encodable, err := commoncose.NewCoseSign1Message(&msg)
+	if err != nil {
+		return nil, err
+	}
+	return encodable.MarshalCBOR()
+}
+
+// Note: regarding why and how we can pre-sign receipts:
+//
+// A specific advantage of MMR's is that we can pre-sign the protected headers
+// for all receipts we will ever be asked for. The scitt endpoint then only has
+// to copy the pre-signed receipt and *add* the inclusion path it is asked for.
+//
+// Importantly, this allows for self service *privacy preserving*, scitt
+// compatible, receipts based on replicated copies of the log.
+//
+// The most natural place to produce the pre-signed receipts is in the log
+// confirmer, because we are always pre-signing *peaks* of the MMR. And the
+// consistency between peaks (accumulators) is the concern of the sealer by way
+// of LogConfirmer. And the most natural place to store them is in the massif seal.
+// This is what we accommodate here.
+func (c *RootSigner) signEmptyPeakReceipts(
+	coseSigner cose.Signer,
+	publicKey *ecdsa.PublicKey,
+	keyIdentifier string,
+	issuer string,
+	subject string,
+	peaks [][]byte,
+) ([][]byte, error) {
+
+	receipts := make([][]byte, len(peaks))
+
+	for i, peak := range peaks {
+		receipt, err := c.signEmptyPeakReceipt(coseSigner, publicKey, keyIdentifier, issuer, subject, peak)
+		if err != nil {
+			return nil, err
+		}
+
+		receipts[i] = receipt
+	}
+	return receipts, nil
+}
+
+// signEmptyPeakReceipt signs a receipt for an accumulator peak.
+//
+// Because many inclusion proofs lead to the same peak, the proof material for
+// the unprotected header is empty. This can be added by the log consumer in a
+// privacy preserving way based on replicated massif content.
+//
+// Arguments:
+//
+//	ctx: The context for the operation
+//	coseSigner: The signer of the completed shared receipt
+//	issuer: The identifier for the issuer of the receipt
+//	subject: The identifier for the subject of the receipt
+func (rs RootSigner) signEmptyPeakReceipt(
+	coseSigner cose.Signer,
+	publicKey *ecdsa.PublicKey,
+	keyIdentifier string,
+	issuer string,
+	subject string,
+	// The bytes of a peak, which is an mmr node that is a member of an accumulator for one or more tree states.
+	peak []byte,
+) ([]byte, error) {
+
+	if len(peak) != 32 {
+		return nil, fmt.Errorf("%w: peak must be 32 bytes, got %d", ErrNodeSize, len(peak))
+	}
+
+	headers := cose.Headers{
+		Protected: cose.ProtectedHeader{
+			VDSCoseReceiptsTag:        VDSMMRiver,
+			cose.HeaderLabelAlgorithm: coseSigner.Algorithm(),
+			cose.HeaderLabelKeyID:     []byte(keyIdentifier),
+			commoncose.HeaderLabelCWTClaims: commoncose.NewCNFClaim(
+				issuer,
+				subject,
+				keyIdentifier,
+				coseSigner.Algorithm(),
+				*publicKey),
+		},
+		// The receipt producer, which MAY be the relying party in possession of
+		// a log massif, can fill in the inclusion proof directly and
+		// independently, without revealing the item of interest to the log
+		// service.
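+		//
+		// For example (a sketch; the receipt and proof variables are
+		// illustrative assumptions):
+		//
+		//	receipt.Headers.Unprotected[VDSCoseReceiptProofsTag] = MMRiverVerifiableProofs{
+		//		InclusionProofs: []MMRiverInclusionProof{{Index: mmrIndex, InclusionPath: path}},
+		//	}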
+		Unprotected: cose.UnprotectedHeader{},
+	}
+
+	msg := cose.Sign1Message{
+		Headers: headers,
+		Payload: peak,
+	}
+
+	err := msg.Sign(rand.Reader, nil, coseSigner)
	if err != nil {
-		return dtcbor.CBORCodec{}, err
+		return nil, err
+	}
+
+	// now, detach the payload
+	msg.Payload = nil
+
+	// Use the appropriate encoding options
+	encodable, err := commoncose.NewCoseSign1Message(&msg)
+	if err != nil {
+		return nil, err
+	}
+	return encodable.MarshalCBOR()
+}
+
+func NewRootSignerCodec() (commoncbor.CBORCodec, error) {
+	codec, err := commoncbor.NewCBORCodec(encOptions, decOptions)
+	if err != nil {
+		return commoncbor.CBORCodec{}, err
	}
	return codec, nil
 }

-func newDecOptions() []dtcose.SignOption {
-	return []dtcose.SignOption{dtcose.WithDecOptions(dtcbor.NewDeterministicDecOpts())}
+var (
+	encOptions = commoncbor.NewDeterministicEncOpts()
+	decOptions = cbor.DecOptions{
+		DupMapKey:   cbor.DupMapKeyEnforcedAPF, // (default) duplicated key not allowed
+		IndefLength: cbor.IndefLengthForbidden, // (default) no streaming
+		// override the default decoding behaviour for unsigned integers to retain the sign
+		IntDec: cbor.IntDecConvertNone, // decode CBOR uint/int to Go int64
+		TagsMd: cbor.TagsForbidden, // (default) no tags
+	}
+)
+
+// CheckpointDecOptions returns the decoding options compatible with the RootSigner.
+// With these options the sign is always retained.
+// The options align with the cbor defaults, except for the handling of unsigned integers.
+func CheckpointDecOptions() cbor.DecOptions {
+	return decOptions
+}
+
+// CheckpointEncOptions returns the encoding options compatible with the RootSigner.
+// These options align with the cbor defaults.
+func CheckpointEncOptions() cbor.EncOptions {
+	return encOptions
+}
+
+func newCheckpointDecOptions() []commoncose.SignOption {
+	return []commoncose.SignOption{commoncose.WithDecOptions(decOptions)}
 }
diff --git a/massifs/rootsigner_test.go b/massifs/rootsigner_test.go
index ee5698a..64bf874 100644
--- a/massifs/rootsigner_test.go
+++ b/massifs/rootsigner_test.go
@@ -2,15 +2,350 @@ package massifs

 import (
	"crypto/elliptic"
+	"crypto/rand"
	"testing"

	"github.com/datatrails/go-datatrails-common/azkeys"
-	dtcose "github.com/datatrails/go-datatrails-common/cose"
+	commoncose "github.com/datatrails/go-datatrails-common/cose"
+	_ "github.com/fxamacker/cbor/v2"
+	"github.com/veraison/go-cose"
+	_ "github.com/veraison/go-cose"
+
	"github.com/datatrails/go-datatrails-common/logger"
	"github.com/stretchr/testify/assert"
	"github.com/stretchr/testify/require"
 )

+// TestCoseSign1_UnprotectedEncDec just checks our assumptions about how to encode and decode
+// nested cose messages in the unprotected headers of a cose sign1 message.
+// There are some gotchas in the encoding rules when nesting cose messages and this test is used
+// to isolate the aspects we care about for the MMRIVER pre-signed receipts.
+func TestCoseSign1_UnprotectedEncDec(t *testing.T) { + logger.New("TEST") + + key := TestGenerateECKey(t, elliptic.P256()) + cborCodec, err := NewRootSignerCodec() + require.NoError(t, err) + coseSigner := azkeys.NewTestCoseSigner(t, key) + rs := TestNewRootSigner(t, "test-issuer") + + mustMarshalCBOR := func(value any) []byte { + b, err := cborCodec.MarshalCBOR(value) + require.NoError(t, err) + return b + } + + mustSignPeak := func(peak []byte) []byte { + b, err := rs.signEmptyPeakReceipt(coseSigner, &key.PublicKey, "test-key", "test-issuer", "test-subject", peak) + require.NoError(t, err) + return b + } + + mustSignPeaks := func(peaks [][]byte) [][]byte { + receipts, err := rs.signEmptyPeakReceipts(coseSigner, &key.PublicKey, "test-key", "test-issuer", "test-subject", peaks) + require.NoError(t, err) + return receipts + } + + mustSignMessage := func(payload []byte, headers cose.Headers) []byte { + + headers.Protected[commoncose.HeaderLabelCWTClaims] = commoncose.NewCNFClaim( + "test-issuer", "test-subject", "test-key", coseSigner.Algorithm(), + key.PublicKey, + ) + + msg := cose.Sign1Message{ + Headers: headers, + Payload: payload, + } + err := msg.Sign(rand.Reader, nil, coseSigner) + require.NoError(t, err) + + encodable, err := commoncose.NewCoseSign1Message(&msg) + require.NoError(t, err) + encoded, err := encodable.MarshalCBOR() + require.NoError(t, err) + return encoded + } + + verifyDecoded := func(decoded *commoncose.CoseSign1Message) error { + _, ok := decoded.Headers.Protected[commoncose.HeaderLabelCWTClaims] + if ok { + return decoded.VerifyWithCWTPublicKey(nil) + } + return decoded.VerifyWithPublicKey(&key.PublicKey, nil) + } + + testDecodVerify := func(encoded []byte, t *testing.T) { + decoded, err := commoncose.NewCoseSign1MessageFromCBOR(encoded) + assert.NoError(t, err) + + err = verifyDecoded(decoded) + assert.NoError(t, err) + } + + testDecodeSingleNestedVerify := func(encoded []byte, t *testing.T) { + + var err error + var decoded *commoncose.CoseSign1Message + decoded, err = commoncose.NewCoseSign1MessageFromCBOR(encoded) + assert.NoError(t, err) + + err = verifyDecoded(decoded) + assert.NoError(t, err) + + singleNested, ok := decoded.Headers.Unprotected[int64(-65535-1)] + assert.True(t, ok) + if !ok { + return + } + b, ok := singleNested.([]byte) + assert.True(t, ok) + if !ok { + return + } + decoded, err = commoncose.NewCoseSign1MessageFromCBOR(b) + assert.NoError(t, err) + err = verifyDecoded(decoded) + assert.NoError(t, err) + return + } + + testDecodeArrayOfNestedVerify := func(encoded []byte, t *testing.T) { + + var err error + var decoded *commoncose.CoseSign1Message + decoded, err = commoncose.NewCoseSign1MessageFromCBOR(encoded) + assert.NoError(t, err) + err = verifyDecoded(decoded) + assert.NoError(t, err) + + arrayOfNested, ok := decoded.Headers.Unprotected[int64(-65535-2)] + assert.True(t, ok) + if !ok { + return + } + outer, ok := arrayOfNested.([]interface{}) + assert.True(t, ok) + for _, inner := range outer { + b, ok := inner.([]byte) + assert.True(t, ok) + if !ok { + return + } + decoded, err := commoncose.NewCoseSign1MessageFromCBOR(b) + assert.NoError(t, err) + err = verifyDecoded(decoded) + assert.NoError(t, err) + } + } + + // TestDecode is a test case specific decoder test function + type TestDecode func(encoded []byte, t *testing.T) + + type fields struct { + Protected cose.ProtectedHeader + Unprotected cose.UnprotectedHeader + Payload []byte + } + tests := []struct { + name string + fields fields + testDecode TestDecode + }{ + { + name: "cbor 
payload, unprotected header with private range array of signed peaks", + fields: fields{ + Protected: cose.ProtectedHeader{ + "alg": coseSigner.Algorithm(), + "kid": "log attestation key 1", + }, + Unprotected: cose.UnprotectedHeader{ + -65535 - 0: mustSignPeaks([][]byte{{ + 0, 1, 2, 3, 4, 5, 6, 7, + 8, 9, 10, 11, 12, 13, 14, 15, + 16, 17, 18, 19, 20, 21, 22, 23, + 24, 25, 26, 27, 28, 29, 30, 31, + }, { + 0, 1, 2, 3, 4, 5, 6, 7, + 8, 9, 10, 11, 12, 13, 14, 15, + 16, 17, 18, 19, 20, 21, 22, 23, + 24, 25, 26, 27, 28, 29, 30, 31, + }, + }), + }, + Payload: mustMarshalCBOR(MMRState{ + MMRSize: 1, + Peaks: [][]byte{{ + 0, 1, 2, 3, 4, 5, 6, 7, + 8, 9, 10, 11, 12, 13, 14, 15, + 16, 17, 18, 19, 20, 21, 22, 23, + 24, 25, 26, 27, 28, 29, 30, 31, + }}, + Timestamp: 1234, + }), + }, + testDecode: testDecodVerify, + }, + + { + name: "cbor payload, unprotected header with private range signed peak", + fields: fields{ + Protected: cose.ProtectedHeader{ + "alg": coseSigner.Algorithm(), + "kid": "log attestation key 1", + }, + Unprotected: cose.UnprotectedHeader{ + -65535 - 0: mustSignPeak([]byte{ + 0, 1, 2, 3, 4, 5, 6, 7, + 8, 9, 10, 11, 12, 13, 14, 15, + 16, 17, 18, 19, 20, 21, 22, 23, + 24, 25, 26, 27, 28, 29, 30, 31, + }), + }, + Payload: mustMarshalCBOR(MMRState{ + MMRSize: 1, + Peaks: [][]byte{{ + 0, 1, 2, 3, 4, 5, 6, 7, + 8, 9, 10, 11, 12, 13, 14, 15, + 16, 17, 18, 19, 20, 21, 22, 23, + 24, 25, 26, 27, 28, 29, 30, 31, + }}, + Timestamp: 1234, + }), + }, + testDecode: testDecodVerify, + }, + + { + name: "cbor payload, unprotected header with private range integer value", + fields: fields{ + Protected: cose.ProtectedHeader{ + "alg": coseSigner.Algorithm(), + "kid": "log attestation key 1", + }, + Unprotected: cose.UnprotectedHeader{ + -65535 - 0: 123, + }, + Payload: mustMarshalCBOR(MMRState{ + MMRSize: 1, + Peaks: [][]byte{{ + 0, 1, 2, 3, 4, 5, 6, 7, + 8, 9, 10, 11, 12, 13, 14, 15, + 16, 17, 18, 19, 20, 21, 22, 23, + 24, 25, 26, 27, 28, 29, 30, 31}}, + Timestamp: 1234, + }), + }, + testDecode: testDecodVerify, + }, + + { + name: "unprotected header with private range nested signed message", + fields: fields{ + Protected: cose.ProtectedHeader{ + "alg": coseSigner.Algorithm(), + "kid": "log attestation key 1", + }, + Unprotected: cose.UnprotectedHeader{ + -65535 - 1: mustSignMessage([]byte("hello continent"), cose.Headers{ + Protected: cose.ProtectedHeader{ + "alg": coseSigner.Algorithm(), + "kid": "log attestation key 1", + }, + }), + }, + Payload: []byte("hello world"), + }, + testDecode: testDecodeSingleNestedVerify, + }, + { + name: "unprotected header with private range nested signed message", + fields: fields{ + Protected: cose.ProtectedHeader{ + "alg": coseSigner.Algorithm(), + "kid": "log attestation key 1", + }, + Unprotected: cose.UnprotectedHeader{ + -65535 - 2: [][]byte{ + mustSignMessage([]byte("hello uk"), cose.Headers{ + Protected: cose.ProtectedHeader{ + "alg": coseSigner.Algorithm(), + "kid": "log attestation key 1", + }, + }), + mustSignMessage([]byte("hello france"), cose.Headers{ + Protected: cose.ProtectedHeader{ + "alg": coseSigner.Algorithm(), + "kid": "log attestation key 1", + }, + }), + }, + }, + Payload: []byte("hello world"), + }, + testDecode: testDecodeArrayOfNestedVerify, + }, + + { + name: "empty unprotected headers", + fields: fields{ + Protected: cose.ProtectedHeader{ + "alg": coseSigner.Algorithm(), + "kid": "log attestation key 1", + }, + Unprotected: cose.UnprotectedHeader{}, + Payload: []byte("hello world"), + }, + testDecode: testDecodVerify, + }, + { + name: 
"unprotected header with private range integer value", + fields: fields{ + Protected: cose.ProtectedHeader{ + "alg": coseSigner.Algorithm(), + "kid": "log attestation key 1", + }, + Unprotected: cose.UnprotectedHeader{ + -65535 - 0: 123, + }, + Payload: []byte("hello world"), + }, + testDecode: testDecodVerify, + }, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + + var err error + + // cborCodec, err := NewRootSignerCodec() + // require.NoError(t, err) + + headers := cose.Headers{ + Protected: tt.fields.Protected, + Unprotected: tt.fields.Unprotected, + } + + msg := cose.Sign1Message{ + Headers: headers, + Payload: tt.fields.Payload, + } + err = msg.Sign(rand.Reader, nil, coseSigner) + require.NoError(t, err) + + encodable, err := commoncose.NewCoseSign1Message(&msg) + assert.NoError(t, err) + encoded, err := encodable.MarshalCBOR() + assert.NoError(t, err) + + if tt.testDecode != nil { + tt.testDecode(encoded, t) + } + }) + } +} + func TestRootSigner_Sign1(t *testing.T) { logger.New("TEST") @@ -42,8 +377,12 @@ func TestRootSigner_Sign1(t *testing.T) { args: args{ subject: "merklelog-attestor", state: MMRState{ - MMRSize: 1, - Peaks: [][]byte{{1}}, + MMRSize: 1, + Peaks: [][]byte{{ + 0, 1, 2, 3, 4, 5, 6, 7, + 8, 9, 10, 11, 12, 13, 14, 15, + 16, 17, 18, 19, 20, 21, 22, 23, + 24, 25, 26, 27, 28, 29, 30, 31}}, Timestamp: 1234, }, }, @@ -68,9 +407,9 @@ func TestRootSigner_Sign1(t *testing.T) { signed, state, err := DecodeSignedRoot(rs.cborCodec, coseMsg) assert.NoError(t, err) - err = VerifySignedRoot( + err = VerifySignedCheckPoint( rs.cborCodec, - dtcose.NewCWTPublicKeyProvider(signed), + commoncose.NewCWTPublicKeyProvider(signed), signed, state, nil, ) // verification must fail if we haven't put the root in @@ -80,9 +419,9 @@ func TestRootSigner_Sign1(t *testing.T) { // blob then compute the root from it by passing MMRState.MMRSize to // GetRoot state.Peaks = tt.args.state.Peaks - err = VerifySignedRoot( + err = VerifySignedCheckPoint( rs.cborCodec, - dtcose.NewCWTPublicKeyProvider(signed), + commoncose.NewCWTPublicKeyProvider(signed), signed, state, nil, ) diff --git a/massifs/rootsigverify.go b/massifs/rootsigverify.go index 235408a..c0ecd41 100644 --- a/massifs/rootsigverify.go +++ b/massifs/rootsigverify.go @@ -13,11 +13,11 @@ type publicKeyProvider interface { } // DecodeSignedRoot decodes the MMRState values from the signed message -// See VerifySignedRoot for a description of how to verify a signed root +// See VerifySignedCheckPoint for a description of how to verify a signed root func DecodeSignedRoot( codec cbor.CBORCodec, msg []byte, ) (*dtcose.CoseSign1Message, MMRState, error) { - signed, err := dtcose.NewCoseSign1MessageFromCBOR(msg, newDecOptions()...) + signed, err := dtcose.NewCoseSign1MessageFromCBOR(msg, newCheckpointDecOptions()...) if err != nil { return nil, MMRState{}, err } @@ -30,19 +30,19 @@ func DecodeSignedRoot( return signed, unverifiedState, nil } -// VerifySignedRoot applies the provided state to the signed message and +// VerifySignedCheckPoint applies the provided state to the signed message and // verifies the result // -// When signing and publishing roots, we remove the root from the signed message -// prior to publishing. So that it can only be verified by recovering the root +// When signing and publishing roots, we remove the peaks from the signed message +// prior to publishing. So that it can only be verified by recovering the peaks // from the mmr at the size in the signed message. 
+func VerifySignedCheckPoint(
	codec cbor.CBORCodec, keyProvider publicKeyProvider,
	signed *dtcose.CoseSign1Message, unverifiedState MMRState, external []byte) error {
	var err error
diff --git a/massifs/signedrootreader.go b/massifs/signedrootreader.go
index c07790b..d4bd861 100644
--- a/massifs/signedrootreader.go
+++ b/massifs/signedrootreader.go
@@ -156,7 +156,7 @@ func (s *SignedRootReader) GetSignedRoot(
 // Get the signed tree head (SignedRoot) for the mmr massif.
 //
 // NOTICE: TO VERIFY YOU MUST obtain the mmr root from the log using the
-// MMRState.MMRSize in the returned MMRState. See {@link VerifySignedRoot}
+// MMRState.MMRSize in the returned MMRState. See {@link VerifySignedCheckPoint}
 //
 // This may not be the latest mmr head, but it will be the latest for the
 // argument massifIndex. If the identified massif is complete, the returned SignedRoot
diff --git a/massifs/testcommitter.go b/massifs/testcommitter.go
index 1fc2cc0..236e717 100644
--- a/massifs/testcommitter.go
+++ b/massifs/testcommitter.go
@@ -88,7 +88,7 @@ func (c *TestMinimalCommitter) ContextCommitted(ctx context.Context, tenantIdent
	if mmrSize == 0 {
		return errors.New("no leaves to seal")
	}
-	peaks, err := mmr.PeakHashes(&mc, mmrSize)
+	peaks, err := mmr.PeakHashes(&mc, mmrSize-1)
	if err != nil {
		return err
	}
diff --git a/mmr/consistentroots.go b/mmr/consistentroots.go
index 1bafe53..ae0cb1d 100644
--- a/mmr/consistentroots.go
+++ b/mmr/consistentroots.go
@@ -10,8 +10,25 @@ var (
	ErrAccumulatorProofLen = errors.New("a proof for each accumulator is required")
 )

-func ConsistentRoots(hasher hash.Hash, fromSize uint64, accumulatorfrom [][]byte, proofs [][][]byte) ([][]byte, error) {
-	frompeaks := PosPeaks(fromSize)
+// ConsistentRoots is supplied with the accumulator from which consistency is
+// being shown, and an inclusion proof for each accumulator entry in a future MMR
+// state.
+//
+// The algorithm recovers the necessary prefix (peaks) of the future
+// accumulator against which the proofs were obtained.
+// It is typical that many nodes in the original accumulator share the same peak in the new accumulator.
+// The returned list will be a descending height ordered list of elements from the
+// accumulator for the consistent future state. It may be exactly the future
+// accumulator or it may be a prefix of it.
+//
+// The order of the roots returned matches the order of the nodes in the accumulator.
+//
+// Args:
+// - ifrom: the mmr index of the last node of the complete MMR from which consistency was proven.
+// - accumulatorfrom: the node values corresponding to the peaks of the accumulator at MMR(sizeA)
+// - proofs: the inclusion proofs for each node in accumulatorfrom in MMR(sizeB)
+func ConsistentRoots(hasher hash.Hash, ifrom uint64, accumulatorfrom [][]byte, proofs [][][]byte) ([][]byte, error) {
+	frompeaks := Peaks(ifrom)

	if len(frompeaks) != len(proofs) {
		return nil, ErrAccumulatorProofLen
@@ -21,7 +38,7 @@ func ConsistentRoots(

	for iacc := 0; iacc < len(accumulatorfrom); iacc++ {

-		// remembering that peaks are 1 based (for now)
-		root := IncludedRoot(hasher, frompeaks[iacc]-1, accumulatorfrom[iacc], proofs[iacc])
+		// Peaks returns mmr indices now, so the peak can be used directly
+		root := IncludedRoot(hasher, frompeaks[iacc], accumulatorfrom[iacc], proofs[iacc])
		// The nature of MMR's is that many nodes are committed by the
		// same accumulator peak, and that peak changes with
		// low frequency.
diff --git a/mmr/draft_kat39_test.go b/mmr/draft_kat39_test.go
new file mode 100644
index 0000000..d6ac7d8
--- /dev/null
+++ b/mmr/draft_kat39_test.go
@@ -0,0 +1,224 @@
+package mmr
+
+import (
+	"crypto/sha256"
+	"encoding/hex"
+	"fmt"
+	"reflect"
+	"testing"
+
+	"github.com/stretchr/testify/assert"
+	"github.com/stretchr/testify/require"
+)
+
+// tests and KAT data corresponding to the MMRIVER draft
+
+var (
+	KAT39CompleteMMRSizes   = []uint64{1, 3, 4, 7, 8, 10, 11, 15, 16, 18, 19, 22, 23, 25, 26, 31, 32, 34, 35, 38, 39}
+	KAT39CompleteMMRIndices = []uint64{0, 2, 3, 6, 7, 9, 10, 14, 15, 17, 18, 21, 22, 24, 25, 30, 31, 33, 34, 37, 38}
+	KAT39LeafMMRIndices     = []uint64{0, 1, 3, 4, 7, 8, 10, 11, 15, 16, 18, 19, 22, 23, 25, 26, 31, 32, 34, 35, 38}
+	KAT39PeakIndices        = map[uint64][]uint64{
+		0:  {0},
+		2:  {2},
+		3:  {2, 3},
+		6:  {6},
+		7:  {6, 7},
+		9:  {6, 9},
+		10: {6, 9, 10},
+		14: {14},
+		15: {14, 15},
+		17: {14, 17},
+		18: {14, 17, 18},
+		21: {14, 21},
+		22: {14, 21, 22},
+		24: {14, 21, 24},
+		25: {14, 21, 24, 25},
+		30: {30},
+		31: {30, 31},
+		33: {30, 33},
+		34: {30, 33, 34},
+		37: {30, 37},
+		38: {30, 37, 38},
+	}
+	// Note: it's just easier all round to maintain these as hex strings and convert to bytes on demand.
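+	// KAT39PeakHashes maps each complete mmr index to its expected accumulator:
+	// the hex encoded peak hashes, highest (leftmost) peak first.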
+ KAT39PeakHashes = map[uint64][]string{ + 0: {"af5570f5a1810b7af78caf4bc70a660f0df51e42baf91d4de5b2328de0e83dfc"}, + 2: {"ad104051c516812ea5874ca3ff06d0258303623d04307c41ec80a7a18b332ef8"}, + 3: {"ad104051c516812ea5874ca3ff06d0258303623d04307c41ec80a7a18b332ef8", "d5688a52d55a02ec4aea5ec1eadfffe1c9e0ee6a4ddbe2377f98326d42dfc975"}, + 6: {"827f3213c1de0d4c6277caccc1eeca325e45dfe2c65adce1943774218db61f88"}, + 7: {"827f3213c1de0d4c6277caccc1eeca325e45dfe2c65adce1943774218db61f88", "a3eb8db89fc5123ccfd49585059f292bc40a1c0d550b860f24f84efb4760fbf2"}, + 9: {"827f3213c1de0d4c6277caccc1eeca325e45dfe2c65adce1943774218db61f88", "b8faf5f748f149b04018491a51334499fd8b6060c42a835f361fa9665562d12d"}, + 10: {"827f3213c1de0d4c6277caccc1eeca325e45dfe2c65adce1943774218db61f88", "b8faf5f748f149b04018491a51334499fd8b6060c42a835f361fa9665562d12d", "8d85f8467240628a94819b26bee26e3a9b2804334c63482deacec8d64ab4e1e7"}, + 14: {"78b2b4162eb2c58b229288bbcb5b7d97c7a1154eed3161905fb0f180eba6f112"}, + 15: {"78b2b4162eb2c58b229288bbcb5b7d97c7a1154eed3161905fb0f180eba6f112", "e66c57014a6156061ae669809ec5d735e484e8fcfd540e110c9b04f84c0b4504"}, + 17: {"78b2b4162eb2c58b229288bbcb5b7d97c7a1154eed3161905fb0f180eba6f112", "f4a0db79de0fee128fbe95ecf3509646203909dc447ae911aa29416bf6fcba21"}, + 18: {"78b2b4162eb2c58b229288bbcb5b7d97c7a1154eed3161905fb0f180eba6f112", "f4a0db79de0fee128fbe95ecf3509646203909dc447ae911aa29416bf6fcba21", "5bc67471c189d78c76461dcab6141a733bdab3799d1d69e0c419119c92e82b3d"}, + 21: {"78b2b4162eb2c58b229288bbcb5b7d97c7a1154eed3161905fb0f180eba6f112", "61b3ff808934301578c9ed7402e3dd7dfe98b630acdf26d1fd2698a3c4a22710"}, + 22: {"78b2b4162eb2c58b229288bbcb5b7d97c7a1154eed3161905fb0f180eba6f112", "61b3ff808934301578c9ed7402e3dd7dfe98b630acdf26d1fd2698a3c4a22710", "7a42e3892368f826928202014a6ca95a3d8d846df25088da80018663edf96b1c"}, + 24: {"78b2b4162eb2c58b229288bbcb5b7d97c7a1154eed3161905fb0f180eba6f112", "61b3ff808934301578c9ed7402e3dd7dfe98b630acdf26d1fd2698a3c4a22710", "dd7efba5f1824103f1fa820a5c9e6cd90a82cf123d88bd035c7e5da0aba8a9ae"}, + 25: {"78b2b4162eb2c58b229288bbcb5b7d97c7a1154eed3161905fb0f180eba6f112", "61b3ff808934301578c9ed7402e3dd7dfe98b630acdf26d1fd2698a3c4a22710", "dd7efba5f1824103f1fa820a5c9e6cd90a82cf123d88bd035c7e5da0aba8a9ae", "561f627b4213258dc8863498bb9b07c904c3c65a78c1a36bca329154d1ded213"}, + 30: {"d4fb5649422ff2eaf7b1c0b851585a8cfd14fb08ce11addb30075a96309582a7"}, + 31: {"d4fb5649422ff2eaf7b1c0b851585a8cfd14fb08ce11addb30075a96309582a7", "1664a6e0ea12d234b4911d011800bb0f8c1101a0f9a49a91ee6e2493e34d8e7b"}, + 33: {"d4fb5649422ff2eaf7b1c0b851585a8cfd14fb08ce11addb30075a96309582a7", "0c9f36783b5929d43c97fe4b170d12137e6950ef1b3a8bd254b15bbacbfdee7f"}, + 34: {"d4fb5649422ff2eaf7b1c0b851585a8cfd14fb08ce11addb30075a96309582a7", "0c9f36783b5929d43c97fe4b170d12137e6950ef1b3a8bd254b15bbacbfdee7f", "4d75f61869104baa4ccff5be73311be9bdd6cc31779301dfc699479403c8a786"}, + 37: {"d4fb5649422ff2eaf7b1c0b851585a8cfd14fb08ce11addb30075a96309582a7", "6a169105dcc487dbbae5747a0fd9b1d33a40320cf91cf9a323579139e7ff72aa"}, + 38: {"d4fb5649422ff2eaf7b1c0b851585a8cfd14fb08ce11addb30075a96309582a7", "6a169105dcc487dbbae5747a0fd9b1d33a40320cf91cf9a323579139e7ff72aa", "e9a5f5201eb3c3c856e0a224527af5ac7eb1767fb1aff9bd53ba41a60cde9785"}, + } + + KAT39Leaves = []string{ + "af5570f5a1810b7af78caf4bc70a660f0df51e42baf91d4de5b2328de0e83dfc", + "cd2662154e6d76b2b2b92e70c0cac3ccf534f9b74eb5b89819ec509083d00a50", + "d5688a52d55a02ec4aea5ec1eadfffe1c9e0ee6a4ddbe2377f98326d42dfc975", + 
"8005f02d43fa06e7d0585fb64c961d57e318b27a145c857bcd3a6bdb413ff7fc", + "a3eb8db89fc5123ccfd49585059f292bc40a1c0d550b860f24f84efb4760fbf2", + "4c0e071832d527694adea57b50dd7b2164c2a47c02940dcf26fa07c44d6d222a", + "8d85f8467240628a94819b26bee26e3a9b2804334c63482deacec8d64ab4e1e7", + "0b5000b73a53f0916c93c68f4b9b6ba8af5a10978634ae4f2237e1f3fbe324fa", + "e66c57014a6156061ae669809ec5d735e484e8fcfd540e110c9b04f84c0b4504", + "998e907bfbb34f71c66b6dc6c40fe98ca6d2d5a29755bc5a04824c36082a61d1", + "5bc67471c189d78c76461dcab6141a733bdab3799d1d69e0c419119c92e82b3d", + "1b8d0103e3a8d9ce8bda3bff71225be4b5bb18830466ae94f517321b7ecc6f94", + "7a42e3892368f826928202014a6ca95a3d8d846df25088da80018663edf96b1c", + "aed2b8245fdc8acc45eda51abc7d07e612c25f05cadd1579f3474f0bf1f6bdc6", + "561f627b4213258dc8863498bb9b07c904c3c65a78c1a36bca329154d1ded213", + "1209fe3bc3497e47376dfbd9df0600a17c63384c85f859671956d8289e5a0be8", + "1664a6e0ea12d234b4911d011800bb0f8c1101a0f9a49a91ee6e2493e34d8e7b", + "707d56f1f282aee234577e650bea2e7b18bb6131a499582be18876aba99d4b60", + "4d75f61869104baa4ccff5be73311be9bdd6cc31779301dfc699479403c8a786", + "0764c726a72f8e1d245f332a1d022fffdada0c4cb2a016886e4b33b66cb9a53f", + "e9a5f5201eb3c3c856e0a224527af5ac7eb1767fb1aff9bd53ba41a60cde9785", + } + + KAT39Nodes = []string{ + "af5570f5a1810b7af78caf4bc70a660f0df51e42baf91d4de5b2328de0e83dfc", + "cd2662154e6d76b2b2b92e70c0cac3ccf534f9b74eb5b89819ec509083d00a50", + "ad104051c516812ea5874ca3ff06d0258303623d04307c41ec80a7a18b332ef8", + "d5688a52d55a02ec4aea5ec1eadfffe1c9e0ee6a4ddbe2377f98326d42dfc975", + "8005f02d43fa06e7d0585fb64c961d57e318b27a145c857bcd3a6bdb413ff7fc", + "9a18d3bc0a7d505ef45f985992270914cc02b44c91ccabba448c546a4b70f0f0", + "827f3213c1de0d4c6277caccc1eeca325e45dfe2c65adce1943774218db61f88", + "a3eb8db89fc5123ccfd49585059f292bc40a1c0d550b860f24f84efb4760fbf2", + "4c0e071832d527694adea57b50dd7b2164c2a47c02940dcf26fa07c44d6d222a", + "b8faf5f748f149b04018491a51334499fd8b6060c42a835f361fa9665562d12d", + "8d85f8467240628a94819b26bee26e3a9b2804334c63482deacec8d64ab4e1e7", + "0b5000b73a53f0916c93c68f4b9b6ba8af5a10978634ae4f2237e1f3fbe324fa", + "6f3360ad3e99ab4ba39f2cbaf13da56ead8c9e697b03b901532ced50f7030fea", + "508326f17c5f2769338cb00105faba3bf7862ca1e5c9f63ba2287e1f3cf2807a", + "78b2b4162eb2c58b229288bbcb5b7d97c7a1154eed3161905fb0f180eba6f112", + "e66c57014a6156061ae669809ec5d735e484e8fcfd540e110c9b04f84c0b4504", + "998e907bfbb34f71c66b6dc6c40fe98ca6d2d5a29755bc5a04824c36082a61d1", + "f4a0db79de0fee128fbe95ecf3509646203909dc447ae911aa29416bf6fcba21", + "5bc67471c189d78c76461dcab6141a733bdab3799d1d69e0c419119c92e82b3d", + "1b8d0103e3a8d9ce8bda3bff71225be4b5bb18830466ae94f517321b7ecc6f94", + "0a4d7e66c92de549b765d9e2191027ff2a4ea8a7bd3eb04b0ed8ee063bad1f70", + "61b3ff808934301578c9ed7402e3dd7dfe98b630acdf26d1fd2698a3c4a22710", + "7a42e3892368f826928202014a6ca95a3d8d846df25088da80018663edf96b1c", + "aed2b8245fdc8acc45eda51abc7d07e612c25f05cadd1579f3474f0bf1f6bdc6", + "dd7efba5f1824103f1fa820a5c9e6cd90a82cf123d88bd035c7e5da0aba8a9ae", + "561f627b4213258dc8863498bb9b07c904c3c65a78c1a36bca329154d1ded213", + "1209fe3bc3497e47376dfbd9df0600a17c63384c85f859671956d8289e5a0be8", + "6b4a3bd095c63d1dffae1ac03eb8264fdce7d51d2ac26ad0ebf9847f5b9be230", + "4459f4d6c764dbaa6ebad24b0a3df644d84c3527c961c64aab2e39c58e027eb1", + "77651b3eec6774e62545ae04900c39a32841e2b4bac80e2ba93755115252aae1", + "d4fb5649422ff2eaf7b1c0b851585a8cfd14fb08ce11addb30075a96309582a7", + "1664a6e0ea12d234b4911d011800bb0f8c1101a0f9a49a91ee6e2493e34d8e7b", + 
"707d56f1f282aee234577e650bea2e7b18bb6131a499582be18876aba99d4b60", + "0c9f36783b5929d43c97fe4b170d12137e6950ef1b3a8bd254b15bbacbfdee7f", + "4d75f61869104baa4ccff5be73311be9bdd6cc31779301dfc699479403c8a786", + "0764c726a72f8e1d245f332a1d022fffdada0c4cb2a016886e4b33b66cb9a53f", + "c861552e9e17c41447d375c37928f9fa5d387d1e8470678107781c20a97ebc8f", + "6a169105dcc487dbbae5747a0fd9b1d33a40320cf91cf9a323579139e7ff72aa", + "e9a5f5201eb3c3c856e0a224527af5ac7eb1767fb1aff9bd53ba41a60cde9785", + } +) + +func hexHashList(hashes [][]byte) []string { + var hexes []string + for _, b := range hashes { + hexes = append(hexes, hex.EncodeToString(b)) + } + return hexes +} + +func mustHex2Hash(t *testing.T, hexEncodedHash string) []byte { + b, err := hex.DecodeString(hexEncodedHash) + require.NoError(t, err) + return b +} + +type testDBLinear struct { + nodes [][]byte +} + +func (db *testDBLinear) Get(i uint64) ([]byte, error) { + if int(i) < len(db.nodes) { + return db.nodes[i], nil + } + return nil, fmt.Errorf("index %d out of range", i) +} + +// Append adds a new node to the db and returns the index of the next addition +func (db *testDBLinear) Append(b []byte) (uint64, error) { + db.nodes = append(db.nodes, b) + return uint64(len(db.nodes)), nil +} + +// TestDraftAddHashedLeaf tests that AddHashedLeaf creates the expected KAT39 MMR +func TestDraftAddHashedLeaf(t *testing.T) { + db := &testDBLinear{} + for e, leaf := range KAT39Leaves { + leafHash := mustHex2Hash(t, leaf) + iNext, err := AddHashedLeaf(db, sha256.New(), leafHash) + assert.NoError(t, err) + assert.Equal(t, MMRIndex(uint64(e+1)), iNext) + } + assert.Equal(t, len(KAT39Nodes), len(db.nodes)) + for i := 0; i < len(KAT39Nodes); i++ { + assert.Equal(t, mustHex2Hash(t, KAT39Nodes[i]), db.nodes[i]) + } +} + +// TestDraftAddLeafAccumulators tests that the AddHashedLeaf produces the expected accumulator states +func TestDraftAddLeafAccumulators(t *testing.T) { + db := &testDBLinear{} + for _, leaf := range KAT39Leaves { + leafHash := mustHex2Hash(t, leaf) + _, err := AddHashedLeaf(db, sha256.New(), leafHash) + assert.NoError(t, err) + } + + // Check the peaks are all in the expected places. + for i, wantPeaks := range KAT39PeakHashes { + peaks, err := PeakHashes(db, i) + assert.NoError(t, err) + assert.Equal(t, wantPeaks, hexHashList(peaks)) + } +} + +// TestDraftKAT39PeakHashes tests that the peak indices match the KAT39 values +func TestDraftKAT39Peaks(t *testing.T) { + for mmrIndex, wantPeaks := range KAT39PeakIndices { + t.Run(fmt.Sprintf("%d", mmrIndex), func(t *testing.T) { + if got := Peaks(mmrIndex); !reflect.DeepEqual(got, wantPeaks) { + t.Errorf("Peaks() = %v, want %v", got, wantPeaks) + } + }) + } +} + +// TestDraftKAT39PeakHashes tests that the peak indices obtain the expected KAT39 hashes +func TestDraftKAT39PeakHashes(t *testing.T) { + + db := NewCanonicalTestDB(t) + + for mmrIndex, wantPeaksHex := range KAT39PeakHashes { + t.Run(fmt.Sprintf("%d", mmrIndex), func(t *testing.T) { + peakHashes, err := PeakHashes(db, mmrIndex) + require.NoError(t, err) + peakHashesHex := hexHashList(peakHashes) + if !reflect.DeepEqual(peakHashesHex, wantPeaksHex) { + t.Errorf("PeakHashes() = %v, want %v", peakHashesHex, wantPeaksHex) + } + }) + } +} diff --git a/mmr/peaks.go b/mmr/peaks.go index 7227cda..8032ce1 100644 --- a/mmr/peaks.go +++ b/mmr/peaks.go @@ -4,33 +4,28 @@ import ( "math/bits" ) -// Peaks returns the array of mountain peaks in the MMR. This is completely -// deterministic given a valid mmr size. 
-// deterministic given a valid mmr size. If the mmr size is invalid, this
-// function returns nil.
+// Peaks returns the array of mountain peak indices in the MMR.
 //
-// It is guaranteed that the peaks are listed in ascending order of position
-// value. The highest peak has the lowest position and is listed first. This is
-// a consequence of the fact that the 'little' 'down range' peaks can only appear
-// to the 'right' of the first perfect peak, and so on recursively.
+// This is completely deterministic given a complete mmr index.
+// If the mmr index is not a complete mmr index, or is otherwise invalid, this function returns nil.
 //
-// Note that as a matter of implementation convenience and efficiency the peaks
-// are returned as *one based positions*
+// The peaks are listed in ascending order of mmr index value.
+// The highest peak has the lowest index and is listed first. This is a
+// consequence of the fact that the 'little' 'down range' peaks can only appear
+// to the 'right' of the first perfect peak, and so on recursively.
 //
-// So given the example below, which has an mmrSize of 17, the peaks are [15, 18]
+// Given the example below, which has an mmrSize of 10 (mmrIndex 9), the peaks are [6, 9]:
 //
-//	3              15
-//	            /      \
-//	           /        \
-//	          /          \
-//	2        7            14
-//	       /   \         /   \
-//	1     3     6      10     13     18
-//	     / \   / \    /  \   /  \   /  \
-//	0   1   2 4   5  8    9 11  12 16  17
-func PosPeaks(mmrSize uint64) []uint64 {
-	if mmrSize == 0 {
-		return nil
-	}
+//	2       6
+//	      /   \
+//	1    2     5     9
+//	    / \   / \   / \
+//	0  0   1 3   4 7   8
+func Peaks(mmrIndex uint64) []uint64 {
+
+	// The peaks algorithm works using the binary properties of the mmr *positions*
+
+	mmrSize := mmrIndex + 1

	// catch invalid range, where siblings exist but no parent exists
	if PosHeight(mmrSize+1) > PosHeight(mmrSize) {
@@ -44,24 +39,37 @@ func Peaks(mmrIndex uint64) []uint64 {
		// This next step computes the ^2 floor of the bits in mmrSize, which
		// picks out the highest peak (and also left most) remaining peak in
		// mmrSize (See TopPeak)
-		peakSize := TopPeak(mmrSize)
+		peakSize := TopPeak(mmrSize-1) + 1 // +1 to recover the position form
		// Because we *subtract* the computed peak size from mmrSize, we need to
		// recover the actual peak position. The arithmetic all works out so we
		// just accumulate the peakSizes as we go, and the result is always the
		// peak value against the original mmrSize we were given.
		peak = peak + peakSize
-		peaks = append(peaks, peak)
+		peaks = append(peaks, peak-1)
		mmrSize -= peakSize
	}

	return peaks
 }

-func PeakHashes(store indexStoreGetter, mmrSize uint64) ([][]byte, error) {
+// PosPeaks is a deprecated variant of Peaks which returns an array of mmr positions rather than indices.
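+//
+// For example, the two forms relate as follows (values consistent with the KAT39 test data):
+//
+//	PosPeaks(11) == []uint64{7, 10, 11}
+//	Peaks(10)    == []uint64{6, 9, 10}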
+func PosPeaks(mmrSize uint64) []uint64 {
+
+	peaks := Peaks(mmrSize - 1)
+	if peaks == nil {
+		return nil
+	}
+	for i, p := range peaks {
+		peaks[i] = p + 1
+	}
+	return peaks
+}
+
+func PeakHashes(store indexStoreGetter, mmrIndex uint64) ([][]byte, error) {
	// Note: we can implement this directly any time we want, but lets re-use the testing for Peaks
	var path [][]byte
-	for _, pos := range PosPeaks(mmrSize) {
-		stored, err := store.Get(pos - 1)
+	for _, i := range Peaks(mmrIndex) {
+		stored, err := store.Get(i)
		if err != nil {
			return nil, err
		}
@@ -139,40 +147,40 @@ func PeakIndex(leafCount uint64, d int) int {
	return int(bits.OnesCount64(leafCount)) - n
 }

-// TopPeak returns the smallest, leftmost, peak containing *or equal to* pos
+// TopPeak returns the smallest, leftmost, peak containing *or equal to* i
 //
 // This is essentially a ^2 *floor* function for the accumulation of bits:
 //
-// TopPeak(1) = TopPeak(2) = 1
-// TopPeak(2) = TopPeak(3) = TopPeak(4) = TopPeak(5) = TopPeak(6) = 3
-// TopPeak(7) = 7
+// TopPeak(0) = TopPeak(1) = 0
+// TopPeak(2) = TopPeak(3) = TopPeak(4) = TopPeak(5) = 2
+// TopPeak(6) = 6
 //
-//	2        7
-//	       /   \
-//	1     3     6    10
-//	     / \   / \   / \
-//	0   1   2 4   5 8   9 11
-func TopPeak(pos uint64) uint64 {
+//	2       6
+//	      /   \
+//	1    2     5     9
+//	    / \   / \   / \
+//	0  0   1 3   4 7   8  10
+func TopPeak(i uint64) uint64 {

-	// This works by working out the next peak up then subtracting 1, which is a
-	// flooring function for the bits over the current peak
-	return 1<<(BitLength64(pos+1)-1) - 1
+	// This works by working out the next peak *position* up, then subtracting 1,
+	// which is a flooring function for the bits over the current peak. The
+	// final -1 converts the resulting position to an mmr index.
+	return 1<<(BitLength64(i+2)-1) - 2
 }

-// TopHeight returns the index height of the largest perfect peak contained in, or exactly, pos
+// TopHeight returns the index height of the largest perfect peak contained in, or exactly, i
 // This is essentially a ^2 *floor* function for the accumulation of bits:
 //
-// TopHeight(1) = TopHeight(2) = 0
-// PeakFloor(2) = PeakFloor(3) = PeakFloor(4) = PeakFloor(5) = PeakFloor(6) = 1
-// PeakFloor(7) = 2
+// TopHeight(0) = TopHeight(1) = 0
+// TopHeight(2) = TopHeight(3) = TopHeight(4) = TopHeight(5) = 1
+// TopHeight(6) = 2
 //
-//	2        7
-//	       /   \
-//	1     3     6    10
-//	     / \   / \   / \
-//	0   1   2 4   5 8   9 11
-func TopHeight(pos uint64) uint64 {
-	return BitLength64(pos+1) - 1
+//	2       6
+//	      /   \
+//	1    2     5     9
+//	    / \   / \   / \
+//	0  0   1 3   4 7   8  10
+func TopHeight(i uint64) uint64 {
+	return BitLength64(i+2) - 2
 }

 // PeaksBitmap returns a bit mask where a 1 corresponds to a peak and the position
diff --git a/mmr/peaks_test.go b/mmr/peaks_test.go
index 1d17a87..ef3067a 100644
--- a/mmr/peaks_test.go
+++ b/mmr/peaks_test.go
@@ -1,13 +1,11 @@
 package mmr

 import (
-	"encoding/hex"
	"fmt"
	"reflect"
	"testing"

	"github.com/stretchr/testify/assert"
-	"github.com/stretchr/testify/require"
 )

 func TestPosPeaks(t *testing.T) {
@@ -37,86 +35,30 @@ func TestPosPeaks(t *testing.T) {
	}
 }

-func TestPeaksKAT_MMR39(t *testing.T) {
-	tests := []struct {
-		mmrSize uint64
-		want    []uint64
-	}{
-		{1, []uint64{1}},
-		{3, []uint64{3}},
-		{4, []uint64{3, 4}},
-		{7, []uint64{7}},
-		{8, []uint64{7, 8}},
-		{10, []uint64{7, 10}},
-		{11, []uint64{7, 10, 11}},
-		{15, []uint64{15}},
-		{16, []uint64{15, 16}},
-		{18, []uint64{15, 18}},
-		{19, []uint64{15, 18, 19}},
-		{22, []uint64{15, 22}},
-		{23, []uint64{15, 22, 23}},
-		{25, []uint64{15, 22, 25}},
-		{26, []uint64{15, 22, 25, 26}},
-		{31, []uint64{31}},
-		{32, []uint64{31, 32}},
-		{34, []uint64{31, 34}},
-		{35, []uint64{31, 34, 35}},
-		{38, []uint64{31, 38}},
-		{39, []uint64{31, 38, 39}},
-	}
-	for _, tt := range tests {
-		t.Run(fmt.Sprintf("%d", tt.mmrSize), func(t *testing.T) {
-			if got := 
PosPeaks(tt.mmrSize); !reflect.DeepEqual(got, tt.want) { - t.Errorf("PosPeaks() = %v, want %v", got, tt.want) - } - }) +func TestPeaks(t *testing.T) { + type args struct { + mmrIndex uint64 } -} - -func TestPeakHashesKAT_MMR39(t *testing.T) { tests := []struct { - mmrSize uint64 - want []string + name string + args args + want []uint64 }{ - {1, []string{"af5570f5a1810b7af78caf4bc70a660f0df51e42baf91d4de5b2328de0e83dfc"}}, - {3, []string{"ad104051c516812ea5874ca3ff06d0258303623d04307c41ec80a7a18b332ef8"}}, - {4, []string{"ad104051c516812ea5874ca3ff06d0258303623d04307c41ec80a7a18b332ef8", "d5688a52d55a02ec4aea5ec1eadfffe1c9e0ee6a4ddbe2377f98326d42dfc975"}}, - {7, []string{"827f3213c1de0d4c6277caccc1eeca325e45dfe2c65adce1943774218db61f88"}}, - {8, []string{"827f3213c1de0d4c6277caccc1eeca325e45dfe2c65adce1943774218db61f88", "a3eb8db89fc5123ccfd49585059f292bc40a1c0d550b860f24f84efb4760fbf2"}}, - {10, []string{"827f3213c1de0d4c6277caccc1eeca325e45dfe2c65adce1943774218db61f88", "b8faf5f748f149b04018491a51334499fd8b6060c42a835f361fa9665562d12d"}}, - {11, []string{"827f3213c1de0d4c6277caccc1eeca325e45dfe2c65adce1943774218db61f88", "b8faf5f748f149b04018491a51334499fd8b6060c42a835f361fa9665562d12d", "8d85f8467240628a94819b26bee26e3a9b2804334c63482deacec8d64ab4e1e7"}}, - {15, []string{"78b2b4162eb2c58b229288bbcb5b7d97c7a1154eed3161905fb0f180eba6f112"}}, - {16, []string{"78b2b4162eb2c58b229288bbcb5b7d97c7a1154eed3161905fb0f180eba6f112", "e66c57014a6156061ae669809ec5d735e484e8fcfd540e110c9b04f84c0b4504"}}, - {18, []string{"78b2b4162eb2c58b229288bbcb5b7d97c7a1154eed3161905fb0f180eba6f112", "f4a0db79de0fee128fbe95ecf3509646203909dc447ae911aa29416bf6fcba21"}}, - {19, []string{"78b2b4162eb2c58b229288bbcb5b7d97c7a1154eed3161905fb0f180eba6f112", "f4a0db79de0fee128fbe95ecf3509646203909dc447ae911aa29416bf6fcba21", "5bc67471c189d78c76461dcab6141a733bdab3799d1d69e0c419119c92e82b3d"}}, - {22, []string{"78b2b4162eb2c58b229288bbcb5b7d97c7a1154eed3161905fb0f180eba6f112", "61b3ff808934301578c9ed7402e3dd7dfe98b630acdf26d1fd2698a3c4a22710"}}, - {23, []string{"78b2b4162eb2c58b229288bbcb5b7d97c7a1154eed3161905fb0f180eba6f112", "61b3ff808934301578c9ed7402e3dd7dfe98b630acdf26d1fd2698a3c4a22710", "7a42e3892368f826928202014a6ca95a3d8d846df25088da80018663edf96b1c"}}, - {25, []string{"78b2b4162eb2c58b229288bbcb5b7d97c7a1154eed3161905fb0f180eba6f112", "61b3ff808934301578c9ed7402e3dd7dfe98b630acdf26d1fd2698a3c4a22710", "dd7efba5f1824103f1fa820a5c9e6cd90a82cf123d88bd035c7e5da0aba8a9ae"}}, - {26, []string{"78b2b4162eb2c58b229288bbcb5b7d97c7a1154eed3161905fb0f180eba6f112", "61b3ff808934301578c9ed7402e3dd7dfe98b630acdf26d1fd2698a3c4a22710", "dd7efba5f1824103f1fa820a5c9e6cd90a82cf123d88bd035c7e5da0aba8a9ae", "561f627b4213258dc8863498bb9b07c904c3c65a78c1a36bca329154d1ded213"}}, - {31, []string{"d4fb5649422ff2eaf7b1c0b851585a8cfd14fb08ce11addb30075a96309582a7"}}, - {32, []string{"d4fb5649422ff2eaf7b1c0b851585a8cfd14fb08ce11addb30075a96309582a7", "1664a6e0ea12d234b4911d011800bb0f8c1101a0f9a49a91ee6e2493e34d8e7b"}}, - {34, []string{"d4fb5649422ff2eaf7b1c0b851585a8cfd14fb08ce11addb30075a96309582a7", "0c9f36783b5929d43c97fe4b170d12137e6950ef1b3a8bd254b15bbacbfdee7f"}}, - {35, []string{"d4fb5649422ff2eaf7b1c0b851585a8cfd14fb08ce11addb30075a96309582a7", "0c9f36783b5929d43c97fe4b170d12137e6950ef1b3a8bd254b15bbacbfdee7f", "4d75f61869104baa4ccff5be73311be9bdd6cc31779301dfc699479403c8a786"}}, - {38, []string{"d4fb5649422ff2eaf7b1c0b851585a8cfd14fb08ce11addb30075a96309582a7", "6a169105dcc487dbbae5747a0fd9b1d33a40320cf91cf9a323579139e7ff72aa"}}, - 
{39, []string{"d4fb5649422ff2eaf7b1c0b851585a8cfd14fb08ce11addb30075a96309582a7", "6a169105dcc487dbbae5747a0fd9b1d33a40320cf91cf9a323579139e7ff72aa", "e9a5f5201eb3c3c856e0a224527af5ac7eb1767fb1aff9bd53ba41a60cde9785"}}, - } - db := NewCanonicalTestDB(t) - - hexHashList := func(hashes [][]byte) []string { - var hexes []string - for _, b := range hashes { - hexes = append(hexes, hex.EncodeToString(b)) - } - return hexes + {"complete mmr(index 123) gives two peaks", args{FirstMMRSize(123)}, []uint64{126, 127}}, + {"index 123 gives nil", args{123}, []uint64(nil)}, + {"complete index 10 gives three peaks", args{10}, []uint64{6, 9, 10}}, + {"complete index 25 gives 4 peaks", args{25}, []uint64{14, 21, 24, 25}}, + {"complete index 9 gives two peaks", args{9}, []uint64{6, 9}}, + {"index 13, which is not a complete mmr index, gives nil", args{13}, nil}, + {"complete index 14, which is perfectly filled, gives a single peak", args{14}, []uint64{14}}, + {"complete index 17 gives two peaks", args{17}, []uint64{14, 17}}, + {"complete index 21 gives two peaks", args{21}, []uint64{14, 21}}, } for _, tt := range tests { - t.Run(fmt.Sprintf("%d", tt.mmrSize), func(t *testing.T) { - hashes, err := PeakHashes(db, tt.mmrSize) - require.NoError(t, err) - hexes := hexHashList(hashes) - if !reflect.DeepEqual(hexes, tt.want) { - t.Errorf("PeakHashes() = %v, want %v", hexes, tt.want) + t.Run(tt.name, func(t *testing.T) { + if got := Peaks(tt.args.mmrIndex); !reflect.DeepEqual(got, tt.want) { + t.Errorf("Peaks() = %v, want %v", got, tt.want) } }) } @@ -185,25 +127,29 @@ func TestAncestors(t *testing.T) { func TestTopHeight(t *testing.T) { type args struct { - mmrSize uint64 + mmrIndex uint64 } tests := []struct { name string args args want uint64 }{ - {"size 0 corner case", args{0}, 0}, - {"size 1 corner case", args{1}, 1}, - {"size 2", args{2}, 1}, - {"size 3", args{3}, 2}, - {"size 4, two peaks, single solo at i=3", args{4}, 2}, - {"size 5, three peaks, two solo at i=3, i=4", args{5}, 2}, - {"size 6, two perfect peaks,i=2, i=5 (note add does not ever leave the MMR in this state)", args{6}, 2}, - {"size 7, one perfect peaks at i=6", args{7}, 3}, + // 2 6 + // / \ + // 1 2 5 9 + // / \ / \ / \ + // 0 0 1 3 4 7 8 10 + + {"complete index 0 corner case", args{0}, 0}, + {"complete index 2", args{2}, 1}, + {"complete index 3, two peaks, single solo at i=3", args{3}, 1}, + {"index 4, three peaks, two solo at i=3, i=4", args{4}, 1}, + {"index 5, two perfect peaks at i=2, i=5", args{5}, 1}, + {"complete index 6, one perfect peak at i=6", args{6}, 2}, } for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { - got := TopHeight(tt.args.mmrSize) + got := TopHeight(tt.args.mmrIndex) if got != tt.want { t.Errorf("HighestPos() got = %v, want %v", got, tt.want) } @@ -220,13 +166,13 @@ func topPeakLongHand(pos uint64) uint64 { } func TestTopPeak(t *testing.T) { - for pos := uint64(1); pos <= 39; pos++ { - t.Run(fmt.Sprintf("TopPeak(%d)", pos), func(t *testing.T) { - want := topPeakLongHand(pos) - x := 1<<(BitLength64(pos+1)-1) - 1 - fmt.Printf("%d %4b %4b %d\n", x, x, pos, want) - if got := TopPeak(pos); got != want { - t.Errorf("TopPeak(%d) = %v, want %v", pos, got, want) + for i := uint64(0); i < 39; i++ { + t.Run(fmt.Sprintf("TopPeak(%d)", i), func(t *testing.T) { + want := topPeakLongHand(i+1) - 1 + x := 1<<(BitLength64(i+2)-1) - 2 + fmt.Printf("%d %4b %4b %d\n", x, x, i, want) + if got := TopPeak(i); got != want { + t.Errorf("TopPeak(%d) = %v, want %v", i, got, want) } }) }
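For review orientation, a small self-contained sketch of the index based peak queries exercised by the tests above. This is a sketch only: it assumes the exported Peaks, TopPeak and TopHeight signatures introduced in this diff, and the go-datatrails-merklelog module path used elsewhere in this change set.

	package main

	import (
		"fmt"

		"github.com/datatrails/go-datatrails-merklelog/mmr"
	)

	func main() {
		// MMR(11): indices 0..10, with peaks at indices 6, 9 and 10,
		// matching the "complete index 10 gives three peaks" case above.
		fmt.Println(mmr.Peaks(10)) // [6 9 10]

		// 12 is not a complete mmr index, so no peaks are returned.
		fmt.Println(mmr.Peaks(12) == nil) // true

		// The largest perfect peak at or below index 10 ends at index 6,
		// and its index height is 2.
		fmt.Println(mmr.TopPeak(10), mmr.TopHeight(10)) // 6 2
	}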
diff --git a/mmr/peaksold.go index a3d99dc..1d15f19 100644 --- a/mmr/peaksold.go +++ b/mmr/peaksold.go @@ -40,7 +40,7 @@ func PeaksOld(mmrSize uint64) []uint64 { } // The top peak is always the left most and, when counting from 1, will have all binary '1's - top := TopPeak(mmrSize) + top := TopPeak(mmrSize-1) + 1 peaks := []uint64{top} peak := top diff --git a/mmr/proof.go index 4e76f44..d9176d6 100644 --- a/mmr/proof.go +++ b/mmr/proof.go @@ -38,7 +38,7 @@ func GetProofPeakRoot(mmrSize uint64, mmrIndex uint64, peakHashes [][]byte, proo // for leaf nodes, the peak height index is the proof length - 1, for // generality, to account for interior nodes, we use IndexHeight here. // In contexts where consistency proofs are being generated to check log - // extension, typically the returned height from IndexProofPath is + // extension, typically the returned height from InclusionProofPath is // available. heightIndex := IndexHeight(mmrIndex) @@ -86,7 +86,7 @@ func GetProofPeakIndex(mmrSize uint64, d int, heightIndex uint8) int { // 1 2 5 9 12 17 20 24 // / \ / \ / \ / \ / \ // 0 0 1 3 4 7 8 10 11 15 16 18 19 22 23 25 -func IndexProof(store indexStoreGetter, mmrSize uint64, i uint64) ([][]byte, error) { +func InclusionProof(store indexStoreGetter, mmrLastIndex uint64, i uint64) ([][]byte, error) { var iSibling uint64 @@ -116,7 +116,7 @@ func IndexProof(store indexStoreGetter, mmrSize uint64, i uint64) ([][]byte, err // When the computed sibling exceeds the range of MMR(C+1), // we have completed the path. - if iSibling >= mmrSize { + if iSibling > mmrLastIndex { return proof, nil } @@ -131,9 +131,10 @@ func IndexProof(store indexStoreGetter, mmrSize uint64, i uint64) ([][]byte, err } } -// IndexProofPath returns the mmr indices identifying the witness nodes for mmr index i +// InclusionProofPath returns the mmr indices identifying the witness nodes for mmr index i +// // This method allows tooling to individually audit the proof path node values for a given index. -func IndexProofPath(mmrSize uint64, i uint64) ([]uint64, error) { +func InclusionProofPath(mmrLastIndex uint64, i uint64) ([]uint64, error) { var iSibling uint64 @@ -163,7 +164,7 @@ func IndexProofPath(mmrSize uint64, i uint64) ([]uint64, error) { // When the computed sibling exceeds the range of MMR(C+1), // we have completed the path. - if iSibling >= mmrSize { + if iSibling > mmrLastIndex { return proof, nil } diff --git a/mmr/proof_test.go index bc3e9c7..cdad545 100644 --- a/mmr/proof_test.go +++ b/mmr/proof_test.go @@ -91,7 +91,7 @@ func TestGetRoot(t *testing.T) { } } -func TestIndexProofLocal(t *testing.T) { +func TestInclusionProofLocal(t *testing.T) { db := NewCanonicalTestDB(t) // H return the node hash for index i from the canonical test tree.
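The rename above is also a convention change: InclusionProof and InclusionProofPath now take the *last index* of the MMR rather than its size. A minimal sketch of the round trip under the new convention, written as a fragment that would live in the mmr package itself (it assumes the in-package indexStoreGetter interface and the VerifyInclusion signature changed later in this diff, with crypto/sha256 imported):

	// verifyNode re-proves node i of the complete MMR with size s, and checks it
	// against the accumulator peak selected for that proof.
	func verifyNode(db indexStoreGetter, s, i uint64) (bool, error) {
		// witness path to the peak containing i; note s-1, the last index
		proof, err := InclusionProof(db, s-1, i)
		if err != nil {
			return false, err
		}
		nodeHash, err := db.Get(i)
		if err != nil {
			return false, err
		}
		// VerifyInclusion continues to take the size, and resolves the peak itself
		return VerifyInclusion(db, sha256.New(), s, nodeHash, i, proof)
	}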
@@ -189,22 +189,22 @@ func TestIndexProofLocal(t *testing.T) { if mmrSize == 0 { mmrSize = tt.args.store.Next() } - got, got1, err := IndexProofLocal(mmrSize, tt.args.store, tt.args.i) + got, got1, err := InclusionProofLocal(mmrSize, tt.args.store, tt.args.i) if (err != nil) != tt.wantErr { - t.Errorf("IndexProofLocal() error = %v, wantErr %v", err, tt.wantErr) + t.Errorf("InclusionProofLocal() error = %v, wantErr %v", err, tt.wantErr) return } if !reflect.DeepEqual(got, tt.want) { - t.Errorf("IndexProofLocal() = %v, want %v", got, tt.want) + t.Errorf("InclusionProofLocal() = %v, want %v", got, tt.want) } if got1 != tt.want1 { - t.Errorf("IndexProofLocal() = %v, want %v", got1, tt.want1) + t.Errorf("InclusionProofLocal() = %v, want %v", got1, tt.want1) } }) } } -func TestIndexProofLocalOld(t *testing.T) { +func TestInclusionProofLocalOld(t *testing.T) { db := NewCanonicalTestDB(t) // H return the node hash for index i from the canonical test tree. @@ -302,16 +302,16 @@ func TestIndexProofLocalOld(t *testing.T) { if mmrSize == 0 { mmrSize = tt.args.store.Next() } - got, got1, err := IndexProofLocalOld(mmrSize, tt.args.store, tt.args.i) + got, got1, err := InclusionProofLocalOld(mmrSize, tt.args.store, tt.args.i) if (err != nil) != tt.wantErr { - t.Errorf("IndexProofLocal() error = %v, wantErr %v", err, tt.wantErr) + t.Errorf("InclusionProofLocal() error = %v, wantErr %v", err, tt.wantErr) return } if !reflect.DeepEqual(got, tt.want) { - t.Errorf("IndexProofLocal() = %v, want %v", got, tt.want) + t.Errorf("InclusionProofLocal() = %v, want %v", got, tt.want) } if got1 != tt.want1 { - t.Errorf("IndexProofLocal() = %v, want %v", got1, tt.want1) + t.Errorf("InclusionProofLocal() = %v, want %v", got1, tt.want1) } }) } diff --git a/mmr/proofbagged.go b/mmr/proofbagged.go index 18986da..5e8e692 100644 --- a/mmr/proofbagged.go +++ b/mmr/proofbagged.go @@ -17,9 +17,9 @@ func GetRoot(mmrSize uint64, store indexStoreGetter, hasher hash.Hash) ([]byte, return BagPeaksRHS(store, hasher, 0, peaks) } -// IndexProofBagged provides a proof of inclusion for the leaf at index i against the full MMR +// InclusionProofBagged provides a proof of inclusion for the leaf at index i against the full MMR // -// It relies on the methods IndexProofLocal, BagPeaksRHS and PeaksLHS for +// It relies on the methods InclusionProofLocal, BagPeaksRHS and PeaksLHS for // collecting the necessary MMR elements and then combines the results into a // final verifiable commitment for the whole MMR. // @@ -49,12 +49,12 @@ func GetRoot(mmrSize uint64, store indexStoreGetter, hasher hash.Hash) ([]byte, // .___________. .___________. // | | // | reversed(PeaksLHS) -// IndexProofPath +// InclusionProofPath // // Note that right-sibling is omitted if there is none, and similarly, the left // peaks. The individual functions producing those elements contain more detail // over the construction of their particular proof component. 
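+//
+// A usage sketch for the bagged form (read off the verifybagged tests later in
+// this diff, not an additional guarantee of this function):
+//
+//	root, _ := GetRoot(mmrSize, store, hasher)
+//	proof, _ := InclusionProofBagged(mmrSize, store, hasher, i)
+//	nodeHash, _ := store.Get(i)
+//	ok := VerifyInclusionBagged(mmrSize, hasher, nodeHash, i, proof, root)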
-func IndexProofBagged(mmrSize uint64, store indexStoreGetter, hasher hash.Hash, i uint64) ([][]byte, error) { +func InclusionProofBagged(mmrSize uint64, store indexStoreGetter, hasher hash.Hash, i uint64) ([][]byte, error) { var err error var proof [][]byte @@ -62,7 +62,7 @@ func IndexProofBagged(mmrSize uint64, store indexStoreGetter, hasher hash.Hash, var leftPath [][]byte var rightSibling []byte - if proof, iLocalPeak, err = IndexProofLocal(mmrSize, store, i); err != nil { + if proof, iLocalPeak, err = InclusionProofLocal(mmrSize, store, i); err != nil { return nil, err } @@ -151,7 +151,7 @@ func PeakBagRHS( return peakHashes, nil } -// IndexProofLocal collects the merkle root proof for the local MMR peak containing index i +// InclusionProofLocal collects the merkle root proof for the local MMR peak containing index i // // So for the follwing index tree, and i=15 with mmrSize = 26 we would obtain the path // @@ -170,7 +170,7 @@ func PeakBagRHS( // 1 2 5 9 12 17 20 24 // / \ / \ / \ / \ / \ // 0 0 1 3 4 7 8 10 11 15 16 18 19 22 23 25 -func IndexProofLocal(mmrSize uint64, store indexStoreGetter, i uint64) ([][]byte, uint64, error) { +func InclusionProofLocal(mmrSize uint64, store indexStoreGetter, i uint64) ([][]byte, uint64, error) { var proof [][]byte height := IndexHeight(i) // allows for proofs of interior nodes diff --git a/mmr/proofofconsistency.go b/mmr/proofofconsistency.go index e9276fb..8407ff7 100644 --- a/mmr/proofofconsistency.go +++ b/mmr/proofofconsistency.go @@ -29,21 +29,21 @@ type ConsistencyProof struct { // and MMR(B) for each "old" peak in MMR(A) we show there is a path to a "new" // or "same" peak in MMR(B) func IndexConsistencyProof( - store indexStoreGetter, mmrSizeA, mmrSizeB uint64, + store indexStoreGetter, mmrIndexA, mmrIndexB uint64, ) (ConsistencyProof, error) { proof := ConsistencyProof{ - MMRSizeA: mmrSizeA, - MMRSizeB: mmrSizeB, + MMRSizeA: mmrIndexA + 1, + MMRSizeB: mmrIndexB + 1, } // Find the peaks corresponding to the previous mmr - peaksA := PosPeaks(mmrSizeA) + peaksA := Peaks(mmrIndexA) // Now generate peak proofs against the new mmr size, using the peak indices // as the input indices to prove for _, iPeakA := range peaksA { - peakProof, err := IndexProof(store, mmrSizeB, iPeakA-1) + peakProof, err := InclusionProof(store, mmrIndexB, iPeakA) if err != nil { return ConsistencyProof{}, err } diff --git a/mmr/proofofconsistency_test.go b/mmr/proofofconsistency_test.go index 4ac1ea8..2b9a75c 100644 --- a/mmr/proofofconsistency_test.go +++ b/mmr/proofofconsistency_test.go @@ -19,7 +19,7 @@ func testMinimal(t *testing.T, hasher hash.Hash, store *testDb) { t.Errorf(": %v", err) } - peakProof, err := IndexProofBagged(11, store, hasher, 0) + peakProof, err := InclusionProofBagged(11, store, hasher, 0) if err != nil { t.Errorf(": %v", err) } @@ -35,7 +35,7 @@ func testMinimal(t *testing.T, hasher hash.Hash, store *testDb) { t.Errorf("it is not ok") } - peakProof, err = IndexProofBagged(11, store, hasher, 1) + peakProof, err = InclusionProofBagged(11, store, hasher, 1) if err != nil { t.Errorf(": %v", err) } @@ -51,7 +51,7 @@ func testMinimal(t *testing.T, hasher hash.Hash, store *testDb) { t.Errorf("it is not ok") } - peakProof, err = IndexProofBagged(11, store, hasher, 2) + peakProof, err = InclusionProofBagged(11, store, hasher, 2) if err != nil { t.Errorf(": %v", err) } @@ -67,7 +67,7 @@ func testMinimal(t *testing.T, hasher hash.Hash, store *testDb) { t.Errorf("it is not ok") } - peakProof, err = IndexProofBagged(11, store, hasher, 6) + peakProof, err 
= InclusionProofBagged(11, store, hasher, 6) if err != nil { t.Errorf(": %v", err) } @@ -166,7 +166,7 @@ func TestIndexConsistencyProof(t *testing.T) { } for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { - got, err := IndexConsistencyProof(store, tt.args.mmrSizeA, tt.args.mmrSizeB) + got, err := IndexConsistencyProof(store, tt.args.mmrSizeA-1, tt.args.mmrSizeB-1) if (err != nil) != tt.wantProofErr { t.Errorf("IndexConsistencyProof() error = %v, wantErr %v", err, tt.wantProofErr) return @@ -191,14 +191,14 @@ func TestIndexConsistencyProof(t *testing.T) { t.Errorf("IndexConsistencyProof(), want %v, got %v", tt.wantProof.Path, got.Path) } - peakHashesA, err := PeakHashes(store, got.MMRSizeA) + peakHashesA, err := PeakHashes(store, got.MMRSizeA-1) if tt.wantPeaksA != nil { require.NoError(t, err) fmt.Printf("peakHashesA expect: %s\n", proofPathStringer(peakHashesA, ", ")) fmt.Printf("peakHashesA got : %s\n", proofPathStringer(peakHashesA, ", ")) assert.Equal(t, peakHashesA, tt.wantPeaksA) } - peakHashesB, err := PeakHashes(store, got.MMRSizeB) + peakHashesB, err := PeakHashes(store, got.MMRSizeB-1) if tt.wantPeaksB != nil { require.NoError(t, err) fmt.Printf("peakHashesB expect: %s\n", proofPathStringer(peakHashesB, ", ")) diff --git a/mmr/proofofconsistencybagged.go index 2eda601..2d9fba3 100644 --- a/mmr/proofofconsistencybagged.go +++ b/mmr/proofofconsistencybagged.go @@ -30,7 +30,7 @@ func IndexConsistencyProofBagged( // Now generate peak proofs against the new mmr size, using the peak indices // as the input indices to prove for _, iPeakA := range peaksA { - peakProof, err := IndexProofBagged(mmrSizeB, store, hasher, iPeakA-1) + peakProof, err := InclusionProofBagged(mmrSizeB, store, hasher, iPeakA-1) if err != nil { return ConsistencyProof{}, err } diff --git a/mmr/proofold.go index 06121d2..fbcf4d0 100644 --- a/mmr/proofold.go +++ b/mmr/proofold.go @@ -4,8 +4,8 @@ package mmr // will delete this file. A reason to keep it around is that testing may benefit // from having multiple implementations of key algorithms -// IndexProofLocalOld is depreciated and retained only for testing -// See IndexProofLocal instead +// InclusionProofLocalOld is deprecated and retained only for testing +// See InclusionProofLocal instead // // collects the merkle root proof for the local MMR peak containing index i // @@ -26,7 +26,7 @@ package mmr // 1 2 5 9 12 17 20 24 // / \ / \ / \ / \ / \ // 0 0 1 3 4 7 8 10 11 15 16 18 19 22 23 25 -func IndexProofLocalOld(mmrSize uint64, store indexStoreGetter, i uint64) ([][]byte, uint64, error) { +func InclusionProofLocalOld(mmrSize uint64, store indexStoreGetter, i uint64) ([][]byte, uint64, error) { var proof [][]byte height := IndexHeight(i) // allows for proofs of interior nodes diff --git a/mmr/proofrefresh.go index 9392b3d..8a5d509 100644 --- a/mmr/proofrefresh.go +++ b/mmr/proofrefresh.go @@ -20,10 +20,10 @@ type ConsistencyProofLocal struct { PeakIndexB uint64 } -// IndexProofLocalExtend produces a proof which can verify for two mmr sizes +// InclusionProofLocalExtend produces a proof which can be verified for two mmr sizes // It shows that the proof for mmrSizeB is an *extension* of the proof for // mmrSizeA.
-func IndexProofLocalExtend(mmrSizeA, mmrSizeB uint64, store indexStoreGetter, i uint64) (ConsistencyProofLocal, error) { +func InclusionProofLocalExtend(mmrSizeA, mmrSizeB uint64, store indexStoreGetter, i uint64) (ConsistencyProofLocal, error) { height := uint64(0) diff --git a/mmr/spurs_test.go b/mmr/spurs_test.go index f7bf637..c5bf30a 100644 --- a/mmr/spurs_test.go +++ b/mmr/spurs_test.go @@ -2,6 +2,7 @@ package mmr import ( "fmt" + "math/bits" "testing" "github.com/stretchr/testify/assert" @@ -120,6 +121,8 @@ func TestLeafMinusSpurSum(t *testing.T) { t.Run(fmt.Sprintf("%d -> %d", iLeaf, want), func(t *testing.T) { sum := LeafMinusSpurSum(uint64(iLeaf)) assert.Equal(t, sum, want) + sum2 := uint64(bits.OnesCount64(uint64(iLeaf))) + assert.Equal(t, sum, sum2) // Test that the stack like property is maintained top := uint64(0) diff --git a/mmr/verify.go b/mmr/verify.go index b16325d..0a19e26 100644 --- a/mmr/verify.go +++ b/mmr/verify.go @@ -15,7 +15,7 @@ func VerifyInclusion( store indexStoreGetter, hasher hash.Hash, mmrSize uint64, leafHash []byte, iNode uint64, proof [][]byte, ) (bool, error) { - peaks, err := PeakHashes(store, mmrSize) + peaks, err := PeakHashes(store, mmrSize-1) if err != nil { return false, err } diff --git a/mmr/verify_test.go b/mmr/verify_test.go index cd5a30e..bad953b 100644 --- a/mmr/verify_test.go +++ b/mmr/verify_test.go @@ -26,13 +26,13 @@ func TestVerifyLeavesIn38(t *testing.T) { // Verify each leaf in all complete mmr sizes up to the size of the canonical mmr // for iLeaf := uint64(0); iLeaf < numLeafs; iLeaf++ { - proof, err := IndexProof(db, s, mmrIndex) + proof, err := InclusionProof(db, s-1, mmrIndex) require.NoError(t, err) nodeHash, err := db.Get(mmrIndex) require.NoError(t, err) - accumulator, err := PeakHashes(db, s) + accumulator, err := PeakHashes(db, s-1) require.NoError(t, err) iacc := PeakIndex(LeafCount(s), len(proof)) require.Less(t, iacc, len(accumulator)) diff --git a/mmr/verifybagged.go b/mmr/verifybagged.go index 2dc4629..e9a8a95 100644 --- a/mmr/verifybagged.go +++ b/mmr/verifybagged.go @@ -12,7 +12,7 @@ import ( // VerifyInclusionBagged returns true if the provided proof demonstrates inclusion of // nodeHash at position iLeaf+1 // -// proof and root should be obtained via IndexProof and GetRoot respectively. +// proof and root should be obtained via InclusionProof and GetRoot respectively. 
// // Remembering that the proof layout is this: // diff --git a/mmr/verifybagged_test.go b/mmr/verifybagged_test.go index d66efcd..8b5f045 100644 --- a/mmr/verifybagged_test.go +++ b/mmr/verifybagged_test.go @@ -35,7 +35,7 @@ func TestVerifyLeavesIn38Bagged(t *testing.T) { // for iLeaf := uint64(0); iLeaf < numLeafs; iLeaf++ { iNode := MMRIndex(iLeaf) - proof, err := IndexProofBagged(mmrSize, db, hasher, iNode) + proof, err := InclusionProofBagged(mmrSize, db, hasher, iNode) require.NoError(t, err) nodeHash, err := db.Get(iNode) @@ -67,7 +67,7 @@ func TestVerify38Bagged(t *testing.T) { // for iLeaf := uint64(0); iLeaf < numLeafs; iLeaf++ { // iNode := MMRIndex(iLeaf) - proof, err := IndexProofBagged(mmrSize, db, hasher, iNode) + proof, err := InclusionProofBagged(mmrSize, db, hasher, iNode) require.NoError(t, err) nodeHash, err := db.Get(iNode) @@ -100,7 +100,7 @@ func TestVerifyPerfectRootsBagged(t *testing.T) { } iNode := mmrSize - 1 - proof, err := IndexProofBagged(mmrSize, db, hasher, iNode) + proof, err := InclusionProofBagged(mmrSize, db, hasher, iNode) require.NoError(t, err) nodeHash, err := db.Get(iNode) @@ -123,7 +123,7 @@ func TestVerifyIndex30InSize63Bagged(t *testing.T) { db := NewGeneratedTestDB(t, 63) root, err := GetRoot(63, db, hasher) require.NoError(t, err) - peakProof, err := IndexProofBagged(63, db, hasher, 30) + peakProof, err := InclusionProofBagged(63, db, hasher, 30) require.NoError(t, err) peakHash := db.mustGet(30) ok := VerifyInclusionBagged(63, hasher, peakHash, 30, peakProof, root) @@ -162,7 +162,7 @@ func TestReVerify38ForAllSizesBagged(t *testing.T) { root, err := GetRoot(jMMRSize, db, hasher) require.NoError(t, err) // Get the proof for *** iLeaf's node *** - proof, err := IndexProofBagged(jMMRSize, db, hasher, iNode) + proof, err := InclusionProofBagged(jMMRSize, db, hasher, iNode) require.NoError(t, err) if proof == nil { // This is the iLeaf == 0 && mmrSize == 1 case which is @@ -200,19 +200,19 @@ func TestVerify(t *testing.T) { } getProofBagged := func(mmrSize uint64, i uint64) [][]byte { - proof, err := IndexProofBagged(mmrSize, db, hasher, i) + proof, err := InclusionProofBagged(mmrSize, db, hasher, i) require.NoError(t, err) if mmrSize == 1 && proof != nil { - t.Errorf("IndexProof() err: %v", errors.New("mmr size 1 should return nil proof")) + t.Errorf("InclusionProof() err: %v", errors.New("mmr size 1 should return nil proof")) return nil } return proof } getProof := func(mmrSize uint64, i uint64) [][]byte { - proof, err := IndexProof(db, mmrSize-1, i) + proof, err := InclusionProof(db, mmrSize-1, i) require.NoError(t, err) if mmrSize == 1 && proof != nil { - t.Errorf("IndexProof() err: %v", errors.New("mmr size 1 should return nil proof")) + t.Errorf("InclusionProof() err: %v", errors.New("mmr size 1 should return nil proof")) return nil } return proof @@ -240,7 +240,7 @@ func TestVerify(t *testing.T) { // peakMap is also the leaf count, which is often also known peakMap := LeafCount(mmrSize) peakIndex := PeakIndex(peakMap, d) - peakHashes, err := PeakHashes(db, mmrSize) + peakHashes, err := PeakHashes(db, mmrSize-1) require.Less(t, peakIndex, len(peakHashes)) require.NoError(t, err) root := peakHashes[peakIndex] @@ -380,7 +380,7 @@ func TestVerify(t *testing.T) { for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { if tt.expectProofNodes != nil { - localPath, iLocalPeak, err := IndexProofLocal( + localPath, iLocalPeak, err := InclusionProofLocal( tt.args.mmrSize, db, tt.args.mmrIndex) localHeightIndex := len(localPath) 
require.NoError(t, err) diff --git a/mmr/verifyconsistency.go b/mmr/verifyconsistency.go index b4d0eea..94cd61c 100644 --- a/mmr/verifyconsistency.go +++ b/mmr/verifyconsistency.go @@ -21,13 +21,13 @@ func CheckConsistency( mmrSizeA, mmrSizeB uint64, peakHashesA [][]byte) (bool, [][]byte, error) { // Obtain the proofs from the current store - cp, err := IndexConsistencyProof(store, mmrSizeA, mmrSizeB) + cp, err := IndexConsistencyProof(store, mmrSizeA-1, mmrSizeB-1) if err != nil { return false, nil, err } // Obtain the expected resulting peaks from the current store - peakHashesB, err := PeakHashes(store, cp.MMRSizeB) + peakHashesB, err := PeakHashes(store, cp.MMRSizeB-1) if err != nil { return false, nil, err } @@ -60,7 +60,7 @@ func VerifyConsistency( // Get the peaks proven by the consistency proof using the provided peaks // for mmr size A - proven, err := ConsistentRoots(hasher, cp.MMRSizeA, peaksFrom, cp.Path) + proven, err := ConsistentRoots(hasher, cp.MMRSizeA-1, peaksFrom, cp.Path) if err != nil { return false, nil, err } @@ -91,8 +91,15 @@ func VerifyConsistency( } } - // All proven peaks have been matched against the future accumulator. The log - // committed by the future accumulator is consistent with the previously - // committed log state. - return true, proven, nil + // the accumulator consists of the proven peaks plus any new peaks in peaksTo. + // In the draft these new peaks are the 'right-peaks' of the consistency proof. + // Here, as ConsistentRoots requires that the peak count for the provided ifrom + // matches the number of peaks in peaksFrom, simply returning peaksTo is safe. + // Even in the corner case where proven is empty. + // + // We could do + // proven = append(proven, peaksTo[len(proven):]...) + // + // But that would be completely redundant given the loop above. + return true, peaksTo, nil } diff --git a/mmr/verifyold.go b/mmr/verifyold.go index 06ed997..f7fd6d3 100644 --- a/mmr/verifyold.go +++ b/mmr/verifyold.go @@ -9,7 +9,7 @@ import "hash" // VerifyInclusionOld returns true if the provided proof demonstrates inclusion of // nodeHash at position iLeaf+1 // -// proof and root should be obtained via IndexProof and GetRoot respectively. +// proof and root should be obtained via InclusionProof and GetRoot respectively. // // Remembering that the proof layout is this: // diff --git a/tests/massifs/localmassifreader_test.go b/tests/massifs/localmassifreader_test.go index e432342..0645440 100644 --- a/tests/massifs/localmassifreader_test.go +++ b/tests/massifs/localmassifreader_test.go @@ -102,23 +102,23 @@ func TestLocalMassifReaderGetVerifiedContext(t *testing.T) { return nil, massifs.MMRState{}, err } signed, state, err := tc.SignedState(tenantIdentity, uint64(massifIndex), massifs.MMRState{ - MMRSize: mmrSize, Rootx: root, + MMRSize: mmrSize, LegacySealRoot: root, }) // put the root back, because the benefit of the "last good seal" // consistency check does not require access to the log data. 
- state.Rootx = root + state.LegacySealRoot = root return signed, state, err }*/ seal := func( - mc *massifs.MassifContext, mmrSize uint64, tenantIdentity string, massifIndex uint32, + mc *massifs.MassifContext, mmrIndex uint64, tenantIdentity string, massifIndex uint32, ) (*cose.CoseSign1Message, massifs.MMRState, error) { - peaks, err := mmr.PeakHashes(mc, mmrSize) + peaks, err := mmr.PeakHashes(mc, mmrIndex) if err != nil { return nil, massifs.MMRState{}, err } signed, state, err := tc.SignedState(tenantIdentity, uint64(massifIndex), massifs.MMRState{ Version: int(massifs.MMRStateVersion1), - MMRSize: mmrSize, Peaks: peaks, + MMRSize: mmrIndex + 1, Peaks: peaks, }) // put the root back, because the benefit of the "last good seal" // consistency check does not require access to the log data. @@ -146,7 +146,7 @@ func TestLocalMassifReaderGetVerifiedContext(t *testing.T) { mmrSizeOld := mmr.FirstMMRSize(mmr.MMRIndex(sealedLeafCount - 1)) require.GreaterOrEqual(t, mmrSizeOld, mc.Start.FirstIndex) - return seal(mc, mmrSizeOld, tenantIdentity, massifIndex) + return seal(mc, mmrSizeOld-1, tenantIdentity, massifIndex) case tenantId2TamperedLogUpdate: // We are simulating a situation where the locally available @@ -184,22 +184,22 @@ func TestLocalMassifReaderGetVerifiedContext(t *testing.T) { require.GreaterOrEqual(t, mmrSizeOld, mc.Start.FirstIndex) // Get the seal before applying the tamper - msg, state, err := seal(mc, mmrSizeOld, tenantIdentity, massifIndex) + msg, state, err := seal(mc, mmrSizeOld-1, tenantIdentity, massifIndex) if err != nil { return nil, massifs.MMRState{}, err } - peakIndices := mmr.PosPeaks(mmrSizeOld) + peakIndices := mmr.Peaks(mmrSizeOld - 1) - // Remember, the peaks are *positions* + // Remember, the peaks are now *indices* - peaks, err := mmr.PeakHashes(mc, mmrSizeOld) + peaks, err := mmr.PeakHashes(mc, mmrSizeOld-1) require.NoError(t, err) // Note: we take the *last* peak, because it corresponds to the // most recent log entries, but tampering any peak will cause // the verification to fail - tamperNode(mc, peakIndices[len(peakIndices)-1]-1) + tamperNode(mc, peakIndices[len(peakIndices)-1]) - peaks2, err := mmr.PeakHashes(mc, mmrSizeOld) + peaks2, err := mmr.PeakHashes(mc, mmrSizeOld-1) require.NoError(t, err) assert.NotEqual(t, peaks, peaks2, "tamper did not change the root") @@ -216,14 +216,14 @@ func TestLocalMassifReaderGetVerifiedContext(t *testing.T) { require.GreaterOrEqual(t, mmrSizeOld, mc.Start.FirstIndex) // Get the seal before applying the tamper - msg, state, err := seal(mc, mmrSizeOld, tenantIdentity, massifIndex) + msg, state, err := seal(mc, mmrSizeOld-1, tenantIdentity, massifIndex) if err != nil { return nil, massifs.MMRState{}, err } // this time, tamper a peak after the seal, this simulates the // case where the extension is inconsistent with the seal. - peaks := mmr.PosPeaks(mc.RangeCount()) + peaks := mmr.Peaks(mc.RangeCount() - 1) // Note: we take the *last* peak, because it corresponds to the // most recent log entries. In this case we want the fresh // tamper to damage the peak which is *most* likely to be // dependent on the smallest sealed peak.
- // Remember, the peaks are *positions* + // Remember, the peaks are now *indices* - tamperNode(mc, peaks[len(peaks)-1]-1) + tamperNode(mc, peaks[len(peaks)-1]) // Now we can return the seal return msg, state, nil default: // Common case: the seal is the full extent of the massif - return seal(mc, mc.RangeCount(), tenantIdentity, massifIndex) + return seal(mc, mc.RangeCount()-1, tenantIdentity, massifIndex) } }) @@ -275,17 +275,17 @@ func TestLocalMassifReaderGetVerifiedContext(t *testing.T) { mmrSizeOld := sizeBeforeLeaves(mc, 8) require.GreaterOrEqual(t, mmrSizeOld, mc.Start.FirstIndex) - peaks := mmr.PosPeaks(mmrSizeOld) + peaks := mmr.Peaks(mmrSizeOld - 1) - // remember, the peaks are *positions* + // remember, the peaks are now *indices* - tamperNode(mc, peaks[len(peaks)-1]-1) + tamperNode(mc, peaks[len(peaks)-1]) case tenantId3InconsistentLogUpdate: // tamper *after* the seal // this time, tamper a peak after the seal, this simulates the // case where the extension is inconsistent with the seal. - peaks := mmr.PosPeaks(mc.RangeCount()) + peaks := mmr.Peaks(mc.RangeCount() - 1) - // Remember, the peaks are *positions* + // Remember, the peaks are now *indices* - tamperNode(mc, peaks[len(peaks)-1]-1) + tamperNode(mc, peaks[len(peaks)-1]) default: } @@ -305,14 +305,14 @@ func TestLocalMassifReaderGetVerifiedContext(t *testing.T) { require.NoError(t, err) mmrSizeOld := sizeBeforeLeaves(mc, 8) require.GreaterOrEqual(t, mmrSizeOld, mc.Start.FirstIndex) - peaks := mmr.PosPeaks(mmrSizeOld) + peaks := mmr.Peaks(mmrSizeOld - 1) - // remember, the peaks are *positions* + // remember, the peaks are now *indices* - tamperNode(mc, peaks[len(peaks)-1]-1) + tamperNode(mc, peaks[len(peaks)-1]) // We call this a fake good state because it's actually tampered, and the // log is "good", but it has the same effect from a verification // perspective. - _, fakeGoodState, err := seal(mc, mmrSizeOld, tenantId4RemoteInconsistentWithTrustedSeal, 0) + _, fakeGoodState, err := seal(mc, mmrSizeOld-1, tenantId4RemoteInconsistentWithTrustedSeal, 0) require.NoError(t, err) fakeECKey := massifs.TestGenerateECKey(t, elliptic.P256())
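The net effect of these test changes for sealing call sites: a V1 seal state records the accumulator peaks for the last complete mmr index it covers, rather than a single bagged root, and conversions between sizes and indices are explicit at every call. A hedged sketch of that construction (the MMRState fields and mmr helpers are the ones used in the tests above; newSealState itself is illustrative and not part of the API):

	func newSealState(mc *massifs.MassifContext, mmrIndex uint64) (massifs.MMRState, error) {
		// the accumulator peaks of MMR(mmrIndex+1), lowest index first
		peaks, err := mmr.PeakHashes(mc, mmrIndex)
		if err != nil {
			return massifs.MMRState{}, err
		}
		return massifs.MMRState{
			Version: int(massifs.MMRStateVersion1),
			MMRSize: mmrIndex + 1, // the state still records a size
			Peaks:   peaks,        // replaces the legacy bagged seal root
		}, nil
	}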