Skip to content

Commit

Permalink
Dev/robin/9709 logconfigmer accumulator consistency proofs (#13)
Browse files Browse the repository at this point in the history
* tests passing

* updates for cose receipts algorithms

* linter save

* fix a long standing bug in one of the tests

---------

Co-authored-by: Robin Bryce <[email protected]>
  • Loading branch information
robinbryce and Robin Bryce authored Oct 30, 2024
1 parent c5884b7 commit 7bffdf9
Show file tree
Hide file tree
Showing 9 changed files with 120 additions and 179 deletions.
34 changes: 16 additions & 18 deletions go.mod
Original file line number Diff line number Diff line change
Expand Up @@ -3,30 +3,28 @@ module github.com/datatrails/go-datatrails-logverification
go 1.22

require (
github.com/datatrails/go-datatrails-common v0.16.1
github.com/datatrails/go-datatrails-common v0.18.0
github.com/datatrails/go-datatrails-common-api-gen v0.4.5
github.com/datatrails/go-datatrails-merklelog/massifs v0.1.0
github.com/datatrails/go-datatrails-merklelog/mmr v0.0.2
github.com/datatrails/go-datatrails-merklelog/massifs v0.2.0
github.com/datatrails/go-datatrails-merklelog/mmr v0.1.0
github.com/datatrails/go-datatrails-merklelog/mmrtesting v0.1.0
github.com/datatrails/go-datatrails-simplehash v0.0.5
github.com/stretchr/testify v1.9.0
github.com/veraison/go-cose v1.1.0
google.golang.org/protobuf v1.34.1
google.golang.org/protobuf v1.34.2
)

// replace github.com/datatrails/go-datatrails-merklelog/massifs => ../go-datatrails-merklelog/massifs

require (
github.com/Azure/azure-sdk-for-go v68.0.0+incompatible // indirect
github.com/Azure/azure-sdk-for-go/sdk/azcore v1.11.1 // indirect
github.com/Azure/azure-sdk-for-go/sdk/internal v1.8.0 // indirect
github.com/Azure/azure-sdk-for-go/sdk/messaging/azservicebus v1.7.0 // indirect
github.com/Azure/azure-sdk-for-go/sdk/messaging/azservicebus v1.7.1 // indirect
github.com/Azure/azure-sdk-for-go/sdk/storage/azblob v0.4.1 // indirect
github.com/Azure/go-amqp v1.0.5 // indirect
github.com/Azure/go-autorest v14.2.0+incompatible // indirect
github.com/Azure/go-autorest/autorest v0.11.29 // indirect
github.com/Azure/go-autorest/autorest/adal v0.9.23 // indirect
github.com/Azure/go-autorest/autorest/azure/auth v0.5.12 // indirect
github.com/Azure/go-autorest/autorest/adal v0.9.24 // indirect
github.com/Azure/go-autorest/autorest/azure/auth v0.5.13 // indirect
github.com/Azure/go-autorest/autorest/azure/cli v0.4.6 // indirect
github.com/Azure/go-autorest/autorest/date v0.3.0 // indirect
github.com/Azure/go-autorest/autorest/to v0.4.0 // indirect
Expand All @@ -37,10 +35,10 @@ require (
github.com/dimchansky/utfbom v1.1.1 // indirect
github.com/envoyproxy/protoc-gen-validate v1.0.4 // indirect
github.com/fxamacker/cbor/v2 v2.6.0 // indirect
github.com/gabriel-vasile/mimetype v1.4.3 // indirect
github.com/gabriel-vasile/mimetype v1.4.4 // indirect
github.com/golang-jwt/jwt/v4 v4.5.0 // indirect
github.com/google/uuid v1.6.0 // indirect
github.com/grpc-ecosystem/grpc-gateway/v2 v2.19.1 // indirect
github.com/grpc-ecosystem/grpc-gateway/v2 v2.20.0 // indirect
github.com/ldclabs/cose/go v0.0.0-20221214142927-d22c1cfc2154 // indirect
github.com/mitchellh/go-homedir v1.1.0 // indirect
github.com/opentracing-contrib/go-observer v0.0.0-20170622124052-a52f23424492 // indirect
Expand All @@ -53,12 +51,12 @@ require (
github.com/zeebo/bencode v1.0.0 // indirect
go.uber.org/multierr v1.11.0 // indirect
go.uber.org/zap v1.27.0 // indirect
golang.org/x/crypto v0.23.0 // indirect
golang.org/x/net v0.25.0 // indirect
golang.org/x/sys v0.20.0 // indirect
golang.org/x/text v0.15.0 // indirect
google.golang.org/genproto/googleapis/api v0.0.0-20240513163218-0867130af1f8 // indirect
google.golang.org/genproto/googleapis/rpc v0.0.0-20240513163218-0867130af1f8 // indirect
google.golang.org/grpc v1.64.0 // indirect
golang.org/x/crypto v0.25.0 // indirect
golang.org/x/net v0.27.0 // indirect
golang.org/x/sys v0.22.0 // indirect
golang.org/x/text v0.16.0 // indirect
google.golang.org/genproto/googleapis/api v0.0.0-20240701130421-f6361c86f094 // indirect
google.golang.org/genproto/googleapis/rpc v0.0.0-20240624140628-dc46fd24d27d // indirect
google.golang.org/grpc v1.65.0 // indirect
gopkg.in/yaml.v3 v3.0.1 // indirect
)
91 changes: 50 additions & 41 deletions go.sum

Large diffs are not rendered by default.

7 changes: 3 additions & 4 deletions integrationsupport/massifseal.go
Original file line number Diff line number Diff line change
Expand Up @@ -3,7 +3,6 @@ package integrationsupport
import (
"context"
"crypto/ecdsa"
"crypto/sha256"
"testing"

"github.com/datatrails/go-datatrails-common-api-gen/assets/v2/assets"
Expand All @@ -20,21 +19,21 @@ import (
// the test context.
func GenerateMassifSeal(t *testing.T, testContext mmrtesting.TestContext, lastEvent *assets.EventResponse, signingKey ecdsa.PrivateKey) {
massifReader := massifs.NewMassifReader(logger.Sugar, testContext.Storer)
hasher := sha256.New()

// Just handle a single massif for now
massifContext, err := massifReader.GetMassif(context.TODO(), mmrtesting.DefaultGeneratorTenantIdentity, 0)
require.Nil(t, err)

mmrSize := massifContext.RangeCount()
root, err := mmr.GetRoot(mmrSize, &massifContext, hasher)
peaks, err := mmr.PeakHashes(&massifContext, mmrSize-1)
require.Nil(t, err)
id, epoch, err := massifs.SplitIDTimestampHex(lastEvent.MerklelogEntry.Commit.Idtimestamp)
require.Nil(t, err)

mmrState := massifs.MMRState{
Version: 1,
MMRSize: mmrSize,
Root: root,
Peaks: peaks,
CommitmentEpoch: uint32(epoch),
IDTimestamp: id,
}
Expand Down
12 changes: 3 additions & 9 deletions logverification/proof.go
Original file line number Diff line number Diff line change
Expand Up @@ -18,8 +18,7 @@ func EventProof(verifiableEvent VerifiableEvent, massif *massifs.MassifContext)
// Get the size of the complete tenant MMR
mmrSize := massif.RangeCount()

hasher := sha256.New()
proof, err := mmr.IndexProof(mmrSize, massif, hasher, verifiableEvent.MerkleLog.Commit.Index)
proof, err := mmr.InclusionProof(massif, mmrSize-1, verifiableEvent.MerkleLog.Commit.Index)
if err != nil {
return nil, err
}
Expand All @@ -33,12 +32,7 @@ func VerifyProof(verifiableEvent VerifiableEvent, proof [][]byte, massif *massif
mmrSize := massif.RangeCount()

hasher := sha256.New()
root, err := mmr.GetRoot(mmrSize, massif, hasher)
if err != nil {
return false, err
}

verified := mmr.VerifyInclusion(mmrSize, hasher, verifiableEvent.LeafHash,
verifiableEvent.MerkleLog.Commit.Index, proof, root)
return verified, nil
return mmr.VerifyInclusion(massif, hasher, mmrSize, verifiableEvent.LeafHash,
verifiableEvent.MerkleLog.Commit.Index, proof)
}
7 changes: 4 additions & 3 deletions logverification/seal.go
Original file line number Diff line number Diff line change
Expand Up @@ -43,11 +43,12 @@ func SignedLogState(
massifIndex, err)
}

// The log state at time of sealing is the Payload. It included the root, but this is removed
// from the stored log state. This forces a verifier to recompute the merkle root from their view
// The log state at time of sealing is the Payload. It included the peaks, but this is removed
// from the stored log state. This forces a verifier to recompute the merkle peaks from their view
// of the data. If verification succeeds when these computed peaks are added to signedStateNow, then
// we can be confident that DataTrails signed this state, and that the peaks match your data.
logState.Root, err = mmr.GetRoot(logState.MMRSize, &massifContext, hasher)

logState.Peaks, err = mmr.PeakHashes(&massifContext, logState.MMRSize-1)
if err != nil {
return nil, fmt.Errorf("SignedLogState failed: unable to get root for massifContextNow: %w", err)
}
Expand Down
115 changes: 28 additions & 87 deletions logverification/verifyconsistency.go
Original file line number Diff line number Diff line change
Expand Up @@ -2,7 +2,6 @@ package logverification

import (
"context"
"crypto/ecdsa"
"errors"
"fmt"
"hash"
Expand All @@ -17,10 +16,10 @@ import (
// MMRState is an abstraction, but it is assumed that logStateA comes from a local, trusted copy of the data
// rather than a fresh download from DataTrails.
//
// This function assumes the two log states are from the same massif.
//
// NOTE: the log state's signatures are not verified in this function, it is expected that the signature verification
// is done as a separate step to the consistency verification.
//
// NOTE: it is expected that both logStateA and logStateB have had their root recalculated.
func VerifyConsistency(
ctx context.Context,
hasher hash.Hash,
Expand All @@ -30,102 +29,44 @@ func VerifyConsistency(
logStateB *massifs.MMRState,
) (bool, error) {

if logStateA.Root == nil || logStateB.Root == nil {
if logStateA.Peaks == nil || logStateB.Peaks == nil {
return false, errors.New("VerifyConsistency failed: the roots for both log state A and log state B need to be set")
}

if len(logStateA.Root) == 0 || len(logStateB.Root) == 0 {
if len(logStateA.Peaks) == 0 || len(logStateB.Peaks) == 0 {
return false, errors.New("VerifyConsistency failed: the roots for both log state A and log state B need to be set")
}

massifReader := massifs.NewMassifReader(logger.Sugar, reader)

// last massif in the merkle log for log state A
massifContextA, err := Massif(logStateA.MMRSize-1, massifReader, tenantID, DefaultMassifHeight)
if err != nil {
return false, fmt.Errorf("VerifyConsistency failed: unable to get the last massif for log state A: %w", err)
}

// last massif in the merkle log for log state B
massifContextB, err := Massif(logStateB.MMRSize-1, massifReader, tenantID, DefaultMassifHeight)
if err != nil {
return false, fmt.Errorf("VerifyConsistency failed: unable to get the last massif for log state B: %w", err)
}

// We construct a proof of consistency between logStateA and logStateB.
// This will be a proof that logStateB derives from logStateA.
consistencyProof, err := mmr.IndexConsistencyProof(logStateA.MMRSize, logStateB.MMRSize, massifContextB, hasher)
if err != nil {
return false, fmt.Errorf("VerifyConsistency failed: unable to generate consistency proof: %w", err)
}

// In order to verify the proof we take the hashes of all of the peaks in logStateA.
// The hash of each of these peaks guarantees the integrity of all of its child nodes, so we
// don't need to check every hash.

// Peaks returned as MMR positions (1-based), not MMR indices (0-based). The location of these
// is deterministic: Given an MMR of a particular size, the peaks will always be in the same place.
logPeaksA := mmr.Peaks(logStateA.MMRSize)

// Get the hashes of all of the peaks.
logPeakHashesA, err := mmr.PeakBagRHS(massifContextA, hasher, 0, logPeaksA)
if err != nil {
return false, errors.New("error")
}

// Lastly, verify the consistency proof using the peak hashes from our backed-up log. If this
// returns true, then we can confidently say that everything in the backed-up log is in the state
// of the log described by this signed state.
verified := mmr.VerifyConsistency(hasher, logPeakHashesA, consistencyProof, logStateA.Root, logStateB.Root)
return verified, nil
}

// VerifyConsistencyFromMassifs takes a massif context providing access to data from the past, and a massif
// context providing access to the current version of the log. It returns whether or not the
// new version of the log is consistent with the previous version (i.e. it contains all of the
// same nodes in the same positions.)
//
// It is assumed that in a production use case, massifContextBefore provides access to a trusted
// local copy of the massif, rather than a fresh download from DataTrails.
func VerifyConsistencyFromMassifs(
ctx context.Context,
verificationKey ecdsa.PublicKey,
hasher hash.Hash,
blobReader azblob.Reader,
massifContextBefore *massifs.MassifContext,
massifContextNow *massifs.MassifContext,
logStateNow *massifs.MMRState,
) (bool, error) {
// Grab some core info about our backed up merkle log, which we'll need to prove consistency
mmrSizeBefore := massifContextBefore.Count()
rootBefore, err := mmr.GetRoot(mmrSizeBefore, massifContextBefore, hasher)
if err != nil {
return false, fmt.Errorf("VerifyConsistency failed: unable to get root for massifContextBefore: %w", err)
}

// We construct a proof of consistency between the backed up MMR log and the head of the log.
consistencyProof, err := mmr.IndexConsistencyProof(mmrSizeBefore, logStateNow.MMRSize, massifContextNow, hasher)
if err != nil {
return false, errors.New("error")
}

// In order to verify the proof we take the hashes of all of the peaks in the backed up log.
// The hash of each of these peaks guarantees the integrity of all of its child nodes, so we
// don't need to check every hash.

// Peaks returned as MMR positions (1-based), not MMR indices (0-based). The location of these
// is deterministic: Given an MMR of a particular size, the peaks will always be in the same place.
backupLogPeaks := mmr.Peaks(mmrSizeBefore)

// Get the hashes of all of the peaks.
backupLogPeakHashes, err := mmr.PeakBagRHS(massifContextNow, hasher, 0, backupLogPeaks)
if err != nil {
return false, errors.New("error")
}

// Lastly, verify the consistency proof using the peak hashes from our backed-up log. If this
// returns true, then we can confidently say that everything in the backed-up log is in the state
// of the log described by this signed state.
verified := mmr.VerifyConsistency(hasher, backupLogPeakHashes, consistencyProof, rootBefore, logStateNow.Root)
return verified, nil
// We check a proof of consistency between logStateA and logStateB.
// This will be a proof that logStateB includes all elements from logStateA,
// and includes them in the same positions.

// In order to verify the proof we verify that the inclusion proofs of each of
// the peaks from the old log matches a peak in the new log.
// Because a proof of inclusion requires that the proof reproduces the peak,
// and because all nodes in the old tree have proofs that pass through the
// old peaks and then reach the new peaks, we know it is not possible for
// the children to verify unless their peaks also verify. So we don't need
// to check every hash.

verified, _ /*peaksB*/, err := mmr.CheckConsistency(massifContextB, hasher, logStateA.MMRSize, logStateB.MMRSize, logStateA.Peaks)

// A tampered node cannot be proven unless the entire log is re-built. If
// a log is re-built, any proof held by a relying party will not verify. And
// as it is signed, it is evidence the log was re-built by someone with
// access to our signing key.
// In the case of a tamper (or corruption) without re-build, the proof of inclusion will fail.
// Examining the parent and sibling of an individually tampered node will reveal the tamper.
// This means we are always fail-safe in the case of a tampered node - a
// party relying on the log can guarantee they will never use unverifiable
// data.
return verified, err
}
7 changes: 5 additions & 2 deletions logverification/verifyconsistency_test.go
Original file line number Diff line number Diff line change
Expand Up @@ -90,8 +90,11 @@ func (b *TestLogHelper) VerifyConsistencyBetween(fromState *massifs.MMRState, to
result, err := VerifyConsistency(
context.Background(), b.hasher, b.tctx.Storer, inTenant, fromState, toState,
)

require.NoError(b.t, err)
// Some callers are testing negative results, so we only ensure that the
// true/false is consistent with the error state here.
if result == true {
require.NoError(b.t, err)
}
return result
}

Expand Down
22 changes: 9 additions & 13 deletions logverification/verifylist.go
Original file line number Diff line number Diff line change
Expand Up @@ -212,7 +212,7 @@ func VerifyList(reader azblob.Reader, eventList []VerifiableEvent, options ...Ve

// if the event is OMITTED add the leaf to the omitted list
if eventType == Omitted {
omittedMMRIndices = append(omittedMMRIndices, mmr.TreeIndex(leafIndex))
omittedMMRIndices = append(omittedMMRIndices, mmr.MMRIndex(leafIndex))

// as the event is still the lowest mmrIndex we check this event
// against the next leaf
Expand Down Expand Up @@ -240,7 +240,7 @@ func VerifyEventInList(

hasher.Reset()

leafMMRIndex := mmr.TreeIndex(leafIndex)
leafMMRIndex := mmr.MMRIndex(leafIndex)
eventMMRIndex := event.MerkleLog.Commit.Index

// First we check if the event mmrIndex corresponds to a leaf node.
Expand Down Expand Up @@ -348,24 +348,20 @@ func VerifyEventInList(
// Now we know that the event is the event stored on the leaf node,
// we can do an inclusion proof of the leaf node on the merkle log.
mmrSize := massifContext.RangeCount()
root, err := mmr.GetRoot(mmrSize, massifContext, hasher)
if err != nil {
return Unknown, err
}

inclusionProof, err := mmr.IndexProof(mmrSize, massifContext, hasher, leafMMRIndex)
inclusionProof, err := mmr.InclusionProof(massifContext, mmrSize-1, leafMMRIndex)
if err != nil {
return Unknown, err
}

verified := mmr.VerifyInclusion(mmrSize, hasher, event.LeafHash, leafMMRIndex, inclusionProof, root)

// if the inclusion proof verification failed, return EXCLUDED.
//
// This means the leaf node is not included on the merklelog.
if !verified {
verified, err := mmr.VerifyInclusion(
massifContext, hasher, mmrSize, event.LeafHash, leafMMRIndex, inclusionProof)
if !verified || errors.Is(err, mmr.ErrVerifyInclusionFailed) {
return Excluded, ErrInclusionProofVerify
}
if err != nil {
return Unknown, err
}

return Included, nil

Expand Down
4 changes: 2 additions & 2 deletions taskfiles/Taskfile_gotest.yml
Original file line number Diff line number Diff line change
Expand Up @@ -35,7 +35,7 @@ tasks:
-v \
-coverprofile={{.UNITTEST_DIR}}/main.out \
./... \
2>&1 | go-junit-report -set-exit-code -debug.print-events > {{.UNITTEST_DIR}}/main.xml
2>&1
gocov convert {{.UNITTEST_DIR}}/main.out > {{.UNITTEST_DIR}}/coverage.json
Expand All @@ -58,6 +58,6 @@ tasks:
-v \
-coverprofile={{.UNITTEST_DIR}}/main.out \
./... \
2>&1 | go-junit-report -set-exit-code -debug.print-events > {{.UNITTEST_DIR}}/main.xml
2>&1
gocov convert {{.UNITTEST_DIR}}/main.out > {{.UNITTEST_DIR}}/coverage.json

0 comments on commit 7bffdf9

Please sign in to comment.