diff --git a/.gitignore b/.gitignore index 64fd555bf8..fc688fe4c9 100644 --- a/.gitignore +++ b/.gitignore @@ -11,6 +11,7 @@ cluster-data/ dist/ .idea/ .vscode/ +.tlacache/ *.bat *.bak Cargo.lock diff --git a/packages/chain/chainmanager/chain_manager.go b/packages/chain/chainmanager/chain_manager.go index d75e1803e0..18b7f27b8c 100644 --- a/packages/chain/chainmanager/chain_manager.go +++ b/packages/chain/chainmanager/chain_manager.go @@ -83,6 +83,7 @@ import ( "github.com/iotaledger/hive.go/log" iotago "github.com/iotaledger/iota.go/v4" "github.com/iotaledger/wasp/packages/chain/cmt_log" + "github.com/iotaledger/wasp/packages/chain/cons" "github.com/iotaledger/wasp/packages/cryptolib" "github.com/iotaledger/wasp/packages/gpa" "github.com/iotaledger/wasp/packages/isc" @@ -102,10 +103,17 @@ func (o *Output) LatestActiveAnchorOutput() *isc.ChainOutputs { if o.cmi.needConsensus == nil { return nil } - return o.cmi.needConsensus.BaseAnchorOutput + return o.cmi.needConsensus.ConsensusInput.BaseCO() } -func (o *Output) LatestConfirmedAnchorOutput() *isc.ChainOutputs { return o.cmi.latestConfirmedAO } -func (o *Output) NeedConsensus() *NeedConsensus { return o.cmi.needConsensus } + +func (o *Output) LatestConfirmedAnchorOutput() *isc.ChainOutputs { + return o.cmi.latestConfirmedAO +} + +func (o *Output) NeedConsensus() *NeedConsensus { + return o.cmi.needConsensus +} + func (o *Output) NeedPublishTX() *shrinkingmap.ShrinkingMap[iotago.TransactionID, *NeedPublishTX] { return o.cmi.needPublishTX } @@ -120,22 +128,22 @@ func (o *Output) String() string { } type NeedConsensus struct { - CommitteeAddr iotago.Ed25519Address - LogIndex cmt_log.LogIndex - DKShare tcrypto.DKShare - BaseAnchorOutput *isc.ChainOutputs + CommitteeAddr iotago.Ed25519Address + LogIndex cmt_log.LogIndex + DKShare tcrypto.DKShare + ConsensusInput cons.Input } func (nc *NeedConsensus) IsFor(output *cmt_log.Output) bool { - return output.GetLogIndex() == nc.LogIndex && 
output.GetBaseAnchorOutput().Equals(nc.BaseAnchorOutput) + return output.GetLogIndex() == nc.LogIndex } func (nc *NeedConsensus) String() string { return fmt.Sprintf( - "{chainMgr.NeedConsensus, CommitteeAddr=%v, LogIndex=%v, BaseAnchorOutput=%v}", + "{chainMgr.NeedConsensus, CommitteeAddr=%v, LogIndex=%v, ConsensusInput=%v}", nc.CommitteeAddr.String(), nc.LogIndex, - nc.BaseAnchorOutput, + nc.ConsensusInput, ) } @@ -278,10 +286,10 @@ func (cmi *chainMgrImpl) handleInputAnchorOutputConfirmed(input *inputAnchorOutp cmi.log.LogDebugf("handleInputAnchorOutputConfirmed: %+v", input) // // > Set LatestConfirmedAO <- ConfirmedAO - vsaTip, vsaUpdated := cmi.varAccessNodeState.BlockConfirmed(input.anchorOutput) - cmi.latestConfirmedAO = input.anchorOutput + vsaTip, vsaUpdated := cmi.varAccessNodeState.BlockConfirmed(input.confirmedOutputs) // TODO: + cmi.latestConfirmedAO = input.confirmedOutputs msgs := gpa.NoMessages() - committeeAddr := input.anchorOutput.AnchorOutput.StateController().(*iotago.Ed25519Address) + committeeAddr := input.confirmedOutputs.AnchorOutput.StateController().(*iotago.Ed25519Address) committeeLog, err := cmi.ensureCmtLog(*committeeAddr) if errors.Is(err, ErrNotInCommittee) { // > IF this node is in the committee THEN ... ELSE @@ -299,7 +307,7 @@ func (cmi *chainMgrImpl) handleInputAnchorOutputConfirmed(input *inputAnchorOutp cmi.log.LogDebugf("⊢ going to track %v as an access node on confirmed block.", vsaTip) cmi.trackActiveStateCB(vsaTip) } - cmi.log.LogDebugf("This node is not in the committee for anchorOutput: %v", input.anchorOutput) + cmi.log.LogDebugf("This node is not in the committee for anchorOutput: %v", input.confirmedOutputs) return msgs } if err != nil { @@ -310,7 +318,7 @@ func (cmi *chainMgrImpl) handleInputAnchorOutputConfirmed(input *inputAnchorOutp // > Pass it to the corresponding CmtLog; HandleCmtLogOutput. 
msgs.AddAll(cmi.handleCmtLogOutput( committeeLog, - committeeLog.gpaInstance.Input(cmt_log.NewInputAnchorOutputConfirmed(input.anchorOutput)), + committeeLog.gpaInstance.Input(cmt_log.NewInputAnchorOutputConfirmed(input.confirmedOutputs)), )) return msgs } @@ -350,10 +358,10 @@ func (cmi *chainMgrImpl) handleInputConsensusOutputDone(input *inputConsensusOut // > IF ConsensusOutput.BaseAO == NeedConsensus THEN // > Add ConsensusOutput.TX to NeedPublishTX if true { // TODO: Reconsider this condition. Several recent consensus instances should be published, if we run consensus instances in parallel. - txID := input.consensusResult.NextAnchorOutput.AnchorOutputID.TransactionID() - if !cmi.needPublishTX.Has(txID) && input.consensusResult.Block != nil { + txID := input.consensusResult.ProducedChainOutputs().AnchorOutputID.TransactionID() + if !cmi.needPublishTX.Has(txID) && input.consensusResult.ProducedStateBlock() != nil { // Inform the access nodes on new block produced. - block := input.consensusResult.Block + block := input.consensusResult.ProducedStateBlock() activeAccessNodes, activeCommitteeNodes := cmi.activeNodesCB() cmi.log.LogDebugf( "Sending MsgBlockProduced (stateIndex=%v, l1Commitment=%v, txID=%v) to access nodes: %v except committeeNodes %v", @@ -363,22 +371,22 @@ func (cmi *chainMgrImpl) handleInputConsensusOutputDone(input *inputConsensusOut if lo.Contains(activeCommitteeNodes, activeAccessNodes[i]) { continue } - msgs.Add(NewMsgBlockProduced(cmi.nodeIDFromPubKey(activeAccessNodes[i]), input.consensusResult.Transaction, block)) + msgs.Add(NewMsgBlockProduced(cmi.nodeIDFromPubKey(activeAccessNodes[i]), input.consensusResult.ProducedTransaction(), block)) } } cmi.needPublishTX.Set(txID, &NeedPublishTX{ CommitteeAddr: input.committeeAddr, LogIndex: input.logIndex, TxID: txID, - Tx: input.consensusResult.Transaction, - BaseAnchorOutputID: input.consensusResult.BaseAnchorOutput, - NextAnchorOutput: input.consensusResult.NextAnchorOutput, + Tx: 
input.consensusResult.ProducedTransaction(), + BaseAnchorOutputID: input.consensusResult.ConsumedAnchorOutputID(), + NextAnchorOutput: input.consensusResult.ProducedChainOutputs(), }) } // // > Forward the message to the corresponding CmtLog; HandleCmtLogOutput. msgs.AddAll(cmi.withCmtLog(input.committeeAddr, func(cl gpa.GPA) gpa.OutMessages { - return cl.Input(cmt_log.NewInputConsensusOutputDone(input.logIndex, input.proposedBaseAO, input.consensusResult.BaseAnchorOutput, input.consensusResult.NextAnchorOutput)) + return cl.Input(cmt_log.NewInputConsensusOutputDone(input.logIndex, input.consensusResult)) })) return msgs } @@ -387,7 +395,7 @@ func (cmi *chainMgrImpl) handleInputConsensusOutputDone(input *inputConsensusOut // > Forward the message to the corresponding CmtLog; HandleCmtLogOutput. func (cmi *chainMgrImpl) handleInputConsensusOutputSkip(input *inputConsensusOutputSkip) gpa.OutMessages { return cmi.withCmtLog(input.committeeAddr, func(cl gpa.GPA) gpa.OutMessages { - return cl.Input(cmt_log.NewInputConsensusOutputSkip(input.logIndex, input.proposedBaseAO)) + return cl.Input(cmt_log.NewInputConsensusOutputSkip(input.logIndex)) }) } @@ -494,7 +502,7 @@ func (cmi *chainMgrImpl) ensureNeedConsensus(cli *cmtLogInst, outputUntyped gpa. // Not changed, keep it. return } - committeeAddress := output.GetBaseAnchorOutput().AnchorOutput.StateController() + committeeAddress := output.ConsensusInput().BaseCO().AnchorOutput.StateController() // TODO: BaseCO can be nil. dkShare, err := cmi.dkShareRegistryProvider.LoadDKShare(committeeAddress) if errors.Is(err, tcrypto.ErrDKShareNotFound) { // Rotated to other nodes, so we don't need to start the next consensus. @@ -505,10 +513,10 @@ func (cmi *chainMgrImpl) ensureNeedConsensus(cli *cmtLogInst, outputUntyped gpa. 
panic(fmt.Errorf("ensureNeedConsensus cannot load DKShare for %v: %w", committeeAddress, err)) } cmi.needConsensus = &NeedConsensus{ - CommitteeAddr: cli.committeeAddr, - LogIndex: output.GetLogIndex(), - DKShare: dkShare, - BaseAnchorOutput: output.GetBaseAnchorOutput(), + CommitteeAddr: cli.committeeAddr, + LogIndex: output.GetLogIndex(), + DKShare: dkShare, + ConsensusInput: output.ConsensusInput(), } } diff --git a/packages/chain/chainmanager/chain_manager_test.go b/packages/chain/chainmanager/chain_manager_test.go index 156cb20b74..f7734072c6 100644 --- a/packages/chain/chainmanager/chain_manager_test.go +++ b/packages/chain/chainmanager/chain_manager_test.go @@ -116,7 +116,7 @@ func testChainMgrBasic(t *testing.T, n, f int) { out := n.Output().(*chainmanager.Output) require.Equal(t, 0, out.NeedPublishTX().Size()) require.NotNil(t, out.NeedConsensus()) - require.Equal(t, originAO, out.NeedConsensus().BaseAnchorOutput) + require.Equal(t, originAO, out.NeedConsensus().ConsensusInput) require.Equal(t, uint32(1), out.NeedConsensus().LogIndex.AsUint32()) require.Equal(t, cmtPubKeyA, &out.NeedConsensus().CommitteeAddr) } @@ -136,7 +136,7 @@ func testChainMgrBasic(t *testing.T, n, f int) { &cons.Result{ Transaction: step2TX, Block: block0, - BaseAnchorOutput: consReq.BaseAnchorOutput.AnchorOutputID, + BaseAnchorOutput: consReq.ConsensusInput.AnchorOutputID, NextAnchorOutput: step2AO, }, )) @@ -160,7 +160,7 @@ func testChainMgrBasic(t *testing.T, n, f int) { return &tx.CommitteeAddr }()) require.NotNil(t, out.NeedConsensus()) - require.Equal(t, step2AO, out.NeedConsensus().BaseAnchorOutput) + require.Equal(t, step2AO, out.NeedConsensus().ConsensusInput) require.Equal(t, uint32(2), out.NeedConsensus().LogIndex.AsUint32()) require.Equal(t, cmtPubKeyA, &out.NeedConsensus().CommitteeAddr) } @@ -176,7 +176,7 @@ func testChainMgrBasic(t *testing.T, n, f int) { out := n.Output().(*chainmanager.Output) require.Equal(t, 0, out.NeedPublishTX().Size()) require.NotNil(t, 
out.NeedConsensus()) - require.Equal(t, step2AO, out.NeedConsensus().BaseAnchorOutput) + require.Equal(t, step2AO, out.NeedConsensus().ConsensusInput) require.Equal(t, uint32(2), out.NeedConsensus().LogIndex.AsUint32()) require.Equal(t, cmtPubKeyA, &out.NeedConsensus().CommitteeAddr) } @@ -191,7 +191,7 @@ func testChainMgrBasic(t *testing.T, n, f int) { out := n.Output().(*chainmanager.Output) require.Equal(t, 0, out.NeedPublishTX().Size()) require.NotNil(t, out.NeedConsensus()) - require.Equal(t, step2AO, out.NeedConsensus().BaseAnchorOutput) + require.Equal(t, step2AO, out.NeedConsensus().ConsensusInput) require.Equal(t, uint32(2), out.NeedConsensus().LogIndex.AsUint32()) require.Equal(t, cmtPubKeyA, &out.NeedConsensus().CommitteeAddr) } @@ -207,7 +207,7 @@ func testChainMgrBasic(t *testing.T, n, f int) { out := n.Output().(*chainmanager.Output) require.Equal(t, 0, out.NeedPublishTX().Size()) require.NotNil(t, out.NeedConsensus()) - require.Equal(t, rotateAO, out.NeedConsensus().BaseAnchorOutput) + require.Equal(t, rotateAO, out.NeedConsensus().ConsensusInput) require.Equal(t, uint32(1), out.NeedConsensus().LogIndex.AsUint32()) require.Equal(t, cmtPubKeyB, &out.NeedConsensus().CommitteeAddr) } diff --git a/packages/chain/chainmanager/input_alias_output_received.go b/packages/chain/chainmanager/input_alias_output_received.go index 295f3a22e9..19fe802bce 100644 --- a/packages/chain/chainmanager/input_alias_output_received.go +++ b/packages/chain/chainmanager/input_alias_output_received.go @@ -11,15 +11,15 @@ import ( ) type inputAnchorOutputConfirmed struct { - anchorOutput *isc.ChainOutputs + confirmedOutputs *isc.ChainOutputs } -func NewInputAnchorOutputConfirmed(anchorOutput *isc.ChainOutputs) gpa.Input { +func NewInputAnchorOutputConfirmed(confirmedOutputs *isc.ChainOutputs) gpa.Input { return &inputAnchorOutputConfirmed{ - anchorOutput: anchorOutput, + confirmedOutputs: confirmedOutputs, } } func (inp *inputAnchorOutputConfirmed) String() string { - return 
fmt.Sprintf("{chainMgr.inputAnchorOutputConfirmed, %v}", inp.anchorOutput) + return fmt.Sprintf("{chainMgr.inputAnchorOutputConfirmed, %v}", inp.confirmedOutputs) } diff --git a/packages/chain/chainmanager/input_consensus_output_done.go b/packages/chain/chainmanager/input_consensus_output_done.go index 3de641dc86..da890748da 100644 --- a/packages/chain/chainmanager/input_consensus_output_done.go +++ b/packages/chain/chainmanager/input_consensus_output_done.go @@ -15,30 +15,26 @@ import ( type inputConsensusOutputDone struct { committeeAddr iotago.Ed25519Address logIndex cmt_log.LogIndex - proposedBaseAO iotago.OutputID consensusResult *cons.Result } func NewInputConsensusOutputDone( committeeAddr iotago.Ed25519Address, logIndex cmt_log.LogIndex, - proposedBaseAO iotago.OutputID, consensusResult *cons.Result, ) gpa.Input { return &inputConsensusOutputDone{ committeeAddr: committeeAddr, logIndex: logIndex, - proposedBaseAO: proposedBaseAO, consensusResult: consensusResult, } } func (inp *inputConsensusOutputDone) String() string { return fmt.Sprintf( - "{chainMgr.inputConsensusOutputDone, committeeAddr=%v, logIndex=%v, proposedBaseAO=%v, consensusResult=%v}", + "{chainMgr.inputConsensusOutputDone, committeeAddr=%v, logIndex=%v, consensusResult=%v}", inp.committeeAddr.String(), inp.logIndex, - inp.proposedBaseAO.ToHex(), inp.consensusResult, ) } diff --git a/packages/chain/chainmanager/input_consensus_output_skip.go b/packages/chain/chainmanager/input_consensus_output_skip.go index 21188b7b5d..14be25a2aa 100644 --- a/packages/chain/chainmanager/input_consensus_output_skip.go +++ b/packages/chain/chainmanager/input_consensus_output_skip.go @@ -12,28 +12,24 @@ import ( ) type inputConsensusOutputSkip struct { - committeeAddr iotago.Ed25519Address - logIndex cmt_log.LogIndex - proposedBaseAO iotago.OutputID + committeeAddr iotago.Ed25519Address + logIndex cmt_log.LogIndex } func NewInputConsensusOutputSkip( committeeAddr iotago.Ed25519Address, logIndex cmt_log.LogIndex, 
- proposedBaseAO iotago.OutputID, ) gpa.Input { return &inputConsensusOutputSkip{ - committeeAddr: committeeAddr, - logIndex: logIndex, - proposedBaseAO: proposedBaseAO, + committeeAddr: committeeAddr, + logIndex: logIndex, } } func (inp *inputConsensusOutputSkip) String() string { return fmt.Sprintf( - "{chainMgr.inputConsensusOutputSkip, committeeAddr=%v, logIndex=%v, proposedBaseAO=%v}", + "{chainMgr.inputConsensusOutputSkip, committeeAddr=%v, logIndex=%v}", inp.committeeAddr.String(), inp.logIndex, - inp.proposedBaseAO.ToHex(), ) } diff --git a/packages/chain/cmt_log/cmt_log.go b/packages/chain/cmt_log/cmt_log.go index 3556aeda10..bd49431e08 100644 --- a/packages/chain/cmt_log/cmt_log.go +++ b/packages/chain/cmt_log/cmt_log.go @@ -96,6 +96,7 @@ import ( "github.com/iotaledger/hive.go/log" iotago "github.com/iotaledger/iota.go/v4" + "github.com/iotaledger/wasp/packages/chain/cons" "github.com/iotaledger/wasp/packages/cryptolib" "github.com/iotaledger/wasp/packages/gpa" "github.com/iotaledger/wasp/packages/isc" @@ -127,24 +128,24 @@ var ErrCmtLogStateNotFound = errors.New("errCmtLogStateNotFound") // is currently required to be run. The unique identifier here is the // logIndex (there will be no different baseAnchorOutputs for the same logIndex). 
type Output struct { - logIndex LogIndex - baseAnchorOutput *isc.ChainOutputs + logIndex LogIndex + consensusInput cons.Input } -func makeOutput(logIndex LogIndex, baseAnchorOutput *isc.ChainOutputs) *Output { - return &Output{logIndex: logIndex, baseAnchorOutput: baseAnchorOutput} +func makeOutput(logIndex LogIndex, consensusInput cons.Input) *Output { + return &Output{logIndex: logIndex, consensusInput: consensusInput} } func (o *Output) GetLogIndex() LogIndex { return o.logIndex } -func (o *Output) GetBaseAnchorOutput() *isc.ChainOutputs { - return o.baseAnchorOutput +func (o *Output) ConsensusInput() cons.Input { + return o.consensusInput } func (o *Output) String() string { - return fmt.Sprintf("{Output, logIndex=%v, baseAnchorOutput=%v}", o.logIndex, o.baseAnchorOutput) + return fmt.Sprintf("{Output, logIndex=%v, consensusInput=%v}", o.logIndex, o.consensusInput) } // Protocol implementation. @@ -230,7 +231,7 @@ func New( } }, log.NewChildLogger("VO")) cl.varLogIndex = NewVarLogIndex(nodeIDs, n, f, prevLI, cl.varOutput.LogIndexAgreed, cclMetrics, log.NewChildLogger("VLI")) - cl.varLocalView = NewVarLocalView(pipeliningLimit, cl.varOutput.TipAOChanged, log.NewChildLogger("VLV")) + cl.varLocalView = NewVarLocalView(pipeliningLimit, cl.varOutput.ConsInputChanged, log.NewChildLogger("VLV")) cl.asGPA = gpa.NewOwnHandler(me, cl) return cl, nil } @@ -245,7 +246,7 @@ func (cl *cmtLogImpl) Input(input gpa.Input) gpa.OutMessages { cl.log.LogDebugf("Input %T: %+v", input, input) switch input := input.(type) { case *inputAnchorOutputConfirmed: - return cl.handleInputAnchorOutputConfirmed(input) + return cl.handleInputChainOutputsConfirmed(input) case *inputConsensusOutputDone: return cl.handleInputConsensusOutputDone(input) case *inputConsensusOutputSkip: @@ -278,16 +279,21 @@ func (cl *cmtLogImpl) Message(msg gpa.Message) gpa.OutMessages { // > UPON AnchorOutput (AO) {Confirmed | Rejected} by L1: // > ... 
-func (cl *cmtLogImpl) handleInputAnchorOutputConfirmed(input *inputAnchorOutputConfirmed) gpa.OutMessages { - _, tipUpdated, cnfLogIndex := cl.varLocalView.AnchorOutputConfirmed(input.anchorOutput) - if tipUpdated { - cl.varOutput.Suspended(false) - return cl.varLogIndex.L1ReplacedBaseAnchorOutput() - } - if !cnfLogIndex.IsNil() { - return cl.varLogIndex.L1ConfirmedAnchorOutput(cnfLogIndex) +func (cl *cmtLogImpl) handleInputChainOutputsConfirmed(input *inputAnchorOutputConfirmed) gpa.OutMessages { + msgs := gpa.NoMessages() + tipChanged := false + cnfLogIndex := cl.varLocalView.ChainOutputsConfirmed( + input.confirmedOutputs, + func(consInput cons.Input) { + cl.varOutput.Suspended(false) + tipChanged = true + msgs.AddAll(cl.varLogIndex.L1ReplacedBaseAnchorOutput()) + }, + ) + if !tipChanged && !cnfLogIndex.IsNil() { + msgs.AddAll(cl.varLogIndex.L1ConfirmedAnchorOutput(cnfLogIndex)) } - return nil + return msgs } // > ... @@ -299,16 +305,16 @@ func (cl *cmtLogImpl) handleInputConsensusOutputConfirmed(input *inputConsensusO func (cl *cmtLogImpl) handleInputConsensusOutputRejected(input *inputConsensusOutputRejected) gpa.OutMessages { msgs := gpa.NoMessages() msgs.AddAll(cl.varLogIndex.ConsensusOutputReceived(input.logIndex)) // This should be superfluous, always follows handleInputConsensusOutputDone. - if _, tipUpdated := cl.varLocalView.AnchorOutputRejected(input.anchorOutput); tipUpdated { - return msgs.AddAll(cl.varLogIndex.L1ReplacedBaseAnchorOutput()) - } + cl.varLocalView.ChainOutputsRejected((input.anchorOutput), func(consInput cons.Input) { + msgs.AddAll(cl.varLogIndex.L1ReplacedBaseAnchorOutput()) + }) return msgs } // > ON ConsensusOutput/DONE (CD) // > ... 
func (cl *cmtLogImpl) handleInputConsensusOutputDone(input *inputConsensusOutputDone) gpa.OutMessages { - cl.varLocalView.ConsensusOutputDone(input.logIndex, input.baseAnchorOutputID, input.nextAnchorOutput) + cl.varLocalView.ConsensusOutputDone(input.logIndex, input.result, func(consInput cons.Input) {}) return cl.varLogIndex.ConsensusOutputReceived(input.logIndex) } diff --git a/packages/chain/cmt_log/cmt_log_rapid_test.go b/packages/chain/cmt_log/cmt_log_rapid_test.go__TODO similarity index 100% rename from packages/chain/cmt_log/cmt_log_rapid_test.go rename to packages/chain/cmt_log/cmt_log_rapid_test.go__TODO diff --git a/packages/chain/cmt_log/cmt_log_test.go b/packages/chain/cmt_log/cmt_log_test.go__TODO similarity index 100% rename from packages/chain/cmt_log/cmt_log_test.go rename to packages/chain/cmt_log/cmt_log_test.go__TODO diff --git a/packages/chain/cmt_log/input_alias_output_confirmed.go b/packages/chain/cmt_log/input_alias_output_confirmed.go index 14527aad27..33d5a61575 100644 --- a/packages/chain/cmt_log/input_alias_output_confirmed.go +++ b/packages/chain/cmt_log/input_alias_output_confirmed.go @@ -11,15 +11,17 @@ import ( ) type inputAnchorOutputConfirmed struct { - anchorOutput *isc.ChainOutputs + confirmedOutputs *isc.ChainOutputs } -func NewInputAnchorOutputConfirmed(anchorOutput *isc.ChainOutputs) gpa.Input { +func NewInputAnchorOutputConfirmed( + confirmedOutputs *isc.ChainOutputs, +) gpa.Input { return &inputAnchorOutputConfirmed{ - anchorOutput: anchorOutput, + confirmedOutputs: confirmedOutputs, } } func (inp *inputAnchorOutputConfirmed) String() string { - return fmt.Sprintf("{cmtLog.inputAnchorOutputConfirmed, %v}", inp.anchorOutput) + return fmt.Sprintf("{cmtLog.inputAnchorOutputConfirmed, %v}", inp.confirmedOutputs) } diff --git a/packages/chain/cmt_log/input_consensus_output_confirmed.go b/packages/chain/cmt_log/input_consensus_output_confirmed.go index 49ab249299..4e13429781 100644 --- 
a/packages/chain/cmt_log/input_consensus_output_confirmed.go +++ b/packages/chain/cmt_log/input_consensus_output_confirmed.go @@ -11,17 +11,17 @@ import ( ) type inputConsensusOutputConfirmed struct { - anchorOutput *isc.ChainOutputs + chainOutputs *isc.ChainOutputs logIndex LogIndex } -func NewInputConsensusOutputConfirmed(anchorOutput *isc.ChainOutputs, logIndex LogIndex) gpa.Input { +func NewInputConsensusOutputConfirmed(chainOutputs *isc.ChainOutputs, logIndex LogIndex) gpa.Input { return &inputConsensusOutputConfirmed{ - anchorOutput: anchorOutput, + chainOutputs: chainOutputs, logIndex: logIndex, } } func (inp *inputConsensusOutputConfirmed) String() string { - return fmt.Sprintf("{cmtLog.inputConsensusOutputConfirmed, %v, li=%v}", inp.anchorOutput, inp.logIndex) + return fmt.Sprintf("{cmtLog.inputConsensusOutputConfirmed, %v, li=%v}", inp.chainOutputs, inp.logIndex) } diff --git a/packages/chain/cmt_log/input_consensus_output_done.go b/packages/chain/cmt_log/input_consensus_output_done.go index 7dc10b4959..037fd3c0cc 100644 --- a/packages/chain/cmt_log/input_consensus_output_done.go +++ b/packages/chain/cmt_log/input_consensus_output_done.go @@ -6,36 +6,29 @@ package cmt_log import ( "fmt" - iotago "github.com/iotaledger/iota.go/v4" + "github.com/iotaledger/wasp/packages/chain/cons" "github.com/iotaledger/wasp/packages/gpa" - "github.com/iotaledger/wasp/packages/isc" ) type inputConsensusOutputDone struct { - logIndex LogIndex - proposedBaseAO iotago.OutputID // Proposed BaseAO - baseAnchorOutputID iotago.OutputID // Decided BaseAO - nextAnchorOutput *isc.ChainOutputs // And the next one. + logIndex LogIndex + result *cons.Result } // This message is internal one, but should be sent by other components (e.g. consensus or the chain). 
func NewInputConsensusOutputDone( logIndex LogIndex, - proposedBaseAO iotago.OutputID, - baseAnchorOutputID iotago.OutputID, - nextAnchorOutput *isc.ChainOutputs, + result *cons.Result, ) gpa.Input { return &inputConsensusOutputDone{ - logIndex: logIndex, - proposedBaseAO: proposedBaseAO, - baseAnchorOutputID: baseAnchorOutputID, - nextAnchorOutput: nextAnchorOutput, + logIndex: logIndex, + result: result, } } func (inp *inputConsensusOutputDone) String() string { return fmt.Sprintf( - "{cmtLog.inputConsensusOutputDone, logIndex=%v, proposedBaseAO=%v, baseAnchorOutputID=%v, nextAnchorOutput=%v}", - inp.logIndex, inp.proposedBaseAO.ToHex(), inp.baseAnchorOutputID.ToHex(), inp.nextAnchorOutput, + "{cmtLog.inputConsensusOutputDone, logIndex=%v, %v}", + inp.logIndex, inp.result, ) } diff --git a/packages/chain/cmt_log/input_consensus_output_skip.go b/packages/chain/cmt_log/input_consensus_output_skip.go index 61278b0e7a..ae797444a7 100644 --- a/packages/chain/cmt_log/input_consensus_output_skip.go +++ b/packages/chain/cmt_log/input_consensus_output_skip.go @@ -6,29 +6,25 @@ package cmt_log import ( "fmt" - iotago "github.com/iotaledger/iota.go/v4" "github.com/iotaledger/wasp/packages/gpa" ) type inputConsensusOutputSkip struct { - logIndex LogIndex - proposedBaseAO iotago.OutputID + logIndex LogIndex } // This message is internal one, but should be sent by other components (e.g. consensus or the chain). 
func NewInputConsensusOutputSkip( logIndex LogIndex, - proposedBaseAO iotago.OutputID, ) gpa.Input { return &inputConsensusOutputSkip{ - logIndex: logIndex, - proposedBaseAO: proposedBaseAO, + logIndex: logIndex, } } func (inp *inputConsensusOutputSkip) String() string { return fmt.Sprintf( - "{cmtLog.inputConsensusOutputSkip, logIndex=%v, proposedBaseAO=%v}", - inp.logIndex, inp.proposedBaseAO.ToHex(), + "{cmtLog.inputConsensusOutputSkip, logIndex=%v}", + inp.logIndex, ) } diff --git a/packages/chain/cmt_log/var_localview.cfg b/packages/chain/cmt_log/var_localview.cfg new file mode 100644 index 0000000000..4c0168f865 --- /dev/null +++ b/packages/chain/cmt_log/var_localview.cfg @@ -0,0 +1,9 @@ +SPECIFICATION Spec +CONSTANTS + Accounts = {acc_1, acc_2} + Anchors = {anc_1, anc_2} + Blocks = {blk_1, blk_2} + NIL = NIL + StateIndexes = {0, 1} + LogIndexes = {10, 11} +INVARIANT TypeOK diff --git a/packages/chain/cmt_log/var_localview.go b/packages/chain/cmt_log/var_localview.go index c4919b06ff..d4cef993d2 100644 --- a/packages/chain/cmt_log/var_localview.go +++ b/packages/chain/cmt_log/var_localview.go @@ -1,67 +1,68 @@ // Copyright 2020 IOTA Stiftung // SPDX-License-Identifier: Apache-2.0 -// Here we implement the local view of a chain, maintained by a committee to decide which -// anchor output to propose to the ACS. The anchor output decided by the ACS will be used -// as an input for TX we build. +// Here we implement the local view of a tangle, maintained by a committee to decide which +// anchor and account outputs to propose to the consensus. The anchor and account outputs +// decided by the consensus will be used as an input for TX we build. // -// The LocalView maintains a list of Anchor Outputs (AOs). The are chained based on consumed/produced -// AOs in a transaction we publish. The goal here is to tract the unconfirmed anchor outputs, update +// The LocalView maintains a list of Anchor/Account Outputs (AOs). 
They are chained based on consumed/produced +// AOs in the transactions we publish. The goal here is to track the unconfirmed anchor/account outputs, update // the list based on confirmations/rejections from the L1. // // In overall, the LocalView acts as a filter between the L1 and LogIndex assignment in varLogIndex. // It has to distinguish between AOs that are confirming a prefix of the posted transaction (pipelining), -// from other changes in L1 (rotations, rollbacks, rejections, etc.). +// from other changes in L1 (rotations, rollbacks, rejections, reorgs, etc.). // -// We have several inputs: +// We have several inputs in this algorithm. // -// - **Anchor Output Confirmed**. -// It can be AO posted by this committee, -// as well as by other committee (e.g. chain was rotated to other committee and then back) +// Events from the L1: +// +// - **Anchor OR Account Output Confirmed**. +// They can be posted by this committee, +// as well as by another committee (e.g. chain was rotated to other committee and then back) // or a user (e.g. external rotation TX). +// The anchor/account output confirmation events will be received independently of each other. // -// - **Anchor Output Rejected**. +// - **Anchor/Account Output Rejected**. // These events are always for TXes posted by this committee. // We assume for each TX we will get either Confirmation or Rejection. // -// - **Consensus Done**. -// Consensus produced a TX, and will post it to the L1. +// - **Block outdated**. +// The block was posted to the L1 network, but was neither confirmed nor rejected for long enough. +// This event means we assume the block will not be confirmed anymore because of the max +// block depth (L1 parameter) or is too old to be included into the current time slot. // -// - **Consensus Skip**. -// Consensus completed without producing a TX and a block. So the previous AO is left unspent. +// Events from the consensus: // -// - **Consensus Recover**. 
-// Consensus is still running, but it takes long time, so maybe something is wrong -// and we should consider spawning another consensus for the same base AO. +// - **Consensus Done**. +// Consensus produced a TX, and will post it to the L1. +// Only the successful decisions (non-skip) are interesting here, as they +// provide new information for the local view. E.g. consensus output with the decision +// SKIP will only cause increase of log index, which is irrelevant here. // // On the pipelining: // -// - During the normal operation, if consensus produces a TX, it can use the produced AO -// to build next TX on it. That's pipelining. It allows to produce multiple blocks per -// L1 milestone. This component tracks the AOs build in this way and not confirmed yet. +// - During the normal operation, if consensus produces a TX, the chain can use it +// to build next TX on it. That's pipelining. It allows to produce multiple TXes without +// waiting L1 confirmations. This component tracks the AOs build but not confirmed yet. // -// - If AO produced by the consensus is rejected, then all the AOs build on top of it will +// - If an AO produced by the consensus is rejected, then all the AOs build on top of it will // be rejected eventually as well, because they use the rejected AO as an input. On the -// other hand, it is unclear if unconfirmed AOs before the rejected one will be confirmed -// or rejected, we will wait until L1 decides on all of them. +// other hand, it is unclear if unconfirmed AOs chained before the rejected output will be +// confirmed or rejected, we will wait until L1 decides on all of them. // -// - If we get a confirmed AO, that is not one of the AOs we have posted (and still waiting +// - If we get a confirmed AO that is not one of the AOs we have posted (and still waiting // for a decision), then someone from the outside of a committee transitioned the chain. 
// In this case all our produced/pending transactions are not meaningful anymore and we // have to start building all the chain from the newly received AO. -// -// - Recovery notice is received from a consensus (with SI/LI...) a new consensus will -// be started after agreeing on the next LI. The new consensus will take the same AO as -// an input and therefore will race with the existing one (maybe it has stuck for some -// reason, that's a fallback). In this case we stop building an unconfirmed chain for -// the future state indexes and will wait for some AO to be confirmed or all the concurrent -// consensus TX'es to be rejected. +// It is possible that the received unseen AO is posted by other nodes in the current +// committee (e.g. the current node is lagging). // // Note on the AO as an input for a consensus. The provided AO is just a proposal. After ACS // is completed, the participants will select the actual AO, which can differ from the one // proposed by this node. // -// NOTE: On the rejections. When we get a rejection of an AO, we cannot mark all the subsequent +// On the rejections. When we get a rejection of an AO, we cannot mark all the subsequent // StateIndexes as rejected, because it is possible that the rejected AO was started to publish // before a reorg/reject. Thus, only that single AO has to be marked as rejected. Nevertheless, // the AOs explicitly (via consumed AO) depending on the rejected AO can be cleaned up. @@ -70,247 +71,388 @@ package cmt_log import ( "fmt" - "github.com/samber/lo" - "github.com/iotaledger/hive.go/ds/shrinkingmap" "github.com/iotaledger/hive.go/log" iotago "github.com/iotaledger/iota.go/v4" + "github.com/iotaledger/wasp/packages/chain/cons" "github.com/iotaledger/wasp/packages/isc" ) type VarLocalView interface { - // - // Returns anchor output to produce next transaction on, or nil if we should wait. 
- // In the case of nil, we either wait for the first AO to receive, or we are - // still recovering from a TX rejection. - Value() *isc.ChainOutputs // // Corresponds to the `tx_posted` event in the specification. // Returns true, if the proposed BaseAnchorOutput has changed. - ConsensusOutputDone(logIndex LogIndex, consumed iotago.OutputID, published *isc.ChainOutputs) (*isc.ChainOutputs, bool) // TODO: Recheck, if consumed AO is the decided one. + ConsensusOutputDone( + logIndex LogIndex, + consResult *cons.Result, + eventOutputCB VarLocalViewOutputCB, + ) // - // Corresponds to the `ao_received` event in the specification. - // Returns true, if the proposed BaseAnchorOutput has changed. - // Also it returns confirmed log index, if a received AO confirms it, or NIL otherwise. - AnchorOutputConfirmed(confirmed *isc.ChainOutputs) (*isc.ChainOutputs, bool, LogIndex) + // Called upon receiving confirmation from the L1. + // In a normal scenario both (Anchor/Account) outputs will be confirmed together, + // because they are in the same TX. If someone moved one of those outputs externally, + // they can be moved independently. In such a case, one of the anc/acc parameters will be nil. + // - It is important to get these events in the correct order, otherwise the out-of-order + // events will be considered as an reorg. + ChainOutputsConfirmed( + confirmedOutputs *isc.ChainOutputs, + eventOutputCB VarLocalViewOutputCB, + ) LogIndex // - // Corresponds to the `tx_rejected` event in the specification. - // Returns true, if the proposed BaseAnchorOutput has changed. - AnchorOutputRejected(rejected *isc.ChainOutputs) (*isc.ChainOutputs, bool) + // Called when the TX containing the specified outputs was rejected. + // The outputs cannot be rejected independently because they are in the same TX. 
+ ChainOutputsRejected( + rejected *isc.ChainOutputs, + eventOutputCB VarLocalViewOutputCB, + ) + // + // + BlockExpired( + blockID iotago.BlockID, + eventOutputCB VarLocalViewOutputCB, + ) // // Support functions. StatusString() string } +type varLocalViewOutput struct { // Implements cons.Input + baseBlock *iotago.Block + baseCO *isc.ChainOutputs + reattachTX *iotago.SignedTransaction +} + +type VarLocalViewOutputCB = func(consInput cons.Input) + +func newVarLocalViewOutput( + baseBlock *iotago.Block, + baseCO *isc.ChainOutputs, + reattachTX *iotago.SignedTransaction, +) *varLocalViewOutput { + return &varLocalViewOutput{ + baseBlock: baseBlock, + baseCO: baseCO, + reattachTX: reattachTX, + } +} + +func (o *varLocalViewOutput) BaseBlock() *iotago.Block { return o.baseBlock } +func (o *varLocalViewOutput) BaseCO() *isc.ChainOutputs { return o.baseCO } +func (o *varLocalViewOutput) ReattachTX() *iotago.SignedTransaction { return o.reattachTX } +func (o *varLocalViewOutput) Equals(other *varLocalViewOutput) bool { + // + // Compare the BaseBlock + if (o.baseBlock == nil) != (other.baseBlock == nil) { + return false + } + if o.baseBlock != nil && other.baseBlock != nil && o.baseBlock.MustID() != other.baseBlock.MustID() { + return false + } + // + // Compare the BaseCO + if (o.baseCO == nil) != (other.baseCO == nil) { + return false + } + if o.baseCO != nil && other.baseCO != nil && !o.baseCO.Equals(other.baseCO) { + return false + } + // + // Compare the ReattachTX + if (o.reattachTX == nil) != (other.reattachTX == nil) { + return false + } + if o.reattachTX != nil && other.reattachTX != nil { + id1, err1 := o.reattachTX.ID() + id2, err2 := other.reattachTX.ID() + if err1 != nil { + panic(fmt.Errorf("cannot extract TX ID: %v", err1)) + } + if err2 != nil { + panic(fmt.Errorf("cannot extract TX ID: %v", err2)) + } + if id1 != id2 { + return false + } + } + return true +} + type varLocalViewEntry struct { - output *isc.ChainOutputs // The AO published. 
- consumed iotago.OutputID // The AO used as an input for the TX. - rejected bool // True, if the AO as rejected. We keep them to detect the other rejected AOs. - logIndex LogIndex // LogIndex of the consensus produced the output, if any. + producedChainOutputs *isc.ChainOutputs // The produced chain outputs. + producedTransaction *iotago.SignedTransaction // The transaction publishing the chain outputs. + consumedAnchorOutputID iotago.OutputID // Consumed in the TX. + consumedAccountOutputID iotago.OutputID // Consumed in the TX. + blocks map[iotago.BlockID]*iotago.Block // All the non-expired blocks for this TX. + reuse bool // True, if the TX should be reused. + rejected bool // True, if the TX as rejected. We keep them to detect the other rejected TXes. + logIndex LogIndex // LogIndex of the consensus produced the output, if any. +} + +func (e *varLocalViewEntry) isTxReusable() bool { + return len(e.blocks) == 0 && e.reuse && !e.rejected +} + +func (e *varLocalViewEntry) txID() iotago.SignedTransactionID { + id, err := e.producedTransaction.ID() + if err != nil { + panic(fmt.Errorf("cannot extract TX ID: %v", err)) + } + return id } type varLocalViewImpl struct { - // The latest confirmed AO, as received from L1. + // + // The latest confirmed CO, as received from L1. // All the pending entries are built on top of this one. - // It can be nil, if the latest AO is unclear (either not received yet, or some rejections happened). - confirmed *isc.ChainOutputs - // AOs produced by this committee, but not confirmed yet. + // It can be nil, if the latest AO is unclear (either not received yet). + // + // We don't use the isc.ChainOutputs structure here, because we can + // receive the anchor/account outputs separately. + confirmedAnchor *isc.AnchorOutputWithID + confirmedAccount *isc.AccountOutputWithID + confirmedCO *isc.ChainOutputs // Derived from the above, when all of them are received. + // + // AOs and blocks produced by this committee, but not confirmed yet. 
// It is possible to have several AOs for a StateIndex in the case of // Recovery/Timeout notices. Then the next consensus is started o build a TX. // Both of them can still produce a TX, but only one of them will be confirmed. - pending *shrinkingmap.ShrinkingMap[uint32, []*varLocalViewEntry] - // Limit pipelining (a number of unconfirmed TXes to this number.) - // -1 -- infinite, 0 -- disabled, x -- up to x TXes ahead. + pending *shrinkingmap.ShrinkingMap[uint32, map[iotago.SignedTransactionID]*varLocalViewEntry] + // + // Limit pipelining (the maximal number of unconfirmed TXes to build). + // -1 -- infinite, 0 -- disabled, L > 0 -- up to L TXes ahead. pipeliningLimit int + // // Callback for the TIP changes. - tipUpdatedCB func(ao *isc.ChainOutputs) + outputCB VarLocalViewOutputCB + output *varLocalViewOutput + // // Just a logger. log log.Logger } -func NewVarLocalView(pipeliningLimit int, tipUpdatedCB func(ao *isc.ChainOutputs), log log.Logger) VarLocalView { +func NewVarLocalView(pipeliningLimit int, outputCB VarLocalViewOutputCB, log log.Logger) VarLocalView { log.LogDebugf("NewVarLocalView, pipeliningLimit=%v", pipeliningLimit) return &varLocalViewImpl{ - confirmed: nil, - pending: shrinkingmap.New[uint32, []*varLocalViewEntry](), - pipeliningLimit: pipeliningLimit, - tipUpdatedCB: tipUpdatedCB, - log: log, + confirmedAnchor: nil, + confirmedAccount: nil, + confirmedCO: nil, + pending: shrinkingmap.New[uint32, map[iotago.SignedTransactionID]*varLocalViewEntry](), + pipeliningLimit: pipeliningLimit, + outputCB: outputCB, + output: nil, + log: log, } } -// Return latest AO to be used as an input for the following TX. -// nil means we have to wait: either we have no AO, or we have some rejections and waiting until a re-sync. 
-func (lvi *varLocalViewImpl) Value() *isc.ChainOutputs {
-	return lvi.findLatestPending()
-}
-
-func (lvi *varLocalViewImpl) ConsensusOutputDone(logIndex LogIndex, consumed iotago.OutputID, published *isc.ChainOutputs) (*isc.ChainOutputs, bool) {
-	lvi.log.LogDebugf("ConsensusOutputDone: logIndex=%v, consumed.ID=%v, published=%v", logIndex, consumed.ToHex(), published)
-	stateIndex := published.GetStateIndex()
-	prevLatest := lvi.findLatestPending()
-	//
-	// Check, if not outdated.
-	if lvi.confirmed == nil {
-		lvi.log.LogDebugf("⊳ Ignoring it, have no confirmed AO.")
-		return prevLatest, false
-	}
-	confirmedStateIndex := lvi.confirmed.GetStateIndex()
-	if stateIndex <= confirmedStateIndex {
-		lvi.log.LogDebugf("⊳ Ignoring it, outdated, current confirmed=%v", lvi.confirmed)
-		return prevLatest, false
+func (lvi *varLocalViewImpl) ConsensusOutputDone(
+	logIndex LogIndex,
+	consResult *cons.Result,
+	eventOutputCB VarLocalViewOutputCB,
+) {
+	lvi.log.LogDebugf("ConsensusOutputDone: logIndex=%v, consResult=%v", logIndex, consResult)
+	stateIndex := consResult.ProducedChainOutputs().GetStateIndex()
+	if lvi.confirmedCO != nil && lvi.confirmedCO.GetStateIndex() >= stateIndex {
+		// We already know it is outdated, so don't add it.
+		return
 	}
-	//
-	// Add it to the pending list.
-	var entries []*varLocalViewEntry
-	entries, ok := lvi.pending.Get(stateIndex)
+
+	var pendingForSI map[iotago.SignedTransactionID]*varLocalViewEntry
+	pendingForSI, ok := lvi.pending.Get(stateIndex)
 	if !ok {
-		entries = []*varLocalViewEntry{}
+		pendingForSI = map[iotago.SignedTransactionID]*varLocalViewEntry{}
+		lvi.pending.Set(stateIndex, pendingForSI)
 	}
-	if lo.ContainsBy(entries, func(e *varLocalViewEntry) bool { return e.output.Equals(published) }) {
-		lvi.log.LogDebugf("⊳ Ignoring it, duplicate.")
-		return prevLatest, false
+	txID := consResult.MustSignedTransactionID()
+	blID := consResult.MustIotaBlockID()
+	entry, ok := pendingForSI[txID]
+	if !ok {
+		entry = &varLocalViewEntry{
+			producedChainOutputs:    consResult.ProducedChainOutputs(),
+			producedTransaction:     consResult.ProducedTransaction(),
+			consumedAnchorOutputID:  consResult.ConsumedAnchorOutputID(),
+			consumedAccountOutputID: consResult.ConsumedAccountOutputID(),
+			blocks:                  map[iotago.BlockID]*iotago.Block{},
+			reuse:                   false, rejected: false, // TODO: Reconsider the reuse field.
+			logIndex:                logIndex,
+		}
+		pendingForSI[txID] = entry
 	}
-	entries = append(entries, &varLocalViewEntry{
-		output:   published,
-		consumed: consumed,
-		rejected: false,
-		logIndex: logIndex,
-	})
-	lvi.pending.Set(stateIndex, entries)
-	//
-	// Check, if the added AO is a new tip for the chain.
-	if published.Equals(lvi.findLatestPending()) {
-		lvi.log.LogDebugf("⊳ Will consider consensusOutput=%v as a tip, the current confirmed=%v.", published, lvi.confirmed)
-		lvi.tipUpdatedCB(published)
-		return published, true
-	}
-	lvi.log.LogDebugf("⊳ That's not a tip.")
-	return lvi.Value(), false
 }
 
-// A confirmed AO is received from L1. Base on that, we either truncate our local
-// history until the received AO (if we know it was posted before), or we replace
-// the entire history with an unseen AO (probably produced not by this chain×cmt).
-func (lvi *varLocalViewImpl) AnchorOutputConfirmed(confirmed *isc.ChainOutputs) (*isc.ChainOutputs, bool, LogIndex) { - lvi.log.LogDebugf("AnchorOutputConfirmed: confirmed=%v", confirmed) - cnfLogIndex := NilLogIndex() - stateIndex := confirmed.GetStateIndex() - oldTip := lvi.findLatestPending() - lvi.confirmed = confirmed - if lvi.isAnchorOutputPending(confirmed) { - lvi.pending.ForEach(func(si uint32, es []*varLocalViewEntry) bool { - if si <= stateIndex { - for _, e := range es { - lvi.log.LogDebugf("⊳ Removing[%v≤%v] %v", si, stateIndex, e.output) - if e.output.Equals(lvi.confirmed) { - cnfLogIndex = e.logIndex - } - } +// A confirmed Anchor/Account output is received from L1. Based on that, we either +// truncate our local history until the received CO (if we know it was posted before), +// or we replace the entire history with an unseen CO (probably produced not by this chain×cmt). +// +// The input here can contain either both - account and anchor outputs, of one of them. +// This is needed to keep the case of both outputs atomic, while supporting out-of-pair +// updates of the outputs. +// +// In the TLA+ spec this function corresponds to: +// - BothOutputsConfirmed, +// - AnchorOutputConfirmed, +// - AccountOutputConfirmed. 
+func (lvi *varLocalViewImpl) ChainOutputsConfirmed( + confirmedOutputs *isc.ChainOutputs, + eventOutputCB VarLocalViewOutputCB, +) LogIndex { + lvi.confirmedCO = confirmedOutputs + lvi.log.LogDebugf("AnchorOutputConfirmed: confirmed=%v", lvi.confirmedCO) + + confirmedLogIndex := NilLogIndex() + if pending, cnfLI := lvi.isConfirmedPending(); pending { + confirmedLogIndex = cnfLI + confirmedStateIndex := lvi.confirmedCO.GetStateIndex() + lvi.pending.ForEachKey(func(si uint32) bool { + if si <= confirmedStateIndex { lvi.pending.Delete(si) } return true }) - lvi.clearPendingIfAllRejected() } else { - lvi.pending.ForEach(func(si uint32, es []*varLocalViewEntry) bool { - for _, e := range es { - lvi.log.LogDebugf("⊳ Removing[all] %v", e.output) - } - lvi.pending.Delete(si) - return true - }) + lvi.pending.Clear() } - outAO, outChanged := lvi.outputIfChanged(oldTip, lvi.findLatestPending()) - return outAO, outChanged, cnfLogIndex + lvi.outputIfChanged(eventOutputCB) + return confirmedLogIndex } // Mark the specified AO as rejected. // Trim the suffix of rejected AOs. -func (lvi *varLocalViewImpl) AnchorOutputRejected(rejected *isc.ChainOutputs) (*isc.ChainOutputs, bool) { +func (lvi *varLocalViewImpl) ChainOutputsRejected(rejected *isc.ChainOutputs, eventOutputCB VarLocalViewOutputCB) { lvi.log.LogDebugf("AnchorOutputRejected: rejected=%v", rejected) stateIndex := rejected.GetStateIndex() - oldTip := lvi.findLatestPending() // // Mark the output as rejected, as well as all the outputs depending on it. if entries, ok := lvi.pending.Get(stateIndex); ok { for _, entry := range entries { - if entry.output.Equals(rejected) { + if entry.producedChainOutputs.Equals(rejected) { lvi.log.LogDebugf("⊳ Entry marked as rejected.") entry.rejected = true lvi.markDependentAsRejected(rejected) } } } - // - // If all remaining are rejected, remove them, and proceed from the confirmed one. 
- lvi.clearPendingIfAllRejected() - return lvi.outputIfChanged(oldTip, lvi.findLatestPending()) + lvi.outputIfChanged(eventOutputCB) } -func (lvi *varLocalViewImpl) markDependentAsRejected(ao *isc.ChainOutputs) { - accRejected := map[iotago.OutputID]struct{}{ao.AnchorOutputID: {}} - for si := ao.GetStateIndex() + 1; ; si++ { +func (lvi *varLocalViewImpl) BlockExpired(blockID iotago.BlockID, eventOutputCB VarLocalViewOutputCB) { + found := false + lvi.pending.ForEach(func(si uint32, es map[iotago.SignedTransactionID]*varLocalViewEntry) bool { + for _, e := range es { + if _, ok := e.blocks[blockID]; ok { + delete(e.blocks, blockID) + found = true + break + } + } + return !found + }) + if found { + lvi.outputIfChanged(eventOutputCB) + } +} + +func (lvi *varLocalViewImpl) markDependentAsRejected(co *isc.ChainOutputs) { + accRejected := map[iotago.OutputID]struct{}{co.AnchorOutputID: {}} + for si := co.GetStateIndex() + 1; ; si++ { es, esFound := lvi.pending.Get(si) if !esFound { break } for _, e := range es { - if _, ok := accRejected[e.consumed]; ok && !e.rejected { - lvi.log.LogDebugf("⊳ Also marking %v as rejected.", e.output) + if _, ok := accRejected[e.consumedAnchorOutputID]; ok && !e.rejected { + lvi.log.LogDebugf("⊳ Also marking %v as rejected.", e.producedChainOutputs) e.rejected = true - accRejected[e.output.AnchorOutputID] = struct{}{} + accRejected[e.producedChainOutputs.AnchorOutputID] = struct{}{} } } } } -func (lvi *varLocalViewImpl) clearPendingIfAllRejected() { - if !lvi.allRejected() || lvi.pending.IsEmpty() { +func (lvi *varLocalViewImpl) normalizePending() { + if !lvi.allRejectedOrExpired() || lvi.pending.IsEmpty() { return } - lvi.log.LogDebugf("⊳ All entries are rejected, clearing them.") - lvi.pending.ForEach(func(si uint32, es []*varLocalViewEntry) bool { + if lvi.confirmedCO == nil { + return + } + lvi.log.LogDebugf("⊳ All entries are rejected or expired, clearing them.") + // + // Only keep a prefix of entries forming a continuous chain + // 
with no forks nor rejections. + latestCO := lvi.confirmedCO + pendingSICount := uint32(lvi.pending.Size()) + remainingPendingEntries := map[iotago.SignedTransactionID]*varLocalViewEntry{} + for i := uint32(0); i < pendingSICount; i++ { + nextSIEntry := lvi.nextSinglePendingEntry(latestCO) + if nextSIEntry == nil { + // The pending entries don't form a continuous non-forked non-rejected chain. + break + } + if len(nextSIEntry.blocks) == 0 { + remainingPendingEntries[nextSIEntry.txID()] = nextSIEntry + } else { + break + } + latestCO = nextSIEntry.producedChainOutputs + } + lvi.pending.Clear() + for txID, e := range remainingPendingEntries { + e.reuse = true + lvi.pending.Set( + e.producedChainOutputs.GetStateIndex(), + map[iotago.SignedTransactionID]*varLocalViewEntry{txID: e}, + ) + } +} + +func (lvi *varLocalViewImpl) allRejectedOrExpired() bool { + all := true + lvi.pending.ForEach(func(si uint32, es map[iotago.SignedTransactionID]*varLocalViewEntry) bool { for _, e := range es { - lvi.log.LogDebugf("⊳ Clearing %v", e.output) + if !e.rejected || len(e.blocks) != 0 { + all = false + } } - lvi.pending.Delete(si) - return true + return all }) + return all } -func (lvi *varLocalViewImpl) outputIfChanged(oldTip, newTip *isc.ChainOutputs) (*isc.ChainOutputs, bool) { - if oldTip == nil && newTip == nil { - lvi.log.LogDebugf("⊳ Tip remains nil.") - return nil, false - } - if oldTip == nil || newTip == nil { - lvi.log.LogDebugf("⊳ New tip=%v, was %v", newTip, oldTip) - lvi.tipUpdatedCB(newTip) - return newTip, true - } - if oldTip.Equals(newTip) { - lvi.log.LogDebugf("⊳ Tip remains %v.", newTip) - return newTip, false - } - lvi.log.LogDebugf("⊳ New tip=%v, was %v", newTip, oldTip) - lvi.tipUpdatedCB(newTip) - return newTip, true +func (lvi *varLocalViewImpl) outputIfChanged(eventOutputCB VarLocalViewOutputCB) { + lvi.normalizePending() + newOutput := lvi.deriveOutput() + if newOutput == nil && lvi.output == nil { + return + } + if newOutput != nil && lvi.output != nil { + 
if newOutput.Equals(lvi.output) { + return + } + } + lvi.output = newOutput + lvi.outputCB(newOutput) + if eventOutputCB != nil { + eventOutputCB(newOutput) + } } func (lvi *varLocalViewImpl) StatusString() string { - return fmt.Sprintf("{varLocalView: confirmed=%v, tip=%v, |pendingSIs|=%v}", lvi.confirmed, lvi.findLatestPending(), lvi.pending.Size()) + var tip *isc.ChainOutputs + if lvi.output != nil { + tip = lvi.output.baseCO + } + return fmt.Sprintf("{varLocalView: tip=%v, |pendingSIs|=%v}", tip, lvi.pending.Size()) } -// Latest pending AO is only considered existing, if the current pending -// set of AOs is a chain, with no gaps, or alternatives, and all the AOs -// are not rejected. -func (lvi *varLocalViewImpl) findLatestPending() *isc.ChainOutputs { - if lvi.confirmed == nil { +// This implements TLA+ spec operators: HaveOutput and Output. +// Additionally, the pipelining limit is considered here. +func (lvi *varLocalViewImpl) deriveOutput() *varLocalViewOutput { + if lvi.confirmedAnchor == nil || lvi.confirmedAccount == nil { + // Should have a confirmed base. return nil } - latest := lvi.confirmed - confirmedSI := lvi.confirmed.GetStateIndex() pendingSICount := uint32(lvi.pending.Size()) if lvi.pipeliningLimit >= 0 && pendingSICount > uint32(lvi.pipeliningLimit) { // pipeliningLimit < 0 ==> no limit on the pipelining. @@ -318,44 +460,90 @@ func (lvi *varLocalViewImpl) findLatestPending() *isc.ChainOutputs { // pipeliningLimit > 0 ==> up to pipeliningLimit TXes can be build unconfirmed. return nil } + var reusableEntry *varLocalViewEntry // First reusable TX found. + var reusableParent *isc.ChainOutputs // Parent outputs of the reusableEntry. + var latestBlock *iotago.Block // A block before the proposed TX or CO. + latestCO := lvi.confirmedCO for i := uint32(0); i < pendingSICount; i++ { - entries, ok := lvi.pending.Get(confirmedSI + i + 1) - if !ok { - return nil // That's a gap. - } - if len(entries) != 1 { - return nil // Alternatives exist. 
- } - if entries[0].rejected { - return nil // Some are rejected. + nextSIEntry := lvi.nextSinglePendingEntry(latestCO) + if nextSIEntry == nil { + // The pending entries don't form a continuous non-forked non-rejected chain. + return nil } - if latest.AnchorOutputID != entries[0].consumed { - return nil // Don't form a chain. + if nextSIEntry.isTxReusable() { + // If this is the first entry that contains a reusable TX, record it. + if reusableEntry == nil { + reusableEntry = nextSIEntry + reusableParent = latestCO + } + latestBlock = nil + } else { + // If we saw a reusable entry before, but the current is not reusable, + // we cannot reuse it yet and the chain is not clear. Thus, nothing to propose. + if reusableEntry != nil { + return nil + } + if len(nextSIEntry.blocks) != 1 { + return nil + } + for _, latestBlock = range nextSIEntry.blocks { + break // Just take first/single element + } } - latest = entries[0].output + latestCO = nextSIEntry.producedChainOutputs } - return latest + if reusableEntry != nil { + return newVarLocalViewOutput( + nil, // If we are reusing a TX, the parent block will be too old to be a tip. + reusableParent, // Cannot be nil. + reusableEntry.producedTransaction, // Will contain all the TXes, or none of them. They will form a chain. + ) + } + return newVarLocalViewOutput( + latestBlock, // Can be nil. + latestCO, // Cannot be nil. + nil, // Will contain all the TXes, or none of them. They will form a chain. 
+ ) } -func (lvi *varLocalViewImpl) isAnchorOutputPending(ao *isc.ChainOutputs) bool { - found := false - lvi.pending.ForEach(func(si uint32, es []*varLocalViewEntry) bool { - found = lo.ContainsBy(es, func(e *varLocalViewEntry) bool { - return e.output.Equals(ao) - }) - return !found - }) - return found +func (lvi *varLocalViewImpl) nextSinglePendingEntry(prevCO *isc.ChainOutputs) *varLocalViewEntry { + prevSI := prevCO.GetStateIndex() + nextSIEntries, ok := lvi.pending.Get(prevSI + 1) + if !ok { + // Should have chain without gaps. + return nil + } + if len(nextSIEntries) != 1 { + // Should have no pending forks. + return nil + } + var nextSIEntry *varLocalViewEntry + for _, nextSIEntry = range nextSIEntries { + break // Just take the first (a single) element + } + if nextSIEntry.rejected { + // Should have no unresolved rejections. + return nil + } + if prevCO.AnchorOutputID != nextSIEntry.consumedAnchorOutputID { + // Should have chain without gaps. + return nil + } + return nextSIEntry } -func (lvi *varLocalViewImpl) allRejected() bool { - allRejected := true - lvi.pending.ForEach(func(si uint32, es []*varLocalViewEntry) bool { - containsPending := lo.ContainsBy(es, func(e *varLocalViewEntry) bool { - return !e.rejected - }) - allRejected = !containsPending - return !containsPending +func (lvi *varLocalViewImpl) isConfirmedPending() (bool, LogIndex) { + found := false + logIndex := NilLogIndex() + lvi.pending.ForEach(func(si uint32, es map[iotago.SignedTransactionID]*varLocalViewEntry) bool { + for _, e := range es { + if e.producedChainOutputs.Equals(lvi.confirmedCO) { + found = true + logIndex = e.logIndex + break + } + } + return !found }) - return allRejected + return found, logIndex } diff --git a/packages/chain/cmt_log/var_localview.tla b/packages/chain/cmt_log/var_localview.tla new file mode 100644 index 0000000000..299ed9db92 --- /dev/null +++ b/packages/chain/cmt_log/var_localview.tla @@ -0,0 +1,285 @@ +---- MODULE var_localview 
------------------------------------------------------ +(* +Notes: + - If a block is confirmed already, we don't need to use it as a parent + block for the next block. + - Alias/Account outputs are moved in a single TX by the chain. + If moved outside of the chain, we only correlate them in the scope + of a milestone/time-slot. + +Output: + - Anchor and Alias outputs to use in the next consensus step. + - Optionally a block to use as a tip. + - Optionally a list of TXes to re-publish (in new blocks). + +*) +EXTENDS Naturals, Sequences +CONSTANTS Anchors, Accounts, Blocks \* Domains. +VARIABLE cnfAnchor \* Latest known confirmed anchor output. +VARIABLE cnfAccount \* Latest known confirmed account output. +VARIABLE pending \* A set of pending TXes. +VARIABLE anchorSI \* Only assigned in the initial state. +const == <> +vars == <> + +(* Defined explicitly to be able to override. *) +StateIndexes == Nat + +(* Defined explicitly to be able to override. + They are needed here only to model blocks "duplicates" + by the consensus because of the uncertainty from the L1. *) +LogIndexes == Nat + +NIL == CHOOSE NIL : + /\ NIL \notin Anchors + /\ NIL \notin Accounts + /\ NIL \notin Blocks + +Entries == [ + si : StateIndexes, + li : LogIndexes, + anchor : Anchors, + account : Accounts, + block : Blocks \cup {NIL}, + consAnchor : Anchors, \* Consumed Anchor output. + consAccount : Accounts, \* Consumed Account output. + rejected : BOOLEAN +] + +TypeOK == + /\ cnfAnchor \in Anchors \cup {NIL} + /\ cnfAccount \in Accounts \cup {NIL} + /\ pending \in SUBSET Entries + +pendingAfterBySI(e) == + { p \in pending : p.si > e.si } + +(* +depends(e, d) is true, if there is a chain of pending entries + through which the entry d depends transitively on the entry e. 
+*)
+RECURSIVE depends(_, _)
+depends(anc, d) ==
+    \/ d.consAnchor = anc
+    \/ \E d2 \in pending : d2.anchor = anc /\ depends(anc, d2)
+
+pendingWithRejected(e) ==
+    LET upd(p) == IF p = e \/ depends(e.anchor, p)
+                  THEN [p EXCEPT !.rejected = TRUE]
+                  ELSE p
+    IN { upd(p) : p \in pending }
+
+pendingWithExpired(e) ==
+    LET upd(p) == IF p = e
+                  THEN [p EXCEPT !.block = NIL]
+                  ELSE p
+    IN { upd(p) : p \in pending }
+
+\* Replace the existing block, if it was expired already.
+pendingWithNew(e) ==
+    LET woExpired == { p \in pending : ~(p.anchor = e.anchor /\ p.block = NIL) }
+    IN woExpired \cup {e}
+
+(*
+The general idea -- clear the rejected entries if all of them are either
+confirmed (removed from the pending list) or rejected. This will allow
+to proceed with building the chain. The chain can be built when there are no
+rejections in the pending chain.
+
+But the situation is more complicated. Some of the entries can be
+with blocks expired, and all this can be forked into several chains.
+So we refine the above condition to the following:
+  - Only clean up the pending list, if all entries are either confirmed,
+    rejected or expired.
+  - When cleaning-up the pending chain, we leave only the entries, that
+    are expired, non-forked and depend on the last confirmed output.
+*)
+pendingCleaned(ps) ==
+    IF \A p \in ps : p.rejected \/ p.block = NIL
+    THEN
+        LET notRejected == { p \in ps : ~p.rejected }
+            noForks == { p \in notRejected : \A p2 \in notRejected : p2.si = p.si => p2 = p }
+            noGaps == { p \in noForks : depends(cnfAnchor, p) }
+        IN noGaps
+    ELSE ps
+
+--------------------------------------------------------------------------------
+\* Actions.
+ +ConsensusOutputDone == + \E anc, cAnc \in Anchors, + acc, cAcc \in Accounts, + b \in Blocks, + li \in LogIndexes + : + /\ cnfAnchor # NIL + /\ cnfAccount # NIL + /\ anchorSI[anc] > anchorSI[cnfAnchor] + /\ pending' = pendingWithNew([ + si |-> anchorSI[anc], + li |-> li, + anchor |-> anc, + account |-> acc, + block |-> b, + consAnchor |-> cAnc, + consAccount |-> cAcc, + rejected |-> FALSE + ]) + /\ UNCHANGED <> + + +(* +We can have multiple entries with the received ao. +That's because multiple blocks can publish the same TX with the same AO. +But all of them will have the same SI, but different LIs. + +TODO: If we receive AOs confirmed instead of blocks, we don't + know which block it was, thus cannot use it for pipelining. + Check, maybe we receive blocks, not outputs. +*) +AnchorOutputConfirmed == \E anc \in Anchors: + /\ cnfAnchor' = anc + /\ IF \E e \in pending : e.anchor = anc THEN + \E e \in pending : e.anchor = anc \* Should be a singe. + /\ pending' = pendingCleaned(pendingAfterBySI(e)) + /\ cnfAccount' = e.account + ELSE + \* In this case we don't know the account output anymore, + \* because that's a change from outside. + /\ pending' = {} + /\ cnfAccount' = NIL + /\ UNCHANGED <> + +(* +This action is symmetric to the AnchorOutputConfirmed (mod Account/Anchor). +*) +AccountOutputConfirmed == \E acc \in Accounts: + /\ cnfAccount' = acc + /\ IF \E e \in pending : e.account = acc THEN + \E e \in pending : e.account = acc \* Should be a singe. + /\ pending' = pendingCleaned(pendingAfterBySI(e)) + /\ cnfAnchor' = e.anchor + ELSE + /\ pending' = {} + /\ cnfAnchor' = NIL + /\ UNCHANGED <> + +BothOutputsConfirmed == \E anc \in Anchors, acc \in Accounts: + /\ cnfAnchor' = anc + /\ cnfAccount' = acc + /\ IF \E e \in pending : e.anchor = anc /\ e.account = acc THEN + (* we can use \/ in the following, but either both will be in the TX by us, + or they are actually externally produced, possibly in separate TXes. 
*) + \E e \in pending : e.anchor = anc /\ e.account = acc + /\ pending' = pendingCleaned(pendingAfterBySI(e)) + ELSE + /\ pending' = {} + /\ UNCHANGED <> + + +AnchorOutputRejected == + \E anc \in Anchors: + \E e \in pending : e.anchor = anc + /\ pending' = pendingCleaned(pendingWithRejected(e)) + /\ UNCHANGED <> + +AccountOutputRejected == + \E acc \in Accounts: + \E e \in pending : e.account = acc + /\ pending' = pendingCleaned(pendingWithRejected(e)) + /\ UNCHANGED <> + +(* +If a block is outdated, then we mark the corresponding entry as not having +the block assigned. The node will wait until all the blocks are either +confirmed, or all of them are outdated (or rejected). + +NOTE: We have considered the following alternatives and went with +the case (A), as it is safer, regarding the limited knowledge on +the L1 node behaviour. + A) Upon reception of the event on the block expiry we mark only a single + block as outdated. The chain will be built further when all the blocks + are invalidated individually (or confirmed, etc). + B) Invalidate the expired block as well as all the the dependent entries. + Here we can also start building the chain immediately from the last + non-expired block. That sounds like an optimization, but can cause + more rejections if new blocks are build on soon-expiring-blocks. 
+*) +BlockExpired == + \E blk \in Blocks: + \E e \in pending: + /\ e.block = blk + /\ pending' = pendingCleaned(pendingWithExpired(e)) + /\ UNCHANGED <> + +-------------------------------------------------------------------------------- +Init == + /\ cnfAnchor = NIL + /\ cnfAccount = NIL + /\ pending = {} + /\ anchorSI \in [Anchors -> StateIndexes] + +Next == + \/ ConsensusOutputDone + \/ BothOutputsConfirmed + \/ AnchorOutputConfirmed \/ AccountOutputConfirmed + \/ AnchorOutputRejected \/ AccountOutputRejected + \/ BlockExpired + +Fair == WF_vars(Next) + +Spec == Init /\ [][Next]_vars /\ Fair + +-------------------------------------------------------------------------------- +\* Properties. + +(* +We have an output, if we have a confirmed base and have +no pending forks nor unresolved rejections AND we have either +no block expired, or all of the remaining are expired. +*) +HaveOutput == + /\ cnfAnchor # NIL /\ cnfAccount # NIL \* Have a confirmed base. + /\ \A e1, e2 \in pending: e1.si = e2.si => e1 = e2 \* Have no pending forks. + /\ \A e \in pending: ~e.rejected \* Have no unresolved rejections. + /\ \E e \in pending: e.block = NIL => \A ee \in pending: ee.block = NIL \* All or none. + /\ \A e \in pending: depends(cnfAnchor, e) \* Have chain without gaps. + +(* +The output, if exists, is + - either the last pending anc/acc/blk, or + - the last confirmed anc/acc, if there is no pending entries. + + TODO: Rejected vs Expired. 
+*) +Output == + IF HaveOutput + THEN IF \E e \in pending : e.block # NIL + THEN + LET last == CHOOSE e \in pending : + /\ e.block # NIL + /\ \A e2 \in pending: e2.si <= e.si + IN + [ + baseAnc |-> last.anchor, + baseAcc |-> last.account, + baseBlk |-> last.block, + reattach |-> {p \in pending : p.si > last.si} + ] + ELSE + [ + baseAnc |-> cnfAnchor, + baseAcc |-> cnfAccount, + baseBlk |-> NIL, + reattach |-> {pending} + ] + ELSE + [ + baseAnc |-> NIL, + baseAcc |-> NIL, + baseBlk |-> NIL, + reattach |-> {} + ] + + +================================================================================ diff --git a/packages/chain/cmt_log/var_localview_rapid_test.go b/packages/chain/cmt_log/var_localview_rapid_test.go__TODO similarity index 98% rename from packages/chain/cmt_log/var_localview_rapid_test.go rename to packages/chain/cmt_log/var_localview_rapid_test.go__TODO index 7b16064210..dbeb926154 100644 --- a/packages/chain/cmt_log/var_localview_rapid_test.go +++ b/packages/chain/cmt_log/var_localview_rapid_test.go__TODO @@ -54,7 +54,7 @@ func (sm *varLocalViewSM) L1ExternalAOConfirmed(t *rapid.T) { // // The AO from L1 is always respected as the correct one. newAO := sm.nextAO() - tipAO, tipChanged, _ := sm.lv.AnchorOutputConfirmed(newAO) + tipAO, tipChanged, _ := sm.lv.ChainOutputConfirmed(newAO) require.True(t, tipChanged) // BaseAO is replaced or set. require.Equal(t, newAO, tipAO) // BaseAO is replaced or set. require.Equal(t, newAO, sm.lv.Value()) // BaseAO is replaced or set. @@ -79,7 +79,7 @@ func (sm *varLocalViewSM) L1PendingApproved(t *rapid.T) { // Notify the LocalView on the CNF. cnfAO := sm.pending[0] prevAO := sm.lv.Value() - _, tipChanged, _ := sm.lv.AnchorOutputConfirmed(cnfAO) + _, tipChanged, _ := sm.lv.ChainOutputConfirmed(cnfAO) // // Update the model. 
sm.confirmed = append(sm.confirmed, cnfAO) diff --git a/packages/chain/cmt_log/var_localview_test.go b/packages/chain/cmt_log/var_localview_test.go__TODO similarity index 92% rename from packages/chain/cmt_log/var_localview_test.go rename to packages/chain/cmt_log/var_localview_test.go__TODO index 8095da8a21..74283cf3db 100644 --- a/packages/chain/cmt_log/var_localview_test.go +++ b/packages/chain/cmt_log/var_localview_test.go__TODO @@ -18,7 +18,7 @@ func TestVarLocalView(t *testing.T) { log := testlogger.NewLogger(t) j := cmt_log.NewVarLocalView(-1, func(ao *isc.ChainOutputs) {}, log) require.Nil(t, j.Value()) - tipAO, ok, _ := j.AnchorOutputConfirmed(&isc.ChainOutputs{ + tipAO, ok, _ := j.ChainOutputConfirmed(&isc.ChainOutputs{ AnchorOutput: &iotago.AnchorOutput{}, AnchorOutputID: iotago.OutputID{}, }) diff --git a/packages/chain/cmt_log/var_output.go b/packages/chain/cmt_log/var_output.go index d88f1be5ac..d21260e2ec 100644 --- a/packages/chain/cmt_log/var_output.go +++ b/packages/chain/cmt_log/var_output.go @@ -4,22 +4,27 @@ import ( "fmt" "github.com/iotaledger/hive.go/log" - "github.com/iotaledger/wasp/packages/isc" + "github.com/iotaledger/wasp/packages/chain/cons" ) +// We can provide input to the next consensus when +// - there is base output determined or block to sign. +// - the log index is agreed. +// - the minimal delay has passed from the previous consensus. +// +// TODO: delays should be considered only for the consensus rounds producing new blocks. type VarOutput interface { - // Summary of the internal state. - StatusString() string + StatusString() string // Summary of the internal state. 
Value() *Output LogIndexAgreed(li LogIndex) - TipAOChanged(ao *isc.ChainOutputs) + ConsInputChanged(consInput cons.Input) CanPropose() Suspended(suspended bool) } type varOutputImpl struct { candidateLI LogIndex - candidateAO *isc.ChainOutputs + consInput cons.Input canPropose bool suspended bool outValue *Output @@ -30,7 +35,7 @@ type varOutputImpl struct { func NewVarOutput(persistUsed func(li LogIndex), log log.Logger) VarOutput { return &varOutputImpl{ candidateLI: NilLogIndex(), - candidateAO: nil, + consInput: nil, canPropose: true, suspended: false, outValue: nil, @@ -41,8 +46,8 @@ func NewVarOutput(persistUsed func(li LogIndex), log log.Logger) VarOutput { func (vo *varOutputImpl) StatusString() string { return fmt.Sprintf( - "{varOutput: output=%v, candidate{li=%v, ao=%v}, canPropose=%v, suspended=%v}", - vo.outValue, vo.candidateLI, vo.candidateAO, vo.canPropose, vo.suspended, + "{varOutput: output=%v, candidate{li=%v, consInput=%v}, canPropose=%v, suspended=%v}", + vo.outValue, vo.candidateLI, vo.consInput, vo.canPropose, vo.suspended, ) } @@ -58,8 +63,8 @@ func (vo *varOutputImpl) LogIndexAgreed(li LogIndex) { vo.tryOutput() } -func (vo *varOutputImpl) TipAOChanged(ao *isc.ChainOutputs) { - vo.candidateAO = ao +func (vo *varOutputImpl) ConsInputChanged(consInput cons.Input) { + vo.consInput = consInput vo.tryOutput() } @@ -79,14 +84,14 @@ func (vo *varOutputImpl) Suspended(suspended bool) { } func (vo *varOutputImpl) tryOutput() { - if vo.candidateLI.IsNil() || vo.candidateAO == nil || !vo.canPropose { + if vo.candidateLI.IsNil() || vo.consInput == nil || !vo.canPropose { // Keep output unchanged. return } // // Output the new data. 
vo.persistUsed(vo.candidateLI) - vo.outValue = makeOutput(vo.candidateLI, vo.candidateAO) + vo.outValue = makeOutput(vo.candidateLI, vo.consInput) vo.log.LogInfof("⊪ Output %v", vo.outValue) vo.canPropose = false vo.candidateLI = NilLogIndex() diff --git a/packages/chain/cons/WaspConsensusInstance.png b/packages/chain/cons/WaspConsensusInstance-V1-stardust.png similarity index 100% rename from packages/chain/cons/WaspConsensusInstance.png rename to packages/chain/cons/WaspConsensusInstance-V1-stardust.png diff --git a/packages/chain/cons/WaspConsensusInstance.xopp b/packages/chain/cons/WaspConsensusInstance-V1-stardust.xopp similarity index 100% rename from packages/chain/cons/WaspConsensusInstance.xopp rename to packages/chain/cons/WaspConsensusInstance-V1-stardust.xopp diff --git a/packages/chain/cons/WaspConsensusInstance-V2-i20 copy.xopp b/packages/chain/cons/WaspConsensusInstance-V2-i20 copy.xopp new file mode 100644 index 0000000000..be9bbc32ac Binary files /dev/null and b/packages/chain/cons/WaspConsensusInstance-V2-i20 copy.xopp differ diff --git a/packages/chain/cons/WaspConsensusInstance-V2-i20.xopp b/packages/chain/cons/WaspConsensusInstance-V2-i20.xopp new file mode 100644 index 0000000000..d92355330a Binary files /dev/null and b/packages/chain/cons/WaspConsensusInstance-V2-i20.xopp differ diff --git a/packages/chain/cons/bp/aggregated_batch_proposals.go b/packages/chain/cons/bp/aggregated_batch_proposals.go index 974557bc7b..31d78b71eb 100644 --- a/packages/chain/cons/bp/aggregated_batch_proposals.go +++ b/packages/chain/cons/bp/aggregated_batch_proposals.go @@ -9,6 +9,7 @@ import ( "time" "github.com/iotaledger/hive.go/log" + iotago "github.com/iotaledger/iota.go/v4" "github.com/iotaledger/wasp/packages/gpa" "github.com/iotaledger/wasp/packages/hashing" "github.com/iotaledger/wasp/packages/isc" @@ -17,21 +18,24 @@ import ( // Here we store just an aggregated info. 
type AggregatedBatchProposals struct { - shouldBeSkipped bool - batchProposalSet batchProposalSet - decidedIndexProposals map[gpa.NodeID][]int - decidedBaseAnchorOutput *isc.ChainOutputs - decidedRequestRefs []*isc.RequestRef - aggregatedTime time.Time + shouldBeSkipped bool + batchProposalSet batchProposalSet + decidedDSStIndexProposals map[gpa.NodeID][]int + decidedDSSbIndexProposals map[gpa.NodeID][]int + decidedBaseBlockID *iotago.BlockID + decidedBaseCO *isc.ChainOutputs + decidedReattachTX *iotago.SignedTransaction + decidedRequestRefs []*isc.RequestRef + aggregatedTime time.Time } -func AggregateBatchProposals(inputs map[gpa.NodeID][]byte, nodeIDs []gpa.NodeID, f int, log log.Logger) *AggregatedBatchProposals { +func AggregateBatchProposals(inputs map[gpa.NodeID][]byte, nodeIDs []gpa.NodeID, f int, l1API iotago.API, log log.Logger) *AggregatedBatchProposals { bps := batchProposalSet{} // // Parse and validate the batch proposals. Skip the invalid ones. for nid := range inputs { - var batchProposal *BatchProposal - batchProposal, err := rwutil.ReadFromBytes(inputs[nid], new(BatchProposal)) + batchProposal := EmptyBatchProposal(l1API) + batchProposal, err := rwutil.ReadFromBytes(inputs[nid], batchProposal) if err != nil { log.LogWarnf("cannot decode BatchProposal from %v: %v", nid, err) continue @@ -49,19 +53,31 @@ func AggregateBatchProposals(inputs map[gpa.NodeID][]byte, nodeIDs []gpa.NodeID, return &AggregatedBatchProposals{shouldBeSkipped: true} } aggregatedTime := bps.aggregatedTime(f) - decidedBaseAnchorOutput := bps.decidedBaseAnchorOutput(f) + decidedBaseCO := bps.decidedBaseAnchorOutput(f) abp := &AggregatedBatchProposals{ - batchProposalSet: bps, - decidedIndexProposals: bps.decidedDSSIndexProposals(), - decidedBaseAnchorOutput: decidedBaseAnchorOutput, - decidedRequestRefs: bps.decidedRequestRefs(f, decidedBaseAnchorOutput), - aggregatedTime: aggregatedTime, - } - if abp.decidedBaseAnchorOutput == nil || len(abp.decidedRequestRefs) == 0 || 
abp.aggregatedTime.IsZero() { - log.LogDebugf( - "Cant' aggregate batch proposal: decidedBaseAnchorOutput=%v, |decidedRequestRefs|=%v, aggregatedTime=%v", - abp.decidedBaseAnchorOutput, len(abp.decidedRequestRefs), abp.aggregatedTime, - ) + batchProposalSet: bps, + decidedDSStIndexProposals: bps.decidedDSStIndexProposals(), + decidedDSSbIndexProposals: bps.decidedDSSbIndexProposals(), + decidedBaseCO: decidedBaseCO, + decidedBaseBlockID: bps.decidedBaseBlockID(f), + decidedReattachTX: bps.decidedReattachTX(f), + decidedRequestRefs: bps.decidedRequestRefs(f, decidedBaseCO), + aggregatedTime: aggregatedTime, + } + if abp.decidedBaseCO == nil && abp.decidedReattachTX == nil { + log.LogDebugf("Cant' aggregate batch proposal: decidedBaseCO and decidedReattachTX are both nil.") + abp.shouldBeSkipped = true + } + if abp.decidedBaseCO != nil && abp.decidedReattachTX != nil { + log.LogDebugf("Cant' aggregate batch proposal: decidedBaseCO and decidedReattachTX are both non-nil.") + abp.shouldBeSkipped = true + } + if abp.decidedBaseCO != nil && len(abp.decidedRequestRefs) == 0 { + log.LogDebugf("Cant' aggregate batch proposal: decidedBaseCO is non-nil, but there is no decided requests.") + abp.shouldBeSkipped = true + } + if abp.aggregatedTime.IsZero() { + log.LogDebugf("Cant' aggregate batch proposal: aggregatedTime is zero") abp.shouldBeSkipped = true } return abp @@ -71,18 +87,46 @@ func (abp *AggregatedBatchProposals) ShouldBeSkipped() bool { return abp.shouldBeSkipped } -func (abp *AggregatedBatchProposals) DecidedDSSIndexProposals() map[gpa.NodeID][]int { +func (abp *AggregatedBatchProposals) ShouldBuildNewTX() bool { + return !abp.shouldBeSkipped && abp.decidedBaseCO != nil +} + +func (abp *AggregatedBatchProposals) DecidedReattachTX() *iotago.SignedTransaction { + if abp.shouldBeSkipped { + panic("trying to use aggregated proposal marked to be skipped") + } + if abp.decidedReattachTX == nil { + panic("trying to use reattach TX id when no TX was decided to be reused") 
+ } + return abp.decidedReattachTX +} + +func (abp *AggregatedBatchProposals) DecidedDSStIndexProposals() map[gpa.NodeID][]int { + if abp.shouldBeSkipped { + panic("trying to use aggregated proposal marked to be skipped") + } + return abp.decidedDSStIndexProposals +} + +func (abp *AggregatedBatchProposals) DecidedDSSbIndexProposals() map[gpa.NodeID][]int { + if abp.shouldBeSkipped { + panic("trying to use aggregated proposal marked to be skipped") + } + return abp.decidedDSSbIndexProposals +} + +func (abp *AggregatedBatchProposals) DecidedBaseCO() *isc.ChainOutputs { // TODO: Use it as one of the parents, if non-nil. if abp.shouldBeSkipped { panic("trying to use aggregated proposal marked to be skipped") } - return abp.decidedIndexProposals + return abp.decidedBaseCO } -func (abp *AggregatedBatchProposals) DecidedBaseAnchorOutput() *isc.ChainOutputs { +func (abp *AggregatedBatchProposals) DecidedStrongParents(randomness hashing.HashValue) iotago.BlockIDs { if abp.shouldBeSkipped { panic("trying to use aggregated proposal marked to be skipped") } - return abp.decidedBaseAnchorOutput + return abp.batchProposalSet.decidedStrongParents(abp.aggregatedTime, randomness) } func (abp *AggregatedBatchProposals) AggregatedTime() time.Time { diff --git a/packages/chain/cons/bp/batch_proposal.go b/packages/chain/cons/bp/batch_proposal.go index 59437fc454..eba6411129 100644 --- a/packages/chain/cons/bp/batch_proposal.go +++ b/packages/chain/cons/bp/batch_proposal.go @@ -4,35 +4,65 @@ package bp import ( + "fmt" "io" "time" + iotago "github.com/iotaledger/iota.go/v4" "github.com/iotaledger/wasp/packages/isc" "github.com/iotaledger/wasp/packages/util" "github.com/iotaledger/wasp/packages/util/rwutil" ) type BatchProposal struct { - nodeIndex uint16 // Just for a double-check. - baseAnchorOutput *isc.ChainOutputs // Proposed Base AnchorOutput to use. - dssIndexProposal util.BitVector // DSS Index proposal. - timeData time.Time // Our view of time. 
- validatorFeeDestination isc.AgentID // Proposed destination for fees. - requestRefs []*isc.RequestRef // Requests we propose to include into the execution. + l1API iotago.API // Transient, for deserialization. + nodeIndex uint16 // Just for a double-check. + baseCO *isc.ChainOutputs // Represents the consensus input received by the node[nodeIndex]. + baseBlockID iotago.BlockID // Represents the consensus input received by the node[nodeIndex]. + strongParents iotago.BlockIDs // Proposed TIPS to attach to. + reattachTX *iotago.SignedTransaction // The transaction to reattach, if any. + dssTIndexProposal util.BitVector // DSS Index proposal for a TX. + dssBIndexProposal util.BitVector // DSS Index proposal for a Block. + timeData time.Time // Our view of time. + validatorFeeDestination isc.AgentID // Proposed destination for fees. + requestRefs []*isc.RequestRef // Requests we propose to include into the execution. +} + +// This case is for deserialization. +func EmptyBatchProposal(l1API iotago.API) *BatchProposal { + return &BatchProposal{l1API: l1API} } func NewBatchProposal( + l1API iotago.API, nodeIndex uint16, - baseAnchorOutput *isc.ChainOutputs, - dssIndexProposal util.BitVector, + baseBlock *iotago.Block, + strongParents iotago.BlockIDs, + baseCO *isc.ChainOutputs, + reattachTX *iotago.SignedTransaction, + dssTIndexProposal util.BitVector, + dssBIndexProposal util.BitVector, timeData time.Time, validatorFeeDestination isc.AgentID, requestRefs []*isc.RequestRef, ) *BatchProposal { + var baseBlockID iotago.BlockID + var err error + if baseBlock != nil { + baseBlockID, err = baseBlock.ID() + if err != nil { + panic("cannot extract block id") + } + } return &BatchProposal{ + l1API: l1API, nodeIndex: nodeIndex, - baseAnchorOutput: baseAnchorOutput, - dssIndexProposal: dssIndexProposal, + baseCO: baseCO, + baseBlockID: baseBlockID, + strongParents: strongParents, + reattachTX: reattachTX, + dssTIndexProposal: dssTIndexProposal, + dssBIndexProposal: 
dssBIndexProposal, timeData: timeData, validatorFeeDestination: validatorFeeDestination, requestRefs: requestRefs, @@ -46,10 +76,37 @@ func (b *BatchProposal) Bytes() []byte { func (b *BatchProposal) Read(r io.Reader) error { rr := rwutil.NewReader(r) b.nodeIndex = rr.ReadUint16() - b.baseAnchorOutput = new(isc.ChainOutputs) - rr.Read(b.baseAnchorOutput) - b.dssIndexProposal = util.NewFixedSizeBitVector(0) - rr.Read(b.dssIndexProposal) + + if rr.ReadBool() { + b.baseCO = new(isc.ChainOutputs) + rr.Read(b.baseCO) + } else { + b.baseCO = nil + } + + rr.ReadN(b.baseBlockID[:]) + + spCount := rr.ReadInt8() + b.strongParents = make(iotago.BlockIDs, spCount) + for i := range b.strongParents { + rr.ReadN(b.strongParents[i][:]) + } + + if rr.ReadBool() { + txBytes := rr.ReadBytes() + b.reattachTX = new(iotago.SignedTransaction) + b.reattachTX.API = b.l1API + _, err := b.reattachTX.API.Decode(txBytes, b.reattachTX) + if err != nil { + return err + } + } else { + b.reattachTX = nil + } + b.dssTIndexProposal = util.NewFixedSizeBitVector(0) + rr.Read(b.dssTIndexProposal) + b.dssBIndexProposal = util.NewFixedSizeBitVector(0) + rr.Read(b.dssBIndexProposal) b.timeData = time.Unix(0, rr.ReadInt64()) b.validatorFeeDestination = isc.AgentIDFromReader(rr) size := rr.ReadSize16() @@ -65,8 +122,29 @@ func (b *BatchProposal) Read(r io.Reader) error { func (b *BatchProposal) Write(w io.Writer) error { ww := rwutil.NewWriter(w) ww.WriteUint16(b.nodeIndex) - ww.Write(b.baseAnchorOutput) - ww.Write(b.dssIndexProposal) + + ww.WriteBool(b.baseCO != nil) + if b.baseCO != nil { + ww.Write(b.baseCO) + } + + ww.WriteN(b.baseBlockID[:]) + + ww.WriteInt8(int8(len(b.strongParents))) + for _, sp := range b.strongParents { + ww.WriteN(sp[:]) + } + + ww.WriteBool(b.reattachTX != nil) + if b.reattachTX != nil { + bs, err := b.reattachTX.API.Encode(b.reattachTX) + if err != nil { + panic(fmt.Errorf("cannot encode the TX: %v", err)) + } + ww.WriteBytes(bs) + } + ww.Write(b.dssTIndexProposal) + 
 ww.Write(b.dssBIndexProposal) ww.WriteInt64(b.timeData.UnixNano()) ww.Write(b.validatorFeeDestination) ww.WriteSize16(len(b.requestRefs)) diff --git a/packages/chain/cons/bp/batch_proposal_set.go b/packages/chain/cons/bp/batch_proposal_set.go index 869de03dbb..3865ddc584 100644 --- a/packages/chain/cons/bp/batch_proposal_set.go +++ b/packages/chain/cons/bp/batch_proposal_set.go @@ -10,6 +10,7 @@ import ( "sort" "time" + iotago "github.com/iotaledger/iota.go/v4" "github.com/iotaledger/wasp/packages/gpa" "github.com/iotaledger/wasp/packages/hashing" "github.com/iotaledger/wasp/packages/isc" @@ -17,10 +18,18 @@ import ( type batchProposalSet map[gpa.NodeID]*BatchProposal -func (bps batchProposalSet) decidedDSSIndexProposals() map[gpa.NodeID][]int { +func (bps batchProposalSet) decidedDSStIndexProposals() map[gpa.NodeID][]int { ips := map[gpa.NodeID][]int{} for nid, bp := range bps { - ips[nid] = bp.dssIndexProposal.AsInts() + ips[nid] = bp.dssTIndexProposal.AsInts() + } + return ips +} + +func (bps batchProposalSet) decidedDSSbIndexProposals() map[gpa.NodeID][]int { + ips := map[gpa.NodeID][]int{} + for nid, bp := range bps { + ips[nid] = bp.dssBIndexProposal.AsInts() + } + return ips } @@ -31,10 +40,10 @@ func (bps batchProposalSet) decidedBaseAnchorOutput(f int) *isc.ChainOutputs { counts := map[hashing.HashValue]int{} values := map[hashing.HashValue]*isc.ChainOutputs{} for _, bp := range bps { - h := bp.baseAnchorOutput.Hash() + h := bp.baseCO.Hash() counts[h]++ if _, ok := values[h]; !ok { - values[h] = bp.baseAnchorOutput + values[h] = bp.baseCO } } @@ -59,9 +68,50 @@ func (bps batchProposalSet) decidedBaseAnchorOutput(f int) *isc.ChainOutputs { return found } +func (bps batchProposalSet) decidedBaseBlockID(f int) *iotago.BlockID { + counts := map[iotago.BlockID]int{} + for _, bp := range bps { + if !bp.baseBlockID.Empty() { + counts[bp.baseBlockID]++ + } + } + for i, c := range counts { + if c > 2*f { + stID := i + return &stID + } + } + return nil +} + +func (bps 
batchProposalSet) decidedStrongParents(aggregatedTime time.Time, randomness hashing.HashValue) iotago.BlockIDs { + return bps[bps.selectedProposal(aggregatedTime, randomness)].strongParents +} + +func (bps batchProposalSet) decidedReattachTX(f int) *iotago.SignedTransaction { + counts := map[iotago.SignedTransactionID]int{} + values := map[iotago.SignedTransactionID]*iotago.SignedTransaction{} + for _, bp := range bps { + if bp.reattachTX != nil { + id, err := bp.reattachTX.ID() + if err != nil { + continue + } + counts[id]++ + values[id] = bp.reattachTX + } + } + for i, c := range counts { + if c > 2*f { + return values[i] + } + } + return nil +} + // Take requests proposed by at least F+1 nodes. Then the request is proposed at least by 1 fair node. // We should only consider the proposals from the nodes that proposed the decided AO, otherwise we can select already processed requests. -func (bps batchProposalSet) decidedRequestRefs(f int, ao *isc.ChainOutputs) []*isc.RequestRef { +func (bps batchProposalSet) decidedRequestRefs(f int, co *isc.ChainOutputs) []*isc.RequestRef { minNumberMentioned := f + 1 requestsByKey := map[isc.RequestRefKey]*isc.RequestRef{} numMentioned := map[isc.RequestRefKey]int{} @@ -69,7 +119,7 @@ func (bps batchProposalSet) decidedRequestRefs(f int, ao *isc.ChainOutputs) []*i // Count number of nodes proposing a request. 
maxLen := 0 for _, bp := range bps { - if !bp.baseAnchorOutput.Equals(ao) { + if !bp.baseCO.Equals(co) { continue } for _, reqRef := range bp.requestRefs { diff --git a/packages/chain/cons/bp/batch_proposal_test.go b/packages/chain/cons/bp/batch_proposal_test.go index c3bea03dad..520f01bbca 100644 --- a/packages/chain/cons/bp/batch_proposal_test.go +++ b/packages/chain/cons/bp/batch_proposal_test.go @@ -9,6 +9,7 @@ import ( "github.com/stretchr/testify/require" + iotago "github.com/iotaledger/iota.go/v4" "github.com/iotaledger/wasp/packages/cryptolib" "github.com/iotaledger/wasp/packages/hashing" "github.com/iotaledger/wasp/packages/isc" @@ -27,14 +28,27 @@ func TestBatchProposal1Serialization(t *testing.T) { }) } - batchProposal1 := NewBatchProposal(10, isc.RandomChainOutputs(), util.NewFixedSizeBitVector(11), time.Now(), isc.NewRandomAgentID(), reqRefs) + batchProposal1 := NewBatchProposal( + iotago.LatestAPI(iotago.NewV3SnapshotProtocolParameters()), + 10, + nil, + iotago.BlockIDs{}, + isc.RandomChainOutputs(), + nil, + util.NewFixedSizeBitVector(11), + util.NewFixedSizeBitVector(11), + time.Now(), + isc.NewRandomAgentID(), + reqRefs, + ) b := rwutil.WriteToBytes(batchProposal1) batchProposal2, err := rwutil.ReadFromBytes(b, new(BatchProposal)) require.NoError(t, err) require.Equal(t, batchProposal1.nodeIndex, batchProposal2.nodeIndex) - require.Equal(t, batchProposal1.baseAnchorOutput, batchProposal2.baseAnchorOutput) - require.Equal(t, batchProposal1.dssIndexProposal, batchProposal2.dssIndexProposal) + require.Equal(t, batchProposal1.baseCO, batchProposal2.baseCO) + require.Equal(t, batchProposal1.dssTIndexProposal, batchProposal2.dssTIndexProposal) + require.Equal(t, batchProposal1.dssBIndexProposal, batchProposal2.dssBIndexProposal) require.Equal(t, batchProposal1.timeData.UnixNano(), batchProposal2.timeData.UnixNano()) require.Equal(t, batchProposal1.validatorFeeDestination, batchProposal2.validatorFeeDestination) require.Equal(t, 
batchProposal1.requestRefs, batchProposal2.requestRefs) diff --git a/packages/chain/cons/bp/bp_test.go b/packages/chain/cons/bp/bp_test.go index 9eba23cbf8..c8e7dd0ef1 100644 --- a/packages/chain/cons/bp/bp_test.go +++ b/packages/chain/cons/bp/bp_test.go @@ -66,8 +66,13 @@ func TestOffLedgerOrdering(t *testing.T) { // // Construct the batch proposal, and aggregate it. bp0 := bp.NewBatchProposal( + iotago.LatestAPI(iotago.NewV3SnapshotProtocolParameters()), 0, + nil, + iotago.BlockIDs{}, ao0, + nil, + util.NewFixedSizeBitVector(1).SetBits([]int{0}), util.NewFixedSizeBitVector(1).SetBits([]int{0}), time.Now(), isc.NewRandomAgentID(), @@ -77,7 +82,11 @@ func TestOffLedgerOrdering(t *testing.T) { abpInputs := map[gpa.NodeID][]byte{ nodeIDs[0]: bp0.Bytes(), } - abp := bp.AggregateBatchProposals(abpInputs, nodeIDs, 0, log) + abp := bp.AggregateBatchProposals( + abpInputs, nodeIDs, 0, + iotago.LatestAPI(iotago.NewV3SnapshotProtocolParameters()), + log, + ) require.NotNil(t, abp) require.Equal(t, len(abp.DecidedRequestRefs()), len(rs)) // diff --git a/packages/chain/cons/cons.go b/packages/chain/cons/cons.go index ff4379f8cf..c98a9d9af6 100644 --- a/packages/chain/cons/cons.go +++ b/packages/chain/cons/cons.go @@ -52,6 +52,7 @@ package cons import ( + "crypto/ed25519" "encoding/binary" "fmt" "time" @@ -61,6 +62,8 @@ import ( "github.com/iotaledger/hive.go/log" iotago "github.com/iotaledger/iota.go/v4" + "github.com/iotaledger/iota.go/v4/api" + "github.com/iotaledger/iota.go/v4/builder" "github.com/iotaledger/wasp/packages/chain/cons/bp" "github.com/iotaledger/wasp/packages/chain/dss" "github.com/iotaledger/wasp/packages/cryptolib" @@ -82,6 +85,18 @@ type Cons interface { AsGPA() gpa.GPA } +// This is the result of the chain tip tracking. +// Here we decide the latest block to build on, +// optionally a block to use as a tip and +// a list of transactions that should be resubmitted +// (by producing and signing new blocks). 
+ +type Input interface { + BaseBlock() *iotago.Block // Can be nil or present in all cases. + BaseCO() *isc.ChainOutputs // Either BaseCO + ReattachTX() *iotago.SignedTransaction // or reattachTX will be present. +} + type OutputStatus byte func (os OutputStatus) String() string { @@ -113,6 +128,7 @@ type Output struct { NeedStateMgrStateProposal *isc.ChainOutputs // Query for a proposal for Virtual State (it will go to the batch proposal). NeedStateMgrDecidedState *isc.ChainOutputs // Query for a decided Virtual State to be used by VM. NeedStateMgrSaveBlock state.StateDraft // Ask StateMgr to save the produced block. + NeedNodeConnBlockTipSet bool // We need a tip set for a block now. // TODO: Handle it. NeedVMResult *vm.VMTask // VM Result is needed for this (agreed) batch. // // Following is the final result. @@ -121,42 +137,81 @@ type Output struct { } type Result struct { - // TODO Transaction should probably be an `*iotago.Block` instead. - Transaction *iotago.SignedTransaction // The TX for committing the block. - BaseAnchorOutput iotago.OutputID // AO consumed in the TX. - NextAnchorOutput *isc.ChainOutputs // AO produced in the TX. - Block state.Block // The state diff produced. + producedChainOutputs *isc.ChainOutputs // The produced chain outputs. + producedTransaction *iotago.SignedTransaction // The TX for committing the block. + producedIotaBlock *iotago.Block // Block produced to publish the TX. + producedStateBlock state.Block // The state diff produced. + consumedAnchorOutputID iotago.OutputID // Consumed in the TX. + consumedAccountOutputID iotago.OutputID // Consumed in the TX. + // TODO: Cleanup the following. + // Transaction *iotago.SignedTransaction // The TX for committing the block. + // BaseAnchorOutput iotago.OutputID // AO consumed in the TX. + // NextAnchorOutput *isc.ChainOutputs // AO produced in the TX. + // Block state.Block // The state diff produced. 
} func (r *Result) String() string { - txID, err := r.Transaction.ID() + txID, err := r.producedTransaction.ID() if err != nil { txID = iotago.SignedTransactionID{} } - return fmt.Sprintf("{cons.Result, txID=%v, baseAO=%v, nextAO=%v}", txID, r.BaseAnchorOutput.ToHex(), r.NextAnchorOutput) + return fmt.Sprintf("{cons.Result, txID=%v, baseAO=%v, nextAO=%v}", txID, r.consumedAnchorOutputID.ToHex(), r.producedChainOutputs) +} + +func (r *Result) ProducedChainOutputs() *isc.ChainOutputs { return r.producedChainOutputs } +func (r *Result) ProducedTransaction() *iotago.SignedTransaction { return r.producedTransaction } +func (r *Result) ProducedIotaBlock() *iotago.Block { return r.producedIotaBlock } +func (r *Result) ProducedStateBlock() state.Block { return r.producedStateBlock } +func (r *Result) ConsumedAnchorOutputID() iotago.OutputID { return r.consumedAnchorOutputID } +func (r *Result) ConsumedAccountOutputID() iotago.OutputID { return r.consumedAccountOutputID } + +// Block might be nil, so check it before calling this. +func (r *Result) MustIotaBlockID() iotago.BlockID { + blockID, err := r.producedIotaBlock.ID() + if err != nil { + panic(fmt.Errorf("failed to get BlockID: %v", err)) + } + return blockID +} + +// Transaction will always be set, so it should be safe to call this. +func (r *Result) MustSignedTransactionID() iotago.SignedTransactionID { + txID, err := r.producedTransaction.ID() + if err != nil { + panic(fmt.Errorf("failed to get TX ID: %v", err)) + } + return txID } type consImpl struct { + instID []byte // Consensus Instance ID. chainID isc.ChainID chainStore state.Store edSuite suites.Suite // For signatures. blsSuite suites.Suite // For randomness only. dkShare tcrypto.DKShare l1APIProvider iotago.APIProvider + tokenInfo *api.InfoResBaseToken processorCache *processors.Cache nodeIDs []gpa.NodeID me gpa.NodeID f int asGPA gpa.GPA - dss dss.DSS + dssT dss.DSS + dssB dss.DSS acs acs.ACS subMP SyncMP // Mempool. subSM SyncSM // StateMgr. 
- subDSS SyncDSS // Distributed Schnorr Signature. + subNC SyncNC // NodeConn. + subDSSt SyncDSS // Distributed Schnorr Signature to sign the TX. + subDSSb SyncDSS // Distributed Schnorr Signature to sign the block. subACS SyncACS // Asynchronous Common Subset. subRND SyncRND // Randomness. subVM SyncVM // Virtual Machine. - subTX SyncTX // Building final TX. + subTXS SyncTXSig // Building final TX. + subBlkD SyncBlkData // Builds the block, not signed yet. + subBlkS SyncBlkSig // Builds the signed block. + subRes SyncRes // Collects the consensus result. term *termCondition // To detect, when this instance can be terminated. msgWrapper *gpa.MsgWrapper output *Output @@ -169,6 +224,11 @@ const ( subsystemTypeACS ) +const ( + subsystemTypeDSSIndexT int = iota + subsystemTypeDSSIndexB +) + var ( _ gpa.GPA = &consImpl{} _ Cons = &consImpl{} @@ -176,6 +236,7 @@ var ( func New( l1APIProvider iotago.APIProvider, + tokenInfo *api.InfoResBaseToken, chainID isc.ChainID, chainStore state.Store, me gpa.NodeID, @@ -217,6 +278,7 @@ func New( return semi.New(round, realCC) } c := &consImpl{ + instID: instID, chainID: chainID, chainStore: chainStore, edSuite: edSuite, @@ -225,9 +287,11 @@ func New( processorCache: processorCache, nodeIDs: nodeIDs, l1APIProvider: l1APIProvider, + tokenInfo: tokenInfo, me: me, f: f, - dss: dss.New(edSuite, nodeIDs, nodePKs, f, me, myKyberKeys.Private, longTermDKS, log.NewChildLogger("DSS")), + dssT: dss.New(edSuite, nodeIDs, nodePKs, f, me, myKyberKeys.Private, longTermDKS, log.NewChildLogger("DSSt")), + dssB: dss.New(edSuite, nodeIDs, nodePKs, f, me, myKyberKeys.Private, longTermDKS, log.NewChildLogger("DSSb")), acs: acs.New(nodeIDs, me, f, acsCCInstFunc, acsLog), output: &Output{Status: Running}, log: log, @@ -249,13 +313,24 @@ func New( c.uponSMSaveProducedBlockInputsReady, c.uponSMSaveProducedBlockDone, ) - c.subDSS = NewSyncDSS( - c.uponDSSInitialInputsReady, - c.uponDSSIndexProposalReady, - c.uponDSSSigningInputsReceived, - 
c.uponDSSOutputReady, + c.subNC = NewSyncNC( + c.uponNCBlockTipSetNeeded, + c.uponNCBlockTipSetReceived, + ) + c.subDSSt = NewSyncDSS( + c.uponDSStInitialInputsReady, + c.uponDSStIndexProposalReady, + c.uponDSStSigningInputsReceived, + c.uponDSStOutputReady, + ) + c.subDSSb = NewSyncDSS( + c.uponDSSbInitialInputsReady, + c.uponDSSbIndexProposalReady, + c.uponDSSbSigningInputsReceived, + c.uponDSSbOutputReady, ) c.subACS = NewSyncACS( + c.uponACSTipsRequired, c.uponACSInputsReceived, c.uponACSOutputReceived, c.uponACSTerminated, @@ -269,9 +344,18 @@ func New( c.uponVMInputsReceived, c.uponVMOutputReceived, ) - c.subTX = NewSyncTX( + c.subTXS = NewSyncTX( c.uponTXInputsReady, ) + c.subBlkD = NewSyncBlkData( + c.uponBlkDataInputsReady, + ) + c.subBlkS = NewSyncBlkSig( + c.uponBlkSigInputsReady, + ) + c.subRes = NewSyncRes( + c.uponResInputsReady, + ) c.term = newTermCondition( c.uponTerminationCondition, ) @@ -281,10 +365,13 @@ func New( // Used to select a target subsystem for a wrapped message received. func (c *consImpl) msgWrapperFunc(subsystem byte, index int) (gpa.GPA, error) { if subsystem == subsystemTypeDSS { - if index != 0 { - return nil, fmt.Errorf("unexpected DSS index: %v", index) + switch index { + case subsystemTypeDSSIndexT: + return c.dssT.AsGPA(), nil + case subsystemTypeDSSIndexB: + return c.dssB.AsGPA(), nil } - return c.dss.AsGPA(), nil + return nil, fmt.Errorf("unexpected DSS index: %v", index) } if subsystem == subsystemTypeACS { if index != 0 { @@ -310,10 +397,18 @@ func (c *consImpl) Input(input gpa.Input) gpa.OutMessages { switch input := input.(type) { case *inputProposal: c.log.LogInfof("Consensus started, received %v", input.String()) - return gpa.NoMessages(). - AddAll(c.subMP.BaseAnchorOutputReceived(input.baseAnchorOutput)). - AddAll(c.subSM.ProposedBaseAnchorOutputReceived(input.baseAnchorOutput)). - AddAll(c.subDSS.InitialInputReceived()) + msgs := gpa.NoMessages() + msgs = msgs. + AddAll(c.subDSSt.InitialInputReceived()). 
+ AddAll(c.subDSSb.InitialInputReceived()) + if input.params.BaseCO() != nil { + return msgs. + AddAll(c.subACS.TXCreateInputReceived(input.params.BaseCO(), input.params.BaseBlock())). + AddAll(c.subMP.BaseAnchorOutputReceived(input.params.BaseCO())). + AddAll(c.subSM.ProposedBaseAnchorOutputReceived(input.params.BaseCO())) + } + return msgs. + AddAll(c.subACS.BlockOnlyInputReceived(input.params.ReattachTX(), input.params.BaseBlock())) case *inputMempoolProposal: return c.subMP.ProposalReceived(input.requestRefs) case *inputMempoolRequests: @@ -324,8 +419,10 @@ func (c *consImpl) Input(input gpa.Input) gpa.OutMessages { return c.subSM.DecidedVirtualStateReceived(input.chainState) case *inputStateMgrBlockSaved: return c.subSM.BlockSaved(input.block) + case *inputNodeConnBlockTipSet: + return c.subNC.BlockTipSetReceived(input.strongParents) case *inputTimeData: - return c.subACS.TimeDataReceived(input.timeData) + return c.subACS.TimeUpdateReceived(input.timeData) case *inputVMResult: return c.subVM.VMResultReceived(input.task) } @@ -349,7 +446,15 @@ func (c *consImpl) Message(msg gpa.Message) gpa.OutMessages { case subsystemTypeACS: return msgs.AddAll(c.subACS.ACSOutputReceived(sub.Output())) case subsystemTypeDSS: - return msgs.AddAll(c.subDSS.DSSOutputReceived(sub.Output())) + switch msgT.Index() { + case subsystemTypeDSSIndexT: + return msgs.AddAll(c.subDSSt.DSSOutputReceived(sub.Output())) + case subsystemTypeDSSIndexB: + return msgs.AddAll(c.subDSSb.DSSOutputReceived(sub.Output())) + default: + c.log.LogWarnf("unexpected DSS index after check: %+v", msg) + return nil + } default: c.log.LogWarnf("unexpected subsystem after check: %+v", msg) return nil @@ -364,14 +469,15 @@ func (c *consImpl) Output() gpa.Output { func (c *consImpl) StatusString() string { // We con't include RND here, maybe that's less important, and visible from the VM status. 
- return fmt.Sprintf("{consImpl⟨%v⟩,%v,%v,%v,%v,%v,%v}", + return fmt.Sprintf("{consImpl⟨%v⟩,%v,%v,%v,%v,%v,%v,%v}", c.output.Status, c.subSM.String(), c.subMP.String(), - c.subDSS.String(), + c.subDSSt.String(), + c.subDSSb.String(), c.subACS.String(), c.subVM.String(), - c.subTX.String(), + c.subTXS.String(), ) } @@ -385,7 +491,9 @@ func (c *consImpl) uponMPProposalInputsReady(baseAnchorOutput *isc.ChainOutputs) func (c *consImpl) uponMPProposalReceived(requestRefs []*isc.RequestRef) gpa.OutMessages { c.output.NeedMempoolProposal = nil - return c.subACS.MempoolRequestsReceived(requestRefs) + return gpa.NoMessages(). + // AddAll(c.subNC.MempoolProposalReceived()). + AddAll(c.subACS.MempoolRequestsReceived(requestRefs)) } func (c *consImpl) uponMPRequestsNeeded(requestRefs []*isc.RequestRef) gpa.OutMessages { @@ -408,7 +516,9 @@ func (c *consImpl) uponSMStateProposalQueryInputsReady(baseAnchorOutput *isc.Cha func (c *consImpl) uponSMStateProposalReceived(proposedAnchorOutput *isc.ChainOutputs) gpa.OutMessages { c.output.NeedStateMgrStateProposal = nil - return c.subACS.StateProposalReceived(proposedAnchorOutput) + return gpa.NoMessages(). + // AddAll(c.subNC.StateMgrProposalReceived()). + AddAll(c.subACS.StateMgrProposalReceived(proposedAnchorOutput)) } func (c *consImpl) uponSMDecidedStateQueryInputsReady(decidedBaseAnchorOutput *isc.ChainOutputs) gpa.OutMessages { @@ -433,53 +543,153 @@ func (c *consImpl) uponSMSaveProducedBlockInputsReady(producedBlock state.StateD func (c *consImpl) uponSMSaveProducedBlockDone(block state.Block) gpa.OutMessages { c.output.NeedStateMgrSaveBlock = nil - return c.subTX.BlockSaved(block) + return gpa.NoMessages(). + AddAll(c.subTXS.BlockSaved(block)). 
+ AddAll(c.subRes.HaveStateBlock(block)) } -//////////////////////////////////////////////////////////////////////////////// -// DSS +// ////////////////////////////////////////////////////////////////////////////// +// NC + +func (c *consImpl) uponNCBlockTipSetNeeded() gpa.OutMessages { + c.output.NeedNodeConnBlockTipSet = true + return nil +} -func (c *consImpl) uponDSSInitialInputsReady() gpa.OutMessages { - c.log.LogDebugf("uponDSSInitialInputsReady") - sub, subMsgs, err := c.msgWrapper.DelegateInput(subsystemTypeDSS, 0, dss.NewInputStart()) +func (c *consImpl) uponNCBlockTipSetReceived(strongParents iotago.BlockIDs) gpa.OutMessages { + c.output.NeedNodeConnBlockTipSet = false + return c.subACS.BlockTipSetProposalReceived(strongParents) +} + +// func (c *consImpl) uponNCInputsReady() gpa.OutMessages { +// c.output.NeedNodeConnBlockTipSet = true +// return nil +// } + +// func (c *consImpl) uponNCOutputReady( +// blockToRefer *iotago.Block, +// txCreateInputReceived *isc.ChainOutputs, +// blockOnlyInputReceived *iotago.SignedTransaction, +// mempoolProposalReceived []*isc.RequestRef, +// dssTIndexProposal []int, +// dssBIndexProposal []int, +// timeData time.Time, +// strongParents iotago.BlockIDs, +// ) gpa.OutMessages { +// return c.subACS.ACSInputsReceived( +// blockToRefer, +// txCreateInputReceived, +// blockOnlyInputReceived, +// mempoolProposalReceived, +// dssTIndexProposal, +// dssBIndexProposal, +// timeData, +// strongParents, +// ) +// } + +// ////////////////////////////////////////////////////////////////////////////// +// DSS_t + +func (c *consImpl) uponDSStInitialInputsReady() gpa.OutMessages { + c.log.LogDebugf("uponDSStInitialInputsReady") + sub, subMsgs, err := c.msgWrapper.DelegateInput(subsystemTypeDSS, subsystemTypeDSSIndexT, dss.NewInputStart()) if err != nil { - panic(fmt.Errorf("cannot provide input to DSS: %w", err)) + panic(fmt.Errorf("cannot provide input to DSSt: %w", err)) } return gpa.NoMessages(). AddAll(subMsgs). 
- AddAll(c.subDSS.DSSOutputReceived(sub.Output())) + AddAll(c.subDSSt.DSSOutputReceived(sub.Output())) } -func (c *consImpl) uponDSSIndexProposalReady(indexProposal []int) gpa.OutMessages { - c.log.LogDebugf("uponDSSIndexProposalReady") - return c.subACS.DSSIndexProposalReceived(indexProposal) +func (c *consImpl) uponDSStIndexProposalReady(indexProposal []int) gpa.OutMessages { + c.log.LogDebugf("uponDSStIndexProposalReady") + return gpa.NoMessages(). + // AddAll(c.subNC.DSStIndexProposalReceived()). + AddAll(c.subACS.DSStIndexProposalReceived(indexProposal)) } -func (c *consImpl) uponDSSSigningInputsReceived(decidedIndexProposals map[gpa.NodeID][]int, messageToSign []byte) gpa.OutMessages { - c.log.LogDebugf("uponDSSSigningInputsReceived(decidedIndexProposals=%+v, H(messageToSign)=%v)", decidedIndexProposals, hashing.HashDataBlake2b(messageToSign)) +func (c *consImpl) uponDSStSigningInputsReceived(decidedIndexProposals map[gpa.NodeID][]int, messageToSign []byte) gpa.OutMessages { + c.log.LogDebugf("uponDSStSigningInputsReceived(decidedIndexProposals=%+v, H(messageToSign)=%v)", decidedIndexProposals, hashing.HashDataBlake2b(messageToSign)) dssDecidedInput := dss.NewInputDecided(decidedIndexProposals, messageToSign) - subDSS, subMsgs, err := c.msgWrapper.DelegateInput(subsystemTypeDSS, 0, dssDecidedInput) + subDSSt, subMsgs, err := c.msgWrapper.DelegateInput(subsystemTypeDSS, subsystemTypeDSSIndexT, dssDecidedInput) if err != nil { panic(fmt.Errorf("cannot provide inputs for signing: %w", err)) } return gpa.NoMessages(). AddAll(subMsgs). 
- AddAll(c.subDSS.DSSOutputReceived(subDSS.Output())) + AddAll(c.subDSSt.DSSOutputReceived(subDSSt.Output())) } -func (c *consImpl) uponDSSOutputReady(signature []byte) gpa.OutMessages { - c.log.LogDebugf("uponDSSOutputReady") - return c.subTX.SignatureReceived(signature) +func (c *consImpl) uponDSStOutputReady(signature []byte) gpa.OutMessages { + c.log.LogDebugf("uponDSStOutputReady") + return c.subTXS.SignatureReceived(signature) +} + +// ////////////////////////////////////////////////////////////////////////////// +// DSS_b + +func (c *consImpl) uponDSSbInitialInputsReady() gpa.OutMessages { + c.log.LogDebugf("uponDSSbInitialInputsReady") + sub, subMsgs, err := c.msgWrapper.DelegateInput(subsystemTypeDSS, subsystemTypeDSSIndexB, dss.NewInputStart()) + if err != nil { + panic(fmt.Errorf("cannot provide input to DSSb: %w", err)) + } + return gpa.NoMessages(). + AddAll(subMsgs). + AddAll(c.subDSSb.DSSOutputReceived(sub.Output())) +} + +func (c *consImpl) uponDSSbIndexProposalReady(indexProposal []int) gpa.OutMessages { + c.log.LogDebugf("uponDSSbIndexProposalReady") + return gpa.NoMessages(). + // AddAll(c.subNC.DSSbIndexProposalReceived()). + AddAll(c.subACS.DSSbIndexProposalReceived(indexProposal)) +} + +func (c *consImpl) uponDSSbSigningInputsReceived(decidedIndexProposals map[gpa.NodeID][]int, messageToSign []byte) gpa.OutMessages { + c.log.LogDebugf("uponDSSbSigningInputsReceived(decidedIndexProposals=%+v, H(messageToSign)=%v)", decidedIndexProposals, hashing.HashDataBlake2b(messageToSign)) + dssDecidedInput := dss.NewInputDecided(decidedIndexProposals, messageToSign) + subDSSb, subMsgs, err := c.msgWrapper.DelegateInput(subsystemTypeDSS, subsystemTypeDSSIndexB, dssDecidedInput) + if err != nil { + panic(fmt.Errorf("cannot provide inputs for signing: %w", err)) + } + return gpa.NoMessages(). + AddAll(subMsgs). 
+ AddAll(c.subDSSb.DSSOutputReceived(subDSSb.Output())) +} + +func (c *consImpl) uponDSSbOutputReady(signature []byte) gpa.OutMessages { + c.log.LogDebugf("uponDSSbOutputReady") + return c.subBlkS.HaveSig(signature) } //////////////////////////////////////////////////////////////////////////////// // ACS -func (c *consImpl) uponACSInputsReceived(baseAnchorOutput *isc.ChainOutputs, requestRefs []*isc.RequestRef, dssIndexProposal []int, timeData time.Time) gpa.OutMessages { +func (c *consImpl) uponACSTipsRequired() gpa.OutMessages { + return c.subNC.BlockTipSetNeeded() +} + +func (c *consImpl) uponACSInputsReceived( + blockToRefer *iotago.Block, + baseCO *isc.ChainOutputs, + resignTX *iotago.SignedTransaction, + requestRefs []*isc.RequestRef, + dssTIndexProposal []int, + dssBIndexProposal []int, + timeData time.Time, + strongParents iotago.BlockIDs, +) gpa.OutMessages { batchProposal := bp.NewBatchProposal( + c.l1APIProvider.LatestAPI(), *c.dkShare.GetIndex(), - baseAnchorOutput, - util.NewFixedSizeBitVector(c.dkShare.GetN()).SetBits(dssIndexProposal), + blockToRefer, + strongParents, + baseCO, + resignTX, + util.NewFixedSizeBitVector(c.dkShare.GetN()).SetBits(dssTIndexProposal), + util.NewFixedSizeBitVector(c.dkShare.GetN()).SetBits(dssBIndexProposal), timeData, c.validatorAgentID, requestRefs, @@ -494,7 +704,7 @@ func (c *consImpl) uponACSInputsReceived(baseAnchorOutput *isc.ChainOutputs, req } func (c *consImpl) uponACSOutputReceived(outputValues map[gpa.NodeID][]byte) gpa.OutMessages { - aggr := bp.AggregateBatchProposals(outputValues, c.nodeIDs, c.f, c.log) + aggr := bp.AggregateBatchProposals(outputValues, c.nodeIDs, c.f, c.l1APIProvider.LatestAPI(), c.log) if aggr.ShouldBeSkipped() { // Cannot proceed with such proposals. // Have to retry the consensus after some time with the next log index. 
@@ -503,16 +713,29 @@ func (c *consImpl) uponACSOutputReceived(outputValues map[gpa.NodeID][]byte) gpa c.term.haveOutputProduced() return nil } - bao := aggr.DecidedBaseAnchorOutput() - baoID := bao.AnchorOutputID - reqs := aggr.DecidedRequestRefs() - c.log.LogDebugf("ACS decision: baseAO=%v, requests=%v", bao, reqs) - return gpa.NoMessages(). - AddAll(c.subMP.RequestsNeeded(reqs)). - AddAll(c.subSM.DecidedVirtualStateNeeded(bao)). - AddAll(c.subVM.DecidedBatchProposalsReceived(aggr)). - AddAll(c.subRND.CanProceed(baoID[:])). - AddAll(c.subDSS.DecidedIndexProposalsReceived(aggr.DecidedDSSIndexProposals())) + + msgs := gpa.NoMessages(). + AddAll(c.subRND.CanProceed(c.instID)). + AddAll(c.subDSSb.DecidedIndexProposalsReceived(aggr.DecidedDSSbIndexProposals())). + AddAll(c.subBlkD.HaveTimestamp(aggr.AggregatedTime())). + AddAll(c.subBlkD.HaveTipsProposal(func(randomness hashing.HashValue) iotago.BlockIDs { return aggr.DecidedStrongParents(randomness) })) + + // + // Either we are going to build a fresh TX + if aggr.ShouldBuildNewTX() { + bao := aggr.DecidedBaseCO() + reqs := aggr.DecidedRequestRefs() + c.log.LogDebugf("ACS decision: baseAO=%v, requests=%v", bao, reqs) + return msgs. + AddAll(c.subMP.RequestsNeeded(reqs)). + AddAll(c.subSM.DecidedVirtualStateNeeded(bao)). + AddAll(c.subVM.DecidedBatchProposalsReceived(aggr)). + AddAll(c.subDSSt.DecidedIndexProposalsReceived(aggr.DecidedDSStIndexProposals())) + } + // Or we are going to reuse the existing TX. + return msgs. + AddAll(c.subRes.ReuseTX(aggr.DecidedReattachTX())). + AddAll(c.subBlkD.HaveSignedTX(aggr.DecidedReattachTX())) } func (c *consImpl) uponACSTerminated() { @@ -544,7 +767,10 @@ func (c *consImpl) uponRNDSigSharesReady(dataToSign []byte, partialSigs map[gpa. c.log.LogWarnf("Cannot reconstruct BLS signature from %v/%v sigShares: %v", len(partialSigs), c.dkShare.GetN(), err) return false, nil // Continue to wait for other sig shares. 
} - return true, c.subVM.RandomnessReceived(hashing.HashDataBlake2b(sig.Signature.Bytes())) + randomness := hashing.HashDataBlake2b(sig.Signature.Bytes()) + return true, gpa.NoMessages(). + AddAll(c.subVM.RandomnessReceived(randomness)). + AddAll(c.subBlkD.HaveRandomness(randomness)) } //////////////////////////////////////////////////////////////////////////////// @@ -553,20 +779,21 @@ func (c *consImpl) uponRNDSigSharesReady(dataToSign []byte, partialSigs map[gpa. func (c *consImpl) uponVMInputsReceived(aggregatedProposals *bp.AggregatedBatchProposals, chainState state.State, randomness *hashing.HashValue, requests []isc.Request) gpa.OutMessages { // TODO: chainState state.State is not used for now. That's because VM takes it form the store by itself. // The decided base anchor output can be different from that we have proposed! - decidedBaseAnchorOutput := aggregatedProposals.DecidedBaseAnchorOutput() + decidedBaseCO := aggregatedProposals.DecidedBaseCO() c.output.NeedVMResult = &vm.VMTask{ - Processors: c.processorCache, - Inputs: decidedBaseAnchorOutput, - Store: c.chainStore, - Requests: aggregatedProposals.OrderedRequests(requests, *randomness), - // TODO: Is TimeAssumption->Timestamp a 1:1 change? 
- // (old) TimeAssumption: aggregatedProposals.AggregatedTime(), + Processors: c.processorCache, + Inputs: decidedBaseCO, + Store: c.chainStore, + Requests: aggregatedProposals.OrderedRequests(requests, *randomness), Timestamp: aggregatedProposals.AggregatedTime(), Entropy: *randomness, ValidatorFeeTarget: aggregatedProposals.ValidatorFeeTarget(*randomness), EstimateGasMode: false, EnableGasBurnLogging: false, + BlockIssuerKey: iotago.Ed25519PublicKeyHashBlockIssuerKeyFromPublicKey(c.dkShare.GetSharedPublic().AsHiveEd25519PubKey()), Log: c.log.NewChildLogger("VM"), + L1APIProvider: c.l1APIProvider, + TokenInfo: c.tokenInfo, } return nil } @@ -605,10 +832,22 @@ func (c *consImpl) uponVMOutputReceived(vmResult *vm.VMTaskResult) gpa.OutMessag if err != nil { panic(fmt.Errorf("uponVMOutputReceived: cannot obtain signing message: %w", err)) } + + chained, err := isc.ChainOutputsFromTx(vmResult.Transaction, c.chainID.AsAddress()) + if err != nil { + panic(fmt.Errorf("cannot get AnchorOutput from produced TX: %w", err)) + } + + consumedAnchorOutputID := vmResult.Task.Inputs.AnchorOutputID + var consumedAccountOutputID iotago.OutputID + if accountOutputID, _, hasAccountOutputID := vmResult.Task.Inputs.AccountOutput(); hasAccountOutputID { + consumedAccountOutputID = accountOutputID + } return gpa.NoMessages(). AddAll(c.subSM.BlockProduced(vmResult.StateDraft)). - AddAll(c.subTX.VMResultReceived(vmResult)). - AddAll(c.subDSS.MessageToSignReceived(signingMsg)) + AddAll(c.subTXS.VMResultReceived(vmResult)). + AddAll(c.subDSSt.MessageToSignReceived(signingMsg)). + AddAll(c.subRes.HaveTransition(chained, consumedAnchorOutputID, consumedAccountOutputID)) } //////////////////////////////////////////////////////////////////////////////// @@ -616,7 +855,6 @@ func (c *consImpl) uponVMOutputReceived(vmResult *vm.VMTaskResult) gpa.OutMessag // Everything is ready for the output TX, produce it. 
func (c *consImpl) uponTXInputsReady(vmResult *vm.VMTaskResult, block state.Block, signature []byte) gpa.OutMessages { - panic("TODO rewrite uponTXInputsReady") // resultTx := vmResult.Transaction // publicKey := c.dkShare.GetSharedPublic() // var signatureArray [ed25519.SignatureSize]byte @@ -631,32 +869,102 @@ func (c *consImpl) uponTXInputsReady(vmResult *vm.VMTaskResult, block state.Bloc // panic(fmt.Errorf("cannot get inputs from result TX: %w", err)) // } - // // TODO: This is most likely just trash :D - // tx := &iotago.SignedTransaction{ - // Transaction: &iotago.Transaction{ - // TransactionEssence: resultTx.TransactionEssence, - // }, - // Unlocks: transaction.MakeSignatureAndReferenceUnlocks(len(resultInputs), signatureForUnlock), - // } + api := c.l1APIProvider.LatestAPI() + tx := &iotago.SignedTransaction{ + API: api, // TODO: Use the decided timestamp? + Transaction: vmResult.Transaction, + // TODO: Unlocks: vmResult.Transaction.MakeSignatureAndReferenceUnlocks(len(resultInputs), signatureForUnlock), + } - // txID, err := tx.ID() - // if err != nil { - // panic(fmt.Errorf("cannot get ID from the produced TX: %w", err)) - // } - // chained, err := isc.ChainOutputsFromTx(tx.Transaction, c.chainID.AsAddress()) - // if err != nil { - // panic(fmt.Errorf("cannot get AnchorOutput from produced TX: %w", err)) - // } - // c.output.Result = &Result{ - // Transaction: tx, - // BaseAnchorOutput: vmResult.Task.Inputs.AnchorOutputID, - // NextAnchorOutput: chained, - // Block: block, - // } - // c.output.Status = Completed - // c.log.LogInfof("Terminating consensus with status=Completed, produced tx.ID=%v, nextAO=%v, baseAO.ID=%v", txID.ToHex(), chained, vmResult.Task.Inputs.AnchorOutputID.ToHex()) - // c.term.haveOutputProduced() - // return nil + return gpa.NoMessages(). + AddAll(c.subBlkD.HaveSignedTX(tx)). 
+ AddAll(c.subRes.BuiltTX(tx)) +} + +//////////////////////////////////////////////////////////////////////////////// +// BLK + +// readyCB func(tipsFn func(randomness hashing.HashValue) iotago.BlockIDs, randomness hashing.HashValue, timestamp time.Time, tx *iotago.SignedTransaction) gpa.OutMessages +func (c *consImpl) uponBlkDataInputsReady( + tipsFn func(randomness hashing.HashValue) iotago.BlockIDs, + randomness hashing.HashValue, + timestamp time.Time, + tx *iotago.SignedTransaction, +) gpa.OutMessages { + strongParents := tipsFn(randomness) + blk, err := builder. + NewBasicBlockBuilder(c.l1APIProvider.APIForTime(timestamp)). + StrongParents(strongParents). + IssuingTime(timestamp). + Payload(tx). + Build() + if err != nil { + panic(fmt.Errorf("cannot build iota block: %v", err)) + } + + co, err := isc.ChainOutputsFromTx(tx.Transaction, c.chainID.AsAddress()) + if err != nil { + panic(err) + } + blockIssuer := co.MustAccountOutput().AccountID + blk.Header.IssuerID = blockIssuer + c.log.LogDebugf("XXXXXXXXXXXX: blockIssuer=%v", blockIssuer) + + blkSigMsg, err := blk.SigningMessage() + if err != nil { + panic(fmt.Errorf("cannot build iota block: %v", err)) + } + + return gpa.NoMessages(). + AddAll(c.subBlkS.HaveBlock(blk)). 
+ AddAll(c.subDSSb.MessageToSignReceived(blkSigMsg)) +} + +func (c *consImpl) uponBlkSigInputsReady( + bl *iotago.Block, + sig []byte, +) gpa.OutMessages { + var signatureArray [ed25519.SignatureSize]byte + copy(signatureArray[:], sig) + bl.Signature = &iotago.Ed25519Signature{ + PublicKey: c.dkShare.GetSharedPublic().AsKey(), + Signature: signatureArray, + } + return c.subRes.HaveIotaBlock(bl) +} + +//////////////////////////////////////////////////////////////////////////////// +// RES + +func (c *consImpl) uponResInputsReady( + transactionReused bool, + transaction *iotago.SignedTransaction, + producedIotaBlock *iotago.Block, + producedChainOutputs *isc.ChainOutputs, + producedStateBlock state.Block, + consumedAnchorOutputID iotago.OutputID, + consumedAccountOutputID iotago.OutputID, +) gpa.OutMessages { + transactionID, err := transaction.ID() + if err != nil { + panic(fmt.Errorf("cannot get ID from the produced TX: %w", err)) + } + + c.output.Result = &Result{ + producedTransaction: transaction, + producedChainOutputs: producedChainOutputs, + producedIotaBlock: producedIotaBlock, + producedStateBlock: producedStateBlock, + consumedAnchorOutputID: consumedAnchorOutputID, + consumedAccountOutputID: consumedAccountOutputID, + } + c.output.Status = Completed + c.log.LogInfof( + "Terminating consensus with status=Completed, produced tx.ID=%v, nextAO=%v, baseAO.ID=%v", + transactionID.ToHex(), producedChainOutputs, consumedAnchorOutputID.ToHex(), + ) + c.term.haveOutputProduced() + return nil } //////////////////////////////////////////////////////////////////////////////// diff --git a/packages/chain/cons/cons_gr/gr.go b/packages/chain/cons/cons_gr/gr.go index 0a488b22d7..7fe738b638 100644 --- a/packages/chain/cons/cons_gr/gr.go +++ b/packages/chain/cons/cons_gr/gr.go @@ -14,12 +14,11 @@ import ( "github.com/iotaledger/hive.go/log" iotago "github.com/iotaledger/iota.go/v4" - "github.com/iotaledger/wasp/packages/chain/cmt_log" + 
"github.com/iotaledger/iota.go/v4/api" "github.com/iotaledger/wasp/packages/chain/cons" "github.com/iotaledger/wasp/packages/cryptolib" "github.com/iotaledger/wasp/packages/gpa" "github.com/iotaledger/wasp/packages/isc" - "github.com/iotaledger/wasp/packages/kv/codec" "github.com/iotaledger/wasp/packages/metrics" "github.com/iotaledger/wasp/packages/peering" "github.com/iotaledger/wasp/packages/state" @@ -39,10 +38,10 @@ const ( type ConsensusID [iotago.Ed25519AddressBytesLength + 4]byte -func NewConsensusID(cmtAddr *iotago.Ed25519Address, logIndex *cmt_log.LogIndex) ConsensusID { +func NewConsensusID(cmtAddr *iotago.Ed25519Address, localConsID []byte) ConsensusID { ret := ConsensusID{} copy(ret[:], isc.AddressToBytes(cmtAddr)[1:]) // remove the byte kind prefix - copy(ret[iotago.Ed25519AddressBytesLength:], codec.Uint32.Encode(logIndex.AsUint32())) + copy(ret[iotago.Ed25519AddressBytesLength:], localConsID) return ret } @@ -91,14 +90,15 @@ func (o *Output) String() string { } type input struct { - baseAnchorOutput *isc.ChainOutputs - outputCB func(*Output) - recoverCB func() + params cons.Input + outputCB func(*Output) + recoverCB func() } type ConsGr struct { me gpa.NodeID l1APIProvider iotago.APIProvider + tokenInfo *api.InfoResBaseToken consInst gpa.AckHandler inputCh chan *input inputReceived *atomic.Bool @@ -140,8 +140,9 @@ func New( chainID isc.ChainID, chainStore state.Store, dkShare tcrypto.DKShare, - logIndex *cmt_log.LogIndex, + localConsID []byte, l1APIProvider iotago.APIProvider, + tokenInfo *api.InfoResBaseToken, myNodeIdentity *cryptolib.KeyPair, procCache *processors.Cache, mempool Mempool, @@ -156,7 +157,7 @@ func New( log log.Logger, ) *ConsGr { cmtPubKey := dkShare.GetSharedPublic() - netPeeringID := peering.HashPeeringIDFromBytes(chainID.Bytes(), cmtPubKey.AsBytes(), logIndex.Bytes()) // ChainID × Committee PubKey × LogIndex + netPeeringID := peering.HashPeeringIDFromBytes(chainID.Bytes(), cmtPubKey.AsBytes(), localConsID) // ChainID × Committee 
PubKey × LogIndex netPeerPubs := map[gpa.NodeID]*cryptolib.PublicKey{} for _, peerPubKey := range dkShare.GetNodePubKeys() { netPeerPubs[gpa.NodeIDFromPublicKey(peerPubKey)] = peerPubKey @@ -165,6 +166,7 @@ func New( cgr := &ConsGr{ me: me, l1APIProvider: l1APIProvider, + tokenInfo: tokenInfo, consInst: nil, // Set bellow. inputCh: make(chan *input, 1), inputReceived: atomic.NewBool(false), @@ -180,7 +182,7 @@ func New( netPeerPubs: netPeerPubs, netDisconnect: nil, // Set bellow. net: net, - consensusID: NewConsensusID(cmtPubKey.AsEd25519Address(), logIndex), + consensusID: NewConsensusID(cmtPubKey.AsEd25519Address(), localConsID), ctx: ctx, pipeMetrics: pipeMetrics, log: log, @@ -214,15 +216,15 @@ func New( return cgr } -func (cgr *ConsGr) Input(baseAnchorOutput *isc.ChainOutputs, outputCB func(*Output), recoverCB func()) { +func (cgr *ConsGr) Input(params cons.Input, outputCB func(*Output), recoverCB func()) { wasReceivedBefore := cgr.inputReceived.Swap(true) if wasReceivedBefore { - panic(fmt.Errorf("duplicate input: %v", baseAnchorOutput)) + panic(fmt.Errorf("duplicate input: %v", params)) } inp := &input{ - baseAnchorOutput: baseAnchorOutput, - outputCB: outputCB, - recoverCB: recoverCB, + params: params, + outputCB: outputCB, + recoverCB: recoverCB, } cgr.inputCh <- inp close(cgr.inputCh) @@ -266,7 +268,7 @@ func (cgr *ConsGr) run() { //nolint:gocyclo,funlen printStatusCh = time.After(cgr.printStatusPeriod) cgr.outputCB = inp.outputCB cgr.recoverCB = inp.recoverCB - cgr.handleConsInput(cons.NewInputProposal(inp.baseAnchorOutput)) + cgr.handleConsInput(cons.NewInputProposal(inp.params)) case t, ok := <-cgr.inputTimeCh: if !ok { cgr.inputTimeCh = nil diff --git a/packages/chain/cons/cons_gr/gr_test.go b/packages/chain/cons/cons_gr/gr_test.go index c070ec6134..30086cd586 100644 --- a/packages/chain/cons/cons_gr/gr_test.go +++ b/packages/chain/cons/cons_gr/gr_test.go @@ -118,7 +118,14 @@ func testGrBasic(t *testing.T, n, f int, reliable bool) { stateMgrs[i] = 
newTestStateMgr(t, chainStore) chainMetrics := chainMetricsProvider.GetChainMetrics(isc.EmptyChainID()) nodes[i] = consGR.New( - ctx, chainID, chainStore, dkShare, &logIndex, testutil.L1APIProvider, peerIdentities[i], + ctx, + chainID, + chainStore, + dkShare, + &logIndex, + testutil.L1APIProvider, + testutil.TokenInfo, + peerIdentities[i], procCache, mempools[i], stateMgrs[i], networkProviders[i], accounts.CommonAccount(), @@ -156,7 +163,7 @@ func testGrBasic(t *testing.T, n, f int, reliable bool) { if firstOutput == nil { firstOutput = output } - require.Equal(t, firstOutput.Result.Transaction, output.Result.Transaction) + require.Equal(t, firstOutput.Result.ProducedTransaction(), output.Result.ProducedTransaction()) } } diff --git a/packages/chain/cons/cons_test.go b/packages/chain/cons/cons_test.go index 77a33c8304..c4ef869154 100644 --- a/packages/chain/cons/cons_test.go +++ b/packages/chain/cons/cons_test.go @@ -39,6 +39,18 @@ import ( "github.com/iotaledger/wasp/packages/vm/vmimpl" ) +type consInput struct { + baseBlock *iotago.Block + baseCO *isc.ChainOutputs + reattachTX *iotago.SignedTransaction +} + +var _ cons.Input = &consInput{} + +func (ci *consInput) BaseBlock() *iotago.Block { return ci.baseBlock } +func (ci *consInput) BaseCO() *isc.ChainOutputs { return ci.baseCO } +func (ci *consInput) ReattachTX() *iotago.SignedTransaction { return ci.reattachTX } + // Here we run a single consensus instance, step by step with // regards to the requests to external components (mempool, stateMgr, VM). func TestConsBasic(t *testing.T) { @@ -82,6 +94,12 @@ func testConsBasic(t *testing.T, n, f int) { _, _, err := utxoDB.NewWalletWithFundsFromFaucet(originator) require.NoError(t, err) // + // + _, _, err = utxoDB.NewWalletWithFundsFromFaucet(committeePubKey) + + originatorAccIDs := utxoDB.GetAccountOutputs(originator.Address()) // TODO: This is the correct account? + require.NotEmpty(t, originatorAccIDs) + // // Construct the chain on L1: Create the origin TX. 
outputs := utxoDB.GetUnspentOutputs(originator.Address()) originTX, _, _, chainID, err := origin.NewChainOriginTransaction( @@ -102,7 +120,14 @@ func testConsBasic(t *testing.T, n, f int) { require.NoError(t, err) require.NotNil(t, stateAnchor) require.NotNil(t, anchorOutput) - ao0 := isc.NewChainOutputs(anchorOutput, stateAnchor.OutputID, nil, iotago.OutputID{}) + // txxx := transaction.NewAccountOutputForStateControllerTx( ) + accountOutput := transaction.NewAccountOutputForStateController(testutil.L1API, committeePubKey) + ao0 := isc.NewChainOutputs( + anchorOutput, + stateAnchor.OutputID, + accountOutput, + iotago.EmptyOutputID, // TODO: ... + ) err = utxoDB.AddToLedger(originTX) require.NoError(t, err) @@ -110,7 +135,7 @@ func testConsBasic(t *testing.T, n, f int) { // Deposit some funds outputs = utxoDB.GetUnspentOutputs(originator.Address()) - depositTx, err := transaction.NewRequestTransaction( + _, depositBl, err := transaction.NewRequestTransaction( originator, originator.Address(), outputs, @@ -131,7 +156,7 @@ func testConsBasic(t *testing.T, n, f int) { ) require.NoError(t, err) - err = utxoDB.AddToLedger(depositTx) + err = utxoDB.AddToLedger(depositBl) require.NoError(t, err) // @@ -171,11 +196,15 @@ func testConsBasic(t *testing.T, n, f int) { nodeLog := logger.NewChildLogger(nid.ShortString()) nodeSK := peerIdentities[i].GetPrivateKey() nodeDKShare, err := dkShareProviders[i].LoadDKShare(committeePubKey.AsEd25519Address()) + if err != nil { + panic(err) + } chainStates[nid] = state.NewStoreWithUniqueWriteMutex(mapdb.NewMapDB()) _, err = origin.InitChainByAnchorOutput(chainStates[nid], ao0, testutil.L1APIProvider, testutil.TokenInfo) require.NoError(t, err) nodes[nid] = cons.New( testutil.L1APIProvider, + testutil.TokenInfo, chainID, chainStates[nid], nid, @@ -194,7 +223,7 @@ func testConsBasic(t *testing.T, n, f int) { now := time.Now() inputs := map[gpa.NodeID]gpa.Input{} for _, nid := range nodeIDs { - inputs[nid] = cons.NewInputProposal(ao0) + 
inputs[nid] = cons.NewInputProposal(&consInput{baseCO: ao0}) } tc.WithInputs(inputs).RunAll() tc.PrintAllStatusStrings("After Inputs", t.Logf) @@ -206,6 +235,7 @@ func testConsBasic(t *testing.T, n, f int) { require.Equal(t, cons.Running, out.Status) require.NotNil(t, out.NeedMempoolProposal) require.NotNil(t, out.NeedStateMgrStateProposal) + require.False(t, out.NeedNodeConnBlockTipSet) // Not yet, only after other deps are ready to get fresh tips. tc.WithInput(nid, cons.NewInputMempoolProposal(reqRefs)) tc.WithInput(nid, cons.NewInputStateMgrProposalConfirmed()) tc.WithInput(nid, cons.NewInputTimeData(now)) @@ -213,6 +243,15 @@ func testConsBasic(t *testing.T, n, f int) { tc.RunAll() tc.PrintAllStatusStrings("After MP/SM proposals", t.Logf) // + // Provide block tips. + for nid, node := range nodes { + out := node.Output().(*cons.Output) + require.True(t, out.NeedNodeConnBlockTipSet) // Not yet, only after other deps are ready to get fresh tips. + tc.WithInput(nid, cons.NewInputNodeConnBlockTipSet(iotago.BlockIDs{})) + } + tc.RunAll() + tc.PrintAllStatusStrings("After tip sets provided", t.Logf) + // // Provide Decided data from SM and MP. t.Log("############ Provide Decided Data from SM/MP.") for nid, node := range nodes { @@ -231,6 +270,7 @@ func testConsBasic(t *testing.T, n, f int) { } tc.RunAll() tc.PrintAllStatusStrings("After MP/SM data", t.Logf) + // // Provide Decided data from SM and MP. t.Log("############ Run VM, validate the result.") @@ -242,7 +282,7 @@ func testConsBasic(t *testing.T, n, f int) { require.Nil(t, out.NeedMempoolRequests) require.Nil(t, out.NeedStateMgrDecidedState) require.NotNil(t, out.NeedVMResult) - out.NeedVMResult.Log = testlogger.WithLevel(out.NeedVMResult.Log, log.LevelError) // Decrease VM logging. + out.NeedVMResult.Log = testlogger.WithLevel(out.NeedVMResult.Log, log.LevelDebug) // Decrease VM logging. 
vmResult, err := vmimpl.Run(out.NeedVMResult) require.NoError(t, err) tc.WithInput(nid, cons.NewInputVMResult(vmResult)) @@ -277,11 +317,11 @@ func testConsBasic(t *testing.T, n, f int) { require.Nil(t, out.NeedMempoolRequests) require.Nil(t, out.NeedStateMgrDecidedState) require.Nil(t, out.NeedVMResult) - require.NotNil(t, out.Result.Transaction) - require.NotNil(t, out.Result.NextAnchorOutput) - require.NotNil(t, out.Result.Block) + require.NotNil(t, out.Result.ProducedTransaction()) + require.NotNil(t, out.Result.ProducedChainOutputs()) + require.NotNil(t, out.Result.ProducedStateBlock()) if nid == nodeIDs[0] { // Just do this once. - require.NoError(t, utxoDB.AddToLedger(out.Result.Transaction)) // TODO out.Result should probably be a block, instead of a signedTx? + require.NoError(t, utxoDB.AddToLedger(out.Result.ProducedIotaBlock())) } } } @@ -408,9 +448,9 @@ func testChained(t *testing.T, n, f, b int) { originState, err := testNodeStates[nid].StateByTrieRoot(originL1Commitment.TrieRoot()) require.NoError(t, err) testChainInsts[0].input(&testInstInput{ - nodeID: nid, - baseAnchorOutput: originAO, - baseState: originState, + nodeID: nid, + baseCO: originAO, + baseState: originState, }) } // Wait for all the instances to output. 
@@ -433,9 +473,9 @@ func testChained(t *testing.T, n, f, b int) { // testConsInst type testInstInput struct { - nodeID gpa.NodeID - baseAnchorOutput *isc.ChainOutputs - baseState state.State // State committed with the baseAnchorOutput + nodeID gpa.NodeID + baseCO *isc.ChainOutputs + baseState state.State // State committed with the baseAnchorOutput } type testConsInst struct { @@ -492,7 +532,20 @@ func newTestConsInst( nodeSK := peerIdentities[i].GetPrivateKey() nodeDKShare, err := dkShareRegistryProviders[i].LoadDKShare(committeeAddress) require.NoError(t, err) - nodes[nid] = cons.New(testutil.L1APIProvider, chainID, nodeStates[nid], nid, nodeSK, nodeDKShare, procCache, consInstID, gpa.NodeIDFromPublicKey, accounts.CommonAccount(), nodeLog).AsGPA() + nodes[nid] = cons.New( + testutil.L1APIProvider, + testutil.TokenInfo, + chainID, + nodeStates[nid], + nid, + nodeSK, + nodeDKShare, + procCache, + consInstID, + gpa.NodeIDFromPublicKey, + accounts.CommonAccount(), + nodeLog, + ).AsGPA() } tci := &testConsInst{ t: t, @@ -544,7 +597,7 @@ func (tci *testConsInst) run() { } tci.inputs[inp.nodeID] = inp tci.lock.Unlock() - tci.tcInputCh <- map[gpa.NodeID]gpa.Input{inp.nodeID: cons.NewInputProposal(inp.baseAnchorOutput)} + tci.tcInputCh <- map[gpa.NodeID]gpa.Input{inp.nodeID: cons.NewInputProposal(&consInput{baseCO: inp.baseCO})} timeForStatus = time.After(3 * time.Second) tci.tryHandleOutput(inp.nodeID) case compInp, ok := <-tci.compInputPipe: @@ -608,12 +661,12 @@ func (tci *testConsInst) tryHandleOutput(nodeID gpa.NodeID) { //nolint:gocyclo if tci.done[nodeID] { return } - resultState, err := tci.nodeStates[nodeID].StateByTrieRoot(out.Result.Block.TrieRoot()) + resultState, err := tci.nodeStates[nodeID].StateByTrieRoot(out.Result.ProducedStateBlock().TrieRoot()) require.NoError(tci.t, err) tci.doneCB(&testInstInput{ - nodeID: nodeID, - baseAnchorOutput: out.Result.NextAnchorOutput, - baseState: resultState, + nodeID: nodeID, + baseCO: out.Result.ProducedChainOutputs(), 
+ baseState: resultState, }) tci.done[nodeID] = true return @@ -653,7 +706,7 @@ func (tci *testConsInst) tryHandleOutput(nodeID gpa.NodeID) { //nolint:gocyclo func (tci *testConsInst) tryHandledNeedMempoolProposal(nodeID gpa.NodeID, out *cons.Output, inp *testInstInput) { if out.NeedMempoolProposal != nil && !tci.handledNeedMempoolProposal[nodeID] { - require.Equal(tci.t, out.NeedMempoolProposal, inp.baseAnchorOutput) + require.Equal(tci.t, out.NeedMempoolProposal, inp.baseCO) reqRefs := []*isc.RequestRef{} for _, r := range tci.requests { reqRefs = append(reqRefs, isc.RequestRefFromRequest(r)) @@ -665,7 +718,7 @@ func (tci *testConsInst) tryHandledNeedMempoolProposal(nodeID gpa.NodeID, out *c func (tci *testConsInst) tryHandledNeedStateMgrStateProposal(nodeID gpa.NodeID, out *cons.Output, inp *testInstInput) { if out.NeedStateMgrStateProposal != nil && !tci.handledNeedStateMgrStateProposal[nodeID] { - require.Equal(tci.t, out.NeedStateMgrStateProposal, inp.baseAnchorOutput) + require.Equal(tci.t, out.NeedStateMgrStateProposal, inp.baseCO) tci.compInputPipe <- map[gpa.NodeID]gpa.Input{nodeID: cons.NewInputStateMgrProposalConfirmed()} tci.handledNeedStateMgrStateProposal[nodeID] = true } @@ -693,7 +746,7 @@ func (tci *testConsInst) tryHandledNeedMempoolRequests(nodeID gpa.NodeID, out *c func (tci *testConsInst) tryHandledNeedStateMgrDecidedState(nodeID gpa.NodeID, out *cons.Output, inp *testInstInput) { if out.NeedStateMgrDecidedState != nil && !tci.handledNeedStateMgrDecidedState[nodeID] { - if out.NeedStateMgrDecidedState.AnchorOutputID == inp.baseAnchorOutput.AnchorOutputID { + if out.NeedStateMgrDecidedState.AnchorOutputID == inp.baseCO.AnchorOutputID { tci.compInputPipe <- map[gpa.NodeID]gpa.Input{nodeID: cons.NewInputStateMgrDecidedVirtualState(inp.baseState)} } else { tci.t.Error("we have to sync between state managers, should not happen in this test") diff --git a/packages/chain/cons/input_nodeconn_block_tip_set.go 
b/packages/chain/cons/input_nodeconn_block_tip_set.go new file mode 100644 index 0000000000..84b54ce48b --- /dev/null +++ b/packages/chain/cons/input_nodeconn_block_tip_set.go @@ -0,0 +1,20 @@ +package cons + +import ( + "fmt" + + iotago "github.com/iotaledger/iota.go/v4" + "github.com/iotaledger/wasp/packages/gpa" +) + +type inputNodeConnBlockTipSet struct { + strongParents iotago.BlockIDs +} + +func NewInputNodeConnBlockTipSet(strongParents iotago.BlockIDs) gpa.Input { + return &inputNodeConnBlockTipSet{strongParents: strongParents} +} + +func (inp *inputNodeConnBlockTipSet) String() string { + return fmt.Sprintf("{cons.inputNodeConnBlockTipSet, |strongParents|=%v}", len(inp.strongParents)) +} diff --git a/packages/chain/cons/input_proposal.go b/packages/chain/cons/input_proposal.go index e4598280ee..91cb3ed6b0 100644 --- a/packages/chain/cons/input_proposal.go +++ b/packages/chain/cons/input_proposal.go @@ -7,23 +7,17 @@ import ( "fmt" "github.com/iotaledger/wasp/packages/gpa" - "github.com/iotaledger/wasp/packages/isc" - "github.com/iotaledger/wasp/packages/transaction" ) // That's the main/initial input for the consensus. 
type inputProposal struct { - baseAnchorOutput *isc.ChainOutputs + params Input } -func NewInputProposal(baseAnchorOutput *isc.ChainOutputs) gpa.Input { - return &inputProposal{baseAnchorOutput: baseAnchorOutput} +func NewInputProposal(params Input) gpa.Input { + return &inputProposal{params: params} } func (ip *inputProposal) String() string { - l1Commitment, err := transaction.L1CommitmentFromAnchorOutput(ip.baseAnchorOutput.AnchorOutput) - if err != nil { - panic(fmt.Errorf("cannot extract L1 commitment from anchor output: %w", err)) - } - return fmt.Sprintf("{cons.inputProposal: baseAnchorOutput=%v, l1Commitment=%v}", ip.baseAnchorOutput, l1Commitment) + return fmt.Sprintf("{cons.inputProposal: %v}", ip.params) } diff --git a/packages/chain/cons/sync_acs.go b/packages/chain/cons/sync_acs.go index 362637d79d..f7386c6e6c 100644 --- a/packages/chain/cons/sync_acs.go +++ b/packages/chain/cons/sync_acs.go @@ -8,86 +8,195 @@ import ( "strings" "time" + iotago "github.com/iotaledger/iota.go/v4" "github.com/iotaledger/wasp/packages/gpa" "github.com/iotaledger/wasp/packages/gpa/acs" "github.com/iotaledger/wasp/packages/isc" ) type SyncACS interface { - StateProposalReceived(proposedBaseAnchorOutput *isc.ChainOutputs) gpa.OutMessages + TXCreateInputReceived(baseCO *isc.ChainOutputs, blockToRefer *iotago.Block) gpa.OutMessages + BlockOnlyInputReceived(txToPublish *iotago.SignedTransaction, blockToRefer *iotago.Block) gpa.OutMessages + MempoolRequestsReceived(requestRefs []*isc.RequestRef) gpa.OutMessages - DSSIndexProposalReceived(dssIndexProposal []int) gpa.OutMessages - TimeDataReceived(timeData time.Time) gpa.OutMessages + StateMgrProposalReceived(proposedBaseAnchorOutput *isc.ChainOutputs) gpa.OutMessages + DSStIndexProposalReceived(dssIndexProposal []int) gpa.OutMessages + DSSbIndexProposalReceived(dssIndexProposal []int) gpa.OutMessages + TimeUpdateReceived(timeData time.Time) gpa.OutMessages + BlockTipSetProposalReceived(strongParents iotago.BlockIDs) 
gpa.OutMessages + // ACSInputsReceived( + // blockToRefer *iotago.Block, + // txCreateInputReceived *isc.ChainOutputs, + // blockOnlyInputReceived *iotago.SignedTransaction, + // mempoolProposalReceived []*isc.RequestRef, + // dssTIndexProposal []int, + // dssBIndexProposal []int, + // timeData time.Time, + // strongParents iotago.BlockIDs, + // ) gpa.OutMessages ACSOutputReceived(output gpa.Output) gpa.OutMessages String() string } +type SyncACSBlockTipsNeededCB = func() gpa.OutMessages + +type SyncACSInputsReadyCB = func( + blockToRefer *iotago.Block, + txCreateInputReceived *isc.ChainOutputs, + blockOnlyInputReceived *iotago.SignedTransaction, + mempoolProposalReceived []*isc.RequestRef, + dssTIndexProposal []int, + dssBIndexProposal []int, + timeData time.Time, + strongParents iotago.BlockIDs, +) gpa.OutMessages + +type SyncACSOutputsReadyCB = func( + output map[gpa.NodeID][]byte, +) gpa.OutMessages + // > UPON Reception of responses from Mempool, StateMgr and DSS NonceIndexes: // > Produce a batch proposal. // > Start the ACS. type syncACSImpl struct { - BaseAnchorOutput *isc.ChainOutputs - RequestRefs []*isc.RequestRef - DSSIndexProposal []int - TimeData time.Time - inputsReady bool - inputsReadyCB func(baseAnchorOutput *isc.ChainOutputs, requestRefs []*isc.RequestRef, dssIndexProposal []int, timeData time.Time) gpa.OutMessages - outputReady bool - outputReadyCB func(output map[gpa.NodeID][]byte) gpa.OutMessages - terminated bool - terminatedCB func() + blockToRefer *iotago.Block + txCreateInputReceived *isc.ChainOutputs + blockOnlyInputReceived *iotago.SignedTransaction + + stateMgrProposalReceived *isc.ChainOutputs // Should be same as txCreateInputReceived + mempoolProposalReceived []*isc.RequestRef + dssTIndexProposal []int // Index proposals from the DSS for signing the TX. + dssBIndexProposal []int // Index proposals from the DSS for signing the Block. 
+ timeData time.Time + strongParents iotago.BlockIDs + + blockTipsNeededCB SyncACSBlockTipsNeededCB + // inputsReady bool + inputsReadyCB SyncACSInputsReadyCB + outputReady bool + outputReadyCB SyncACSOutputsReadyCB + terminated bool + terminatedCB func() } func NewSyncACS( - inputsReadyCB func(baseAnchorOutput *isc.ChainOutputs, requestRefs []*isc.RequestRef, dssIndexProposal []int, timeData time.Time) gpa.OutMessages, - outputReadyCB func(output map[gpa.NodeID][]byte) gpa.OutMessages, + blockTipsNeededCB SyncACSBlockTipsNeededCB, + inputsReadyCB SyncACSInputsReadyCB, + outputReadyCB SyncACSOutputsReadyCB, terminatedCB func(), ) SyncACS { return &syncACSImpl{ - inputsReadyCB: inputsReadyCB, - outputReadyCB: outputReadyCB, - terminatedCB: terminatedCB, + blockTipsNeededCB: blockTipsNeededCB, + inputsReadyCB: inputsReadyCB, + outputReadyCB: outputReadyCB, + terminatedCB: terminatedCB, } } -func (sub *syncACSImpl) StateProposalReceived(proposedBaseAnchorOutput *isc.ChainOutputs) gpa.OutMessages { - if sub.BaseAnchorOutput != nil { +func (sub *syncACSImpl) TXCreateInputReceived(baseCO *isc.ChainOutputs, blockToRefer *iotago.Block) gpa.OutMessages { + if sub.txCreateInputReceived != nil || sub.blockOnlyInputReceived != nil { return nil } - sub.BaseAnchorOutput = proposedBaseAnchorOutput + sub.txCreateInputReceived = baseCO + sub.blockToRefer = blockToRefer + return sub.tryCompleteInput() +} + +func (sub *syncACSImpl) BlockOnlyInputReceived(txToPublish *iotago.SignedTransaction, blockToRefer *iotago.Block) gpa.OutMessages { + if sub.txCreateInputReceived != nil || sub.blockOnlyInputReceived != nil { + return nil + } + sub.blockOnlyInputReceived = txToPublish + sub.blockToRefer = blockToRefer + return sub.tryCompleteInput() +} + +func (sub *syncACSImpl) StateMgrProposalReceived(baseCO *isc.ChainOutputs) gpa.OutMessages { + if sub.stateMgrProposalReceived != nil { + return nil + } + sub.stateMgrProposalReceived = baseCO return sub.tryCompleteInput() } func (sub 
*syncACSImpl) MempoolRequestsReceived(requestRefs []*isc.RequestRef) gpa.OutMessages { - if sub.RequestRefs != nil { + if sub.mempoolProposalReceived != nil { + return nil + } + sub.mempoolProposalReceived = requestRefs + return sub.tryCompleteInput() +} + +func (sub *syncACSImpl) DSStIndexProposalReceived(indexProposal []int) gpa.OutMessages { + if sub.dssTIndexProposal != nil { return nil } - sub.RequestRefs = requestRefs + sub.dssTIndexProposal = indexProposal return sub.tryCompleteInput() } -func (sub *syncACSImpl) DSSIndexProposalReceived(dssIndexProposal []int) gpa.OutMessages { - if sub.DSSIndexProposal != nil { +func (sub *syncACSImpl) DSSbIndexProposalReceived(indexProposal []int) gpa.OutMessages { + if sub.dssBIndexProposal != nil { return nil } - sub.DSSIndexProposal = dssIndexProposal + sub.dssBIndexProposal = indexProposal return sub.tryCompleteInput() } -func (sub *syncACSImpl) TimeDataReceived(timeData time.Time) gpa.OutMessages { - if timeData.After(sub.TimeData) { - sub.TimeData = timeData +func (sub *syncACSImpl) TimeUpdateReceived(timeData time.Time) gpa.OutMessages { + if timeData.After(sub.timeData) { + sub.timeData = timeData return sub.tryCompleteInput() } return nil } +func (sub *syncACSImpl) BlockTipSetProposalReceived(strongParents iotago.BlockIDs) gpa.OutMessages { + if sub.strongParents != nil { + return nil // Already. + } + sub.strongParents = strongParents + return sub.tryCompleteInput() +} + func (sub *syncACSImpl) tryCompleteInput() gpa.OutMessages { - if sub.inputsReady || sub.BaseAnchorOutput == nil || sub.RequestRefs == nil || sub.DSSIndexProposal == nil || sub.TimeData.IsZero() { - return nil + if sub.inputsReadyCB == nil { + return nil // Done already. + } + if sub.txCreateInputReceived == nil && sub.blockOnlyInputReceived == nil { + return nil // At least one of these is required. 
+ } + if sub.txCreateInputReceived != nil { + if sub.stateMgrProposalReceived == nil || sub.mempoolProposalReceived == nil { + return nil // Mempool and StateMgr are needed if we are going to build a TX. + } } - sub.inputsReady = true - return sub.inputsReadyCB(sub.BaseAnchorOutput, sub.RequestRefs, sub.DSSIndexProposal, sub.TimeData) + if sub.dssTIndexProposal == nil || sub.dssBIndexProposal == nil || sub.timeData.IsZero() { + return nil // These are required in any case. + } + + msgs := gpa.NoMessages() + if sub.blockTipsNeededCB != nil { + msgs.AddAll(sub.blockTipsNeededCB()) + sub.blockTipsNeededCB = nil + } + + if sub.strongParents == nil { + return msgs + } + + cb := sub.inputsReadyCB + sub.inputsReadyCB = nil + return msgs.AddAll(cb( + sub.blockToRefer, + sub.txCreateInputReceived, + sub.blockOnlyInputReceived, + sub.mempoolProposalReceived, + sub.dssTIndexProposal, + sub.dssBIndexProposal, + sub.timeData, + sub.strongParents, + )) } func (sub *syncACSImpl) ACSOutputReceived(output gpa.Output) gpa.OutMessages { @@ -114,22 +223,31 @@ func (sub *syncACSImpl) String() string { str := "ACS" if sub.outputReady { str += statusStrOK - } else if sub.inputsReady { + } else if sub.inputsReadyCB == nil { str += "/WAIT[ACS to complete]" } else { wait := []string{} - if sub.BaseAnchorOutput == nil { + if sub.txCreateInputReceived == nil && sub.blockOnlyInputReceived == nil { + wait = append(wait, "Input") + } + if sub.txCreateInputReceived != nil && sub.stateMgrProposalReceived == nil { wait = append(wait, "BaseAnchorOutput") } - if sub.RequestRefs == nil { + if sub.txCreateInputReceived != nil && sub.mempoolProposalReceived == nil { wait = append(wait, "RequestRefs") } - if sub.DSSIndexProposal == nil { - wait = append(wait, "DSSIndexProposal") + if sub.dssTIndexProposal == nil { + wait = append(wait, "DSStIndexProposal") } - if sub.TimeData.IsZero() { + if sub.dssBIndexProposal == nil { + wait = append(wait, "DSSbIndexProposal") + } + if sub.timeData.IsZero() { wait = 
append(wait, "TimeData") } + if sub.strongParents == nil { + wait = append(wait, "strongParents") + } str += fmt.Sprintf("/WAIT[%v]", strings.Join(wait, ",")) } return str diff --git a/packages/chain/cons/sync_blk_data.go b/packages/chain/cons/sync_blk_data.go new file mode 100644 index 0000000000..7c79fd74e3 --- /dev/null +++ b/packages/chain/cons/sync_blk_data.go @@ -0,0 +1,79 @@ +package cons + +import ( + "time" + + iotago "github.com/iotaledger/iota.go/v4" + "github.com/iotaledger/wasp/packages/gpa" + "github.com/iotaledger/wasp/packages/hashing" +) + +type SyncBlkData interface { + HaveTipsProposal(tipsFn func(randomness hashing.HashValue) iotago.BlockIDs) gpa.OutMessages + HaveRandomness(randomness hashing.HashValue) gpa.OutMessages + HaveTimestamp(timestamp time.Time) gpa.OutMessages + HaveSignedTX(tx *iotago.SignedTransaction) gpa.OutMessages +} + +type SyncBlkDataCB = func( + tipsFn func(randomness hashing.HashValue) iotago.BlockIDs, + randomness hashing.HashValue, + timestamp time.Time, + tx *iotago.SignedTransaction, +) gpa.OutMessages + +type syncBlkData struct { + readyCB SyncBlkDataCB + tipsFn func(randomness hashing.HashValue) iotago.BlockIDs + randomness *hashing.HashValue + timestamp *time.Time + tx *iotago.SignedTransaction +} + +func NewSyncBlkData(readyCB SyncBlkDataCB) SyncBlkData { + return &syncBlkData{readyCB: readyCB} +} + +func (s *syncBlkData) HaveTipsProposal(tipsFn func(randomness hashing.HashValue) iotago.BlockIDs) gpa.OutMessages { + if s.tipsFn != nil { + return nil + } + s.tipsFn = tipsFn + return s.tryOutput() +} + +func (s *syncBlkData) HaveRandomness(randomness hashing.HashValue) gpa.OutMessages { + if s.randomness != nil { + return nil + } + s.randomness = &randomness + return s.tryOutput() +} + +func (s *syncBlkData) HaveTimestamp(timestamp time.Time) gpa.OutMessages { + if s.timestamp != nil { + return nil + } + s.timestamp = &timestamp + return s.tryOutput() +} + +func (s *syncBlkData) HaveSignedTX(tx *iotago.SignedTransaction)
gpa.OutMessages { + if s.tx != nil { + return nil + } + s.tx = tx + return s.tryOutput() +} + +func (s *syncBlkData) tryOutput() gpa.OutMessages { + if s.tipsFn == nil || s.randomness == nil || s.timestamp == nil || s.tx == nil { + return nil // Not yet. + } + if s.readyCB == nil { + return nil // Already called. + } + cb := s.readyCB + s.readyCB = nil + return cb(s.tipsFn, *s.randomness, *s.timestamp, s.tx) +} diff --git a/packages/chain/cons/sync_blk_sig.go b/packages/chain/cons/sync_blk_sig.go new file mode 100644 index 0000000000..d2085435b0 --- /dev/null +++ b/packages/chain/cons/sync_blk_sig.go @@ -0,0 +1,51 @@ +package cons + +import ( + iotago "github.com/iotaledger/iota.go/v4" + "github.com/iotaledger/wasp/packages/gpa" +) + +type SyncBlkSig interface { + HaveBlock(bl *iotago.Block) gpa.OutMessages + HaveSig(sig []byte) gpa.OutMessages +} + +type syncBlkSig struct { + readyCB func(bl *iotago.Block, sig []byte) gpa.OutMessages + bl *iotago.Block + sig []byte +} + +func NewSyncBlkSig( + readyCB func(bl *iotago.Block, sig []byte) gpa.OutMessages, +) SyncBlkSig { + return &syncBlkSig{readyCB: readyCB} +} + +func (s *syncBlkSig) HaveBlock(bl *iotago.Block) gpa.OutMessages { + if s.bl != nil { + return nil + } + s.bl = bl + return s.tryOutput() +} + +func (s *syncBlkSig) HaveSig(sig []byte) gpa.OutMessages { + if s.sig != nil { + return nil + } + s.sig = sig + return s.tryOutput() +} + +func (s *syncBlkSig) tryOutput() gpa.OutMessages { + if s.bl == nil || s.sig == nil { + return nil + } + if s.readyCB == nil { + return nil + } + cb := s.readyCB + s.readyCB = nil + return cb(s.bl, s.sig) +} diff --git a/packages/chain/cons/sync_nc.go b/packages/chain/cons/sync_nc.go new file mode 100644 index 0000000000..eb7b09e374 --- /dev/null +++ b/packages/chain/cons/sync_nc.go @@ -0,0 +1,228 @@ +package cons + +import ( + iotago "github.com/iotaledger/iota.go/v4" + "github.com/iotaledger/wasp/packages/gpa" +) + +// Interaction with the NodeConnection. 
+// To get the tip proposals. +type SyncNC interface { + // TXCreateInputReceived(baseCO *isc.ChainOutputs, blockToRefer *iotago.Block) gpa.OutMessages + // BlockOnlyInputReceived(txToPublish *iotago.SignedTransaction, blockToRefer *iotago.Block) gpa.OutMessages + + // StateMgrProposalReceived() gpa.OutMessages + // MempoolProposalReceived() gpa.OutMessages + // DSStIndexProposalReceived() gpa.OutMessages + // DSSbIndexProposalReceived() gpa.OutMessages + // TimeUpdateReceived() gpa.OutMessages + + BlockTipSetNeeded() gpa.OutMessages + BlockTipSetReceived(strongParents iotago.BlockIDs) gpa.OutMessages +} + +type syncNCImpl struct { + blockTipSetNeededCB func() gpa.OutMessages + blockTipSetReceivedCB func(strongParents iotago.BlockIDs) gpa.OutMessages + // blockToRefer *iotago.Block // Optional, can be left nil. Set along with tx or block inputs. + // txCreateInputReceived *isc.ChainOutputs + // blockOnlyInputReceived *iotago.SignedTransaction + + // stateMgrProposalReceived bool + // mempoolProposalReceived []*isc.RequestRef + + // dssTIndexProposal []int // Index proposals from the DSS for signing the TX. + // dssBIndexProposal []int // Index proposals from the DSS for signing the Block. + // timeData time.Time + + // inputsReady bool + // inputsReadyCB func() gpa.OutMessages + + // // Output is all the inputs plus the tip proposal. 
+ // strongParents iotago.BlockIDs + // outputReadyCB func( + // blockToRefer *iotago.Block, + // txCreateInputReceived *isc.ChainOutputs, + // blockOnlyInputReceived *iotago.SignedTransaction, + // mempoolProposalReceived []*isc.RequestRef, + // dssTIndexProposal []int, + // dssBIndexProposal []int, + // timeData time.Time, + // strongParents iotago.BlockIDs, + // ) gpa.OutMessages +} + +func NewSyncNC( + blockTipSetNeededCB func() gpa.OutMessages, + blockTipSetReceivedCB func(strongParents iotago.BlockIDs) gpa.OutMessages, +) SyncNC { + return &syncNCImpl{ + blockTipSetNeededCB: blockTipSetNeededCB, + blockTipSetReceivedCB: blockTipSetReceivedCB, + } +} + +func (s *syncNCImpl) BlockTipSetNeeded() gpa.OutMessages { + if s.blockTipSetNeededCB == nil { + return nil // Already done. + } + cb := s.blockTipSetNeededCB + s.blockTipSetNeededCB = nil + return cb() +} + +func (s *syncNCImpl) BlockTipSetReceived(strongParents iotago.BlockIDs) gpa.OutMessages { + if s.blockTipSetReceivedCB == nil { + return nil // Already done. 
+ } + cb := s.blockTipSetReceivedCB + s.blockTipSetReceivedCB = nil + return cb(strongParents) +} + +// func NewSyncNC( +// inputsReadyCB func() gpa.OutMessages, +// outputReadyCB func( +// blockToRefer *iotago.Block, +// txCreateInputReceived *isc.ChainOutputs, +// blockOnlyInputReceived *iotago.SignedTransaction, +// mempoolProposalReceived []*isc.RequestRef, +// dssTIndexProposal []int, +// dssBIndexProposal []int, +// timeData time.Time, +// strongParents iotago.BlockIDs, +// ) gpa.OutMessages, +// ) SyncNC { +// return &syncNCImpl{ +// inputsReadyCB: inputsReadyCB, +// outputReadyCB: outputReadyCB, +// } +// } + +// func (s *syncNCImpl) TXCreateInputReceived(baseCO *isc.ChainOutputs, blockToRefer *iotago.Block) gpa.OutMessages { +// if s.txCreateInputReceived != nil || s.blockOnlyInputReceived != nil { +// return nil +// } +// s.txCreateInputReceived = baseCO +// s.blockToRefer = blockToRefer +// return s.tryInputsReady() +// } + +// func (s *syncNCImpl) BlockOnlyInputReceived(txToPublish *iotago.SignedTransaction, blockToRefer *iotago.Block) gpa.OutMessages { +// if s.txCreateInputReceived != nil || s.blockOnlyInputReceived != nil { +// return nil +// } +// s.blockOnlyInputReceived = txToPublish +// s.blockToRefer = blockToRefer +// return s.tryInputsReady() +// } + +// func (s *syncNCImpl) StateMgrProposalReceived() gpa.OutMessages { +// if s.stateMgrProposalReceived { +// return nil +// } +// s.stateMgrProposalReceived = true +// return s.tryInputsReady() +// } + +// func (s *syncNCImpl) MempoolProposalReceived(requestRefs []*isc.RequestRef) gpa.OutMessages { +// if s.mempoolProposalReceived != nil { +// return nil +// } +// s.mempoolProposalReceived = requestRefs +// return s.tryInputsReady() +// } + +// func (s *syncNCImpl) DSStIndexProposalReceived(indexProposal []int) gpa.OutMessages { +// if s.dssTIndexProposal != nil { +// return nil +// } +// s.dssTIndexProposal = indexProposal +// return s.tryInputsReady() +// } + +// func (s *syncNCImpl) 
DSSbIndexProposalReceived(indexProposal []int) gpa.OutMessages { +// if s.dssBIndexProposal != nil { +// return nil +// } +// s.dssBIndexProposal = indexProposal +// return s.tryInputsReady() +// } + +// func (s *syncNCImpl) TimeUpdateReceived(timeData time.Time) gpa.OutMessages { +// if timeData.Before(s.timeData) || timeData.Equal(s.timeData) { +// return nil +// } +// s.timeData = timeData +// return s.tryInputsReady() +// } + +// func (s *syncNCImpl) BlockTipSetReceived(strongParents iotago.BlockIDs) gpa.OutMessages { +// if strongParents == nil { +// panic(fmt.Errorf("nil as strongParents in cons.sync_nc.")) +// } +// if s.strongParents != nil { +// return nil // Received already. +// } +// if !s.inputsReady { +// return nil // Too early. +// } +// s.strongParents = strongParents +// return s.outputReadyCB( +// s.blockToRefer, +// s.txCreateInputReceived, +// s.blockOnlyInputReceived, +// s.mempoolProposalReceived, +// s.dssTIndexProposal, +// s.dssBIndexProposal, +// s.timeData, +// s.strongParents, +// ) +// } + +// func (s *syncNCImpl) tryInputsReady() gpa.OutMessages { +// if s.inputsReady { +// return nil // Done already. +// } +// if s.txCreateInputReceived == nil && s.blockOnlyInputReceived == nil { +// return nil // At least one of these is required. +// } +// if s.txCreateInputReceived != nil { +// if !s.stateMgrProposalReceived || s.mempoolProposalReceived == nil { +// return nil // Mempool and StateMgr are needed if we are going to build a TX. +// } +// } +// if s.dssTIndexProposal == nil || s.dssBIndexProposal == nil || s.timeData.IsZero() { +// return nil // These are required in any case. +// } +// s.inputsReady = true +// return s.inputsReadyCB() +// } + +// // Try to provide useful human-readable compact status. +// func (s *syncNCImpl) String() string { +// str := "NC" +// // if sub.indexProposalReady && sub.outputReady { // TODO: ... 
+// // return str + statusStrOK +// // } +// // if sub.indexProposalReady { +// // str += "/idx=OK" +// // } else { +// // str += fmt.Sprintf("/idx[initialInputsReady=%v,indexProposalReady=%v]", sub.initialInputsReady, sub.indexProposalReady) +// // } +// // if sub.outputReady { +// // str += "/sig=OK" +// // } else if sub.signingInputsReady { +// // str += "/sig[WaitingForDSS]" +// // } else { +// // wait := []string{} +// // if sub.MessageToSign == nil { +// // wait = append(wait, "MessageToSign") +// // } +// // if sub.DecidedIndexProposals == nil { +// // wait = append(wait, "DecidedIndexProposals") +// // } +// // str += fmt.Sprintf("/sig=WAIT[%v]", strings.Join(wait, ",")) +// // } +// return str +// } diff --git a/packages/chain/cons/sync_res.go b/packages/chain/cons/sync_res.go new file mode 100644 index 0000000000..374bf1c2df --- /dev/null +++ b/packages/chain/cons/sync_res.go @@ -0,0 +1,134 @@ +package cons + +import ( + iotago "github.com/iotaledger/iota.go/v4" + "github.com/iotaledger/wasp/packages/gpa" + "github.com/iotaledger/wasp/packages/isc" + "github.com/iotaledger/wasp/packages/state" +) + +type SyncRes interface { + ReuseTX(tx *iotago.SignedTransaction) gpa.OutMessages + BuiltTX(tx *iotago.SignedTransaction) gpa.OutMessages + HaveTransition( + producedChainOutputs *isc.ChainOutputs, + consumedAnchorOutputID iotago.OutputID, + consumedAccountOutputID iotago.OutputID, + ) gpa.OutMessages + HaveStateBlock(producedStateBlock state.Block) gpa.OutMessages + HaveIotaBlock(producedIotaBlock *iotago.Block) gpa.OutMessages +} + +type syncResCB = func( + transactionReused bool, + transaction *iotago.SignedTransaction, + producedIotaBlock *iotago.Block, + producedChainOutputs *isc.ChainOutputs, + producedStateBlock state.Block, + consumedAnchorOutputID iotago.OutputID, + consumedAccountOutputID iotago.OutputID, +) gpa.OutMessages + +type syncRes struct { + readyCB syncResCB + + transactionReused bool + transaction *iotago.SignedTransaction + 
transactionReceived bool + + producedChainOutputs *isc.ChainOutputs + producedStateBlock state.Block + consumedAnchorOutputID iotago.OutputID + consumedAccountOutputID iotago.OutputID + transitionReceived bool + + producedIotaBlock *iotago.Block +} + +func NewSyncRes(readyCB syncResCB) SyncRes { + return &syncRes{readyCB: readyCB} +} + +func (s *syncRes) ReuseTX(tx *iotago.SignedTransaction) gpa.OutMessages { + if s.transactionReceived { + return nil // Already received. + } + if s.transitionReceived { + panic("transition received, but wer are going to reuse the TX.") + } + s.transactionReceived = true + s.transactionReused = true + s.transaction = tx + return s.tryOutput() +} + +func (s *syncRes) BuiltTX(tx *iotago.SignedTransaction) gpa.OutMessages { + if s.transactionReceived { + return nil // Already received. + } + s.transactionReceived = true + s.transactionReused = false + s.transaction = tx + return s.tryOutput() +} + +func (s *syncRes) HaveTransition( + producedChainOutputs *isc.ChainOutputs, + consumedAnchorOutputID iotago.OutputID, + consumedAccountOutputID iotago.OutputID, +) gpa.OutMessages { + if s.transactionReused { + panic("transaction is reused but we received the transition") + } + if s.transitionReceived { + return nil // Already received. + } + s.transitionReceived = true + s.producedChainOutputs = producedChainOutputs + s.consumedAnchorOutputID = consumedAnchorOutputID + s.consumedAccountOutputID = consumedAccountOutputID + return s.tryOutput() +} + +func (s *syncRes) HaveStateBlock(producedStateBlock state.Block) gpa.OutMessages { + if s.transactionReused { + panic("transaction is reused but we received the transition") + } + if s.producedStateBlock != nil { + panic("state block already received") + } + s.producedStateBlock = producedStateBlock + return s.tryOutput() +} + +func (s *syncRes) HaveIotaBlock(producedIotaBlock *iotago.Block) gpa.OutMessages { + if s.producedIotaBlock != nil { + return nil // Have already. 
+ } + s.producedIotaBlock = producedIotaBlock + return s.tryOutput() +} + +func (s *syncRes) tryOutput() gpa.OutMessages { + if !s.transactionReceived || !s.transitionReceived || s.producedIotaBlock == nil { + return nil // Not yet. + } + if s.transactionReceived && !s.transactionReused && s.producedStateBlock == nil { + return nil // Have to wait for the block. + } + + if s.readyCB == nil { + return nil // Already + } + cb := s.readyCB + s.readyCB = nil + return cb( + s.transactionReused, + s.transaction, + s.producedIotaBlock, + s.producedChainOutputs, + s.producedStateBlock, + s.consumedAnchorOutputID, + s.consumedAccountOutputID, + ) +} diff --git a/packages/chain/cons/sync_tx.go b/packages/chain/cons/sync_tx_sig.go similarity index 76% rename from packages/chain/cons/sync_tx.go rename to packages/chain/cons/sync_tx_sig.go index c351b7e005..4e27c35c71 100644 --- a/packages/chain/cons/sync_tx.go +++ b/packages/chain/cons/sync_tx_sig.go @@ -12,14 +12,14 @@ import ( "github.com/iotaledger/wasp/packages/vm" ) -type SyncTX interface { +type SyncTXSig interface { VMResultReceived(vmResult *vm.VMTaskResult) gpa.OutMessages SignatureReceived(signature []byte) gpa.OutMessages BlockSaved(block state.Block) gpa.OutMessages String() string } -type syncTXImpl struct { +type syncTXSigImpl struct { vmResult *vm.VMTaskResult signature []byte blockSaved bool @@ -29,11 +29,11 @@ type syncTXImpl struct { inputsReadyCB func(vmResult *vm.VMTaskResult, block state.Block, signature []byte) gpa.OutMessages } -func NewSyncTX(inputsReadyCB func(vmResult *vm.VMTaskResult, block state.Block, signature []byte) gpa.OutMessages) SyncTX { - return &syncTXImpl{inputsReadyCB: inputsReadyCB} +func NewSyncTX(inputsReadyCB func(vmResult *vm.VMTaskResult, block state.Block, signature []byte) gpa.OutMessages) SyncTXSig { + return &syncTXSigImpl{inputsReadyCB: inputsReadyCB} } -func (sub *syncTXImpl) VMResultReceived(vmResult *vm.VMTaskResult) gpa.OutMessages { +func (sub *syncTXSigImpl) 
VMResultReceived(vmResult *vm.VMTaskResult) gpa.OutMessages { if sub.vmResult != nil || vmResult == nil { return nil } @@ -41,7 +41,7 @@ func (sub *syncTXImpl) VMResultReceived(vmResult *vm.VMTaskResult) gpa.OutMessag return sub.tryCompleteInputs() } -func (sub *syncTXImpl) SignatureReceived(signature []byte) gpa.OutMessages { +func (sub *syncTXSigImpl) SignatureReceived(signature []byte) gpa.OutMessages { if sub.signature != nil || signature == nil { return nil } @@ -49,7 +49,7 @@ func (sub *syncTXImpl) SignatureReceived(signature []byte) gpa.OutMessages { return sub.tryCompleteInputs() } -func (sub *syncTXImpl) BlockSaved(block state.Block) gpa.OutMessages { +func (sub *syncTXSigImpl) BlockSaved(block state.Block) gpa.OutMessages { if sub.blockSaved { return nil } @@ -58,7 +58,7 @@ func (sub *syncTXImpl) BlockSaved(block state.Block) gpa.OutMessages { return sub.tryCompleteInputs() } -func (sub *syncTXImpl) tryCompleteInputs() gpa.OutMessages { +func (sub *syncTXSigImpl) tryCompleteInputs() gpa.OutMessages { if sub.inputsReady || sub.vmResult == nil || sub.signature == nil || !sub.blockSaved { return nil } @@ -67,7 +67,7 @@ func (sub *syncTXImpl) tryCompleteInputs() gpa.OutMessages { } // Try to provide useful human-readable compact status. 
-func (sub *syncTXImpl) String() string { +func (sub *syncTXSigImpl) String() string { str := "TX" if sub.inputsReady { str += statusStrOK diff --git a/packages/chain/mempool/mempool.go b/packages/chain/mempool/mempool.go index b4ecd35962..287b24add8 100644 --- a/packages/chain/mempool/mempool.go +++ b/packages/chain/mempool/mempool.go @@ -181,10 +181,10 @@ type reqConsensusInstancesUpdated struct { } type reqConsensusProposal struct { - ctx context.Context - accountOutput *isc.ChainOutputs - consensusID consGR.ConsensusID - responseCh chan<- []*isc.RequestRef + ctx context.Context + chainOutputs *isc.ChainOutputs + consensusID consGR.ConsensusID + responseCh chan<- []*isc.RequestRef } func (r *reqConsensusProposal) Respond(reqRefs []*isc.RequestRef) { @@ -331,10 +331,10 @@ func (mpi *mempoolImpl) ConsensusInstancesUpdated(activeConsensusInstances []con func (mpi *mempoolImpl) ConsensusProposalAsync(ctx context.Context, accountOutput *isc.ChainOutputs, consensusID consGR.ConsensusID) <-chan []*isc.RequestRef { res := make(chan []*isc.RequestRef, 1) req := &reqConsensusProposal{ - ctx: ctx, - accountOutput: accountOutput, - consensusID: consensusID, - responseCh: res, + ctx: ctx, + chainOutputs: accountOutput, + consensusID: consensusID, + responseCh: res, } mpi.reqConsensusProposalPipe.In() <- req return res @@ -581,12 +581,12 @@ func (mpi *mempoolImpl) handleAccessNodesUpdated(recv *reqAccessNodesUpdated) { // This implementation only tracks a single branch. So, we will only respond // to the request matching the TrackNewChainHead call. 
func (mpi *mempoolImpl) handleConsensusProposal(recv *reqConsensusProposal) { - if mpi.chainHeadAO == nil || !recv.accountOutput.Equals(mpi.chainHeadAO) { - mpi.log.LogDebugf("handleConsensusProposal, have to wait for chain head to become %v", recv.accountOutput) + if mpi.chainHeadAO == nil || !recv.chainOutputs.Equals(mpi.chainHeadAO) { + mpi.log.LogDebugf("handleConsensusProposal, have to wait for chain head to become %v", recv.chainOutputs) mpi.waitChainHead = append(mpi.waitChainHead, recv) return } - mpi.log.LogDebugf("handleConsensusProposal, already have the chain head %v", recv.accountOutput) + mpi.log.LogDebugf("handleConsensusProposal, already have the chain head %v", recv.chainOutputs) mpi.handleConsensusProposalForChainHead(recv) } @@ -868,7 +868,7 @@ func (mpi *mempoolImpl) handleTrackNewChainHead(req *reqTrackNewChainHead) { if waiting.ctx.Err() != nil { continue // Drop it. } - if waiting.accountOutput.Equals(mpi.chainHeadAO) { + if waiting.chainOutputs.Equals(mpi.chainHeadAO) { mpi.handleConsensusProposalForChainHead(waiting) continue // Drop it from wait queue. } diff --git a/packages/chain/node.go b/packages/chain/node.go index 91634eb148..bf36f79600 100644 --- a/packages/chain/node.go +++ b/packages/chain/node.go @@ -110,30 +110,30 @@ type ChainNodeConn interface { } type chainNodeImpl struct { - me gpa.NodeID - nodeIdentity *cryptolib.KeyPair - chainID isc.ChainID - chainMgr gpa.AckHandler - chainStore indexedstore.IndexedStore - nodeConn NodeConnection - tangleTime time.Time - mempool mempool.Mempool - stateMgr statemanager.StateMgr - recvAnchorOutputPipe pipe.Pipe[*isc.ChainOutputs] - recvTxPublishedPipe pipe.Pipe[*txPublished] - recvMilestonePipe pipe.Pipe[time.Time] - consensusInsts *shrinkingmap.ShrinkingMap[iotago.Ed25519Address, *shrinkingmap.ShrinkingMap[cmt_log.LogIndex, *consensusInst]] // Running consensus instances. 
- consOutputPipe pipe.Pipe[*consOutput] - consRecoverPipe pipe.Pipe[*consRecover] - publishingTXes *shrinkingmap.ShrinkingMap[iotago.TransactionID, context.CancelFunc] // TX'es now being published. - procCache *processors.Cache // Cache for the SC processors. - configUpdatedCh chan *configUpdate - serversUpdatedPipe pipe.Pipe[*serversUpdate] - awaitReceiptActCh chan *awaitReceiptReq - awaitReceiptCnfCh chan *awaitReceiptReq - stateTrackerAct StateTracker - stateTrackerCnf StateTracker - blockWAL sm_gpa_utils.BlockWAL + me gpa.NodeID + nodeIdentity *cryptolib.KeyPair + chainID isc.ChainID + chainMgr gpa.AckHandler + chainStore indexedstore.IndexedStore + nodeConn NodeConnection + tangleTime time.Time + mempool mempool.Mempool + stateMgr statemanager.StateMgr + recvAncAccOutputsPipe pipe.Pipe[*isc.AnchorAccountOutput] + recvTxPublishedPipe pipe.Pipe[*txPublished] + recvMilestonePipe pipe.Pipe[time.Time] + consensusInsts *shrinkingmap.ShrinkingMap[iotago.Ed25519Address, *shrinkingmap.ShrinkingMap[cmt_log.LogIndex, *consensusInst]] // Running consensus instances. + consOutputPipe pipe.Pipe[*consOutput] + consRecoverPipe pipe.Pipe[*consRecover] + publishingTXes *shrinkingmap.ShrinkingMap[iotago.TransactionID, context.CancelFunc] // TX'es now being published. + procCache *processors.Cache // Cache for the SC processors. + configUpdatedCh chan *configUpdate + serversUpdatedPipe pipe.Pipe[*serversUpdate] + awaitReceiptActCh chan *awaitReceiptReq + awaitReceiptCnfCh chan *awaitReceiptReq + stateTrackerAct StateTracker + stateTrackerCnf StateTracker + blockWAL sm_gpa_utils.BlockWAL // // Configuration values. consensusDelay time.Duration @@ -150,7 +150,9 @@ type chainNodeImpl struct { accessNodesFromCNF []*cryptolib.PublicKey // Access nodes, as configured in the governance contract (for the active state). accessNodesFromACT []*cryptolib.PublicKey // Access nodes, as configured in the governance contract (for the confirmed state). 
serverNodes []*cryptolib.PublicKey // The nodes we can query (because they consider us an access node). - latestConfirmedAO *isc.ChainOutputs // Confirmed by L1, can be lagging from latestActiveAO. + latestConfirmedAncOut *isc.AnchorOutputWithID // Part of latestConfirmedAO, before we have both. + latestConfirmedAccOut *isc.AccountOutputWithID // Part of latestConfirmedAO, before we have both. + latestConfirmedAO *isc.ChainOutputs // Confirmed by L1, can be lagging from latestActiveAO. // TODO: Recheck vs latestConfirmedStateAO. latestConfirmedState state.State // State corresponding to latestConfirmedAO, for performance reasons. latestConfirmedStateAO *isc.ChainOutputs // Set only when the corresponding state is retrieved. latestActiveAO *isc.ChainOutputs // This is the AO the chain is build on. @@ -265,7 +267,7 @@ func New( chainStore: chainStore, nodeConn: nodeConn, tangleTime: time.Time{}, // Zero time, while we haven't received it from the L1. - recvAnchorOutputPipe: pipe.NewInfinitePipe[*isc.ChainOutputs](), + recvAncAccOutputsPipe: pipe.NewInfinitePipe[*isc.AnchorAccountOutput](), recvTxPublishedPipe: pipe.NewInfinitePipe[*txPublished](), recvMilestonePipe: pipe.NewInfinitePipe[time.Time](), consensusInsts: shrinkingmap.New[iotago.Ed25519Address, *shrinkingmap.ShrinkingMap[cmt_log.LogIndex, *consensusInst]](), @@ -307,7 +309,7 @@ func New( log: log, } - cni.chainMetrics.Pipe.TrackPipeLen("node-recvAnchorOutputPipe", cni.recvAnchorOutputPipe.Len) + cni.chainMetrics.Pipe.TrackPipeLen("node-recvAncAccOutputsPipe", cni.recvAncAccOutputsPipe.Len) cni.chainMetrics.Pipe.TrackPipeLen("node-recvTxPublishedPipe", cni.recvTxPublishedPipe.Len) cni.chainMetrics.Pipe.TrackPipeLen("node-recvMilestonePipe", cni.recvMilestonePipe.Len) cni.chainMetrics.Pipe.TrackPipeLen("node-consOutputPipe", cni.consOutputPipe.Len) @@ -438,16 +440,11 @@ func New( } cni.mempool.ReceiveOnLedgerRequest(req) } - recvAnchorOutputPipeInCh := cni.recvAnchorOutputPipe.In() - recvAnchorOutputCB := 
func(outputInfo *isc.OutputInfo) { - log.LogDebugf("recvAnchorOutputCB[%p], %v", cni, outputInfo.OutputID.ToHex()) + // TODO: recvAncAccOutputsPipeInCh := cni.recvAncAccOutputsPipe.In() + recvAnchorOutputCB := func(ancAccOutput *isc.OutputInfo) { + log.LogDebugf("recvAnchorOutputCB[%p], %v", cni, ancAccOutput) cni.chainMetrics.NodeConn.L1AnchorOutputReceived() - if outputInfo.Consumed() { - // we don't need to send consumed anchor outputs to the pipe - return - } - // TODO: Turned the output manually into an AnchorOutput - recvAnchorOutputPipeInCh <- isc.NewChainOutputs(outputInfo.Output.(*iotago.AnchorOutput), outputInfo.OutputID, nil, iotago.OutputID{}) + // TODO: Call it when the types are fixed. recvAncAccOutputsPipeInCh <- ancAccOutput } recvMilestonePipeInCh := cni.recvMilestonePipe.In() recvMilestoneCB := func(timestamp time.Time) { @@ -490,7 +487,7 @@ func (cni *chainNodeImpl) ServersUpdated(serverNodes []*cryptolib.PublicKey) { func (cni *chainNodeImpl) run(ctx context.Context, cleanupFunc context.CancelFunc) { defer util.ExecuteIfNotNil(cleanupFunc) - recvAnchorOutputPipeOutCh := cni.recvAnchorOutputPipe.Out() + recvAncAccOutputsPipeOutCh := cni.recvAncAccOutputsPipe.Out() recvTxPublishedPipeOutCh := cni.recvTxPublishedPipe.Out() recvMilestonePipeOutCh := cni.recvMilestonePipe.Out() netRecvPipeOutCh := cni.netRecvPipe.Out() @@ -516,12 +513,12 @@ func (cni *chainNodeImpl) run(ctx context.Context, cleanupFunc context.CancelFun continue } cni.handleTxPublished(ctx, txPublishResult) - case anchorOutput, ok := <-recvAnchorOutputPipeOutCh: + case ancAccOutputs, ok := <-recvAncAccOutputsPipeOutCh: if !ok { - recvAnchorOutputPipeOutCh = nil + recvAncAccOutputsPipeOutCh = nil continue } - cni.handleAnchorOutput(ctx, anchorOutput) + cni.handleAnchorAccountOutputs(ctx, ancAccOutputs) case timestamp, ok := <-recvMilestonePipeOutCh: if !ok { recvMilestonePipeOutCh = nil @@ -674,10 +671,33 @@ func (cni *chainNodeImpl) handleTxPublished(ctx context.Context, txPubResult 
*tx cni.handleChainMgrOutput(ctx, cni.chainMgr.Output()) } -func (cni *chainNodeImpl) handleAnchorOutput(ctx context.Context, anchorOutput *isc.ChainOutputs) { - cni.log.LogDebugf("handleAnchorOutput: %v", anchorOutput) - if anchorOutput.GetStateIndex() == 0 { - initBlock, err := origin.InitChainByAnchorOutput(cni.chainStore, anchorOutput, cni.nodeConn.L1APIProvider(), cni.nodeConn.BaseTokenInfo()) +func (cni *chainNodeImpl) handleAnchorAccountOutputs(ctx context.Context, ancAccOutput *isc.AnchorAccountOutput) { + cni.log.LogDebugf("handleAnchorAccountOutputs: %v", ancAccOutput) + // + // Make sure we have both the Anchor and Account outputs. + anc := ancAccOutput.AnchorOutputWithID() + acc := ancAccOutput.AccountOutputWithID() + if anc != nil { + cni.latestConfirmedAncOut = anc + } + if acc != nil { + cni.latestConfirmedAccOut = acc + } + if cni.latestConfirmedAncOut == nil || cni.latestConfirmedAccOut == nil { + cni.log.LogDebugf("Don't have both outputs yet, will wait") + return + } + latestConfirmedAO := isc.NewChainOutputs( + cni.latestConfirmedAncOut.AnchorOutput(), + cni.latestConfirmedAncOut.OutputID(), + cni.latestConfirmedAccOut.AccountOutput(), + cni.latestConfirmedAccOut.OutputID(), + ) + cni.latestConfirmedAO = latestConfirmedAO + // + // Have them both, so proceed. Further use the ChainOutputs everywhere. + if latestConfirmedAO.GetStateIndex() == 0 { + initBlock, err := origin.InitChainByAnchorOutput(cni.chainStore, latestConfirmedAO, cni.nodeConn.L1APIProvider(), nil) // TODO: TokenInfo if err != nil { cni.log.LogErrorf("Ignoring InitialAO for the chain: %v", err) return @@ -687,10 +707,10 @@ func (cni *chainNodeImpl) handleAnchorOutput(ctx context.Context, anchorOutput * } } - cni.stateTrackerCnf.TrackAnchorOutput(anchorOutput, true) - cni.stateTrackerAct.TrackAnchorOutput(anchorOutput, false) // ACT state will be equal to CNF or ahead of it. 
+ cni.stateTrackerCnf.TrackAnchorOutput(latestConfirmedAO, true) + cni.stateTrackerAct.TrackAnchorOutput(latestConfirmedAO, false) // ACT state will be equal to CNF or ahead of it. outMsgs := cni.chainMgr.Input( - chainmanager.NewInputAnchorOutputConfirmed(anchorOutput), + chainmanager.NewInputAnchorOutputConfirmed(latestConfirmedAO), ) cni.sendMessages(outMsgs) cni.handleChainMgrOutput(ctx, cni.chainMgr.Output()) @@ -785,14 +805,12 @@ func (cni *chainNodeImpl) handleConsensusOutput(ctx context.Context, out *consOu chainMgrInput = chainmanager.NewInputConsensusOutputDone( out.request.CommitteeAddr, out.request.LogIndex, - out.request.BaseAnchorOutput.AnchorOutputID, out.output.Result, ) case cons.Skipped: chainMgrInput = chainmanager.NewInputConsensusOutputSkip( out.request.CommitteeAddr, out.request.LogIndex, - out.request.BaseAnchorOutput.AnchorOutputID, ) default: panic(fmt.Errorf("unexpected output state from consensus: %+v", out)) @@ -825,8 +843,10 @@ func (cni *chainNodeImpl) ensureConsensusInput(ctx context.Context, needConsensu cni.consRecoverPipe.In() <- &consRecover{request: needConsensus} } ci.request = needConsensus - cni.stateTrackerAct.TrackAnchorOutput(needConsensus.BaseAnchorOutput, true) - ci.consensus.Input(needConsensus.BaseAnchorOutput, outputCB, recoverCB) + if needConsensus.ConsensusInput.BaseCO() != nil { + cni.stateTrackerAct.TrackAnchorOutput(needConsensus.ConsensusInput.BaseCO(), true) + } + ci.consensus.Input(needConsensus.ConsensusInput, outputCB, recoverCB) } } @@ -845,7 +865,14 @@ func (cni *chainNodeImpl) ensureConsensusInst(ctx context.Context, needConsensus consGrCtx, consGrCancel := context.WithCancel(ctx) logIndexCopy := addLogIndex cgr := consGR.New( - consGrCtx, cni.chainID, cni.chainStore, dkShare, &logIndexCopy, cni.nodeConn.L1APIProvider(), cni.nodeIdentity, + consGrCtx, + cni.chainID, + cni.chainStore, + dkShare, + logIndex.Bytes(), + cni.nodeConn.L1APIProvider(), + cni.TokenInfo(), + cni.nodeIdentity, cni.procCache, 
cni.mempool, cni.stateMgr, cni.net, cni.validatorAgentID, cni.recoveryTimeout, RedeliveryPeriod, PrintStatusPeriod, @@ -871,7 +898,7 @@ func (cni *chainNodeImpl) ensureConsensusInst(ctx context.Context, needConsensus activeConsensusInstances := []consGR.ConsensusID{} cni.consensusInsts.ForEach(func(cAddr iotago.Ed25519Address, consMap *shrinkingmap.ShrinkingMap[cmt_log.LogIndex, *consensusInst]) bool { consMap.ForEach(func(li cmt_log.LogIndex, _ *consensusInst) bool { - activeConsensusInstances = append(activeConsensusInstances, consGR.NewConsensusID(&cAddr, &li)) + activeConsensusInstances = append(activeConsensusInstances, consGR.NewConsensusID(&cAddr, li.Bytes())) return true }) return true diff --git a/packages/chain/node_test.go b/packages/chain/node_test.go index d1d6894b8e..ae49f3e3aa 100644 --- a/packages/chain/node_test.go +++ b/packages/chain/node_test.go @@ -207,8 +207,9 @@ func testNodeBasic(t *testing.T, n, f int, reliable bool, timeout time.Duration) activeAO, err := node.LatestChainOutputs(chaintypes.ActiveState) require.NoError(t, err) lastPublishedTX := te.nodeConns[i].published[len(te.nodeConns[i].published)-1] - lastPublishedAO, err := isc.ChainOutputsFromTx(lastPublishedTX, te.chainID.AsAddress()) + lastPublishedCO, err := isc.ChainOutputsFromTx(lastPublishedTX, te.chainID.AsAddress()) require.NoError(t, err) + lastPublishedAO := isc.AnchorOutputWithIDFromChainOutputs(lastPublishedCO) if !lastPublishedAO.Equals(confirmedAO) { // In this test we confirm outputs immediately. te.log.LogDebugf("lastPublishedAO(%v) != confirmedAO(%v)", lastPublishedAO, confirmedAO) return false diff --git a/packages/isc/output.go b/packages/isc/output.go index ba7b5189bf..3c8879f0d0 100644 --- a/packages/isc/output.go +++ b/packages/isc/output.go @@ -44,13 +44,13 @@ type ChainOutputs struct { // TODO this doesn't make sense anymore since the accountOutput is not owned by the chain... 
func NewChainOutputs( - AnchorOutput *iotago.AnchorOutput, + anchorOutput *iotago.AnchorOutput, anchorOutputID iotago.OutputID, accountOutput *iotago.AccountOutput, accountOutputID iotago.OutputID, ) *ChainOutputs { return &ChainOutputs{ - AnchorOutput: AnchorOutput, + AnchorOutput: anchorOutput, AnchorOutputID: anchorOutputID, accountOutput: accountOutput, accountOutputID: accountOutputID, @@ -61,12 +61,20 @@ func NewChainOutputs( func RandomChainOutputs() *ChainOutputs { return NewChainOutputs( &iotago.AnchorOutput{ + StateIndex: 1, Features: iotago.AnchorOutputFeatures{ &iotago.StateMetadataFeature{Entries: iotago.StateMetadataFeatureEntries{}}, }, + UnlockConditions: iotago.UnlockConditions[iotago.AnchorOutputUnlockCondition]{}, + ImmutableFeatures: iotago.Features[iotago.AnchorOutputImmFeature]{}, }, testiotago.RandOutputID(), - &iotago.AccountOutput{}, + &iotago.AccountOutput{ + FoundryCounter: 0, + UnlockConditions: iotago.UnlockConditions[iotago.AccountOutputUnlockCondition]{}, + Features: iotago.Features[iotago.AccountOutputFeature]{}, + ImmutableFeatures: iotago.Features[iotago.AccountOutputImmFeature]{}, + }, testiotago.RandOutputID(), ) } @@ -158,33 +166,33 @@ func (c *ChainOutputs) StorageDeposit(l1 iotago.APIProvider) iotago.BaseToken { return sd } -func (a *ChainOutputs) String() string { - if a == nil { +func (c *ChainOutputs) String() string { + if c == nil { return "nil" } - return fmt.Sprintf("AO[si#%v]%v", a.AnchorOutput.StateIndex, a.AnchorOutputID.ToHex()) + return fmt.Sprintf("CO[si#%v]%v", c.AnchorOutput.StateIndex, c.AnchorOutputID.ToHex()) } -func (a *ChainOutputs) Read(r io.Reader) error { +func (c *ChainOutputs) Read(r io.Reader) error { rr := rwutil.NewReader(r) - rr.ReadN(a.AnchorOutputID[:]) - a.AnchorOutput = new(iotago.AnchorOutput) - rr.ReadSerialized(a.AnchorOutput, math.MaxInt32) - if a.AnchorOutput.StateIndex >= 1 { - rr.ReadN(a.accountOutputID[:]) - a.accountOutput = new(iotago.AccountOutput) - 
rr.ReadSerialized(a.accountOutput, math.MaxInt32) + rr.ReadN(c.AnchorOutputID[:]) + c.AnchorOutput = new(iotago.AnchorOutput) + rr.ReadSerialized(c.AnchorOutput, math.MaxInt32) + if c.AnchorOutput.StateIndex >= 1 { + rr.ReadN(c.accountOutputID[:]) + c.accountOutput = new(iotago.AccountOutput) + rr.ReadSerialized(c.accountOutput, math.MaxInt32) } return rr.Err } -func (a *ChainOutputs) Write(w io.Writer) error { +func (c *ChainOutputs) Write(w io.Writer) error { ww := rwutil.NewWriter(w) - ww.WriteN(a.AnchorOutputID[:]) - ww.WriteSerialized(a.AnchorOutput, math.MaxInt32) - if a.AnchorOutput.StateIndex >= 1 { - ww.WriteN(a.accountOutputID[:]) - ww.WriteSerialized(a.accountOutput, math.MaxInt32) + ww.WriteN(c.AnchorOutputID[:]) + ww.WriteSerialized(c.AnchorOutput, math.MaxInt32) + if c.AnchorOutput.StateIndex >= 1 { + ww.WriteN(c.accountOutputID[:]) + ww.WriteSerialized(c.accountOutput, math.MaxInt32) } return ww.Err } diff --git a/packages/isc/output2.go b/packages/isc/output2.go new file mode 100644 index 0000000000..51065fe345 --- /dev/null +++ b/packages/isc/output2.go @@ -0,0 +1,203 @@ +// Copyright 2020 IOTA Stiftung +// SPDX-License-Identifier: Apache-2.0 + +package isc + +import ( + iotago "github.com/iotaledger/iota.go/v4" +) + +type OutputWithID interface { + OutputID() iotago.OutputID + Output() iotago.Output + OutputType() iotago.OutputType + TransactionID() iotago.TransactionID +} + +type outputWithIDImpl struct { + outputID iotago.OutputID + output iotago.Output +} + +func newOutputWithID(output iotago.Output, outputID iotago.OutputID) *outputWithIDImpl { + return &outputWithIDImpl{ + outputID: outputID, + output: output, + } +} + +func (o *outputWithIDImpl) OutputID() iotago.OutputID { + return o.outputID +} + +func (o *outputWithIDImpl) Output() iotago.Output { + return o.output +} + +func (o *outputWithIDImpl) OutputType() iotago.OutputType { + return o.Output().Type() +} + +func (o *outputWithIDImpl) TransactionID() iotago.TransactionID { + return 
o.OutputID().TransactionID() +} + +type AnchorOutputWithID struct { + *outputWithIDImpl +} + +var _ OutputWithID = &AnchorOutputWithID{} + +func NewAnchorOutputWithID(output *iotago.AnchorOutput, outputID iotago.OutputID) *AnchorOutputWithID { + return &AnchorOutputWithID{ + outputWithIDImpl: newOutputWithID(output, outputID), + } +} + +func AnchorOutputWithIDFromChainOutputs(co *ChainOutputs) *AnchorOutputWithID { + return NewAnchorOutputWithID(co.AnchorOutput, co.AnchorOutputID) +} + +func (o *AnchorOutputWithID) AnchorOutput() *iotago.AnchorOutput { + return o.Output().(*iotago.AnchorOutput) +} + +func (o *AnchorOutputWithID) Equals(other *AnchorOutputWithID) bool { + return o.outputID == other.outputID +} + +func (o *AnchorOutputWithID) InChainOutputs(co *ChainOutputs) bool { + return o.outputID == co.AnchorOutputID +} + +func (o *AnchorOutputWithID) StateIndex() uint32 { + return o.AnchorOutput().StateIndex +} + +func (o *AnchorOutputWithID) StateController() iotago.Address { + return o.AnchorOutput().StateController() +} + +type AccountOutputWithID struct { + *outputWithIDImpl +} + +var _ OutputWithID = &AccountOutputWithID{} + +func NewAccountOutputWithID(output *iotago.AccountOutput, outputID iotago.OutputID) *AccountOutputWithID { + return &AccountOutputWithID{ + outputWithIDImpl: newOutputWithID(output, outputID), + } +} + +func (o *AccountOutputWithID) AccountOutput() *iotago.AccountOutput { + return o.Output().(*iotago.AccountOutput) +} + +type BasicOutputWithID struct { + *outputWithIDImpl +} + +var _ OutputWithID = &BasicOutputWithID{} + +func NewBasicOutputWithID(output *iotago.BasicOutput, outputID iotago.OutputID) *BasicOutputWithID { + return &BasicOutputWithID{ + outputWithIDImpl: newOutputWithID(output, outputID), + } +} + +func (o *BasicOutputWithID) BasicOutput() *iotago.BasicOutput { + return o.Output().(*iotago.BasicOutput) +} + +type FoundryOutputWithID struct { + *outputWithIDImpl +} + +var _ OutputWithID = &FoundryOutputWithID{} + +func 
NewFoundryOutputWithID(output *iotago.FoundryOutput, outputID iotago.OutputID) *FoundryOutputWithID { + return &FoundryOutputWithID{ + outputWithIDImpl: newOutputWithID(output, outputID), + } +} + +func (o *FoundryOutputWithID) FoundryOutput() *iotago.FoundryOutput { + return o.Output().(*iotago.FoundryOutput) +} + +type NFTOutputWithID struct { + *outputWithIDImpl +} + +var _ OutputWithID = &NFTOutputWithID{} + +func NewNFTOutputWithID(output *iotago.NFTOutput, outputID iotago.OutputID) *NFTOutputWithID { + return &NFTOutputWithID{ + outputWithIDImpl: newOutputWithID(output, outputID), + } +} + +func (o *NFTOutputWithID) NFTOutput() *iotago.NFTOutput { + return o.Output().(*iotago.NFTOutput) +} + +type AnchorAccountOutput struct { + anchorOutput *AnchorOutputWithID + accountOutput *AccountOutputWithID +} + +func NewAnchorAccountOutput(anchorOutput *AnchorOutputWithID, accountOutput *AccountOutputWithID) *AnchorAccountOutput { + return &AnchorAccountOutput{ + anchorOutput: anchorOutput, + accountOutput: accountOutput, + } +} + +func NewAnchorAccountOutputAnchor(anchorOutput *AnchorOutputWithID) *AnchorAccountOutput { + return NewAnchorAccountOutput(anchorOutput, nil) +} + +func NewAnchorAccountOutputAccount(accountOutput *AccountOutputWithID) *AnchorAccountOutput { + return NewAnchorAccountOutput(nil, accountOutput) +} + +func (o *AnchorAccountOutput) AnchorOutputWithID() *AnchorOutputWithID { + return o.anchorOutput +} + +func (o *AnchorAccountOutput) AnchorOutput() *iotago.AnchorOutput { + aoID := o.AnchorOutputWithID() + if aoID == nil { + return nil + } + return aoID.AnchorOutput() +} + +func (o *AnchorAccountOutput) AnchorOutputID() iotago.OutputID { + aoID := o.AnchorOutputWithID() + if aoID == nil { + return iotago.OutputID{} + } + return aoID.OutputID() +} + +func (o *AnchorAccountOutput) AccountOutputWithID() *AccountOutputWithID { + return o.accountOutput +} + +func (o *AnchorAccountOutput) AccountOutput() *iotago.AccountOutput { + aoID := 
o.AccountOutputWithID() + if aoID == nil { + return nil + } + return aoID.AccountOutput() +} + +func (o *AnchorAccountOutput) AccountOutputID() iotago.OutputID { + aoID := o.AccountOutputWithID() + if aoID == nil { + return iotago.OutputID{} + } + return aoID.OutputID() +} diff --git a/packages/origin/origin.go b/packages/origin/origin.go index b967845ac5..331387dabc 100644 --- a/packages/origin/origin.go +++ b/packages/origin/origin.go @@ -252,7 +252,7 @@ func NewChainOriginTransaction( outputs[OriginTxAccountOutputIndex] = accountOutput outputs = append(outputs, remainder...) - block, err := transaction.FinalizeTxAndBuildBlock( + _, block, err := transaction.FinalizeTxAndBuildBlock( l1API, transaction.TxBuilderFromInputsAndOutputs(l1API, txInputs, outputs, keyPair), blockIssuance, diff --git a/packages/solo/chain.go b/packages/solo/chain.go index 7bc56d34c6..273d4a329e 100644 --- a/packages/solo/chain.go +++ b/packages/solo/chain.go @@ -503,7 +503,7 @@ func (ch *Chain) CreateNewBlockIssuer(senderKeyPair *cryptolib.KeyPair, newState newOwnerAccountOutput.Mana = 0 txBuilder.AddOutput(newOwnerAccountOutput) - block, err := transaction.FinalizeTxAndBuildBlock( + _, block, err := transaction.FinalizeTxAndBuildBlock( testutil.L1API, txBuilder, ch.Env.BlockIssuance(), diff --git a/packages/solo/req.go b/packages/solo/req.go index 1f5619b292..12f66e5f1d 100644 --- a/packages/solo/req.go +++ b/packages/solo/req.go @@ -186,7 +186,7 @@ func (ch *Chain) createRequestTx(req *CallParams, keyPair *cryptolib.KeyPair) (* keyPair, senderAddr := ch.requestSender(req, keyPair) unspentOutputs := ch.Env.utxoDB.GetUnspentOutputs(keyPair.Address()) reqParams := req.Build(ch.ChainID.AsAddress()) - block, err := transaction.NewRequestTransaction( + _, block, err := transaction.NewRequestTransaction( keyPair, senderAddr, unspentOutputs, diff --git a/packages/solo/solo.go b/packages/solo/solo.go index 52adde5cf4..1198a136f6 100644 --- a/packages/solo/solo.go +++ b/packages/solo/solo.go @@ 
-644,7 +644,7 @@ func (env *Solo) MintNFTL1(issuer *cryptolib.KeyPair, target iotago.Address, imm func (env *Solo) MintNFTsL1(issuer *cryptolib.KeyPair, target iotago.Address, collectionOutputID *iotago.OutputID, immutableMetadata []iotago.MetadataFeatureEntries) ([]*isc.NFT, []*NFTMintedInfo, error) { allOuts := env.utxoDB.GetUnspentOutputs(issuer.Address()) - block, err := transaction.NewMintNFTsTransaction( + _, block, err := transaction.NewMintNFTsTransaction( issuer, collectionOutputID, target, @@ -692,7 +692,7 @@ func (env *Solo) MintNFTsL1(issuer *cryptolib.KeyPair, target iotago.Address, co // SendL1 sends base or native tokens to another L1 address func (env *Solo) SendL1(targetAddress iotago.Address, fts *isc.FungibleTokens, wallet *cryptolib.KeyPair) { allOuts := env.utxoDB.GetUnspentOutputs(wallet.Address()) - tx, err := transaction.NewTransferTransaction( + _, bl, err := transaction.NewTransferTransaction( fts, 0, wallet.Address(), @@ -706,7 +706,7 @@ func (env *Solo) SendL1(targetAddress iotago.Address, fts *isc.FungibleTokens, w env.BlockIssuance(), ) require.NoError(env.T, err) - err = env.AddToLedger(tx) + err = env.AddToLedger(bl) require.NoError(env.T, err) } diff --git a/packages/testutil/testchain/test_chain_ledger.go b/packages/testutil/testchain/test_chain_ledger.go index 11c90b266c..adbae9c8a9 100644 --- a/packages/testutil/testchain/test_chain_ledger.go +++ b/packages/testutil/testchain/test_chain_ledger.go @@ -79,7 +79,7 @@ func (tcl *TestChainLedger) MakeTxChainOrigin(committeePubKey *cryptolib.PublicK func (tcl *TestChainLedger) MakeTxAccountsDeposit(account *cryptolib.KeyPair) []isc.Request { outs := tcl.utxoDB.GetUnspentOutputs(account.Address()) - block, err := transaction.NewRequestTransaction( + _, block, err := transaction.NewRequestTransaction( account, account.Address(), outs, @@ -106,7 +106,7 @@ func (tcl *TestChainLedger) MakeTxAccountsDeposit(account *cryptolib.KeyPair) [] func (tcl *TestChainLedger) 
MakeTxDeployIncCounterContract() []isc.Request { sender := tcl.governor outs := tcl.utxoDB.GetUnspentOutputs(sender.Address()) - block, err := transaction.NewRequestTransaction( + _, block, err := transaction.NewRequestTransaction( sender, sender.Address(), outs, @@ -172,15 +172,16 @@ func (tcl *TestChainLedger) FakeStateTransition(chainOuts *isc.ChainOutputs, sta ) } -func (tcl *TestChainLedger) FakeRotationTX(chainOuts *isc.ChainOutputs, nextCommitteeAddr iotago.Address) (*isc.ChainOutputs, *iotago.SignedTransaction) { - tx, err := transaction.NewRotateChainStateControllerTx( +func (tcl *TestChainLedger) FakeRotationTX(chainOuts *isc.ChainOutputs, nextStateController *cryptolib.PublicKey) (*isc.ChainOutputs, *iotago.SignedTransaction) { + tx, _, err := transaction.NewRotateChainStateControllerTx( + iotago.OutputSet{}, // No outputs to consume? + tcl.governor, tcl.chainID.AsAnchorID(), - nextCommitteeAddr, + nextStateController, chainOuts.AnchorOutputID, chainOuts.AnchorOutput, - testutil.L1API.TimeProvider().SlotFromTime(time.Now()), - testutil.L1API, - tcl.governor, + testutil.L1APIProvider, + tcl.utxoDB.BlockIssuance(), ) if err != nil { panic(err) diff --git a/packages/testutil/utxodb/utxodb.go b/packages/testutil/utxodb/utxodb.go index e487c95bf6..db3b873305 100644 --- a/packages/testutil/utxodb/utxodb.go +++ b/packages/testutil/utxodb/utxodb.go @@ -289,7 +289,7 @@ func (u *UtxoDB) NewWalletWithFundsFromFaucet(keyPair ...*cryptolib.KeyPair) (*c blockIssuance := u.BlockIssuance() - block, err := transaction.FinalizeTxAndBuildBlock( + tx, block, err := transaction.FinalizeTxAndBuildBlock( testutil.L1API, txBuilder, blockIssuance, @@ -307,7 +307,6 @@ func (u *UtxoDB) NewWalletWithFundsFromFaucet(keyPair ...*cryptolib.KeyPair) (*c } // now take the basic output owned by the implicit acount and convert it to an AccountOutput - tx := util.TxFromBlock(block) outputToConvert := tx.Transaction.Outputs[0].Clone() outputToConvertID := 
iotago.OutputIDFromTransactionIDAndIndex(lo.Must(tx.Transaction.ID()), 0) @@ -340,7 +339,7 @@ func (u *UtxoDB) NewWalletWithFundsFromFaucet(keyPair ...*cryptolib.KeyPair) (*c }, }) - convertBlock, err := transaction.FinalizeTxAndBuildBlock( + _, convertBlock, err := transaction.FinalizeTxAndBuildBlock( testutil.L1API, txBuilderTarget, u.BlockIssuance(), @@ -461,7 +460,13 @@ func (u *UtxoDB) AddToLedger(block *iotago.Block) error { // verify that there is an account issuer for the block if _, ok := u.blockIssuer[block.Header.IssuerID]; !ok { - return fmt.Errorf("block issuer not found") + // TODO: Cleanup. + str := "block issuer" + block.Header.IssuerID.String() + ", existing: " + for x := range u.blockIssuer { + str = str + " bi=" + x.String() + } + + return fmt.Errorf("block issuer not found" + str) } // check block mana diff --git a/packages/testutil/utxodb/utxodb_test.go b/packages/testutil/utxodb/utxodb_test.go index 35d4155671..9b73c8e5e0 100644 --- a/packages/testutil/utxodb/utxodb_test.go +++ b/packages/testutil/utxodb/utxodb_test.go @@ -65,7 +65,7 @@ func TestDoubleSpend(t *testing.T) { }). AddOutput(accountOutput) - blockSpend2, err := transaction.FinalizeTxAndBuildBlock( + _, blockSpend2, err := transaction.FinalizeTxAndBuildBlock( testutil.L1API, txb2, u.BlockIssuance(), @@ -87,7 +87,7 @@ func TestDoubleSpend(t *testing.T) { }). 
AddOutput(accountOutput) - blockDoubleSpend, err := transaction.FinalizeTxAndBuildBlock( + _, blockDoubleSpend, err := transaction.FinalizeTxAndBuildBlock( testutil.L1API, txb3, u.BlockIssuance(), diff --git a/packages/transaction/nfttransaction.go b/packages/transaction/nfttransaction.go index 87840d8e4d..52c828a926 100644 --- a/packages/transaction/nfttransaction.go +++ b/packages/transaction/nfttransaction.go @@ -16,7 +16,7 @@ func NewMintNFTsTransaction( creationSlot iotago.SlotIndex, l1APIProvider iotago.APIProvider, blockIssuance *api.IssuanceBlockHeaderResponse, -) (*iotago.Block, error) { +) (*iotago.SignedTransaction, *iotago.Block, error) { senderAddress := nftIssuerKeyPair.Address() outputAssets := NewEmptyAssetsWithMana() @@ -73,7 +73,7 @@ func NewMintNFTsTransaction( l1APIProvider, ) if err != nil { - return nil, err + return nil, nil, err } outputs = append(outputs, remainder...) diff --git a/packages/transaction/requesttx.go b/packages/transaction/requesttx.go index 01d4377b30..44c694a06b 100644 --- a/packages/transaction/requesttx.go +++ b/packages/transaction/requesttx.go @@ -24,7 +24,7 @@ func NewTransferTransaction( disableAutoAdjustStorageDeposit bool, // if true, the minimal storage deposit won't be adjusted automatically l1APIProvider iotago.APIProvider, blockIssuance *api.IssuanceBlockHeaderResponse, -) (*iotago.Block, error) { +) (*iotago.SignedTransaction, *iotago.Block, error) { l1API := l1APIProvider.APIForSlot(creationSlot) output := MakeBasicOutput( targetAddress, @@ -40,10 +40,10 @@ func NewTransferTransaction( storageDeposit, err := l1API.StorageScoreStructure().MinDeposit(output) if err != nil { - return nil, err + return nil, nil, err } if output.BaseTokenAmount() < storageDeposit { - return nil, fmt.Errorf("%v: available %d < required %d base tokens", + return nil, nil, fmt.Errorf("%v: available %d < required %d base tokens", ErrNotEnoughBaseTokensForStorageDeposit, output.BaseTokenAmount(), storageDeposit) } @@ -55,7 +55,7 @@ func 
NewTransferTransaction( l1APIProvider, ) if err != nil { - return nil, err + return nil, nil, err } outputs := append([]iotago.Output{output}, remainder...) @@ -84,7 +84,7 @@ func NewRequestTransaction( disableAutoAdjustStorageDeposit bool, // if true, the minimal storage deposit won't be adjusted automatically l1APIProvider iotago.APIProvider, blockIssuance *api.IssuanceBlockHeaderResponse, -) (*iotago.Block, error) { +) (*iotago.SignedTransaction, *iotago.Block, error) { outputs := []iotago.Output{} l1API := l1APIProvider.APIForSlot(creationSlot) @@ -96,10 +96,10 @@ func NewRequestTransaction( storageDeposit, err := l1API.StorageScoreStructure().MinDeposit(out) if err != nil { - return nil, err + return nil, nil, err } if out.BaseTokenAmount() < storageDeposit { - return nil, fmt.Errorf("%v: available %d < required %d base tokens", + return nil, nil, fmt.Errorf("%v: available %d < required %d base tokens", ErrNotEnoughBaseTokensForStorageDeposit, out.BaseTokenAmount(), storageDeposit) } outputs = append(outputs, out) @@ -123,7 +123,7 @@ func NewRequestTransaction( l1APIProvider, ) if err != nil { - return nil, err + return nil, nil, err } outputs = append(outputs, remainder...) diff --git a/packages/transaction/rotate.go b/packages/transaction/rotate.go index 15e9e50c74..ba460a1cbd 100644 --- a/packages/transaction/rotate.go +++ b/packages/transaction/rotate.go @@ -63,7 +63,7 @@ func NewAccountOutputForStateControllerTx( outputs := []iotago.Output{accountOutput} outputs = append(outputs, remainder...) 
- block, err := FinalizeTxAndBuildBlock( + _, block, err := FinalizeTxAndBuildBlock( l1API, TxBuilderFromInputsAndOutputs(l1API, inputs, outputs, sender), blockIssuance, @@ -87,17 +87,17 @@ func NewRotateChainStateControllerTx( chainOutput iotago.Output, l1APIProvider iotago.APIProvider, blockIssuance *api.IssuanceBlockHeaderResponse, -) (*iotago.Block, error) { +) (*iotago.SignedTransaction, *iotago.Block, error) { slot := blockIssuance.LatestCommitment.Slot l1API := l1APIProvider.APIForSlot(slot) o, ok := chainOutput.(*iotago.AnchorOutput) if !ok { - return nil, fmt.Errorf("provided output is not the correct one. Expected AnchorOutput, received %T=%v", chainOutput, chainOutput) + return nil, nil, fmt.Errorf("provided output is not the correct one. Expected AnchorOutput, received %T=%v", chainOutput, chainOutput) } resolvedAnchorID := util.AnchorIDFromAnchorOutput(o, chainOutputID) if resolvedAnchorID != anchorID { - return nil, fmt.Errorf("provided output is not the correct one. Expected ChainID: %s, got: %s", + return nil, nil, fmt.Errorf("provided output is not the correct one. 
Expected ChainID: %s, got: %s", anchorID.ToHex(), chainOutput.(*iotago.AnchorOutput).AnchorID.ToHex(), ) @@ -143,7 +143,7 @@ func NewRotateChainStateControllerTx( } newChainOutput.Features = newFeatures - // create an isser account for the next state controller + // create an issuer account for the next state controller accountOutput := NewAccountOutputForStateController(l1API, newStateController) inputs, remainder, blockIssuerAccountID, err := ComputeInputsAndRemainder( @@ -154,7 +154,7 @@ func NewRotateChainStateControllerTx( l1APIProvider, ) if err != nil { - return nil, err + return nil, nil, err } outputs := []iotago.Output{accountOutput, newChainOutput} diff --git a/packages/transaction/util.go b/packages/transaction/util.go index cd9bc75e8e..282788c473 100644 --- a/packages/transaction/util.go +++ b/packages/transaction/util.go @@ -271,10 +271,14 @@ func FinalizeTxAndBuildBlock( storedManaOutputIndex int, blockIssuerID iotago.AccountID, signer cryptolib.VariantKeyPair, -) (*iotago.Block, error) { +) (*iotago.SignedTransaction, *iotago.Block, error) { tx, err := finalizeAndSignTx(txBuilder, blockIssuance, storedManaOutputIndex, blockIssuerID) if err != nil { - return nil, err + return nil, nil, err + } + bl, err := BlockFromTx(l1API, blockIssuance, tx, blockIssuerID, signer) + if err != nil { + return nil, nil, err } - return BlockFromTx(l1API, blockIssuance, tx, blockIssuerID, signer) + return tx, bl, nil } diff --git a/packages/webapi/corecontracts/blocklog.go b/packages/webapi/corecontracts/blocklog.go index 2b6702b875..ff8c1a3790 100644 --- a/packages/webapi/corecontracts/blocklog.go +++ b/packages/webapi/corecontracts/blocklog.go @@ -14,7 +14,7 @@ func GetControlAddresses(ch chaintypes.Chain) (*isc.ControlAddresses, error) { anchorOutput := chainOutput.AnchorOutput controlAddresses := &isc.ControlAddresses{ - StateAddress: chainOutput.AnchorOutput.StateController(), + StateAddress: anchorOutput.StateController(), GoverningAddress: 
anchorOutput.GovernorAddress(), SinceBlockIndex: anchorOutput.StateIndex, }