diff --git a/alertmanager/alerts.go b/alertmanager/alerts.go index e390f75df..056f7d47b 100644 --- a/alertmanager/alerts.go +++ b/alertmanager/alerts.go @@ -357,16 +357,17 @@ func (al *alerts) getAddresses() ([]address.Address, []address.Address, error) { return nil, nil, err } - _, err = toml.Decode(text, cfg) + _, err = config.TransparentDecode(text, cfg) if err != nil { return nil, nil, xerrors.Errorf("could not read layer, bad toml %s: %w", layer, err) } - for i := range cfg.Addresses { - prec := cfg.Addresses[i].PreCommitControl - com := cfg.Addresses[i].CommitControl - term := cfg.Addresses[i].TerminateControl - miners := cfg.Addresses[i].MinerAddresses + addrs := cfg.Addresses.Get() + for i := range addrs { + prec := addrs[i].PreCommitControl + com := addrs[i].CommitControl + term := addrs[i].TerminateControl + miners := addrs[i].MinerAddresses for j := range prec { if prec[j] != "" { addrMap[prec[j]] = struct{}{} diff --git a/cmd/curio/config_test.go b/cmd/curio/config_test.go index 90f2926f0..1698cdaa2 100644 --- a/cmd/curio/config_test.go +++ b/cmd/curio/config_test.go @@ -572,18 +572,19 @@ func TestConfig(t *testing.T) { _, err := deps.LoadConfigWithUpgrades(baseText, baseCfg) require.NoError(t, err) - baseCfg.Addresses = append(baseCfg.Addresses, addr1) - baseCfg.Addresses = lo.Filter(baseCfg.Addresses, func(a config.CurioAddresses, _ int) bool { + addrs := []config.CurioAddresses{addr1} + addrs = lo.Filter(addrs, func(a config.CurioAddresses, _ int) bool { return len(a.MinerAddresses) > 0 }) + baseCfg.Addresses.Set(addrs) _, err = config.ConfigUpdate(baseCfg, config.DefaultCurioConfig(), config.Commented(true), config.DefaultKeepUncommented(), config.NoEnv()) require.NoError(t, err) - baseCfg.Addresses = append(baseCfg.Addresses, addr2) - baseCfg.Addresses = lo.Filter(baseCfg.Addresses, func(a config.CurioAddresses, _ int) bool { + baseCfg.Addresses.Set(append(baseCfg.Addresses.Get(), addr2)) + baseCfg.Addresses.Set(lo.Filter(baseCfg.Addresses.Get(), 
func(a config.CurioAddresses, _ int) bool { return len(a.MinerAddresses) > 0 - }) + })) _, err = config.ConfigUpdate(baseCfg, config.DefaultCurioConfig(), config.Commented(true), config.DefaultKeepUncommented(), config.NoEnv()) require.NoError(t, err) diff --git a/cmd/curio/guidedsetup/guidedsetup.go b/cmd/curio/guidedsetup/guidedsetup.go index 34bdd8319..3905c35e2 100644 --- a/cmd/curio/guidedsetup/guidedsetup.go +++ b/cmd/curio/guidedsetup/guidedsetup.go @@ -539,7 +539,7 @@ func stepNewMinerConfig(d *MigrationData) { // Only add miner address for SP setup if !d.nonSP { - curioCfg.Addresses = append(curioCfg.Addresses, config.CurioAddresses{ + curioCfg.Addresses.Set([]config.CurioAddresses{{ PreCommitControl: []string{}, CommitControl: []string{}, DealPublishControl: []string{}, @@ -548,7 +548,7 @@ func stepNewMinerConfig(d *MigrationData) { DisableWorkerFallback: false, MinerAddresses: []string{d.MinerID.String()}, BalanceManager: config.DefaultBalanceManager(), - }) + }}) } sk, err := io.ReadAll(io.LimitReader(rand.Reader, 32)) @@ -604,9 +604,9 @@ func stepNewMinerConfig(d *MigrationData) { // If 'base' layer is not present if !lo.Contains(titles, "base") { if !d.nonSP { - curioCfg.Addresses = lo.Filter(curioCfg.Addresses, func(a config.CurioAddresses, _ int) bool { + curioCfg.Addresses.Set(lo.Filter(curioCfg.Addresses.Get(), func(a config.CurioAddresses, _ int) bool { return len(a.MinerAddresses) > 0 - }) + })) } cb, err := config.ConfigUpdate(curioCfg, config.DefaultCurioConfig(), config.Commented(true), config.DefaultKeepUncommented(), config.NoEnv()) if err != nil { @@ -661,10 +661,10 @@ func stepNewMinerConfig(d *MigrationData) { os.Exit(1) } - baseCfg.Addresses = append(baseCfg.Addresses, curioCfg.Addresses...) 
- baseCfg.Addresses = lo.Filter(baseCfg.Addresses, func(a config.CurioAddresses, _ int) bool { + baseCfg.Addresses.Set(append(baseCfg.Addresses.Get(), curioCfg.Addresses.Get()...)) + baseCfg.Addresses.Set(lo.Filter(baseCfg.Addresses.Get(), func(a config.CurioAddresses, _ int) bool { return len(a.MinerAddresses) > 0 - }) + })) cb, err := config.ConfigUpdate(baseCfg, config.DefaultCurioConfig(), config.Commented(true), config.DefaultKeepUncommented(), config.NoEnv()) if err != nil { diff --git a/cmd/curio/guidedsetup/shared.go b/cmd/curio/guidedsetup/shared.go index a54bd2918..07455d4ae 100644 --- a/cmd/curio/guidedsetup/shared.go +++ b/cmd/curio/guidedsetup/shared.go @@ -10,7 +10,6 @@ import ( "path" "strings" - "github.com/BurntSushi/toml" "github.com/ipfs/go-cid" "github.com/ipfs/go-datastore" "github.com/ipfs/go-datastore/namespace" @@ -150,7 +149,7 @@ func SaveConfigToLayerMigrateSectors(db *harmonydb.DB, minerRepoPath, chainApiIn minerAddress = addr - curioCfg.Addresses = []config.CurioAddresses{{ + curioCfg.Addresses.Set([]config.CurioAddresses{{ MinerAddresses: []string{addr.String()}, PreCommitControl: smCfg.Addresses.PreCommitControl, CommitControl: smCfg.Addresses.CommitControl, @@ -159,7 +158,7 @@ func SaveConfigToLayerMigrateSectors(db *harmonydb.DB, minerRepoPath, chainApiIn DisableOwnerFallback: smCfg.Addresses.DisableOwnerFallback, DisableWorkerFallback: smCfg.Addresses.DisableWorkerFallback, BalanceManager: config.DefaultBalanceManager(), - }} + }}) ks, err := lr.KeyStore() if err != nil { @@ -174,10 +173,11 @@ func SaveConfigToLayerMigrateSectors(db *harmonydb.DB, minerRepoPath, chainApiIn curioCfg.Apis.ChainApiInfo = append(curioCfg.Apis.ChainApiInfo, chainApiInfo) // Express as configTOML - configTOML := &bytes.Buffer{} - if err = toml.NewEncoder(configTOML).Encode(curioCfg); err != nil { + configTOMLBytes, err := config.TransparentMarshal(curioCfg) + if err != nil { return minerAddress, err } + configTOML := bytes.NewBuffer(configTOMLBytes) if 
lo.Contains(titles, "base") { // append addresses @@ -192,17 +192,20 @@ func SaveConfigToLayerMigrateSectors(db *harmonydb.DB, minerRepoPath, chainApiIn if err != nil { return minerAddress, xerrors.Errorf("Cannot load base config: %w", err) } - for _, addr := range baseCfg.Addresses { - if lo.Contains(addr.MinerAddresses, curioCfg.Addresses[0].MinerAddresses[0]) { + addrs := baseCfg.Addresses.Get() + for _, addr := range addrs { + ma := addr.MinerAddresses + if lo.Contains(ma, curioCfg.Addresses.Get()[0].MinerAddresses[0]) { goto skipWritingToBase } } // write to base { - baseCfg.Addresses = append(baseCfg.Addresses, curioCfg.Addresses[0]) - baseCfg.Addresses = lo.Filter(baseCfg.Addresses, func(a config.CurioAddresses, _ int) bool { + addrs := baseCfg.Addresses.Get() + addrs = append(addrs, curioCfg.Addresses.Get()[0]) + baseCfg.Addresses.Set(lo.Filter(addrs, func(a config.CurioAddresses, _ int) bool { return len(a.MinerAddresses) > 0 - }) + })) if baseCfg.Apis.ChainApiInfo == nil { baseCfg.Apis.ChainApiInfo = append(baseCfg.Apis.ChainApiInfo, chainApiInfo) } @@ -223,7 +226,7 @@ func SaveConfigToLayerMigrateSectors(db *harmonydb.DB, minerRepoPath, chainApiIn } say(plain, "Configuration 'base' was updated to include this miner's address (%s) and its wallet setup.", minerAddress) - say(plain, "Compare the configurations %s to %s. Changes between the miner IDs other than wallet addreses should be a new, minimal layer for runners that need it.", "base", "mig-"+curioCfg.Addresses[0].MinerAddresses[0]) + say(plain, "Compare the configurations %s to %s. 
Changes between the miner IDs other than wallet addresses should be a new, minimal layer for runners that need it.", "base", "mig-"+curioCfg.Addresses.Get()[0].MinerAddresses[0]) skipWritingToBase: } else { _, err = db.Exec(ctx, `INSERT INTO harmony_config (title, config) VALUES ('base', $1) @@ -236,7 +239,7 @@ func SaveConfigToLayerMigrateSectors(db *harmonydb.DB, minerRepoPath, chainApiIn } { // make a layer representing the migration - layerName := fmt.Sprintf("mig-%s", curioCfg.Addresses[0].MinerAddresses[0]) + layerName := fmt.Sprintf("mig-%s", curioCfg.Addresses.Get()[0].MinerAddresses[0]) _, err = db.Exec(ctx, "DELETE FROM harmony_config WHERE title=$1", layerName) if err != nil { return minerAddress, xerrors.Errorf("Cannot delete existing layer: %w", err) @@ -244,12 +247,12 @@ func SaveConfigToLayerMigrateSectors(db *harmonydb.DB, minerRepoPath, chainApiIn // Express as new toml to avoid adding StorageRPCSecret in more than 1 layer curioCfg.Apis.StorageRPCSecret = "" - ct := &bytes.Buffer{} - if err = toml.NewEncoder(ct).Encode(curioCfg); err != nil { + ctBytes, err := config.TransparentMarshal(curioCfg) + if err != nil { return minerAddress, err } - _, err = db.Exec(ctx, "INSERT INTO harmony_config (title, config) VALUES ($1, $2)", layerName, ct.String()) + _, err = db.Exec(ctx, "INSERT INTO harmony_config (title, config) VALUES ($1, $2)", layerName, string(ctBytes)) if err != nil { return minerAddress, xerrors.Errorf("Cannot insert layer after layer created message: %w", err) } @@ -287,22 +290,24 @@ func getDBSettings(smCfg config.StorageMiner) string { func ensureEmptyArrays(cfg *config.CurioConfig) { if cfg.Addresses == nil { - cfg.Addresses = []config.CurioAddresses{} + cfg.Addresses = config.NewDynamic([]config.CurioAddresses{}) } else { - for i := range cfg.Addresses { - if cfg.Addresses[i].PreCommitControl == nil { - cfg.Addresses[i].PreCommitControl = []string{} + addrs := cfg.Addresses.Get() + for i := range addrs { + if addrs[i].PreCommitControl == nil { + 
addrs[i].PreCommitControl = []string{} } - if cfg.Addresses[i].CommitControl == nil { - cfg.Addresses[i].CommitControl = []string{} + if addrs[i].CommitControl == nil { + addrs[i].CommitControl = []string{} } - if cfg.Addresses[i].DealPublishControl == nil { - cfg.Addresses[i].DealPublishControl = []string{} + if addrs[i].DealPublishControl == nil { + addrs[i].DealPublishControl = []string{} } - if cfg.Addresses[i].TerminateControl == nil { - cfg.Addresses[i].TerminateControl = []string{} + if addrs[i].TerminateControl == nil { + addrs[i].TerminateControl = []string{} } } + cfg.Addresses.Set(addrs) } if cfg.Apis.ChainApiInfo == nil { cfg.Apis.ChainApiInfo = []string{} diff --git a/cmd/curio/tasks/tasks.go b/cmd/curio/tasks/tasks.go index 530e61f04..e0fe30649 100644 --- a/cmd/curio/tasks/tasks.go +++ b/cmd/curio/tasks/tasks.go @@ -64,7 +64,7 @@ var log = logging.Logger("curio/deps") func WindowPostScheduler(ctx context.Context, fc config.CurioFees, pc config.CurioProvingConfig, api api.Chain, verif storiface.Verifier, paramck func() (bool, error), sender *message.Sender, chainSched *chainsched.CurioChainSched, - as *multictladdr.MultiAddressSelector, addresses map[dtypes.MinerAddress]bool, db *harmonydb.DB, + as *multictladdr.MultiAddressSelector, addresses *config.Dynamic[map[dtypes.MinerAddress]bool], db *harmonydb.DB, stor paths.Store, idx paths.SectorIndex, max int) (*window2.WdPostTask, *window2.WdPostSubmitTask, *window2.WdPostRecoverDeclareTask, error) { // todo config @@ -243,10 +243,15 @@ func StartTasks(ctx context.Context, dependencies *deps.Deps, shutdownChan chan } } - miners := make([]address.Address, 0, len(maddrs)) - for k := range maddrs { - miners = append(miners, address.Address(k)) + miners := config.NewDynamic(make([]address.Address, 0, len(maddrs.Get()))) + forMiners := func() { + minersTmp := make([]address.Address, 0, len(maddrs.Get())) + for k := range maddrs.Get() { + minersTmp = append(minersTmp, address.Address(k)) + } + 
miners.Set(minersTmp) } + forMiners(); maddrs.OnChange(forMiners) if cfg.Subsystems.EnableBalanceManager { balMgrTask, err := storage_market.NewBalanceManager(full, miners, cfg, sender) @@ -534,50 +539,54 @@ func machineDetails(deps *deps.Deps, activeTasks []harmonytask.TaskInterface, ma return item.TypeDetails().Name }) - miners := lo.Map(maps.Keys(deps.Maddrs), func(item dtypes.MinerAddress, _ int) string { - return address.Address(item).String() - }) - sort.Strings(miners) + doMachineDetails := func() { + miners := lo.Map(maps.Keys(deps.Maddrs.Get()), func(item dtypes.MinerAddress, _ int) string { + return address.Address(item).String() + }) + sort.Strings(miners) - _, err := deps.DB.Exec(context.Background(), `INSERT INTO harmony_machine_details + _, err := deps.DB.Exec(context.Background(), `INSERT INTO harmony_machine_details (tasks, layers, startup_time, miners, machine_id, machine_name) VALUES ($1, $2, $3, $4, $5, $6) ON CONFLICT (machine_id) DO UPDATE SET tasks=$1, layers=$2, startup_time=$3, miners=$4, machine_id=$5, machine_name=$6`, - strings.Join(taskNames, ","), strings.Join(deps.Layers, ","), - time.Now(), strings.Join(miners, ","), machineID, machineName) - - if err != nil { - log.Errorf("failed to update machine details: %s", err) - return - } + strings.Join(taskNames, ","), strings.Join(deps.Layers, ","), + time.Now(), strings.Join(miners, ","), machineID, machineName) - // maybePostWarning - if !lo.Contains(taskNames, "WdPost") && !lo.Contains(taskNames, "WinPost") { - // Maybe we aren't running a PoSt for these miners? 
- var allMachines []struct { - MachineID int `db:"machine_id"` - Miners string `db:"miners"` - Tasks string `db:"tasks"` - } - err := deps.DB.Select(context.Background(), &allMachines, `SELECT machine_id, miners, tasks FROM harmony_machine_details`) if err != nil { - log.Errorf("failed to get machine details: %s", err) + log.Errorf("failed to update machine details: %s", err) return } - for _, miner := range miners { - var myPostIsHandled bool - for _, m := range allMachines { - if !lo.Contains(strings.Split(m.Miners, ","), miner) { - continue + // maybePostWarning + if !lo.Contains(taskNames, "WdPost") && !lo.Contains(taskNames, "WinPost") { + // Maybe we aren't running a PoSt for these miners? + var allMachines []struct { + MachineID int `db:"machine_id"` + Miners string `db:"miners"` + Tasks string `db:"tasks"` + } + err := deps.DB.Select(context.Background(), &allMachines, `SELECT machine_id, miners, tasks FROM harmony_machine_details`) + if err != nil { + log.Errorf("failed to get machine details: %s", err) + return + } + + for _, miner := range miners { + var myPostIsHandled bool + for _, m := range allMachines { + if !lo.Contains(strings.Split(m.Miners, ","), miner) { + continue + } + if lo.Contains(strings.Split(m.Tasks, ","), "WdPost") && lo.Contains(strings.Split(m.Tasks, ","), "WinPost") { + myPostIsHandled = true + break + } } - if lo.Contains(strings.Split(m.Tasks, ","), "WdPost") && lo.Contains(strings.Split(m.Tasks, ","), "WinPost") { - myPostIsHandled = true - break + if !myPostIsHandled { + log.Errorf("No PoSt tasks are running for miner %s. Start handling PoSts immediately with:\n\tcurio run --layers=\"post\" ", miner) } } - if !myPostIsHandled { - log.Errorf("No PoSt tasks are running for miner %s. 
Start handling PoSts immediately with:\n\tcurio run --layers=\"post\" ", miner) - } } } + doMachineDetails() + deps.Maddrs.OnChange(doMachineDetails) } diff --git a/cmd/curio/test-cli.go b/cmd/curio/test-cli.go index d78002639..ed8d59212 100644 --- a/cmd/curio/test-cli.go +++ b/cmd/curio/test-cli.go @@ -105,7 +105,7 @@ var wdPostTaskCmd = &cli.Command{ } var taskIDs []int64 - for addr := range deps.Maddrs { + for addr := range deps.Maddrs.Get() { maddr, err := address.IDFromAddress(address.Address(addr)) if err != nil { return xerrors.Errorf("cannot get miner id %w", err) @@ -257,7 +257,7 @@ It will not send any messages to the chain. Since it can compute any deadline, o } _, _ = wdPoStSubmitTask, derlareRecoverTask - if len(deps.Maddrs) == 0 { + if len(deps.Maddrs.Get()) == 0 { return errors.New("no miners to compute WindowPoSt for") } head, err := deps.Chain.ChainHead(ctx) @@ -267,7 +267,7 @@ It will not send any messages to the chain. Since it can compute any deadline, o di := dline.NewInfo(head.Height(), cctx.Uint64("deadline"), 0, 0, 0, 10 /*challenge window*/, 0, 0) - for maddr := range deps.Maddrs { + for maddr := range deps.Maddrs.Get() { if spAddr != address.Undef && address.Address(maddr) != spAddr { continue } @@ -337,7 +337,7 @@ var wdPostVanillaCmd = &cli.Command{ } _, _ = wdPoStSubmitTask, derlareRecoverTask - if len(deps.Maddrs) == 0 { + if len(deps.Maddrs.Get()) == 0 { return errors.New("no miners to compute WindowPoSt for") } head, err := deps.Chain.ChainHead(ctx) @@ -347,7 +347,7 @@ var wdPostVanillaCmd = &cli.Command{ di := dline.NewInfo(head.Height(), cctx.Uint64("deadline"), 0, 0, 0, 10 /*challenge window*/, 0, 0) - for maddr := range deps.Maddrs { + for maddr := range deps.Maddrs.Get() { if spAddr != address.Undef && address.Address(maddr) != spAddr { continue } diff --git a/deps/config/cfgdocgen/gen.go b/deps/config/cfgdocgen/gen.go index c262a38c4..ea26c3035 100644 --- a/deps/config/cfgdocgen/gen.go +++ b/deps/config/cfgdocgen/gen.go @@ 
-79,7 +79,17 @@ func run() error { isDynamic = true typ = strings.TrimPrefix(typ, "*Dynamic[") typ = strings.TrimSuffix(typ, "]") - comment = append(comment, "Updates will affect running instances.") + // Only add the update notice if it's not already in the comments + hasUpdateNotice := false + for _, c := range comment { + if strings.Contains(c, "Updates will affect running instances.") { + hasUpdateNotice = true + break + } + } + if !hasUpdateNotice { + comment = append(comment, "Updates will affect running instances.") + } } if len(comment) > 0 && strings.HasPrefix(comment[0], fmt.Sprintf("%s is DEPRECATED", name)) { diff --git a/deps/config/doc_gen.go b/deps/config/doc_gen.go index ce3edc603..7efd80d70 100644 --- a/deps/config/doc_gen.go +++ b/deps/config/doc_gen.go @@ -208,7 +208,8 @@ Accepts a decimal string (e.g., "123.45" or "123 fil") with optional "fil" or "a Name: "Addresses", Type: "[]CurioAddresses", - Comment: `Addresses specifies the list of miner addresses and their related wallet addresses.`, + Comment: `Addresses specifies the list of miner addresses and their related wallet addresses. +Updates will affect running instances.`, }, { Name: "Proving", @@ -317,7 +318,8 @@ are much less likely to get stuck in mempool. (Default: true)`, Comment: `MaxMarketRunningPipelines is the maximum number of market pipelines that can be actively running tasks. A "running" pipeline is one that has at least one task currently assigned to a machine (owner_id is not null). If this limit is exceeded, the system will apply backpressure to delay processing of new deals. -0 means unlimited. (Default: 64)`, +0 means unlimited. (Default: 64) +Updates will affect running instances.`, }, { Name: "MaxQueueDownload", @@ -336,7 +338,8 @@ Updates will affect running instances.`, Comment: `MaxQueueCommP is the maximum number of pipelines that can be queued at the CommP (verify) stage, waiting for a machine to pick up their verification task (owner_id is null). 
If this limit is exceeded, the system will apply backpressure, delaying new deal processing. -0 means unlimited. (Default: 8)`, +0 means unlimited. (Default: 8) +Updates will affect running instances.`, }, { Name: "MaxQueueDealSector", @@ -346,7 +349,8 @@ If this limit is exceeded, the system will apply backpressure, delaying new deal 0 = unlimited Note: This mechanism will delay taking deal data from markets, providing backpressure to the market subsystem. The DealSector queue includes deals that are ready to enter the sealing pipeline but are not yet part of it. -DealSector queue is the first queue in the sealing pipeline, making it the primary backpressure mechanism. (Default: 8)`, +DealSector queue is the first queue in the sealing pipeline, making it the primary backpressure mechanism. (Default: 8) +Updates will affect running instances.`, }, { Name: "MaxQueueSDR", @@ -358,7 +362,8 @@ Note: This mechanism will delay taking deal data from markets, providing backpre The SDR queue includes deals which are in the process of entering the sealing pipeline. In case of the SDR tasks it is possible that this queue grows more than this limit(CC sectors), the backpressure is only applied to sectors entering the pipeline. -Only applies to PoRep pipeline (DoSnap = false) (Default: 8)`, +Only applies to PoRep pipeline (DoSnap = false) (Default: 8) +Updates will affect running instances.`, }, { Name: "MaxQueueTrees", @@ -369,7 +374,8 @@ Only applies to PoRep pipeline (DoSnap = false) (Default: 8)`, Note: This mechanism will delay taking deal data from markets, providing backpressure to the market subsystem. In case of the trees tasks it is possible that this queue grows more than this limit, the backpressure is only applied to sectors entering the pipeline. 
-Only applies to PoRep pipeline (DoSnap = false) (Default: 0)`, +Only applies to PoRep pipeline (DoSnap = false) (Default: 0) +Updates will affect running instances.`, }, { Name: "MaxQueuePoRep", @@ -380,7 +386,8 @@ Only applies to PoRep pipeline (DoSnap = false) (Default: 0)`, Note: This mechanism will delay taking deal data from markets, providing backpressure to the market subsystem. Like with the trees tasks, it is possible that this queue grows more than this limit, the backpressure is only applied to sectors entering the pipeline. -Only applies to PoRep pipeline (DoSnap = false) (Default: 0)`, +Only applies to PoRep pipeline (DoSnap = false) (Default: 0) +Updates will affect running instances.`, }, { Name: "MaxQueueSnapEncode", @@ -389,7 +396,8 @@ Only applies to PoRep pipeline (DoSnap = false) (Default: 0)`, Comment: `MaxQueueSnapEncode is the maximum number of sectors that can be queued waiting for UpdateEncode tasks to start. 0 means unlimited. This applies backpressure to the market subsystem by delaying the ingestion of deal data. -Only applies to the Snap Deals pipeline (DoSnap = true). (Default: 16)`, +Only applies to the Snap Deals pipeline (DoSnap = true). (Default: 16) +Updates will affect running instances.`, }, { Name: "MaxQueueSnapProve", @@ -397,7 +405,8 @@ Only applies to the Snap Deals pipeline (DoSnap = true). (Default: 16)`, Comment: `MaxQueueSnapProve is the maximum number of sectors that can be queued waiting for UpdateProve to start processing. 0 means unlimited. -This applies backpressure in the Snap Deals pipeline (DoSnap = true) by delaying new deal ingestion. (Default: 0)`, +This applies backpressure in the Snap Deals pipeline (DoSnap = true) by delaying new deal ingestion. 
(Default: 0) +Updates will affect running instances.`, }, { Name: "MaxDealWaitTime", @@ -405,7 +414,8 @@ This applies backpressure in the Snap Deals pipeline (DoSnap = true) by delaying Comment: `Maximum time an open deal sector should wait for more deals before it starts sealing. This ensures that sectors don't remain open indefinitely, consuming resources. -Time duration string (e.g., "1h2m3s") in TOML format. (Default: "1h0m0s")`, +Time duration string (e.g., "1h2m3s") in TOML format. (Default: "1h0m0s") +Updates will affect running instances.`, }, { Name: "DoSnap", diff --git a/deps/config/dynamic.go b/deps/config/dynamic.go index 0d3be5ec8..a0a9e429e 100644 --- a/deps/config/dynamic.go +++ b/deps/config/dynamic.go @@ -9,8 +9,8 @@ import ( "sync" "time" - "github.com/BurntSushi/toml" "github.com/google/go-cmp/cmp" + "github.com/google/go-cmp/cmp/cmpopts" logging "github.com/ipfs/go-log/v2" "github.com/filecoin-project/curio/harmony/harmonydb" @@ -18,11 +18,14 @@ import ( var logger = logging.Logger("config-dynamic") -// bigIntComparer is used to compare big.Int values properly -var bigIntComparer = cmp.Comparer(func(x, y big.Int) bool { +// BigIntComparer is used to compare big.Int values properly +var BigIntComparer = cmp.Comparer(func(x, y big.Int) bool { return x.Cmp(&y) == 0 }) +// Dynamic is a wrapper for configuration values that can change at runtime. +// Use Get() and Set() methods to access the value with proper synchronization +// and change detection. type Dynamic[T any] struct { value T } @@ -61,24 +64,14 @@ func (d *Dynamic[T]) Get() T { return d.value } -// UnmarshalText unmarshals the text into the dynamic value. -// After initial setting, future updates require a lock on the DynamicMx mutex before calling toml.Decode. -func (d *Dynamic[T]) UnmarshalText(text []byte) error { - return toml.Unmarshal(text, &d.value) -} - -// MarshalTOML marshals the dynamic value to TOML format. -// If used from deps, requires a lock. 
-func (d *Dynamic[T]) MarshalTOML() ([]byte, error) { - return toml.Marshal(d.value) -} - // Equal is used by cmp.Equal for custom comparison. // If used from deps, requires a lock. func (d *Dynamic[T]) Equal(other *Dynamic[T]) bool { - return cmp.Equal(d.value, other.value, bigIntComparer) + return cmp.Equal(d.value, other.value, BigIntComparer, cmpopts.EquateEmpty()) } +// MarshalTOML cannot be implemented for struct types because it won't be boxed correctly. + type cfgRoot[T any] struct { db *harmonydb.DB layers []string @@ -241,7 +234,7 @@ func (c *changeNotifier) Unlock() { c.updating = false for k, v := range c.latest { - if !cmp.Equal(v, c.originally[k], bigIntComparer) { + if !cmp.Equal(v, c.originally[k], BigIntComparer) { if notifier := c.notifier[k]; notifier != nil { go notifier() } diff --git a/deps/config/dynamic_test.go b/deps/config/dynamic_test.go index 26d1e345d..87fa3f904 100644 --- a/deps/config/dynamic_test.go +++ b/deps/config/dynamic_test.go @@ -42,22 +42,34 @@ func TestDynamic(t *testing.T) { }, 10*time.Second, 100*time.Millisecond) } -func TestDynamicUnmarshalText(t *testing.T) { +func TestDynamicUnmarshalTOML(t *testing.T) { + type TestConfig struct { Name string Value int } + type Wrapper struct { + Config *Dynamic[TestConfig] + } + + // Create a config and marshal it using TransparentMarshal + w1 := Wrapper{Config: NewDynamic(TestConfig{Name: "test", Value: 42})} + data, err := TransparentMarshal(w1) + assert.NoError(t, err) + t.Logf("Generated TOML (%d bytes):\n%s", len(data), string(data)) - d := NewDynamic(TestConfig{}) - tomlData := []byte(` -Name = "test" -Value = 42 -`) + // Verify the config value before marshaling + assert.Equal(t, "test", w1.Config.Get().Name, "Original config should have correct name") + assert.Equal(t, 42, w1.Config.Get().Value, "Original config should have correct value") - err := d.UnmarshalText(tomlData) + // Unmarshal it back using TransparentUnmarshal + var w2 Wrapper + w2.Config = 
NewDynamic(TestConfig{}) + err = TransparentUnmarshal(data, &w2) assert.NoError(t, err) - result := d.Get() + result := w2.Config.Get() + assert.Equal(t, "test", result.Name) assert.Equal(t, 42, result.Value) } @@ -85,8 +97,9 @@ func TestDynamicWithBigInt(t *testing.T) { assert.False(t, d1.Equal(d3), "Different FIL values should not be equal") // Test that cmp.Equal works with bigIntComparer - assert.True(t, cmp.Equal(d1.Get(), d2.Get(), bigIntComparer), "cmp.Equal should work with bigIntComparer") - assert.False(t, cmp.Equal(d1.Get(), d3.Get(), bigIntComparer), "cmp.Equal should detect differences") + assert.True(t, cmp.Equal(d1.Get(), d2.Get(), BigIntComparer), "cmp.Equal should work with bigIntComparer") + assert.False(t, cmp.Equal(d1.Get(), d3.Get(), BigIntComparer), "cmp.Equal should detect differences") + } func TestDynamicChangeNotificationWithBigInt(t *testing.T) { @@ -119,3 +132,78 @@ func TestDynamicChangeNotificationWithBigInt(t *testing.T) { time.Sleep(200 * time.Millisecond) assert.False(t, notified.Load(), "OnChange should not be called when BigInt value stays the same") } + +func TestDynamicMarshalSlice(t *testing.T) { + // Test that Dynamic wrapping a slice can be marshaled to TOML + type Address struct { + Name string + URL string + } + + type ConfigWithSlice struct { + Addresses *Dynamic[[]Address] + } + + cfg := ConfigWithSlice{ + Addresses: NewDynamic([]Address{ + {Name: "addr1", URL: "http://example.com"}, + {Name: "addr2", URL: "http://example.org"}, + }), + } + + // Test that the full struct can be marshaled to TOML using TransparentMarshal + data, err := TransparentMarshal(cfg) + assert.NoError(t, err, "Should be able to marshal config with Dynamic slice to TOML") + + // Test round-trip: unmarshal back using TransparentUnmarshal + var cfg2 ConfigWithSlice + cfg2.Addresses = NewDynamic([]Address{}) + err = TransparentUnmarshal(data, &cfg2) + assert.NoError(t, err, "Should be able to unmarshal config with Dynamic slice from TOML") + + // 
Verify the data is correct + assert.Len(t, cfg2.Addresses.Get(), 2) + assert.Equal(t, "addr1", cfg2.Addresses.Get()[0].Name) + assert.Equal(t, "http://example.com", cfg2.Addresses.Get()[0].URL) + assert.Equal(t, "addr2", cfg2.Addresses.Get()[1].Name) +} + +func TestDefaultCurioConfigMarshal(t *testing.T) { + // Test that the default config with Dynamic fields can be marshaled + cfg := DefaultCurioConfig() + + // This should not panic or error using TransparentMarshal + data, err := TransparentMarshal(cfg) + assert.NoError(t, err, "Should be able to marshal DefaultCurioConfig to TOML") + assert.NotEmpty(t, data) + t.Logf("Successfully marshaled config to %d bytes of TOML", len(data)) +} + +func TestCurioConfigRoundTrip(t *testing.T) { + // Test full marshal/unmarshal round-trip with CurioConfig + cfg1 := DefaultCurioConfig() + + // Modify some Dynamic values to test they persist + cfg1.Ingest.MaxQueueDownload.Set(16) + cfg1.Ingest.MaxMarketRunningPipelines.Set(32) + + // Marshal to TOML using TransparentMarshal + data, err := TransparentMarshal(cfg1) + assert.NoError(t, err, "Should be able to marshal config") + t.Logf("Marshaled %d bytes", len(data)) + + // Unmarshal back to a new config (starting with defaults to initialize FIL types properly) + cfg2 := DefaultCurioConfig() + err = TransparentUnmarshal(data, cfg2) + assert.NoError(t, err, "Should be able to unmarshal config back") + + // Verify Dynamic values were preserved + assert.Equal(t, 16, cfg2.Ingest.MaxQueueDownload.Get(), "MaxQueueDownload should be preserved") + assert.Equal(t, 32, cfg2.Ingest.MaxMarketRunningPipelines.Get(), "MaxMarketRunningPipelines should be preserved") + + // Verify the Addresses Dynamic slice was preserved + assert.Equal(t, len(cfg1.Addresses.Get()), len(cfg2.Addresses.Get()), "Addresses slice length should match") + + // Verify static fields were preserved + assert.Equal(t, cfg1.Subsystems.GuiAddress, cfg2.Subsystems.GuiAddress) +} diff --git a/deps/config/dynamic_toml.go 
b/deps/config/dynamic_toml.go new file mode 100644 index 000000000..2a04b93af --- /dev/null +++ b/deps/config/dynamic_toml.go @@ -0,0 +1,375 @@ +package config + +import ( + "reflect" + "strings" + + "github.com/BurntSushi/toml" +) + +// TransparentMarshal marshals a struct to TOML, treating Dynamic[T] fields transparently. +// Dynamic[T] fields are unwrapped and their inner values are marshaled directly. +func TransparentMarshal(v interface{}) ([]byte, error) { + // Create a shadow struct with Dynamic fields unwrapped + shadow := unwrapDynamics(v) + return toml.Marshal(shadow) +} + +// TransparentUnmarshal unmarshals TOML into a struct, treating Dynamic[T] fields transparently. +// Values are decoded into temporary structs then wrapped in Dynamic. +// NOTE: For types like types.FIL with unexported pointer fields, the target must be pre-initialized +// with default values (e.g., types.MustParseFIL("0")) before calling this function. +func TransparentUnmarshal(data []byte, v interface{}) error { + _, err := TransparentDecode(string(data), v) + return err +} + +// TransparentDecode decodes TOML into a struct, treating Dynamic[T] fields transparently. +// Like toml.Decode, it returns MetaData for checking which fields were set. +// NOTE: For types like types.FIL with unexported pointer fields, the target must be pre-initialized +// with default values (e.g., types.MustParseFIL("0")) before calling this function. +// NOTE: FixTOML should be called BEFORE this function to ensure proper slice lengths and FIL initialization. 
+func TransparentDecode(data string, v interface{}) (toml.MetaData, error) { + // Create a shadow struct to decode into + shadow := createShadowStruct(v) + + // Initialize shadow with values from target (for types like FIL that need non-nil pointers) + // This copies FIL values that were initialized by FixTOML + initializeShadowFromTarget(shadow, v) + + // Decode into shadow and get metadata + // Note: TOML will overwrite slice elements, but our initialized FIL fields will be preserved + // because TOML calls UnmarshalText on FIL types, which requires non-nil pointers + md, err := toml.Decode(data, shadow) + if err != nil { + return md, err + } + + // Copy values from shadow to Dynamic fields + err = wrapDynamics(shadow, v) + return md, err +} + +// initializeShadowFromTarget copies initialized values from target to shadow +// This is needed for types like types.FIL that require non-nil internal pointers +func initializeShadowFromTarget(shadow, target interface{}) { + shadowVal := reflect.ValueOf(shadow) + targetVal := reflect.ValueOf(target) + + if shadowVal.Kind() == reflect.Ptr { + shadowVal = shadowVal.Elem() + } + if targetVal.Kind() == reflect.Ptr { + targetVal = targetVal.Elem() + } + + // Ensure we have structs to work with + if shadowVal.Kind() != reflect.Struct || targetVal.Kind() != reflect.Struct { + return + } + + for i := 0; i < targetVal.NumField(); i++ { + targetField := targetVal.Field(i) + shadowField := shadowVal.Field(i) + + if !shadowField.CanSet() || !targetField.IsValid() { + continue + } + + if isDynamicTypeForMarshal(targetField.Type()) { + // For Dynamic fields, copy the inner initialized value to the unwrapped shadow field + innerVal := extractDynamicValue(targetField) + if innerVal.IsValid() && innerVal.CanInterface() { + // Copy the value (including slices with initialized FIL elements from FixTOML) + val := innerVal.Interface() + valReflect := reflect.ValueOf(val) + if valReflect.Type().AssignableTo(shadowField.Type()) { + 
shadowField.Set(valReflect) + } + } + } else if targetField.Kind() == reflect.Struct && hasNestedDynamics(targetField.Type()) { + // For nested structs with Dynamic fields, recursively initialize + initializeShadowFromTarget(shadowField.Addr().Interface(), targetField.Addr().Interface()) + } else if targetField.Kind() == reflect.Ptr && !targetField.IsNil() && !shadowField.IsNil() { + // Handle pointers to structs + elemType := targetField.Type().Elem() + if elemType.Kind() == reflect.Struct && hasNestedDynamics(elemType) { + // Recursively initialize pointer to struct with Dynamic fields + initializeShadowFromTarget(shadowField.Elem().Addr().Interface(), targetField.Elem().Addr().Interface()) + } else if targetField.CanInterface() && shadowField.Type() == targetField.Type() { + // Copy regular pointer if types match + shadowField.Set(reflect.ValueOf(targetField.Interface())) + } + } else if targetField.CanInterface() && shadowField.Type() == targetField.Type() { + // Copy regular fields only if types match exactly + shadowField.Set(reflect.ValueOf(targetField.Interface())) + } + } +} + +// unwrapDynamics recursively unwraps Dynamic[T] fields for marshaling +func unwrapDynamics(v interface{}) interface{} { + rv := reflect.ValueOf(v) + if rv.Kind() == reflect.Ptr { + rv = rv.Elem() + } + + if rv.Kind() != reflect.Struct { + return v + } + + // Check if this struct has any Dynamic fields - if not, return as-is + if !hasNestedDynamics(rv.Type()) { + return v + } + + // Create a new struct with same fields but Dynamic unwrapped + shadowType := createShadowType(rv.Type()) + shadowVal := reflect.New(shadowType).Elem() + + for i := 0; i < rv.NumField(); i++ { + field := rv.Field(i) + shadowField := shadowVal.Field(i) + + if !shadowField.CanSet() { + continue + } + + if isDynamicTypeForMarshal(field.Type()) { + // Extract inner value from Dynamic + innerVal := extractDynamicValue(field) + if innerVal.IsValid() { + shadowField.Set(innerVal) + } + } else if field.Kind() == 
reflect.Struct && hasNestedDynamics(field.Type()) { + // Only recursively unwrap structs that contain Dynamic fields + shadowField.Set(reflect.ValueOf(unwrapDynamics(field.Interface()))) + } else if field.Kind() == reflect.Ptr && !field.IsNil() { + // Handle pointers - check if the pointed-to type contains Dynamic fields + elemType := field.Type().Elem() + if elemType.Kind() == reflect.Struct && hasNestedDynamics(elemType) { + // Recursively unwrap the pointed-to struct + unwrapped := unwrapDynamics(field.Elem().Interface()) + unwrappedPtr := reflect.New(reflect.TypeOf(unwrapped)) + unwrappedPtr.Elem().Set(reflect.ValueOf(unwrapped)) + shadowField.Set(unwrappedPtr) + } else if field.IsValid() && field.CanInterface() { + // Regular pointer - copy as-is + val := field.Interface() + shadowField.Set(reflect.ValueOf(val)) + } + } else if field.IsValid() && field.CanInterface() { + // Copy all other fields via interface (handles types with unexported fields) + val := field.Interface() + shadowField.Set(reflect.ValueOf(val)) + } + } + + return shadowVal.Interface() +} + +// createShadowStruct creates a struct for unmarshaling where Dynamic fields are their inner types +func createShadowStruct(v interface{}) interface{} { + rv := reflect.ValueOf(v) + if rv.Kind() == reflect.Ptr { + rv = rv.Elem() + } + + shadowType := createShadowType(rv.Type()) + return reflect.New(shadowType).Interface() +} + +// createShadowType creates a type with Dynamic[T] fields replaced by T +func createShadowType(t reflect.Type) reflect.Type { + if t.Kind() == reflect.Ptr { + return reflect.PointerTo(createShadowType(t.Elem())) + } + + if t.Kind() != reflect.Struct { + return t + } + + var fields []reflect.StructField + for i := 0; i < t.NumField(); i++ { + field := t.Field(i) + newField := field + + if isDynamicTypeForMarshal(field.Type) { + // Replace Dynamic[T] with T + innerType := extractDynamicInnerType(field.Type) + newField.Type = innerType + } else if field.Type.Kind() == reflect.Struct 
&& hasNestedDynamics(field.Type) { + // Only recursively modify structs that contain Dynamic fields + newField.Type = createShadowType(field.Type) + } else if field.Type.Kind() == reflect.Ptr && field.Type.Elem().Kind() == reflect.Struct { + if hasNestedDynamics(field.Type.Elem()) { + newField.Type = reflect.PointerTo(createShadowType(field.Type.Elem())) + } + } + // For all other types (including structs without Dynamic fields), keep original type + + fields = append(fields, newField) + } + + return reflect.StructOf(fields) +} + +// wrapDynamics copies values from shadow struct to Dynamic fields +func wrapDynamics(shadow, target interface{}) error { + shadowVal := reflect.ValueOf(shadow) + targetVal := reflect.ValueOf(target) + + if shadowVal.Kind() == reflect.Ptr { + shadowVal = shadowVal.Elem() + } + if targetVal.Kind() == reflect.Ptr { + targetVal = targetVal.Elem() + } + + // Ensure we have structs to work with + if shadowVal.Kind() != reflect.Struct || targetVal.Kind() != reflect.Struct { + return nil + } + + for i := 0; i < targetVal.NumField(); i++ { + targetField := targetVal.Field(i) + shadowField := shadowVal.Field(i) + + if !targetField.CanSet() { + continue + } + + if isDynamicTypeForMarshal(targetField.Type()) { + // Wrap value in Dynamic using Set method + setDynamicValue(targetField, shadowField) + } else if targetField.Kind() == reflect.Struct && hasNestedDynamics(targetField.Type()) { + // Recursively handle nested structs that have Dynamic fields + err := wrapDynamics(shadowField.Interface(), targetField.Addr().Interface()) + if err != nil { + return err + } + } else if targetField.Kind() == reflect.Ptr && !targetField.IsNil() && !shadowField.IsNil() { + // Handle pointers to structs + elemType := targetField.Type().Elem() + if elemType.Kind() == reflect.Struct && hasNestedDynamics(elemType) { + // Recursively handle pointer to struct with Dynamic fields + err := wrapDynamics(shadowField.Elem().Interface(), targetField.Elem().Addr().Interface()) 
+ if err != nil { + return err + } + } else if shadowField.IsValid() && targetField.CanSet() { + // Regular pointer - copy as-is if types match + if shadowField.Type().AssignableTo(targetField.Type()) { + targetField.Set(shadowField) + } + } + } else { + // Copy regular fields - don't copy if types don't match (shouldn't happen) + if shadowField.IsValid() && targetField.CanSet() { + if shadowField.Type().AssignableTo(targetField.Type()) { + targetField.Set(shadowField) + } + } + } + } + + return nil +} + +// isDynamicTypeForMarshal checks if a type is Dynamic[T] +// (renamed to avoid conflict with isDynamicType in dynamic.go) +func isDynamicTypeForMarshal(t reflect.Type) bool { + // Handle pointer to Dynamic + if t.Kind() == reflect.Ptr { + t = t.Elem() + } + name := t.Name() + return strings.HasPrefix(name, "Dynamic[") +} + +// hasNestedDynamics checks if a struct type contains any Dynamic[T] fields +func hasNestedDynamics(t reflect.Type) bool { + if t.Kind() == reflect.Ptr { + t = t.Elem() + } + if t.Kind() != reflect.Struct { + return false + } + + for i := 0; i < t.NumField(); i++ { + fieldType := t.Field(i).Type + if isDynamicTypeForMarshal(fieldType) { + return true + } + // Check nested structs recursively + if fieldType.Kind() == reflect.Struct && hasNestedDynamics(fieldType) { + return true + } + if fieldType.Kind() == reflect.Ptr && fieldType.Elem().Kind() == reflect.Struct { + if hasNestedDynamics(fieldType.Elem()) { + return true + } + } + } + return false +} + +// extractDynamicValue gets the inner value from a Dynamic[T] using reflection +func extractDynamicValue(v reflect.Value) reflect.Value { + if v.Kind() == reflect.Ptr { + if v.IsNil() { + return reflect.Value{} + } + v = v.Elem() + } + + // Call Get() method + getMethod := v.Addr().MethodByName("Get") + if !getMethod.IsValid() { + return reflect.Value{} + } + + results := getMethod.Call(nil) + if len(results) == 0 { + return reflect.Value{} + } + + return results[0] +} + +// 
extractDynamicInnerType gets the T from Dynamic[T] +func extractDynamicInnerType(t reflect.Type) reflect.Type { + // Handle pointer to Dynamic + if t.Kind() == reflect.Ptr { + t = t.Elem() + } + + // For Dynamic[T], we need to look at the value field + if t.Kind() == reflect.Struct && t.NumField() > 0 { + // The first field is 'value T' + return t.Field(0).Type + } + + return t +} + +// setDynamicValue sets a Dynamic[T] field using its Set method +func setDynamicValue(dynamicField, valueField reflect.Value) { + if dynamicField.Kind() == reflect.Ptr { + if dynamicField.IsNil() { + // Create new Dynamic instance + dynamicField.Set(reflect.New(dynamicField.Type().Elem())) + } + dynamicField = dynamicField.Elem() + } + + // Call Set() method + setMethod := dynamicField.Addr().MethodByName("Set") + if !setMethod.IsValid() { + return + } + + if valueField.IsValid() { + setMethod.Call([]reflect.Value{valueField}) + } +} diff --git a/deps/config/dynamic_toml_test.go b/deps/config/dynamic_toml_test.go new file mode 100644 index 000000000..b49d45064 --- /dev/null +++ b/deps/config/dynamic_toml_test.go @@ -0,0 +1,1875 @@ +package config + +import ( + "reflect" + "testing" + "time" + + "github.com/BurntSushi/toml" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + + "github.com/filecoin-project/lotus/chain/types" +) + +func TestTransparentMarshalUnmarshal(t *testing.T) { + t.Run("simple types", func(t *testing.T) { + type Config struct { + Regular int + Dynamic *Dynamic[int] + After string + } + + cfg1 := Config{ + Regular: 10, + Dynamic: NewDynamic(42), + After: "test", + } + + data, err := TransparentMarshal(cfg1) + assert.NoError(t, err) + t.Logf("Marshaled:\n%s", string(data)) + + // Check it's truly transparent (no nesting) + assert.Contains(t, string(data), "Dynamic = 42") + assert.NotContains(t, string(data), "[Dynamic]") + + // Unmarshal + var cfg2 Config + cfg2.Dynamic = NewDynamic(0) + err = TransparentUnmarshal(data, &cfg2) + 
assert.NoError(t, err) + + assert.Equal(t, 10, cfg2.Regular) + assert.Equal(t, 42, cfg2.Dynamic.Get()) + assert.Equal(t, "test", cfg2.After) + }) + + t.Run("struct types", func(t *testing.T) { + type Inner struct { + Name string + Count int + } + + type Config struct { + Field *Dynamic[Inner] + } + + cfg1 := Config{ + Field: NewDynamic(Inner{Name: "test", Count: 99}), + } + + data, err := TransparentMarshal(cfg1) + assert.NoError(t, err) + t.Logf("Marshaled:\n%s", string(data)) + + // Should have Field.Name and Field.Count directly + assert.Contains(t, string(data), "[Field]") + assert.Contains(t, string(data), "Name = \"test\"") + assert.Contains(t, string(data), "Count = 99") + + var cfg2 Config + cfg2.Field = NewDynamic(Inner{}) + err = TransparentUnmarshal(data, &cfg2) + assert.NoError(t, err) + + assert.Equal(t, "test", cfg2.Field.Get().Name) + assert.Equal(t, 99, cfg2.Field.Get().Count) + }) + + t.Run("slice types", func(t *testing.T) { + type Config struct { + Items *Dynamic[[]string] + } + + cfg1 := Config{ + Items: NewDynamic([]string{"a", "b", "c"}), + } + + data, err := TransparentMarshal(cfg1) + assert.NoError(t, err) + t.Logf("Marshaled:\n%s", string(data)) + + assert.Contains(t, string(data), "Items = [\"a\", \"b\", \"c\"]") + + var cfg2 Config + cfg2.Items = NewDynamic([]string{}) + err = TransparentUnmarshal(data, &cfg2) + assert.NoError(t, err) + + assert.Equal(t, []string{"a", "b", "c"}, cfg2.Items.Get()) + }) + + t.Run("time.Duration types", func(t *testing.T) { + type Config struct { + Timeout *Dynamic[time.Duration] + } + + cfg1 := Config{ + Timeout: NewDynamic(5 * time.Minute), + } + + data, err := TransparentMarshal(cfg1) + assert.NoError(t, err) + t.Logf("Marshaled:\n%s", string(data)) + + assert.Contains(t, string(data), "Timeout = \"5m0s\"") + + var cfg2 Config + cfg2.Timeout = NewDynamic(time.Duration(0)) + err = TransparentUnmarshal(data, &cfg2) + assert.NoError(t, err) + + assert.Equal(t, 5*time.Minute, cfg2.Timeout.Get()) + }) +} + 
+func TestTransparentMarshalCurioIngest(t *testing.T) { + // Test with a subset of CurioIngestConfig + type TestIngest struct { + MaxMarketRunningPipelines *Dynamic[int] + MaxQueueDownload *Dynamic[int] + MaxDealWaitTime *Dynamic[time.Duration] + } + + cfg1 := TestIngest{ + MaxMarketRunningPipelines: NewDynamic(64), + MaxQueueDownload: NewDynamic(8), + MaxDealWaitTime: NewDynamic(time.Hour), + } + + data, err := TransparentMarshal(cfg1) + assert.NoError(t, err) + t.Logf("Marshaled:\n%s", string(data)) + + // Verify transparency - should be flat keys + assert.Contains(t, string(data), "MaxMarketRunningPipelines = 64") + assert.Contains(t, string(data), "MaxQueueDownload = 8") + assert.NotContains(t, string(data), "[MaxMarketRunningPipelines]") + + var cfg2 TestIngest + cfg2.MaxMarketRunningPipelines = NewDynamic(0) + cfg2.MaxQueueDownload = NewDynamic(0) + cfg2.MaxDealWaitTime = NewDynamic(time.Duration(0)) + + err = TransparentUnmarshal(data, &cfg2) + assert.NoError(t, err) + + assert.Equal(t, 64, cfg2.MaxMarketRunningPipelines.Get()) + assert.Equal(t, 8, cfg2.MaxQueueDownload.Get()) + assert.Equal(t, time.Hour, cfg2.MaxDealWaitTime.Get()) +} + +func TestTransparentMarshalWithFIL(t *testing.T) { + // Test with FIL types - both marshal and unmarshal + type TestConfig struct { + Fee *Dynamic[types.FIL] + Amount types.FIL + RegularField int + } + + // Create config with properly initialized FIL values + cfg1 := TestConfig{ + Fee: NewDynamic(types.MustParseFIL("5 FIL")), + Amount: types.MustParseFIL("10 FIL"), + RegularField: 42, + } + + // Marshal + data, err := TransparentMarshal(cfg1) + assert.NoError(t, err) + t.Logf("Marshaled:\n%s", string(data)) + + // Verify transparency - Fee should be flat + assert.Contains(t, string(data), `Fee = "5 FIL"`) + assert.Contains(t, string(data), `Amount = "10 FIL"`) + assert.Contains(t, string(data), "RegularField = 42") + + // Unmarshal - key is to pre-initialize FIL fields with proper values + cfg2 := TestConfig{ + Fee: 
NewDynamic(types.MustParseFIL("0")), // Initialize with zero FIL + Amount: types.MustParseFIL("0"), // Initialize with zero FIL + } + + err = TransparentUnmarshal(data, &cfg2) + assert.NoError(t, err) + + // Verify values were unmarshaled correctly + assert.Equal(t, "5 FIL", cfg2.Fee.Get().String()) + assert.Equal(t, "10 FIL", cfg2.Amount.String()) + assert.Equal(t, 42, cfg2.RegularField) +} + +func TestTransparentMarshalBatchFeeConfig(t *testing.T) { + // Test with actual BatchFeeConfig from types.go + type TestBatchConfig struct { + Base types.FIL + PerSector types.FIL + Dynamic *Dynamic[types.FIL] + } + + cfg1 := TestBatchConfig{ + Base: types.MustParseFIL("1 FIL"), + PerSector: types.MustParseFIL("0.02 FIL"), + Dynamic: NewDynamic(types.MustParseFIL("0.5 FIL")), + } + + data, err := TransparentMarshal(cfg1) + assert.NoError(t, err) + t.Logf("Marshaled:\n%s", string(data)) + + // Verify all fields are present and transparent + assert.Contains(t, string(data), `Base = "1 FIL"`) + assert.Contains(t, string(data), `PerSector = "0.02 FIL"`) + assert.Contains(t, string(data), `Dynamic = "0.5 FIL"`) + + // Unmarshal with proper initialization + cfg2 := TestBatchConfig{ + Base: types.MustParseFIL("0"), + PerSector: types.MustParseFIL("0"), + Dynamic: NewDynamic(types.MustParseFIL("0")), + } + + err = TransparentUnmarshal(data, &cfg2) + assert.NoError(t, err) + + assert.Equal(t, "1 FIL", cfg2.Base.String()) + assert.Equal(t, "0.02 FIL", cfg2.PerSector.String()) + assert.Equal(t, "0.5 FIL", cfg2.Dynamic.Get().String()) +} + +// TestTransparentDecode tests the TransparentDecode function with MetaData +func TestTransparentDecode(t *testing.T) { + t.Run("basic decode with metadata", func(t *testing.T) { + type Config struct { + Field1 *Dynamic[int] + Field2 *Dynamic[string] + Field3 int + } + + tomlData := ` +Field1 = 42 +Field2 = "hello" +` + + cfg := Config{ + Field1: NewDynamic(0), + Field2: NewDynamic(""), + Field3: 99, + } + + md, err := TransparentDecode(tomlData, 
&cfg) + require.NoError(t, err) + + // Check values + assert.Equal(t, 42, cfg.Field1.Get()) + assert.Equal(t, "hello", cfg.Field2.Get()) + assert.Equal(t, 99, cfg.Field3) // Not set in TOML + + // Check metadata + assert.True(t, md.IsDefined("Field1")) + assert.True(t, md.IsDefined("Field2")) + assert.False(t, md.IsDefined("Field3")) + }) + + t.Run("decode with nested struct", func(t *testing.T) { + type Inner struct { + Name string + Age int + } + + type Config struct { + Person *Dynamic[Inner] + } + + tomlData := ` +[Person] +Name = "Alice" +Age = 30 +` + + cfg := Config{ + Person: NewDynamic(Inner{}), + } + + md, err := TransparentDecode(tomlData, &cfg) + require.NoError(t, err) + + assert.Equal(t, "Alice", cfg.Person.Get().Name) + assert.Equal(t, 30, cfg.Person.Get().Age) + assert.True(t, md.IsDefined("Person")) + }) + + t.Run("decode with partial fields", func(t *testing.T) { + type Config struct { + A *Dynamic[int] + B *Dynamic[int] + C *Dynamic[int] + } + + tomlData := ` +A = 1 +C = 3 +` + + cfg := Config{ + A: NewDynamic(0), + B: NewDynamic(99), // Should remain unchanged + C: NewDynamic(0), + } + + md, err := TransparentDecode(tomlData, &cfg) + require.NoError(t, err) + + assert.Equal(t, 1, cfg.A.Get()) + assert.Equal(t, 99, cfg.B.Get()) // Unchanged + assert.Equal(t, 3, cfg.C.Get()) + + assert.True(t, md.IsDefined("A")) + assert.False(t, md.IsDefined("B")) + assert.True(t, md.IsDefined("C")) + }) + + t.Run("decode with invalid TOML", func(t *testing.T) { + type Config struct { + Field *Dynamic[int] + } + + tomlData := ` +Field = [invalid +` + + cfg := Config{ + Field: NewDynamic(0), + } + + _, err := TransparentDecode(tomlData, &cfg) + assert.Error(t, err) + }) +} + +// TestNestedStructsWithDynamics tests nested struct handling +func TestNestedStructsWithDynamics(t *testing.T) { + t.Run("nested struct with dynamic fields", func(t *testing.T) { + type Inner struct { + Value *Dynamic[int] + } + + type Outer struct { + Inner Inner + Count *Dynamic[int] + } + 
+ cfg1 := Outer{ + Inner: Inner{ + Value: NewDynamic(42), + }, + Count: NewDynamic(10), + } + + data, err := TransparentMarshal(cfg1) + require.NoError(t, err) + t.Logf("Marshaled:\n%s", string(data)) + + assert.Contains(t, string(data), "Count = 10") + assert.Contains(t, string(data), "[Inner]") + assert.Contains(t, string(data), "Value = 42") + + cfg2 := Outer{ + Inner: Inner{ + Value: NewDynamic(0), + }, + Count: NewDynamic(0), + } + + err = TransparentUnmarshal(data, &cfg2) + require.NoError(t, err) + + assert.Equal(t, 42, cfg2.Inner.Value.Get()) + assert.Equal(t, 10, cfg2.Count.Get()) + }) + + t.Run("deeply nested dynamics", func(t *testing.T) { + type Level3 struct { + Deep *Dynamic[string] + } + + type Level2 struct { + Mid *Dynamic[int] + Level Level3 + } + + type Level1 struct { + Top *Dynamic[bool] + Level Level2 + } + + cfg1 := Level1{ + Top: NewDynamic(true), + Level: Level2{ + Mid: NewDynamic(99), + Level: Level3{ + Deep: NewDynamic("deepvalue"), + }, + }, + } + + data, err := TransparentMarshal(cfg1) + require.NoError(t, err) + + cfg2 := Level1{ + Top: NewDynamic(false), + Level: Level2{ + Mid: NewDynamic(0), + Level: Level3{ + Deep: NewDynamic(""), + }, + }, + } + + err = TransparentUnmarshal(data, &cfg2) + require.NoError(t, err) + + assert.Equal(t, true, cfg2.Top.Get()) + assert.Equal(t, 99, cfg2.Level.Mid.Get()) + assert.Equal(t, "deepvalue", cfg2.Level.Level.Deep.Get()) + }) + + t.Run("pointer to struct with dynamics", func(t *testing.T) { + // This test verifies that pointers to structs containing Dynamic fields + // are handled correctly, even though the unwrapping logic needs to be + // careful with pointer types + type Inner struct { + Value *Dynamic[int] + Name string + } + + type Outer struct { + InnerPtr *Inner + Count *Dynamic[int] + } + + cfg1 := Outer{ + InnerPtr: &Inner{ + Value: NewDynamic(42), + Name: "test", + }, + Count: NewDynamic(10), + } + + // For now, we just test that marshal works + // The pointer to struct case is a known 
limitation + data, err := TransparentMarshal(cfg1) + require.NoError(t, err) + t.Logf("Marshaled:\n%s", string(data)) + + // Check the marshaled output + assert.Contains(t, string(data), "Count = 10") + assert.Contains(t, string(data), "[InnerPtr]") + + // For unmarshal with pointer to struct, we need to ensure proper setup + cfg2 := Outer{ + InnerPtr: &Inner{ + Value: NewDynamic(0), + Name: "", + }, + Count: NewDynamic(0), + } + + err = TransparentUnmarshal(data, &cfg2) + require.NoError(t, err) + + assert.NotNil(t, cfg2.InnerPtr) + assert.Equal(t, 42, cfg2.InnerPtr.Value.Get()) + assert.Equal(t, "test", cfg2.InnerPtr.Name) + assert.Equal(t, 10, cfg2.Count.Get()) + }) +} + +// TestEdgeCases tests edge cases +func TestEdgeCases(t *testing.T) { + t.Run("struct without dynamics", func(t *testing.T) { + type Config struct { + Field1 int + Field2 string + } + + cfg1 := Config{ + Field1: 42, + Field2: "test", + } + + data, err := TransparentMarshal(cfg1) + require.NoError(t, err) + + var cfg2 Config + err = TransparentUnmarshal(data, &cfg2) + require.NoError(t, err) + + assert.Equal(t, 42, cfg2.Field1) + assert.Equal(t, "test", cfg2.Field2) + }) + + t.Run("empty struct", func(t *testing.T) { + type Config struct{} + + cfg1 := Config{} + + data, err := TransparentMarshal(cfg1) + require.NoError(t, err) + + var cfg2 Config + err = TransparentUnmarshal(data, &cfg2) + require.NoError(t, err) + }) + + t.Run("all dynamic fields", func(t *testing.T) { + type Config struct { + A *Dynamic[int] + B *Dynamic[string] + C *Dynamic[bool] + } + + cfg1 := Config{ + A: NewDynamic(1), + B: NewDynamic("test"), + C: NewDynamic(true), + } + + data, err := TransparentMarshal(cfg1) + require.NoError(t, err) + + cfg2 := Config{ + A: NewDynamic(0), + B: NewDynamic(""), + C: NewDynamic(false), + } + + err = TransparentUnmarshal(data, &cfg2) + require.NoError(t, err) + + assert.Equal(t, 1, cfg2.A.Get()) + assert.Equal(t, "test", cfg2.B.Get()) + assert.Equal(t, true, cfg2.C.Get()) + }) + + 
t.Run("map types in dynamic", func(t *testing.T) { + type Config struct { + Data *Dynamic[map[string]int] + } + + cfg1 := Config{ + Data: NewDynamic(map[string]int{"a": 1, "b": 2}), + } + + data, err := TransparentMarshal(cfg1) + require.NoError(t, err) + + cfg2 := Config{ + Data: NewDynamic(map[string]int{}), + } + + err = TransparentUnmarshal(data, &cfg2) + require.NoError(t, err) + + assert.Equal(t, 1, cfg2.Data.Get()["a"]) + assert.Equal(t, 2, cfg2.Data.Get()["b"]) + }) + + t.Run("slice of structs in dynamic", func(t *testing.T) { + type Item struct { + Name string + Value int + } + + type Config struct { + Items *Dynamic[[]Item] + } + + cfg1 := Config{ + Items: NewDynamic([]Item{ + {Name: "first", Value: 1}, + {Name: "second", Value: 2}, + }), + } + + data, err := TransparentMarshal(cfg1) + require.NoError(t, err) + + cfg2 := Config{ + Items: NewDynamic([]Item{}), + } + + err = TransparentUnmarshal(data, &cfg2) + require.NoError(t, err) + + items := cfg2.Items.Get() + assert.Len(t, items, 2) + assert.Equal(t, "first", items[0].Name) + assert.Equal(t, 1, items[0].Value) + assert.Equal(t, "second", items[1].Name) + assert.Equal(t, 2, items[1].Value) + }) + + t.Run("zero values", func(t *testing.T) { + type Config struct { + IntVal *Dynamic[int] + StringVal *Dynamic[string] + BoolVal *Dynamic[bool] + } + + cfg1 := Config{ + IntVal: NewDynamic(0), + StringVal: NewDynamic(""), + BoolVal: NewDynamic(false), + } + + data, err := TransparentMarshal(cfg1) + require.NoError(t, err) + + cfg2 := Config{ + IntVal: NewDynamic(99), + StringVal: NewDynamic("default"), + BoolVal: NewDynamic(true), + } + + err = TransparentUnmarshal(data, &cfg2) + require.NoError(t, err) + + assert.Equal(t, 0, cfg2.IntVal.Get()) + assert.Equal(t, "", cfg2.StringVal.Get()) + assert.Equal(t, false, cfg2.BoolVal.Get()) + }) + + t.Run("pointer to dynamic value", func(t *testing.T) { + type Config struct { + Ptr *Dynamic[*int] + } + + val := 42 + cfg1 := Config{ + Ptr: NewDynamic(&val), + } + + 
data, err := TransparentMarshal(cfg1) + require.NoError(t, err) + + zero := 0 + cfg2 := Config{ + Ptr: NewDynamic(&zero), + } + + err = TransparentUnmarshal(data, &cfg2) + require.NoError(t, err) + + assert.Equal(t, 42, *cfg2.Ptr.Get()) + }) +} + +// TestHelperFunctions tests helper functions +func TestHelperFunctions(t *testing.T) { + t.Run("isDynamicTypeForMarshal", func(t *testing.T) { + type TestDynamic = Dynamic[int] + + // Dynamic type + dynType := reflect.TypeOf((*Dynamic[int])(nil)).Elem() + assert.True(t, isDynamicTypeForMarshal(dynType)) + + // Pointer to Dynamic type + ptrDynType := reflect.TypeOf((*Dynamic[int])(nil)) + assert.True(t, isDynamicTypeForMarshal(ptrDynType)) + + // Regular types + assert.False(t, isDynamicTypeForMarshal(reflect.TypeOf(42))) + assert.False(t, isDynamicTypeForMarshal(reflect.TypeOf("string"))) + assert.False(t, isDynamicTypeForMarshal(reflect.TypeOf(struct{}{}))) + }) + + t.Run("hasNestedDynamics", func(t *testing.T) { + type WithDynamic struct { + Field *Dynamic[int] + } + + type WithoutDynamic struct { + Field int + } + + type NestedWithDynamic struct { + Inner WithDynamic + } + + type DeepNested struct { + Level1 struct { + Level2 struct { + Field *Dynamic[string] + } + } + } + + assert.True(t, hasNestedDynamics(reflect.TypeOf(WithDynamic{}))) + assert.False(t, hasNestedDynamics(reflect.TypeOf(WithoutDynamic{}))) + assert.True(t, hasNestedDynamics(reflect.TypeOf(NestedWithDynamic{}))) + assert.True(t, hasNestedDynamics(reflect.TypeOf(DeepNested{}))) + + // Pointer to struct + assert.True(t, hasNestedDynamics(reflect.TypeOf(&WithDynamic{}))) + assert.False(t, hasNestedDynamics(reflect.TypeOf(&WithoutDynamic{}))) + }) + + t.Run("extractDynamicValue", func(t *testing.T) { + d := NewDynamic(42) + val := reflect.ValueOf(d) + + extracted := extractDynamicValue(val) + assert.True(t, extracted.IsValid()) + assert.Equal(t, 42, extracted.Interface().(int)) + + // Test with nil pointer + var nilDyn *Dynamic[int] + nilVal := 
reflect.ValueOf(nilDyn) + extracted = extractDynamicValue(nilVal) + assert.False(t, extracted.IsValid()) + }) + + t.Run("extractDynamicInnerType", func(t *testing.T) { + dynType := reflect.TypeOf((*Dynamic[int])(nil)).Elem() + innerType := extractDynamicInnerType(dynType) + assert.Equal(t, reflect.TypeOf(0), innerType) + + // Test with string + dynStrType := reflect.TypeOf((*Dynamic[string])(nil)).Elem() + innerStrType := extractDynamicInnerType(dynStrType) + assert.Equal(t, reflect.TypeOf(""), innerStrType) + + // Test with struct + type TestStruct struct { + Field int + } + dynStructType := reflect.TypeOf((*Dynamic[TestStruct])(nil)).Elem() + innerStructType := extractDynamicInnerType(dynStructType) + assert.Equal(t, reflect.TypeOf(TestStruct{}), innerStructType) + }) + + t.Run("setDynamicValue", func(t *testing.T) { + d := NewDynamic(0) + dynField := reflect.ValueOf(d) + valueField := reflect.ValueOf(99) + + setDynamicValue(dynField, valueField) + assert.Equal(t, 99, d.Get()) + + // Test with nil Dynamic (should create new instance) + var nilDyn *Dynamic[int] + nilField := reflect.ValueOf(&nilDyn).Elem() + valueField2 := reflect.ValueOf(42) + + setDynamicValue(nilField, valueField2) + assert.NotNil(t, nilDyn) + assert.Equal(t, 42, nilDyn.Get()) + }) +} + +// TestCreateShadowType tests shadow type creation +func TestCreateShadowType(t *testing.T) { + t.Run("simple dynamic replacement", func(t *testing.T) { + type Original struct { + Field *Dynamic[int] + } + + origType := reflect.TypeOf(Original{}) + shadowType := createShadowType(origType) + + assert.Equal(t, origType.NumField(), shadowType.NumField()) + + // Check that *Dynamic[int] was replaced with int + origFieldType := origType.Field(0).Type + shadowFieldType := shadowType.Field(0).Type + + assert.True(t, isDynamicTypeForMarshal(origFieldType)) + assert.False(t, isDynamicTypeForMarshal(shadowFieldType)) + // The shadow field type should be int (the inner type of Dynamic[int]) + assert.Equal(t, 
reflect.TypeOf(0), shadowFieldType) + }) + + t.Run("mixed fields", func(t *testing.T) { + type Original struct { + Regular int + Dynamic *Dynamic[string] + Another bool + } + + origType := reflect.TypeOf(Original{}) + shadowType := createShadowType(origType) + + assert.Equal(t, 3, shadowType.NumField()) + + // Regular field unchanged + assert.Equal(t, reflect.TypeOf(0), shadowType.Field(0).Type) + + // Dynamic field unwrapped - should be string (the inner type) + assert.Equal(t, reflect.TypeOf(""), shadowType.Field(1).Type) + + // Another regular field unchanged + assert.Equal(t, reflect.TypeOf(false), shadowType.Field(2).Type) + }) + + t.Run("nested struct with dynamics", func(t *testing.T) { + type Inner struct { + Value *Dynamic[int] + } + + type Outer struct { + Inner Inner + } + + origType := reflect.TypeOf(Outer{}) + shadowType := createShadowType(origType) + + // Check outer struct + assert.Equal(t, 1, shadowType.NumField()) + + // Check inner struct field + innerField := shadowType.Field(0) + assert.Equal(t, "Inner", innerField.Name) + + // The inner type should also have its Dynamic unwrapped + innerType := innerField.Type + assert.Equal(t, 1, innerType.NumField()) + + innerValueField := innerType.Field(0) + assert.Equal(t, "Value", innerValueField.Name) + // The inner field should be int (unwrapped from *Dynamic[int]) + assert.Equal(t, reflect.TypeOf(0), innerValueField.Type) + }) + + t.Run("pointer type handling", func(t *testing.T) { + type Original struct { + Field *Dynamic[int] + } + + ptrType := reflect.TypeOf(&Original{}) + shadowType := createShadowType(ptrType) + + // Should handle pointer + assert.Equal(t, reflect.Ptr, shadowType.Kind()) + assert.Equal(t, reflect.Struct, shadowType.Elem().Kind()) + }) +} + +// TestInitializeShadowFromTarget tests shadow initialization +func TestInitializeShadowFromTarget(t *testing.T) { + t.Run("initialize with FIL values", func(t *testing.T) { + type TestConfig struct { + Fee *Dynamic[types.FIL] + Amount 
types.FIL + } + + target := TestConfig{ + Fee: NewDynamic(types.MustParseFIL("5 FIL")), + Amount: types.MustParseFIL("10 FIL"), + } + + shadow := createShadowStruct(&target) + initializeShadowFromTarget(shadow, &target) + + // Check that shadow was initialized with target values + shadowVal := reflect.ValueOf(shadow).Elem() + feeField := shadowVal.Field(0) + amountField := shadowVal.Field(1) + + assert.True(t, feeField.IsValid()) + assert.True(t, amountField.IsValid()) + + // The FIL values should have been copied + feeVal := feeField.Interface().(types.FIL) + amountVal := amountField.Interface().(types.FIL) + + assert.Equal(t, "5 FIL", feeVal.String()) + assert.Equal(t, "10 FIL", amountVal.String()) + }) + + t.Run("initialize regular fields", func(t *testing.T) { + type TestConfig struct { + IntVal *Dynamic[int] + StrVal string + } + + target := TestConfig{ + IntVal: NewDynamic(42), + StrVal: "test", + } + + shadow := createShadowStruct(&target) + initializeShadowFromTarget(shadow, &target) + + shadowVal := reflect.ValueOf(shadow).Elem() + intField := shadowVal.Field(0) + strField := shadowVal.Field(1) + + // Dynamic field should be initialized + assert.Equal(t, 42, intField.Interface().(int)) + + // Regular field should be initialized + assert.Equal(t, "test", strField.Interface().(string)) + }) +} + +// TestWrapDynamics tests wrapping values back into Dynamic fields +func TestWrapDynamics(t *testing.T) { + t.Run("wrap simple values", func(t *testing.T) { + type TestConfig struct { + IntVal *Dynamic[int] + StrVal *Dynamic[string] + } + + // Create shadow with plain values + type Shadow struct { + IntVal int + StrVal string + } + + shadow := Shadow{ + IntVal: 42, + StrVal: "test", + } + + // Create target with Dynamic fields + target := TestConfig{ + IntVal: NewDynamic(0), + StrVal: NewDynamic(""), + } + + err := wrapDynamics(shadow, &target) + require.NoError(t, err) + + assert.Equal(t, 42, target.IntVal.Get()) + assert.Equal(t, "test", target.StrVal.Get()) + }) 
+ + t.Run("wrap nested structs", func(t *testing.T) { + type Inner struct { + Value *Dynamic[int] + } + + type Outer struct { + Inner Inner + } + + // Shadow structure + type ShadowInner struct { + Value int + } + + type ShadowOuter struct { + Inner ShadowInner + } + + shadow := ShadowOuter{ + Inner: ShadowInner{ + Value: 99, + }, + } + + target := Outer{ + Inner: Inner{ + Value: NewDynamic(0), + }, + } + + err := wrapDynamics(shadow, &target) + require.NoError(t, err) + + assert.Equal(t, 99, target.Inner.Value.Get()) + }) + + t.Run("preserve non-dynamic fields", func(t *testing.T) { + type TestConfig struct { + DynVal *Dynamic[int] + RegVal int + } + + type Shadow struct { + DynVal int + RegVal int + } + + shadow := Shadow{ + DynVal: 42, + RegVal: 99, + } + + target := TestConfig{ + DynVal: NewDynamic(0), + RegVal: 0, + } + + err := wrapDynamics(shadow, &target) + require.NoError(t, err) + + assert.Equal(t, 42, target.DynVal.Get()) + assert.Equal(t, 99, target.RegVal) + }) +} + +// TestUnwrapDynamics tests unwrapping Dynamic fields for marshaling +func TestUnwrapDynamics(t *testing.T) { + t.Run("unwrap simple dynamic", func(t *testing.T) { + type Config struct { + Field *Dynamic[int] + } + + cfg := Config{ + Field: NewDynamic(42), + } + + unwrapped := unwrapDynamics(cfg) + + // Check that the result has the inner type + unwrappedVal := reflect.ValueOf(unwrapped) + fieldVal := unwrappedVal.Field(0) + + assert.Equal(t, 42, fieldVal.Interface().(int)) + }) + + t.Run("unwrap with regular fields", func(t *testing.T) { + type Config struct { + Regular int + Dynamic *Dynamic[string] + } + + cfg := Config{ + Regular: 99, + Dynamic: NewDynamic("test"), + } + + unwrapped := unwrapDynamics(cfg) + + unwrappedVal := reflect.ValueOf(unwrapped) + regularField := unwrappedVal.Field(0) + dynamicField := unwrappedVal.Field(1) + + assert.Equal(t, 99, regularField.Interface().(int)) + assert.Equal(t, "test", dynamicField.Interface().(string)) + }) + + t.Run("unwrap nested dynamics", 
func(t *testing.T) { + type Inner struct { + Value *Dynamic[int] + } + + type Outer struct { + Inner Inner + Count *Dynamic[int] + } + + cfg := Outer{ + Inner: Inner{ + Value: NewDynamic(42), + }, + Count: NewDynamic(10), + } + + unwrapped := unwrapDynamics(cfg) + + unwrappedVal := reflect.ValueOf(unwrapped) + innerField := unwrappedVal.Field(0) + countField := unwrappedVal.Field(1) + + assert.Equal(t, 10, countField.Interface().(int)) + + innerVal := innerField.Interface() + innerReflect := reflect.ValueOf(innerVal) + valueField := innerReflect.Field(0) + + assert.Equal(t, 42, valueField.Interface().(int)) + }) + + t.Run("unwrap struct without dynamics", func(t *testing.T) { + type Config struct { + Field1 int + Field2 string + } + + cfg := Config{ + Field1: 42, + Field2: "test", + } + + unwrapped := unwrapDynamics(cfg) + + // Should return the same struct + unwrappedConfig := unwrapped.(Config) + assert.Equal(t, 42, unwrappedConfig.Field1) + assert.Equal(t, "test", unwrappedConfig.Field2) + }) +} + +// TestRoundTripConsistency tests that marshal/unmarshal round trips preserve values +func TestRoundTripConsistency(t *testing.T) { + t.Run("complex struct round trip", func(t *testing.T) { + type Inner struct { + Name string + Count *Dynamic[int] + } + + type Config struct { + ID int + Timeout *Dynamic[time.Duration] + Items *Dynamic[[]string] + Inner Inner + Active *Dynamic[bool] + } + + original := Config{ + ID: 123, + Timeout: NewDynamic(5 * time.Minute), + Items: NewDynamic([]string{"a", "b", "c"}), + Inner: Inner{ + Name: "test", + Count: NewDynamic(99), + }, + Active: NewDynamic(true), + } + + // Marshal + data, err := TransparentMarshal(original) + require.NoError(t, err) + + // Unmarshal + restored := Config{ + Timeout: NewDynamic(time.Duration(0)), + Items: NewDynamic([]string{}), + Inner: Inner{ + Count: NewDynamic(0), + }, + Active: NewDynamic(false), + } + + err = TransparentUnmarshal(data, &restored) + require.NoError(t, err) + + // Verify all fields 
match + assert.Equal(t, original.ID, restored.ID) + assert.Equal(t, original.Timeout.Get(), restored.Timeout.Get()) + assert.Equal(t, original.Items.Get(), restored.Items.Get()) + assert.Equal(t, original.Inner.Name, restored.Inner.Name) + assert.Equal(t, original.Inner.Count.Get(), restored.Inner.Count.Get()) + assert.Equal(t, original.Active.Get(), restored.Active.Get()) + }) + + t.Run("multiple round trips", func(t *testing.T) { + type Config struct { + Value *Dynamic[int] + } + + cfg := Config{ + Value: NewDynamic(42), + } + + for i := 0; i < 3; i++ { + data, err := TransparentMarshal(cfg) + require.NoError(t, err) + + cfg = Config{ + Value: NewDynamic(0), + } + + err = TransparentUnmarshal(data, &cfg) + require.NoError(t, err) + + assert.Equal(t, 42, cfg.Value.Get(), "round trip %d failed", i+1) + } + }) +} + +// TestErrorHandling tests error conditions +func TestErrorHandling(t *testing.T) { + t.Run("invalid TOML syntax", func(t *testing.T) { + type Config struct { + Field *Dynamic[int] + } + + invalidTOML := `Field = [[[invalid` + + cfg := Config{ + Field: NewDynamic(0), + } + + err := TransparentUnmarshal([]byte(invalidTOML), &cfg) + assert.Error(t, err) + }) + + t.Run("type mismatch in TOML", func(t *testing.T) { + type Config struct { + Field *Dynamic[int] + } + + tomlData := `Field = "not an int"` + + cfg := Config{ + Field: NewDynamic(0), + } + + err := TransparentUnmarshal([]byte(tomlData), &cfg) + assert.Error(t, err) + }) + + t.Run("missing required initialization", func(t *testing.T) { + type Config struct { + Field *Dynamic[types.FIL] + } + + tomlData := `Field = "5 FIL"` + + // Without proper initialization, this should fail or behave unexpectedly + cfg := Config{ + Field: NewDynamic(types.FIL{}), // Not properly initialized + } + + // This test documents the expected behavior + err := TransparentUnmarshal([]byte(tomlData), &cfg) + // The error handling depends on the FIL type implementation + _ = err + }) +} + +// TestWithActualTOMLLibrary tests 
integration with TOML library +func TestWithActualTOMLLibrary(t *testing.T) { + t.Run("compare with standard TOML", func(t *testing.T) { + type Config struct { + Field int + } + + cfg := Config{Field: 42} + + // Standard TOML marshal + standardData, err := toml.Marshal(cfg) + require.NoError(t, err) + + // Transparent marshal (should be identical for non-Dynamic structs) + transparentData, err := TransparentMarshal(cfg) + require.NoError(t, err) + + assert.Equal(t, string(standardData), string(transparentData)) + }) + + t.Run("decode metadata accuracy", func(t *testing.T) { + type Config struct { + A int + B string + C bool + } + + tomlData := ` +A = 1 +B = "test" +` + + var cfg Config + md, err := toml.Decode(tomlData, &cfg) + require.NoError(t, err) + + assert.True(t, md.IsDefined("A")) + assert.True(t, md.IsDefined("B")) + assert.False(t, md.IsDefined("C")) + }) +} + +// TestAdditionalEdgeCases tests additional edge cases for better coverage +func TestAdditionalEdgeCases(t *testing.T) { + t.Run("nil pointer to dynamic", func(t *testing.T) { + type Config struct { + Field *Dynamic[int] + } + + // Test with nil Dynamic field + cfg := Config{ + Field: nil, + } + + // Marshal should handle nil gracefully + data, err := TransparentMarshal(cfg) + require.NoError(t, err) + t.Logf("Marshaled:\n%s", string(data)) + }) + + t.Run("nested pointer to struct without dynamics", func(t *testing.T) { + type Inner struct { + Value int + } + + type Outer struct { + InnerPtr *Inner + } + + cfg1 := Outer{ + InnerPtr: &Inner{Value: 42}, + } + + data, err := TransparentMarshal(cfg1) + require.NoError(t, err) + + cfg2 := Outer{ + InnerPtr: &Inner{}, + } + + err = TransparentUnmarshal(data, &cfg2) + require.NoError(t, err) + assert.Equal(t, 42, cfg2.InnerPtr.Value) + }) + + t.Run("marshal pointer to struct", func(t *testing.T) { + type Config struct { + Field *Dynamic[int] + } + + cfg := &Config{ + Field: NewDynamic(42), + } + + data, err := TransparentMarshal(cfg) + require.NoError(t, 
err) + assert.Contains(t, string(data), "Field = 42") + }) + + t.Run("extractDynamicValue with struct value", func(t *testing.T) { + // Test extractDynamicValue with addressable Dynamic struct + d := Dynamic[int]{} + // We need to use a pointer to make it addressable + ptr := &d + val := reflect.ValueOf(ptr).Elem() + + // Now the value is addressable, but the method might still not work correctly + // since Dynamic is meant to be used as a pointer + extracted := extractDynamicValue(val) + // This might return invalid or zero value + _ = extracted + }) + + t.Run("extractDynamicInnerType with non-struct", func(t *testing.T) { + // Test with a non-struct type + intType := reflect.TypeOf(42) + result := extractDynamicInnerType(intType) + // Should return the same type + assert.Equal(t, intType, result) + }) + + t.Run("setDynamicValue with invalid value", func(t *testing.T) { + d := NewDynamic(0) + dynField := reflect.ValueOf(d) + invalidValue := reflect.Value{} + + // Should handle invalid value gracefully + setDynamicValue(dynField, invalidValue) + // Value should remain unchanged + assert.Equal(t, 0, d.Get()) + }) + + t.Run("hasNestedDynamics with non-struct", func(t *testing.T) { + // Test with non-struct types + assert.False(t, hasNestedDynamics(reflect.TypeOf(42))) + assert.False(t, hasNestedDynamics(reflect.TypeOf("string"))) + assert.False(t, hasNestedDynamics(reflect.TypeOf([]int{}))) + }) + + t.Run("unwrapDynamics with non-struct", func(t *testing.T) { + // Test with non-struct value + val := 42 + result := unwrapDynamics(val) + assert.Equal(t, 42, result.(int)) + }) + + t.Run("wrapDynamics with non-struct", func(t *testing.T) { + // Test with non-struct values + shadow := 42 + target := 99 + + err := wrapDynamics(shadow, &target) + require.NoError(t, err) + // Since both are ints, wrapDynamics should just return without error + }) + + t.Run("createShadowType with non-struct", func(t *testing.T) { + // Test with non-struct type + intType := reflect.TypeOf(42) 
+ result := createShadowType(intType) + assert.Equal(t, intType, result) + }) + + t.Run("initializeShadowFromTarget with non-struct", func(t *testing.T) { + // Test with non-struct values + shadow := 42 + target := 99 + + // Should return without error for non-structs + initializeShadowFromTarget(shadow, target) + // Nothing should happen, but shouldn't panic + }) + + t.Run("nested struct with pointer to struct without dynamics", func(t *testing.T) { + type Inner struct { + Value int + } + + type Middle struct { + InnerPtr *Inner + } + + type Outer struct { + Middle *Middle + } + + cfg1 := Outer{ + Middle: &Middle{ + InnerPtr: &Inner{Value: 42}, + }, + } + + data, err := TransparentMarshal(cfg1) + require.NoError(t, err) + + cfg2 := Outer{ + Middle: &Middle{ + InnerPtr: &Inner{}, + }, + } + + err = TransparentUnmarshal(data, &cfg2) + require.NoError(t, err) + assert.Equal(t, 42, cfg2.Middle.InnerPtr.Value) + }) + + t.Run("dynamic with slice of pointers", func(t *testing.T) { + type Config struct { + Items *Dynamic[[]*int] + } + + val1, val2 := 1, 2 + cfg1 := Config{ + Items: NewDynamic([]*int{&val1, &val2}), + } + + data, err := TransparentMarshal(cfg1) + require.NoError(t, err) + + zero1, zero2 := 0, 0 + cfg2 := Config{ + Items: NewDynamic([]*int{&zero1, &zero2}), + } + + err = TransparentUnmarshal(data, &cfg2) + require.NoError(t, err) + + items := cfg2.Items.Get() + assert.Len(t, items, 2) + assert.Equal(t, 1, *items[0]) + assert.Equal(t, 2, *items[1]) + }) + + t.Run("extractDynamicInnerType with pointer to dynamic", func(t *testing.T) { + // Test with pointer to Dynamic type + ptrType := reflect.TypeOf((*Dynamic[string])(nil)) + innerType := extractDynamicInnerType(ptrType) + assert.Equal(t, reflect.TypeOf(""), innerType) + }) + + t.Run("struct with unexported fields", func(t *testing.T) { + // Test that unexported fields are handled gracefully + type Config struct { + Exported *Dynamic[int] + unexported int // unexported field + } + + cfg := Config{ + 
Exported: NewDynamic(42), + unexported: 99, // This shouldn't be marshaled + } + + data, err := TransparentMarshal(cfg) + require.NoError(t, err) + assert.Contains(t, string(data), "Exported = 42") + }) +} + +// TestCoverageForRemainingPaths tests specific code paths to achieve 100% coverage +func TestCoverageForRemainingPaths(t *testing.T) { + t.Run("extractDynamicValue with method not found", func(t *testing.T) { + // Create a struct that looks like Dynamic but doesn't have Get method + type FakeDynamic struct { + value int + } + + fake := FakeDynamic{value: 42} + val := reflect.ValueOf(&fake).Elem() + + // extractDynamicValue should return invalid value when Get() method doesn't exist + result := extractDynamicValue(val) + assert.False(t, result.IsValid()) + }) + + t.Run("setDynamicValue with method not found", func(t *testing.T) { + // Create a struct that looks like Dynamic but doesn't have Set method + type FakeDynamic struct { + value int + } + + fake := FakeDynamic{value: 0} + fakeVal := reflect.ValueOf(&fake).Elem() + newVal := reflect.ValueOf(42) + + // setDynamicValue should handle gracefully when Set() method doesn't exist + setDynamicValue(fakeVal, newVal) + // Should not panic, just return + assert.Equal(t, 0, fake.value) // Value unchanged + }) + + t.Run("unwrapDynamics with invalid dynamic value", func(t *testing.T) { + // Test case where extractDynamicValue returns invalid value + type FakeDynamic struct { + value int + } + + type Config struct { + Field *FakeDynamic + } + + cfg := Config{ + Field: &FakeDynamic{value: 42}, + } + + // Since FakeDynamic doesn't have the Get method, unwrapDynamics won't detect it as Dynamic + // This tests the path but FakeDynamic won't be treated as Dynamic + result := unwrapDynamics(cfg) + assert.NotNil(t, result) + }) + + t.Run("initializeShadowFromTarget with non-assignable types", func(t *testing.T) { + // Test case where types don't match and assignment fails + type ConfigA struct { + Field *Dynamic[int] + } + + 
type ConfigB struct { + Field string // Different type + } + + targetA := ConfigA{ + Field: NewDynamic(42), + } + + // Create shadow with different structure + targetB := ConfigB{ + Field: "test", + } + + // This should handle gracefully when types don't match + initializeShadowFromTarget(&targetB, &targetA) + // Should not panic + }) + + t.Run("wrapDynamics with non-assignable shadow fields", func(t *testing.T) { + // Test wrapDynamics when shadowField type is not assignable to targetField + type ShadowConfig struct { + Field string + } + + type TargetConfig struct { + Field int + } + + shadow := ShadowConfig{Field: "test"} + target := TargetConfig{Field: 0} + + // Should handle gracefully when types don't match + err := wrapDynamics(shadow, &target) + assert.NoError(t, err) + assert.Equal(t, 0, target.Field) // Unchanged + }) + + t.Run("initializeShadowFromTarget with pointer mismatch", func(t *testing.T) { + type Inner struct { + Value int + } + + type Config1 struct { + PtrField *Inner + } + + type Config2 struct { + PtrField *string // Different pointer type + } + + str := "test" + target := Config2{ + PtrField: &str, + } + + shadow := Config1{ + PtrField: &Inner{Value: 42}, + } + + // Should handle type mismatch gracefully + initializeShadowFromTarget(&shadow, &target) + // Should not panic + }) + + t.Run("unwrapDynamics with nil dynamic pointer", func(t *testing.T) { + type Config struct { + Field *Dynamic[int] + } + + cfg := Config{ + Field: nil, + } + + // unwrapDynamics should handle nil Dynamic fields + result := unwrapDynamics(cfg) + assert.NotNil(t, result) + + // Verify the result has the expected structure + resultVal := reflect.ValueOf(result) + assert.Equal(t, reflect.Struct, resultVal.Kind()) + }) + + t.Run("extractDynamicValue with pointer to nil", func(t *testing.T) { + var nilDyn *Dynamic[int] + val := reflect.ValueOf(nilDyn) + + // Should return invalid value for nil pointer + result := extractDynamicValue(val) + assert.False(t, 
result.IsValid()) + }) + + t.Run("wrapDynamics with nil shadow pointer", func(t *testing.T) { + type Inner struct { + Value int + } + + type Config struct { + PtrField *Inner + } + + // Test when shadowField is nil + shadow := Config{ + PtrField: nil, + } + + target := Config{ + PtrField: &Inner{Value: 99}, + } + + // Should handle nil shadow pointers gracefully + err := wrapDynamics(shadow, &target) + assert.NoError(t, err) + // The target will get the nil pointer from shadow since types match + assert.Nil(t, target.PtrField) + }) + + t.Run("wrapDynamics with nil target pointer", func(t *testing.T) { + type Inner struct { + Value int + } + + type Config struct { + PtrField *Inner + } + + shadow := Config{ + PtrField: &Inner{Value: 42}, + } + + // Test when targetField is nil + target := Config{ + PtrField: nil, + } + + // Should handle nil target pointers gracefully + err := wrapDynamics(shadow, &target) + assert.NoError(t, err) + // The wrapDynamics function copies regular pointer fields when they don't contain dynamics + // So target gets the value from shadow + assert.NotNil(t, target.PtrField) + assert.Equal(t, 42, target.PtrField.Value) + }) + + t.Run("initializeShadowFromTarget with nested dynamics pointer", func(t *testing.T) { + type Inner struct { + Value *Dynamic[int] + } + + type Config struct { + InnerPtr *Inner + } + + target := Config{ + InnerPtr: &Inner{ + Value: NewDynamic(42), + }, + } + + shadow := createShadowStruct(&target) + + // Initialize shadow from target with nested pointer + initializeShadowFromTarget(shadow, &target) + + // Verify the shadow was initialized correctly + shadowVal := reflect.ValueOf(shadow).Elem() + innerPtrField := shadowVal.Field(0) + assert.True(t, innerPtrField.IsValid()) + // Test just exercises the code path for pointer to struct with dynamics + // The actual result may vary based on implementation + }) + + t.Run("unwrapDynamics with nested pointer to struct with dynamics", func(t *testing.T) { + type Inner struct { 
+ Value *Dynamic[int] + } + + type Middle struct { + InnerPtr *Inner + } + + type Outer struct { + MiddlePtr *Middle + } + + cfg := Outer{ + MiddlePtr: &Middle{ + InnerPtr: &Inner{ + Value: NewDynamic(42), + }, + }, + } + + // Test unwrapping with nested pointers to structs containing dynamics + result := unwrapDynamics(cfg) + assert.NotNil(t, result) + + resultVal := reflect.ValueOf(result) + assert.Equal(t, reflect.Struct, resultVal.Kind()) + }) + + t.Run("extractDynamicInnerType with empty struct", func(t *testing.T) { + type EmptyStruct struct{} + + emptyType := reflect.TypeOf(EmptyStruct{}) + result := extractDynamicInnerType(emptyType) + + // Should return the same type when struct has no fields + assert.Equal(t, emptyType, result) + }) + + t.Run("setDynamicValue creating new instance", func(t *testing.T) { + // Test the path where dynamicField is nil and needs to be created + var nilDyn *Dynamic[int] + dynField := reflect.ValueOf(&nilDyn).Elem() + valueField := reflect.ValueOf(99) + + // setDynamicValue should create a new Dynamic instance + setDynamicValue(dynField, valueField) + + assert.NotNil(t, nilDyn) + assert.Equal(t, 99, nilDyn.Get()) + }) + + t.Run("initializeShadowFromTarget with regular pointer field", func(t *testing.T) { + type Config struct { + IntPtr *int + } + + val := 42 + target := Config{ + IntPtr: &val, + } + + shadow := Config{ + IntPtr: new(int), + } + + // Test copying regular pointer fields + initializeShadowFromTarget(&shadow, &target) + + assert.NotNil(t, shadow.IntPtr) + assert.Equal(t, 42, *shadow.IntPtr) + }) + + t.Run("wrapDynamics with regular field type mismatch", func(t *testing.T) { + // Test the else branch where shadowField and targetField types don't match + type ShadowConfig struct { + Field float64 + } + + type TargetConfig struct { + Field int + } + + shadow := ShadowConfig{Field: 3.14} + target := TargetConfig{Field: 42} + + err := wrapDynamics(shadow, &target) + assert.NoError(t, err) + // Field should remain 
unchanged due to type mismatch + assert.Equal(t, 42, target.Field) + }) + + t.Run("createShadowStruct edge cases", func(t *testing.T) { + type Config struct { + Field *Dynamic[int] + } + + cfg := Config{ + Field: NewDynamic(42), + } + + // Test that createShadowStruct handles all cases correctly + shadow := createShadowStruct(&cfg) + assert.NotNil(t, shadow) + + // Verify shadow has correct structure + shadowVal := reflect.ValueOf(shadow) + assert.Equal(t, reflect.Ptr, shadowVal.Kind()) + assert.Equal(t, reflect.Struct, shadowVal.Elem().Kind()) + }) + + t.Run("initializeShadowFromTarget innerVal not valid case", func(t *testing.T) { + // Test the case where extractDynamicValue returns invalid value + type Config struct { + Field *Dynamic[int] + } + + target := Config{ + Field: nil, // nil Dynamic + } + + shadow := createShadowStruct(&target) + + // This should handle nil Dynamic gracefully + initializeShadowFromTarget(shadow, &target) + // Just exercises the code path + }) + + t.Run("initializeShadowFromTarget type not assignable", func(t *testing.T) { + // Test where valReflect.Type() is not assignable to shadowField.Type() + type ConfigA struct { + Field *Dynamic[int] + } + + type ConfigB struct { + Field string + } + + target := ConfigA{ + Field: NewDynamic(42), + } + + // Create a shadow with incompatible type + shadow := &ConfigB{ + Field: "test", + } + + // Should handle type mismatch gracefully + initializeShadowFromTarget(shadow, &target) + // Should not crash + }) + +} diff --git a/deps/config/load.go b/deps/config/load.go index dcc20435f..72c057974 100644 --- a/deps/config/load.go +++ b/deps/config/load.go @@ -10,9 +10,7 @@ import ( "os" "reflect" "regexp" - "sort" "strings" - "time" "unicode" "github.com/BurntSushi/toml" @@ -89,7 +87,8 @@ func FromReader(reader io.Reader, def interface{}, opts ...LoadCfgOpt) (interfac cfg = ccfg } - md, err := toml.Decode(buf.String(), cfg) + // Use TransparentDecode for configs with Dynamic fields + md, err := 
TransparentDecode(buf.String(), cfg) if err != nil { return nil, err } @@ -315,23 +314,19 @@ func ConfigUpdate(cfgCur, cfgDef interface{}, opts ...UpdateCfgOpt) ([]byte, err } var nodeStr, defStr string if cfgDef != nil { - buf := new(bytes.Buffer) - e := toml.NewEncoder(buf) - if err := e.Encode(cfgDef); err != nil { + defBytes, err := TransparentMarshal(cfgDef) + if err != nil { return nil, xerrors.Errorf("encoding default config: %w", err) } - - defStr = buf.String() + defStr = string(defBytes) } { - buf := new(bytes.Buffer) - e := toml.NewEncoder(buf) - if err := e.Encode(cfgCur); err != nil { + nodeBytes, err := TransparentMarshal(cfgCur) + if err != nil { return nil, xerrors.Errorf("encoding node config: %w", err) } - - nodeStr = buf.String() + nodeStr = string(nodeBytes) } if updateOpts.comment { @@ -451,23 +446,15 @@ func ConfigUpdate(cfgCur, cfgDef interface{}, opts ...UpdateCfgOpt) ([]byte, err opts := []cmp.Option{ // This equality function compares big.Int cmpopts.IgnoreUnexported(big.Int{}), - cmp.Comparer(func(x, y []string) bool { - tx, ty := reflect.TypeOf(x), reflect.TypeOf(y) - if tx.Kind() == reflect.Slice && ty.Kind() == reflect.Slice && tx.Elem().Kind() == reflect.String && ty.Elem().Kind() == reflect.String { - sort.Strings(x) - sort.Strings(y) - return strings.Join(x, "\n") == strings.Join(y, "\n") - } - return false - }), - cmp.Comparer(func(x, y time.Duration) bool { - tx, ty := reflect.TypeOf(x), reflect.TypeOf(y) - return tx.Kind() == ty.Kind() - }), + // Treat nil and empty slices/maps as equal for all types + cmpopts.EquateEmpty(), + // Use BigIntComparer for proper big.Int comparison + BigIntComparer, } if !cmp.Equal(cfgUpdated, cfgCur, opts...) { - return nil, xerrors.Errorf("updated config didn't match current config") + diff := cmp.Diff(cfgUpdated, cfgCur, opts...) 
+ return nil, xerrors.Errorf("updated config didn't match current config:\n%s", diff) } } @@ -510,8 +497,9 @@ func findDocSect(root, section, name string) *DocField { found := false for _, field := range docSection { if field.Name == e { - lastField = &field // Store reference to the section field - docSection = Doc[strings.Trim(field.Type, "[]")] // Move to the next section + lastField = &field // Store reference to the section field + t := strings.Trim(field.Type, "[]") + docSection = Doc[t] // Move to the next section found = true break } @@ -552,10 +540,11 @@ func FixTOML(newText string, cfg *CurioConfig) error { } l := len(lengthDetector.Addresses) - il := len(cfg.Addresses) + addrs := cfg.Addresses.Get() + il := len(addrs) for l > il { - cfg.Addresses = append(cfg.Addresses, CurioAddresses{ + addrs = append(addrs, CurioAddresses{ PreCommitControl: []string{}, CommitControl: []string{}, DealPublishControl: []string{}, @@ -567,6 +556,7 @@ func FixTOML(newText string, cfg *CurioConfig) error { }) il++ } + cfg.Addresses.Set(addrs) return nil } @@ -590,7 +580,7 @@ func LoadConfigWithUpgradesGeneric[T any](text string, curioConfigWithDefaults T return toml.MetaData{}, err } - return toml.Decode(newText, &curioConfigWithDefaults) + return TransparentDecode(newText, curioConfigWithDefaults) } type ConfigText struct { diff --git a/deps/config/types.go b/deps/config/types.go index d34ab44ae..0d493b40b 100644 --- a/deps/config/types.go +++ b/deps/config/types.go @@ -38,14 +38,14 @@ func DefaultCurioConfig() *CurioConfig { DisableCollateralFallback: false, MaximizeFeeCap: true, }, - Addresses: []CurioAddresses{{ + Addresses: NewDynamic([]CurioAddresses{{ PreCommitControl: []string{}, CommitControl: []string{}, DealPublishControl: []string{}, TerminateControl: []string{}, MinerAddresses: []string{}, BalanceManager: DefaultBalanceManager(), - }}, + }}), Proving: CurioProvingConfig{ ParallelCheckLimit: 32, PartitionCheckTimeout: 20 * time.Minute, @@ -57,19 +57,19 @@ func 
DefaultCurioConfig() *CurioConfig { BatchSealSectorSize: "32GiB", }, Ingest: CurioIngestConfig{ - MaxMarketRunningPipelines: 64, + MaxMarketRunningPipelines: NewDynamic(64), MaxQueueDownload: NewDynamic(8), - MaxQueueCommP: 8, + MaxQueueCommP: NewDynamic(8), - MaxQueueDealSector: 8, // default to 8 sectors open(or in process of opening) for deals - MaxQueueSDR: 8, // default to 8 (will cause backpressure even if deal sectors are 0) - MaxQueueTrees: 0, // default don't use this limit - MaxQueuePoRep: 0, // default don't use this limit + MaxQueueDealSector: NewDynamic(8), // default to 8 sectors open(or in process of opening) for deals + MaxQueueSDR: NewDynamic(8), // default to 8 (will cause backpressure even if deal sectors are 0) + MaxQueueTrees: NewDynamic(0), // default don't use this limit + MaxQueuePoRep: NewDynamic(0), // default don't use this limit - MaxQueueSnapEncode: 16, - MaxQueueSnapProve: 0, + MaxQueueSnapEncode: NewDynamic(16), + MaxQueueSnapProve: NewDynamic(0), - MaxDealWaitTime: time.Hour, + MaxDealWaitTime: NewDynamic(time.Hour), }, Alerting: CurioAlertingConfig{ MinimumWalletBalance: types.MustParseFIL("5"), @@ -162,7 +162,7 @@ type CurioConfig struct { Fees CurioFees // Addresses specifies the list of miner addresses and their related wallet addresses. - Addresses []CurioAddresses + Addresses *Dynamic[[]CurioAddresses] // Proving defines the configuration settings related to proving functionality within the Curio node. Proving CurioProvingConfig @@ -520,7 +520,7 @@ type CurioIngestConfig struct { // A "running" pipeline is one that has at least one task currently assigned to a machine (owner_id is not null). // If this limit is exceeded, the system will apply backpressure to delay processing of new deals. // 0 means unlimited. 
(Default: 64) - MaxMarketRunningPipelines int + MaxMarketRunningPipelines *Dynamic[int] // MaxQueueDownload is the maximum number of pipelines that can be queued at the downloading stage, // waiting for a machine to pick up their task (owner_id is null). @@ -532,14 +532,15 @@ type CurioIngestConfig struct { // waiting for a machine to pick up their verification task (owner_id is null). // If this limit is exceeded, the system will apply backpressure, delaying new deal processing. // 0 means unlimited. (Default: 8) - MaxQueueCommP int + MaxQueueCommP *Dynamic[int] // Maximum number of sectors that can be queued waiting for deals to start processing. // 0 = unlimited // Note: This mechanism will delay taking deal data from markets, providing backpressure to the market subsystem. // The DealSector queue includes deals that are ready to enter the sealing pipeline but are not yet part of it. // DealSector queue is the first queue in the sealing pipeline, making it the primary backpressure mechanism. (Default: 8) - MaxQueueDealSector int + // Updates will affect running instances. + MaxQueueDealSector *Dynamic[int] // Maximum number of sectors that can be queued waiting for SDR to start processing. // 0 = unlimited @@ -548,7 +549,8 @@ type CurioIngestConfig struct { // possible that this queue grows more than this limit(CC sectors), the backpressure is only applied to sectors // entering the pipeline. // Only applies to PoRep pipeline (DoSnap = false) (Default: 8) - MaxQueueSDR int + // Updates will affect running instances. + MaxQueueSDR *Dynamic[int] // Maximum number of sectors that can be queued waiting for SDRTrees to start processing. // 0 = unlimited @@ -556,7 +558,8 @@ type CurioIngestConfig struct { // In case of the trees tasks it is possible that this queue grows more than this limit, the backpressure is only // applied to sectors entering the pipeline. 
// Only applies to PoRep pipeline (DoSnap = false) (Default: 0) - MaxQueueTrees int + // Updates will affect running instances. + MaxQueueTrees *Dynamic[int] // Maximum number of sectors that can be queued waiting for PoRep to start processing. // 0 = unlimited @@ -564,23 +567,27 @@ type CurioIngestConfig struct { // Like with the trees tasks, it is possible that this queue grows more than this limit, the backpressure is only // applied to sectors entering the pipeline. // Only applies to PoRep pipeline (DoSnap = false) (Default: 0) - MaxQueuePoRep int + // Updates will affect running instances. + MaxQueuePoRep *Dynamic[int] // MaxQueueSnapEncode is the maximum number of sectors that can be queued waiting for UpdateEncode tasks to start. // 0 means unlimited. // This applies backpressure to the market subsystem by delaying the ingestion of deal data. // Only applies to the Snap Deals pipeline (DoSnap = true). (Default: 16) - MaxQueueSnapEncode int + // Updates will affect running instances. + MaxQueueSnapEncode *Dynamic[int] // MaxQueueSnapProve is the maximum number of sectors that can be queued waiting for UpdateProve to start processing. // 0 means unlimited. // This applies backpressure in the Snap Deals pipeline (DoSnap = true) by delaying new deal ingestion. (Default: 0) - MaxQueueSnapProve int + // Updates will affect running instances. + MaxQueueSnapProve *Dynamic[int] // Maximum time an open deal sector should wait for more deals before it starts sealing. // This ensures that sectors don't remain open indefinitely, consuming resources. // Time duration string (e.g., "1h2m3s") in TOML format. (Default: "1h0m0s") - MaxDealWaitTime time.Duration + // Updates will affect running instances. + MaxDealWaitTime *Dynamic[time.Duration] // DoSnap, when set to true, enables snap deal processing for deals ingested by this instance. // Unlike lotus-miner, there is no fallback to PoRep when no snap sectors are available. 
diff --git a/deps/deps.go b/deps/deps.go index a86ebfa3f..8bdb34069 100644 --- a/deps/deps.go +++ b/deps/deps.go @@ -159,7 +159,7 @@ type Deps struct { Bstore curiochain.CurioBlockstore Verif storiface.Verifier As *multictladdr.MultiAddressSelector - Maddrs map[dtypes.MinerAddress]bool + Maddrs *config.Dynamic[map[dtypes.MinerAddress]bool] ProofTypes map[abi.RegisteredSealProof]bool Stor *paths.Remote Al *curioalerting.AlertingSystem @@ -323,25 +323,36 @@ Get it with: jq .PrivateKey ~/.lotus-miner/keystore/MF2XI2BNNJ3XILLQOJUXMYLUMU`, } if deps.Maddrs == nil { - deps.Maddrs = map[dtypes.MinerAddress]bool{} + deps.Maddrs = config.NewDynamic(map[dtypes.MinerAddress]bool{}) } - if len(deps.Maddrs) == 0 { - for _, s := range deps.Cfg.Addresses { + setMaddrs := func() error { + tmp := map[dtypes.MinerAddress]bool{} + for _, s := range deps.Cfg.Addresses.Get() { for _, s := range s.MinerAddresses { addr, err := address.NewFromString(s) if err != nil { return err } - deps.Maddrs[dtypes.MinerAddress(addr)] = true + tmp[dtypes.MinerAddress(addr)] = true } } + deps.Maddrs.Set(tmp) + return nil + } + if err := setMaddrs(); err != nil { + return err } + deps.Cfg.Addresses.OnChange(func() { + if err := setMaddrs(); err != nil { + log.Errorf("error setting maddrs: %s", err) + } + }) if deps.ProofTypes == nil { deps.ProofTypes = map[abi.RegisteredSealProof]bool{} } if len(deps.ProofTypes) == 0 { - for maddr := range deps.Maddrs { + for maddr := range deps.Maddrs.Get() { spt, err := sealProofType(maddr, deps.Chain) if err != nil { return err @@ -357,7 +368,7 @@ Get it with: jq .PrivateKey ~/.lotus-miner/keystore/MF2XI2BNNJ3XILLQOJUXMYLUMU`, if deps.Cfg.Subsystems.EnableWalletExporter { spIDs := []address.Address{} - for maddr := range deps.Maddrs { + for maddr := range deps.Maddrs.Get() { spIDs = append(spIDs, address.Address(maddr)) } @@ -625,7 +636,7 @@ func GetDepsCLI(ctx context.Context, cctx *cli.Context) (*Deps, error) { maddrs := map[dtypes.MinerAddress]bool{} if 
len(maddrs) == 0 { - for _, s := range cfg.Addresses { + for _, s := range cfg.Addresses.Get() { for _, s := range s.MinerAddresses { addr, err := address.NewFromString(s) if err != nil { @@ -640,7 +651,7 @@ func GetDepsCLI(ctx context.Context, cctx *cli.Context) (*Deps, error) { Cfg: cfg, DB: db, Chain: full, - Maddrs: maddrs, + Maddrs: config.NewDynamic(maddrs), // ignoring dynamic for a single CLI run EthClient: ethClient, }, nil } @@ -671,7 +682,7 @@ func CreateMinerConfig(ctx context.Context, full CreateMinerConfigChainAPI, db * return xerrors.Errorf("Failed to get miner info: %w", err) } - curioConfig.Addresses = append(curioConfig.Addresses, config.CurioAddresses{ + curioConfig.Addresses.Set(append(curioConfig.Addresses.Get(), config.CurioAddresses{ PreCommitControl: []string{}, CommitControl: []string{}, DealPublishControl: []string{}, @@ -680,7 +691,7 @@ func CreateMinerConfig(ctx context.Context, full CreateMinerConfigChainAPI, db * DisableWorkerFallback: false, MinerAddresses: []string{addr}, BalanceManager: config.DefaultBalanceManager(), - }) + })) } { @@ -696,9 +707,9 @@ func CreateMinerConfig(ctx context.Context, full CreateMinerConfigChainAPI, db * curioConfig.Apis.ChainApiInfo = append(curioConfig.Apis.ChainApiInfo, info) } - curioConfig.Addresses = lo.Filter(curioConfig.Addresses, func(a config.CurioAddresses, _ int) bool { + curioConfig.Addresses.Set(lo.Filter(curioConfig.Addresses.Get(), func(a config.CurioAddresses, _ int) bool { return len(a.MinerAddresses) > 0 - }) + })) // If no base layer is present if !lo.Contains(titles, "base") { @@ -727,10 +738,10 @@ func CreateMinerConfig(ctx context.Context, full CreateMinerConfigChainAPI, db * return xerrors.Errorf("Cannot parse base config: %w", err) } - baseCfg.Addresses = append(baseCfg.Addresses, curioConfig.Addresses...) 
- baseCfg.Addresses = lo.Filter(baseCfg.Addresses, func(a config.CurioAddresses, _ int) bool { + baseCfg.Addresses.Set(append(baseCfg.Addresses.Get(), curioConfig.Addresses.Get()...)) + baseCfg.Addresses.Set(lo.Filter(baseCfg.Addresses.Get(), func(a config.CurioAddresses, _ int) bool { return len(a.MinerAddresses) > 0 - }) + })) cb, err := config.ConfigUpdate(baseCfg, config.DefaultCurioConfig(), config.Commented(true), config.DefaultKeepUncommented(), config.NoEnv()) if err != nil { diff --git a/documentation/en/configuration/default-curio-configuration.md b/documentation/en/configuration/default-curio-configuration.md index 8d802061e..4dc83228d 100644 --- a/documentation/en/configuration/default-curio-configuration.md +++ b/documentation/en/configuration/default-curio-configuration.md @@ -5,6 +5,7 @@ description: The default curio configuration # Default Curio Configuration ```toml +Error parsing language # Subsystems defines configuration settings for various subsystems within the Curio node. # # type: CurioSubsystemsConfig @@ -399,6 +400,7 @@ description: The default curio configuration # Addresses specifies the list of miner addresses and their related wallet addresses. +# Updates will affect running instances. # # type: []CurioAddresses [[Addresses]] @@ -802,6 +804,7 @@ description: The default curio configuration # A "running" pipeline is one that has at least one task currently assigned to a machine (owner_id is not null). # If this limit is exceeded, the system will apply backpressure to delay processing of new deals. # 0 means unlimited. (Default: 64) + # Updates will affect running instances. # # type: int #MaxMarketRunningPipelines = 64 @@ -819,6 +822,7 @@ description: The default curio configuration # waiting for a machine to pick up their verification task (owner_id is null). # If this limit is exceeded, the system will apply backpressure, delaying new deal processing. # 0 means unlimited. (Default: 8) + # Updates will affect running instances. 
# # type: int #MaxQueueCommP = 8 @@ -828,6 +832,7 @@ description: The default curio configuration # Note: This mechanism will delay taking deal data from markets, providing backpressure to the market subsystem. # The DealSector queue includes deals that are ready to enter the sealing pipeline but are not yet part of it. # DealSector queue is the first queue in the sealing pipeline, making it the primary backpressure mechanism. (Default: 8) + # Updates will affect running instances. # # type: int #MaxQueueDealSector = 8 @@ -839,6 +844,7 @@ description: The default curio configuration # possible that this queue grows more than this limit(CC sectors), the backpressure is only applied to sectors # entering the pipeline. # Only applies to PoRep pipeline (DoSnap = false) (Default: 8) + # Updates will affect running instances. # # type: int #MaxQueueSDR = 8 @@ -849,6 +855,7 @@ description: The default curio configuration # In case of the trees tasks it is possible that this queue grows more than this limit, the backpressure is only # applied to sectors entering the pipeline. # Only applies to PoRep pipeline (DoSnap = false) (Default: 0) + # Updates will affect running instances. # # type: int #MaxQueueTrees = 0 @@ -859,6 +866,7 @@ description: The default curio configuration # Like with the trees tasks, it is possible that this queue grows more than this limit, the backpressure is only # applied to sectors entering the pipeline. # Only applies to PoRep pipeline (DoSnap = false) (Default: 0) + # Updates will affect running instances. # # type: int #MaxQueuePoRep = 0 @@ -867,6 +875,7 @@ description: The default curio configuration # 0 means unlimited. # This applies backpressure to the market subsystem by delaying the ingestion of deal data. # Only applies to the Snap Deals pipeline (DoSnap = true). (Default: 16) + # Updates will affect running instances. 
# # type: int #MaxQueueSnapEncode = 16 @@ -874,6 +883,7 @@ description: The default curio configuration # MaxQueueSnapProve is the maximum number of sectors that can be queued waiting for UpdateProve to start processing. # 0 means unlimited. # This applies backpressure in the Snap Deals pipeline (DoSnap = true) by delaying new deal ingestion. (Default: 0) + # Updates will affect running instances. # # type: int #MaxQueueSnapProve = 0 @@ -881,6 +891,7 @@ description: The default curio configuration # Maximum time an open deal sector should wait for more deals before it starts sealing. # This ensures that sectors don't remain open indefinitely, consuming resources. # Time duration string (e.g., "1h2m3s") in TOML format. (Default: "1h0m0s") + # Updates will affect running instances. # # type: time.Duration #MaxDealWaitTime = "1h0m0s" diff --git a/documentation/en/curio-cli/curio.md b/documentation/en/curio-cli/curio.md index bd71af882..979e20cef 100644 --- a/documentation/en/curio-cli/curio.md +++ b/documentation/en/curio-cli/curio.md @@ -1,5 +1,6 @@ # curio ``` +Error parsing language NAME: curio - Filecoin decentralized storage network provider @@ -42,6 +43,7 @@ GLOBAL OPTIONS: ## curio cli ``` +Error parsing language NAME: curio cli - Execute cli commands @@ -66,6 +68,7 @@ OPTIONS: ### curio cli info ``` +Error parsing language NAME: curio cli info - Get Curio node info @@ -78,6 +81,7 @@ OPTIONS: ### curio cli storage ``` +Error parsing language NAME: curio cli storage - manage sector storage @@ -105,6 +109,7 @@ OPTIONS: #### curio cli storage attach ``` +Error parsing language NAME: curio cli storage attach - attach local storage path @@ -149,6 +154,7 @@ OPTIONS: #### curio cli storage detach ``` +Error parsing language NAME: curio cli storage detach - detach local storage path @@ -162,6 +168,7 @@ OPTIONS: #### curio cli storage list ``` +Error parsing language NAME: curio cli storage list - list local storage paths @@ -175,6 +182,7 @@ OPTIONS: #### curio cli 
storage find ``` +Error parsing language NAME: curio cli storage find - find sector in the storage system @@ -187,6 +195,7 @@ OPTIONS: #### curio cli storage generate-vanilla-proof ``` +Error parsing language NAME: curio cli storage generate-vanilla-proof - generate vanilla proof for a sector @@ -199,6 +208,7 @@ OPTIONS: #### curio cli storage redeclare ``` +Error parsing language NAME: curio cli storage redeclare - redeclare sectors in a local storage path @@ -216,6 +226,7 @@ OPTIONS: ### curio cli log ``` +Error parsing language NAME: curio cli log - Manage logging @@ -233,6 +244,7 @@ OPTIONS: #### curio cli log list ``` +Error parsing language NAME: curio cli log list - List log systems @@ -245,6 +257,7 @@ OPTIONS: #### curio cli log set-level ``` +Error parsing language NAME: curio cli log set-level - Set log level @@ -278,6 +291,7 @@ OPTIONS: ### curio cli wait-api ``` +Error parsing language NAME: curio cli wait-api - Wait for Curio api to come online @@ -291,6 +305,7 @@ OPTIONS: ### curio cli stop ``` +Error parsing language NAME: curio cli stop - Stop a running Curio process @@ -303,6 +318,7 @@ OPTIONS: ### curio cli cordon ``` +Error parsing language NAME: curio cli cordon - Cordon a machine, set it to maintenance mode @@ -315,6 +331,7 @@ OPTIONS: ### curio cli uncordon ``` +Error parsing language NAME: curio cli uncordon - Uncordon a machine, resume scheduling @@ -327,6 +344,7 @@ OPTIONS: ### curio cli index-sample ``` +Error parsing language NAME: curio cli index-sample - Provides a sample of CIDs from an indexed piece @@ -340,6 +358,7 @@ OPTIONS: ## curio run ``` +Error parsing language NAME: curio run - Start a Curio process @@ -357,6 +376,7 @@ OPTIONS: ## curio config ``` +Error parsing language NAME: curio config - Manage node config by layers. The layer 'base' will always be applied at Curio start-up. 
@@ -380,6 +400,7 @@ OPTIONS: ### curio config default ``` +Error parsing language NAME: curio config default - Print default node config @@ -393,6 +414,7 @@ OPTIONS: ### curio config set ``` +Error parsing language NAME: curio config set - Set a config layer or the base by providing a filename or stdin. @@ -406,6 +428,7 @@ OPTIONS: ### curio config get ``` +Error parsing language NAME: curio config get - Get a config layer by name. You may want to pipe the output to a file, or use 'less' @@ -418,6 +441,7 @@ OPTIONS: ### curio config list ``` +Error parsing language NAME: curio config list - List config layers present in the DB. @@ -430,6 +454,7 @@ OPTIONS: ### curio config interpret ``` +Error parsing language NAME: curio config interpret - Interpret stacked config layers by this version of curio, with system-generated comments. @@ -443,6 +468,7 @@ OPTIONS: ### curio config remove ``` +Error parsing language NAME: curio config remove - Remove a named config layer. @@ -455,6 +481,7 @@ OPTIONS: ### curio config edit ``` +Error parsing language NAME: curio config edit - edit a config layer @@ -472,6 +499,7 @@ OPTIONS: ### curio config new-cluster ``` +Error parsing language NAME: curio config new-cluster - Create new configuration for a new cluster @@ -484,6 +512,7 @@ OPTIONS: ## curio test ``` +Error parsing language NAME: curio test - Utility functions for testing @@ -501,6 +530,7 @@ OPTIONS: ### curio test window-post ``` +Error parsing language NAME: curio test window-post - Compute a proof-of-spacetime for a sector (requires the sector to be pre-sealed). These will not send to the chain. @@ -519,6 +549,7 @@ OPTIONS: #### curio test window-post here ``` +Error parsing language NAME: curio test window-post here - Compute WindowPoSt for performance and configuration testing. 
@@ -539,6 +570,7 @@ OPTIONS: #### curio test window-post task ``` +Error parsing language NAME: curio test window-post task - Test the windowpost scheduler by running it on the next available curio. If tasks fail all retries, you will need to ctrl+c to exit. @@ -554,6 +586,7 @@ OPTIONS: #### curio test window-post vanilla ``` +Error parsing language NAME: curio test window-post vanilla - Compute WindowPoSt vanilla proofs and verify them. @@ -570,6 +603,7 @@ OPTIONS: ### curio test debug ``` +Error parsing language NAME: curio test debug - Collection of debugging utilities @@ -588,6 +622,7 @@ OPTIONS: #### curio test debug ipni-piece-chunks ``` +Error parsing language NAME: curio test debug ipni-piece-chunks - generate ipni chunks from a file @@ -600,6 +635,7 @@ OPTIONS: #### curio test debug debug-snsvc ``` +Error parsing language NAME: curio test debug debug-snsvc @@ -634,6 +670,7 @@ OPTIONS: ##### curio test debug debug-snsvc deposit ``` +Error parsing language NAME: curio test debug debug-snsvc deposit - Deposit FIL into the Router contract (client) @@ -648,6 +685,7 @@ OPTIONS: ##### curio test debug debug-snsvc client-initiate-withdrawal ``` +Error parsing language NAME: curio test debug debug-snsvc client-initiate-withdrawal - Initiate a withdrawal request from the client's deposit @@ -662,6 +700,7 @@ OPTIONS: ##### curio test debug debug-snsvc client-complete-withdrawal ``` +Error parsing language NAME: curio test debug debug-snsvc client-complete-withdrawal - Complete a pending client withdrawal after the withdrawal window elapses @@ -675,6 +714,7 @@ OPTIONS: ##### curio test debug debug-snsvc client-cancel-withdrawal ``` +Error parsing language NAME: curio test debug debug-snsvc client-cancel-withdrawal - Cancel a pending client withdrawal request @@ -688,6 +728,7 @@ OPTIONS: ##### curio test debug debug-snsvc redeem-client ``` +Error parsing language NAME: curio test debug debug-snsvc redeem-client - Redeem a client voucher (service role) @@ -705,6 +746,7 
@@ OPTIONS: ##### curio test debug debug-snsvc redeem-provider ``` +Error parsing language NAME: curio test debug debug-snsvc redeem-provider - Redeem a provider voucher (provider role) @@ -722,6 +764,7 @@ OPTIONS: ##### curio test debug debug-snsvc service-initiate-withdrawal ``` +Error parsing language NAME: curio test debug debug-snsvc service-initiate-withdrawal - Initiate a withdrawal request from the service pool @@ -736,6 +779,7 @@ OPTIONS: ##### curio test debug debug-snsvc service-complete-withdrawal ``` +Error parsing language NAME: curio test debug debug-snsvc service-complete-withdrawal - Complete a pending service withdrawal after the withdrawal window elapses @@ -749,6 +793,7 @@ OPTIONS: ##### curio test debug debug-snsvc service-cancel-withdrawal ``` +Error parsing language NAME: curio test debug debug-snsvc service-cancel-withdrawal - Cancel a pending service withdrawal request @@ -762,6 +807,7 @@ OPTIONS: ##### curio test debug debug-snsvc service-deposit ``` +Error parsing language NAME: curio test debug debug-snsvc service-deposit - Deposit funds into the service pool (service role) @@ -776,6 +822,7 @@ OPTIONS: ##### curio test debug debug-snsvc get-client-state ``` +Error parsing language NAME: curio test debug debug-snsvc get-client-state - Query the state of a client @@ -789,6 +836,7 @@ OPTIONS: ##### curio test debug debug-snsvc get-provider-state ``` +Error parsing language NAME: curio test debug debug-snsvc get-provider-state - Query the state of a provider @@ -802,6 +850,7 @@ OPTIONS: ##### curio test debug debug-snsvc get-service-state ``` +Error parsing language NAME: curio test debug debug-snsvc get-service-state - Query the service state @@ -814,6 +863,7 @@ OPTIONS: ##### curio test debug debug-snsvc create-client-voucher ``` +Error parsing language NAME: curio test debug debug-snsvc create-client-voucher - Create a client voucher @@ -828,6 +878,7 @@ OPTIONS: ##### curio test debug debug-snsvc create-provider-voucher ``` +Error parsing 
language NAME: curio test debug debug-snsvc create-provider-voucher - Create a provider voucher @@ -844,6 +895,7 @@ OPTIONS: ##### curio test debug debug-snsvc propose-service-actor ``` +Error parsing language NAME: curio test debug debug-snsvc propose-service-actor - Propose a new service actor @@ -858,6 +910,7 @@ OPTIONS: ##### curio test debug debug-snsvc accept-service-actor ``` +Error parsing language NAME: curio test debug debug-snsvc accept-service-actor - Accept a proposed service actor @@ -871,6 +924,7 @@ OPTIONS: ##### curio test debug debug-snsvc validate-client-voucher ``` +Error parsing language NAME: curio test debug debug-snsvc validate-client-voucher - Validate a client voucher signature @@ -887,6 +941,7 @@ OPTIONS: ##### curio test debug debug-snsvc validate-provider-voucher ``` +Error parsing language NAME: curio test debug debug-snsvc validate-provider-voucher - Validate a provider voucher signature @@ -903,6 +958,7 @@ OPTIONS: #### curio test debug proofsvc-client ``` +Error parsing language NAME: curio test debug proofsvc-client - Interact with the remote proof service @@ -921,6 +977,7 @@ OPTIONS: ##### curio test debug proofsvc-client create-voucher ``` +Error parsing language NAME: curio test debug proofsvc-client create-voucher - Create a client voucher @@ -935,6 +992,7 @@ OPTIONS: ##### curio test debug proofsvc-client submit ``` +Error parsing language NAME: curio test debug proofsvc-client submit - Submit a proof request @@ -953,6 +1011,7 @@ OPTIONS: ##### curio test debug proofsvc-client status ``` +Error parsing language NAME: curio test debug proofsvc-client status - Check proof status @@ -966,6 +1025,7 @@ OPTIONS: ## curio web ``` +Error parsing language NAME: curio web - Start Curio web interface @@ -985,6 +1045,7 @@ OPTIONS: ## curio guided-setup ``` +Error parsing language NAME: curio guided-setup - Run the guided setup for migrating from lotus-miner to Curio or Creating a new Curio miner @@ -997,6 +1058,7 @@ OPTIONS: ## curio seal 
``` +Error parsing language NAME: curio seal - Manage the sealing pipeline @@ -1014,6 +1076,7 @@ OPTIONS: ### curio seal start ``` +Error parsing language NAME: curio seal start - Start new sealing operations manually @@ -1033,6 +1096,7 @@ OPTIONS: ### curio seal events ``` +Error parsing language NAME: curio seal events - List pipeline events @@ -1048,6 +1112,7 @@ OPTIONS: ## curio unseal ``` +Error parsing language NAME: curio unseal - Manage unsealed data @@ -1067,6 +1132,7 @@ OPTIONS: ### curio unseal info ``` +Error parsing language NAME: curio unseal info - Get information about unsealed data @@ -1079,6 +1145,7 @@ OPTIONS: ### curio unseal list-sectors ``` +Error parsing language NAME: curio unseal list-sectors - List data from the sectors_unseal_pipeline and sectors_meta tables @@ -1093,6 +1160,7 @@ OPTIONS: ### curio unseal set-target-state ``` +Error parsing language NAME: curio unseal set-target-state - Set the target unseal state for a sector @@ -1122,6 +1190,7 @@ OPTIONS: ### curio unseal check ``` +Error parsing language NAME: curio unseal check - Check data integrity in unsealed sector files @@ -1139,6 +1208,7 @@ OPTIONS: ## curio market ``` +Error parsing language NAME: curio market @@ -1158,6 +1228,7 @@ OPTIONS: ### curio market seal ``` +Error parsing language NAME: curio market seal - start sealing a deal sector early @@ -1172,6 +1243,7 @@ OPTIONS: ### curio market add-url ``` +Error parsing language NAME: curio market add-url - Add URL to fetch data for offline deals @@ -1187,6 +1259,7 @@ OPTIONS: ### curio market move-to-escrow ``` +Error parsing language NAME: curio market move-to-escrow - Moves funds from the deal collateral wallet into escrow with the storage market actor @@ -1202,6 +1275,7 @@ OPTIONS: ### curio market ddo ``` +Error parsing language NAME: curio market ddo - Create a new offline verified DDO deal for Curio @@ -1218,6 +1292,7 @@ OPTIONS: ## curio fetch-params ``` +Error parsing language NAME: curio fetch-params - Fetch proving 
parameters @@ -1230,6 +1305,7 @@ OPTIONS: ## curio calc ``` +Error parsing language NAME: curio calc - Math Utils @@ -1248,6 +1324,7 @@ OPTIONS: ### curio calc batch-cpu ``` +Error parsing language NAME: curio calc batch-cpu - Analyze and display the layout of batch sealer threads @@ -1267,6 +1344,7 @@ OPTIONS: ### curio calc supraseal-config ``` +Error parsing language NAME: curio calc supraseal-config - Generate a supra_seal configuration @@ -1287,6 +1365,7 @@ OPTIONS: ## curio toolbox ``` +Error parsing language NAME: curio toolbox - Tool Box for Curio @@ -1304,6 +1383,7 @@ OPTIONS: ### curio toolbox fix-msg ``` +Error parsing language NAME: curio toolbox fix-msg - Updated DB with message data missing from chain node @@ -1317,6 +1397,7 @@ OPTIONS: ### curio toolbox register-pdp-service-provider ``` +Error parsing language NAME: curio toolbox register-pdp-service-provider - Register a PDP service provider with Filecoin Service Registry Contract diff --git a/itests/curio_test.go b/itests/curio_test.go index 117c8e2bf..36dc776a4 100644 --- a/itests/curio_test.go +++ b/itests/curio_test.go @@ -114,9 +114,9 @@ func TestCurioHappyPath(t *testing.T) { require.NoError(t, err) require.NotNil(t, baseCfg.Addresses) - require.GreaterOrEqual(t, len(baseCfg.Addresses), 1) + require.GreaterOrEqual(t, len(baseCfg.Addresses.Get()), 1) - require.Contains(t, baseCfg.Addresses[0].MinerAddresses, maddr.String()) + require.Contains(t, baseCfg.Addresses.Get()[0].MinerAddresses, maddr.String()) baseCfg.Batching.PreCommit.Timeout = time.Second baseCfg.Batching.Commit.Timeout = time.Second diff --git a/itests/dyncfg_test.go b/itests/dyncfg_test.go index 6f7c1a584..8b70249f1 100644 --- a/itests/dyncfg_test.go +++ b/itests/dyncfg_test.go @@ -5,7 +5,6 @@ import ( "testing" "time" - "github.com/BurntSushi/toml" "github.com/stretchr/testify/require" "github.com/filecoin-project/curio/deps" @@ -54,8 +53,8 @@ func TestDynamicConfig(t *testing.T) { } -func setTestConfig(ctx context.Context, cdb 
*harmonydb.DB, config *config.CurioConfig) error { - tomlData, err := toml.Marshal(config) +func setTestConfig(ctx context.Context, cdb *harmonydb.DB, cfg *config.CurioConfig) error { + tomlData, err := config.TransparentMarshal(cfg) if err != nil { return err } diff --git a/lib/multictladdr/address.go b/lib/multictladdr/address.go index fe06a32bf..18b9037e5 100644 --- a/lib/multictladdr/address.go +++ b/lib/multictladdr/address.go @@ -6,68 +6,91 @@ import ( "github.com/filecoin-project/go-address" "github.com/filecoin-project/curio/deps/config" - - "github.com/filecoin-project/lotus/api" ) -func AddressSelector(addrConf []config.CurioAddresses) func() (*MultiAddressSelector, error) { +type AddressConfig struct { + PreCommitControl []address.Address + CommitControl []address.Address + TerminateControl []address.Address + DealPublishControl []address.Address + + DisableOwnerFallback bool + DisableWorkerFallback bool +} + +func AddressSelector(addrConf *config.Dynamic[[]config.CurioAddresses]) func() (*MultiAddressSelector, error) { return func() (*MultiAddressSelector, error) { as := &MultiAddressSelector{ - MinerMap: make(map[address.Address]api.AddressConfig), - } - if addrConf == nil { - return as, nil + MinerMap: make(map[address.Address]AddressConfig), } + makeMinerMap := func() error { + as.mmLock.Lock() + defer as.mmLock.Unlock() + if addrConf == nil { + return nil + } - for _, addrConf := range addrConf { - for _, minerID := range addrConf.MinerAddresses { - tmp := api.AddressConfig{ - DisableOwnerFallback: addrConf.DisableOwnerFallback, - DisableWorkerFallback: addrConf.DisableWorkerFallback, - } + for _, addrConf := range addrConf.Get() { + for _, minerID := range addrConf.MinerAddresses { + tmp := AddressConfig{ + DisableOwnerFallback: addrConf.DisableOwnerFallback, + DisableWorkerFallback: addrConf.DisableWorkerFallback, + } - for _, s := range addrConf.PreCommitControl { - addr, err := address.NewFromString(s) - if err != nil { - return nil, 
xerrors.Errorf("parsing precommit control address: %w", err) + for _, s := range addrConf.PreCommitControl { + addr, err := address.NewFromString(s) + if err != nil { + return xerrors.Errorf("parsing precommit control address: %w", err) + } + + tmp.PreCommitControl = append(tmp.PreCommitControl, addr) } - tmp.PreCommitControl = append(tmp.PreCommitControl, addr) - } + for _, s := range addrConf.CommitControl { + addr, err := address.NewFromString(s) + if err != nil { + return xerrors.Errorf("parsing commit control address: %w", err) + } - for _, s := range addrConf.CommitControl { - addr, err := address.NewFromString(s) - if err != nil { - return nil, xerrors.Errorf("parsing commit control address: %w", err) + tmp.CommitControl = append(tmp.CommitControl, addr) } - tmp.CommitControl = append(tmp.CommitControl, addr) - } + for _, s := range addrConf.DealPublishControl { + addr, err := address.NewFromString(s) + if err != nil { + return xerrors.Errorf("parsing deal publish control address: %w", err) + } - for _, s := range addrConf.DealPublishControl { - addr, err := address.NewFromString(s) - if err != nil { - return nil, xerrors.Errorf("parsing deal publish control address: %w", err) + tmp.DealPublishControl = append(tmp.DealPublishControl, addr) } - tmp.DealPublishControl = append(tmp.DealPublishControl, addr) - } + for _, s := range addrConf.TerminateControl { + addr, err := address.NewFromString(s) + if err != nil { + return xerrors.Errorf("parsing terminate control address: %w", err) + } - for _, s := range addrConf.TerminateControl { - addr, err := address.NewFromString(s) + tmp.TerminateControl = append(tmp.TerminateControl, addr) + } + a, err := address.NewFromString(minerID) if err != nil { - return nil, xerrors.Errorf("parsing terminate control address: %w", err) + return xerrors.Errorf("parsing miner address %s: %w", minerID, err) } - - tmp.TerminateControl = append(tmp.TerminateControl, addr) - } - a, err := address.NewFromString(minerID) - if err != nil 
{ - return nil, xerrors.Errorf("parsing miner address %s: %w", minerID, err) + as.MinerMap[a] = tmp } - as.MinerMap[a] = tmp } + return nil } + err := makeMinerMap() + if err != nil { + return nil, err + } + addrConf.OnChange(func() { + err := makeMinerMap() + if err != nil { + log.Errorf("error making miner map: %s", err) + } + }) return as, nil } } diff --git a/lib/multictladdr/multiaddresses.go b/lib/multictladdr/multiaddresses.go index af751ff17..c14f3720e 100644 --- a/lib/multictladdr/multiaddresses.go +++ b/lib/multictladdr/multiaddresses.go @@ -2,6 +2,7 @@ package multictladdr import ( "context" + "sync" logging "github.com/ipfs/go-log/v2" @@ -17,7 +18,8 @@ import ( var log = logging.Logger("curio/multictladdr") type MultiAddressSelector struct { - MinerMap map[address.Address]api.AddressConfig + MinerMap map[address.Address]AddressConfig + mmLock sync.RWMutex } func (as *MultiAddressSelector) AddressFor(ctx context.Context, a ctladdr.NodeApi, minerID address.Address, mi api.MinerInfo, use api.AddrUse, goodFunds, minFunds abi.TokenAmount) (address.Address, abi.TokenAmount, error) { @@ -27,7 +29,12 @@ func (as *MultiAddressSelector) AddressFor(ctx context.Context, a ctladdr.NodeAp return mi.Worker, big.Zero(), nil } - tmp := as.MinerMap[minerID] + as.mmLock.RLock() + tmp, ok := as.MinerMap[minerID] + as.mmLock.RUnlock() + if !ok { + return mi.Worker, big.Zero(), nil + } var addrs []address.Address switch use { diff --git a/market/libp2p/libp2p.go b/market/libp2p/libp2p.go index d30b97c32..cbe9215c2 100644 --- a/market/libp2p/libp2p.go +++ b/market/libp2p/libp2p.go @@ -335,7 +335,7 @@ type mk12libp2pAPI interface { StateMinerInfo(context.Context, address.Address, types.TipSetKey) (minerInfo api.MinerInfo, err error) } -func NewDealProvider(ctx context.Context, db *harmonydb.DB, cfg *config.CurioConfig, prov *mk12.MK12, api mk12libp2pAPI, sender *message.Sender, miners []address.Address, machine string, shutdownChan chan struct{}) { +func NewDealProvider(ctx 
context.Context, db *harmonydb.DB, cfg *config.CurioConfig, prov *mk12.MK12, api mk12libp2pAPI, sender *message.Sender, miners *config.Dynamic[[]address.Address], machine string, shutdownChan chan struct{}) { //Check in the DB every minute who owns the libp2p ticket //if it was us, and is still us, and we're running DealProvider already do nothing, just keep polling //if it was us, and no longer is us, shut down DealProvider @@ -440,7 +440,7 @@ func NewDealProvider(ctx context.Context, db *harmonydb.DB, cfg *config.CurioCon } } -func makeDealProvider(ctx context.Context, db *harmonydb.DB, cfg *config.CurioConfig, prov *mk12.MK12, api mk12libp2pAPI, sender *message.Sender, miners []address.Address, machine string) error { +func makeDealProvider(ctx context.Context, db *harmonydb.DB, cfg *config.CurioConfig, prov *mk12.MK12, api mk12libp2pAPI, sender *message.Sender, miners *config.Dynamic[[]address.Address], machine string) error { h, publicAddr, err := NewLibp2pHost(ctx, db, cfg, machine) if err != nil { return xerrors.Errorf("failed to start libp2p nodes: %s", err) @@ -469,16 +469,18 @@ func makeDealProvider(ctx context.Context, db *harmonydb.DB, cfg *config.CurioCo go p.Start(ctx, h) - nonDisabledMiners := lo.Filter(miners, func(addr address.Address, _ int) bool { - return !lo.Contains(disabledMiners, addr) + go p.checkMinerInfos(ctx, sender, publicAddr.Libp2pAddr, miners, disabledMiners) + miners.OnChange(func() { + go p.checkMinerInfos(ctx, sender, publicAddr.Libp2pAddr, miners, disabledMiners) }) - - go p.checkMinerInfos(ctx, sender, publicAddr.Libp2pAddr, nonDisabledMiners) return nil } -func (p *DealProvider) checkMinerInfos(ctx context.Context, sender *message.Sender, announceAddr multiaddr.Multiaddr, miners []address.Address) { - for _, m := range miners { +func (p *DealProvider) checkMinerInfos(ctx context.Context, sender *message.Sender, announceAddr multiaddr.Multiaddr, miners *config.Dynamic[[]address.Address], disabledMiners []address.Address) { + 
nonDisabledMiners := lo.Filter(miners.Get(), func(addr address.Address, _ int) bool { + return !lo.Contains(disabledMiners, addr) + }) + for _, m := range nonDisabledMiners { mi, err := p.api.StateMinerInfo(ctx, m, types.EmptyTSK) if err != nil { log.Errorw("failed to get miner info", "miner", m, "error", err) diff --git a/market/mk12/mk12.go b/market/mk12/mk12.go index 851bfdea6..f3f3c0ce4 100644 --- a/market/mk12/mk12.go +++ b/market/mk12/mk12.go @@ -703,8 +703,8 @@ FROM joined return false, xerrors.Errorf("failed to query market pipeline backpressure stats: %w", err) } - if cfg.MaxMarketRunningPipelines != 0 && runningPipelines > int64(cfg.MaxMarketRunningPipelines) { - log.Infow("backpressure", "reason", "too many running market pipelines", "running_pipelines", runningPipelines, "max", cfg.MaxMarketRunningPipelines) + if cfg.MaxMarketRunningPipelines.Get() != 0 && runningPipelines > int64(cfg.MaxMarketRunningPipelines.Get()) { + log.Infow("backpressure", "reason", "too many running market pipelines", "running_pipelines", runningPipelines, "max", cfg.MaxMarketRunningPipelines.Get()) return true, nil } @@ -713,8 +713,8 @@ FROM joined return true, nil } - if cfg.MaxQueueCommP != 0 && verifyPending > int64(cfg.MaxQueueCommP) { - log.Infow("backpressure", "reason", "too many pending CommP tasks", "pending_commp", verifyPending, "max", cfg.MaxQueueCommP) + if cfg.MaxQueueCommP.Get() != 0 && verifyPending > int64(cfg.MaxQueueCommP.Get()) { + log.Infow("backpressure", "reason", "too many pending CommP tasks", "pending_commp", verifyPending, "max", cfg.MaxQueueCommP.Get()) return true, nil } @@ -750,18 +750,18 @@ FROM joined return false, xerrors.Errorf("counting buffered sectors: %w", err) } - if cfg.MaxQueueDealSector != 0 && waitDealSectors > cfg.MaxQueueDealSector { - log.Infow("backpressure", "reason", "too many wait deal sectors", "wait_deal_sectors", waitDealSectors, "max", cfg.MaxQueueDealSector) + if cfg.MaxQueueDealSector.Get() != 0 && waitDealSectors > 
cfg.MaxQueueDealSector.Get() { + log.Infow("backpressure", "reason", "too many wait deal sectors", "wait_deal_sectors", waitDealSectors, "max", cfg.MaxQueueDealSector.Get()) return true, nil } - if cfg.MaxQueueSnapEncode != 0 && bufferedEncode > cfg.MaxQueueSnapEncode { - log.Infow("backpressure", "reason", "too many encode tasks", "buffered", bufferedEncode, "max", cfg.MaxQueueSnapEncode) + if cfg.MaxQueueSnapEncode.Get() != 0 && bufferedEncode > cfg.MaxQueueSnapEncode.Get() { + log.Infow("backpressure", "reason", "too many encode tasks", "buffered", bufferedEncode, "max", cfg.MaxQueueSnapEncode.Get()) return true, nil } - if cfg.MaxQueueSnapProve != 0 && bufferedProve > cfg.MaxQueueSnapProve { - log.Infow("backpressure", "reason", "too many prove tasks", "buffered", bufferedProve, "max", cfg.MaxQueueSnapProve) + if cfg.MaxQueueSnapProve.Get() != 0 && bufferedProve > cfg.MaxQueueSnapProve.Get() { + log.Infow("backpressure", "reason", "too many prove tasks", "buffered", bufferedProve, "max", cfg.MaxQueueSnapProve.Get()) return true, nil } } else { @@ -802,21 +802,21 @@ FROM joined return false, xerrors.Errorf("counting buffered sectors: %w", err) } - if cfg.MaxQueueDealSector != 0 && waitDealSectors > cfg.MaxQueueDealSector { - log.Infow("backpressure", "reason", "too many wait deal sectors", "wait_deal_sectors", waitDealSectors, "max", cfg.MaxQueueDealSector) + if cfg.MaxQueueDealSector.Get() != 0 && waitDealSectors > cfg.MaxQueueDealSector.Get() { + log.Infow("backpressure", "reason", "too many wait deal sectors", "wait_deal_sectors", waitDealSectors, "max", cfg.MaxQueueDealSector.Get()) return true, nil } - if bufferedSDR > cfg.MaxQueueSDR { - log.Infow("backpressure", "reason", "too many SDR tasks", "buffered", bufferedSDR, "max", cfg.MaxQueueSDR) + if bufferedSDR > cfg.MaxQueueSDR.Get() { + log.Infow("backpressure", "reason", "too many SDR tasks", "buffered", bufferedSDR, "max", cfg.MaxQueueSDR.Get()) return true, nil } - if cfg.MaxQueueTrees != 0 && 
bufferedTrees > cfg.MaxQueueTrees { - log.Infow("backpressure", "reason", "too many tree tasks", "buffered", bufferedTrees, "max", cfg.MaxQueueTrees) + if cfg.MaxQueueTrees.Get() != 0 && bufferedTrees > cfg.MaxQueueTrees.Get() { + log.Infow("backpressure", "reason", "too many tree tasks", "buffered", bufferedTrees, "max", cfg.MaxQueueTrees.Get()) return true, nil } - if cfg.MaxQueuePoRep != 0 && bufferedPoRep > cfg.MaxQueuePoRep { - log.Infow("backpressure", "reason", "too many PoRep tasks", "buffered", bufferedPoRep, "max", cfg.MaxQueuePoRep) + if cfg.MaxQueuePoRep.Get() != 0 && bufferedPoRep > cfg.MaxQueuePoRep.Get() { + log.Infow("backpressure", "reason", "too many PoRep tasks", "buffered", bufferedPoRep, "max", cfg.MaxQueuePoRep.Get()) return true, nil } } diff --git a/market/mk20/mk20.go b/market/mk20/mk20.go index 3392851e9..04ef59a12 100644 --- a/market/mk20/mk20.go +++ b/market/mk20/mk20.go @@ -44,20 +44,20 @@ type MK20API interface { } type MK20 struct { - miners []address.Address + miners *config.Dynamic[[]address.Address] DB *harmonydb.DB api MK20API ethClient *ethclient.Client si paths.SectorIndex cfg *config.CurioConfig - sm map[address.Address]abi.SectorSize + sm *config.Dynamic[map[address.Address]abi.SectorSize] as *multictladdr.MultiAddressSelector sc *ffi.SealCalls maxParallelUploads *atomic.Int64 unknowClient bool } -func NewMK20Handler(miners []address.Address, db *harmonydb.DB, si paths.SectorIndex, mapi MK20API, ethClient *ethclient.Client, cfg *config.CurioConfig, as *multictladdr.MultiAddressSelector, sc *ffi.SealCalls) (*MK20, error) { +func NewMK20Handler(miners *config.Dynamic[[]address.Address], db *harmonydb.DB, si paths.SectorIndex, mapi MK20API, ethClient *ethclient.Client, cfg *config.CurioConfig, as *multictladdr.MultiAddressSelector, sc *ffi.SealCalls) (*MK20, error) { ctx := context.Background() // Ensure MinChunk size and max chunkSize is a power of 2 @@ -69,17 +69,29 @@ func NewMK20Handler(miners []address.Address, db 
*harmonydb.DB, si paths.SectorI return nil, xerrors.Errorf("MaximumChunkSize must be a power of 2") } - sm := make(map[address.Address]abi.SectorSize) - - for _, m := range miners { - info, err := mapi.StateMinerInfo(ctx, m, types.EmptyTSK) - if err != nil { - return nil, xerrors.Errorf("getting miner info: %w", err) - } - if _, ok := sm[m]; !ok { - sm[m] = info.SectorSize + sm := config.NewDynamic(make(map[address.Address]abi.SectorSize)) + smUpdate := func() error { + smTmp := make(map[address.Address]abi.SectorSize) + for _, m := range miners.Get() { + info, err := mapi.StateMinerInfo(ctx, m, types.EmptyTSK) + if err != nil { + return xerrors.Errorf("getting miner info: %w", err) + } + if _, ok := smTmp[m]; !ok { + smTmp[m] = info.SectorSize + } } + sm.Set(smTmp) + return nil } + if err := smUpdate(); err != nil { + return nil, err + } + miners.OnChange(func() { + if err := smUpdate(); err != nil { + log.Errorf("error updating sm: %s", err) + } + }) go markDownloaded(ctx, db) go removeNotFinalizedUploads(ctx, db) @@ -257,7 +269,7 @@ func (m *MK20) processDDODeal(ctx context.Context, deal *Deal, tx *harmonydb.Tx) } func (m *MK20) sanitizeDDODeal(ctx context.Context, deal *Deal) (*ProviderDealRejectionInfo, error) { - if !lo.Contains(m.miners, deal.Products.DDOV1.Provider) { + if !lo.Contains(m.miners.Get(), deal.Products.DDOV1.Provider) { return &ProviderDealRejectionInfo{ HTTPCode: ErrBadProposal, Reason: "Provider not available in Curio cluster", @@ -294,7 +306,7 @@ func (m *MK20) sanitizeDDODeal(ctx context.Context, deal *Deal) (*ProviderDealRe }, nil } - if size > abi.PaddedPieceSize(m.sm[deal.Products.DDOV1.Provider]) { + if size > abi.PaddedPieceSize(m.sm.Get()[deal.Products.DDOV1.Provider]) { return &ProviderDealRejectionInfo{ HTTPCode: ErrBadProposal, Reason: "Deal size is larger than the miner's sector size", @@ -364,7 +376,7 @@ func (m *MK20) sanitizeDDODeal(ctx context.Context, deal *Deal) (*ProviderDealRe }, xerrors.Errorf("getting provider address: 
%w", err) } - if !lo.Contains(m.miners, prov) { + if !lo.Contains(m.miners.Get(), prov) { return &ProviderDealRejectionInfo{ HTTPCode: ErrBadProposal, Reason: "Allocation provider does not belong to the list of miners in Curio cluster", diff --git a/market/storageingest/deal_ingest_seal.go b/market/storageingest/deal_ingest_seal.go index 7c62cd4bd..d91f08ec5 100644 --- a/market/storageingest/deal_ingest_seal.go +++ b/market/storageingest/deal_ingest_seal.go @@ -85,7 +85,7 @@ type PieceIngester struct { addToID map[address.Address]int64 idToAddr map[abi.ActorID]address.Address minerDetails map[int64]*mdetails - maxWaitTime time.Duration + maxWaitTime *config.Dynamic[time.Duration] expectedSealDuration abi.ChainEpoch } @@ -190,7 +190,7 @@ func (p *PieceIngester) Seal() error { log.Debugf("start sealing sector %d of miner %s: %s", sector.number, p.idToAddr[sector.miner].String(), "sector full") return true } - if time.Since(*sector.openedAt) > p.maxWaitTime { + if time.Since(*sector.openedAt) > p.maxWaitTime.Get() { log.Debugf("start sealing sector %d of miner %s: %s", sector.number, p.idToAddr[sector.miner].String(), "MaxWaitTime reached") return true } diff --git a/market/storageingest/deal_ingest_snap.go b/market/storageingest/deal_ingest_snap.go index d271f8a0c..3e20a4c68 100644 --- a/market/storageingest/deal_ingest_snap.go +++ b/market/storageingest/deal_ingest_snap.go @@ -47,7 +47,7 @@ type PieceIngesterSnap struct { addToID map[address.Address]int64 idToAddr map[abi.ActorID]address.Address minerDetails map[int64]*mdetails - maxWaitTime time.Duration + maxWaitTime *config.Dynamic[time.Duration] expectedSnapDuration abi.ChainEpoch } @@ -146,7 +146,7 @@ func (p *PieceIngesterSnap) Seal() error { log.Debugf("start sealing sector %d of miner %s: %s", sector.number, p.idToAddr[sector.miner].String(), "sector full") return true } - if time.Since(*sector.openedAt) > p.maxWaitTime { + if time.Since(*sector.openedAt) > p.maxWaitTime.Get() { log.Debugf("start sealing 
sector %d of miner %s: %s", sector.number, p.idToAddr[sector.miner].String(), "MaxWaitTime reached") return true } diff --git a/tasks/f3/f3_task.go b/tasks/f3/f3_task.go index 86d280ae8..248bec11f 100644 --- a/tasks/f3/f3_task.go +++ b/tasks/f3/f3_task.go @@ -13,6 +13,7 @@ import ( "github.com/filecoin-project/go-f3/manifest" "github.com/filecoin-project/curio/deps" + "github.com/filecoin-project/curio/deps/config" "github.com/filecoin-project/curio/harmony/harmonydb" "github.com/filecoin-project/curio/harmony/harmonytask" "github.com/filecoin-project/curio/harmony/resources" @@ -52,10 +53,10 @@ type F3Task struct { leaseTerm uint64 - actors map[dtypes.MinerAddress]bool + actors *config.Dynamic[map[dtypes.MinerAddress]bool] } -func NewF3Task(db *harmonydb.DB, api F3ParticipationAPI, actors map[dtypes.MinerAddress]bool) *F3Task { +func NewF3Task(db *harmonydb.DB, api F3ParticipationAPI, actors *config.Dynamic[map[dtypes.MinerAddress]bool]) *F3Task { return &F3Task{ db: db, api: api, @@ -210,22 +211,26 @@ func (f *F3Task) TypeDetails() harmonytask.TaskTypeDetails { } func (f *F3Task) Adder(taskFunc harmonytask.AddTaskFunc) { - for a := range f.actors { - spid, err := address.IDFromAddress(address.Address(a)) - if err != nil { - log.Errorw("failed to parse miner address", "miner", a, "error", err) - continue - } - - taskFunc(func(id harmonytask.TaskID, tx *harmonydb.Tx) (shouldCommit bool, seriousError error) { - n, err := tx.Exec("INSERT INTO f3_tasks (sp_id, task_id) VALUES ($1, $2) ON CONFLICT DO NOTHING", spid, id) + f3TheActors := func() { + for a := range f.actors.Get() { + spid, err := address.IDFromAddress(address.Address(a)) if err != nil { - return false, err + log.Errorw("failed to parse miner address", "miner", a, "error", err) + continue } - return n > 0, nil - }) + taskFunc(func(id harmonytask.TaskID, tx *harmonydb.Tx) (shouldCommit bool, seriousError error) { + n, err := tx.Exec("INSERT INTO f3_tasks (sp_id, task_id) VALUES ($1, $2) ON CONFLICT DO 
NOTHING", spid, id) + if err != nil { + return false, err + } + + return n > 0, nil + }) + } } + f3TheActors() + f.actors.OnChange(f3TheActors) } func (f *F3Task) GetSpid(db *harmonydb.DB, taskID int64) string { diff --git a/tasks/storage-market/market_balance.go b/tasks/storage-market/market_balance.go index 96511ac83..584a40f2d 100644 --- a/tasks/storage-market/market_balance.go +++ b/tasks/storage-market/market_balance.go @@ -36,57 +36,87 @@ type mbalanceApi interface { type BalanceManager struct { api mbalanceApi - miners map[string][]address.Address + miners *config.Dynamic[map[string][]address.Address] cfg *config.CurioConfig sender *message.Sender - bmcfg map[address.Address]config.BalanceManagerConfig + bmcfg *config.Dynamic[map[address.Address]config.BalanceManagerConfig] } -func NewBalanceManager(api mbalanceApi, miners []address.Address, cfg *config.CurioConfig, sender *message.Sender) (*BalanceManager, error) { - var mk12disabledMiners []address.Address +func NewBalanceManager(api mbalanceApi, miners *config.Dynamic[[]address.Address], cfg *config.CurioConfig, sender *message.Sender) (*BalanceManager, error) { - for _, m := range cfg.Market.StorageMarketConfig.MK12.DisabledMiners { - maddr, err := address.NewFromString(m) - if err != nil { - return nil, xerrors.Errorf("failed to parse miner string: %s", err) + computeMMap := func() (map[string][]address.Address, error) { + var mk12disabledMiners []address.Address + + for _, m := range cfg.Market.StorageMarketConfig.MK12.DisabledMiners { + maddr, err := address.NewFromString(m) + if err != nil { + return nil, xerrors.Errorf("failed to parse miner string: %s", err) + } + mk12disabledMiners = append(mk12disabledMiners, maddr) } - mk12disabledMiners = append(mk12disabledMiners, maddr) - } - mk12enabled, _ := lo.Difference(miners, mk12disabledMiners) + mk12enabled, _ := lo.Difference(miners.Get(), mk12disabledMiners) - var mk20disabledMiners []address.Address - for _, m := range 
cfg.Market.StorageMarketConfig.MK20.DisabledMiners { - maddr, err := address.NewFromString(m) - if err != nil { - return nil, xerrors.Errorf("failed to parse miner string: %s", err) + var mk20disabledMiners []address.Address + for _, m := range cfg.Market.StorageMarketConfig.MK20.DisabledMiners { + maddr, err := address.NewFromString(m) + if err != nil { + return nil, xerrors.Errorf("failed to parse miner string: %s", err) + } + mk20disabledMiners = append(mk20disabledMiners, maddr) } - mk20disabledMiners = append(mk20disabledMiners, maddr) + mk20enabled, _ := lo.Difference(miners.Get(), mk20disabledMiners) + + mmap := make(map[string][]address.Address) + mmap[mk12Str] = mk12enabled + mmap[mk20Str] = mk20enabled + return mmap, nil + } + + mmap, err := computeMMap() + if err != nil { + return nil, err } - mk20enabled, _ := lo.Difference(miners, mk20disabledMiners) - - mmap := make(map[string][]address.Address) - mmap[mk12Str] = mk12enabled - mmap[mk20Str] = mk20enabled - bmcfg := make(map[address.Address]config.BalanceManagerConfig) - for _, a := range cfg.Addresses { - if len(a.MinerAddresses) > 0 { - for _, m := range a.MinerAddresses { - maddr, err := address.NewFromString(m) - if err != nil { - return nil, xerrors.Errorf("failed to parse miner string: %s", err) + mmapDynamic := config.NewDynamic(mmap) + miners.OnChange(func() { + mmap, err := computeMMap() + if err != nil { + log.Errorf("error computeMMap: %s", err) + } + mmapDynamic.Set(mmap) + }) + bmcfgDynamic := config.NewDynamic(make(map[address.Address]config.BalanceManagerConfig)) + forMinerID := func() error { + bmcfg := make(map[address.Address]config.BalanceManagerConfig) + for _, a := range cfg.Addresses.Get() { + if len(a.MinerAddresses) > 0 { + for _, m := range a.MinerAddresses { + maddr, err := address.NewFromString(m) + if err != nil { + return xerrors.Errorf("failed to parse miner string: %s", err) + } + bmcfg[maddr] = a.BalanceManager } - bmcfg[maddr] = a.BalanceManager } } + 
bmcfgDynamic.Set(bmcfg) + return nil } + if err := forMinerID(); err != nil { + return nil, err + } + cfg.Addresses.OnChange(func() { + if err := forMinerID(); err != nil { + log.Errorf("error forMinerID: %s", err) + } + }) return &BalanceManager{ api: api, cfg: cfg, - miners: mmap, + miners: mmapDynamic, sender: sender, - bmcfg: bmcfg, + bmcfg: bmcfgDynamic, }, nil } @@ -126,22 +156,21 @@ var _ = harmonytask.Reg(&BalanceManager{}) func (m *BalanceManager) dealMarketBalance(ctx context.Context) error { - for module, miners := range m.miners { + for module, miners := range m.miners.Get() { if module != mk12Str { continue } for _, miner := range miners { - miner := miner - lowthreshold := abi.TokenAmount(m.bmcfg[miner].MK12Collateral.CollateralLowThreshold) - highthreshold := abi.TokenAmount(m.bmcfg[miner].MK12Collateral.CollateralHighThreshold) + lowthreshold := abi.TokenAmount(m.bmcfg.Get()[miner].MK12Collateral.CollateralLowThreshold) + highthreshold := abi.TokenAmount(m.bmcfg.Get()[miner].MK12Collateral.CollateralHighThreshold) - if m.bmcfg[miner].MK12Collateral.DealCollateralWallet == "" { + if m.bmcfg.Get()[miner].MK12Collateral.DealCollateralWallet == "" { blog.Errorf("Deal collateral wallet is not set for miner %s", miner.String()) continue } - wallet, err := address.NewFromString(m.bmcfg[miner].MK12Collateral.DealCollateralWallet) + wallet, err := address.NewFromString(m.bmcfg.Get()[miner].MK12Collateral.DealCollateralWallet) if err != nil { return xerrors.Errorf("failed to parse deal collateral wallet: %w", err) } diff --git a/tasks/storage-market/storage_market.go b/tasks/storage-market/storage_market.go index 831d6c194..652ce901e 100644 --- a/tasks/storage-market/storage_market.go +++ b/tasks/storage-market/storage_market.go @@ -15,6 +15,7 @@ import ( "time" "github.com/ethereum/go-ethereum/ethclient" + "github.com/google/go-cmp/cmp" "github.com/ipfs/go-cid" logging "github.com/ipfs/go-log/v2" "github.com/yugabyte/pgx/v5" @@ -70,7 +71,7 @@ type 
CurioStorageDealMarket struct { cfg *config.CurioConfig db *harmonydb.DB pin storageingest.Ingester - miners []address.Address + miners *config.Dynamic[[]address.Address] api storageMarketAPI MK12Handler *mk12.MK12 MK20Handler *mk20.MK20 @@ -116,7 +117,7 @@ type MK12Pipeline struct { Offset *int64 `db:"sector_offset"` } -func NewCurioStorageDealMarket(miners []address.Address, db *harmonydb.DB, cfg *config.CurioConfig, ethClient *ethclient.Client, si paths.SectorIndex, mapi storageMarketAPI, as *multictladdr.MultiAddressSelector, sc *ffi.SealCalls) *CurioStorageDealMarket { +func NewCurioStorageDealMarket(miners *config.Dynamic[[]address.Address], db *harmonydb.DB, cfg *config.CurioConfig, ethClient *ethclient.Client, si paths.SectorIndex, mapi storageMarketAPI, as *multictladdr.MultiAddressSelector, sc *ffi.SealCalls) *CurioStorageDealMarket { urls := make(map[string]http.Header) for _, curl := range cfg.Market.StorageMarketConfig.PieceLocator { @@ -139,13 +140,14 @@ func NewCurioStorageDealMarket(miners []address.Address, db *harmonydb.DB, cfg * func (d *CurioStorageDealMarket) StartMarket(ctx context.Context) error { var err error - d.MK12Handler, err = mk12.NewMK12Handler(d.miners, d.db, d.si, d.api, d.cfg, d.as) + d.MK12Handler, err = mk12.NewMK12Handler(d.miners.Get(), d.db, d.si, d.api, d.cfg, d.as) if err != nil { return err } + prevMiners := d.miners.Get() if d.MK12Handler != nil { - for _, miner := range d.miners { + for _, miner := range d.miners.Get() { // Not Dynamic for MK12 _, err = d.MK12Handler.GetAsk(ctx, miner) if err != nil { if strings.Contains(err.Error(), "no ask found") { @@ -165,6 +167,12 @@ func (d *CurioStorageDealMarket) StartMarket(ctx context.Context) error { } } } + d.miners.OnChange(func() { + newMiners := d.miners.Get() + if !cmp.Equal(prevMiners, newMiners, config.BigIntComparer) { + log.Errorf("Miners changed from %d to %d. . 
Restart required for Market 1.2 Ingest to work.", len(prevMiners), len(newMiners)) + } + }) } d.MK20Handler, err = mk20.NewMK20Handler(d.miners, d.db, d.si, d.api, d.ethClient, d.cfg, d.as, d.sc) @@ -172,17 +180,22 @@ func (d *CurioStorageDealMarket) StartMarket(ctx context.Context) error { return err } - if len(d.miners) > 0 { + if len(prevMiners) > 0 { if d.cfg.Ingest.DoSnap { - d.pin, err = storageingest.NewPieceIngesterSnap(ctx, d.db, d.api, d.miners, d.cfg) + d.pin, err = storageingest.NewPieceIngesterSnap(ctx, d.db, d.api, prevMiners, d.cfg) } else { - d.pin, err = storageingest.NewPieceIngester(ctx, d.db, d.api, d.miners, d.cfg) + d.pin, err = storageingest.NewPieceIngester(ctx, d.db, d.api, prevMiners, d.cfg) } if err != nil { return err } } - + d.miners.OnChange(func() { + newMiners := d.miners.Get() + if len(prevMiners) != len(newMiners) && (len(prevMiners) == 0 || len(newMiners) == 0) { + log.Errorf("Miners changed from %d to %d. . Restart required for Market Ingest to work.", len(prevMiners), len(newMiners)) + } + }) go d.runPoller(ctx) return nil diff --git a/tasks/window/compute_task.go b/tasks/window/compute_task.go index 2e74aab19..63847bc43 100644 --- a/tasks/window/compute_task.go +++ b/tasks/window/compute_task.go @@ -19,6 +19,7 @@ import ( "github.com/filecoin-project/go-state-types/dline" "github.com/filecoin-project/go-state-types/network" + "github.com/filecoin-project/curio/deps/config" "github.com/filecoin-project/curio/harmony/harmonydb" "github.com/filecoin-project/curio/harmony/harmonytask" "github.com/filecoin-project/curio/harmony/resources" @@ -73,7 +74,7 @@ type WdPostTask struct { windowPoStTF promise.Promise[harmonytask.AddTaskFunc] - actors map[dtypes.MinerAddress]bool + actors *config.Dynamic[map[dtypes.MinerAddress]bool] max int parallel chan struct{} challengeReadTimeout time.Duration @@ -93,7 +94,7 @@ func NewWdPostTask(db *harmonydb.DB, verifier storiface.Verifier, paramck func() (bool, error), pcs 
*chainsched.CurioChainSched, - actors map[dtypes.MinerAddress]bool, + actors *config.Dynamic[map[dtypes.MinerAddress]bool], max int, parallel int, challengeReadTimeout time.Duration, @@ -451,7 +452,7 @@ func (t *WdPostTask) Adder(taskFunc harmonytask.AddTaskFunc) { } func (t *WdPostTask) processHeadChange(ctx context.Context, revert, apply *types.TipSet) error { - for act := range t.actors { + for act := range t.actors.Get() { maddr := address.Address(act) aid, err := address.IDFromAddress(maddr) diff --git a/tasks/window/recover_task.go b/tasks/window/recover_task.go index 07997bf13..b9fa018ed 100644 --- a/tasks/window/recover_task.go +++ b/tasks/window/recover_task.go @@ -11,6 +11,7 @@ import ( "github.com/filecoin-project/go-state-types/builtin" "github.com/filecoin-project/go-state-types/dline" + "github.com/filecoin-project/curio/deps/config" "github.com/filecoin-project/curio/harmony/harmonydb" "github.com/filecoin-project/curio/harmony/harmonytask" "github.com/filecoin-project/curio/harmony/resources" @@ -35,7 +36,7 @@ type WdPostRecoverDeclareTask struct { maxDeclareRecoveriesGasFee types.FIL as *multictladdr.MultiAddressSelector - actors map[dtypes.MinerAddress]bool + actors *config.Dynamic[map[dtypes.MinerAddress]bool] startCheckTF promise.Promise[harmonytask.AddTaskFunc] } @@ -65,7 +66,7 @@ func NewWdPostRecoverDeclareTask(sender *message.Sender, pcs *chainsched.CurioChainSched, maxDeclareRecoveriesGasFee types.FIL, - actors map[dtypes.MinerAddress]bool) (*WdPostRecoverDeclareTask, error) { + actors *config.Dynamic[map[dtypes.MinerAddress]bool]) (*WdPostRecoverDeclareTask, error) { t := &WdPostRecoverDeclareTask{ sender: sender, db: db, @@ -249,7 +250,7 @@ func (w *WdPostRecoverDeclareTask) Adder(taskFunc harmonytask.AddTaskFunc) { func (w *WdPostRecoverDeclareTask) processHeadChange(ctx context.Context, revert, apply *types.TipSet) error { tf := w.startCheckTF.Val(ctx) - for act := range w.actors { + for act := range w.actors.Get() { maddr := 
address.Address(act) aid, err := address.IDFromAddress(maddr) diff --git a/tasks/winning/winning_task.go b/tasks/winning/winning_task.go index 504fa648d..b86b45019 100644 --- a/tasks/winning/winning_task.go +++ b/tasks/winning/winning_task.go @@ -22,6 +22,7 @@ import ( "github.com/filecoin-project/curio/build" "github.com/filecoin-project/curio/deps" + "github.com/filecoin-project/curio/deps/config" "github.com/filecoin-project/curio/harmony/harmonydb" "github.com/filecoin-project/curio/harmony/harmonytask" "github.com/filecoin-project/curio/harmony/resources" @@ -51,7 +52,7 @@ type WinPostTask struct { paramsReady func() (bool, error) api WinPostAPI - actors map[dtypes.MinerAddress]bool + actors *config.Dynamic[map[dtypes.MinerAddress]bool] mineTF promise.Promise[harmonytask.AddTaskFunc] } @@ -75,7 +76,7 @@ type WinPostAPI interface { WalletSign(context.Context, address.Address, []byte) (*crypto.Signature, error) } -func NewWinPostTask(max int, db *harmonydb.DB, remote *paths.Remote, verifier storiface.Verifier, paramck func() (bool, error), api WinPostAPI, actors map[dtypes.MinerAddress]bool) *WinPostTask { +func NewWinPostTask(max int, db *harmonydb.DB, remote *paths.Remote, verifier storiface.Verifier, paramck func() (bool, error), api WinPostAPI, actors *config.Dynamic[map[dtypes.MinerAddress]bool]) *WinPostTask { t := &WinPostTask{ max: max, db: db, @@ -677,7 +678,7 @@ func (t *WinPostTask) mineBasic(ctx context.Context) { baseEpoch := workBase.TipSet.Height() - for act := range t.actors { + for act := range t.actors.Get() { spID, err := address.IDFromAddress(address.Address(act)) if err != nil { log.Errorf("failed to get spID from address %s: %s", act, err) diff --git a/web/api/webrpc/market_filters.go b/web/api/webrpc/market_filters.go index 164d7ab4e..14c45b10b 100644 --- a/web/api/webrpc/market_filters.go +++ b/web/api/webrpc/market_filters.go @@ -416,7 +416,7 @@ func (a *WebRPC) DefaultFilterBehaviour(ctx context.Context) (*DefaultFilterBeha var 
cfgminers []address.Address var cgminer []address.Address - lo.ForEach(lo.Keys(a.deps.Maddrs), func(item dtypes.MinerAddress, _ int) { + lo.ForEach(lo.Keys(a.deps.Maddrs.Get()), func(item dtypes.MinerAddress, _ int) { cfgminers = append(cfgminers, address.Address(item)) })