From af4d2ceeaf081be4de8e125f38ad62f3a3650abd Mon Sep 17 00:00:00 2001 From: slasher Date: Fri, 1 Sep 2023 12:18:14 +0800 Subject: [PATCH] style(blobstore): duplicating const string close: #2574 Signed-off-by: slasher --- blobstore/access/controller/service.go | 2 +- blobstore/access/server.go | 5 --- blobstore/access/stream_get.go | 4 +-- blobstore/blobnode/chunk.go | 34 +++++++------------ blobstore/blobnode/iostat.go | 2 +- blobstore/cli/access/cluster.go | 6 ++-- blobstore/cli/blobnode/chunk.go | 6 ++-- blobstore/cli/blobnode/shard.go | 6 ++-- blobstore/cli/clustermgr/background.go | 6 ++-- blobstore/cli/clustermgr/backup.go | 10 ++++-- blobstore/cli/clustermgr/config.go | 6 ++-- blobstore/cli/clustermgr/kvmgr.go | 6 ++-- blobstore/cli/clustermgr/manage.go | 6 ++-- blobstore/cli/common/cfmt/access.go | 2 +- blobstore/cli/common/cfmt/blobnode.go | 8 ++--- blobstore/cli/common/cfmt/cfmt.go | 3 +- blobstore/cli/common/cfmt/cluster.go | 4 +-- blobstore/cli/proxy/allocator.go | 6 ++-- blobstore/cli/scheduler/migrate.go | 4 +-- blobstore/clustermgr/disk.go | 30 ++++++++-------- blobstore/clustermgr/diskmgr/applier.go | 14 ++++---- .../persistence/normaldb/disktbl.go | 6 ++-- .../persistence/volumedb/volumetbl.go | 12 ++++--- blobstore/clustermgr/service.go | 6 ++-- blobstore/clustermgr/servicemgr/applier.go | 6 ++-- blobstore/clustermgr/volume.go | 32 ++++++++--------- blobstore/clustermgr/volumemgr/applier.go | 30 ++++++++-------- blobstore/clustermgr/volumemgr/volumeunit.go | 6 ++-- blobstore/common/profile/profile.go | 11 +++--- blobstore/common/rpc/auditlog/request_row.go | 11 +++--- blobstore/common/rpc/example/main/main.go | 6 ++-- blobstore/scheduler/client/clustermgr.go | 14 ++++---- blobstore/scheduler/disk_droper.go | 8 ++--- blobstore/scheduler/disk_repairer.go | 8 ++--- blobstore/util/defaulter/defaulter.go | 6 ++-- 35 files changed, 166 insertions(+), 166 deletions(-) diff --git a/blobstore/access/controller/service.go b/blobstore/access/controller/service.go index c885192f73..364697610a 100644 --- a/blobstore/access/controller/service.go +++ b/blobstore/access/controller/service.go @@ -259,7 +259,7 @@ RETRY: span := trace.SpanFromContextSafe(ctx) if lastHost == "" { - span.Errorf("no any host of %s", name) + span.Errorf("no any service host of %s", name) return "", errors.Newf("no any host of %s", name) } span.Warnf("all host were punished of %s, return the last one %s", name, lastHost) diff --git a/blobstore/access/server.go b/blobstore/access/server.go index c54216aeef..6e00bf6b1f 100644 --- a/blobstore/access/server.go +++ b/blobstore/access/server.go @@ -255,7 +255,6 @@ func (s *Service) Put(c *rpc.Context) { span.Debugf("accept /put request args:%+v", args) if !args.IsValid() { - span.Debugf("invalid args:%+v", args) c.RespondError(errcode.ErrIllegalArguments) return } @@ -306,7 +305,6 @@ func (s *Service) PutAt(c *rpc.Context) { span.Debugf("accept /putat request args:%+v", args) if !args.IsValid() { - span.Debugf("invalid args:%+v", args) c.RespondError(errcode.ErrIllegalArguments) return } @@ -362,7 +360,6 @@ func (s *Service) Alloc(c *rpc.Context) { span.Debugf("accept /alloc request args:%+v", args) if !args.IsValid() { - span.Debugf("invalid args:%+v", args) c.RespondError(errcode.ErrIllegalArguments) return } @@ -401,7 +398,6 @@ func (s *Service) Get(c *rpc.Context) { span.Debugf("accept /get request args:%+v", args) if !args.IsValid() || !verifyCrc(&args.Location) { - span.Debugf("invalid args:%+v", args) c.RespondError(errcode.ErrIllegalArguments) return } @@ 
-559,7 +555,6 @@ func (s *Service) DeleteBlob(c *rpc.Context) { span.Debugf("accept /deleteblob request args:%+v", args) if !args.IsValid() { - span.Debugf("invalid args:%+v", args) c.RespondError(errcode.ErrIllegalArguments) return } diff --git a/blobstore/access/stream_get.go b/blobstore/access/stream_get.go index 004d5d5e8b..6fd29f3aa9 100644 --- a/blobstore/access/stream_get.go +++ b/blobstore/access/stream_get.go @@ -502,7 +502,7 @@ func (h *Handler) readOneShard(ctx context.Context, serviceController controller span.Warnf("read %s on %s: %s", blob.ID(), vuid.ID(), err.Error()) return shardResult } - span.Warnf("read %s on %s: %s", blob.ID(), vuid.ID(), errors.Detail(err)) + span.Warnf("rpc read %s on %s: %s", blob.ID(), vuid.ID(), errors.Detail(err)) return shardResult } defer body.Close() @@ -516,7 +516,7 @@ func (h *Handler) readOneShard(ctx context.Context, serviceController controller _, err = io.ReadFull(body, buf[shardOffset:shardOffset+shardReadSize]) if err != nil { h.memPool.Put(buf) - span.Warnf("read %s on %s: %s", blob.ID(), vuid.ID(), err.Error()) + span.Warnf("io read %s on %s: %s", blob.ID(), vuid.ID(), err.Error()) return shardResult } diff --git a/blobstore/blobnode/chunk.go b/blobstore/blobnode/chunk.go index f7fc0ccb69..3a627e8eb0 100644 --- a/blobstore/blobnode/chunk.go +++ b/blobstore/blobnode/chunk.go @@ -41,13 +41,11 @@ func (s *Service) ChunkCreate(c *rpc.Context) { span.Infof("chunk create args:%v", args) if args.ChunkSize < 0 || args.ChunkSize > disk.MaxChunkSize { - span.Debugf("args:%v", args) c.RespondError(bloberr.ErrInvalidParam) return } if !bnapi.IsValidDiskID(args.DiskID) { - span.Debugf("args:%v", args) c.RespondError(bloberr.ErrInvalidDiskId) return } @@ -99,7 +97,6 @@ func (s *Service) ChunkInspect(c *rpc.Context) { span.Debugf("chunk inspect args: %v", args) if !bnapi.IsValidDiskID(args.DiskID) { - span.Debugf("args:%v", args) c.RespondError(bloberr.ErrInvalidDiskId) return } @@ -148,10 +145,9 @@ func (s *Service) ChunkRelease(c *rpc.Context) { ctx := c.Request.Context() span := trace.SpanFromContextSafe(ctx) - span.Debugf("args: %v", args) + span.Debugf("chunk release args: %v", args) if !bnapi.IsValidDiskID(args.DiskID) { - span.Debugf("args:%v", args) c.RespondError(bloberr.ErrInvalidDiskId) return } @@ -159,7 +155,7 @@ func (s *Service) ChunkRelease(c *rpc.Context) { limitKey := args.Vuid err := s.ChunkLimitPerVuid.Acquire(limitKey) if err != nil { - span.Errorf("vuid(%v) status concurry conflict", args.Vuid) + span.Errorf("release vuid(%v) status concurry conflict", args.Vuid) c.RespondError(bloberr.ErrOverload) return } @@ -169,14 +165,14 @@ func (s *Service) ChunkRelease(c *rpc.Context) { ds, exist := s.Disks[args.DiskID] s.lock.RUnlock() if !exist { - span.Errorf("disk:%v not found", args.DiskID) + span.Errorf("release disk:%v not found", args.DiskID) c.RespondError(bloberr.ErrNoSuchDisk) return } cs, exist := ds.GetChunkStorage(args.Vuid) if !exist { - span.Errorf("vuid:%v not found", args.Vuid) + span.Errorf("release vuid:%v not found", args.Vuid) c.RespondError(bloberr.ErrNoSuchVuid) return } @@ -213,10 +209,9 @@ func (s *Service) ChunkReadonly(c *rpc.Context) { ctx := c.Request.Context() span := trace.SpanFromContextSafe(ctx) - span.Debugf("args: %v", args) + span.Debugf("chunk readonly args: %v", args) if !bnapi.IsValidDiskID(args.DiskID) { - span.Debugf("args:%v", args) c.RespondError(bloberr.ErrInvalidDiskId) return } @@ -224,7 +219,7 @@ func (s *Service) ChunkReadonly(c *rpc.Context) { limitKey := args.Vuid err := 
s.ChunkLimitPerVuid.Acquire(limitKey) if err != nil { - span.Errorf("vuid(%v) status concurry conflict", args.Vuid) + span.Errorf("readonly vuid(%v) status concurry conflict", args.Vuid) c.RespondError(bloberr.ErrOverload) return } @@ -234,14 +229,14 @@ func (s *Service) ChunkReadonly(c *rpc.Context) { ds, exist := s.Disks[args.DiskID] s.lock.RUnlock() if !exist { - span.Errorf("disk:%v not found", args.DiskID) + span.Errorf("readonly disk:%v not found", args.DiskID) c.RespondError(bloberr.ErrNoSuchDisk) return } cs, exist := ds.GetChunkStorage(args.Vuid) if !exist { - span.Errorf("vuid:%v not found", args.Vuid) + span.Errorf("readonly vuid:%v not found", args.Vuid) c.RespondError(bloberr.ErrNoSuchVuid) return } @@ -283,10 +278,9 @@ func (s *Service) ChunkReadwrite(c *rpc.Context) { ctx := c.Request.Context() span := trace.SpanFromContextSafe(ctx) - span.Debugf("args: %v", args) + span.Debugf("chunk readwrite args: %v", args) if !bnapi.IsValidDiskID(args.DiskID) { - span.Debugf("args:%v", args) c.RespondError(bloberr.ErrInvalidDiskId) return } @@ -294,7 +288,7 @@ func (s *Service) ChunkReadwrite(c *rpc.Context) { limitKey := args.Vuid err := s.ChunkLimitPerVuid.Acquire(limitKey) if err != nil { - span.Errorf("vuid(%v) status concurry conflict", args.Vuid) + span.Errorf("readwrite vuid(%v) status concurry conflict", args.Vuid) c.RespondError(bloberr.ErrOverload) return } @@ -304,14 +298,14 @@ func (s *Service) ChunkReadwrite(c *rpc.Context) { ds, exist := s.Disks[args.DiskID] s.lock.RUnlock() if !exist { - span.Errorf("disk:%v not found", args.DiskID) + span.Errorf("readwrite disk:%v not found", args.DiskID) c.RespondError(bloberr.ErrNoSuchDisk) return } cs, exist := ds.GetChunkStorage(args.Vuid) if !exist { - span.Errorf("vuid:%v not found", args.Vuid) + span.Errorf("readwrite vuid:%v not found", args.Vuid) c.RespondError(bloberr.ErrNoSuchVuid) return } @@ -356,7 +350,6 @@ func (s *Service) ChunkList(c *rpc.Context) { span.Infof("chunk list args: %v", args) if !bnapi.IsValidDiskID(args.DiskID) { - span.Debugf("args:%v", args) c.RespondError(bloberr.ErrInvalidDiskId) return } @@ -406,7 +399,6 @@ func (s *Service) ChunkStat(c *rpc.Context) { span.Infof("chunk stat args:%v", args) if !bnapi.IsValidDiskID(args.DiskID) { - span.Debugf("args:%v", args) c.RespondError(bloberr.ErrInvalidDiskId) return } @@ -415,7 +407,7 @@ func (s *Service) ChunkStat(c *rpc.Context) { ds, exist := s.Disks[args.DiskID] s.lock.RUnlock() if !exist { - span.Errorf("disk:%v not found", args.DiskID) + span.Errorf("stat disk:%v not found", args.DiskID) c.RespondError(bloberr.ErrNoSuchDisk) return } diff --git a/blobstore/blobnode/iostat.go b/blobstore/blobnode/iostat.go index 65f3d7c38d..106ef72c64 100644 --- a/blobstore/blobnode/iostat.go +++ b/blobstore/blobnode/iostat.go @@ -90,7 +90,7 @@ func (s *Service) cleanExpiredStatFile() { info, err := os.Stat(path) if err != nil { - span.Errorf("path:%v, err:%v", path, err) + span.Errorf("stat path:%v, err:%v", path, err) return err } diff --git a/blobstore/cli/access/cluster.go b/blobstore/cli/access/cluster.go index f686f261ad..9ad584ae70 100644 --- a/blobstore/cli/access/cluster.go +++ b/blobstore/cli/access/cluster.go @@ -72,7 +72,7 @@ func showClusters(c *grumble.Context) error { path := cmapi.GetConsulClusterPath(region) pairs, _, err := cli.KV().List(path, nil) if err != nil { - fmt.Println("\terror:", err) + fmt.Println("\tlist error:", err) continue } @@ -82,7 +82,7 @@ func showClusters(c *grumble.Context) error { clusterInfo := &cmapi.ClusterInfo{} err := 
common.Unmarshal(pair.Value, clusterInfo) if err != nil { - fmt.Println("\terror:", err) + fmt.Println("\tjson error:", err) continue } @@ -111,7 +111,7 @@ func showClusterWithConfig() error { client := config.NewCluster(clusterID, nil, "") stat, err := client.Stat(context.Background()) if err != nil { - fmt.Println("\terror:", err) + fmt.Println("\tstat error:", err) continue } diff --git a/blobstore/cli/blobnode/chunk.go b/blobstore/cli/blobnode/chunk.go index 59ec103393..e309f614f8 100644 --- a/blobstore/cli/blobnode/chunk.go +++ b/blobstore/cli/blobnode/chunk.go @@ -53,7 +53,7 @@ func addCmdChunk(cmd *grumble.Command) { Help: "show stat of chunk", Flags: func(f *grumble.Flags) { blobnodeFlags(f) - f.UintL("diskid", 1, "disk id") + f.UintL("diskid", 1, "disk id to stat") f.UintL("vuid", 1, "vuid") }, Run: func(c *grumble.Context) error { @@ -77,7 +77,7 @@ func addCmdChunk(cmd *grumble.Command) { Help: "list of chunks", Flags: func(f *grumble.Flags) { blobnodeFlags(f) - f.UintL("diskid", 1, "disk id") + f.UintL("diskid", 1, "disk id to list") }, Run: func(c *grumble.Context) error { cli := blobnode.New(&blobnode.Config{}) @@ -97,7 +97,7 @@ func addCmdChunk(cmd *grumble.Command) { Help: "create chunk", Flags: func(f *grumble.Flags) { blobnodeFlags(f) - f.UintL("diskid", 1, "disk id") + f.UintL("diskid", 1, "disk id to create") f.UintL("vuid", 101, "vuid") f.Int64L("chunksize", 1024, "chunk size") }, diff --git a/blobstore/cli/blobnode/shard.go b/blobstore/cli/blobnode/shard.go index 2cd4bf18a2..b75575e3ec 100644 --- a/blobstore/cli/blobnode/shard.go +++ b/blobstore/cli/blobnode/shard.go @@ -36,7 +36,7 @@ func addCmdShard(cmd *grumble.Command) { Help: "shard stat", Flags: func(f *grumble.Flags) { blobnodeFlags(f) - f.UintL("diskid", 1, "disk id") + f.UintL("diskid", 1, "disk id to stat") f.UintL("vuid", 1, "vuid") f.UintL("bid", 1, "bid") }, @@ -62,7 +62,7 @@ func addCmdShard(cmd *grumble.Command) { Help: "get shard", Flags: func(f *grumble.Flags) { blobnodeFlags(f) - f.UintL("diskid", 1, "disk id") + f.UintL("diskid", 1, "disk id to get") f.UintL("vuid", 1, "vuid") f.UintL("bid", 1, "bid") }, @@ -91,7 +91,7 @@ func addCmdShard(cmd *grumble.Command) { blobnodeFlags(f) }, Args: func(c *grumble.Args) { - c.Uint64("diskid", "disk id") + c.Uint64("diskid", "disk id to mark") c.Uint64("vuid", "vuid") c.Uint64("bid", "bid") }, diff --git a/blobstore/cli/clustermgr/background.go b/blobstore/cli/clustermgr/background.go index 4bb98ab8d2..7233507115 100644 --- a/blobstore/cli/clustermgr/background.go +++ b/blobstore/cli/clustermgr/background.go @@ -56,7 +56,7 @@ func addCmdBackground(cmd *grumble.Command) { Help: "show status of a background task switch", LongHelp: "Show status of a specific background task type, currently supported: " + BackgroundTaskTypeString, Args: func(a *grumble.Args) { - a.String("task", "background task type") + a.String("task", "background task type to status") }, Run: cmdListBackgroundStatus, Flags: func(f *grumble.Flags) { @@ -69,7 +69,7 @@ func addCmdBackground(cmd *grumble.Command) { Help: "enable background task", LongHelp: "Enable a specific background task type, currently supported: " + BackgroundTaskTypeString, Args: func(a *grumble.Args) { - a.String("task", "background task type") + a.String("task", "background task type to enable") }, Flags: clusterFlags, Run: func(c *grumble.Context) error { @@ -81,7 +81,7 @@ func addCmdBackground(cmd *grumble.Command) { Help: "disable background task", LongHelp: "Disable a specific background task type, currently supported: " 
+ BackgroundTaskTypeString, Args: func(a *grumble.Args) { - a.String("task", "background task type") + a.String("task", "background task type to disable") }, Flags: clusterFlags, Run: func(c *grumble.Context) error { diff --git a/blobstore/cli/clustermgr/backup.go b/blobstore/cli/clustermgr/backup.go index 6cb4f5cc9a..55503c2072 100644 --- a/blobstore/cli/clustermgr/backup.go +++ b/blobstore/cli/clustermgr/backup.go @@ -39,6 +39,10 @@ import ( "github.com/cubefs/cubefs/blobstore/util/log" ) +const ( + timeFormat = "2006-01-02" +) + const ( volumeDBPathName = "snapshot-volumedb" normalDBPathName = "snapshot-normaldb" @@ -159,7 +163,7 @@ func cmdDumpSnapshot(c *grumble.Context) error { log.Fatalf("parse snapshot index failed: %s", err.Error()) } - date := time.Now().Format("2006-01-02") + date := time.Now().Format(timeFormat) tmpNormalDBPath := dbPath + "/" + date + "/" + normalDBPathName tmpVolumeDBPath := dbPath + "/" + date + "/" + volumeDBPathName tmpRaftDBPath := dbPath + "/" + date + "/" + raftDBPathName @@ -244,7 +248,7 @@ func cmdDumpSnapshot(c *grumble.Context) error { } backups := make([]string, 0) for i := range backupDirFiless { - if _, err := time.Parse("2006-01-02", backupDirFiless[i].Name()); err == nil { + if _, err := time.Parse(timeFormat, backupDirFiless[i].Name()); err == nil { log.Infof("name: %s", backupDirFiless[i].Name()) backups = append(backups, backupDirFiless[i].Name()) } @@ -255,7 +259,7 @@ func cmdDumpSnapshot(c *grumble.Context) error { oldestT := time.Now() oldestBackup := "" for i := range backups { - t, err := time.Parse("2006-01-02", backups[i]) + t, err := time.Parse(timeFormat, backups[i]) if err == nil && t.Before(oldestT) { oldestT = t oldestBackup = backups[i] diff --git a/blobstore/cli/clustermgr/config.go b/blobstore/cli/clustermgr/config.go index 9688584724..ae30575199 100644 --- a/blobstore/cli/clustermgr/config.go +++ b/blobstore/cli/clustermgr/config.go @@ -63,7 +63,7 @@ func addCmdConfig(cmd *grumble.Command) { Help: "show config of [key]", Run: cmdGetConfig, Args: func(a *grumble.Args) { - a.String("key", "config key", grumble.Default("")) + a.String("key", "config key to get", grumble.Default("")) }, Flags: func(f *grumble.Flags) { flags.VerboseRegister(f) @@ -74,7 +74,7 @@ func addCmdConfig(cmd *grumble.Command) { Name: "set", Help: "set config of key", Args: func(a *grumble.Args) { - a.String("key", "config key") + a.String("key", "config key to set") a.String("value", "config value") }, Flags: clusterFlags, @@ -108,7 +108,7 @@ func addCmdConfig(cmd *grumble.Command) { Name: "del", Help: "del config of key", Args: func(a *grumble.Args) { - a.String("key", "config key") + a.String("key", "config key to delete") }, Flags: clusterFlags, Run: func(c *grumble.Context) error { diff --git a/blobstore/cli/clustermgr/kvmgr.go b/blobstore/cli/clustermgr/kvmgr.go index 1680983285..ccf8fdf655 100644 --- a/blobstore/cli/clustermgr/kvmgr.go +++ b/blobstore/cli/clustermgr/kvmgr.go @@ -34,7 +34,7 @@ func addCmdKV(cmd *grumble.Command) { Name: "get", Help: "get by key", Args: func(a *grumble.Args) { - a.String("key", "key name") + a.String("key", "key name to get") }, Flags: func(f *grumble.Flags) { clusterFlags(f) @@ -55,7 +55,7 @@ func addCmdKV(cmd *grumble.Command) { Name: "set", Help: "set value to key", Args: func(a *grumble.Args) { - a.String("key", "key name") + a.String("key", "key name to set") a.String("value", "value of key, string") }, Flags: clusterFlags, @@ -71,7 +71,7 @@ func addCmdKV(cmd *grumble.Command) { Name: "del", Help: "del the key", 
Args: func(a *grumble.Args) { - a.String("key", "key name") + a.String("key", "key name to delete") }, Flags: clusterFlags, Run: func(c *grumble.Context) error { diff --git a/blobstore/cli/clustermgr/manage.go b/blobstore/cli/clustermgr/manage.go index 7f91b32c46..6ccf826b2c 100644 --- a/blobstore/cli/clustermgr/manage.go +++ b/blobstore/cli/clustermgr/manage.go @@ -87,7 +87,7 @@ func cmdAddMember(c *grumble.Context) error { NodeHost: nodeHost, } if !common.Confirm("confirm add?") { - fmt.Println("command canceled") + fmt.Println("add command canceled") return nil } return cli.AddMember(ctx, memberArgs) @@ -99,7 +99,7 @@ func cmdRemoveMember(c *grumble.Context) error { id := c.Args.Uint64("peer_id") if !common.Confirm("confirm remove?") { - fmt.Println("command canceled") + fmt.Println("remove command canceled") return nil } return cli.RemoveMember(ctx, id) @@ -112,7 +112,7 @@ func cmdTransferLeadership(c *grumble.Context) error { id := c.Args.Uint64("peer_id") if !common.Confirm("confirm transfer leadership?") { - fmt.Println("command canceled") + fmt.Println("transfer command canceled") return nil } diff --git a/blobstore/cli/common/cfmt/access.go b/blobstore/cli/common/cfmt/access.go index 8fc1ca5fc3..4e34f7c995 100644 --- a/blobstore/cli/common/cfmt/access.go +++ b/blobstore/cli/common/cfmt/access.go @@ -47,7 +47,7 @@ func LocationJoin(loc *access.Location, prefix string) string { // LocationF fmt pointer of Location func LocationF(loc *access.Location) (vals []string) { if loc == nil { - return []string{" "} + return nilStrings[:] } vals = make([]string, 0, 8) vals = append(vals, []string{ diff --git a/blobstore/cli/common/cfmt/blobnode.go b/blobstore/cli/common/cfmt/blobnode.go index 037d9686e3..b945fe367f 100644 --- a/blobstore/cli/common/cfmt/blobnode.go +++ b/blobstore/cli/common/cfmt/blobnode.go @@ -47,7 +47,7 @@ func ChunkInfoJoin(info *blobnode.ChunkInfo, prefix string) string { // ChunkInfoF chunk info func ChunkInfoF(info *blobnode.ChunkInfo) []string { if info == nil { - return []string{" "} + return nilStrings[:] } return []string{ fmt.Sprintf("ID : %s", ChunkidF(info.Id)), @@ -69,7 +69,7 @@ func DiskHeartBeatInfoJoin(info *blobnode.DiskHeartBeatInfo, prefix string) stri // DiskHeartBeatInfoF disk heartbeat info func DiskHeartBeatInfoF(info *blobnode.DiskHeartBeatInfo) []string { if info == nil { - return []string{" "} + return nilStrings[:] } return []string{ fmt.Sprintf("DiskID: %-12d | MaxN: %-8d | UsedN: %-8d | FreeN: %-8d", @@ -87,7 +87,7 @@ func DiskInfoJoin(info *blobnode.DiskInfo, prefix string) string { // DiskInfoF disk info func DiskInfoF(info *blobnode.DiskInfo) []string { if info == nil { - return []string{" "} + return nilStrings[:] } return append(DiskHeartBeatInfoF(&info.DiskHeartBeatInfo), []string{ @@ -109,7 +109,7 @@ func DiskInfoJoinV(info *blobnode.DiskInfo, prefix string) string { // DiskInfoFV disk info func DiskInfoFV(info *blobnode.DiskInfo) []string { if info == nil { - return []string{" "} + return nilStrings[:] } return []string{ fmt.Sprint("DiskID : ", info.DiskID), diff --git a/blobstore/cli/common/cfmt/cfmt.go b/blobstore/cli/common/cfmt/cfmt.go index d618125115..8c30100113 100644 --- a/blobstore/cli/common/cfmt/cfmt.go +++ b/blobstore/cli/common/cfmt/cfmt.go @@ -16,7 +16,6 @@ // // function *F make the pointer struct value to []string // function *Join []string into string with profix each line, sep is '\n' -// package cfmt import ( @@ -27,6 +26,8 @@ import ( "github.com/dustin/go-humanize" ) +var nilStrings = []string{" "} + func 
joinWithPrefix(prefix string, vals []string) string { if len(prefix) > 0 { for idx := range vals { diff --git a/blobstore/cli/common/cfmt/cluster.go b/blobstore/cli/common/cfmt/cluster.go index 6fafd17f35..13a1ca51c2 100644 --- a/blobstore/cli/common/cfmt/cluster.go +++ b/blobstore/cli/common/cfmt/cluster.go @@ -31,7 +31,7 @@ func VolumeInfoJoin(vol *clustermgr.VolumeInfo, prefix string) string { // VolumeInfoF volume info func VolumeInfoF(vol *clustermgr.VolumeInfo) []string { if vol == nil { - return []string{" "} + return nilStrings[:] } usedC := common.ColorizeUint64(vol.Used, vol.Total) freeC := common.ColorizeUint64Free(vol.Free, vol.Total) @@ -63,7 +63,7 @@ func ClusterInfoJoin(info *clustermgr.ClusterInfo, prefix string) string { // ClusterInfoF cluster info func ClusterInfoF(info *clustermgr.ClusterInfo) []string { if info == nil { - return []string{" "} + return nilStrings[:] } avaiC := common.ColorizeInt64(-info.Available, info.Capacity) vals := make([]string, 0, 8) diff --git a/blobstore/cli/proxy/allocator.go b/blobstore/cli/proxy/allocator.go index 250f6b3bdf..ba77fc6a99 100644 --- a/blobstore/cli/proxy/allocator.go +++ b/blobstore/cli/proxy/allocator.go @@ -38,7 +38,7 @@ func addCmdAllocator(cmd *grumble.Command) { Help: "alloc volume, just testing for most of time", Flags: func(f *grumble.Flags) { proxyFlags(f) - f.UintL("code_mode", 0, "codemode uint") + f.UintL("code_mode", 0, "codemode uint to alloc") f.Uint64L("fsize", 0, "file size") f.Uint64L("bid_count", 0, "bid count") }, @@ -52,7 +52,7 @@ func addCmdAllocator(cmd *grumble.Command) { }, Flags: func(f *grumble.Flags) { proxyFlags(f) - f.UintL("code_mode", 0, "codemode uint") + f.UintL("code_mode", 0, "codemode uint to discard") }, Run: discardVolumes, }) @@ -61,7 +61,7 @@ func addCmdAllocator(cmd *grumble.Command) { Help: "list volumes, show top 10 hosts and disks", Flags: func(f *grumble.Flags) { proxyFlags(f) - f.UintL("code_mode", 0, "codemode uint") + f.UintL("code_mode", 0, "codemode uint to list") f.IntL("top", 10, "show top") }, Run: listVolumes, diff --git a/blobstore/cli/scheduler/migrate.go b/blobstore/cli/scheduler/migrate.go index a4bbf453e4..5cac8a652b 100644 --- a/blobstore/cli/scheduler/migrate.go +++ b/blobstore/cli/scheduler/migrate.go @@ -65,7 +65,7 @@ func addCmdMigrateTask(cmd *grumble.Command) { Run: cmdListTask, Flags: func(f *grumble.Flags) { migrateFlags(f) - f.Uint64L(_diskID, 0, "disk id for which disk") + f.Uint64L(_diskID, 0, "disk id for which disk to list") f.IntL(_count, 10, "the number you want to get") }, }) @@ -84,7 +84,7 @@ func addCmdMigrateTask(cmd *grumble.Command) { Run: cmdGetMigratingProgress, Flags: func(f *grumble.Flags) { migrateFlags(f) - f.Uint64L(_diskID, 0, "disk id for which disk") + f.Uint64L(_diskID, 0, "disk id for which disk of progress") }, }) } diff --git a/blobstore/clustermgr/disk.go b/blobstore/clustermgr/disk.go index d34d9a1cd8..7ae33789d6 100644 --- a/blobstore/clustermgr/disk.go +++ b/blobstore/clustermgr/disk.go @@ -94,7 +94,7 @@ func (s *Service) DiskAdd(c *rpc.Context) { proposeInfo := base.EncodeProposeInfo(s.DiskMgr.GetModuleName(), diskmgr.OperTypeAddDisk, data, base.ProposeContext{ReqID: span.TraceID()}) err = s.raftNode.Propose(ctx, proposeInfo) if err != nil { - span.Error("raft propose failed, err: ", err) + span.Error(err) c.RespondError(apierrors.ErrRaftPropose) } } @@ -111,7 +111,7 @@ func (s *Service) DiskInfo(c *rpc.Context) { // linear read if err := s.raftNode.ReadIndex(ctx); err != nil { - span.Errorf("read index error: %v", err) + 
span.Errorf("info read index error: %v", err) c.RespondError(apierrors.ErrRaftReadIndex) return } @@ -136,7 +136,7 @@ func (s *Service) DiskList(c *rpc.Context) { span.Infof("accept DiskList request, args: %v", args) if err := s.raftNode.ReadIndex(ctx); err != nil { - span.Errorf("read index error: %v", err) + span.Errorf("list read index error: %v", err) c.RespondError(apierrors.ErrRaftReadIndex) return } @@ -213,14 +213,14 @@ func (s *Service) DiskSet(c *rpc.Context) { data, err := json.Marshal(args) if err != nil { - span.Errorf("json marshal failed, args: %v, error: %v", args, err) + span.Errorf("set args: %v, error: %v", args, err) c.RespondError(errors.Info(apierrors.ErrUnexpected).Detail(err)) return } proposeInfo := base.EncodeProposeInfo(s.DiskMgr.GetModuleName(), diskmgr.OperTypeSetDiskStatus, data, base.ProposeContext{ReqID: span.TraceID()}) err = s.raftNode.Propose(ctx, proposeInfo) if err != nil { - span.Error("raft propose failed, err: ", err) + span.Error(err) c.RespondError(apierrors.ErrRaftPropose) return } @@ -271,7 +271,7 @@ func (s *Service) DiskDrop(c *rpc.Context) { proposeInfo := base.EncodeProposeInfo(s.DiskMgr.GetModuleName(), diskmgr.OperTypeDroppingDisk, data, base.ProposeContext{ReqID: span.TraceID()}) err = s.raftNode.Propose(ctx, proposeInfo) if err != nil { - span.Error("raft propose failed, err: ", err) + span.Error(err) c.RespondError(apierrors.ErrRaftPropose) } } @@ -323,14 +323,14 @@ func (s *Service) DiskDropped(c *rpc.Context) { // 3. data propose data, err := json.Marshal(args) if err != nil { - span.Errorf("json marshal failed, args: %v, error: %v", args, err) + span.Errorf("drop args: %v, error: %v", args, err) c.RespondError(errors.Info(apierrors.ErrUnexpected).Detail(err)) return } proposeInfo := base.EncodeProposeInfo(s.DiskMgr.GetModuleName(), diskmgr.OperTypeDroppedDisk, data, base.ProposeContext{ReqID: span.TraceID()}) err = s.raftNode.Propose(ctx, proposeInfo) if err != nil { - span.Error("raft propose failed, err: ", err) + span.Error(err) c.RespondError(apierrors.ErrRaftPropose) } } @@ -341,7 +341,7 @@ func (s *Service) DiskDroppingList(c *rpc.Context) { span.Info("accept DiskDroppingList request") if err := s.raftNode.ReadIndex(ctx); err != nil { - span.Errorf("read index error: %v", err) + span.Errorf("dropping list read index error: %v", err) c.RespondError(apierrors.ErrRaftReadIndex) return } @@ -406,7 +406,7 @@ func (s *Service) DiskHeartbeat(c *rpc.Context) { data, err := json.Marshal(args) span.Debugf("heartbeat params: %s", string(data)) if err != nil { - span.Errorf("json marshal failed, args: %v, error: %v", args, err) + span.Errorf("heartbeat args: %v, error: %v", args, err) err = errors.Info(apierrors.ErrUnexpected).Detail(err) c.RespondError(err) return @@ -414,7 +414,7 @@ func (s *Service) DiskHeartbeat(c *rpc.Context) { proposeInfo := base.EncodeProposeInfo(s.DiskMgr.GetModuleName(), diskmgr.OperTypeHeartbeatDiskInfo, data, base.ProposeContext{ReqID: span.TraceID()}) err = s.raftNode.Propose(ctx, proposeInfo) if err != nil { - span.Error("raft propose failed, err: ", err) + span.Error(err) c.RespondError(apierrors.ErrRaftPropose) } } @@ -450,14 +450,14 @@ func (s *Service) DiskAccess(c *rpc.Context) { data, err := json.Marshal(args) if err != nil { - span.Errorf("json marshal failed, args: %v, error: %v", args, err) + span.Errorf("access args: %v, error: %v", args, err) c.RespondError(errors.Info(apierrors.ErrUnexpected).Detail(err)) return } proposeInfo := base.EncodeProposeInfo(s.DiskMgr.GetModuleName(), 
diskmgr.OperTypeSwitchReadonly, data, base.ProposeContext{ReqID: span.TraceID()}) err = s.raftNode.Propose(ctx, proposeInfo) if err != nil { - span.Error("raft propose failed, err: ", err) + span.Error(err) c.RespondError(apierrors.ErrRaftPropose) return } @@ -490,14 +490,14 @@ func (s *Service) AdminDiskUpdate(c *rpc.Context) { data, err := json.Marshal(args) if err != nil { - span.Errorf("json marshal failed, args: %v, error: %v", args, err) + span.Errorf("update args: %v, error: %v", args, err) c.RespondError(errors.Info(apierrors.ErrUnexpected).Detail(err)) return } proposeInfo := base.EncodeProposeInfo(s.DiskMgr.GetModuleName(), diskmgr.OperTypeAdminUpdateDisk, data, base.ProposeContext{ReqID: span.TraceID()}) err = s.raftNode.Propose(ctx, proposeInfo) if err != nil { - span.Error("raft propose failed, err: ", err) + span.Error(err) c.RespondError(apierrors.ErrRaftPropose) return } diff --git a/blobstore/clustermgr/diskmgr/applier.go b/blobstore/clustermgr/diskmgr/applier.go index 004ee0771c..5e382fe90c 100644 --- a/blobstore/clustermgr/diskmgr/applier.go +++ b/blobstore/clustermgr/diskmgr/applier.go @@ -113,7 +113,7 @@ func (d *DiskMgr) Apply(ctx context.Context, operTypes []int32, datas [][]byte, diskInfo := &blobnode.DiskInfo{} err := json.Unmarshal(datas[idx], diskInfo) if err != nil { - errs[idx] = errors.Info(err, "json unmarshal failed, data: ", datas[idx]).Detail(err) + errs[idx] = errors.Info(err, t, datas[idx]).Detail(err) wg.Done() continue } @@ -129,7 +129,7 @@ func (d *DiskMgr) Apply(ctx context.Context, operTypes []int32, datas [][]byte, setStatusArgs := &clustermgr.DiskSetArgs{} err := json.Unmarshal(datas[idx], setStatusArgs) if err != nil { - errs[idx] = errors.Info(err, "json unmarshal failed, data: ", datas[idx]).Detail(err) + errs[idx] = errors.Info(err, t, datas[idx]).Detail(err) wg.Done() continue } @@ -141,7 +141,7 @@ func (d *DiskMgr) Apply(ctx context.Context, operTypes []int32, datas [][]byte, args := &clustermgr.DiskInfoArgs{} err := json.Unmarshal(datas[idx], args) if err != nil { - errs[idx] = errors.Info(err, "json unmarshal failed, data: ", datas[idx]).Detail(err) + errs[idx] = errors.Info(err, t, datas[idx]).Detail(err) wg.Done() continue } @@ -153,7 +153,7 @@ func (d *DiskMgr) Apply(ctx context.Context, operTypes []int32, datas [][]byte, args := &clustermgr.DiskInfoArgs{} err := json.Unmarshal(datas[idx], args) if err != nil { - errs[idx] = errors.Info(err, "json unmarshal failed, data: ", datas[idx]).Detail(err) + errs[idx] = errors.Info(err, t, datas[idx]).Detail(err) wg.Done() continue } @@ -165,7 +165,7 @@ func (d *DiskMgr) Apply(ctx context.Context, operTypes []int32, datas [][]byte, args := &clustermgr.DisksHeartbeatArgs{} err := json.Unmarshal(datas[idx], args) if err != nil { - errs[idx] = errors.Info(err, "json unmarshal failed, data: ", datas[idx]).Detail(err) + errs[idx] = errors.Info(err, t, datas[idx]).Detail(err) wg.Done() continue } @@ -178,7 +178,7 @@ func (d *DiskMgr) Apply(ctx context.Context, operTypes []int32, datas [][]byte, args := &clustermgr.DiskAccessArgs{} err := json.Unmarshal(datas[i], args) if err != nil { - errs[idx] = errors.Info(err, "json unmarshal failed, data: ", datas[idx]).Detail(err) + errs[idx] = errors.Info(err, t, datas[idx]).Detail(err) wg.Done() continue } @@ -190,7 +190,7 @@ func (d *DiskMgr) Apply(ctx context.Context, operTypes []int32, datas [][]byte, args := &blobnode.DiskInfo{} err := json.Unmarshal(datas[i], args) if err != nil { - errs[idx] = errors.Info(err, "json unmarshal failed, data: ", 
datas[idx]).Detail(err) + errs[idx] = errors.Info(err, t, datas[idx]).Detail(err) wg.Done() continue } diff --git a/blobstore/clustermgr/persistence/normaldb/disktbl.go b/blobstore/clustermgr/persistence/normaldb/disktbl.go index 92f09b30cd..697ada5a9c 100644 --- a/blobstore/clustermgr/persistence/normaldb/disktbl.go +++ b/blobstore/clustermgr/persistence/normaldb/disktbl.go @@ -183,7 +183,7 @@ func (d *DiskTable) ListDisk(opt *clustermgr.ListOptionArgs) ([]*DiskInfoRecord, count := opt.Count for ; count > 0 && iter.Valid(); iter.Next() { if iter.Err() != nil { - return nil, errors.Info(iter.Err(), "disk table iterate failed") + return nil, errors.Info(iter.Err(), "list disk table iterate failed") } if iter.Key().Size() != 0 && iter.Value().Size() != 0 { // index iterate mode, we should check if iterate to the end @@ -201,7 +201,7 @@ func (d *DiskTable) ListDisk(opt *clustermgr.ListOptionArgs) ([]*DiskInfoRecord, if err != nil { iter.Key().Free() iter.Value().Free() - return nil, errors.Info(err, "disk table iterate failed") + return nil, errors.Info(err, "list disk table iterate failed") } // two part of detail filter @@ -358,7 +358,7 @@ func (d *DiskTable) listDisksByDiskTbl(marker proto.DiskID, count int) ([]*DiskI for i := 1; iter.Valid(); iter.Next() { if iter.Err() != nil { - return nil, errors.Info(iter.Err(), "disk table iterate failed") + return nil, errors.Info(iter.Err(), "list by disk table iterate failed") } info, err := decodeDiskInfoRecord(iter.Value().Data()) if err != nil { diff --git a/blobstore/clustermgr/persistence/volumedb/volumetbl.go b/blobstore/clustermgr/persistence/volumedb/volumetbl.go index 0c9e102099..44bedc226e 100644 --- a/blobstore/clustermgr/persistence/volumedb/volumetbl.go +++ b/blobstore/clustermgr/persistence/volumedb/volumetbl.go @@ -446,7 +446,7 @@ func (v *VolumeTable) PutVolumeUnit(vuidPrefix proto.VuidPrefix, unitRecord *Vol indexKey := "" indexName := v.indexes[volumeUintDiskIDIndex].indexName - indexKey += fmt.Sprintf(indexName+"-%d-%d", unitRecord.DiskID, vuidPrefix) + indexKey += fmtIndexKey(indexName, unitRecord.DiskID, vuidPrefix) batch.PutCF(v.indexes[volumeUintDiskIDIndex].indexTbl.GetCf(), []byte(indexKey), key) batch.PutCF(v.unitTbl.GetCf(), key, value) return v.unitTbl.DoBatch(batch) @@ -464,7 +464,7 @@ func (v *VolumeTable) PutVolumeUnits(units []*VolumeUnitRecord) (err error) { } indexKey := "" - indexKey += fmt.Sprintf(v.indexes["diskID"].indexName+"-%d-%d", unit.DiskID, unit.VuidPrefix) + indexKey += fmtIndexKey(v.indexes["diskID"].indexName, unit.DiskID, unit.VuidPrefix) batch.PutCF(v.indexes[volumeUintDiskIDIndex].indexTbl.GetCf(), []byte(indexKey), key) batch.PutCF(v.unitTbl.GetCf(), key, value) } @@ -539,10 +539,10 @@ func (v *VolumeTable) UpdateVolumeUnit(vuidPrefix proto.VuidPrefix, unitRecord * } oldDiskID := uRec.DiskID oldIndexKey := "" - oldIndexKey += fmt.Sprintf(indexName+"-%d-%d", oldDiskID, vuidPrefix) + oldIndexKey += fmtIndexKey(indexName, oldDiskID, vuidPrefix) batch.DeleteCF(v.indexes[volumeUintDiskIDIndex].indexTbl.GetCf(), []byte(oldIndexKey)) - indexKey += fmt.Sprintf(indexName+"-%d-%d", unitRecord.DiskID, vuidPrefix) + indexKey += fmtIndexKey(indexName, unitRecord.DiskID, vuidPrefix) batch.PutCF(v.indexes[volumeUintDiskIDIndex].indexTbl.GetCf(), []byte(indexKey), keyVuidPrefix) batch.PutCF(v.unitTbl.GetCf(), keyVuidPrefix, value) @@ -594,3 +594,7 @@ func decodeTaskRecord(buf []byte) (ret *VolumeTaskRecord, err error) { err = dec.Decode(&ret) return } + +func fmtIndexKey(name string, diskID proto.DiskID, 
vuidPrefix proto.VuidPrefix) string { + return fmt.Sprintf("%s-%d-%d", name, diskID, vuidPrefix) +} diff --git a/blobstore/clustermgr/service.go b/blobstore/clustermgr/service.go index 31a91fbcf2..b4da672ecf 100644 --- a/blobstore/clustermgr/service.go +++ b/blobstore/clustermgr/service.go @@ -87,7 +87,7 @@ func (s *Service) ServiceRegister(c *rpc.Context) { } err = s.raftNode.Propose(ctx, base.EncodeProposeInfo(s.ServiceMgr.GetModuleName(), servicemgr.OpRegister, data, base.ProposeContext{ReqID: span.TraceID()})) if err != nil { - span.Error("raft propose failed, err: ", err) + span.Error("register raft propose failed, err: ", err) c.RespondError(apierrors.ErrRaftPropose) } } @@ -135,7 +135,7 @@ func (s *Service) ServiceUnregister(c *rpc.Context) { data, base.ProposeContext{ReqID: span.TraceID()})) if err != nil { - span.Error("raft propose failed, err: ", err) + span.Error("unregister raft propose failed, err: ", err) c.RespondError(apierrors.ErrRaftPropose) } } @@ -168,7 +168,7 @@ func (s *Service) ServiceHeartbeat(c *rpc.Context) { data, base.ProposeContext{ReqID: span.TraceID()})) if err != nil { - span.Error("raft propose failed, err: ", err) + span.Error("heartbeat raft propose failed, err: ", err) c.RespondError(apierrors.ErrRaftPropose) } } diff --git a/blobstore/clustermgr/servicemgr/applier.go b/blobstore/clustermgr/servicemgr/applier.go index 703e107908..a7fc42fecf 100644 --- a/blobstore/clustermgr/servicemgr/applier.go +++ b/blobstore/clustermgr/servicemgr/applier.go @@ -66,7 +66,7 @@ func (s *ServiceMgr) Apply(ctx context.Context, opTypes []int32, datas [][]byte, defer wg.Done() var arg clustermgr.RegisterArgs if err := json.Unmarshal(datas[idx], &arg); err != nil { - errs[idx] = errors.Info(err, "json unmarshal failed, data: ", datas[idx]).Detail(err) + errs[idx] = errors.Info(err, OpRegister, datas[idx]).Detail(err) return } errs[idx] = s.handleRegister(taskCtx, arg) @@ -77,7 +77,7 @@ func (s *ServiceMgr) Apply(ctx context.Context, opTypes []int32, datas [][]byte, defer wg.Done() var arg clustermgr.UnregisterArgs if err := json.Unmarshal(datas[idx], &arg); err != nil { - errs[idx] = errors.Info(err, "json unmarshal failed, data: ", datas[idx]).Detail(err) + errs[idx] = errors.Info(err, OpUnregister, datas[idx]).Detail(err) return } errs[idx] = s.handleUnregister(taskCtx, arg.Name, arg.Host) @@ -88,7 +88,7 @@ func (s *ServiceMgr) Apply(ctx context.Context, opTypes []int32, datas [][]byte, defer wg.Done() var arg clustermgr.HeartbeatArgs if err := json.Unmarshal(datas[idx], &arg); err != nil { - errs[idx] = errors.Info(err, "json unmarshal failed, data: ", datas[idx]).Detail(err) + errs[idx] = errors.Info(err, OpHeartbeat, datas[idx]).Detail(err) return } errs[idx] = s.handleHeartbeat(taskCtx, arg.Name, arg.Host) diff --git a/blobstore/clustermgr/volume.go b/blobstore/clustermgr/volume.go index 5e275cc24e..916cbde645 100644 --- a/blobstore/clustermgr/volume.go +++ b/blobstore/clustermgr/volume.go @@ -47,7 +47,7 @@ func (s *Service) VolumeGet(c *rpc.Context) { span.Debugf("accept VolumeGet request, args: %v", args) if err := s.raftNode.ReadIndex(ctx); err != nil { - span.Errorf("read index error: %v", err) + span.Errorf("get read index error: %v", err) c.RespondError(apierrors.ErrRaftReadIndex) return } @@ -72,7 +72,7 @@ func (s *Service) VolumeList(c *rpc.Context) { span.Debugf("accept VolumeList request, args: %v", args) if err := s.raftNode.ReadIndex(ctx); err != nil { - span.Errorf("read index error: %v", err) + span.Errorf("list read index error: %v", err) 
c.RespondError(apierrors.ErrRaftReadIndex) return } @@ -134,7 +134,7 @@ func (s *Service) VolumeAllocatedList(c *rpc.Context) { span.Debugf("accept VolumeAllocatedList request, request ip is %v", args.Host) if err := s.raftNode.ReadIndex(ctx); err != nil { - span.Errorf("read index error: %v", err) + span.Errorf("list allocated read index error: %v", err) c.RespondError(apierrors.ErrRaftReadIndex) return } @@ -181,14 +181,14 @@ func (s *Service) VolumeUpdate(c *rpc.Context) { } data, err := json.Marshal(args) if err != nil { - span.Errorf("json marshal failed, args: %v, error: %v", args, err) + span.Errorf("update json marshal failed, args: %v, error: %v", args, err) c.RespondError(apierrors.ErrCMUnexpect) return } proposeInfo := base.EncodeProposeInfo(s.VolumeMgr.GetModuleName(), volumemgr.OperTypeUpdateVolumeUnit, data, base.ProposeContext{ReqID: span.TraceID()}) err = s.raftNode.Propose(ctx, proposeInfo) if err != nil { - span.Errorf("raft propose error:%v", err) + span.Error(err) c.RespondError(apierrors.ErrRaftPropose) return } @@ -216,14 +216,14 @@ func (s *Service) VolumeRetain(c *rpc.Context) { data, err := json.Marshal(retainVolumes) if err != nil { - span.Errorf("json marshal failed, args: %v, error: %v", retainVolumes, err) + span.Errorf("retain json marshal failed, args: %v, error: %v", retainVolumes, err) c.RespondError(apierrors.ErrCMUnexpect) return } proposeInfo := base.EncodeProposeInfo(s.VolumeMgr.GetModuleName(), volumemgr.OperTypeRetainVolume, data, base.ProposeContext{ReqID: span.TraceID()}) err = s.raftNode.Propose(ctx, proposeInfo) if err != nil { - span.Errorf("raft propose error:%v", err) + span.Error(err) c.RespondError(apierrors.ErrRaftPropose) return } @@ -286,7 +286,7 @@ func (s *Service) VolumeUnitList(c *rpc.Context) { span.Debugf("accept VolumeUnitList request, args: %v", args) if err := s.raftNode.ReadIndex(ctx); err != nil { - span.Errorf("read index error: %v", err) + span.Errorf("list units read index error: %v", err) c.RespondError(apierrors.ErrRaftReadIndex) return } @@ -338,7 +338,7 @@ func (s *Service) ChunkReport(c *rpc.Context) { proposeInfo := base.EncodeProposeInfo(s.VolumeMgr.GetModuleName(), volumemgr.OperTypeChunkReport, writer.Bytes(), base.ProposeContext{ReqID: span.TraceID()}) err := s.raftNode.Propose(ctx, proposeInfo) if err != nil { - span.Errorf("raft propose error:%v", err) + span.Error(err) c.RespondError(apierrors.ErrRaftPropose) return } @@ -368,14 +368,14 @@ func (s *Service) ChunkSetCompact(c *rpc.Context) { data, err := json.Marshal(args) if err != nil { - span.Errorf("json marshal failed, args: %v, error: %v", args, err) + span.Errorf("set compact json marshal failed, args: %v, error: %v", args, err) c.RespondError(apierrors.ErrCMUnexpect) return } proposeInfo := base.EncodeProposeInfo(s.VolumeMgr.GetModuleName(), volumemgr.OperTypeChunkSetCompact, data, base.ProposeContext{ReqID: span.TraceID()}) err = s.raftNode.Propose(ctx, proposeInfo) if err != nil { - span.Error("raft propose failed, err: ", err) + span.Error(err) c.RespondError(apierrors.ErrRaftPropose) return } @@ -414,14 +414,14 @@ func (s *Service) AdminUpdateVolume(c *rpc.Context) { data, err := json.Marshal(volume) if err != nil { - span.Errorf("json marshal failed, args: %v, error: %v", args, err) + span.Errorf("admin update json marshal failed, args: %v, error: %v", args, err) c.RespondError(apierrors.ErrCMUnexpect) return } proposeInfo := base.EncodeProposeInfo(s.VolumeMgr.GetModuleName(), volumemgr.OperTypeAdminUpdateVolume, data, base.ProposeContext{ReqID: 
span.TraceID()}) err = s.raftNode.Propose(ctx, proposeInfo) if err != nil { - span.Error("raft propose failed, err: ", err) + span.Error(err) c.RespondError(apierrors.ErrRaftPropose) return } @@ -458,14 +458,14 @@ func (s *Service) AdminUpdateVolumeUnit(c *rpc.Context) { data, err := json.Marshal(args) if err != nil { - span.Errorf("json marshal failed, args: %v, error: %v", args, err) + span.Errorf("update unit json marshal failed, args: %v, error: %v", args, err) c.RespondError(err) return } proposeInfo := base.EncodeProposeInfo(s.VolumeMgr.GetModuleName(), volumemgr.OperTypeAdminUpdateVolumeUnit, data, base.ProposeContext{ReqID: span.TraceID()}) err = s.raftNode.Propose(ctx, proposeInfo) if err != nil { - span.Errorf("raft propose failed, err:%v ", err) + span.Error(err) c.RespondError(apierrors.ErrRaftPropose) return } @@ -482,7 +482,7 @@ func (s *Service) V2VolumeList(c *rpc.Context) { span.Debugf("accept V2VolumeList request, args: %v", args) if err := s.raftNode.ReadIndex(ctx); err != nil { - span.Errorf("read index error: %v", err) + span.Errorf("list v2 read index error: %v", err) c.RespondError(apierrors.ErrRaftReadIndex) return } diff --git a/blobstore/clustermgr/volumemgr/applier.go b/blobstore/clustermgr/volumemgr/applier.go index 361347a46a..52fbee431f 100644 --- a/blobstore/clustermgr/volumemgr/applier.go +++ b/blobstore/clustermgr/volumemgr/applier.go @@ -144,7 +144,7 @@ func (v *VolumeMgr) Apply(ctx context.Context, operTypes []int32, datas [][]byte ) err := json.Unmarshal(datas[idx], args) if err != nil { - errs[idx] = errors.Info(err, "json unmarshal failed, data: ", datas[idx]).Detail(err) + errs[idx] = errors.Info(err, t, datas[idx]).Detail(err) wg.Done() continue } @@ -171,7 +171,7 @@ func (v *VolumeMgr) Apply(ctx context.Context, operTypes []int32, datas [][]byte args := &CreateVolumeCtx{} err := args.Decode(datas[idx]) if err != nil { - errs[idx] = errors.Info(err, "json unmarshal failed, data: ", datas[idx]).Detail(err) + errs[idx] = errors.Info(err, t, datas[idx]).Detail(err) wg.Done() continue } @@ -192,7 +192,7 @@ func (v *VolumeMgr) Apply(ctx context.Context, operTypes []int32, datas [][]byte args := &ChangeVolStatusCtx{} err := json.Unmarshal(datas[idx], args) if err != nil { - errs[idx] = errors.Info(err, "json unmarshal failed, data: ", datas[idx]).Detail(err) + errs[idx] = errors.Info(err, t, datas[idx]).Detail(err) wg.Done() continue } @@ -207,7 +207,7 @@ func (v *VolumeMgr) Apply(ctx context.Context, operTypes []int32, datas [][]byte args := &clustermgr.RetainVolumes{} err := json.Unmarshal(datas[idx], args) if err != nil { - errs[idx] = errors.Info(err, "json unmarshal failed, data: ", datas[idx]).Detail(err) + errs[idx] = errors.Info(err, t, datas[idx]).Detail(err) wg.Done() continue } @@ -223,7 +223,7 @@ func (v *VolumeMgr) Apply(ctx context.Context, operTypes []int32, datas [][]byte args := &clustermgr.UpdateVolumeArgs{} err := json.Unmarshal(datas[idx], args) if err != nil { - errs[idx] = errors.Info(err, "json unmarshal failed, data: ", datas[idx]).Detail(err) + errs[idx] = errors.Info(err, t, datas[idx]).Detail(err) wg.Done() continue } @@ -239,7 +239,7 @@ func (v *VolumeMgr) Apply(ctx context.Context, operTypes []int32, datas [][]byte args := &clustermgr.ReportChunkArgs{} err := args.Decode(bytes.NewReader(datas[idx])) if err != nil { - errs[idx] = errors.Info(err, "json unmarshal failed, data: ", datas[idx]).Detail(err) + errs[idx] = errors.Info(err, t, datas[idx]).Detail(err) wg.Done() continue } @@ -259,7 +259,7 @@ func (v *VolumeMgr) Apply(ctx 
context.Context, operTypes []int32, datas [][]byte args := &clustermgr.SetCompactChunkArgs{} err := json.Unmarshal(datas[idx], args) if err != nil { - errs[idx] = errors.Info(err, "json unmarshal failed, data: ", datas[idx]).Detail(err) + errs[idx] = errors.Info(err, t, datas[idx]).Detail(err) wg.Done() continue } @@ -274,7 +274,7 @@ func (v *VolumeMgr) Apply(ctx context.Context, operTypes []int32, datas [][]byte args := make([]proto.VuidPrefix, 0) err := json.Unmarshal(datas[idx], &args) if err != nil { - errs[idx] = errors.Info(err, "json unmarshal failed, data: ", datas[idx]).Detail(err) + errs[idx] = errors.Info(err, t, datas[idx]).Detail(err) wg.Done() continue } @@ -290,7 +290,7 @@ func (v *VolumeMgr) Apply(ctx context.Context, operTypes []int32, datas [][]byte args := &allocVolumeUnitCtx{} err := json.Unmarshal(datas[idx], args) if err != nil { - errs[idx] = errors.Info(err, "json unmarshal failed, data: ", datas[idx]).Detail(err) + errs[idx] = errors.Info(err, t, datas[idx]).Detail(err) wg.Done() continue } @@ -305,7 +305,7 @@ func (v *VolumeMgr) Apply(ctx context.Context, operTypes []int32, datas [][]byte var args DeleteTaskCtx err := json.Unmarshal(datas[idx], &args) if err != nil { - errs[idx] = errors.Info(err, "json unmarshal failed, data: ", datas[idx]).Detail(err) + errs[idx] = errors.Info(err, t, datas[idx]).Detail(err) wg.Done() continue } @@ -320,7 +320,7 @@ func (v *VolumeMgr) Apply(ctx context.Context, operTypes []int32, datas [][]byte args := make([]proto.Vid, 0) err := json.Unmarshal(datas[idx], &args) if err != nil { - errs[idx] = errors.Info(err, "json unmarshal failed, data: ", datas[idx]).Detail(err) + errs[idx] = errors.Info(err, t, datas[idx]).Detail(err) wg.Done() continue } @@ -335,7 +335,7 @@ func (v *VolumeMgr) Apply(ctx context.Context, operTypes []int32, datas [][]byte args := &clustermgr.VolumeInfoBase{} err := json.Unmarshal(datas[idx], args) if err != nil { - errs[idx] = errors.Info(err, "json unmarshal failed, data: ", datas[idx]).Detail(err) + errs[idx] = errors.Info(err, t, datas[idx]).Detail(err) wg.Done() continue } @@ -350,7 +350,7 @@ func (v *VolumeMgr) Apply(ctx context.Context, operTypes []int32, datas [][]byte args := &clustermgr.AdminUpdateUnitArgs{} err := json.Unmarshal(datas[idx], args) if err != nil { - errs[idx] = errors.Info(err, "json unmarshal failed, data: ", datas[idx]).Detail(err) + errs[idx] = errors.Info(err, t, datas[idx]).Detail(err) wg.Done() continue } @@ -365,7 +365,7 @@ func (v *VolumeMgr) Apply(ctx context.Context, operTypes []int32, datas [][]byte args := &CreateVolumeCtx{} err := args.Decode(datas[idx]) if err != nil { - errs[idx] = errors.Info(err, "json unmarshal failed, data: ", datas[idx]).Detail(err) + errs[idx] = errors.Info(err, t, datas[idx]).Detail(err) wg.Done() continue } @@ -386,7 +386,7 @@ func (v *VolumeMgr) Apply(ctx context.Context, operTypes []int32, datas [][]byte args := make([]*volumedb.VolumeUnitRecord, 0) err := json.Unmarshal(datas[idx], &args) if err != nil { - errs[idx] = errors.Info(err, "json unmarshal failed, data: ", datas[idx]).Detail(err) + errs[idx] = errors.Info(err, t, datas[idx]).Detail(err) wg.Done() continue } diff --git a/blobstore/clustermgr/volumemgr/volumeunit.go b/blobstore/clustermgr/volumemgr/volumeunit.go index 0057611bab..8379947d68 100644 --- a/blobstore/clustermgr/volumemgr/volumeunit.go +++ b/blobstore/clustermgr/volumemgr/volumeunit.go @@ -174,7 +174,7 @@ func (v *VolumeMgr) applyUpdateVolumeUnit(ctx context.Context, newVuid proto.Vui vol := v.all.getVol(newVuid.Vid()) if 
vol == nil { - span.Errorf("vid:%d get volume is nil ", newVuid.Vid()) + span.Errorf("update vid:%d get volume is nil ", newVuid.Vid()) return ErrVolumeNotExist } index := newVuid.Index() @@ -235,7 +235,7 @@ func (v *VolumeMgr) applyAllocVolumeUnit(ctx context.Context, args *allocVolumeU vol := v.all.getVol(args.Vuid.Vid()) if vol == nil { - span.Errorf("vid:%d get volume is nil ", args.Vuid.Vid()) + span.Errorf("alloc vid:%d get volume is nil ", args.Vuid.Vid()) return ErrVolumeNotExist } @@ -321,7 +321,7 @@ func (v *VolumeMgr) applyChunkSetCompact(ctx context.Context, args *cmapi.SetCom vol := v.all.getVol(args.Vuid.Vid()) if vol == nil { - span.Errorf("vid:%d get volume is nil ", args.Vuid.Vid()) + span.Errorf("chunkset vid:%d get volume is nil ", args.Vuid.Vid()) return ErrVolumeNotExist } diff --git a/blobstore/common/profile/profile.go b/blobstore/common/profile/profile.go index 5d1f10aca2..5e689c2db3 100644 --- a/blobstore/common/profile/profile.go +++ b/blobstore/common/profile/profile.go @@ -83,18 +83,21 @@ func (r *route) AddUserPath(path string) { } func (r *route) String() string { + varString := func(vars []string) string { + return fmt.Sprintf("\t%s\n\n", strings.Join(vars, "\n\t")) + } r.mu.Lock() sb := strings.Builder{} sb.WriteString("usage:\n\t/\n\n") sb.WriteString("vars:\n") - sb.WriteString(fmt.Sprintf("\t%s\n\n", strings.Join(r.vars, "\n\t"))) + sb.WriteString(varString(r.vars)) sb.WriteString("pprof:\n") - sb.WriteString(fmt.Sprintf("\t%s\n\n", strings.Join(r.pprof, "\n\t"))) + sb.WriteString(varString(r.pprof)) sb.WriteString("metrics:\n") - sb.WriteString(fmt.Sprintf("\t%s\n\n", strings.Join(r.metrics, "\n\t"))) + sb.WriteString(varString(r.metrics)) if len(r.users) > 0 { sb.WriteString("users:\n") - sb.WriteString(fmt.Sprintf("\t%s\n\n", strings.Join(r.users, "\n\t"))) + sb.WriteString(varString(r.users)) } r.mu.Unlock() return sb.String() diff --git a/blobstore/common/rpc/auditlog/request_row.go b/blobstore/common/rpc/auditlog/request_row.go index 55bfa9377a..bb99095d87 100644 --- a/blobstore/common/rpc/auditlog/request_row.go +++ b/blobstore/common/rpc/auditlog/request_row.go @@ -471,13 +471,14 @@ func (a *RequestRow) XlogsTime(names []string) (msSpeedTotal uint64) { // apiWithParams returns api information with maxApiLevel( default 2). func apiWithParams(service, method, path, host, params string, maxApiLevel int) (api string) { + const unknown = ".unknown" if service == "" || method == "" { return "unknown.unknown" } stype := strings.ToLower(service) fields := strings.Split(strings.ToLower(path), "/") if len(fields) <= 1 { - return stype + ".unknown" + return stype + unknown } firstPath := fields[1] @@ -493,7 +494,7 @@ func apiWithParams(service, method, path, host, params string, maxApiLevel int) return stype + ".v2-tune." + strings.Join(fields[firstPathIndex+1:], ".") } if !isValidApi(firstPath) { - return stype + ".unknown" + return stype + unknown } api = firstPath @@ -503,11 +504,11 @@ func apiWithParams(service, method, path, host, params string, maxApiLevel int) length := len(fields) for level <= maxApiLevel && index < length { api += "." 
+ fields[index] - level += 1 - index += 1 + level++ + index++ } if !isValidMultiPathApi(api) { - return stype + ".unknown" + return stype + unknown } } diff --git a/blobstore/common/rpc/example/main/main.go b/blobstore/common/rpc/example/main/main.go index a086647ec5..e46054a612 100644 --- a/blobstore/common/rpc/example/main/main.go +++ b/blobstore/common/rpc/example/main/main.go @@ -66,7 +66,7 @@ func main() { log.Info("meta server is running at:", c.BindAddr) go func() { if err := httpServer.ListenAndServe(); err != nil && err != http.ErrServerClosed { - log.Panic("server exits:", err) + log.Panic("meta server exits:", err) } }() } @@ -80,7 +80,7 @@ func main() { log.Info("file server is running at:", c.BindAddr) go func() { if err := httpServer.ListenAndServe(); err != nil && err != http.ErrServerClosed { - log.Panic("server exits:", err) + log.Panic("file server exits:", err) } }() } @@ -94,7 +94,7 @@ func main() { log.Info("app server is running at:", c.BindAddr) go func() { if err := httpServer.ListenAndServe(); err != nil && err != http.ErrServerClosed { - log.Panic("server exits:", err) + log.Panic("app server exits:", err) } }() } diff --git a/blobstore/scheduler/client/clustermgr.go b/blobstore/scheduler/client/clustermgr.go index c432246d77..82f92e594c 100644 --- a/blobstore/scheduler/client/clustermgr.go +++ b/blobstore/scheduler/client/clustermgr.go @@ -514,7 +514,7 @@ func (c *clustermgrClient) ListDiskVolumeUnits(ctx context.Context, diskID proto diskInfo, err := c.client.DiskInfo(ctx, diskID) if err != nil { - span.Errorf("get disk info failed: disk_id[%d], err[%+v]", diskID, err) + span.Errorf("list volume unit, get disk info failed: disk_id[%d], err[%+v]", diskID, err) return nil, err } @@ -576,7 +576,7 @@ func (c *clustermgrClient) listAllDisks(ctx context.Context, status proto.DiskSt } selectDisks, selectMarker, err := c.listDisk(ctx, args) if err != nil { - span.Errorf("list disk failed: err[%+v]", err) + span.Errorf("list all disk failed: err[%+v]", err) return nil, err } @@ -602,7 +602,7 @@ func (c *clustermgrClient) listDisks(ctx context.Context, status proto.DiskStatu } selectDisks, selectMarker, err := c.listDisk(ctx, args) if err != nil { - span.Errorf("list disk failed: err[%+v]", err) + span.Errorf("list disks failed: err[%+v]", err) return nil, err } @@ -693,7 +693,7 @@ func (c *clustermgrClient) SetDiskDropped(ctx context.Context, diskID proto.Disk info, err := c.client.DiskInfo(ctx, diskID) if err != nil { - span.Errorf("get disk info failed: disk_id[%d], err[%+v]", diskID, err) + span.Errorf("set disk dropped, get disk info failed: disk_id[%d], err[%+v]", diskID, err) return err } @@ -831,7 +831,7 @@ func (c *clustermgrClient) listMigrateTasks(ctx context.Context, taskType proto. return nil, marker, err } if task.TaskType != taskType { - span.Errorf("task type is invalid: expected[%s], actual[%s]", taskType, task.TaskType) + span.Errorf("list task type is invalid: expected[%s], actual[%s]", taskType, task.TaskType) continue } tasks = append(tasks, task) @@ -883,7 +883,7 @@ func (c *clustermgrClient) GetMigratingDisk(ctx context.Context, taskType proto. 
return nil, err } if meta.TaskType != taskType { - span.Errorf("task type is invalid: expected[%s], actual[%s]", taskType, meta.TaskType) + span.Errorf("get task type is invalid: expected[%s], actual[%s]", taskType, meta.TaskType) return meta, errcode.ErrIllegalTaskType } if meta.Disk.DiskID != diskID { @@ -919,7 +919,7 @@ func (c *clustermgrClient) ListMigratingDisks(ctx context.Context, taskType prot return nil, err } if task.TaskType != taskType { - span.Errorf("task type is invalid: expected[%s], actual[%s]", taskType, task.TaskType) + span.Errorf("list disks task type is invalid: expected[%s], actual[%s]", taskType, task.TaskType) continue } disks = append(disks, task) diff --git a/blobstore/scheduler/disk_droper.go b/blobstore/scheduler/disk_droper.go index 8ca231c855..cd812b06c9 100644 --- a/blobstore/scheduler/disk_droper.go +++ b/blobstore/scheduler/disk_droper.go @@ -88,8 +88,8 @@ func (mgr *DiskDropMgr) clearJunkTasksWhenLoading(ctx context.Context, tasks []* span.Errorf("has junk task but the disk is not dropped: disk_id[%d], task_id[%s]", task.SourceDiskID, task.TaskID) return errcode.ErrUnexpectMigrationTask } - span.Warnf("delete junk task: task_id[%s]", task.TaskID) - base.InsistOn(ctx, "delete junk task", func() error { + span.Warnf("loading delete junk task: task_id[%s]", task.TaskID) + base.InsistOn(ctx, "loading delete junk task", func() error { return mgr.clusterMgrCli.DeleteMigrateTask(ctx, task.TaskID) }) } @@ -389,8 +389,8 @@ func (mgr *DiskDropMgr) checkAndClearJunkTasks() { if len(tasks) != 0 { span.Warnf("clear junk tasks of dropped disk: disk_id[%d], tasks size[%d]", disk.diskID, len(tasks)) for _, task := range tasks { - span.Warnf("delete junk task: task_id[%s]", task.TaskID) - base.InsistOn(ctx, "delete junk task", func() error { + span.Warnf("check and delete junk task: task_id[%s]", task.TaskID) + base.InsistOn(ctx, "check and delete junk task", func() error { return mgr.clusterMgrCli.DeleteMigrateTask(ctx, task.TaskID) }) } } diff --git a/blobstore/scheduler/disk_repairer.go b/blobstore/scheduler/disk_repairer.go index df9473935f..1a0c878677 100644 --- a/blobstore/scheduler/disk_repairer.go +++ b/blobstore/scheduler/disk_repairer.go @@ -148,8 +148,8 @@ func (mgr *DiskRepairMgr) clearJunkTasksWhenLoading(ctx context.Context, tasks [ span.Errorf("has junk task but the disk is not repaired: disk_id[%d], task_id[%s]", task.SourceDiskID, task.TaskID) return errcode.ErrUnexpectMigrationTask } - span.Warnf("delete junk task: task_id[%s]", task.TaskID) - base.InsistOn(ctx, "delete junk task", func() error { + span.Warnf("loading delete junk task: task_id[%s]", task.TaskID) + base.InsistOn(ctx, "loading delete junk task", func() error { return mgr.clusterMgrCli.DeleteMigrateTask(ctx, task.TaskID) }) } @@ -717,8 +717,8 @@ func (mgr *DiskRepairMgr) checkAndClearJunkTasks() { if len(tasks) != 0 { span.Warnf("clear junk tasks of repaired disk: disk_id[%d], tasks size[%d]", disk.diskID, len(tasks)) for _, task := range tasks { - span.Warnf("delete junk task: task_id[%s]", task.TaskID) - base.InsistOn(ctx, "delete junk task", func() error { + span.Warnf("check and delete junk task: task_id[%s]", task.TaskID) + base.InsistOn(ctx, "check and delete junk task", func() error { return mgr.clusterMgrCli.DeleteMigrateTask(ctx, task.TaskID) }) } } diff --git a/blobstore/util/defaulter/defaulter.go b/blobstore/util/defaulter/defaulter.go index 2ba87c3475..cb4077fca1 100644 --- a/blobstore/util/defaulter/defaulter.go +++ b/blobstore/util/defaulter/defaulter.go @@ -83,7 +83,7 @@ func 
equalZero(val reflect.Value, typ reflect.Kind) bool { case reflect.Float32, reflect.Float64: return math.Float64bits(val.Float()) == 0 default: - panic("unsupported type " + typ.String()) + panic("equal zero unsupported type " + typ.String()) } } @@ -96,7 +96,7 @@ func lessZero(val reflect.Value, typ reflect.Kind) bool { case reflect.Float32, reflect.Float64: return val.Float() < -1e-9 default: - panic("unsupported type " + typ.String()) + panic("less zero unsupported type " + typ.String()) } } @@ -109,6 +109,6 @@ func lessOrEqualZero(val reflect.Value, typ reflect.Kind) bool { case reflect.Float32, reflect.Float64: return val.Float() < 1e-9 default: - panic("unsupported type " + typ.String()) + panic("less or equal zero unsupported type " + typ.String()) } }
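
Note (illustration only, not part of the diff): the pattern applied throughout this patch is to replace duplicated string literals with a named constant or a small helper, and to make otherwise identical log messages unique by prefixing the call site. The sketch below is a minimal, self-contained Go program in that spirit; the plain integer diskID/vuidPrefix parameters stand in for the repo's proto.DiskID and proto.VuidPrefix types, and only the names timeFormat and fmtIndexKey correspond to identifiers actually introduced by the patch.

package main

import (
	"fmt"
	"time"
)

// timeFormat plays the role of the constant added in cli/clustermgr/backup.go:
// the "2006-01-02" layout is written once and reused for Format and Parse.
const timeFormat = "2006-01-02"

// fmtIndexKey mirrors the helper added in volumedb/volumetbl.go: one function
// builds the "<indexName>-<diskID>-<vuidPrefix>" key instead of repeating the
// same fmt.Sprintf pattern at several call sites.
func fmtIndexKey(name string, diskID uint32, vuidPrefix uint64) string {
	return fmt.Sprintf("%s-%d-%d", name, diskID, vuidPrefix)
}

func main() {
	// Shared literal via a constant.
	date := time.Now().Format(timeFormat)
	fmt.Println("backup dir:", date)

	// Shared literal via a helper.
	fmt.Println(fmtIndexKey("diskID", 42, 4201))

	// Duplicated log text disambiguated with a per-call-site prefix, as done
	// for the "disk:%v not found" messages in blobnode/chunk.go.
	for _, op := range []string{"release", "readonly", "readwrite"} {
		fmt.Printf("%s disk:%v not found\n", op, 42)
	}
}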