diff --git a/cli/command/enter.go b/cli/command/enter.go
index e7d54c10d..ee46746a4 100644
--- a/cli/command/enter.go
+++ b/cli/command/enter.go
@@ -26,8 +26,11 @@ package command
 
 import (
 	"github.com/opencurve/curveadm/cli/cli"
+	comm "github.com/opencurve/curveadm/internal/common"
 	"github.com/opencurve/curveadm/internal/configure/topology"
 	"github.com/opencurve/curveadm/internal/errno"
+	"github.com/opencurve/curveadm/internal/playbook"
+	task "github.com/opencurve/curveadm/internal/task/task/common"
 	"github.com/opencurve/curveadm/internal/tools"
 	"github.com/opencurve/curveadm/internal/utils"
 	"github.com/spf13/cobra"
@@ -43,8 +46,11 @@ func NewEnterCommand(curveadm *cli.CurveAdm) *cobra.Command {
 	cmd := &cobra.Command{
 		Use:   "enter ID",
 		Short: "Enter service container",
-		Args:  utils.ExactArgs(1),
+		Args:  utils.RequiresMaxArgs(1),
 		PreRunE: func(cmd *cobra.Command, args []string) error {
+			if len(args) == 0 {
+				return nil
+			}
 			options.id = args[0]
 			return curveadm.CheckId(options.id)
 		},
@@ -57,32 +63,96 @@ func NewEnterCommand(curveadm *cli.CurveAdm) *cobra.Command {
 	return cmd
 }
 
+func genStatusForLeaderPlaybook(curveadm *cli.CurveAdm,
+	dcs []*topology.DeployConfig,
+	options statusOptions) (*playbook.Playbook, error) {
+	dcs = curveadm.FilterDeployConfig(dcs, topology.FilterOption{
+		Id:   options.id,
+		Role: options.role,
+		Host: options.host,
+	})
+	if len(dcs) == 0 {
+		return nil, errno.ERR_NO_SERVICES_MATCHED
+	}
+
+	steps := []int{playbook.INIT_SERVIE_STATUS, playbook.GET_MDS_LEADER}
+	pb := playbook.NewPlaybook(curveadm)
+	for _, step := range steps {
+		pb.AddStep(&playbook.PlaybookStep{
+			Type:    step,
+			Configs: dcs,
+			ExecOptions: playbook.ExecOptions{
+				//Concurrency: 10,
+				SilentSubBar:  true,
+				SilentMainBar: step == playbook.INIT_SERVIE_STATUS,
+				SkipError:     true,
+			},
+		})
+	}
+	return pb, nil
+}
+
 func runEnter(curveadm *cli.CurveAdm, options enterOptions) error {
 	// 1) parse cluster topology
 	dcs, err := curveadm.ParseTopology()
 	if err != nil {
 		return err
 	}
-
-	// 2) filter service
-	dcs = curveadm.FilterDeployConfig(dcs, topology.FilterOption{
-		Id:   options.id,
-		Role: "*",
-		Host: "*",
-	})
-	if len(dcs) == 0 {
-		return errno.ERR_NO_SERVICES_MATCHED
+	var containerId string
+	var dc *topology.DeployConfig
+	if options.id != "" {
+		// an ID was given: enter the specified service container
+		// 2) filter service
+		dcs = curveadm.FilterDeployConfig(dcs, topology.FilterOption{
+			Id:   options.id,
+			Role: "*",
+			Host: "*",
+		})
+		if len(dcs) == 0 {
+			return errno.ERR_NO_SERVICES_MATCHED
+		}
+		// 3) pick the matched service
+		dc = dcs[0]
+	} else {
+		// no ID given: locate the MDS leader and enter its container
+		// 2) generate status playbook for mds services
+		statusForLeaderOptions := statusOptions{id: "*", role: ROLE_MDS, host: "*"}
+		pb, err := genStatusForLeaderPlaybook(curveadm, dcs, statusForLeaderOptions)
+		if err != nil {
+			return err
+		}
+		// 3) run playbook
+		err = pb.Run()
+		if err != nil {
+			return err
+		}
+		// 4) pick the leader from the collected service statuses
+		statuses := []task.LeaderServiceStatus{}
+		value := curveadm.MemStorage().Get(comm.KEY_LEADER_SERVICE_STATUS)
+		if value != nil {
+			m := value.(map[string]task.LeaderServiceStatus)
+			for _, status := range m {
+				statuses = append(statuses, status)
+			}
+		}
+		for _, status := range statuses {
+			if status.IsLeader {
+				dc = status.Config
+				break
+			}
+		}
+		// 5) fail if no leader was found
+		if dc == nil {
+			return errno.ERR_NO_LEADER_CONTAINER_FOUND
+		}
 	}
-
-	// 3) get container id
-	dc := dcs[0]
 	serviceId := curveadm.GetServiceId(dc.GetId())
-	containerId, err := curveadm.GetContainerId(serviceId)
+	containerId, err = curveadm.GetContainerId(serviceId)
 	if err != nil {
 		return err
 	}
 
-	// 4) attch remote container
+	// attach remote container
 	home := dc.GetProjectLayout().ServiceRootDir
 	return tools.AttachRemoteContainer(curveadm, dc.GetHost(), containerId, home)
 }
diff --git a/internal/common/common.go b/internal/common/common.go
index 8e67c6485..419acb1be 100644
--- a/internal/common/common.go
+++ b/internal/common/common.go
@@ -65,10 +65,11 @@ const (
 	KEY_NEW_TOPOLOGY_DATA = "NEW_TOPOLOGY_DATA"
 
 	// status
-	KEY_ALL_SERVICE_STATUS = "ALL_SERVICE_STATUS"
-	SERVICE_STATUS_CLEANED = "Cleaned"
-	SERVICE_STATUS_LOSED   = "Losed"
-	SERVICE_STATUS_UNKNOWN = "Unknown"
+	KEY_ALL_SERVICE_STATUS    = "ALL_SERVICE_STATUS"
+	SERVICE_STATUS_CLEANED    = "Cleaned"
+	SERVICE_STATUS_LOSED      = "Losed"
+	SERVICE_STATUS_UNKNOWN    = "Unknown"
+	KEY_LEADER_SERVICE_STATUS = "SERVICE_STATUS_FOR_LEADER"
 
 	// clean
 	KEY_CLEAN_ITEMS = "CLEAN_ITEMS"
diff --git a/internal/errno/errno.go b/internal/errno/errno.go
index 46b8228c5..543daf414 100644
--- a/internal/errno/errno.go
+++ b/internal/errno/errno.go
@@ -253,7 +253,8 @@ var (
 	ERR_UNSUPPORT_CLEAN_ITEM = EC(210005, "unsupport clean item")
 	ERR_NO_SERVICES_MATCHED  = EC(210006, "no services matched")
 	// TODO: please check pool set disk type
-	ERR_INVALID_DISK_TYPE = EC(210007, "poolset disk type must be lowercase and can only be one of ssd, hdd and nvme")
+	ERR_INVALID_DISK_TYPE         = EC(210007, "poolset disk type must be lowercase and can only be one of ssd, hdd and nvme")
+	ERR_NO_LEADER_CONTAINER_FOUND = EC(210008, "no leader container found")
 
 	// 220: commad options (client common)
 	ERR_UNSUPPORT_CLIENT_KIND = EC(220000, "unsupport client kind")
diff --git a/internal/playbook/factory.go b/internal/playbook/factory.go
index f62a7b3e5..d956cecb5 100644
--- a/internal/playbook/factory.go
+++ b/internal/playbook/factory.go
@@ -83,6 +83,7 @@ const (
 	GET_CLIENT_STATUS
 	INSTALL_CLIENT
 	UNINSTALL_CLIENT
+	GET_MDS_LEADER
 
 	// bs
 	FORMAT_CHUNKFILE_POOL
@@ -225,6 +226,8 @@ func (p *Playbook) createTasks(step *PlaybookStep) (*tasks.Tasks, error) {
 			t, err = comm.NewInitServiceStatusTask(curveadm, config.GetDC(i))
 		case GET_SERVICE_STATUS:
 			t, err = comm.NewGetServiceStatusTask(curveadm, config.GetDC(i))
+		case GET_MDS_LEADER:
+			t, err = comm.NewGetMdsLeaderTask(curveadm, config.GetDC(i))
 		case CLEAN_SERVICE:
 			t, err = comm.NewCleanServiceTask(curveadm, config.GetDC(i))
 		case INIT_SUPPORT:
diff --git a/internal/task/task/common/service_status.go b/internal/task/task/common/service_status.go
index 660fd68c7..9c145d900 100644
--- a/internal/task/task/common/service_status.go
+++ b/internal/task/task/common/service_status.go
@@ -80,6 +80,13 @@ type (
 		memStorage *utils.SafeMap
 	}
 
+	step2FormatLeaderServiceStatus struct {
+		dc         *topology.DeployConfig
+		serviceId  string
+		isLeader   *bool
+		memStorage *utils.SafeMap
+	}
+
 	ServiceStatus struct {
 		Id          string
 		ParentId    string
@@ -94,6 +101,11 @@ type (
 		DataDir     string
 		Config      *topology.DeployConfig
 	}
+
+	LeaderServiceStatus struct {
+		IsLeader bool
+		Config   *topology.DeployConfig
+	}
 )
 
 func setServiceStatus(memStorage *utils.SafeMap, id string, status ServiceStatus) {
@@ -109,6 +121,19 @@ func setServiceStatus(memStorage *utils.SafeMap, id string, status ServiceStatus) {
 	})
 }
 
+func setLeaderServiceStatus(memStorage *utils.SafeMap, id string, status LeaderServiceStatus) {
+	memStorage.TX(func(kv *utils.SafeMap) error {
+		m := map[string]LeaderServiceStatus{}
+		v := kv.Get(comm.KEY_LEADER_SERVICE_STATUS)
+		if v != nil {
+			m = v.(map[string]LeaderServiceStatus)
+		}
+		m[id] = status
+		kv.Set(comm.KEY_LEADER_SERVICE_STATUS, m)
+		return nil
+	})
+}
+
 func (s *step2InitStatus) Execute(ctx *context.Context) error {
 	dc := s.dc
 	id := s.serviceId
@@ -218,6 +243,16 @@ func (s *step2FormatServiceStatus) Execute(ctx *context.Context) error {
 	return nil
 }
 
+func (s *step2FormatLeaderServiceStatus) Execute(ctx *context.Context) error {
+	dc := s.dc
+	id := s.serviceId
+	setLeaderServiceStatus(s.memStorage, id, LeaderServiceStatus{
+		IsLeader: *s.isLeader,
+		Config:   dc,
+	})
+	return nil
+}
+
 func NewInitServiceStatusTask(curveadm *cli.CurveAdm, dc *topology.DeployConfig) (*task.Task, error) {
 	serviceId := curveadm.GetServiceId(dc.GetId())
 	containerId, err := curveadm.GetContainerId(serviceId)
@@ -306,3 +341,51 @@ func NewGetServiceStatusTask(curveadm *cli.CurveAdm, dc *topology.DeployConfig) (*task.Task, error) {
 
 	return t, nil
 }
+
+func NewGetMdsLeaderTask(curveadm *cli.CurveAdm, dc *topology.DeployConfig) (*task.Task, error) {
+	serviceId := curveadm.GetServiceId(dc.GetId())
+	containerId, err := curveadm.GetContainerId(serviceId)
+	if curveadm.IsSkip(dc) {
+		return nil, nil
+	} else if err != nil {
+		return nil, err
+	}
+	hc, err := curveadm.GetHost(dc.GetHost())
+	if err != nil {
+		return nil, err
+	}
+
+	// new task
+	subname := fmt.Sprintf("host=%s role=%s containerId=%s",
+		dc.GetHost(), dc.GetRole(), tui.TrimContainerId(containerId))
+	t := task.NewTask("Get MDS Leader", subname, hc.GetSSHConfig())
+
+	// add step to task
+	var status string
+	var isLeader bool
+	t.AddStep(&step.ListContainers{
+		ShowAll:     true,
+		Format:      `"{{.Status}}"`,
+		Filter:      fmt.Sprintf("id=%s", containerId),
+		Out:         &status,
+		ExecOptions: curveadm.ExecOptions(),
+	})
+	t.AddStep(&step.Lambda{
+		Lambda: TrimContainerStatus(&status),
+	})
+	t.AddStep(&step2GetLeader{
+		dc:          dc,
+		containerId: containerId,
+		status:      &status,
+		isLeader:    &isLeader,
+		execOptions: curveadm.ExecOptions(),
+	})
+	t.AddStep(&step2FormatLeaderServiceStatus{
+		dc:         dc,
+		serviceId:  serviceId,
+		isLeader:   &isLeader,
+		memStorage: curveadm.MemStorage(),
+	})
+
+	return t, nil
+}
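Note on `step2GetLeader`: it is referenced by `NewGetMdsLeaderTask` above but defined in another file of this change, so it does not appear in this diff. For review context, here is a minimal sketch of what its `Execute` could look like; the struct fields follow the literal in `NewGetMdsLeaderTask`, the "Up" status-prefix check mirrors the convention used by the other status steps, and `checkMdsLeader` is a hypothetical helper (not an existing curveadm API) standing in for whatever leader query the real step runs inside the container:

```go
// Sketch only -- not the implementation shipped with this change.
// Assumes the fields set in NewGetMdsLeaderTask above and a hypothetical
// checkMdsLeader helper; imports (e.g. "strings") elided.
func (s *step2GetLeader) Execute(ctx *context.Context) error {
	// A container that is not running cannot host the MDS leader.
	if !strings.HasPrefix(*s.status, "Up") {
		*s.isLeader = false
		return nil
	}
	// Exec into the container and ask the MDS whether it holds leadership.
	leader, err := checkMdsLeader(ctx, s.dc, s.containerId, s.execOptions)
	if err != nil {
		return err
	}
	*s.isLeader = leader
	return nil
}
```

Net behavior: `curveadm enter <ID>` works as before, while a bare `curveadm enter` runs the status playbook against all MDS services, records one `LeaderServiceStatus` per service under `KEY_LEADER_SERVICE_STATUS`, and attaches to the container whose status reports `IsLeader == true`; if none does, it fails with `ERR_NO_LEADER_CONTAINER_FOUND`.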