From 6b1bc5d016212f7362d09fa066c9062bf596727c Mon Sep 17 00:00:00 2001 From: YangMieMie Date: Wed, 12 Jun 2024 10:56:28 +0800 Subject: [PATCH] SKS-2810: Optimize unit tests (#180) --- Makefile | 2 +- ...tructure.cluster.x-k8s.io_elfclusters.yaml | 63 +-- ...tructure.cluster.x-k8s.io_elfmachines.yaml | 255 +++++----- ....cluster.x-k8s.io_elfmachinetemplates.yaml | 107 +++-- controllers/elfcluster_controller_test.go | 3 +- controllers/elfmachine_controller_gpu_test.go | 10 +- .../elfmachine_controller_resources_test.go | 4 +- controllers/elfmachine_controller_test.go | 452 ++++++------------ .../elfmachinetemplate_controller_test.go | 16 +- pkg/util/machine/kcp_test.go | 2 +- pkg/util/machine/machine_test.go | 8 +- test/fake/tower.go | 16 +- test/fake/types.go | 14 +- 13 files changed, 435 insertions(+), 517 deletions(-) diff --git a/Makefile b/Makefile index 9c116ec6..edeacb13 100644 --- a/Makefile +++ b/Makefile @@ -155,7 +155,7 @@ kustomize: ## Download kustomize locally if necessary. CONTROLLER_GEN = $(shell pwd)/bin/controller-gen controller-gen: ## Download controller-gen locally if necessary. - $(call go-get-tool,$(CONTROLLER_GEN),sigs.k8s.io/controller-tools/cmd/controller-gen@v0.13.0) + $(call go-get-tool,$(CONTROLLER_GEN),sigs.k8s.io/controller-tools/cmd/controller-gen@v0.15.0) GINKGO := $(shell pwd)/bin/ginkgo ginkgo: ## Download ginkgo locally if necessary. diff --git a/config/crd/bases/infrastructure.cluster.x-k8s.io_elfclusters.yaml b/config/crd/bases/infrastructure.cluster.x-k8s.io_elfclusters.yaml index 131e3e51..5877fe4d 100644 --- a/config/crd/bases/infrastructure.cluster.x-k8s.io_elfclusters.yaml +++ b/config/crd/bases/infrastructure.cluster.x-k8s.io_elfclusters.yaml @@ -3,7 +3,7 @@ apiVersion: apiextensions.k8s.io/v1 kind: CustomResourceDefinition metadata: annotations: - controller-gen.kubebuilder.io/version: v0.13.0 + controller-gen.kubebuilder.io/version: v0.15.0 name: elfclusters.infrastructure.cluster.x-k8s.io spec: group: infrastructure.cluster.x-k8s.io @@ -38,14 +38,19 @@ spec: description: ElfCluster is the Schema for the elfclusters API. properties: apiVersion: - description: 'APIVersion defines the versioned schema of this representation - of an object. Servers should convert recognized schemas to the latest - internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' + description: |- + APIVersion defines the versioned schema of this representation of an object. + Servers should convert recognized schemas to the latest internal value, and + may reject unrecognized values. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources type: string kind: - description: 'Kind is a string value representing the REST resource this - object represents. Servers may infer this from the endpoint the client - submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' + description: |- + Kind is a string value representing the REST resource this object represents. + Servers may infer this from the endpoint the client submits requests to. + Cannot be updated. + In CamelCase. 
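The CRD churn that dominates this patch is mechanical: bumping controller-gen from v0.13.0 to v0.15.0 (the Makefile hunk above) changes how field descriptions are rendered. v0.13 re-wrapped Go doc comments into folded, hard-wrapped YAML strings, while the newer generator emits them verbatim as "|-" block scalars, preserving the original comment line breaks. For a kubebuilder-annotated field like the one whose description appears later in this file, the doc comment now flows into the CRD byte-for-byte. The Go declaration below is a reconstruction for illustration, not quoted from the api/ sources; the comment text is taken from the vmGracefulShutdown description in this diff:

    // VMGracefulShutdown indicates the VMs in this ElfCluster should shutdown gracefully when deleting the VMs.
    // Default to false because sometimes the OS stuck when shutting down gracefully.
    VMGracefulShutdown bool `json:"vmGracefulShutdown,omitempty"`

None of these description-only diffs changes the schema itself; the structural fields, types, and validation rules are identical before and after.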
+ More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds type: string metadata: type: object @@ -95,9 +100,9 @@ spec: type: string type: object vmGracefulShutdown: - description: VMGracefulShutdown indicates the VMs in this ElfCluster - should shutdown gracefully when deleting the VMs. Default to false - because sometimes the OS stuck when shutting down gracefully. + description: |- + VMGracefulShutdown indicates the VMs in this ElfCluster should shutdown gracefully when deleting the VMs. + Default to false because sometimes the OS stuck when shutting down gracefully. type: boolean type: object status: @@ -110,37 +115,37 @@ spec: operational state. properties: lastTransitionTime: - description: Last time the condition transitioned from one status - to another. This should be when the underlying condition changed. - If that is not known, then using the time when the API field - changed is acceptable. + description: |- + Last time the condition transitioned from one status to another. + This should be when the underlying condition changed. If that is not known, then using the time when + the API field changed is acceptable. format: date-time type: string message: - description: A human readable message indicating details about - the transition. This field may be empty. + description: |- + A human readable message indicating details about the transition. + This field may be empty. type: string reason: - description: The reason for the condition's last transition - in CamelCase. The specific API may choose whether or not this - field is considered a guaranteed API. This field may not be - empty. + description: |- + The reason for the condition's last transition in CamelCase. + The specific API may choose whether or not this field is considered a guaranteed API. + This field may not be empty. type: string severity: - description: Severity provides an explicit classification of - Reason code, so the users or machines can immediately understand - the current situation and act accordingly. The Severity field - MUST be set only when Status=False. + description: |- + Severity provides an explicit classification of Reason code, so the users or machines can immediately + understand the current situation and act accordingly. + The Severity field MUST be set only when Status=False. type: string status: description: Status of the condition, one of True, False, Unknown. type: string type: - description: Type of condition in CamelCase or in foo.example.com/CamelCase. - Many .condition.type values are consistent across resources - like Available, but because arbitrary conditions can be useful - (see .node.status.conditions), the ability to deconflict is - important. + description: |- + Type of condition in CamelCase or in foo.example.com/CamelCase. + Many .condition.type values are consistent across resources like Available, but because arbitrary conditions + can be useful (see .node.status.conditions), the ability to deconflict is important. 
type: string required: - lastTransitionTime diff --git a/config/crd/bases/infrastructure.cluster.x-k8s.io_elfmachines.yaml b/config/crd/bases/infrastructure.cluster.x-k8s.io_elfmachines.yaml index 44368eba..60b98f31 100644 --- a/config/crd/bases/infrastructure.cluster.x-k8s.io_elfmachines.yaml +++ b/config/crd/bases/infrastructure.cluster.x-k8s.io_elfmachines.yaml @@ -3,7 +3,7 @@ apiVersion: apiextensions.k8s.io/v1 kind: CustomResourceDefinition metadata: annotations: - controller-gen.kubebuilder.io/version: v0.13.0 + controller-gen.kubebuilder.io/version: v0.15.0 name: elfmachines.infrastructure.cluster.x-k8s.io spec: group: infrastructure.cluster.x-k8s.io @@ -49,14 +49,19 @@ spec: description: ElfMachine is the Schema for the elfmachines API. properties: apiVersion: - description: 'APIVersion defines the versioned schema of this representation - of an object. Servers should convert recognized schemas to the latest - internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' + description: |- + APIVersion defines the versioned schema of this representation of an object. + Servers should convert recognized schemas to the latest internal value, and + may reject unrecognized values. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources type: string kind: - description: 'Kind is a string value representing the REST resource this - object represents. Servers may infer this from the endpoint the client - submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' + description: |- + Kind is a string value representing the REST resource this object represents. + Servers may infer this from the endpoint the client submits requests to. + Cannot be updated. + In CamelCase. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds type: string metadata: type: object @@ -71,10 +76,9 @@ spec: format: int32 type: integer failureDomain: - description: FailureDomain is the failure domain unique identifier - this Machine should be attached to, as defined in Cluster API. For - this infrastructure provider, the name is equivalent to the name - of the ElfDeploymentZone. + description: |- + FailureDomain is the failure domain unique identifier this Machine should be attached to, as defined in Cluster API. + For this infrastructure provider, the name is equivalent to the name of the ElfDeploymentZone. type: string gpuDevices: description: GPUDevices is the list of physical GPUs used by the virtual @@ -98,8 +102,10 @@ spec: ha: type: boolean host: - description: Host is a unique identifier for a ELF host. Required - when cloneMode is FullClone. Defaults to AUTO_SCHEDULE. + description: |- + Host is a unique identifier for a ELF host. + Required when cloneMode is FullClone. + Defaults to AUTO_SCHEDULE. type: string memoryMiB: format: int64 @@ -112,21 +118,23 @@ spec: description: Devices is the list of network devices used by the virtual machine. items: - description: NetworkDeviceSpec defines the network configuration - for a virtual machine's network device. + description: |- + NetworkDeviceSpec defines the network configuration for a virtual machine's + network device. properties: addressesFromPools: - description: AddressesFromPools is a list of IPAddressPools - that should be assigned to IPAddressClaims. 
+ description: |- + AddressesFromPools is a list of IPAddressPools that should be assigned + to IPAddressClaims. items: - description: TypedLocalObjectReference contains enough - information to let you locate the typed referenced object - inside the same namespace. + description: |- + TypedLocalObjectReference contains enough information to let you locate the + typed referenced object inside the same namespace. properties: apiGroup: - description: APIGroup is the group for the resource - being referenced. If APIGroup is not specified, - the specified Kind must be in the core API group. + description: |- + APIGroup is the group for the resource being referenced. + If APIGroup is not specified, the specified Kind must be in the core API group. For any other third-party types, APIGroup is required. type: string kind: @@ -142,19 +150,22 @@ spec: x-kubernetes-map-type: atomic type: array ipAddrs: - description: IPAddrs is a list of one or more IPv4 and/or - IPv6 addresses to assign to this device. Required when - DHCP4 and DHCP6 are both false. + description: |- + IPAddrs is a list of one or more IPv4 and/or IPv6 addresses to assign + to this device. + Required when DHCP4 and DHCP6 are both false. items: type: string type: array macAddr: - description: MACAddr is the MAC address used by this device. - It is generally a good idea to omit this field and allow - a MAC address to be generated. + description: |- + MACAddr is the MAC address used by this device. + It is generally a good idea to omit this field and allow a MAC address + to be generated. type: string netmask: - description: Netmask is the subnet mask used by this device. + description: |- + Netmask is the subnet mask used by this device. Required when DHCP4 is false. type: string networkType: @@ -163,9 +174,9 @@ spec: routes: description: Required when DHCP4 is false. items: - description: NetworkDeviceRouteSpec defines the network - configuration for a virtual machine's network device - route. + description: |- + NetworkDeviceRouteSpec defines the network configuration for a virtual machine's + network device route. properties: gateway: description: Gateway is the IPv4 gateway used by this @@ -189,28 +200,32 @@ spec: type: object type: array nameservers: - description: Nameservers is a list of IPv4 and/or IPv6 addresses - used as DNS nameservers. Please note that Linux allows only - three nameservers (https://linux.die.net/man/5/resolv.conf). + description: |- + Nameservers is a list of IPv4 and/or IPv6 addresses used as DNS + nameservers. + Please note that Linux allows only three nameservers (https://linux.die.net/man/5/resolv.conf). items: type: string type: array preferredAPIServerCidr: - description: PreferredAPIServeCIDR is the preferred CIDR for the - Kubernetes API server endpoint on this machine + description: |- + PreferredAPIServeCIDR is the preferred CIDR for the Kubernetes API + server endpoint on this machine type: string required: - devices type: object numCPUS: - description: NumCPUs is the number of virtual processors in a VM. - Defaults to the analogue property value in the template from which - this machine is cloned. + description: |- + NumCPUs is the number of virtual processors in a VM. + Defaults to the analogue property value in the template from which this + machine is cloned. format: int32 type: integer numCoresPerSocket: - description: NumCoresPerSocket is the number of cores among which - to distribute CPUs in this VM. 
+ description: |- + NumCoresPerSocket is the number of cores among which to distribute CPUs + in this VM. format: int32 type: integer osType: @@ -221,7 +236,8 @@ spec: - WINDOWS type: string providerID: - description: ProviderID is the virtual machine's UUID formatted as + description: |- + ProviderID is the virtual machine's UUID formatted as elf://f0f6f65d-0786-4170-9ab9-d02187a61ad6 type: string template: @@ -277,37 +293,37 @@ spec: operational state. properties: lastTransitionTime: - description: Last time the condition transitioned from one status - to another. This should be when the underlying condition changed. - If that is not known, then using the time when the API field - changed is acceptable. + description: |- + Last time the condition transitioned from one status to another. + This should be when the underlying condition changed. If that is not known, then using the time when + the API field changed is acceptable. format: date-time type: string message: - description: A human readable message indicating details about - the transition. This field may be empty. + description: |- + A human readable message indicating details about the transition. + This field may be empty. type: string reason: - description: The reason for the condition's last transition - in CamelCase. The specific API may choose whether or not this - field is considered a guaranteed API. This field may not be - empty. + description: |- + The reason for the condition's last transition in CamelCase. + The specific API may choose whether or not this field is considered a guaranteed API. + This field may not be empty. type: string severity: - description: Severity provides an explicit classification of - Reason code, so the users or machines can immediately understand - the current situation and act accordingly. The Severity field - MUST be set only when Status=False. + description: |- + Severity provides an explicit classification of Reason code, so the users or machines can immediately + understand the current situation and act accordingly. + The Severity field MUST be set only when Status=False. type: string status: description: Status of the condition, one of True, False, Unknown. type: string type: - description: Type of condition in CamelCase or in foo.example.com/CamelCase. - Many .condition.type values are consistent across resources - like Available, but because arbitrary conditions can be useful - (see .node.status.conditions), the ability to deconflict is - important. + description: |- + Type of condition in CamelCase or in foo.example.com/CamelCase. + Many .condition.type values are consistent across resources like Available, but because arbitrary conditions + can be useful (see .node.status.conditions), the ability to deconflict is important. type: string required: - lastTransitionTime @@ -316,38 +332,51 @@ spec: type: object type: array failureMessage: - description: "FailureMessage will be set in the event that there is - a terminal problem reconciling the Machine and will contain a more - verbose string suitable for logging and human consumption. \n This - field should not be set for transitive errors that a controller - faces that are expected to be fixed automatically over time (like - service outages), but instead indicate that something is fundamentally - wrong with the Machine's spec or the configuration of the controller, - and that manual intervention is required. 
Examples of terminal errors - would be invalid combinations of settings in the spec, values that - are unsupported by the controller, or the responsible controller - itself being critically misconfigured. \n Any transient errors that - occur during the reconciliation of Machines can be added as events - to the Machine object and/or logged in the controller's output." + description: |- + FailureMessage will be set in the event that there is a terminal problem + reconciling the Machine and will contain a more verbose string suitable + for logging and human consumption. + + + This field should not be set for transitive errors that a controller + faces that are expected to be fixed automatically over + time (like service outages), but instead indicate that something is + fundamentally wrong with the Machine's spec or the configuration of + the controller, and that manual intervention is required. Examples + of terminal errors would be invalid combinations of settings in the + spec, values that are unsupported by the controller, or the + responsible controller itself being critically misconfigured. + + + Any transient errors that occur during the reconciliation of Machines + can be added as events to the Machine object and/or logged in the + controller's output. type: string failureReason: - description: "FailureReason will be set in the event that there is - a terminal problem reconciling the Machine and will contain a succinct - value suitable for machine interpretation. \n This field should - not be set for transitive errors that a controller faces that are - expected to be fixed automatically over time (like service outages), - but instead indicate that something is fundamentally wrong with - the Machine's spec or the configuration of the controller, and that - manual intervention is required. Examples of terminal errors would - be invalid combinations of settings in the spec, values that are - unsupported by the controller, or the responsible controller itself - being critically misconfigured. \n Any transient errors that occur - during the reconciliation of Machines can be added as events to - the Machine object and/or logged in the controller's output." + description: |- + FailureReason will be set in the event that there is a terminal problem + reconciling the Machine and will contain a succinct value suitable + for machine interpretation. + + + This field should not be set for transitive errors that a controller + faces that are expected to be fixed automatically over + time (like service outages), but instead indicate that something is + fundamentally wrong with the Machine's spec or the configuration of + the controller, and that manual intervention is required. Examples + of terminal errors would be invalid combinations of settings in the + spec, values that are unsupported by the controller, or the + responsible controller itself being critically misconfigured. + + + Any transient errors that occur during the reconciliation of Machines + can be added as events to the Machine object and/or logged in the + controller's output. type: string gpuDevices: - description: GPUDevices returns the GPU devices status for each of - the machine's configured GPU devices. + description: |- + GPUDevices returns the GPU devices status for each of the machine's configured + GPU devices. items: description: GPUStatus provides information about one of a VM's GPU device. 
@@ -359,25 +388,29 @@ spec: type: object type: array hostServerName: - description: HostServerName is the name of host server where the virtual - machine runs on. This value is set automatically at runtime and - should not be set or modified by users. + description: |- + HostServerName is the name of host server where the virtual machine runs on. + This value is set automatically at runtime and should not be set or + modified by users. type: string hostServerRef: - description: HostServerRef is the Tower ID of host server where the - virtual machine runs on. This value is set automatically at runtime - and should not be set or modified by users. + description: |- + HostServerRef is the Tower ID of host server where the virtual machine runs on. + This value is set automatically at runtime and should not be set or + modified by users. type: string network: - description: Network returns the network status for each of the machine's - configured network interfaces. + description: |- + Network returns the network status for each of the machine's configured + network interfaces. items: description: NetworkStatus provides information about one of a VM's networks. properties: connected: - description: Connected is a flag that indicates whether this - network is currently connected to the VM. + description: |- + Connected is a flag that indicates whether this network is currently + connected to the VM. type: boolean ipAddrs: description: IPAddrs is one or more IP addresses reported by @@ -396,9 +429,10 @@ spec: type: object type: array placementGroupRef: - description: PlacementGroupRef is the reference to the Tower PlacementGroup - which this ElfMachine belongs to. This value is set automatically - at runtime and should not be set or modified by users. + description: |- + PlacementGroupRef is the reference to the Tower PlacementGroup which this ElfMachine belongs to. + This value is set automatically at runtime and should not be set or + modified by users. type: string ready: description: Ready is true when the provider resource is ready. @@ -411,13 +445,16 @@ spec: type: integer type: object taskRef: - description: TaskRef is a managed object reference to a Task related - to the machine. This value is set automatically at runtime and should - not be set or modified by users. + description: |- + TaskRef is a managed object reference to a Task related to the machine. + This value is set automatically at runtime and should not be set or + modified by users. type: string vmRef: - description: VMRef is used to lookup the VM. This value is set automatically - at runtime and should not be set or modified by users. + description: |- + VMRef is used to lookup the VM. + This value is set automatically at runtime and should not be set or + modified by users. type: string type: object type: object diff --git a/config/crd/bases/infrastructure.cluster.x-k8s.io_elfmachinetemplates.yaml b/config/crd/bases/infrastructure.cluster.x-k8s.io_elfmachinetemplates.yaml index 0f7cbb17..8c9c03e2 100644 --- a/config/crd/bases/infrastructure.cluster.x-k8s.io_elfmachinetemplates.yaml +++ b/config/crd/bases/infrastructure.cluster.x-k8s.io_elfmachinetemplates.yaml @@ -3,7 +3,7 @@ apiVersion: apiextensions.k8s.io/v1 kind: CustomResourceDefinition metadata: annotations: - controller-gen.kubebuilder.io/version: v0.13.0 + controller-gen.kubebuilder.io/version: v0.15.0 name: elfmachinetemplates.infrastructure.cluster.x-k8s.io spec: group: infrastructure.cluster.x-k8s.io @@ -21,14 +21,19 @@ spec: API. 
properties: apiVersion: - description: 'APIVersion defines the versioned schema of this representation - of an object. Servers should convert recognized schemas to the latest - internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' + description: |- + APIVersion defines the versioned schema of this representation of an object. + Servers should convert recognized schemas to the latest internal value, and + may reject unrecognized values. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources type: string kind: - description: 'Kind is a string value representing the REST resource this - object represents. Servers may infer this from the endpoint the client - submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' + description: |- + Kind is a string value representing the REST resource this object represents. + Servers may infer this from the endpoint the client submits requests to. + Cannot be updated. + In CamelCase. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds type: string metadata: type: object @@ -51,10 +56,9 @@ spec: format: int32 type: integer failureDomain: - description: FailureDomain is the failure domain unique identifier - this Machine should be attached to, as defined in Cluster - API. For this infrastructure provider, the name is equivalent - to the name of the ElfDeploymentZone. + description: |- + FailureDomain is the failure domain unique identifier this Machine should be attached to, as defined in Cluster API. + For this infrastructure provider, the name is equivalent to the name of the ElfDeploymentZone. type: string gpuDevices: description: GPUDevices is the list of physical GPUs used @@ -79,8 +83,10 @@ spec: ha: type: boolean host: - description: Host is a unique identifier for a ELF host. Required - when cloneMode is FullClone. Defaults to AUTO_SCHEDULE. + description: |- + Host is a unique identifier for a ELF host. + Required when cloneMode is FullClone. + Defaults to AUTO_SCHEDULE. type: string memoryMiB: format: int64 @@ -93,23 +99,24 @@ spec: description: Devices is the list of network devices used by the virtual machine. items: - description: NetworkDeviceSpec defines the network configuration - for a virtual machine's network device. + description: |- + NetworkDeviceSpec defines the network configuration for a virtual machine's + network device. properties: addressesFromPools: - description: AddressesFromPools is a list of IPAddressPools - that should be assigned to IPAddressClaims. + description: |- + AddressesFromPools is a list of IPAddressPools that should be assigned + to IPAddressClaims. items: - description: TypedLocalObjectReference contains - enough information to let you locate the typed - referenced object inside the same namespace. + description: |- + TypedLocalObjectReference contains enough information to let you locate the + typed referenced object inside the same namespace. properties: apiGroup: - description: APIGroup is the group for the - resource being referenced. If APIGroup is - not specified, the specified Kind must be - in the core API group. For any other third-party - types, APIGroup is required. + description: |- + APIGroup is the group for the resource being referenced. 
+ If APIGroup is not specified, the specified Kind must be in the core API group. + For any other third-party types, APIGroup is required. type: string kind: description: Kind is the type of resource @@ -126,20 +133,23 @@ spec: x-kubernetes-map-type: atomic type: array ipAddrs: - description: IPAddrs is a list of one or more IPv4 - and/or IPv6 addresses to assign to this device. + description: |- + IPAddrs is a list of one or more IPv4 and/or IPv6 addresses to assign + to this device. Required when DHCP4 and DHCP6 are both false. items: type: string type: array macAddr: - description: MACAddr is the MAC address used by - this device. It is generally a good idea to omit - this field and allow a MAC address to be generated. + description: |- + MACAddr is the MAC address used by this device. + It is generally a good idea to omit this field and allow a MAC address + to be generated. type: string netmask: - description: Netmask is the subnet mask used by - this device. Required when DHCP4 is false. + description: |- + Netmask is the subnet mask used by this device. + Required when DHCP4 is false. type: string networkType: description: NetworkType is the VM network type. @@ -147,8 +157,8 @@ spec: routes: description: Required when DHCP4 is false. items: - description: NetworkDeviceRouteSpec defines the - network configuration for a virtual machine's + description: |- + NetworkDeviceRouteSpec defines the network configuration for a virtual machine's network device route. properties: gateway: @@ -174,28 +184,32 @@ spec: type: object type: array nameservers: - description: Nameservers is a list of IPv4 and/or IPv6 - addresses used as DNS nameservers. Please note that - Linux allows only three nameservers (https://linux.die.net/man/5/resolv.conf). + description: |- + Nameservers is a list of IPv4 and/or IPv6 addresses used as DNS + nameservers. + Please note that Linux allows only three nameservers (https://linux.die.net/man/5/resolv.conf). items: type: string type: array preferredAPIServerCidr: - description: PreferredAPIServeCIDR is the preferred CIDR - for the Kubernetes API server endpoint on this machine + description: |- + PreferredAPIServeCIDR is the preferred CIDR for the Kubernetes API + server endpoint on this machine type: string required: - devices type: object numCPUS: - description: NumCPUs is the number of virtual processors in - a VM. Defaults to the analogue property value in the template - from which this machine is cloned. + description: |- + NumCPUs is the number of virtual processors in a VM. + Defaults to the analogue property value in the template from which this + machine is cloned. format: int32 type: integer numCoresPerSocket: - description: NumCoresPerSocket is the number of cores among - which to distribute CPUs in this VM. + description: |- + NumCoresPerSocket is the number of cores among which to distribute CPUs + in this VM. 
format: int32 type: integer osType: @@ -207,8 +221,9 @@ spec: - WINDOWS type: string providerID: - description: ProviderID is the virtual machine's UUID formatted - as elf://f0f6f65d-0786-4170-9ab9-d02187a61ad6 + description: |- + ProviderID is the virtual machine's UUID formatted as + elf://f0f6f65d-0786-4170-9ab9-d02187a61ad6 type: string template: description: Template is the name or ID of the template used diff --git a/controllers/elfcluster_controller_test.go b/controllers/elfcluster_controller_test.go index 05276f9d..fac4f103 100644 --- a/controllers/elfcluster_controller_test.go +++ b/controllers/elfcluster_controller_test.go @@ -173,7 +173,7 @@ var _ = Describe("ElfClusterReconciler", func() { }) It("should delete labels and remove elfcluster finalizer", func() { - task := fake.NewTowerTask() + task := fake.NewTowerTask("") ctrlMgrCtx := fake.NewControllerManagerContext(cluster, elfCluster) fake.InitClusterOwnerReferences(ctx, ctrlMgrCtx, elfCluster, cluster) @@ -266,7 +266,6 @@ var _ = Describe("ElfClusterReconciler", func() { } logBuffer.Reset() - unexpectedError := errors.New("unexpected error") keys := []string{towerresources.GetVMLabelClusterName(), towerresources.GetVMLabelVIP(), towerresources.GetVMLabelNamespace()} mockVMService.EXPECT().CleanUnusedLabels(keys).Return(nil, unexpectedError) reconciler := &ElfClusterReconciler{ControllerManagerContext: ctrlMgrCtx, NewVMService: mockNewVMService} diff --git a/controllers/elfmachine_controller_gpu_test.go b/controllers/elfmachine_controller_gpu_test.go index c079049e..240764d2 100644 --- a/controllers/elfmachine_controller_gpu_test.go +++ b/controllers/elfmachine_controller_gpu_test.go @@ -24,7 +24,6 @@ import ( "github.com/golang/mock/gomock" . "github.com/onsi/ginkgo/v2" . "github.com/onsi/gomega" - "github.com/pkg/errors" "github.com/smartxworks/cloudtower-go-sdk/v2/models" corev1 "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" @@ -61,7 +60,6 @@ var _ = Describe("ElfMachineReconciler-GPU", func() { gpuModel := "A16" vGPUType := "V100" - unexpectedError := errors.New("unexpected error") BeforeEach(func() { logBuffer = new(bytes.Buffer) @@ -351,7 +349,7 @@ var _ = Describe("ElfMachineReconciler-GPU", func() { gpuVMInfo := fake.NewTowerGPUVMInfo() gpuVMInfo.Host = &models.NestedHost{ID: host.ID} gpuVMInfos := service.NewGPUVMInfos(gpuVMInfo) - task := fake.NewTowerTask() + task := fake.NewTowerTask("") withTaskVM := fake.NewWithTaskVM(vm, task) ctrlMgrCtx := fake.NewControllerManagerContext(elfCluster, cluster, elfMachine, machine, secret, md) fake.InitOwnerReferences(ctx, ctrlMgrCtx, elfCluster, cluster, elfMachine, machine) @@ -385,7 +383,7 @@ var _ = Describe("ElfMachineReconciler-GPU", func() { gpuVMInfo := fake.NewTowerGPUVMInfo() gpuVMInfo.Host = &models.NestedHost{ID: host.ID} gpuVMInfos := service.NewGPUVMInfos(gpuVMInfo) - task := fake.NewTowerTask() + task := fake.NewTowerTask("") ctrlMgrCtx := fake.NewControllerManagerContext(elfCluster, cluster, elfMachine, machine, secret, md) fake.InitOwnerReferences(ctx, ctrlMgrCtx, elfCluster, cluster, elfMachine, machine) mockVMService.EXPECT().GetHostsByCluster(elfCluster.Spec.Cluster).Times(2).Return(service.NewHosts(host), nil) @@ -417,7 +415,7 @@ var _ = Describe("ElfMachineReconciler-GPU", func() { vm := fake.NewTowerVMFromElfMachine(elfMachine) elfMachine.Status.VMRef = *vm.LocalID vm.GpuDevices = []*models.NestedGpuDevice{{ID: service.TowerString(fake.ID()), Name: service.TowerString("A16")}} - task := fake.NewTowerTask() + task := 
fake.NewTowerTask("") ctrlMgrCtx := fake.NewControllerManagerContext(elfCluster, cluster, elfMachine, machine, secret, md) fake.InitOwnerReferences(ctx, ctrlMgrCtx, elfCluster, cluster, elfMachine, machine) mockVMService.EXPECT().RemoveGPUDevices(elfMachine.Status.VMRef, gomock.Len(1)).Return(nil, unexpectedError) @@ -466,7 +464,7 @@ var _ = Describe("ElfMachineReconciler-GPU", func() { It("should set clusterAutoscaler GPU label for node", func() { elfMachine.Status.HostServerRef = fake.UUID() elfMachine.Status.HostServerName = fake.UUID() - vm := fake.NewTowerVM() + vm := fake.NewTowerVMFromElfMachine(elfMachine) ctrlMgrCtx := &context.ControllerManagerContext{ Client: testEnv.Client, Name: fake.ControllerManagerName, diff --git a/controllers/elfmachine_controller_resources_test.go b/controllers/elfmachine_controller_resources_test.go index 3d6adf9a..6b502c09 100644 --- a/controllers/elfmachine_controller_resources_test.go +++ b/controllers/elfmachine_controller_resources_test.go @@ -178,7 +178,7 @@ var _ = Describe("ElfMachineReconciler", func() { vm.VMDisks = []*models.NestedVMDisk{{ID: vmDisk.ID}} mockVMService.EXPECT().GetVMDisks([]string{*vmDisk.ID}).Return([]*models.VMDisk{vmDisk}, nil) mockVMService.EXPECT().GetVMVolume(*vmVolume.ID).Return(vmVolume, nil) - task := fake.NewTowerTask() + task := fake.NewTowerTask("") withTaskVMVolume := fake.NewWithTaskVMVolume(vmVolume, task) mockVMService.EXPECT().ResizeVMVolume(*vmVolume.ID, *service.TowerDisk(20)).Return(withTaskVMVolume, nil) reconciler := &ElfMachineReconciler{ControllerManagerContext: ctrlMgrCtx, NewVMService: mockNewVMService} @@ -226,7 +226,7 @@ var _ = Describe("ElfMachineReconciler", func() { Expect(err.Error()).To(ContainSubstring("failed to trigger expand size from")) expectConditions(elfMachine, []conditionAssertion{{infrav1.ResourcesHotUpdatedCondition, corev1.ConditionFalse, clusterv1.ConditionSeverityWarning, infrav1.ExpandingVMDiskFailedReason}}) - task := fake.NewTowerTask() + task := fake.NewTowerTask("") withTaskVMVolume := fake.NewWithTaskVMVolume(vmVolume, task) mockVMService.EXPECT().ResizeVMVolume(*vmVolume.ID, int64(10)).Return(withTaskVMVolume, nil) conditions.MarkFalse(elfMachine, infrav1.ResourcesHotUpdatedCondition, infrav1.ExpandingVMDiskReason, clusterv1.ConditionSeverityInfo, "") diff --git a/controllers/elfmachine_controller_test.go b/controllers/elfmachine_controller_test.go index da75da69..504e8201 100644 --- a/controllers/elfmachine_controller_test.go +++ b/controllers/elfmachine_controller_test.go @@ -63,6 +63,7 @@ var _ = Describe("ElfMachineReconciler", func() { elfCluster *infrav1.ElfCluster cluster *clusterv1.Cluster elfMachine *infrav1.ElfMachine + elfMachineKey client.ObjectKey k8sNode *corev1.Node machine *clusterv1.Machine kcp *controlplanev1.KubeadmControlPlane @@ -84,6 +85,7 @@ var _ = Describe("ElfMachineReconciler", func() { klog.SetOutput(logBuffer) elfCluster, cluster, elfMachine, machine, secret = fake.NewClusterAndMachineObjects() + elfMachineKey = capiutil.ObjectKey(elfMachine) kcp = fake.NewKCP() md = fake.NewMD() fake.ToWorkerMachine(machine, md) @@ -156,7 +158,6 @@ var _ = Describe("ElfMachineReconciler", func() { fake.InitOwnerReferences(ctx, ctrlMgrCtx, elfCluster, cluster, elfMachine, machine) reconciler := &ElfMachineReconciler{ControllerManagerContext: ctrlMgrCtx, NewVMService: mockNewVMService} - elfMachineKey := capiutil.ObjectKey(elfMachine) _, _ = reconciler.Reconcile(ctx, ctrl.Request{NamespacedName: elfMachineKey}) elfMachine = &infrav1.ElfMachine{} 
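Two recurring cleanups meet in these hunks. The per-spec "elfMachineKey := capiutil.ObjectKey(elfMachine)" lines disappear because the key is now computed once in BeforeEach (see the new elfMachineKey entry in the suite's var block above); since BeforeEach runs before every It against a freshly built elfMachine, behavior is unchanged. Separately, fake.NewTowerTask grew a status parameter, so fake.NewTowerTask("") yields the old default task and fake.NewTowerTask(models.TaskStatusFAILED) replaces the former named-temporary status assignment. A minimal sketch of what the updated helper in test/fake/tower.go plausibly looks like, inferred from its call sites in this patch rather than copied from it (the ID wiring is an assumption):

    package fake

    import "github.com/smartxworks/cloudtower-go-sdk/v2/models"

    // NewTowerTask returns a fake Tower task, optionally pre-set to the
    // given status. Passing "" leaves Status nil, matching the old
    // zero-argument constructor.
    func NewTowerTask(status models.TaskStatus) *models.Task {
        id := ID() // this package's existing fake-ID helper, assumed here
        task := &models.Task{ID: &id}
        if status != "" {
            // models.NewTaskStatus is the SDK's generated pointer
            // constructor; it spares callers the named temporary that
            // taking &status used to require.
            task.Status = models.NewTaskStatus(status)
        }
        return task
    }

The same pointer-constructor idiom shows up on VM status below: vm.Status = models.NewVMStatus(models.VMStatusSTOPPED) replaces the old two-liner that declared a local status variable just to take its address.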
Expect(reconciler.Client.Get(ctx, elfMachineKey, elfMachine)).To(Succeed()) @@ -170,7 +171,6 @@ var _ = Describe("ElfMachineReconciler", func() { fake.InitOwnerReferences(ctx, ctrlMgrCtx, elfCluster, cluster, elfMachine, machine) reconciler := ElfMachineReconciler{ControllerManagerContext: ctrlMgrCtx, NewVMService: mockNewVMService} - elfMachineKey := capiutil.ObjectKey(elfMachine) _, err := reconciler.Reconcile(ctx, ctrl.Request{NamespacedName: elfMachineKey}) Expect(err).ToNot(HaveOccurred()) Expect(logBuffer.String()).To(ContainSubstring("Cluster infrastructure is not ready yet")) @@ -187,7 +187,6 @@ var _ = Describe("ElfMachineReconciler", func() { fake.InitOwnerReferences(ctx, ctrlMgrCtx, elfCluster, cluster, elfMachine, machine) reconciler := ElfMachineReconciler{ControllerManagerContext: ctrlMgrCtx, NewVMService: mockNewVMService} - elfMachineKey := capiutil.ObjectKey(elfMachine) _, err := reconciler.Reconcile(ctx, ctrl.Request{NamespacedName: elfMachineKey}) Expect(err).ToNot(HaveOccurred()) Expect(logBuffer.String()).To(ContainSubstring("Waiting for bootstrap data to be available")) @@ -203,7 +202,6 @@ var _ = Describe("ElfMachineReconciler", func() { fake.InitOwnerReferences(ctx, ctrlMgrCtx, elfCluster, cluster, elfMachine, machine) reconciler := &ElfMachineReconciler{ControllerManagerContext: ctrlMgrCtx, NewVMService: mockNewVMService} - elfMachineKey := capiutil.ObjectKey(elfMachine) _, err := reconciler.Reconcile(ctx, ctrl.Request{NamespacedName: elfMachineKey}) Expect(err).ToNot(HaveOccurred()) Expect(logBuffer.String()).To(ContainSubstring("Waiting for the control plane to be initialized")) @@ -220,7 +218,6 @@ var _ = Describe("ElfMachineReconciler", func() { fake.InitOwnerReferences(ctx, ctrlMgrCtx, elfCluster, cluster, elfMachine, machine) reconciler := &ElfMachineReconciler{ControllerManagerContext: ctrlMgrCtx, NewVMService: mockNewVMService} - elfMachineKey := capiutil.ObjectKey(elfMachine) _, err := reconciler.Reconcile(ctx, ctrl.Request{NamespacedName: elfMachineKey}) Expect(err).ToNot(HaveOccurred()) Expect(logBuffer.String()).To(ContainSubstring("Waiting for bootstrap data to be available")) @@ -237,7 +234,6 @@ var _ = Describe("ElfMachineReconciler", func() { ctrlMgrCtx := fake.NewControllerManagerContext(elfCluster, cluster, elfMachine, machine, secret, md) fake.InitOwnerReferences(ctx, ctrlMgrCtx, elfCluster, cluster, elfMachine, machine) reconciler := &ElfMachineReconciler{ControllerManagerContext: ctrlMgrCtx, NewVMService: mockNewVMService} - elfMachineKey := capiutil.ObjectKey(elfMachine) result, err := reconciler.Reconcile(ctx, ctrl.Request{NamespacedName: elfMachineKey}) Expect(err).ToNot(HaveOccurred()) Expect(result.IsZero()).To(BeTrue()) @@ -321,7 +317,6 @@ var _ = Describe("ElfMachineReconciler", func() { fake.InitOwnerReferences(ctx, ctrlMgrCtx, elfCluster, cluster, elfMachine, machine) reconciler := &ElfMachineReconciler{ControllerManagerContext: ctrlMgrCtx, NewVMService: mockNewVMService} - elfMachineKey := capiutil.ObjectKey(elfMachine) result, err := reconciler.Reconcile(ctx, ctrl.Request{NamespacedName: elfMachineKey}) Expect(result.RequeueAfter).To(BeZero()) Expect(err).Should(HaveOccurred()) @@ -332,10 +327,9 @@ var _ = Describe("ElfMachineReconciler", func() { It("should create a new VM if not exists", func() { resetMemoryCache() - vm := fake.NewTowerVM() - vm.Name = &elfMachine.Name + vm := fake.NewTowerVMFromElfMachine(elfMachine) elfCluster.Spec.Cluster = clusterInsufficientStorageKey - task := fake.NewTowerTask() + task := 
fake.NewTowerTask("") withTaskVM := fake.NewWithTaskVM(vm, task) ctrlutil.AddFinalizer(elfMachine, infrav1.MachineFinalizer) ctrlMgrCtx := fake.NewControllerManagerContext(elfCluster, cluster, elfMachine, machine, secret, md) @@ -345,7 +339,6 @@ var _ = Describe("ElfMachineReconciler", func() { machineContext.VMService = mockVMService recordOrClearError(ctx, machineContext, ctrlMgrCtx.Client, clusterInsufficientStorageKey, true) mockVMService.EXPECT().GetVMPlacementGroup(gomock.Any()).Return(placementGroup, nil) - elfMachineKey := capiutil.ObjectKey(elfMachine) reconciler := &ElfMachineReconciler{ControllerManagerContext: ctrlMgrCtx, NewVMService: mockNewVMService} result, err := reconciler.Reconcile(ctx, ctrl.Request{NamespacedName: elfMachineKey}) Expect(result.RequeueAfter).NotTo(BeZero()) @@ -365,8 +358,7 @@ var _ = Describe("ElfMachineReconciler", func() { Expect(err).NotTo(HaveOccurred()) Expect(logBuffer.String()).To(ContainSubstring("Insufficient memory detected for the ELF cluster")) - logBuffer = new(bytes.Buffer) - klog.SetOutput(logBuffer) + logBuffer.Reset() mockVMService.EXPECT().Clone(gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any()).Return(withTaskVM, nil) mockVMService.EXPECT().Get(*vm.ID).Return(vm, nil) mockVMService.EXPECT().GetTask(*task.ID).Return(task, nil) @@ -387,8 +379,7 @@ var _ = Describe("ElfMachineReconciler", func() { }) It("should recover from lost task", func() { - vm := fake.NewTowerVM() - vm.Name = &elfMachine.Name + vm := fake.NewTowerVMFromElfMachine(elfMachine) vm.LocalID = pointer.String("placeholder-%s" + *vm.LocalID) ctrlutil.AddFinalizer(elfMachine, infrav1.MachineFinalizer) ctrlMgrCtx := fake.NewControllerManagerContext(elfCluster, cluster, elfMachine, machine, secret, md) @@ -399,7 +390,6 @@ var _ = Describe("ElfMachineReconciler", func() { mockVMService.EXPECT().Get(*vm.ID).Return(vm, nil) reconciler := &ElfMachineReconciler{ControllerManagerContext: ctrlMgrCtx, NewVMService: mockNewVMService} - elfMachineKey := capiutil.ObjectKey(elfMachine) result, err := reconciler.Reconcile(ctx, ctrl.Request{NamespacedName: elfMachineKey}) Expect(result.RequeueAfter).NotTo(BeZero()) Expect(err).ShouldNot(HaveOccurred()) @@ -418,7 +408,6 @@ var _ = Describe("ElfMachineReconciler", func() { mockVMService.EXPECT().Clone(gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any()).Return(nil, errors.New("some error")) reconciler := &ElfMachineReconciler{ControllerManagerContext: ctrlMgrCtx, NewVMService: mockNewVMService} - elfMachineKey := capiutil.ObjectKey(elfMachine) _, err := reconciler.Reconcile(ctx, ctrl.Request{NamespacedName: elfMachineKey}) elfMachine = &infrav1.ElfMachine{} Expect(reconciler.Client.Get(ctx, elfMachineKey, elfMachine)).To(Succeed()) @@ -431,8 +420,7 @@ var _ = Describe("ElfMachineReconciler", func() { It("should allow VM to be temporarily disconnected", func() { vm := fake.NewTowerVMFromElfMachine(elfMachine) vm.EntityAsyncStatus = nil - status := models.VMStatusRUNNING - vm.Status = &status + vm.Status = models.NewVMStatus(models.VMStatusRUNNING) elfMachine.Status.VMRef = *vm.LocalID now := metav1.NewTime(time.Now().Add(-infrav1.VMDisconnectionTimeout)) elfMachine.SetVMDisconnectionTimestamp(&now) @@ -463,14 +451,13 @@ var _ = Describe("ElfMachineReconciler", func() { mockVMService.EXPECT().GetVMVolume(*vmVolume.ID).Return(vmVolume, nil) reconciler := &ElfMachineReconciler{ControllerManagerContext: ctrlMgrCtx, NewVMService: mockNewVMService} - elfMachineKey := capiutil.ObjectKey(elfMachine) _, _ = 
reconciler.Reconcile(ctx, ctrl.Request{NamespacedName: elfMachineKey}) Expect(reconciler.Client.Get(ctx, elfMachineKey, elfMachine)).To(Succeed()) Expect(elfMachine.GetVMDisconnectionTimestamp()).To(BeNil()) }) It("should set failure when VM was deleted", func() { - vm := fake.NewTowerVM() + vm := fake.NewTowerVMFromElfMachine(elfMachine) vm.EntityAsyncStatus = nil elfMachine.Status.VMRef = *vm.LocalID ctrlutil.AddFinalizer(elfMachine, infrav1.MachineFinalizer) @@ -480,7 +467,6 @@ var _ = Describe("ElfMachineReconciler", func() { mockVMService.EXPECT().Get(elfMachine.Status.VMRef).Times(2).Return(nil, errors.New(service.VMNotFound)) reconciler := &ElfMachineReconciler{ControllerManagerContext: ctrlMgrCtx, NewVMService: mockNewVMService} - elfMachineKey := capiutil.ObjectKey(elfMachine) result, err := reconciler.Reconcile(ctx, ctrl.Request{NamespacedName: elfMachineKey}) Expect(result.RequeueAfter).NotTo(BeZero()) Expect(err).NotTo(HaveOccurred()) @@ -504,7 +490,7 @@ var _ = Describe("ElfMachineReconciler", func() { }) It("should set ElfMachine to failure when VM was moved to the recycle bin", func() { - vm := fake.NewTowerVM() + vm := fake.NewTowerVMFromElfMachine(elfMachine) vm.EntityAsyncStatus = nil vm.InRecycleBin = pointer.Bool(true) elfMachine.Status.VMRef = *vm.LocalID @@ -515,7 +501,6 @@ var _ = Describe("ElfMachineReconciler", func() { mockVMService.EXPECT().Get(elfMachine.Status.VMRef).Return(vm, nil) reconciler := &ElfMachineReconciler{ControllerManagerContext: ctrlMgrCtx, NewVMService: mockNewVMService} - elfMachineKey := capiutil.ObjectKey(elfMachine) result, err := reconciler.Reconcile(ctx, ctrl.Request{NamespacedName: elfMachineKey}) Expect(result.RequeueAfter).To(BeZero()) Expect(err).ShouldNot(HaveOccurred()) @@ -526,10 +511,8 @@ var _ = Describe("ElfMachineReconciler", func() { }) It("should retry when create a VM if failed", func() { - vm := fake.NewTowerVM() - task := fake.NewTowerTask() - status := models.TaskStatusFAILED - task.Status = &status + vm := fake.NewTowerVMFromElfMachine(elfMachine) + task := fake.NewTowerTask(models.TaskStatusFAILED) elfMachine.Status.VMRef = *vm.ID elfMachine.Status.TaskRef = *task.ID ctrlutil.AddFinalizer(elfMachine, infrav1.MachineFinalizer) @@ -540,7 +523,6 @@ var _ = Describe("ElfMachineReconciler", func() { mockVMService.EXPECT().GetTask(elfMachine.Status.TaskRef).Return(task, nil) reconciler := &ElfMachineReconciler{ControllerManagerContext: ctrlMgrCtx, NewVMService: mockNewVMService} - elfMachineKey := capiutil.ObjectKey(elfMachine) result, err := reconciler.Reconcile(ctx, ctrl.Request{NamespacedName: elfMachineKey}) Expect(result.RequeueAfter).NotTo(BeZero()) Expect(err).ToNot(HaveOccurred()) @@ -553,11 +535,9 @@ var _ = Describe("ElfMachineReconciler", func() { }) It("should set failure when task with cloud-init config error", func() { - vm := fake.NewTowerVM() + vm := fake.NewTowerVMFromElfMachine(elfMachine) vm.EntityAsyncStatus = nil - task := fake.NewTowerTask() - status := models.TaskStatusFAILED - task.Status = &status + task := fake.NewTowerTask(models.TaskStatusFAILED) task.ErrorMessage = service.TowerString("Cannot unwrap Ok value of Result.Err.\r\ncode: CREATE_VM_FORM_TEMPLATE_FAILED\r\nmessage: {\"data\":{},\"ec\":\"VM_CLOUD_INIT_CONFIG_ERROR\",\"error\":{\"msg\":\"[VM_CLOUD_INIT_CONFIG_ERROR]The gateway [192.168.31.215] is unreachable. 
\"}}") elfMachine.Status.VMRef = *vm.ID elfMachine.Status.TaskRef = *task.ID @@ -569,7 +549,6 @@ var _ = Describe("ElfMachineReconciler", func() { mockVMService.EXPECT().GetTask(elfMachine.Status.TaskRef).Return(task, nil) reconciler := &ElfMachineReconciler{ControllerManagerContext: ctrlMgrCtx, NewVMService: mockNewVMService} - elfMachineKey := capiutil.ObjectKey(elfMachine) result, err := reconciler.Reconcile(ctx, ctrl.Request{NamespacedName: elfMachineKey}) Expect(result.RequeueAfter).To(BeZero()) Expect(err).ShouldNot(HaveOccurred()) @@ -585,12 +564,9 @@ var _ = Describe("ElfMachineReconciler", func() { It("should power on the VM after it is created", func() { vm := fake.NewTowerVMFromElfMachine(elfMachine) vm.EntityAsyncStatus = nil - status := models.VMStatusSTOPPED - vm.Status = &status - task1 := fake.NewTowerTask() - taskStatus := models.TaskStatusSUCCESSED - task1.Status = &taskStatus - task2 := fake.NewTowerTask() + vm.Status = models.NewVMStatus(models.VMStatusSTOPPED) + task1 := fake.NewTowerTask(models.TaskStatusSUCCESSED) + task2 := fake.NewTowerTask("") elfMachine.Status.VMRef = *vm.ID elfMachine.Status.TaskRef = *task1.ID placementGroup := fake.NewVMPlacementGroup([]string{*vm.ID}) @@ -606,7 +582,6 @@ var _ = Describe("ElfMachineReconciler", func() { mockVMService.EXPECT().PowerOn(*vm.LocalID, "").Return(task2, nil) reconciler := &ElfMachineReconciler{ControllerManagerContext: ctrlMgrCtx, NewVMService: mockNewVMService} - elfMachineKey := capiutil.ObjectKey(elfMachine) result, err := reconciler.Reconcile(ctx, ctrl.Request{NamespacedName: elfMachineKey}) Expect(result.RequeueAfter).NotTo(BeZero()) Expect(err).ShouldNot(HaveOccurred()) @@ -621,11 +596,8 @@ var _ = Describe("ElfMachineReconciler", func() { It("should expand the disk before starting the virtual machine for the first time", func() { vm := fake.NewTowerVMFromElfMachine(elfMachine) vm.EntityAsyncStatus = nil - status := models.VMStatusSTOPPED - vm.Status = &status - task1 := fake.NewTowerTask() - taskStatus := models.TaskStatusSUCCESSED - task1.Status = &taskStatus + vm.Status = models.NewVMStatus(models.VMStatusSTOPPED) + task1 := fake.NewTowerTask(models.TaskStatusSUCCESSED) elfMachine.Status.VMRef = *vm.ID elfMachine.Status.TaskRef = *task1.ID placementGroup := fake.NewVMPlacementGroup([]string{*vm.ID}) @@ -642,7 +614,6 @@ var _ = Describe("ElfMachineReconciler", func() { mockVMService.EXPECT().GetVMDisks([]string{*vmDisk.ID}).Return(nil, unexpectedError) reconciler := &ElfMachineReconciler{ControllerManagerContext: ctrlMgrCtx, NewVMService: mockNewVMService} - elfMachineKey := capiutil.ObjectKey(elfMachine) result, err := reconciler.Reconcile(ctx, ctrl.Request{NamespacedName: elfMachineKey}) Expect(result.RequeueAfter).To(BeZero()) Expect(err).To(HaveOccurred()) @@ -650,13 +621,11 @@ var _ = Describe("ElfMachineReconciler", func() { }) It("should wait for the ELF virtual machine to be created", func() { - vm := fake.NewTowerVM() + vm := fake.NewTowerVMFromElfMachine(elfMachine) placeholderID := fmt.Sprintf("placeholder-%s", *vm.LocalID) vm.LocalID = &placeholderID vm.EntityAsyncStatus = nil - task := fake.NewTowerTask() - taskStatus := models.TaskStatusFAILED - task.Status = &taskStatus + task := fake.NewTowerTask(models.TaskStatusFAILED) elfMachine.Status.VMRef = *vm.ID elfMachine.Status.TaskRef = *task.ID ctrlutil.AddFinalizer(elfMachine, infrav1.MachineFinalizer) @@ -667,7 +636,6 @@ var _ = Describe("ElfMachineReconciler", func() { mockVMService.EXPECT().GetTask(elfMachine.Status.TaskRef).Return(task, nil) 
reconciler := &ElfMachineReconciler{ControllerManagerContext: ctrlMgrCtx, NewVMService: mockNewVMService} - elfMachineKey := capiutil.ObjectKey(elfMachine) result, err := reconciler.Reconcile(ctx, ctrl.Request{NamespacedName: elfMachineKey}) Expect(result.RequeueAfter).NotTo(BeZero()) Expect(err).ShouldNot(HaveOccurred()) @@ -682,11 +650,8 @@ var _ = Describe("ElfMachineReconciler", func() { It("should handle power on error", func() { vm := fake.NewTowerVMFromElfMachine(elfMachine) vm.EntityAsyncStatus = nil - status := models.VMStatusSTOPPED - vm.Status = &status - task1 := fake.NewTowerTask() - taskStatus := models.TaskStatusSUCCESSED - task1.Status = &taskStatus + vm.Status = models.NewVMStatus(models.VMStatusSTOPPED) + task1 := fake.NewTowerTask(models.TaskStatusSUCCESSED) elfMachine.Status.VMRef = *vm.ID elfMachine.Status.TaskRef = *task1.ID placementGroup := fake.NewVMPlacementGroup([]string{*vm.ID}) @@ -702,7 +667,6 @@ var _ = Describe("ElfMachineReconciler", func() { mockVMService.EXPECT().PowerOn(*vm.LocalID, "").Return(nil, errors.New("some error")) reconciler := &ElfMachineReconciler{ControllerManagerContext: ctrlMgrCtx, NewVMService: mockNewVMService} - elfMachineKey := capiutil.ObjectKey(elfMachine) result, err := reconciler.Reconcile(ctx, ctrl.Request{NamespacedName: elfMachineKey}) Expect(result.RequeueAfter).To(BeZero()) Expect(err.Error()).To(ContainSubstring("failed to trigger power on for VM")) @@ -716,12 +680,9 @@ var _ = Describe("ElfMachineReconciler", func() { It(" handle power on task failure", func() { vm := fake.NewTowerVMFromElfMachine(elfMachine) vm.EntityAsyncStatus = nil - status := models.VMStatusSTOPPED - vm.Status = &status - task1 := fake.NewTowerTask() - taskStatus := models.TaskStatusFAILED - task1.Status = &taskStatus - task2 := fake.NewTowerTask() + vm.Status = models.NewVMStatus(models.VMStatusSTOPPED) + task1 := fake.NewTowerTask(models.TaskStatusFAILED) + task2 := fake.NewTowerTask("") elfMachine.Status.VMRef = *vm.LocalID elfMachine.Status.TaskRef = *task1.ID placementGroup := fake.NewVMPlacementGroup([]string{*vm.ID}) @@ -737,7 +698,6 @@ var _ = Describe("ElfMachineReconciler", func() { mockVMService.EXPECT().PowerOn(*vm.LocalID, "").Return(task2, nil) reconciler := &ElfMachineReconciler{ControllerManagerContext: ctrlMgrCtx, NewVMService: mockNewVMService} - elfMachineKey := capiutil.ObjectKey(elfMachine) result, err := reconciler.Reconcile(ctx, ctrl.Request{NamespacedName: elfMachineKey}) Expect(result.RequeueAfter).NotTo(BeZero()) Expect(err).To(BeZero()) @@ -753,12 +713,9 @@ var _ = Describe("ElfMachineReconciler", func() { It("should power off the VM when vm is in SUSPENDED status", func() { vm := fake.NewTowerVMFromElfMachine(elfMachine) vm.EntityAsyncStatus = nil - status := models.VMStatusSUSPENDED - vm.Status = &status - task1 := fake.NewTowerTask() - taskStatus := models.TaskStatusSUCCESSED - task1.Status = &taskStatus - task2 := fake.NewTowerTask() + vm.Status = models.NewVMStatus(models.VMStatusSUSPENDED) + task1 := fake.NewTowerTask(models.TaskStatusSUCCESSED) + task2 := fake.NewTowerTask("") elfMachine.Status.VMRef = *vm.ID elfMachine.Status.TaskRef = *task1.ID placementGroup := fake.NewVMPlacementGroup([]string{*vm.ID}) @@ -772,7 +729,6 @@ var _ = Describe("ElfMachineReconciler", func() { mockVMService.EXPECT().PowerOff(*vm.LocalID).Return(task2, nil) reconciler := &ElfMachineReconciler{ControllerManagerContext: ctrlMgrCtx, NewVMService: mockNewVMService} - elfMachineKey := capiutil.ObjectKey(elfMachine) result, err := 
reconciler.Reconcile(ctx, ctrl.Request{NamespacedName: elfMachineKey}) Expect(result.RequeueAfter).NotTo(BeZero()) Expect(err).ShouldNot(HaveOccurred()) @@ -787,11 +743,8 @@ var _ = Describe("ElfMachineReconciler", func() { It("should handle power off error", func() { vm := fake.NewTowerVMFromElfMachine(elfMachine) vm.EntityAsyncStatus = nil - status := models.VMStatusSUSPENDED - vm.Status = &status - task1 := fake.NewTowerTask() - taskStatus := models.TaskStatusSUCCESSED - task1.Status = &taskStatus + vm.Status = models.NewVMStatus(models.VMStatusSUSPENDED) + task1 := fake.NewTowerTask(models.TaskStatusSUCCESSED) elfMachine.Status.VMRef = *vm.ID elfMachine.Status.TaskRef = *task1.ID placementGroup := fake.NewVMPlacementGroup([]string{*vm.ID}) @@ -805,7 +758,6 @@ var _ = Describe("ElfMachineReconciler", func() { mockVMService.EXPECT().PowerOff(*vm.LocalID).Return(nil, errors.New("some error")) reconciler := &ElfMachineReconciler{ControllerManagerContext: ctrlMgrCtx, NewVMService: mockNewVMService} - elfMachineKey := capiutil.ObjectKey(elfMachine) result, err := reconciler.Reconcile(ctx, ctrl.Request{NamespacedName: elfMachineKey}) Expect(result.RequeueAfter).To(BeZero()) Expect(err.Error()).To(ContainSubstring("failed to trigger powering off for VM")) @@ -839,8 +791,7 @@ var _ = Describe("ElfMachineReconciler", func() { Expect(err).NotTo(HaveOccurred()) Expect(logBuffer.String()).To(ContainSubstring("The status of VM is an unexpected value nil")) - logBuffer = new(bytes.Buffer) - klog.SetOutput(logBuffer) + logBuffer.Reset() vm.Status = models.NewVMStatus(models.VMStatusUNKNOWN) ok, err = reconciler.reconcileVMStatus(ctx, machineContext, vm) Expect(ok).To(BeFalse()) @@ -851,7 +802,7 @@ var _ = Describe("ElfMachineReconciler", func() { It("should power on the VM when VM is stopped", func() { vm := fake.NewTowerVMFromElfMachine(elfMachine) vm.Status = models.NewVMStatus(models.VMStatusSTOPPED) - task := fake.NewTowerTask() + task := fake.NewTowerTask("") now := metav1.Now() elfMachine.SetVMFirstBootTimestamp(&now) ctrlMgrCtx := fake.NewControllerManagerContext(elfCluster, cluster, elfMachine, machine, secret, md) @@ -872,7 +823,7 @@ var _ = Describe("ElfMachineReconciler", func() { vm := fake.NewTowerVMFromElfMachine(elfMachine) *vm.Vcpu += 1 vm.Status = models.NewVMStatus(models.VMStatusRUNNING) - task := fake.NewTowerTask() + task := fake.NewTowerTask("") ctrlMgrCtx := fake.NewControllerManagerContext(elfCluster, cluster, elfMachine, machine, secret, md) fake.InitOwnerReferences(ctx, ctrlMgrCtx, elfCluster, cluster, elfMachine, machine) machineContext := newMachineContext(elfCluster, cluster, elfMachine, machine, mockVMService) @@ -891,7 +842,7 @@ var _ = Describe("ElfMachineReconciler", func() { vm := fake.NewTowerVMFromElfMachine(elfMachine) *vm.CPU.Cores += 1 vm.Status = models.NewVMStatus(models.VMStatusRUNNING) - task := fake.NewTowerTask() + task := fake.NewTowerTask("") conditions.MarkFalse(elfMachine, infrav1.VMProvisionedCondition, infrav1.TaskFailureReason, clusterv1.ConditionSeverityInfo, "JOB_VM_SHUTDOWN_TIMEOUT") ctrlMgrCtx := fake.NewControllerManagerContext(elfCluster, cluster, elfMachine, machine, secret, md) @@ -912,15 +863,14 @@ var _ = Describe("ElfMachineReconciler", func() { vm := fake.NewTowerVMFromElfMachine(elfMachine) *vm.CPU.Sockets += 1 vm.Status = models.NewVMStatus(models.VMStatusSTOPPED) - task := fake.NewTowerTask() + task := fake.NewTowerTask("") withTaskVM := fake.NewWithTaskVM(vm, task) ctrlMgrCtx := fake.NewControllerManagerContext(elfCluster, cluster, 
elfMachine, machine, secret, md) fake.InitOwnerReferences(ctx, ctrlMgrCtx, elfCluster, cluster, elfMachine, machine) machineContext := newMachineContext(elfCluster, cluster, elfMachine, machine, mockVMService) machineContext.VMService = mockVMService - logBuffer = new(bytes.Buffer) - klog.SetOutput(logBuffer) + logBuffer.Reset() mockVMService.EXPECT().UpdateVM(vm, elfMachine).Return(withTaskVM, nil) reconciler := &ElfMachineReconciler{ControllerManagerContext: ctrlMgrCtx, NewVMService: mockNewVMService} @@ -936,7 +886,7 @@ var _ = Describe("ElfMachineReconciler", func() { vm := fake.NewTowerVMFromElfMachine(elfMachine) *vm.Vcpu += 1 vm.Status = models.NewVMStatus(models.VMStatusSUSPENDED) - task := fake.NewTowerTask() + task := fake.NewTowerTask("") ctrlMgrCtx := fake.NewControllerManagerContext(elfCluster, cluster, elfMachine, machine, secret, md) fake.InitOwnerReferences(ctx, ctrlMgrCtx, elfCluster, cluster, elfMachine, machine) machineContext := newMachineContext(elfCluster, cluster, elfMachine, machine, mockVMService) @@ -953,7 +903,7 @@ var _ = Describe("ElfMachineReconciler", func() { Context("powerOnVM", func() { It("should", func() { resetMemoryCache() - vm := fake.NewTowerVM() + vm := fake.NewTowerVMFromElfMachine(elfMachine) vm.Host = &models.NestedHost{ID: service.TowerString(fake.ID())} elfMachine.Status.VMRef = *vm.LocalID elfCluster.Spec.Cluster = clusterInsufficientMemoryKey @@ -967,7 +917,7 @@ var _ = Describe("ElfMachineReconciler", func() { Expect(err).NotTo(HaveOccurred()) Expect(logBuffer.String()).To(ContainSubstring("Insufficient memory detected for the ELF cluster")) - task := fake.NewTowerTask() + task := fake.NewTowerTask("") mockVMService.EXPECT().PowerOn(elfMachine.Status.VMRef, "").Return(task, nil) expireELFScheduleVMError(ctx, machineContext, ctrlMgrCtx.Client, clusterInsufficientMemoryKey) err = reconciler.powerOnVM(ctx, machineContext, vm) @@ -977,7 +927,6 @@ var _ = Describe("ElfMachineReconciler", func() { resetMemoryCache() // GPU - unexpectedError := errors.New("unexpected error") elfMachine.Spec.GPUDevices = []infrav1.GPUPassthroughDeviceSpec{{Model: "A16", Count: 1}} ctrlMgrCtx = fake.NewControllerManagerContext(elfCluster, cluster, elfMachine, machine, secret, md) fake.InitOwnerReferences(ctx, ctrlMgrCtx, elfCluster, cluster, elfMachine, machine) @@ -997,8 +946,8 @@ var _ = Describe("ElfMachineReconciler", func() { }) It("should skip adding VM to the placement group when capeVersion of ElfMachine is lower than v1.2.0", func() { - fake.ToControlPlaneMachine(machine, kcp) - fake.ToControlPlaneMachine(elfMachine, kcp) + fake.ToCPMachine(machine, kcp) + fake.ToCPMachine(elfMachine, kcp) delete(elfMachine.Annotations, infrav1.CAPEVersionAnnotation) ctrlMgrCtx := fake.NewControllerManagerContext(elfCluster, cluster, elfMachine, machine, secret, md) machineContext := newMachineContext(elfCluster, cluster, elfMachine, machine, mockVMService) @@ -1012,13 +961,10 @@ var _ = Describe("ElfMachineReconciler", func() { }) It("should add vm to the placement group", func() { - vm := fake.NewTowerVM() + vm := fake.NewTowerVMFromElfMachine(elfMachine) vm.EntityAsyncStatus = nil - status := models.VMStatusSTOPPED - vm.Status = &status - task := fake.NewTowerTask() - taskStatus := models.TaskStatusSUCCESSED - task.Status = &taskStatus + vm.Status = models.NewVMStatus(models.VMStatusSTOPPED) + task := fake.NewTowerTask(models.TaskStatusSUCCESSED) elfMachine.Status.VMRef = *vm.LocalID placementGroup := fake.NewVMPlacementGroup(nil) ctrlMgrCtx := 
fake.NewControllerManagerContext(elfCluster, cluster, elfMachine, machine, secret, md) @@ -1037,13 +983,10 @@ var _ = Describe("ElfMachineReconciler", func() { }) It("addVMsToPlacementGroup", func() { - vm := fake.NewTowerVM() + vm := fake.NewTowerVMFromElfMachine(elfMachine) vm.EntityAsyncStatus = nil - status := models.VMStatusSTOPPED - vm.Status = &status - task := fake.NewTowerTask() - taskStatus := models.TaskStatusSUCCESSED - task.Status = &taskStatus + vm.Status = models.NewVMStatus(models.VMStatusSTOPPED) + task := fake.NewTowerTask(models.TaskStatusSUCCESSED) elfMachine.Status.VMRef = *vm.LocalID placementGroup := fake.NewVMPlacementGroup(nil) ctrlMgrCtx := fake.NewControllerManagerContext(elfCluster, cluster, elfMachine, machine, secret, md) @@ -1058,10 +1001,8 @@ var _ = Describe("ElfMachineReconciler", func() { Expect(err).To(BeZero()) Expect(logBuffer.String()).To(ContainSubstring("Updating placement group succeeded")) - logBuffer = new(bytes.Buffer) - klog.SetOutput(logBuffer) - taskStatus = models.TaskStatusFAILED - task.Status = &taskStatus + logBuffer.Reset() + task.Status = models.NewTaskStatus(models.TaskStatusFAILED) mockVMService.EXPECT().AddVMsToPlacementGroup(placementGroup, []string{*vm.ID}).Return(task, nil) mockVMService.EXPECT().WaitTask(gomock.Any(), *task.ID, config.WaitTaskTimeoutForPlacementGroupOperation, config.WaitTaskInterval).Return(task, nil) @@ -1069,8 +1010,7 @@ var _ = Describe("ElfMachineReconciler", func() { err = reconciler.addVMsToPlacementGroup(ctx, machineContext, placementGroup, []string{*vm.ID}) Expect(strings.Contains(err.Error(), "failed to update placement group")).To(BeTrue()) - logBuffer = new(bytes.Buffer) - klog.SetOutput(logBuffer) + logBuffer.Reset() mockVMService.EXPECT().AddVMsToPlacementGroup(placementGroup, []string{*vm.ID}).Return(task, nil) mockVMService.EXPECT().WaitTask(gomock.Any(), *task.ID, config.WaitTaskTimeoutForPlacementGroupOperation, config.WaitTaskInterval).Return(nil, errors.New("xxx")) @@ -1081,7 +1021,7 @@ var _ = Describe("ElfMachineReconciler", func() { }) It("should wait for placement group task done", func() { - vm := fake.NewTowerVM() + vm := fake.NewTowerVMFromElfMachine(elfMachine) vm.EntityAsyncStatus = nil elfMachine.Status.VMRef = *vm.LocalID placementGroup1 := fake.NewVMPlacementGroup(nil) @@ -1096,7 +1036,6 @@ var _ = Describe("ElfMachineReconciler", func() { mockVMService.EXPECT().GetVMPlacementGroup(gomock.Any()).Return(placementGroup2, nil) reconciler := &ElfMachineReconciler{ControllerManagerContext: ctrlMgrCtx, NewVMService: mockNewVMService} - elfMachineKey := capiutil.ObjectKey(elfMachine) result, err := reconciler.Reconcile(ctx, ctrl.Request{NamespacedName: elfMachineKey}) Expect(result.RequeueAfter).NotTo(BeZero()) Expect(err).To(BeZero()) @@ -1107,10 +1046,9 @@ var _ = Describe("ElfMachineReconciler", func() { }) It("should handle placement group error", func() { - vm := fake.NewTowerVM() + vm := fake.NewTowerVMFromElfMachine(elfMachine) vm.EntityAsyncStatus = nil - status := models.VMStatusSTOPPED - vm.Status = &status + vm.Status = models.NewVMStatus(models.VMStatusSTOPPED) elfMachine.Status.VMRef = *vm.LocalID placementGroup := fake.NewVMPlacementGroup(nil) ctrlutil.AddFinalizer(elfMachine, infrav1.MachineFinalizer) @@ -1122,7 +1060,6 @@ var _ = Describe("ElfMachineReconciler", func() { mockVMService.EXPECT().GetVMPlacementGroup(gomock.Any()).Return(nil, errors.New("some error")) reconciler := &ElfMachineReconciler{ControllerManagerContext: ctrlMgrCtx, NewVMService: mockNewVMService} - 
elfMachineKey := capiutil.ObjectKey(elfMachine) result, err := reconciler.Reconcile(ctx, ctrl.Request{NamespacedName: elfMachineKey}) Expect(result.RequeueAfter).To(BeZero()) Expect(err).To(HaveOccurred()) @@ -1137,8 +1074,8 @@ var _ = Describe("ElfMachineReconciler", func() { cluster.Status.InfrastructureReady = true conditions.MarkTrue(cluster, clusterv1.ControlPlaneInitializedCondition) machine.Spec.Bootstrap = clusterv1.Bootstrap{DataSecretName: &secret.Name} - fake.ToControlPlaneMachine(machine, kcp) - fake.ToControlPlaneMachine(elfMachine, kcp) + fake.ToCPMachine(machine, kcp) + fake.ToCPMachine(elfMachine, kcp) }) It("should not check whether the memory of host is sufficient when VM is running and the host where the VM is located is not used", func() { @@ -1147,8 +1084,7 @@ var _ = Describe("ElfMachineReconciler", func() { vm := fake.NewTowerVMFromElfMachine(elfMachine) vm.Host = &models.NestedHost{ID: host.ID, Name: host.Name} placementGroup := fake.NewVMPlacementGroup([]string{}) - task := fake.NewTowerTask() - task.Status = models.NewTaskStatus(models.TaskStatusSUCCESSED) + task := fake.NewTowerTask(models.TaskStatusSUCCESSED) ctrlMgrCtx := fake.NewControllerManagerContext(elfCluster, cluster, elfMachine, machine, secret, md, kcp) machineContext := newMachineContext(elfCluster, cluster, elfMachine, machine, mockVMService) fake.InitOwnerReferences(ctx, ctrlMgrCtx, elfCluster, cluster, elfMachine, machine) @@ -1168,7 +1104,7 @@ var _ = Describe("ElfMachineReconciler", func() { It("should not be added when placement group is full", func() { host := fake.NewTowerHost() - vm := fake.NewTowerVM() + vm := fake.NewTowerVMFromElfMachine(elfMachine) vm.Status = models.NewVMStatus(models.VMStatusSTOPPED) vm.EntityAsyncStatus = nil vm.Host = &models.NestedHost{ID: service.TowerString(fake.UUID())} @@ -1194,8 +1130,7 @@ var _ = Describe("ElfMachineReconciler", func() { Expect(err).To(BeZero()) Expect(logBuffer.String()).To(ContainSubstring("KCP is in rolling update, the placement group is full and has no unusable hosts, so skip adding VM to the placement group and power it on")) - logBuffer = new(bytes.Buffer) - klog.SetOutput(logBuffer) + logBuffer.Reset() host.HostState = &models.NestedMaintenanceHostState{State: models.NewMaintenanceModeEnum(models.MaintenanceModeEnumMAINTENANCEMODE)} mockVMService.EXPECT().GetVMPlacementGroup(gomock.Any()).Return(placementGroup, nil) mockVMService.EXPECT().GetHostsByCluster(elfCluster.Spec.Cluster).Return(service.NewHosts(host), nil) @@ -1207,8 +1142,7 @@ var _ = Describe("ElfMachineReconciler", func() { Expect(err).To(BeZero()) Expect(logBuffer.String()).To(ContainSubstring("KCP is in rolling update, the placement group is full and has unusable hosts, so wait for enough available hosts")) - logBuffer = new(bytes.Buffer) - klog.SetOutput(logBuffer) + logBuffer.Reset() vm.Status = models.NewVMStatus(models.VMStatusRUNNING) host.HostState = &models.NestedMaintenanceHostState{State: models.NewMaintenanceModeEnum(models.MaintenanceModeEnumMAINTENANCEMODE)} mockVMService.EXPECT().GetVMPlacementGroup(gomock.Any()).Return(placementGroup, nil) @@ -1221,8 +1155,7 @@ var _ = Describe("ElfMachineReconciler", func() { Expect(err).To(BeZero()) Expect(logBuffer.String()).To(ContainSubstring(fmt.Sprintf("The placement group is full and VM is in %s status, skip adding VM to the placement group", *vm.Status))) - logBuffer = new(bytes.Buffer) - klog.SetOutput(logBuffer) + logBuffer.Reset() kcp.Spec.Replicas = pointer.Int32(1) kcp.Status.Replicas = 1 
kcp.Status.UpdatedReplicas = 1 @@ -1247,15 +1180,13 @@ var _ = Describe("ElfMachineReconciler", func() { host2 := fake.NewTowerHost() host3 := fake.NewTowerHost() host3.Status = models.NewHostStatus(models.HostStatusINITIALIZING) - vm := fake.NewTowerVM() + vm := fake.NewTowerVMFromElfMachine(elfMachine) vm.EntityAsyncStatus = nil vm.Host = &models.NestedHost{ID: service.TowerString(*host1.ID)} elfMachine.Status.VMRef = *vm.LocalID vm2 := fake.NewTowerVM() vm2.Host = &models.NestedHost{ID: service.TowerString(*host2.ID)} - task := fake.NewTowerTask() - taskStatus := models.TaskStatusSUCCESSED - task.Status = &taskStatus + task := fake.NewTowerTask(models.TaskStatusSUCCESSED) placementGroup := fake.NewVMPlacementGroup([]string{*vm2.ID}) ctrlMgrCtx := fake.NewControllerManagerContext(elfCluster, cluster, elfMachine, machine, secret, md, kcp) machineContext := newMachineContext(elfCluster, cluster, elfMachine, machine, mockVMService) @@ -1283,8 +1214,7 @@ var _ = Describe("ElfMachineReconciler", func() { oldCP3 := fake.NewTowerVM() oldCP3.Host = &models.NestedHost{ID: service.TowerString(*host3.ID)} newCP1 := fake.NewTowerVM() - status := models.VMStatusRUNNING - newCP1.Status = &status + newCP1.Status = models.NewVMStatus(models.VMStatusRUNNING) newCP1.EntityAsyncStatus = nil newCP1.Host = &models.NestedHost{ID: service.TowerString(*host3.ID)} @@ -1320,12 +1250,12 @@ var _ = Describe("ElfMachineReconciler", func() { elfMachine.CreationTimestamp = metav1.Now() elfMachine1.CreationTimestamp = metav1.NewTime(time.Now().Add(1 * time.Minute)) elfMachine2.CreationTimestamp = metav1.NewTime(time.Now().Add(2 * time.Minute)) - fake.ToControlPlaneMachine(machine, kcp) - fake.ToControlPlaneMachine(elfMachine, kcp) - fake.ToControlPlaneMachine(machine1, kcp) - fake.ToControlPlaneMachine(elfMachine1, kcp) - fake.ToControlPlaneMachine(machine2, kcp) - fake.ToControlPlaneMachine(elfMachine2, kcp) + fake.ToCPMachine(machine, kcp) + fake.ToCPMachine(elfMachine, kcp) + fake.ToCPMachine(machine1, kcp) + fake.ToCPMachine(elfMachine1, kcp) + fake.ToCPMachine(machine2, kcp) + fake.ToCPMachine(elfMachine2, kcp) vm0 := fake.NewTowerVMFromElfMachine(elfMachine) vm0.Host = &models.NestedHost{ID: service.TowerString(*host2.ID)} vm0.Status = models.NewVMStatus(models.VMStatusRUNNING) @@ -1347,7 +1277,7 @@ var _ = Describe("ElfMachineReconciler", func() { elfMachine1.Status.HostServerRef = *host1.ID elfMachine2.Status.PlacementGroupRef = *placementGroup.ID elfMachine2.Status.HostServerRef = *host2.ID - task := fake.NewTowerTask() + task := fake.NewTowerTask("") withTaskVM := fake.NewWithTaskVM(vm1, task) kcp.Spec.Replicas = pointer.Int32(3) kcp.Status.Replicas = 3 @@ -1373,8 +1303,7 @@ var _ = Describe("ElfMachineReconciler", func() { Expect(logBuffer.String()).To(ContainSubstring("Waiting for the VM to be migrated from")) expectConditions(elfMachine, []conditionAssertion{{infrav1.VMProvisionedCondition, corev1.ConditionFalse, clusterv1.ConditionSeverityInfo, infrav1.JoiningPlacementGroupReason}}) - logBuffer = new(bytes.Buffer) - klog.SetOutput(logBuffer) + logBuffer.Reset() elfMachine1.Status.HostServerRef = *host0.ID mockVMService.EXPECT().GetVMPlacementGroup(gomock.Any()).Return(placementGroup, nil) mockVMService.EXPECT().FindByIDs(gomock.InAnyOrder([]string{*vm1.ID, *vm2.ID})).Return([]*models.VM{vm1, vm2}, nil) @@ -1404,19 +1333,6 @@ var _ = Describe("ElfMachineReconciler", func() { Expect(ok).To(BeTrue()) Expect(err).To(BeZero()) Expect(logBuffer.String()).To(ContainSubstring("The VM is already on the recommended 
target host")) - - // vm.Host = &models.NestedHost{ID: service.TowerString(fake.ID())} - // vm1 := fake.NewTowerVM() - // vm1.Host = &models.NestedHost{ID: service.TowerString(*host.ID)} - // placementGroup.Vms = []*models.NestedVM{ - // {ID: vm1.ID, Name: vm1.Name}, - // } - // mockVMService.EXPECT().FindByIDs(gomock.InAnyOrder([]string{*vm1.ID})).Return([]*models.VM{vm1}, nil) - // reconciler = &ElfMachineReconciler{ControllerManagerContext: ctrlMgrCtx, NewVMService: mockNewVMService} - // ok, err = reconciler.migrateVM(ctx, machineContext, vm, placementGroup, *host.ID) - // Expect(ok).To(BeTrue()) - // Expect(err).To(BeZero()) - // Expect(logBuffer.String()).To(ContainSubstring("is already used by placement group")) }) }) }) @@ -1426,8 +1342,8 @@ var _ = Describe("ElfMachineReconciler", func() { cluster.Status.InfrastructureReady = true conditions.MarkTrue(cluster, clusterv1.ControlPlaneInitializedCondition) machine.Spec.Bootstrap = clusterv1.Bootstrap{DataSecretName: &secret.Name} - fake.ToControlPlaneMachine(machine, kcp) - fake.ToControlPlaneMachine(elfMachine, kcp) + fake.ToCPMachine(machine, kcp) + fake.ToCPMachine(elfMachine, kcp) }) Context("Rolling Update", func() { @@ -1445,7 +1361,7 @@ var _ = Describe("ElfMachineReconciler", func() { elfMachine1.Status.VMRef = *vm1.LocalID elfMachine2.Status.VMRef = *vm2.LocalID elfMachine3.Status.VMRef = *vm3.LocalID - vm := fake.NewTowerVM() + vm := fake.NewTowerVMFromElfMachine(elfMachine) elfMachine.Status.VMRef = *vm.LocalID placementGroup := fake.NewVMPlacementGroup([]string{}) placementGroup.Vms = []*models.NestedVM{} @@ -1461,8 +1377,7 @@ var _ = Describe("ElfMachineReconciler", func() { placementGroupName, err := towerresources.GetVMPlacementGroupName(ctx, ctrlMgrCtx.Client, machine, cluster) Expect(err).NotTo(HaveOccurred()) - logBuffer = new(bytes.Buffer) - klog.SetOutput(logBuffer) + logBuffer.Reset() mockVMService.EXPECT().GetVMPlacementGroup(placementGroupName).Return(placementGroup, nil) mockVMService.EXPECT().GetHostsByCluster(elfCluster.Spec.Cluster).Return(service.NewHosts(host), nil) mockVMService.EXPECT().FindByIDs(gomock.InAnyOrder([]string{})).Return([]*models.VM{}, nil) @@ -1473,8 +1388,7 @@ var _ = Describe("ElfMachineReconciler", func() { Expect(*hostID).To(Equal("")) Expect(logBuffer.String()).To(ContainSubstring("The placement group still has capacity")) - logBuffer = new(bytes.Buffer) - klog.SetOutput(logBuffer) + logBuffer.Reset() placementGroup.Vms = []*models.NestedVM{{ID: vm1.ID, Name: vm1.Name}} mockVMService.EXPECT().GetVMPlacementGroup(placementGroupName).Return(placementGroup, nil) mockVMService.EXPECT().GetHostsByCluster(elfCluster.Spec.Cluster).Return(service.NewHosts(host), nil) @@ -1497,12 +1411,12 @@ var _ = Describe("ElfMachineReconciler", func() { machine1.CreationTimestamp = metav1.Now() machine2.CreationTimestamp = metav1.NewTime(time.Now().Add(1 * time.Minute)) machine3.CreationTimestamp = metav1.NewTime(time.Now().Add(2 * time.Minute)) - fake.ToControlPlaneMachine(machine1, kcp) - fake.ToControlPlaneMachine(elfMachine1, kcp) - fake.ToControlPlaneMachine(machine2, kcp) - fake.ToControlPlaneMachine(elfMachine2, kcp) - fake.ToControlPlaneMachine(machine3, kcp) - fake.ToControlPlaneMachine(elfMachine3, kcp) + fake.ToCPMachine(machine1, kcp) + fake.ToCPMachine(elfMachine1, kcp) + fake.ToCPMachine(machine2, kcp) + fake.ToCPMachine(elfMachine2, kcp) + fake.ToCPMachine(machine3, kcp) + fake.ToCPMachine(elfMachine3, kcp) vm1 := fake.NewTowerVMFromElfMachine(elfMachine1) vm1.Host = &models.NestedHost{ID: 
service.TowerString(*host1.ID)} vm2 := fake.NewTowerVMFromElfMachine(elfMachine2) @@ -1512,7 +1426,7 @@ var _ = Describe("ElfMachineReconciler", func() { elfMachine1.Status.VMRef = *vm1.LocalID elfMachine2.Status.VMRef = *vm2.LocalID elfMachine3.Status.VMRef = *vm3.LocalID - vm := fake.NewTowerVM() + vm := fake.NewTowerVMFromElfMachine(elfMachine) elfMachine.Status.VMRef = *vm.LocalID placementGroup := fake.NewVMPlacementGroup([]string{}) placementGroup.Vms = []*models.NestedVM{ @@ -1533,8 +1447,7 @@ var _ = Describe("ElfMachineReconciler", func() { placementGroupName, err := towerresources.GetVMPlacementGroupName(ctx, ctrlMgrCtx.Client, machine, cluster) Expect(err).NotTo(HaveOccurred()) - logBuffer = new(bytes.Buffer) - klog.SetOutput(logBuffer) + logBuffer.Reset() mockVMService.EXPECT().Get(*vm3.ID).Return(vm3, nil) mockVMService.EXPECT().GetVMPlacementGroup(placementGroupName).Return(placementGroup, nil) mockVMService.EXPECT().GetHostsByCluster(elfCluster.Spec.Cluster).Return(service.NewHosts(host1, host2, host3), nil) @@ -1547,8 +1460,7 @@ var _ = Describe("ElfMachineReconciler", func() { // One of the hosts is unavailable. - logBuffer = new(bytes.Buffer) - klog.SetOutput(logBuffer) + logBuffer.Reset() host1.Status = models.NewHostStatus(models.HostStatusCONNECTEDERROR) mockVMService.EXPECT().GetVMPlacementGroup(placementGroupName).Return(placementGroup, nil) mockVMService.EXPECT().GetHostsByCluster(elfCluster.Spec.Cluster).Return(service.NewHosts(host1, host2, host3), nil) @@ -1560,8 +1472,7 @@ var _ = Describe("ElfMachineReconciler", func() { Expect(host).To(BeNil()) Expect(logBuffer.String()).To(ContainSubstring("KCP is in rolling update, the placement group is full and has unusable hosts, so wait for enough available hosts")) - logBuffer = new(bytes.Buffer) - klog.SetOutput(logBuffer) + logBuffer.Reset() host1.Status = models.NewHostStatus(models.HostStatusCONNECTEDHEALTHY) host2.Status = models.NewHostStatus(models.HostStatusCONNECTEDERROR) mockVMService.EXPECT().GetVMPlacementGroup(placementGroupName).Return(placementGroup, nil) @@ -1574,8 +1485,7 @@ var _ = Describe("ElfMachineReconciler", func() { Expect(host).To(BeNil()) Expect(logBuffer.String()).To(ContainSubstring("KCP is in rolling update, the placement group is full and has unusable hosts, so wait for enough available hosts")) - logBuffer = new(bytes.Buffer) - klog.SetOutput(logBuffer) + logBuffer.Reset() host2.Status = models.NewHostStatus(models.HostStatusCONNECTEDHEALTHY) host3.Status = models.NewHostStatus(models.HostStatusCONNECTEDERROR) mockVMService.EXPECT().GetVMPlacementGroup(placementGroupName).Return(placementGroup, nil) @@ -1588,8 +1498,7 @@ var _ = Describe("ElfMachineReconciler", func() { Expect(host).To(BeNil()) Expect(logBuffer.String()).To(ContainSubstring("KCP is in rolling update, the placement group is full and has unusable hosts, so wait for enough available hosts")) - logBuffer = new(bytes.Buffer) - klog.SetOutput(logBuffer) + logBuffer.Reset() kcp.Spec.Replicas = pointer.Int32(5) kcp.Status.Replicas = 6 kcp.Status.UpdatedReplicas = 4 @@ -1605,8 +1514,7 @@ var _ = Describe("ElfMachineReconciler", func() { Expect(host).To(BeNil()) Expect(logBuffer.String()).To(ContainSubstring("KCP is in rolling update, the placement group is full and no more host for placing more KCP VM, so wait for enough available hosts")) - logBuffer = new(bytes.Buffer) - klog.SetOutput(logBuffer) + logBuffer.Reset() host3.Status = models.NewHostStatus(models.HostStatusCONNECTEDERROR) hosts := []*models.Host{host1, host2, host3} 
mockVMService.EXPECT().Get(*vm3.ID).Return(vm3, nil) @@ -1617,8 +1525,7 @@ var _ = Describe("ElfMachineReconciler", func() { Expect(hostID).To(Equal("")) Expect(logBuffer.String()).To(ContainSubstring("Host is unavailable: host is in CONNECTED_ERROR status, skip selecting host for VM")) - logBuffer = new(bytes.Buffer) - klog.SetOutput(logBuffer) + logBuffer.Reset() vm3.Host.ID = service.TowerString(fake.UUID()) hosts = []*models.Host{host1, host2, host3} mockVMService.EXPECT().Get(*vm3.ID).Return(vm3, nil) @@ -1629,8 +1536,7 @@ var _ = Describe("ElfMachineReconciler", func() { Expect(hostID).To(Equal("")) Expect(logBuffer.String()).To(ContainSubstring("Host not found, skip selecting host for VM")) - logBuffer = new(bytes.Buffer) - klog.SetOutput(logBuffer) + logBuffer.Reset() vm3.Host = &models.NestedHost{ID: service.TowerString(*host3.ID)} host3.Status = models.NewHostStatus(models.HostStatusCONNECTEDHEALTHY) host4 := fake.NewTowerHost() @@ -1646,8 +1552,7 @@ var _ = Describe("ElfMachineReconciler", func() { Expect(hostID).To(Equal(*vm3.Host.ID)) Expect(logBuffer.String()).To(ContainSubstring("Select a host to power on the VM since the placement group is full")) - logBuffer = new(bytes.Buffer) - klog.SetOutput(logBuffer) + logBuffer.Reset() host3.Status = models.NewHostStatus(models.HostStatusCONNECTEDHEALTHY) hosts = []*models.Host{host1, host2, host3, host4} mockVMService.EXPECT().Get(*vm3.ID).Return(vm3, nil) @@ -1678,17 +1583,17 @@ var _ = Describe("ElfMachineReconciler", func() { elfMachine2, machine2 := fake.NewMachineObjects(elfCluster, cluster) machine1.CreationTimestamp = metav1.Now() machine2.CreationTimestamp = metav1.NewTime(time.Now().Add(1 * time.Minute)) - fake.ToControlPlaneMachine(machine1, kcp) - fake.ToControlPlaneMachine(elfMachine1, kcp) - fake.ToControlPlaneMachine(machine2, kcp) - fake.ToControlPlaneMachine(elfMachine2, kcp) + fake.ToCPMachine(machine1, kcp) + fake.ToCPMachine(elfMachine1, kcp) + fake.ToCPMachine(machine2, kcp) + fake.ToCPMachine(elfMachine2, kcp) vm1 := fake.NewTowerVMFromElfMachine(elfMachine1) vm1.Host = &models.NestedHost{ID: service.TowerString(*host1.ID)} vm2 := fake.NewTowerVMFromElfMachine(elfMachine2) vm2.Host = &models.NestedHost{ID: service.TowerString(*host2.ID)} elfMachine1.Status.VMRef = *vm1.LocalID elfMachine2.Status.VMRef = *vm2.LocalID - vm := fake.NewTowerVM() + vm := fake.NewTowerVMFromElfMachine(elfMachine) elfMachine.Status.VMRef = *vm.LocalID placementGroup := fake.NewVMPlacementGroup([]string{}) placementGroup.Vms = []*models.NestedVM{ @@ -1740,8 +1645,8 @@ var _ = Describe("ElfMachineReconciler", func() { conditions.MarkFalse(kcp, controlplanev1.ResizedCondition, controlplanev1.ScalingDownReason, clusterv1.ConditionSeverityWarning, "") host := fake.NewTowerHost() elfMachine1, machine1 := fake.NewMachineObjects(elfCluster, cluster) - fake.ToControlPlaneMachine(machine1, kcp) - fake.ToControlPlaneMachine(elfMachine1, kcp) + fake.ToCPMachine(machine1, kcp) + fake.ToCPMachine(elfMachine1, kcp) vm1 := fake.NewTowerVMFromElfMachine(elfMachine1) vm1.Host = &models.NestedHost{ID: service.TowerString(*host.ID)} elfMachine.Status.VMRef = *vm1.LocalID @@ -1864,7 +1769,6 @@ var _ = Describe("ElfMachineReconciler", func() { mockVMService.EXPECT().GetVMVolume(*vmVolume.ID).Return(vmVolume, nil) reconciler := &ElfMachineReconciler{ControllerManagerContext: ctrlMgrCtx, NewVMService: mockNewVMService} - elfMachineKey := capiutil.ObjectKey(elfMachine) _, err := reconciler.Reconcile(ctx, ctrl.Request{NamespacedName: elfMachineKey}) 
Expect(err).To(BeZero()) elfMachine = &infrav1.ElfMachine{} @@ -1886,7 +1790,6 @@ var _ = Describe("ElfMachineReconciler", func() { vm := fake.NewTowerVMFromElfMachine(elfMachine) vm.EntityAsyncStatus = nil elfMachine.Status.VMRef = *vm.LocalID - elfMachineKey := capiutil.ObjectKey(elfMachine) placementGroup := fake.NewVMPlacementGroup([]string{*vm.ID}) Expect(testEnv.CreateAndWait(ctx, k8sNode)).To(Succeed()) @@ -2339,7 +2242,6 @@ var _ = Describe("ElfMachineReconciler", func() { mockVMService.EXPECT().GetVMVolume(*vmVolume.ID).Return(vmVolume, nil) reconciler := &ElfMachineReconciler{ControllerManagerContext: ctrlMgrCtx, NewVMService: mockNewVMService} - elfMachineKey := capiutil.ObjectKey(elfMachine) _, _ = reconciler.Reconcile(ctx, ctrl.Request{NamespacedName: elfMachineKey}) elfMachine = &infrav1.ElfMachine{} Expect(reconciler.Client.Get(ctx, elfMachineKey, elfMachine)).To(Succeed()) @@ -2373,7 +2275,6 @@ var _ = Describe("ElfMachineReconciler", func() { fake.InitOwnerReferences(ctx, ctrlMgrCtx, elfCluster, cluster, elfMachine, machine) reconciler := &ElfMachineReconciler{ControllerManagerContext: ctrlMgrCtx, NewVMService: mockNewVMService} - elfMachineKey := capiutil.ObjectKey(elfMachine) result, err := reconciler.Reconcile(ctx, ctrl.Request{NamespacedName: elfMachineKey}) Expect(result).To(BeZero()) Expect(err).To(HaveOccurred()) @@ -2391,7 +2292,6 @@ var _ = Describe("ElfMachineReconciler", func() { fake.InitOwnerReferences(ctx, ctrlMgrCtx, elfCluster, cluster, elfMachine, machine) reconciler := &ElfMachineReconciler{ControllerManagerContext: ctrlMgrCtx, NewVMService: mockNewVMService} - elfMachineKey := capiutil.ObjectKey(elfMachine) result, err := reconciler.Reconcile(ctx, ctrl.Request{NamespacedName: elfMachineKey}) Expect(result).To(BeZero()) Expect(err).To(HaveOccurred()) @@ -2405,7 +2305,6 @@ var _ = Describe("ElfMachineReconciler", func() { mockVMService.EXPECT().GetByName(elfMachine.Name).Return(nil, errors.New(service.VMNotFound)) reconciler := &ElfMachineReconciler{ControllerManagerContext: ctrlMgrCtx, NewVMService: mockNewVMService} - elfMachineKey := capiutil.ObjectKey(elfMachine) result, err := reconciler.Reconcile(ctx, ctrl.Request{NamespacedName: elfMachineKey}) Expect(result).To(BeZero()) Expect(err).To(HaveOccurred()) @@ -2416,7 +2315,7 @@ }) It("should delete the VM that is in creating status and has not been saved to ElfMachine", func() { - vm := fake.NewTowerVM() + vm := fake.NewTowerVMFromElfMachine(elfMachine) vm.LocalID = pointer.String("placeholder-%s" + *vm.LocalID) ctrlMgrCtx := fake.NewControllerManagerContext(elfCluster, cluster, elfMachine, machine, secret, md) fake.InitOwnerReferences(ctx, ctrlMgrCtx, elfCluster, cluster, elfMachine, machine) @@ -2425,7 +2324,6 @@ mockVMService.EXPECT().FindVMsByName(elfMachine.Name).Return(nil, nil) reconciler := &ElfMachineReconciler{ControllerManagerContext: ctrlMgrCtx, NewVMService: mockNewVMService} - elfMachineKey := capiutil.ObjectKey(elfMachine) result, err := reconciler.Reconcile(ctx, ctrl.Request{NamespacedName: elfMachineKey}) Expect(result.RequeueAfter).NotTo(BeZero()) Expect(err).NotTo(HaveOccurred()) @@ -2438,11 +2336,10 @@ }) It("should delete the VM that is in created status and has not been saved to ElfMachine", func() { - vm := fake.NewTowerVM() + vm := fake.NewTowerVMFromElfMachine(elfMachine) vm.EntityAsyncStatus = nil - status := models.VMStatusRUNNING - 
vm.Status = &status - task := fake.NewTowerTask() + vm.Status = models.NewVMStatus(models.VMStatusRUNNING) + task := fake.NewTowerTask("") ctrlMgrCtx := fake.NewControllerManagerContext(elfCluster, cluster, elfMachine, machine, secret, md) fake.InitOwnerReferences(ctx, ctrlMgrCtx, elfCluster, cluster, elfMachine, machine) mockVMService.EXPECT().GetByName(elfMachine.Name).Return(vm, nil) @@ -2451,7 +2348,6 @@ var _ = Describe("ElfMachineReconciler", func() { mockVMService.EXPECT().FindVMsByName(elfMachine.Name).Return(nil, nil) reconciler := &ElfMachineReconciler{ControllerManagerContext: ctrlMgrCtx, NewVMService: mockNewVMService} - elfMachineKey := capiutil.ObjectKey(elfMachine) result, err := reconciler.Reconcile(ctx, ctrl.Request{NamespacedName: elfMachineKey}) Expect(result.RequeueAfter).NotTo(BeZero()) Expect(err).To(BeZero()) @@ -2464,8 +2360,8 @@ var _ = Describe("ElfMachineReconciler", func() { }) It("should remove vmRef when VM not found", func() { - vm := fake.NewTowerVM() - task := fake.NewTowerTask() + vm := fake.NewTowerVMFromElfMachine(elfMachine) + task := fake.NewTowerTask("") elfMachine.Status.VMRef = *vm.LocalID elfMachine.Status.TaskRef = *task.ID ctrlMgrCtx := fake.NewControllerManagerContext(elfCluster, cluster, elfMachine, machine, secret, md) @@ -2476,7 +2372,6 @@ var _ = Describe("ElfMachineReconciler", func() { mockVMService.EXPECT().FindVMsByName(elfMachine.Name).Return(nil, nil) reconciler := &ElfMachineReconciler{ControllerManagerContext: ctrlMgrCtx, NewVMService: mockNewVMService} - elfMachineKey := capiutil.ObjectKey(elfMachine) result, err := reconciler.Reconcile(ctx, ctrl.Request{NamespacedName: elfMachineKey}) Expect(result).To(BeZero()) Expect(err).To(HaveOccurred()) @@ -2487,11 +2382,10 @@ var _ = Describe("ElfMachineReconciler", func() { }) It("should handle task - pending", func() { - vm := fake.NewTowerVM() - status := models.VMStatusRUNNING - vm.Status = &status + vm := fake.NewTowerVMFromElfMachine(elfMachine) + vm.Status = models.NewVMStatus(models.VMStatusRUNNING) vm.EntityAsyncStatus = (*models.EntityAsyncStatus)(service.TowerString("UPDATING")) - task := fake.NewTowerTask() + task := fake.NewTowerTask("") elfMachine.Status.VMRef = *vm.LocalID elfMachine.Status.TaskRef = *task.ID ctrlMgrCtx := fake.NewControllerManagerContext(elfCluster, cluster, elfMachine, machine, secret, md) @@ -2502,7 +2396,6 @@ var _ = Describe("ElfMachineReconciler", func() { mockVMService.EXPECT().FindVMsByName(elfMachine.Name).Return(nil, nil) reconciler := &ElfMachineReconciler{ControllerManagerContext: ctrlMgrCtx, NewVMService: mockNewVMService} - elfMachineKey := capiutil.ObjectKey(elfMachine) result, err := reconciler.Reconcile(ctx, ctrl.Request{NamespacedName: elfMachineKey}) Expect(result).NotTo(BeZero()) Expect(result.RequeueAfter).NotTo(BeZero()) @@ -2514,11 +2407,9 @@ var _ = Describe("ElfMachineReconciler", func() { }) It("should handle task - failed", func() { - vm := fake.NewTowerVM() + vm := fake.NewTowerVMFromElfMachine(elfMachine) vm.EntityAsyncStatus = nil - task := fake.NewTowerTask() - status := models.TaskStatusFAILED - task.Status = &status + task := fake.NewTowerTask(models.TaskStatusFAILED) elfMachine.Status.VMRef = *vm.LocalID elfMachine.Status.TaskRef = *task.ID ctrlMgrCtx := fake.NewControllerManagerContext(elfCluster, cluster, elfMachine, machine, secret, md) @@ -2530,8 +2421,6 @@ var _ = Describe("ElfMachineReconciler", func() { mockVMService.EXPECT().FindVMsByName(elfMachine.Name).Return(nil, nil) reconciler := 
&ElfMachineReconciler{ControllerManagerContext: ctrlMgrCtx, NewVMService: mockNewVMService} - - elfMachineKey := capiutil.ObjectKey(elfMachine) _, _ = reconciler.Reconcile(ctx, ctrl.Request{NamespacedName: elfMachineKey}) Expect(logBuffer.String()).To(ContainSubstring("VM task failed")) elfMachine = &infrav1.ElfMachine{} @@ -2543,11 +2432,9 @@ var _ = Describe("ElfMachineReconciler", func() { }) It("should power off when VM is powered on and shut down failed", func() { - vm := fake.NewTowerVM() + vm := fake.NewTowerVMFromElfMachine(elfMachine) vm.EntityAsyncStatus = nil - task := fake.NewTowerTask() - status := models.TaskStatusFAILED - task.Status = &status + task := fake.NewTowerTask(models.TaskStatusFAILED) task.ErrorMessage = pointer.String("JOB_VM_SHUTDOWN_TIMEOUT") elfMachine.Status.VMRef = *vm.LocalID elfMachine.Status.TaskRef = *task.ID @@ -2560,7 +2447,6 @@ var _ = Describe("ElfMachineReconciler", func() { mockVMService.EXPECT().FindVMsByName(elfMachine.Name).Return(nil, nil) reconciler := &ElfMachineReconciler{ControllerManagerContext: ctrlMgrCtx, NewVMService: mockNewVMService} - elfMachineKey := capiutil.ObjectKey(elfMachine) result, err := reconciler.Reconcile(ctx, ctrl.Request{NamespacedName: elfMachineKey}) Expect(result.RequeueAfter).NotTo(BeZero()) Expect(err).To(BeZero()) @@ -2574,11 +2460,9 @@ var _ = Describe("ElfMachineReconciler", func() { }) It("should power off when the VM which required vGPU devices is powered on and shut down failed", func() { - vm := fake.NewTowerVM() + vm := fake.NewTowerVMFromElfMachine(elfMachine) vm.EntityAsyncStatus = nil - task := fake.NewTowerTask() - status := models.TaskStatusFAILED - task.Status = &status + task := fake.NewTowerTask(models.TaskStatusFAILED) task.ErrorMessage = pointer.String("JOB_VM_SHUTDOWN_TIMEOUT") elfMachine.Status.VMRef = *vm.LocalID elfMachine.Status.TaskRef = *task.ID @@ -2591,7 +2475,6 @@ var _ = Describe("ElfMachineReconciler", func() { mockVMService.EXPECT().FindVMsByName(elfMachine.Name).Return(nil, nil) reconciler := &ElfMachineReconciler{ControllerManagerContext: ctrlMgrCtx, NewVMService: mockNewVMService} - elfMachineKey := capiutil.ObjectKey(elfMachine) result, err := reconciler.Reconcile(ctx, ctrl.Request{NamespacedName: elfMachineKey}) Expect(result.RequeueAfter).NotTo(BeZero()) Expect(err).To(BeZero()) @@ -2605,11 +2488,9 @@ var _ = Describe("ElfMachineReconciler", func() { }) It("should handle task - done", func() { - vm := fake.NewTowerVM() + vm := fake.NewTowerVMFromElfMachine(elfMachine) vm.EntityAsyncStatus = nil - task := fake.NewTowerTask() - status := models.TaskStatusSUCCESSED - task.Status = &status + task := fake.NewTowerTask(models.TaskStatusSUCCESSED) elfMachine.Status.VMRef = *vm.LocalID elfMachine.Status.TaskRef = *task.ID ctrlMgrCtx := fake.NewControllerManagerContext(elfCluster, cluster, elfMachine, machine, secret, md) @@ -2621,7 +2502,6 @@ var _ = Describe("ElfMachineReconciler", func() { mockVMService.EXPECT().FindVMsByName(elfMachine.Name).Return(nil, nil) reconciler := &ElfMachineReconciler{ControllerManagerContext: ctrlMgrCtx, NewVMService: mockNewVMService} - elfMachineKey := capiutil.ObjectKey(elfMachine) _, _ = reconciler.Reconcile(ctx, ctrl.Request{NamespacedName: elfMachineKey}) Expect(logBuffer.String()).To(ContainSubstring("VM task succeeded")) elfMachine = &infrav1.ElfMachine{} @@ -2632,9 +2512,9 @@ var _ = Describe("ElfMachineReconciler", func() { }) It("should shut down when VM is powered on", func() { - vm := fake.NewTowerVM() + vm := 
fake.NewTowerVMFromElfMachine(elfMachine) vm.EntityAsyncStatus = nil - task := fake.NewTowerTask() + task := fake.NewTowerTask("") elfMachine.Status.VMRef = *vm.LocalID ctrlMgrCtx := fake.NewControllerManagerContext(elfCluster, cluster, elfMachine, machine, secret, md) fake.InitOwnerReferences(ctx, ctrlMgrCtx, elfCluster, cluster, elfMachine, machine) @@ -2644,7 +2524,6 @@ var _ = Describe("ElfMachineReconciler", func() { mockVMService.EXPECT().FindVMsByName(elfMachine.Name).Return(nil, nil) reconciler := &ElfMachineReconciler{ControllerManagerContext: ctrlMgrCtx, NewVMService: mockNewVMService} - elfMachineKey := capiutil.ObjectKey(elfMachine) result, err := reconciler.Reconcile(ctx, ctrl.Request{NamespacedName: elfMachineKey}) Expect(result.RequeueAfter).NotTo(BeZero()) Expect(err).To(BeZero()) @@ -2657,9 +2536,9 @@ var _ = Describe("ElfMachineReconciler", func() { }) It("should shutdown VM when the VM which required vGPU devices is powered on and cluster VMGracefulShutdown is false", func() { - vm := fake.NewTowerVM() + vm := fake.NewTowerVMFromElfMachine(elfMachine) vm.EntityAsyncStatus = nil - task := fake.NewTowerTask() + task := fake.NewTowerTask("") elfMachine.Status.VMRef = *vm.LocalID elfMachine.Spec.VGPUDevices = []infrav1.VGPUDeviceSpec{{}} elfCluster.Spec.VMGracefulShutdown = false @@ -2671,7 +2550,6 @@ var _ = Describe("ElfMachineReconciler", func() { mockVMService.EXPECT().FindVMsByName(elfMachine.Name).Return(nil, nil) reconciler := &ElfMachineReconciler{ControllerManagerContext: ctrlMgrCtx, NewVMService: mockNewVMService} - elfMachineKey := capiutil.ObjectKey(elfMachine) result, err := reconciler.Reconcile(ctx, ctrl.Request{NamespacedName: elfMachineKey}) Expect(result.RequeueAfter).NotTo(BeZero()) Expect(err).To(BeZero()) @@ -2684,11 +2562,10 @@ var _ = Describe("ElfMachineReconciler", func() { }) It("should handle delete error", func() { - vm := fake.NewTowerVM() + vm := fake.NewTowerVMFromElfMachine(elfMachine) vm.Name = &elfMachine.Name vm.EntityAsyncStatus = nil - status := models.VMStatusSTOPPED - vm.Status = &status + vm.Status = models.NewVMStatus(models.VMStatusSTOPPED) elfMachine.Status.VMRef = *vm.LocalID ctrlMgrCtx := fake.NewControllerManagerContext(elfCluster, cluster, elfMachine, machine, secret, md) fake.InitOwnerReferences(ctx, ctrlMgrCtx, elfCluster, cluster, elfMachine, machine) @@ -2698,7 +2575,6 @@ var _ = Describe("ElfMachineReconciler", func() { mockVMService.EXPECT().FindVMsByName(elfMachine.Name).Return(nil, nil) reconciler := &ElfMachineReconciler{ControllerManagerContext: ctrlMgrCtx, NewVMService: mockNewVMService} - elfMachineKey := capiutil.ObjectKey(elfMachine) result, err := reconciler.Reconcile(ctx, ctrl.Request{NamespacedName: elfMachineKey}) Expect(result.RequeueAfter).To(BeZero()) Expect(err).ToNot(BeZero()) @@ -2709,12 +2585,11 @@ var _ = Describe("ElfMachineReconciler", func() { }) It("should delete when VM is not running", func() { - vm := fake.NewTowerVM() + vm := fake.NewTowerVMFromElfMachine(elfMachine) vm.Name = &elfMachine.Name vm.EntityAsyncStatus = nil - status := models.VMStatusSTOPPED - vm.Status = &status - task := fake.NewTowerTask() + vm.Status = models.NewVMStatus(models.VMStatusSTOPPED) + task := fake.NewTowerTask("") elfMachine.Status.VMRef = *vm.LocalID ctrlMgrCtx := fake.NewControllerManagerContext(elfCluster, cluster, elfMachine, machine, secret, md) fake.InitOwnerReferences(ctx, ctrlMgrCtx, elfCluster, cluster, elfMachine, machine) @@ -2724,7 +2599,6 @@ var _ = Describe("ElfMachineReconciler", func() { 
mockVMService.EXPECT().FindVMsByName(elfMachine.Name).Return(nil, nil) reconciler := &ElfMachineReconciler{ControllerManagerContext: ctrlMgrCtx, NewVMService: mockNewVMService} - elfMachineKey := capiutil.ObjectKey(elfMachine) result, err := reconciler.Reconcile(ctx, ctrl.Request{NamespacedName: elfMachineKey}) Expect(result.RequeueAfter).NotTo(BeZero()) Expect(err).To(BeZero()) @@ -2737,11 +2611,10 @@ var _ = Describe("ElfMachineReconciler", func() { }) It("should power off when VM is running and VMGracefulShutdown is false", func() { - vm := fake.NewTowerVM() + vm := fake.NewTowerVMFromElfMachine(elfMachine) vm.EntityAsyncStatus = nil - status := models.VMStatusRUNNING - vm.Status = &status - task := fake.NewTowerTask() + vm.Status = models.NewVMStatus(models.VMStatusRUNNING) + task := fake.NewTowerTask("") elfMachine.Status.VMRef = *vm.LocalID elfCluster.Spec.VMGracefulShutdown = false ctrlMgrCtx := fake.NewControllerManagerContext(elfCluster, cluster, elfMachine, machine, secret, md) @@ -2752,7 +2625,6 @@ var _ = Describe("ElfMachineReconciler", func() { mockVMService.EXPECT().FindVMsByName(elfMachine.Name).Return(nil, nil) reconciler := &ElfMachineReconciler{ControllerManagerContext: ctrlMgrCtx, NewVMService: mockNewVMService} - elfMachineKey := capiutil.ObjectKey(elfMachine) result, err := reconciler.Reconcile(ctx, ctrl.Request{NamespacedName: elfMachineKey}) Expect(result.RequeueAfter).NotTo(BeZero()) Expect(err).To(BeZero()) @@ -2778,7 +2650,7 @@ var _ = Describe("ElfMachineReconciler", func() { Expect(err).NotTo(HaveOccurred()) cluster.DeletionTimestamp = nil - fake.ToControlPlaneMachine(machine, kcp) + fake.ToCPMachine(machine, kcp) ctrlMgrCtx = fake.NewControllerManagerContext(elfCluster, cluster, elfMachine, machine, secret, md) fake.InitOwnerReferences(ctx, ctrlMgrCtx, elfCluster, cluster, elfMachine, machine) @@ -2856,11 +2728,10 @@ var _ = Describe("ElfMachineReconciler", func() { }) It("should delete k8s node before destroying VM.", func() { - vm := fake.NewTowerVM() + vm := fake.NewTowerVMFromElfMachine(elfMachine) vm.EntityAsyncStatus = nil - status := models.VMStatusSTOPPED - vm.Status = &status - task := fake.NewTowerTask() + vm.Status = models.NewVMStatus(models.VMStatusSTOPPED) + task := fake.NewTowerTask("") elfMachine.Status.VMRef = *vm.LocalID cluster.Status.ControlPlaneReady = true @@ -2905,11 +2776,10 @@ var _ = Describe("ElfMachineReconciler", func() { }) It("should not delete k8s node when cluster is deleting", func() { - vm := fake.NewTowerVM() + vm := fake.NewTowerVMFromElfMachine(elfMachine) vm.EntityAsyncStatus = nil - status := models.VMStatusSTOPPED - vm.Status = &status - task := fake.NewTowerTask() + vm.Status = models.NewVMStatus(models.VMStatusSTOPPED) + task := fake.NewTowerTask("") elfMachine.Status.VMRef = *vm.LocalID cluster.Status.ControlPlaneReady = true cluster.DeletionTimestamp = &metav1.Time{Time: time.Now().UTC()} @@ -2954,11 +2824,10 @@ var _ = Describe("ElfMachineReconciler", func() { }) It("should not delete k8s node when control plane is not ready", func() { - vm := fake.NewTowerVM() + vm := fake.NewTowerVMFromElfMachine(elfMachine) vm.EntityAsyncStatus = nil - status := models.VMStatusSTOPPED - vm.Status = &status - task := fake.NewTowerTask() + vm.Status = models.NewVMStatus(models.VMStatusSTOPPED) + task := fake.NewTowerTask("") elfMachine.Status.VMRef = *vm.LocalID cluster.Status.ControlPlaneReady = false @@ -3001,10 +2870,9 @@ var _ = Describe("ElfMachineReconciler", func() { }) It("should handle error when delete k8s node failed", 
func() { - vm := fake.NewTowerVM() + vm := fake.NewTowerVMFromElfMachine(elfMachine) vm.EntityAsyncStatus = nil - status := models.VMStatusSTOPPED - vm.Status = &status + vm.Status = models.NewVMStatus(models.VMStatusSTOPPED) elfMachine.Status.VMRef = *vm.LocalID cluster.Status.ControlPlaneReady = true @@ -3067,11 +2935,7 @@ var _ = Describe("ElfMachineReconciler", func() { Expect(result).To(BeZero()) Expect(err).To(BeZero()) - logBuffer = new(bytes.Buffer) - klog.SetOutput(logBuffer) - task := fake.NewTowerTask() - taskStatus := models.TaskStatusSUCCESSED - task.Status = &taskStatus + task := fake.NewTowerTask(models.TaskStatusSUCCESSED) withTaskVMPlacementGroup := fake.NewWithTaskVMPlacementGroup(nil, task) mockVMService.EXPECT().GetVMPlacementGroup(gomock.Any()).Return(nil, errors.New(service.VMPlacementGroupNotFound)) mockVMService.EXPECT().GetVMPlacementGroup(gomock.Any()).Return(placementGroup, nil) @@ -3085,10 +2949,7 @@ var _ = Describe("ElfMachineReconciler", func() { Expect(err).To(BeZero()) Expect(logBuffer.String()).To(ContainSubstring("Creating placement group succeeded")) - logBuffer = new(bytes.Buffer) - klog.SetOutput(logBuffer) - taskStatus = models.TaskStatusFAILED - task.Status = &taskStatus + task.Status = models.NewTaskStatus(models.TaskStatusFAILED) mockVMService.EXPECT().GetCluster(elfCluster.Spec.Cluster).Return(towerCluster, nil) mockVMService.EXPECT().GetVMPlacementGroup(gomock.Any()).Return(nil, errors.New(service.VMPlacementGroupNotFound)) mockVMService.EXPECT().CreateVMPlacementGroup(gomock.Any(), *towerCluster.ID, towerresources.GetVMPlacementGroupPolicy(machine)).Return(withTaskVMPlacementGroup, nil) @@ -3099,8 +2960,6 @@ var _ = Describe("ElfMachineReconciler", func() { Expect(err).To(HaveOccurred()) Expect(err.Error()).To(ContainSubstring("failed to create placement group")) - logBuffer = new(bytes.Buffer) - klog.SetOutput(logBuffer) resetMemoryCache() mockVMService.EXPECT().GetVMPlacementGroup(gomock.Any()).Return(nil, errors.New(service.VMPlacementGroupNotFound)) mockVMService.EXPECT().GetCluster(elfCluster.Spec.Cluster).Return(towerCluster, nil) @@ -3113,8 +2972,7 @@ var _ = Describe("ElfMachineReconciler", func() { Expect(err.Error()).To(ContainSubstring(fmt.Sprintf("failed to wait for placement group creating task to complete in %s: pgName %s, taskID %s", config.WaitTaskTimeoutForPlacementGroupOperation, placementGroupName, *withTaskVMPlacementGroup.TaskID))) Expect(canCreatePlacementGroup(placementGroupName)).To(BeFalse()) - logBuffer = new(bytes.Buffer) - klog.SetOutput(logBuffer) + logBuffer.Reset() resetMemoryCache() mockVMService.EXPECT().GetVMPlacementGroup(gomock.Any()).Return(nil, errors.New(service.VMPlacementGroupNotFound)) mockVMService.EXPECT().GetCluster(elfCluster.Spec.Cluster).Return(towerCluster, nil) @@ -3130,8 +2988,7 @@ var _ = Describe("ElfMachineReconciler", func() { Expect(logBuffer.String()).To(ContainSubstring(fmt.Sprintf("Duplicate placement group detected, will try again in %s", placementGroupSilenceTime))) Expect(canCreatePlacementGroup(placementGroupName)).To(BeFalse()) - logBuffer = new(bytes.Buffer) - klog.SetOutput(logBuffer) + logBuffer.Reset() mockVMService.EXPECT().GetVMPlacementGroup(gomock.Any()).Return(nil, errors.New(service.VMPlacementGroupNotFound)) result, err = reconciler.reconcilePlacementGroup(ctx, machineContext) @@ -3179,7 +3036,6 @@ var _ = Describe("ElfMachineReconciler", func() { originalElfMachine := elfMachine.DeepCopy() reconciler := &ElfMachineReconciler{ControllerManagerContext: ctrlMgrCtx, 
NewVMService: mockNewVMService} - elfMachineKey := capiutil.ObjectKey(elfMachine) result, err := reconciler.Reconcile(ctx, ctrl.Request{NamespacedName: elfMachineKey}) Expect(result.RequeueAfter).NotTo(BeZero()) Expect(err).ShouldNot(HaveOccurred()) @@ -3200,7 +3056,6 @@ var _ = Describe("ElfMachineReconciler", func() { fake.InitOwnerReferences(ctx, ctrlMgrCtx, elfCluster, cluster, elfMachine, machine) reconciler := &ElfMachineReconciler{ControllerManagerContext: ctrlMgrCtx, NewVMService: mockNewVMService} - elfMachineKey := capiutil.ObjectKey(elfMachine) result, err := reconciler.Reconcile(ctx, ctrl.Request{NamespacedName: elfMachineKey}) Expect(result.RequeueAfter).NotTo(BeZero()) Expect(err).ShouldNot(HaveOccurred()) @@ -3249,7 +3104,6 @@ var _ = Describe("ElfMachineReconciler", func() { mockVMService.EXPECT().Clone(gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any()).Return(nil, errors.New("some error")) reconciler := &ElfMachineReconciler{ControllerManagerContext: ctrlMgrCtx, NewVMService: mockNewVMService} - elfMachineKey := capiutil.ObjectKey(elfMachine) result, err := reconciler.Reconcile(ctx, ctrl.Request{NamespacedName: elfMachineKey}) Expect(result.RequeueAfter).To(BeZero()) Expect(err).Should(HaveOccurred()) @@ -3261,7 +3115,7 @@ var _ = Describe("ElfMachineReconciler", func() { Context("Reconcile VM task", func() { It("should handle task missing", func() { - task := fake.NewTowerTask() + task := fake.NewTowerTask("") elfMachine.Status.TaskRef = *task.ID ctrlMgrCtx := fake.NewControllerManagerContext(elfCluster, cluster, elfMachine, machine, secret, md) fake.InitOwnerReferences(ctx, ctrlMgrCtx, elfCluster, cluster, elfMachine, machine) @@ -3278,7 +3132,7 @@ var _ = Describe("ElfMachineReconciler", func() { }) It("should handle failed to get task", func() { - task := fake.NewTowerTask() + task := fake.NewTowerTask("") elfMachine.Status.TaskRef = *task.ID ctrlMgrCtx := fake.NewControllerManagerContext(elfCluster, cluster, elfMachine, machine, secret, md) fake.InitOwnerReferences(ctx, ctrlMgrCtx, elfCluster, cluster, elfMachine, machine) @@ -3295,8 +3149,7 @@ var _ = Describe("ElfMachineReconciler", func() { }) It("should set vm first boot timestamp", func() { - task := fake.NewTowerTask() - task.Status = models.NewTaskStatus(models.TaskStatusSUCCESSED) + task := fake.NewTowerTask(models.TaskStatusSUCCESSED) task.Description = service.TowerString("Start VM") elfMachine.Status.TaskRef = *task.ID ctrlMgrCtx := fake.NewControllerManagerContext(elfCluster, cluster, elfMachine, machine, secret, md) @@ -3324,8 +3177,7 @@ var _ = Describe("ElfMachineReconciler", func() { elfMachine.Spec.GPUDevices = []infrav1.GPUPassthroughDeviceSpec{{Model: "A16", Count: 1}} resetMemoryCache() - task := fake.NewTowerTask() - task.Status = models.NewTaskStatus(models.TaskStatusFAILED) + task := fake.NewTowerTask(models.TaskStatusFAILED) elfMachine.Status.TaskRef = *task.ID ctrlMgrCtx := fake.NewControllerManagerContext(elfCluster, cluster, elfMachine, machine, secret, md) fake.InitOwnerReferences(ctx, ctrlMgrCtx, elfCluster, cluster, elfMachine, machine) @@ -3463,7 +3315,7 @@ var _ = Describe("ElfMachineReconciler", func() { It("should set providerID and labels for node", func() { elfMachine.Status.HostServerRef = fake.UUID() elfMachine.Status.HostServerName = fake.UUID() - vm := fake.NewTowerVM() + vm := fake.NewTowerVMFromElfMachine(elfMachine) ctrlMgrCtx := &context.ControllerManagerContext{ Client: testEnv.Client, Name: fake.ControllerManagerName, @@ -3506,7 +3358,7 @@ var _ = 
Describe("ElfMachineReconciler", func() { It("should update labels but not update providerID", func() { elfMachine.Status.HostServerRef = fake.UUID() elfMachine.Status.HostServerName = fake.UUID() - vm := fake.NewTowerVM() + vm := fake.NewTowerVMFromElfMachine(elfMachine) ctrlMgrCtx := &context.ControllerManagerContext{ Client: testEnv.Client, Name: fake.ControllerManagerName, @@ -3614,8 +3466,7 @@ var _ = Describe("ElfMachineReconciler", func() { vm2.EntityAsyncStatus = nil ctrlMgrCtx := fake.NewControllerManagerContext(elfCluster, cluster, elfMachine, machine, secret, md) machineContext := newMachineContext(elfCluster, cluster, elfMachine, machine, mockVMService) - task := fake.NewTowerTask() - task.Status = models.NewTaskStatus(models.TaskStatusSUCCESSED) + task := fake.NewTowerTask(models.TaskStatusSUCCESSED) mockVMService.EXPECT().FindVMsByName(elfMachine.Name).Return([]*models.VM{vm1, vm2}, nil) mockVMService.EXPECT().Delete(*vm2.ID).Return(task, nil) @@ -3661,7 +3512,6 @@ var _ = Describe("ElfMachineReconciler", func() { machineContext := newMachineContext(elfCluster, cluster, elfMachine, machine, mockVMService) machineContext.VMService = mockVMService - unexpectedError := errors.New("unexpected error") setLabelInCache(capeManagedLabel) mockVMService.EXPECT().UpsertLabel(*namespaceLabel.Key, *namespaceLabel.Value).Return(namespaceLabel, nil) mockVMService.EXPECT().UpsertLabel(*clusterNameLabel.Key, *clusterNameLabel.Value).Return(clusterNameLabel, nil) diff --git a/controllers/elfmachinetemplate_controller_test.go b/controllers/elfmachinetemplate_controller_test.go index e3b5605a..5ac3d399 100644 --- a/controllers/elfmachinetemplate_controller_test.go +++ b/controllers/elfmachinetemplate_controller_test.go @@ -236,8 +236,8 @@ var _ = Describe("ElfMachineTemplateReconciler", func() { logBuffer.Reset() updatingElfMachine, updatingMachine := fake.NewMachineObjects(elfCluster, cluster) - fake.ToControlPlaneMachine(updatingElfMachine, kcp) - fake.ToControlPlaneMachine(updatingMachine, kcp) + fake.ToCPMachine(updatingElfMachine, kcp) + fake.ToCPMachine(updatingMachine, kcp) fake.SetElfMachineTemplateForElfMachine(updatingElfMachine, emt) conditions.MarkFalse(updatingElfMachine, infrav1.ResourcesHotUpdatedCondition, infrav1.WaitingForResourcesHotUpdateReason, clusterv1.ConditionSeverityInfo, "") ctrlMgrCtx = fake.NewControllerManagerContext(elfCluster, cluster, elfMachine, machine, secret, kcp, @@ -255,8 +255,8 @@ var _ = Describe("ElfMachineTemplateReconciler", func() { kcp.Spec.Replicas = pointer.Int32(3) kcp.Status.Replicas = 3 kcp.Status.UpdatedReplicas = 2 - fake.ToControlPlaneMachine(elfMachine, kcp) - fake.ToControlPlaneMachine(machine, kcp) + fake.ToCPMachine(elfMachine, kcp) + fake.ToCPMachine(machine, kcp) elfMachine.Spec.DiskGiB -= 1 machine.Status.NodeRef = &corev1.ObjectReference{} conditions.MarkTrue(machine, controlplanev1.MachineAPIServerPodHealthyCondition) @@ -311,8 +311,8 @@ var _ = Describe("ElfMachineTemplateReconciler", func() { kcp.Spec.Replicas = pointer.Int32(3) kcp.Status.Replicas = 3 kcp.Status.UpdatedReplicas = 3 - fake.ToControlPlaneMachine(elfMachine, kcp) - fake.ToControlPlaneMachine(machine, kcp) + fake.ToCPMachine(elfMachine, kcp) + fake.ToCPMachine(machine, kcp) ctrlutil.AddFinalizer(machine, infrav1.MachineFinalizer) machine.DeletionTimestamp = &metav1.Time{Time: time.Now().UTC()} ctrlMgrCtx := fake.NewControllerManagerContext(elfCluster, cluster, elfMachine, machine, secret) @@ -352,8 +352,8 @@ var _ = Describe("ElfMachineTemplateReconciler", func() { 
kcp.Spec.Replicas = pointer.Int32(3) kcp.Status.Replicas = 3 kcp.Status.UpdatedReplicas = 3 - fake.ToControlPlaneMachine(elfMachine, kcp) - fake.ToControlPlaneMachine(machine, kcp) + fake.ToCPMachine(elfMachine, kcp) + fake.ToCPMachine(machine, kcp) machine.Status.NodeRef = &corev1.ObjectReference{} conditions.MarkTrue(machine, controlplanev1.MachineAPIServerPodHealthyCondition) conditions.MarkTrue(machine, controlplanev1.MachineControllerManagerPodHealthyCondition) diff --git a/pkg/util/machine/kcp_test.go b/pkg/util/machine/kcp_test.go index f8ce3661..c75e7c00 100644 --- a/pkg/util/machine/kcp_test.go +++ b/pkg/util/machine/kcp_test.go @@ -31,7 +31,7 @@ func TestGetKCPByMachine(t *testing.T) { elfCluster, cluster := fake.NewClusterObjects() _, cpMachine := fake.NewMachineObjects(elfCluster, cluster) kubeadmCP := fake.NewKCP() - fake.ToControlPlaneMachine(cpMachine, kubeadmCP) + fake.ToCPMachine(cpMachine, kubeadmCP) ctrlMgrCtx := fake.NewControllerManagerContext(kubeadmCP) t.Run("should return kcp", func(t *testing.T) { kcp, err := GetKCPByMachine(ctx, ctrlMgrCtx.Client, cpMachine) diff --git a/pkg/util/machine/machine_test.go b/pkg/util/machine/machine_test.go index 1a338fa7..c8969d7f 100644 --- a/pkg/util/machine/machine_test.go +++ b/pkg/util/machine/machine_test.go @@ -49,7 +49,7 @@ func TestGetControlPlaneElfMachinesInCluster(t *testing.T) { elfCluster, cluster := fake.NewClusterObjects() elfMachine1, _ := fake.NewMachineObjects(elfCluster, cluster) elfMachine2, _ := fake.NewMachineObjects(elfCluster, cluster) - fake.ToControlPlaneMachine(elfMachine1, fake.NewKCP()) + fake.ToCPMachine(elfMachine1, fake.NewKCP()) ctrlMgrCtx := fake.NewControllerManagerContext(elfMachine1, elfMachine2) t.Run("should return Control Plane ElfMachines", func(t *testing.T) { @@ -65,7 +65,7 @@ func TestIsControlPlaneMachine(t *testing.T) { elfCluster, cluster := fake.NewClusterObjects() _, machine1 := fake.NewMachineObjects(elfCluster, cluster) _, machine2 := fake.NewMachineObjects(elfCluster, cluster) - fake.ToControlPlaneMachine(machine1, fake.NewKCP()) + fake.ToCPMachine(machine1, fake.NewKCP()) fake.ToWorkerMachine(machine2, fake.NewMD()) t.Run("CP Machine returns true, Worker node returns false", func(t *testing.T) { @@ -83,7 +83,7 @@ func TestGetNodeGroupName(t *testing.T) { kcp.Name = fmt.Sprintf("%s-kcp", cluster.Name) md := fake.NewMD() md.Name = fmt.Sprintf("%s-md", cluster.Name) - fake.ToControlPlaneMachine(machine1, kcp) + fake.ToCPMachine(machine1, kcp) fake.ToWorkerMachine(machine2, md) t.Run("should return node group name of CP Machine and Worker node", func(t *testing.T) { @@ -252,7 +252,7 @@ func TestGetControlPlaneMachinesForCluster(t *testing.T) { kcp := fake.NewKCP() elfCluster, cluster, _, machine, _ := fake.NewClusterAndMachineObjects() - fake.ToControlPlaneMachine(machine, kcp) + fake.ToCPMachine(machine, kcp) _, machine2 := fake.NewMachineObjects(elfCluster, cluster) ctrlMgrCtx := fake.NewControllerManagerContext(elfCluster, cluster, machine, machine2) machines, err := GetControlPlaneMachinesForCluster(goctx.TODO(), ctrlMgrCtx.Client, cluster) diff --git a/test/fake/tower.go b/test/fake/tower.go index 4c76575e..1a96c49f 100644 --- a/test/fake/tower.go +++ b/test/fake/tower.go @@ -54,7 +54,7 @@ func NewTowerHost() *models.Host { LocalID: &localID, Name: &id, Status: models.NewHostStatus(models.HostStatusCONNECTEDHEALTHY), - AllocatableMemoryBytes: pointer.Int64(1 * 1024 * 1024), + AllocatableMemoryBytes: pointer.Int64(MemoryMiB * 1024 * 1024), } } @@ -69,6 +69,11 @@ func NewTowerVM() 
*models.VM { Name: &id, Status: &status, EntityAsyncStatus: (*models.EntityAsyncStatus)(pointer.String("CREATING")), + CPU: &models.NestedCPU{ + Cores: pointer.Int32(NumCPUs), + Sockets: pointer.Int32(NumCPUs / NumCoresPerSocket), + }, + Memory: service.TowerMemory(MemoryMiB), } } @@ -99,13 +104,16 @@ func NewTowerVMNic(order int) *models.VMNic { } } -func NewTowerTask() *models.Task { +func NewTowerTask(status models.TaskStatus) *models.Task { id := uuid.New().String() - status := models.TaskStatusPENDING + st := models.NewTaskStatus(models.TaskStatusPENDING) + if status != "" { + st = models.NewTaskStatus(status) + } return &models.Task{ ID: &id, - Status: &status, + Status: st, } } diff --git a/test/fake/types.go b/test/fake/types.go index a556d5e2..c087c6b9 100644 --- a/test/fake/types.go +++ b/test/fake/types.go @@ -49,8 +49,14 @@ const ( // ElfMachineKind is the fake elf machine kind. ElfMachineKind = "ElfMachine" + // NumCPUs is the default number of CPUs. + NumCPUs = 6 + // NumCoresPerSocket is the default number of CPU cores per socket. + NumCoresPerSocket = 6 // DiskGiB is the default disk size. DiskGiB = 60 + // MemoryMiB is the default memory size. + MemoryMiB = 1024 * 7 ) func NewClusterObjects() (*infrav1.ElfCluster, *clusterv1.Cluster) { @@ -117,9 +123,9 @@ func NewElfMachine(elfCluster *infrav1.ElfCluster) *infrav1.ElfMachine { }, Spec: infrav1.ElfMachineSpec{ HA: true, - NumCPUs: 1, - NumCoresPerSocket: 1, - MemoryMiB: 1, + NumCPUs: NumCPUs, + NumCoresPerSocket: NumCoresPerSocket, + MemoryMiB: MemoryMiB, DiskGiB: DiskGiB, Network: infrav1.NetworkSpec{ Devices: []infrav1.NetworkDeviceSpec{ @@ -210,7 +216,7 @@ func InitOwnerReferences( } } -func ToControlPlaneMachine(machine metav1.Object, kcp *controlplanev1.KubeadmControlPlane) { +func ToCPMachine(machine metav1.Object, kcp *controlplanev1.KubeadmControlPlane) { labels := machine.GetLabels() if labels == nil { labels = make(map[string]string)
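
A note on the NewTowerTask change in test/fake/tower.go above: the helper now takes the desired task status, and the empty string falls back to the old PENDING default. That is what lets the controller tests collapse the removed three-line task/status/assign pattern into a single call. A minimal sketch of the resulting call-site shape, written as a throwaway test; the import paths are inferred from the repository layout and are an assumption, not something this patch states:

package fake_test

import (
	"testing"

	"github.com/smartxworks/cloudtower-go-sdk/v2/models" // assumed Tower SDK path

	"github.com/smartxworks/cluster-api-provider-elf/test/fake" // assumed module path
)

// TestNewTowerTaskDefault exercises the refactored helper: "" keeps the old
// PENDING default, while any explicit status is applied directly.
func TestNewTowerTaskDefault(t *testing.T) {
	pending := fake.NewTowerTask("")
	if *pending.Status != models.TaskStatusPENDING {
		t.Fatalf("expected PENDING, got %s", *pending.Status)
	}

	// One call replaces the removed pattern:
	//   task := fake.NewTowerTask()
	//   status := models.TaskStatusFAILED
	//   task.Status = &status
	failed := fake.NewTowerTask(models.TaskStatusFAILED)
	if *failed.Status != models.TaskStatusFAILED {
		t.Fatalf("expected FAILED, got %s", *failed.Status)
	}
}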
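
Similarly, the new defaults in test/fake/types.go give fake.NewTowerVM and fake.NewElfMachine a single source of truth, which is why the controller tests above can simulate resource drift with one-field tweaks such as *vm.Vcpu += 1 or *vm.CPU.Cores += 1. A self-contained sketch of the arithmetic those constants imply; the values are copied from the hunk, while the reading of the host sizing is an assumption:

package main

import "fmt"

// Mirrored from the constants added to test/fake/types.go.
const (
	NumCPUs           = 6        // default number of CPUs for fake VMs/machines
	NumCoresPerSocket = 6        // default number of CPU cores per socket
	MemoryMiB         = 1024 * 7 // default memory size, 7168 MiB
)

func main() {
	// NewTowerVM derives the socket count from the defaults: 6 / 6 = 1 socket.
	fmt.Println("sockets:", NumCPUs/NumCoresPerSocket)

	// NewTowerHost now advertises MemoryMiB in bytes (7 GiB) rather than the
	// old fixed 1 MiB, so a default fake VM plausibly fits on one fake host.
	fmt.Println("host allocatable bytes:", int64(MemoryMiB)*1024*1024)
}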
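
Finally, the many logBuffer.Reset() hunks all follow one pattern: klog is wired to a single shared buffer once per spec, and the buffer is cleared between assertions instead of being reallocated and re-wired with klog.SetOutput. A minimal sketch of that log-capture pattern, assuming k8s.io/klog/v2; the suite's own flag setup is elided here, so logtostderr is disabled explicitly to make output reach the buffer:

package main

import (
	"bytes"
	"flag"
	"fmt"

	"k8s.io/klog/v2"
)

func main() {
	// Route klog away from stderr so SetOutput takes effect.
	fs := flag.NewFlagSet("klog", flag.ExitOnError)
	klog.InitFlags(fs)
	_ = fs.Set("logtostderr", "false")

	logBuffer := new(bytes.Buffer)
	klog.SetOutput(logBuffer) // wire once, as in the suite's setup

	klog.Info("step one")
	klog.Flush()
	fmt.Println(bytes.Contains(logBuffer.Bytes(), []byte("step one"))) // true

	// Between assertions, clearing the existing buffer is enough; klog still
	// points at the same writer, so no re-wiring is needed.
	logBuffer.Reset()
	klog.Info("step two")
	klog.Flush()
	fmt.Println(bytes.Contains(logBuffer.Bytes(), []byte("step two"))) // true
	fmt.Println(bytes.Contains(logBuffer.Bytes(), []byte("step one"))) // false
}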