Skip to content

Commit

Permalink
Add the network subsystem for multi-host migration
Browse files Browse the repository at this point in the history
Add support for the network subsystem.

Signed-off-by: Houqi (Nick) Zuo <[email protected]>
  • Loading branch information
nickzhq committed Nov 1, 2024
1 parent 169fbf1 commit 00ad3de
Show file tree
Hide file tree
Showing 11 changed files with 297 additions and 0 deletions.
6 changes: 6 additions & 0 deletions virttest/vt_agent/managers/resource_backings/__init__.py
Original file line number Diff line number Diff line change
Expand Up @@ -4,10 +4,15 @@
_NfsPoolConnection,
_NfsVolumeBacking,
)
from .network import (
_TapPortBacking,
_TapNetworkConnection,
)

_pool_conn_classes = dict()
_pool_conn_classes[_DirPoolConnection.get_pool_type()] = _DirPoolConnection
_pool_conn_classes[_NfsPoolConnection.get_pool_type()] = _NfsPoolConnection
_pool_conn_classes[_TapNetworkConnection.get_pool_type()] = _TapNetworkConnection

_backing_classes = dict()
_backing_classes[_DirVolumeBacking.get_pool_type()] = {
Expand All @@ -16,6 +21,7 @@
_backing_classes[_NfsVolumeBacking.get_pool_type()] = {
_NfsVolumeBacking.get_resource_type(): _NfsVolumeBacking,
}
_backing_classes[_TapPortBacking.get_pool_type()] = _TapPortBacking


def get_resource_backing_class(pool_type, resource_type):
Expand Down
Original file line number Diff line number Diff line change
@@ -0,0 +1 @@
from .tap import _TapPortBacking, _TapNetworkConnection
Original file line number Diff line number Diff line change
@@ -0,0 +1,2 @@
from .tap_backing import _TapPortBacking
from .tap_network_connection import _TapNetworkConnection
Original file line number Diff line number Diff line change
@@ -0,0 +1,34 @@
import logging


from ...backing import _ResourceBacking


LOG = logging.getLogger("avocado.agents.resource_backings.network.tap" + __name__)


class _TapPortBacking(_ResourceBacking):
    """
    Worker-node-side backing of a tap "port" resource allocated from a
    pool of type "tap".  Most operations are placeholders to be filled
    in as the network subsystem is implemented.
    """

    _SOURCE_POOL_TYPE = "tap"
    _BINDING_RESOURCE_TYPE = "port"

    def __init__(self, backing_config):
        super().__init__(backing_config)

    def create(self, pool_connection):
        # TODO: create the tap device through the pool connection.
        pass

    def destroy(self, pool_connection):
        # TODO: tear down the tap device.
        pass

    def allocate_resource(self, pool_connection, arguments):
        # Fixed: the original did "raise NotImplemented", which raises a
        # TypeError at runtime (NotImplemented is a singleton, not an
        # exception type); NotImplementedError is what is meant here.
        raise NotImplementedError

    def release_resource(self, pool_connection, arguments):
        raise NotImplementedError

    def get_resource_info(self, pool_connection):
        # TODO: report the tap port configuration.
        pass

    def sync_resource(self, pool_connection, arguments):
        raise NotImplementedError
Original file line number Diff line number Diff line change
@@ -0,0 +1,29 @@
import logging


from ...pool_connection import _ResourcePoolConnection


LOG = logging.getLogger("avocado.agents.resource_backings.network.tap." + __name__)


class _TapNetworkConnection(_ResourcePoolConnection):
    """
    Worker-node-side connection to a "linux bridge" network pool.
    All operations are placeholders until the bridge handling is
    implemented.
    """

    _CONNECT_POOL_TYPE = "linux bridge"

    def __init__(self, pool_config):
        super().__init__(pool_config)
        # NOTE(review): the original assigned pool_config["spec"] to an
        # unused local; dropped until the spec fields are actually used.

    def open(self):
        # TODO: attach to the bridge.
        pass

    def close(self):
        # TODO: detach from the bridge.
        pass

    @property
    def connected(self):
        # TODO: probe the bridge state; always False until implemented.
        return False
2 changes: 2 additions & 0 deletions virttest/vt_resmgr/resources/__init__.py
Original file line number Diff line number Diff line change
Expand Up @@ -2,13 +2,15 @@
# from .cvm import _TdxPool
# from .storage import _CephPool
from .storage import _DirPool, _NfsPool
from .network import _LinuxBridgeNetwork

_pool_classes = dict()
# _pool_classes[_SnpPool.get_pool_type()] = _SnpPool
# _pool_classes[_TdxPool.get_pool_type()] = _TdxPool
# _pool_classes[_CephPool.get_pool_type()] = _CephPool
_pool_classes[_DirPool.get_pool_type()] = _DirPool
_pool_classes[_NfsPool.get_pool_type()] = _NfsPool
_pool_classes[_LinuxBridgeNetwork.get_pool_type()] = _LinuxBridgeNetwork


def get_resource_pool_class(pool_type):
Expand Down
6 changes: 6 additions & 0 deletions virttest/vt_resmgr/resources/network/__init__.py
Original file line number Diff line number Diff line change
@@ -0,0 +1,6 @@
from .tap import _LinuxBridgeNetwork


# __all__ entries must be *strings* naming the public objects, not the
# objects themselves: listing the class object breaks tooling and makes
# "from package import *" resolution inconsistent.
__all__ = (
    "_LinuxBridgeNetwork",
)
65 changes: 65 additions & 0 deletions virttest/vt_resmgr/resources/network/port_resource.py
Original file line number Diff line number Diff line change
@@ -0,0 +1,65 @@
import uuid
from abc import abstractmethod

from ..resource import _Resource


class _PortResource(_Resource):
    """
    This class, inherited from _Resource, defines the port resource model.

    Concrete port types (e.g. tap) implement allocate/release/sync.
    """

    _RESOURCE_TYPE = "port"

    def __init__(self, resource_config):
        super().__init__(resource_config)
        # Dispatch table mapping command names to handler methods.
        self._handlers = {
            "bind": self.bind,
            "unbind": self.unbind,
            "allocate": self.allocate,
            "release": self.release,
        }

    def bind(self, arguments):
        """
        Bind the port resource to one worker node.
        """
        pass

    def unbind(self):
        """
        Unbind the port resource from the worker node.
        """
        pass

    def allocate(self, arguments):
        # Fixed: "raise NotImplemented" raises TypeError at runtime;
        # NotImplementedError is the intended exception.
        raise NotImplementedError

    def release(self, arguments):
        raise NotImplementedError

    def get_info(self, request):
        """
        Return a deep copy of (a sub-tree of) the resource configuration.

        :param request: dotted path selecting a sub-tree of the config,
                        e.g. "meta.allocated"; None returns the whole config
        :raises ValueError: when a path component is not present
        """
        # Fixed: deepcopy was used below but never imported in this module.
        from copy import deepcopy

        r, o = self.sync(dict())
        if r != 0:
            raise Exception(o["out"])

        config = self.resource_config
        if request is not None:
            for item in request.split("."):
                if item in config:
                    config = config[item]
                else:
                    raise ValueError(request)
            else:
                # for-else: traversal completed; wrap the selected value
                # with its last key, e.g. "spec.size" -> {"size": ...}.
                # NOTE(review): reconstructed as a for-else (matching the
                # sibling storage resources) — with an if-else pairing,
                # "item" would be unbound here (NameError). Confirm.
                config = {item: config}

        return deepcopy(config)

    def sync(self, arguments):
        raise NotImplementedError

    def create(self):
        pass

    def destroy(self):
        pass
2 changes: 2 additions & 0 deletions virttest/vt_resmgr/resources/network/tap/__init__.py
Original file line number Diff line number Diff line change
@@ -0,0 +1,2 @@
from .tap_network import _LinuxBridgeNetwork
from .tap_port import _TapPort
37 changes: 37 additions & 0 deletions virttest/vt_resmgr/resources/network/tap/tap_network.py
Original file line number Diff line number Diff line change
@@ -0,0 +1,37 @@
import logging


from ...pool import _ResourcePool
from .tap_port import get_port_resource_class

from virttest.data_dir import get_shared_dir
from virttest.utils_misc import generate_random_string
from virttest.vt_cluster import cluster


LOG = logging.getLogger("avocado." + __name__)


class _LinuxBridgeNetwork(_ResourcePool):
    """
    Network pool backed by a Linux bridge; hands out tap "port" resources.
    """

    _POOL_TYPE = "linux_bridge"

    @classmethod
    def define_config(cls, pool_name, pool_params):
        # TODO: build the pool configuration from pool_params.
        # NOTE(review): the commented-out NFS mount template that was here
        # was dead copy-paste code from the storage pools and was removed.
        pass

    @classmethod
    def get_resource_class(cls, resource_type):
        # Fixed: the method takes "cls" but was missing the @classmethod
        # decorator; it also ignored the get_port_resource_class helper
        # imported at module scope, which is what the sibling pools use
        # to map a resource type to its class.
        return get_port_resource_class(resource_type)

    def meet_resource_request(self, resource_type, resource_params):
        # TODO: decide whether this pool can satisfy the request.
        pass
113 changes: 113 additions & 0 deletions virttest/vt_resmgr/resources/network/tap/tap_port.py
Original file line number Diff line number Diff line change
@@ -0,0 +1,113 @@
import logging

from virttest.utils_numeric import normalize_data_size
from virttest.vt_cluster import cluster

from ..port_resource import _PortResource


LOG = logging.getLogger("avocado." + __name__)


class _TapPort(_PortResource):
    """
    A tap port resource allocated from a Linux-bridge network pool.

    (The original docstring and log messages said "nfs volume" — a
    copy-paste leftover from the storage resources; corrected here.)
    """

    def bind(self, arguments):
        """
        Bind the resource to a backing on a worker node.
        Note: a tap port resource can have many bindings
        """
        nodes = arguments.pop("nodes", list(self.resource_bindings.keys()))
        for node_name in nodes:
            if not self.resource_bindings.get(node_name):
                LOG.info(f"Bind the tap port {self.resource_id} to node {node_name}")
                node = cluster.get_node(node_name)
                r, o = node.proxy.resource.create_backing_object(self.resource_config)
                if r != 0:
                    raise Exception(o["out"])
                self.resource_bindings[node_name] = o["out"]
            else:
                LOG.info(f"The tap port {self.resource_id} has already bound to {node_name}")

    def unbind(self, arguments):
        """
        Unbind the tap port from a worker node
        """
        nodes = arguments.pop("nodes", list(self.resource_bindings.keys()))
        for node_name in nodes:
            backing_id = self.resource_bindings.get(node_name)
            if backing_id:
                LOG.info(f"Unbind the tap port {self.resource_id} from node {node_name}")
                node = cluster.get_node(node_name)
                r, o = node.proxy.resource.destroy_backing_object(backing_id)
                if r != 0:
                    raise Exception(o["out"])
                self.resource_bindings[node_name] = None
            else:
                LOG.info(f"The tap port {self.resource_id} has already unbound from {node_name}")

    def sync(self, arguments):
        """Refresh the cached meta/spec from the first bound backing."""
        LOG.debug(f"Sync up the configuration of the tap port {self.resource_id}")
        node_name, backing_id = list(self.resource_bindings.items())[0]
        node = cluster.get_node(node_name)
        r, o = node.proxy.resource.update_backing(backing_id,
                                                  {"sync": arguments})
        if r != 0:
            raise Exception(o["out"])

        config = o["out"]
        self.resource_meta["allocated"] = config["meta"]["allocated"]
        self.resource_spec["uri"] = config["spec"]["uri"]
        self.resource_spec["allocation"] = config["spec"]["allocation"]

    def allocate(self, arguments):
        """Allocate the port on the first bound worker node."""
        # Fixed: the debug line referenced node_name before it was
        # assigned on the next line (NameError in the original).
        node_name, backing_id = list(self.resource_bindings.items())[0]
        LOG.debug(f"Allocate the tap port {self.resource_id} from {node_name}.")
        node = cluster.get_node(node_name)
        r, o = node.proxy.resource.update_backing(backing_id,
                                                  {"allocate": arguments})
        if r != 0:
            raise Exception(o["out"])

        config = o["out"]
        self.resource_meta["allocated"] = config["meta"]["allocated"]
        self.resource_spec["uri"] = config["spec"]["uri"]
        self.resource_spec["allocation"] = config["spec"]["allocation"]

    def release(self, arguments):
        """Release the port on the first bound worker node."""
        # Fixed: same node_name-before-assignment NameError as allocate().
        node_name, backing_id = list(self.resource_bindings.items())[0]
        LOG.debug(f"Release the tap port {self.resource_id} from {node_name}")
        node = cluster.get_node(node_name)
        r, o = node.proxy.resource.update_backing(backing_id,
                                                  {"release": arguments})
        if r != 0:
            # NOTE(review): siblings read o["out"] on failure — confirm
            # whether the RPC error payload key is "error" or "out".
            raise Exception(o["error"])
        self.resource_meta["allocated"] = False
        self.resource_spec["allocation"] = 0
        self.resource_spec["uri"] = None

    def resize(self, arguments):
        """
        Resize the resource allocation.

        NOTE(review): "size" semantics are inherited from the storage
        volume code this was copied from — confirm it applies to ports.
        """
        new = int(normalize_data_size(arguments["size"], "B"))
        if new != self.resource_spec["size"]:
            # Fixed: same node_name-before-assignment NameError as allocate().
            node_name, backing_id = list(self.resource_bindings.items())[0]
            LOG.debug(f"Resize the tap port {self.resource_id} from {node_name}")
            node = cluster.get_node(node_name)
            r, o = node.proxy.resource.update_backing(backing_id, {"resize": arguments})
            if r != 0:
                raise Exception(o["error"])
            self.resource_spec["size"] = new
        else:
            LOG.debug(f"New size {new} is the same with the original")


def get_port_resource_class(resource_type):
    """Return the port resource class registered for *resource_type*.

    Unknown types yield None, mirroring dict.get() semantics.
    """
    if resource_type == "tap":
        return _TapPort
    return None

0 comments on commit 00ad3de

Please sign in to comment.