From c24ebd0da06bcccfec269f5f3e8ccecf22e754ca Mon Sep 17 00:00:00 2001
From: Andrei Kvapil
Date: Thu, 27 Apr 2023 18:22:19 +0200
Subject: [PATCH] Fix multiple requisites

Resolve only the first accessible segment and place against only the
first preferred segment: the scheduled node of the pod is the first
entry in both lists, so iterating over the remaining segments is
unnecessary.
---
 pkg/linstor/highlevelclient/high_level_client.go | 15 ++-------------
 .../autoplacetopology/autoplacetopology.go       | 15 +++++++--------
 2 files changed, 9 insertions(+), 21 deletions(-)

diff --git a/pkg/linstor/highlevelclient/high_level_client.go b/pkg/linstor/highlevelclient/high_level_client.go
index 645599c7..2f7c3f99 100644
--- a/pkg/linstor/highlevelclient/high_level_client.go
+++ b/pkg/linstor/highlevelclient/high_level_client.go
@@ -27,7 +27,6 @@ import (
 	"github.com/container-storage-interface/spec/lib/go/csi"
 
 	"github.com/piraeusdatastore/linstor-csi/pkg/linstor/util"
-	"github.com/piraeusdatastore/linstor-csi/pkg/slice"
 	"github.com/piraeusdatastore/linstor-csi/pkg/topology"
 	"github.com/piraeusdatastore/linstor-csi/pkg/volume"
 )
@@ -104,18 +103,8 @@ func (c *HighLevelClient) GetAllTopologyNodes(ctx context.Context, remoteAccessP
 		accessibleSegments = []map[string]string{{}}
 	}
 
-	var allNodes []string
-
-	for _, segment := range accessibleSegments {
-		nodes, err := c.NodesForTopology(ctx, segment)
-		if err != nil {
-			return nil, err
-		}
-
-		allNodes = slice.AppendUnique(allNodes, nodes...)
-	}
-
-	return allNodes, nil
+	// the scheduled node of the pod is the first entry in the accessible segments
+	return c.NodesForTopology(ctx, accessibleSegments[0])
 }
 
 // NodesForTopology finds all matching nodes for the given topology segment.
diff --git a/pkg/topology/scheduler/autoplacetopology/autoplacetopology.go b/pkg/topology/scheduler/autoplacetopology/autoplacetopology.go
index 7868bfc0..8e00ee85 100644
--- a/pkg/topology/scheduler/autoplacetopology/autoplacetopology.go
+++ b/pkg/topology/scheduler/autoplacetopology/autoplacetopology.go
@@ -82,14 +82,13 @@ func (s *Scheduler) Create(ctx context.Context, volId string, params *volume.Par
 	// Step 2: Try to place a replica on any preferred segment
 	log.WithField("requirements", topologies).Trace("got topology requirement")
 
-	for _, preferred := range topologies.GetPreferred() {
-		err := s.PlaceOneAccessibleToSegment(ctx, volId, preferred.GetSegments(), params.AllowRemoteVolumeAccess, diskfulNodes)
-		if err != nil {
-			log.WithError(err).Debug("failed to place on preferred segment")
-		} else {
-			log.Debug("placed accessible to preferred segment")
-			break
-		}
+	// the scheduled node of the pod is the first entry in the preferred segments
+	preferred := topologies.GetPreferred()[0]
+	err = s.PlaceOneAccessibleToSegment(ctx, volId, preferred.GetSegments(), params.AllowRemoteVolumeAccess, diskfulNodes)
+	if err != nil {
+		log.WithError(err).Debug("failed to place on preferred segment")
+	} else {
+		log.Debug("placed accessible to preferred segment")
 	}
 
 	// Step 2a: bail early if resource already matches replica count and requisite nodes
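
For illustration, here is a minimal self-contained Go sketch of the behavior
change, not part of the patch: nodesForTopology below is a hypothetical
stand-in for HighLevelClient.NodesForTopology, and the kubernetes.io/hostname
topology key and node names are assumed. Before the patch the union of nodes
across all accessible segments was returned; after it, only the first segment
is resolved, which the patch takes to be the pod's scheduled node.

package main

import (
	"context"
	"fmt"
)

// nodesForTopology is a hypothetical stand-in for the client's
// NodesForTopology method: it resolves a topology segment to the node
// names matching it. Here every segment simply maps to its hostname
// label so the before/after difference is easy to see.
func nodesForTopology(_ context.Context, segment map[string]string) ([]string, error) {
	return []string{segment["kubernetes.io/hostname"]}, nil
}

func main() {
	ctx := context.Background()

	// The patch assumes the first entry is the segment of the node the
	// pod was scheduled to.
	accessibleSegments := []map[string]string{
		{"kubernetes.io/hostname": "node-a"}, // scheduled node of the pod
		{"kubernetes.io/hostname": "node-b"},
		{"kubernetes.io/hostname": "node-c"},
	}

	// Before the patch: union of nodes across every accessible segment.
	var allNodes []string
	for _, segment := range accessibleSegments {
		nodes, err := nodesForTopology(ctx, segment)
		if err != nil {
			panic(err)
		}
		allNodes = append(allNodes, nodes...)
	}
	fmt.Println("before:", allNodes) // before: [node-a node-b node-c]

	// After the patch: only the first segment, i.e. the scheduled node.
	nodes, err := nodesForTopology(ctx, accessibleSegments[0])
	if err != nil {
		panic(err)
	}
	fmt.Println("after:", nodes) // after: [node-a]
}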