Merge pull request #25169 from mheon/graph_stop
Add graph-based pod stop
openshift-merge-bot[bot] authored Feb 10, 2025
2 parents 8d42125 + 46d874a commit a475083
Showing 6 changed files with 375 additions and 164 deletions.
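For context, "graph-based pod stop" stops a pod's containers by walking the inter-container dependency graph in reverse: a container is stopped only after every container that depends on it has stopped, mirroring the graph-based start libpod already performs. The standalone Go sketch below illustrates only that ordering idea; the Ctr type and graph layout are illustrative assumptions, not the repository's actual graph code.

// Minimal sketch of reverse-dependency stop ordering (assumed types,
// not libpod's real graph code).
package main

import "fmt"

type Ctr struct {
	ID        string
	DependsOn []string // IDs this container requires to be running
}

// stopOrder returns container IDs so that dependents come before their
// dependencies (reverse topological order), via depth-first traversal.
func stopOrder(ctrs map[string]*Ctr) []string {
	// dependents[x] = containers that depend on x
	dependents := map[string][]string{}
	for id, c := range ctrs {
		for _, dep := range c.DependsOn {
			dependents[dep] = append(dependents[dep], id)
		}
	}
	visited := map[string]bool{}
	var order []string
	var visit func(id string)
	visit = func(id string) {
		if visited[id] {
			return
		}
		visited[id] = true
		// Stop everything that depends on this container first.
		for _, d := range dependents[id] {
			visit(d)
		}
		order = append(order, id)
	}
	for id := range ctrs {
		visit(id)
	}
	return order
}

func main() {
	ctrs := map[string]*Ctr{
		"infra": {ID: "infra"},
		"db":    {ID: "db", DependsOn: []string{"infra"}},
		"web":   {ID: "web", DependsOn: []string{"db", "infra"}},
	}
	for _, id := range stopOrder(ctrs) {
		fmt.Println("stopping", id) // prints web, then db, then infra
	}
}

Whatever order the traversal starts in, dependents always stop before their dependencies.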
107 changes: 30 additions & 77 deletions libpod/container_api.go
@@ -85,34 +85,23 @@ func (c *Container) initUnlocked(ctx context.Context, recursive bool) error {
 // Start requires that all dependency containers (e.g. pod infra containers) are
 // running before starting the container. The recursive parameter, if set, will start all
 // dependencies before starting this container.
-func (c *Container) Start(ctx context.Context, recursive bool) (finalErr error) {
-	if !c.batched {
-		c.lock.Lock()
-		defer c.lock.Unlock()
-
-		// defer's are executed LIFO so we are locked here
-		// as long as we call this after the defer unlock()
-		defer func() {
-			if finalErr != nil {
-				if err := saveContainerError(c, finalErr); err != nil {
-					logrus.Debug(err)
-				}
-			}
-		}()
-
-		if err := c.syncContainer(); err != nil {
-			return err
+func (c *Container) Start(ctx context.Context, recursive bool) error {
+	// Have to lock the pod the container is a part of.
+	// This prevents running `podman start` at the same time a
+	// `podman pod stop` is running, which could lead to weird races.
+	// Pod locks come before container locks, so do this first.
+	if c.config.Pod != "" {
+		// If we get an error, the pod was probably removed.
+		// So we get an expected ErrCtrRemoved instead of ErrPodRemoved,
+		// just ignore this and move on to syncing the container.
+		pod, _ := c.runtime.state.Pod(c.config.Pod)
+		if pod != nil {
+			pod.lock.Lock()
+			defer pod.lock.Unlock()
 		}
 	}
-	if err := c.prepareToStart(ctx, recursive); err != nil {
-		return err
-	}
-
-	// Start the container
-	if err := c.start(); err != nil {
-		return err
-	}
-	return c.waitForHealthy(ctx)
+
+	return c.startNoPodLock(ctx, recursive)
 }

 // Update updates the given container.
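The removed body is not lost: Start now delegates to a new startNoPodLock helper after taking the pod lock, and that helper presumably carries the old logic. Below is a reconstruction assembled from the removed lines above; the helper's actual definition is not shown in this hunk.

// Hypothetical reconstruction of the helper, assuming the removed body
// of Start moved into it unchanged.
func (c *Container) startNoPodLock(ctx context.Context, recursive bool) (finalErr error) {
	if !c.batched {
		c.lock.Lock()
		defer c.lock.Unlock()

		// defers run LIFO, so the container is still locked here:
		// this runs before the deferred Unlock above.
		defer func() {
			if finalErr != nil {
				if err := saveContainerError(c, finalErr); err != nil {
					logrus.Debug(err)
				}
			}
		}()

		if err := c.syncContainer(); err != nil {
			return err
		}
	}
	if err := c.prepareToStart(ctx, recursive); err != nil {
		return err
	}

	// Start the container.
	if err := c.start(); err != nil {
		return err
	}
	return c.waitForHealthy(ctx)
}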
@@ -294,6 +283,21 @@ func (c *Container) Stop() error {
 // manually. If timeout is 0, SIGKILL will be used immediately to kill the
 // container.
 func (c *Container) StopWithTimeout(timeout uint) (finalErr error) {
+	// Have to lock the pod the container is a part of.
+	// This prevents running `podman stop` at the same time a
+	// `podman pod start` is running, which could lead to weird races.
+	// Pod locks come before container locks, so do this first.
+	if c.config.Pod != "" {
+		// If we get an error, the pod was probably removed.
+		// So we get an expected ErrCtrRemoved instead of ErrPodRemoved,
+		// just ignore this and move on to syncing the container.
+		pod, _ := c.runtime.state.Pod(c.config.Pod)
+		if pod != nil {
+			pod.lock.Lock()
+			defer pod.lock.Unlock()
+		}
+	}
+
 	if !c.batched {
 		c.lock.Lock()
 		defer c.lock.Unlock()
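The "pod locks come before container locks" comment describes a lock-ordering discipline: as long as every code path acquires the pod lock before any container lock, a concurrent `podman stop` and `podman pod start` serialize cleanly instead of deadlocking by taking the two locks in opposite orders. Here is a minimal, runnable sketch of that rule, using plain mutexes as stand-ins for the real pod and container locks.

// Hypothetical stand-ins for a pod lock and a container lock.
package main

import (
	"fmt"
	"sync"
)

var podLock, ctrLock sync.Mutex

// Correct: both operations take the pod lock first, then the container
// lock, so one fully blocks the other instead of deadlocking.
func stopContainer(done *sync.WaitGroup) {
	defer done.Done()
	podLock.Lock() // pod lock first, per the ordering rule
	defer podLock.Unlock()
	ctrLock.Lock()
	defer ctrLock.Unlock()
	fmt.Println("container stopped")
}

func startPod(done *sync.WaitGroup) {
	defer done.Done()
	podLock.Lock() // same order: pod lock first
	defer podLock.Unlock()
	ctrLock.Lock()
	defer ctrLock.Unlock()
	fmt.Println("pod started")
}

func main() {
	var wg sync.WaitGroup
	wg.Add(2)
	go stopContainer(&wg)
	go startPod(&wg)
	// Completes in either order; if one goroutine took ctrLock before
	// podLock instead, the two could deadlock.
	wg.Wait()
}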
@@ -852,58 +856,7 @@ func (c *Container) Cleanup(ctx context.Context, onlyStopped bool) error {
 		}
 	}
 
-	// Check if state is good
-	if !c.ensureState(define.ContainerStateConfigured, define.ContainerStateCreated, define.ContainerStateStopped, define.ContainerStateStopping, define.ContainerStateExited) {
-		return fmt.Errorf("container %s is running or paused, refusing to clean up: %w", c.ID(), define.ErrCtrStateInvalid)
-	}
-	if onlyStopped && !c.ensureState(define.ContainerStateStopped) {
-		return fmt.Errorf("container %s is not stopped and only cleanup for a stopped container was requested: %w", c.ID(), define.ErrCtrStateInvalid)
-	}
-
-	// if the container was not created in the oci runtime or was already cleaned up, then do nothing
-	if c.ensureState(define.ContainerStateConfigured, define.ContainerStateExited) {
-		return nil
-	}
-
-	// Handle restart policy.
-	// Returns a bool indicating whether we actually restarted.
-	// If we did, don't proceed to cleanup - just exit.
-	didRestart, err := c.handleRestartPolicy(ctx)
-	if err != nil {
-		return err
-	}
-	if didRestart {
-		return nil
-	}
-
-	// If we didn't restart, we perform a normal cleanup
-
-	// make sure all the container processes are terminated if we are running without a pid namespace.
-	hasPidNs := false
-	if c.config.Spec.Linux != nil {
-		for _, i := range c.config.Spec.Linux.Namespaces {
-			if i.Type == spec.PIDNamespace {
-				hasPidNs = true
-				break
-			}
-		}
-	}
-	if !hasPidNs {
-		// do not fail on errors
-		_ = c.ociRuntime.KillContainer(c, uint(unix.SIGKILL), true)
-	}
-
-	// Check for running exec sessions
-	sessions, err := c.getActiveExecSessions()
-	if err != nil {
-		return err
-	}
-	if len(sessions) > 0 {
-		return fmt.Errorf("container %s has active exec sessions, refusing to clean up: %w", c.ID(), define.ErrCtrStateInvalid)
-	}
-
-	defer c.newContainerEvent(events.Cleanup)
-	return c.cleanup(ctx)
+	return c.fullCleanup(ctx, onlyStopped)
 }

 // Batch starts a batch operation on the given container
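As with Start, the deleted logic presumably moved into the new fullCleanup helper rather than being dropped. The outline below is a hypothetical skeleton inferred purely from the removed lines above; fullCleanup's real definition is not part of this hunk.

// Hypothetical outline only — the real helper is not shown in this diff.
func (c *Container) fullCleanup(ctx context.Context, onlyStopped bool) error {
	// 1. Refuse unless the container is in a cleanable state (and is
	//    stopped, if onlyStopped was requested).
	// 2. Do nothing if the container never reached the OCI runtime or
	//    was already cleaned up.
	// 3. Apply the restart policy; if the container restarted, return early.
	// 4. Without a PID namespace, SIGKILL any stray container processes.
	// 5. Refuse to clean up while exec sessions are still active.
	// 6. Emit the Cleanup event and perform the actual cleanup
	//    (steps 1-5 elided here; see the removed lines above).
	defer c.newContainerEvent(events.Cleanup)
	return c.cleanup(ctx)
}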