diff --git a/cmd/root.go b/cmd/root.go index 299e98fa0..524e16e4c 100644 --- a/cmd/root.go +++ b/cmd/root.go @@ -9,25 +9,20 @@ import ( "context" "errors" "fmt" - "math" "net" - "net/http" "os" - "os/signal" - "strings" - "syscall" "time" - "github.com/robfig/cron" "github.com/sirupsen/logrus" "github.com/spf13/cobra" "github.com/nicholas-fedor/watchtower/internal/actions" + "github.com/nicholas-fedor/watchtower/internal/api" + "github.com/nicholas-fedor/watchtower/internal/config" "github.com/nicholas-fedor/watchtower/internal/flags" + "github.com/nicholas-fedor/watchtower/internal/logging" "github.com/nicholas-fedor/watchtower/internal/meta" - pkgApi "github.com/nicholas-fedor/watchtower/pkg/api" - metricsAPI "github.com/nicholas-fedor/watchtower/pkg/api/metrics" - "github.com/nicholas-fedor/watchtower/pkg/api/update" + "github.com/nicholas-fedor/watchtower/internal/scheduling" "github.com/nicholas-fedor/watchtower/pkg/container" "github.com/nicholas-fedor/watchtower/pkg/filters" "github.com/nicholas-fedor/watchtower/pkg/metrics" @@ -35,201 +30,164 @@ import ( "github.com/nicholas-fedor/watchtower/pkg/types" ) -var ErrContainerIDNotFound = errors.New( - "container ID not found in /proc/self/cgroup and HOSTNAME is not set", -) - -// singleContainerReport implements types.Report for individual container notifications. -type singleContainerReport struct { - updated []types.ContainerReport - scanned []types.ContainerReport - failed []types.ContainerReport - skipped []types.ContainerReport - stale []types.ContainerReport - fresh []types.ContainerReport -} - -// Scanned returns scanned containers. -func (r *singleContainerReport) Scanned() []types.ContainerReport { return r.scanned } - -// Updated returns updated containers (only one for split notifications). -func (r *singleContainerReport) Updated() []types.ContainerReport { return r.updated } - -// Failed returns failed containers. -func (r *singleContainerReport) Failed() []types.ContainerReport { return r.failed } - -// Skipped returns skipped containers. -func (r *singleContainerReport) Skipped() []types.ContainerReport { return r.skipped } - -// Stale returns stale containers. -func (r *singleContainerReport) Stale() []types.ContainerReport { return r.stale } - -// Fresh returns fresh containers. -func (r *singleContainerReport) Fresh() []types.ContainerReport { return r.fresh } - -// All returns all containers (prioritized by state). -func (r *singleContainerReport) All() []types.ContainerReport { - all := make( - []types.ContainerReport, - 0, - len(r.updated)+len(r.failed)+len(r.skipped)+len(r.stale)+len(r.fresh)+len(r.scanned), +var ( + ErrContainerIDNotFound = errors.New( + "container ID not found in /proc/self/cgroup and HOSTNAME is not set", ) - all = append(all, r.updated...) - all = append(all, r.failed...) - all = append(all, r.skipped...) - all = append(all, r.stale...) - all = append(all, r.fresh...) - all = append(all, r.scanned...) - - return all -} - -// client is the Docker client instance used to interact with container operations in Watchtower. -// -// It provides an interface for listing, stopping, starting, and managing containers, initialized during -// the preRun phase with options derived from command-line flags and environment variables such as -// DOCKER_HOST, DOCKER_TLS_VERIFY, and DOCKER_API_VERSION. -var client container.Client - -// scheduleSpec holds the cron-formatted schedule string that dictates when periodic container updates occur. 
-// -// It is populated during preRun from the --schedule flag or the WATCHTOWER_SCHEDULE environment variable, -// supporting formats like "@every 1h" or standard cron syntax (e.g., "0 0 * * * *") for flexible scheduling. -var scheduleSpec string -// cleanup is a boolean flag determining whether to remove old images after a container update. -// -// It is set during preRun via the --cleanup flag or the WATCHTOWER_CLEANUP environment variable, -// enabling disk space management by deleting outdated images post-update. -var cleanup bool - -// noRestart is a boolean flag that prevents containers from being restarted after an update. -// -// It is configured in preRun via the --no-restart flag or the WATCHTOWER_NO_RESTART environment variable, -// useful when users prefer manual restart control or want to minimize downtime during updates. -var noRestart bool - -// noPull is a boolean flag that skips pulling new images from the registry during updates. -// -// It is enabled in preRun via the --no-pull flag or the WATCHTOWER_NO_PULL environment variable, -// allowing updates to proceed using only locally cached images, potentially reducing network usage. -var noPull bool - -// monitorOnly is a boolean flag enabling a mode where Watchtower monitors containers without updating them. -// -// It is set in preRun via the --monitor-only flag or the WATCHTOWER_MONITOR_ONLY environment variable, -// ideal for observing image staleness without triggering automatic updates. -var monitorOnly bool - -// enableLabel is a boolean flag restricting updates to containers with the "com.centurylinklabs.watchtower.enable" label set to true. -// -// It is configured in preRun via the --label-enable flag or the WATCHTOWER_LABEL_ENABLE environment variable, -// providing granular control over which containers are targeted for updates. -var enableLabel bool - -// disableContainers is a slice of container names explicitly excluded from updates. -// -// It is populated in preRun from the --disable-containers flag or the WATCHTOWER_DISABLE_CONTAINERS environment variable, -// allowing users to blacklist specific containers from Watchtower’s operations. -var disableContainers []string - -// notifier is the notification system instance responsible for sending update status messages to configured channels. -// -// It is initialized in preRun with notification types specified via flags (e.g., --notifications), supporting -// multiple methods like email, Slack, or MSTeams to inform users about update successes, failures, or skips. -var notifier types.Notifier - -// timeout specifies the maximum duration allowed for container stop operations during updates. -// -// It defaults to a value defined in the flags package and can be overridden in preRun via the --timeout flag or -// WATCHTOWER_TIMEOUT environment variable, ensuring containers are stopped gracefully within a specified time limit. -var timeout time.Duration - -// lifecycleHooks is a boolean flag enabling the execution of pre- and post-update lifecycle hook commands. -// -// It is set in preRun via the --enable-lifecycle-hooks flag or the WATCHTOWER_LIFECYCLE_HOOKS environment variable, -// allowing custom scripts to run at specific update stages for additional validation or actions. -var lifecycleHooks bool - -// rollingRestart is a boolean flag enabling rolling restarts, updating containers sequentially rather than all at once. 
-// -// It is configured in preRun via the --rolling-restart flag or the WATCHTOWER_ROLLING_RESTART environment variable, -// reducing downtime by restarting containers one-by-one during updates. -var rollingRestart bool - -// scope defines a specific operational scope for Watchtower, limiting updates to containers matching this scope. -// -// It is set in preRun via the --scope flag or the WATCHTOWER_SCOPE environment variable, useful for isolating -// Watchtower’s actions to a subset of containers (e.g., a project or environment). -var scope string - -// labelPrecedence is a boolean flag giving container label settings priority over global command-line flags. -// -// It is enabled in preRun via the --label-take-precedence flag or the WATCHTOWER_LABEL_PRECEDENCE environment variable, -// allowing container-specific configurations to override broader settings for flexibility. -var labelPrecedence bool - -// lifecycleUID is the default UID to run lifecycle hooks as. -// -// It is set in preRun via the --lifecycle-uid flag or the WATCHTOWER_LIFECYCLE_UID environment variable, -// providing a global default that can be overridden by container labels. -var lifecycleUID int - -// lifecycleGID is the default GID to run lifecycle hooks as. -// -// It is set in preRun via the --lifecycle-gid flag or the WATCHTOWER_LIFECYCLE_GID environment variable, -// providing a global default that can be overridden by container labels. -var lifecycleGID int - -// notificationSplitByContainer is a boolean flag enabling separate notifications for each updated container. -// -// It is set in preRun via the --notification-split-by-container flag or the WATCHTOWER_NOTIFICATION_SPLIT_BY_CONTAINER environment variable, -// allowing users to receive individual notifications instead of grouped ones. -var notificationSplitByContainer bool - -// cpuCopyMode specifies how CPU settings are handled when recreating containers. -// -// It is set during preRun via the --cpu-copy-mode flag or the WATCHTOWER_CPU_COPY_MODE environment variable, -// controlling CPU limit copying behavior for compatibility with different container runtimes like Podman. -var cpuCopyMode string - -// rootCmd represents the root command for the Watchtower CLI, serving as the entry point for all subcommands. -// -// It defines the base usage string, short and long descriptions, and assigns lifecycle hooks (PreRun and Run) -// to manage setup and execution, initialized with default behavior and configured via flags during runtime. -var rootCmd = NewRootCommand() - -// RunConfig encapsulates the configuration parameters for the runMain function. -// -// It aggregates command-line flags and derived settings into a single structure, providing a cohesive way -// to pass configuration data through the CLI execution flow, ensuring all necessary parameters are accessible -// for update operations, API setup, and scheduling. -type RunConfig struct { - // Command is the cobra.Command instance representing the executed command, providing access to parsed flags. - Command *cobra.Command - // Names is a slice of container names explicitly provided as positional arguments, used for filtering. - Names []string - // Filter is the types.Filter function determining which containers are processed during updates. - Filter types.Filter - // FilterDesc is a human-readable description of the applied filter, used in logging and notifications. - FilterDesc string - // RunOnce indicates whether to perform a single update and exit, set via the --run-once flag. 
- RunOnce bool - // UpdateOnStart enables an immediate update check on startup, then continues with periodic updates, set via the --update-on-start flag. - UpdateOnStart bool - // EnableUpdateAPI enables the HTTP update API endpoint, set via the --http-api-update flag. - EnableUpdateAPI bool - // EnableMetricsAPI enables the HTTP metrics API endpoint, set via the --http-api-metrics flag. - EnableMetricsAPI bool - // UnblockHTTPAPI allows periodic polling alongside the HTTP API, set via the --http-api-periodic-polls flag. - UnblockHTTPAPI bool - // APIToken is the authentication token for HTTP API access, set via the --http-api-token flag. - APIToken string - // APIHost is the host to bind the HTTP API to, set via the --http-api-host flag (defaults to empty string). - APIHost string - // APIPort is the port for the HTTP API server, set via the --http-api-port flag (defaults to "8080"). - APIPort string -} + // client is the Docker client instance used to interact with container operations in Watchtower. + // + // It provides an interface for listing, stopping, starting, and managing containers, initialized during + // the preRun phase with options derived from command-line flags and environment variables such as + // DOCKER_HOST, DOCKER_TLS_VERIFY, and DOCKER_API_VERSION. + client container.Client + + // scheduleSpec holds the cron-formatted schedule string that dictates when periodic container updates occur. + // + // It is populated during preRun from the --schedule flag or the WATCHTOWER_SCHEDULE environment variable, + // supporting formats like "@every 1h" or standard cron syntax (e.g., "0 0 * * * *") for flexible scheduling. + scheduleSpec string + + // cleanup is a boolean flag determining whether to remove old images after a container update. + // + // It is set during preRun via the --cleanup flag or the WATCHTOWER_CLEANUP environment variable, + // enabling disk space management by deleting outdated images post-update. + cleanup bool + + // noRestart is a boolean flag that prevents containers from being restarted after an update. + // + // It is configured in preRun via the --no-restart flag or the WATCHTOWER_NO_RESTART environment variable, + // useful when users prefer manual restart control or want to minimize downtime during updates. + noRestart bool + + // noPull is a boolean flag that skips pulling new images from the registry during updates. + // + // It is enabled in preRun via the --no-pull flag or the WATCHTOWER_NO_PULL environment variable, + // allowing updates to proceed using only locally cached images, potentially reducing network usage. + noPull bool + + // monitorOnly is a boolean flag enabling a mode where Watchtower monitors containers without updating them. + // + // It is set in preRun via the --monitor-only flag or the WATCHTOWER_MONITOR_ONLY environment variable, + // ideal for observing image staleness without triggering automatic updates. + monitorOnly bool + + // enableLabel is a boolean flag restricting updates to containers with the "com.centurylinklabs.watchtower.enable" label set to true. + // + // It is configured in preRun via the --label-enable flag or the WATCHTOWER_LABEL_ENABLE environment variable, + // providing granular control over which containers are targeted for updates. + enableLabel bool + + // disableContainers is a slice of container names explicitly excluded from updates. 
+ // + // It is populated in preRun from the --disable-containers flag or the WATCHTOWER_DISABLE_CONTAINERS environment variable, + // allowing users to blacklist specific containers from Watchtower's operations. + disableContainers []string + + // notifier is the notification system instance responsible for sending update status messages to configured channels. + // + // It is initialized in preRun with notification types specified via flags (e.g., --notifications), supporting + // multiple methods like email, Slack, or MSTeams to inform users about update successes, failures, or skips. + notifier types.Notifier + + // timeout specifies the maximum duration allowed for container stop operations during updates. + // + // It defaults to a value defined in the flags package and can be overridden in preRun via the --timeout flag or + // WATCHTOWER_TIMEOUT environment variable, ensuring containers are stopped gracefully within a specified time limit. + timeout time.Duration + + // lifecycleHooks is a boolean flag enabling the execution of pre- and post-update lifecycle hook commands. + // + // It is set in preRun via the --enable-lifecycle-hooks flag or the WATCHTOWER_LIFECYCLE_HOOKS environment variable, + // allowing custom scripts to run at specific update stages for additional validation or actions. + lifecycleHooks bool + + // rollingRestart is a boolean flag enabling rolling restarts, updating containers sequentially rather than all at once. + // + // It is configured in preRun via the --rolling-restart flag or the WATCHTOWER_ROLLING_RESTART environment variable, + // reducing downtime by restarting containers one-by-one during updates. + rollingRestart bool + + // scope defines a specific operational scope for Watchtower, limiting updates to containers matching this scope. + // + // It is set in preRun via the --scope flag or the WATCHTOWER_SCOPE environment variable, useful for isolating + // Watchtower's actions to a subset of containers (e.g., a project or environment). + scope string + + // labelPrecedence is a boolean flag giving container label settings priority over global command-line flags. + // + // It is enabled in preRun via the --label-take-precedence flag or the WATCHTOWER_LABEL_PRECEDENCE environment variable, + // allowing container-specific configurations to override broader settings for flexibility. + labelPrecedence bool + + // lifecycleUID is the default UID to run lifecycle hooks as. + // + // It is set in preRun via the --lifecycle-uid flag or the WATCHTOWER_LIFECYCLE_UID environment variable, + // providing a global default that can be overridden by container labels. + lifecycleUID int + + // lifecycleGID is the default GID to run lifecycle hooks as. + // + // It is set in preRun via the --lifecycle-gid flag or the WATCHTOWER_LIFECYCLE_GID environment variable, + // providing a global default that can be overridden by container labels. + lifecycleGID int + + // notificationSplitByContainer is a boolean flag enabling separate notifications for each updated container. + // + // It is set in preRun via the --notification-split-by-container flag or the WATCHTOWER_NOTIFICATION_SPLIT_BY_CONTAINER environment variable, + // allowing users to receive individual notifications instead of grouped ones. + notificationSplitByContainer bool + + // notificationReport is a boolean flag enabling report-based notifications. 
+ // + // It is set in preRun via the --notification-report flag or the WATCHTOWER_NOTIFICATION_REPORT environment variable, + // controlling whether notifications include session reports or just log entries. + notificationReport bool + + // cpuCopyMode specifies how CPU settings are handled when recreating containers. + // + // It is set during preRun via the --cpu-copy-mode flag or the WATCHTOWER_CPU_COPY_MODE environment variable, + // controlling CPU limit copying behavior for compatibility with different container runtimes like Podman. + cpuCopyMode string + + // rootCmd represents the root command for the Watchtower CLI, serving as the entry point for all subcommands. + // + // It defines the base usage string, short and long descriptions, and assigns lifecycle hooks (PreRun and Run) + // to manage setup and execution, initialized with default behavior and configured via flags during runtime. + rootCmd = NewRootCommand() + + // runUpdatesWithNotifications performs container updates and sends notifications about the results. + // + // It executes the update action with configured parameters, batches notifications, and returns a metric + // summarizing the session for monitoring purposes, ensuring users are informed of update outcomes. + // + // Parameters: + // - filter: The types.Filter determining which containers are targeted for updates. + // - cleanup: Boolean indicating whether to remove old images after updates. + // + // Returns: + // - *metrics.Metric: A pointer to a metric object summarizing the update session (scanned, updated, failed counts). + runUpdatesWithNotifications = func(filter types.Filter, cleanup bool) *metrics.Metric { + return actions.RunUpdatesWithNotifications( + client, + notifier, + notificationSplitByContainer, + notificationReport, + filter, + cleanup, + noRestart, + monitorOnly, + lifecycleHooks, + rollingRestart, + labelPrecedence, + noPull, + timeout, + lifecycleUID, + lifecycleGID, + cpuCopyMode, + ) + } +) // NewRootCommand creates and configures the root command for the Watchtower CLI. // @@ -321,6 +279,9 @@ func preRun(cmd *cobra.Command, _ []string) { // Retrieve notification split flag. notificationSplitByContainer, _ = flagsSet.GetBool("notification-split-by-container") + // Retrieve notification report flag. + notificationReport, _ = flagsSet.GetBool("notification-report") + // Log the scope if specified, aiding debugging by confirming the operational boundary. if scope != "" { logrus.WithField("scope", scope).Debug("Configured operational scope") @@ -434,7 +395,7 @@ func run(c *cobra.Command, names []string) { } // Set configuration for core execution, encapsulating all operational parameters. - cfg := RunConfig{ + cfg := config.RunConfig{ Command: c, Names: names, Filter: filter, @@ -538,7 +499,7 @@ func deriveScopeFromContainer(client container.Client) error { // // Returns: // - int: An exit code (0 for success, 1 for failure) used to terminate the program. -func runMain(cfg RunConfig) int { +func runMain(cfg config.RunConfig) int { // Log the container names being processed for debugging visibility. logrus.WithField("names", cfg.Names).Debug("Processing specified containers") @@ -573,7 +534,15 @@ func runMain(cfg RunConfig) int { // Handle one-time update mode, executing updates and registering metrics. 
if cfg.RunOnce { - writeStartupMessage(cfg.Command, time.Time{}, cfg.FilterDesc, scope) + logging.WriteStartupMessage( + cfg.Command, + time.Time{}, + cfg.FilterDesc, + scope, + client, + notifier, + meta.Version, + ) metric := runUpdatesWithNotifications(cfg.Filter, cleanup) metrics.Default().RegisterScan(metric) notifier.Close() @@ -594,12 +563,12 @@ func runMain(cfg RunConfig) int { defer cancel() // Configure and start the HTTP API, handling any startup errors. - if err := setupAndStartAPI(ctx, cfg, updateLock); err != nil { + if err := api.SetupAndStartAPI(ctx, cfg.APIHost, cfg.APIPort, cfg.APIToken, cfg.EnableUpdateAPI, cfg.EnableMetricsAPI, cfg.UnblockHTTPAPI, cfg.Filter, cfg.Command, cfg.FilterDesc, updateLock, cleanup, client, notifier, scope, meta.Version, runUpdatesWithNotifications, filters.FilterByImage, metrics.Default, logging.WriteStartupMessage); err != nil { return 1 } // Schedule and execute periodic updates, handling errors or shutdown. - if err := runUpgradesOnSchedule(ctx, cfg.Command, cfg.Filter, cfg.FilterDesc, updateLock, cleanup); err != nil { + if err := scheduling.RunUpgradesOnSchedule(ctx, cfg.Command, cfg.Filter, cfg.FilterDesc, updateLock, cleanup, scheduleSpec, logging.WriteStartupMessage, runUpdatesWithNotifications, client, scope, notifier, meta.Version); err != nil { logNotify("Scheduled upgrades failed", err) return 1 @@ -609,70 +578,6 @@ func runMain(cfg RunConfig) int { return 1 } -// setupAndStartAPI configures and launches the HTTP API if enabled by configuration flags. -// -// It sets up update and metrics endpoints, starts the API server in blocking or non-blocking mode, -// and handles startup errors, ensuring the API integrates seamlessly with Watchtower’s update workflow. -// -// Parameters: -// - ctx: The context controlling the API’s lifecycle, enabling graceful shutdown on cancellation. -// - cfg: The RunConfig struct with API-related settings (e.g., token, port, enable flags). -// - updateLock: A channel ensuring only one update runs at a time, shared with the scheduler. -// -// Returns: -// - error: An error if the API fails to start (excluding clean shutdown), nil otherwise. -func setupAndStartAPI(ctx context.Context, cfg RunConfig, updateLock chan bool) error { - // Get the formatted HTTP api address string. - address := getAPIAddr(cfg.APIHost, cfg.APIPort) - - // Initialize the HTTP API with the configured authentication token and address. - httpAPI := pkgApi.New(cfg.APIToken, address) - - // Register the update API endpoint if enabled, linking it to the update handler. - if cfg.EnableUpdateAPI { - updateHandler := update.New(func(images []string) *metrics.Metric { - metric := runUpdatesWithNotifications( - filters.FilterByImage(images, cfg.Filter), - cleanup, - ) - metrics.Default().RegisterScan(metric) - - return metric - }, updateLock) - httpAPI.RegisterFunc(updateHandler.Path, updateHandler.Handle) - - if !cfg.UnblockHTTPAPI { - writeStartupMessage(cfg.Command, time.Time{}, cfg.FilterDesc, scope) - } - } - - // Register the metrics API endpoint if enabled, providing access to update metrics. - if cfg.EnableMetricsAPI { - metricsHandler := metricsAPI.New() - httpAPI.RegisterHandler(metricsHandler.Path, metricsHandler.Handle) - } - - // Start the API server, logging errors unless it’s a clean shutdown. 
- if err := httpAPI.Start(ctx, cfg.EnableUpdateAPI && !cfg.UnblockHTTPAPI); err != nil && - !errors.Is(err, http.ErrServerClosed) { - logrus.WithError(err).Error("Failed to start API") - - return fmt.Errorf("failed to start HTTP API: %w", err) - } - - return nil -} - -// getAPIAddr formats the API address string based on host and port. -func getAPIAddr(host, port string) string { - address := host + ":" + port - if host != "" && strings.Contains(host, ":") && net.ParseIP(host) != nil { - address = "[" + host + "]:" + port - } - - return address -} - // logNotify logs an error message and ensures notifications are sent before returning control. // // It uses a specific message if provided, falling back to a generic one, and includes the error in fields. @@ -699,492 +604,3 @@ func awaitDockerClient() { ) time.Sleep(1 * time.Second) } - -// formatDuration converts a time.Duration into a human-readable string representation. -// -// It breaks down the duration into hours, minutes, and seconds, formatting each unit with appropriate -// grammar (singular or plural) and returning a string like "1 hour, 2 minutes, 3 seconds" or "0 seconds" -// if the duration is zero, ensuring a user-friendly output for logs and notifications. -// -// Parameters: -// - duration: The time.Duration to convert into a readable string. -// -// Returns: -// - string: A formatted string representing the duration, always including at least "0 seconds". -func formatDuration(duration time.Duration) string { - const ( - minutesPerHour = 60 // Number of minutes in an hour for duration breakdown - secondsPerMinute = 60 // Number of seconds in a minute for duration breakdown - timeUnitCount = 3 // Number of time units (hours, minutes, seconds) for pre-allocation - ) - - // timeUnit represents a single unit of time (hours, minutes, or seconds) with its value and labels. - type timeUnit struct { - value int64 // The numeric value of the unit (e.g., 2 for 2 hours) - singular string // The singular form of the unit (e.g., "hour") - plural string // The plural form of the unit (e.g., "hours") - } - - // Define units with calculated values from the duration, preserving order for display. - units := []timeUnit{ - {int64(duration.Hours()), "hour", "hours"}, - {int64(math.Mod(duration.Minutes(), minutesPerHour)), "minute", "minutes"}, - {int64(math.Mod(duration.Seconds(), secondsPerMinute)), "second", "seconds"}, - } - - parts := make([]string, 0, timeUnitCount) - // Format each unit, forcing inclusion of seconds if no other parts exist to avoid empty output. - for i, unit := range units { - parts = append(parts, formatTimeUnit(unit, i == len(units)-1 && len(parts) == 0)) - } - - // Join non-empty parts, ensuring a readable output with proper separators. - joined := strings.Join(filterEmpty(parts), ", ") - if joined == "" { - return "0 seconds" // Default output when duration is zero or all units are skipped. - } - - return joined -} - -// formatTimeUnit formats a single time unit into a string based on its value and context. -// -// It applies singular or plural grammar, skipping leading zeros unless forced (e.g., for seconds as the last unit), -// returning an empty string for skippable zeros to maintain a concise output. -// -// Parameters: -// - unit: The timeUnit struct containing the value and labels (singular/plural) to format. -// - forceInclude: A boolean indicating whether to include the unit even if zero (e.g., for seconds as fallback). 
-// -// Returns: -// - string: The formatted unit (e.g., "1 hour", "2 minutes") or empty string if skipped. -func formatTimeUnit(unit struct { - value int64 - singular string - plural string -}, forceInclude bool, -) string { - switch { - case unit.value == 1: - return "1 " + unit.singular - case unit.value > 1 || forceInclude: - return fmt.Sprintf("%d %s", unit.value, unit.plural) - default: - return "" // Skip zero values unless forced. - } -} - -// filterEmpty removes empty strings from a slice, returning only non-empty elements. -// -// It ensures the final formatted duration string excludes unnecessary parts, maintaining readability -// by filtering out zero-value units that were not explicitly included. -// -// Parameters: -// - parts: A slice of strings representing formatted time units (e.g., "1 hour", ""). -// -// Returns: -// - []string: A new slice containing only the non-empty strings from the input. -func filterEmpty(parts []string) []string { - var filtered []string - - for _, part := range parts { - if part != "" { - filtered = append(filtered, part) - } - } - - return filtered -} - -// writeStartupMessage logs or notifies startup information based on configuration flags. -// -// It reports Watchtower’s version, notification setup, container filtering details, scheduling information, -// and HTTP API status, providing users with a comprehensive overview of the application’s initial state. -// -// Parameters: -// - c: The cobra.Command instance, providing access to flags like --no-startup-message. -// - sched: The time.Time of the first scheduled run, or zero if no schedule is set. -// - filtering: A string describing the container filter applied (e.g., "Watching all containers"). -// - scope: The scope name for structured logging, empty string if no scope is set. -func writeStartupMessage(c *cobra.Command, sched time.Time, filtering string, scope string) { - // Retrieve flags controlling startup message behavior and API setup. - noStartupMessage, _ := c.PersistentFlags().GetBool("no-startup-message") - enableUpdateAPI, _ := c.PersistentFlags().GetBool("http-api-update") - - apiListenAddr, _ := c.PersistentFlags().GetString("http-api-host") - - apiPort, _ := c.PersistentFlags().GetString("http-api-port") - if apiPort == "" { - apiPort = "8080" - } - - if apiListenAddr == "" { - apiListenAddr = ":" + apiPort - } else { - apiListenAddr = apiListenAddr + ":" + apiPort - } - - // Configure the logger based on whether startup messages should be suppressed. - startupLog := setupStartupLogger(noStartupMessage) - - var version string - if client != nil { - version = client.GetVersion() - } - - startupLog.Info("Watchtower ", meta.Version, " using Docker API v", version) - - // Log details about configured notifiers or lack thereof. - var notifierNames []string - if notifier != nil { - notifierNames = notifier.GetNames() - } - - logNotifierInfo(startupLog, notifierNames) - - // Log filtering information, using structured logging for scope when set - if scope != "" { - startupLog.WithField("scope", scope).Info("Only checking containers in scope") - } else { - startupLog.Debug(filtering) - } - - // Log scheduling or run mode information based on configuration. - logScheduleInfo(startupLog, c, sched) - - // Report HTTP API status if enabled. - if enableUpdateAPI { - startupLog.Info(fmt.Sprintf("The HTTP API is enabled at %s.", apiListenAddr)) - } - - // Send batched notifications if not suppressed, ensuring startup info reaches users. 
- if !noStartupMessage { - notifier.SendNotification(nil) - } - - // Warn about trace-level logging if enabled, as it may expose sensitive data. - if logrus.IsLevelEnabled(logrus.TraceLevel) { - startupLog.Warn( - "Trace level enabled: log will include sensitive information as credentials and tokens", - ) - } -} - -// setupStartupLogger configures the logger for startup messages based on message suppression settings. -// -// It uses a local log entry if messages are suppressed (--no-startup-message), otherwise batches messages -// via the notifier for consolidated delivery, ensuring flexibility in how startup info is presented. -// -// Parameters: -// - noStartupMessage: A boolean indicating whether startup messages should be logged locally only. -// -// Returns: -// - *logrus.Entry: A configured log entry for writing startup messages. -func setupStartupLogger(noStartupMessage bool) *logrus.Entry { - if noStartupMessage { - return notifications.LocalLog - } - - log := logrus.NewEntry(logrus.StandardLogger()) - - if notifier != nil { - notifier.StartNotification() - } - - return log -} - -// logNotifierInfo logs details about the notification setup for Watchtower. -// -// It reports the list of configured notifier names (e.g., "email, slack") or indicates no notifications -// are set up, providing visibility into how update statuses will be communicated. -// -// Parameters: -// - log: The logrus.Entry used to write the notification information. -// - notifierNames: A slice of strings representing the names of configured notifiers. -func logNotifierInfo(log *logrus.Entry, notifierNames []string) { - if len(notifierNames) > 0 { - log.Info("Using notifications: " + strings.Join(notifierNames, ", ")) - } else { - log.Info("Using no notifications") - } -} - -// logScheduleInfo logs information about the scheduling or run mode configuration. -// -// It handles scheduled runs with timing details, one-time updates, or indicates no periodic runs, -// ensuring users understand when and how updates will occur. -// -// Parameters: -// - log: The logrus.Entry used to write the schedule information. -// - c: The cobra.Command instance, providing access to flags like --run-once. -// - sched: The time.Time of the first scheduled run, or zero if no schedule is set. -func logScheduleInfo(log *logrus.Entry, c *cobra.Command, sched time.Time) { - switch { - case !sched.IsZero(): // scheduled runs - until := formatDuration(time.Until(sched)) - log.Info("Scheduling next run: " + sched.Format("2006-01-02 15:04:05 -0700 MST")) - log.Info("Note that the next check will be performed in " + until) - - case func() bool { // one-time updates - v, _ := c.PersistentFlags().GetBool("run-once") - - return v - }(): - log.Info("Running a one time update.") - - case func() bool { // update on start - v, _ := c.PersistentFlags().GetBool("update-on-start") - - return v - }(): - log.Info("Running update on start, then scheduling periodic updates.") - - case func() bool { // HTTP API without periodic polling - a, _ := c.PersistentFlags().GetBool("http-api-update") - b, _ := c.PersistentFlags().GetBool("http-api-periodic-polls") - - return a && !b - }(): - log.Info("Updates via HTTP API enabled. Periodic updates are not enabled.") - - case func() bool { // HTTP API with periodic polling - a, _ := c.PersistentFlags().GetBool("http-api-update") - b, _ := c.PersistentFlags().GetBool("http-api-periodic-polls") - - return a && b - }(): - log.Info("Updates via HTTP API enabled. 
Periodic updates are also enabled.") - - default: // default periodic - log.Info("Periodic updates are enabled with default schedule.") - } -} - -// waitForRunningUpdate waits for any currently running update to complete before proceeding with shutdown. -// It checks the lock channel status and blocks with a timeout if an update is in progress. -// Parameters: -// - ctx: The context for cancellation, allowing early shutdown on context timeout. -// - lock: The channel used to synchronize updates, ensuring only one runs at a time. -func waitForRunningUpdate(ctx context.Context, lock chan bool) { - const updateWaitTimeout = 30 * time.Second - - logrus.Debug("Checking lock status before shutdown.") - - if len(lock) == 0 { - select { - case <-lock: - logrus.Debug("Lock acquired, update finished.") - case <-time.After(updateWaitTimeout): - logrus.Warn("Timeout waiting for running update to finish, proceeding with shutdown.") - case <-ctx.Done(): - logrus.Debug("Context cancelled, proceeding with shutdown.") - } - } else { - logrus.Debug("No update running, lock available.") - } - - logrus.Debug("Lock check completed.") -} - -// runUpgradesOnSchedule schedules and executes periodic container updates according to the cron specification. -// -// It sets up a cron scheduler, runs updates at specified intervals, and ensures graceful shutdown on interrupt -// signals (SIGINT, SIGTERM) or context cancellation, handling concurrency with a lock channel. -// If update-on-start is enabled, it triggers the first update immediately before starting the scheduler. -// -// Parameters: -// - ctx: The context controlling the scheduler’s lifecycle, enabling shutdown on cancellation. -// - c: The cobra.Command instance, providing access to flags for startup messaging. -// - filter: The types.Filter determining which containers are updated. -// - filtering: A string describing the filter, used in startup messaging. -// - lock: A channel ensuring only one update runs at a time, or nil to create a new one. -// - cleanup: Boolean indicating whether to remove old images after updates. -// -// Returns: -// - error: An error if scheduling fails (e.g., invalid cron spec), nil on successful shutdown. -func runUpgradesOnSchedule( - ctx context.Context, - c *cobra.Command, - filter types.Filter, - filtering string, - lock chan bool, - cleanup bool, -) error { - // Initialize lock if not provided, ensuring single-update concurrency. - if lock == nil { - lock = make(chan bool, 1) - lock <- true - } - - // Create a new cron scheduler for managing periodic updates. - scheduler := cron.New() - - // Define the update function to be used both for scheduled runs and immediate execution. - updateFunc := func() { - select { - case v := <-lock: - defer func() { lock <- v }() - - metric := runUpdatesWithNotifications(filter, cleanup) - metrics.Default().RegisterScan(metric) - default: - metrics.Default().RegisterScan(nil) - logrus.Debug("Skipped another update already running.") - } - - nextRuns := scheduler.Entries() - if len(nextRuns) > 0 { - logrus.Debug("Scheduled next run: " + nextRuns[0].Next.String()) - } - } - - // Add the update function to the cron schedule, handling concurrency and metrics. - if scheduleSpec != "" { - if err := scheduler.AddFunc( - scheduleSpec, - updateFunc); err != nil { - return fmt.Errorf("failed to schedule updates: %w", err) - } - } - - // Log startup message with the first scheduled run time. 
- var nextRun time.Time - if len(scheduler.Entries()) > 0 { - nextRun = scheduler.Entries()[0].Schedule.Next(time.Now()) - } - - writeStartupMessage(c, nextRun, filtering, scope) - - // Check if update-on-start is enabled and trigger immediate update if so. - updateOnStart, _ := c.PersistentFlags().GetBool("update-on-start") - if updateOnStart { - logrus.Info("Update on startup enabled - performing immediate check") - updateFunc() - } - - // Start the scheduler to begin periodic execution. - scheduler.Start() - - // Set up signal handling for graceful shutdown. - interrupt := make(chan os.Signal, 1) - signal.Notify(interrupt, os.Interrupt, syscall.SIGTERM) - - // Wait for shutdown signal or context cancellation. - select { - case <-ctx.Done(): - logrus.Debug("Context canceled, stopping scheduler...") - case <-interrupt: - logrus.Debug("Received interrupt signal, stopping scheduler...") - } - - // Stop the scheduler and wait for any running update to complete. - scheduler.Stop() - logrus.Debug("Waiting for running update to be finished...") - - waitForRunningUpdate(ctx, lock) - - logrus.Debug("Scheduler stopped and update completed.") - - return nil -} - -// runUpdatesWithNotifications performs container updates and sends notifications about the results. -// -// It executes the update action with configured parameters, batches notifications, and returns a metric -// summarizing the session for monitoring purposes, ensuring users are informed of update outcomes. -// -// Parameters: -// - filter: The types.Filter determining which containers are targeted for updates. -// - cleanup: Boolean indicating whether to remove old images after updates. -// -// Returns: -// - *metrics.Metric: A pointer to a metric object summarizing the update session (scanned, updated, failed counts). -var runUpdatesWithNotifications = func(filter types.Filter, cleanup bool) *metrics.Metric { - // Start batching notifications to group update messages, if notifier is initialized - if notifier != nil { - if notifier != nil { - notifier.StartNotification() - } - } else { - logrus.Warn("Notifier is nil, skipping notification batching") - } - - // Configure update parameters based on global flags and settings. - updateParams := types.UpdateParams{ - Filter: filter, - Cleanup: cleanup, - NoRestart: noRestart, - Timeout: timeout, - MonitorOnly: monitorOnly, - LifecycleHooks: lifecycleHooks, - RollingRestart: rollingRestart, - LabelPrecedence: labelPrecedence, - NoPull: noPull, - LifecycleUID: lifecycleUID, - LifecycleGID: lifecycleGID, - CPUCopyMode: cpuCopyMode, - } - - // Execute the update action, capturing results and image IDs for cleanup. - result, cleanupImageIDs, err := actions.Update(client, updateParams) - if err != nil { - logrus.WithError(err).Error("Update execution failed") - - return &metrics.Metric{ - Scanned: 0, - Updated: 0, - Failed: 0, - } - } - - // Perform deferred image cleanup if enabled. - if cleanup { - actions.CleanupImages(client, cleanupImageIDs) - } - - // Log update report for debugging. 
- updatedNames := make([]string, 0, len(result.Updated())) - for _, r := range result.Updated() { - updatedNames = append(updatedNames, r.Name()) - } - - logrus.WithFields(logrus.Fields{ - "scanned": len(result.Scanned()), - "updated": len(result.Updated()), - "failed": len(result.Failed()), - "updated_names": updatedNames, - }).Debug("Report before notification") - - // Send the batched notification with update results, if notifier and result are initialized - if notifier != nil && result != nil { - if notificationSplitByContainer && len(result.Updated()) > 0 { - // Send separate notifications for each updated container - for _, updatedContainer := range result.Updated() { - // Create a minimal report with only this container - singleContainerReport := &singleContainerReport{ - updated: []types.ContainerReport{updatedContainer}, - scanned: result.Scanned(), // Include all scanned for context - failed: result.Failed(), // Include all failed for context - skipped: result.Skipped(), // Include all skipped for context - stale: result.Stale(), // Include all stale for context - fresh: result.Fresh(), // Include all fresh for context - } - notifier.SendNotification(singleContainerReport) - } - } else { - // Send grouped notification as before - notifier.SendNotification(result) - } - } - - // Generate and log a metric summarizing the update session. - metricResults := metrics.NewMetric(result) - notifications.LocalLog.WithFields(logrus.Fields{ - "scanned": metricResults.Scanned, - "updated": metricResults.Updated, - "failed": metricResults.Failed, - }).Info("Update session completed") - - return metricResults -} diff --git a/cmd/root_test.go b/cmd/root_test.go index 0903d31ee..db4e39b73 100644 --- a/cmd/root_test.go +++ b/cmd/root_test.go @@ -19,7 +19,11 @@ import ( dockerContainer "github.com/docker/docker/api/types/container" + "github.com/nicholas-fedor/watchtower/internal/api" "github.com/nicholas-fedor/watchtower/internal/flags" + "github.com/nicholas-fedor/watchtower/internal/logging" + "github.com/nicholas-fedor/watchtower/internal/scheduling" + "github.com/nicholas-fedor/watchtower/internal/util" "github.com/nicholas-fedor/watchtower/pkg/api/update" containerMock "github.com/nicholas-fedor/watchtower/pkg/container/mocks" "github.com/nicholas-fedor/watchtower/pkg/metrics" @@ -364,7 +368,7 @@ func TestFormatDuration(t *testing.T) { for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { - result := formatDuration(tt.duration) + result := util.FormatDuration(tt.duration) assert.Equal(t, tt.expected, result) }) } @@ -415,11 +419,7 @@ func TestFormatTimeUnit(t *testing.T) { for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { - result := formatTimeUnit(struct { - value int64 - singular string - plural string - }{tt.value, tt.singular, tt.plural}, tt.forceInclude) + result := util.FormatTimeUnit(tt.value, tt.singular, tt.plural, tt.forceInclude) assert.Equal(t, tt.expected, result) }) } @@ -455,7 +455,7 @@ func TestFilterEmpty(t *testing.T) { for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { - result := filterEmpty(tt.input) + result := util.FilterEmpty(tt.input) assert.Equal(t, tt.expected, result) }) } @@ -545,7 +545,7 @@ func TestGetAPIAddr(t *testing.T) { for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { - result := getAPIAddr(tt.host, tt.port) + result := api.GetAPIAddr(tt.host, tt.port) assert.Equal(t, tt.expected, result) // Verify the formatted address is a valid TCP address @@ -629,6 +629,12 @@ func TestUpdateLockSerialization(t *testing.T) 
{ // TestConcurrentScheduledAndAPIUpdate verifies that API-triggered updates wait for scheduled updates to complete, // ensuring proper serialization and preventing race conditions between periodic updates and HTTP API calls. func TestConcurrentScheduledAndAPIUpdate(t *testing.T) { + // Enable debug logging to see lock acquisition logs + originalLevel := logrus.GetLevel() + + logrus.SetLevel(logrus.DebugLevel) + defer logrus.SetLevel(originalLevel) + // Initialize the update lock channel with the same pattern as in runMain updateLock := make(chan bool, 1) updateLock <- true @@ -653,11 +659,15 @@ func TestConcurrentScheduledAndAPIUpdate(t *testing.T) { // Simulate scheduled update (longer duration) go func() { + t.Log("Scheduled: trying to acquire lock") + select { case v := <-updateLock: + t.Log("Scheduled: acquired lock") close(scheduledStarted) time.Sleep(200 * time.Millisecond) // Simulate scheduled update work (longer than API) close(scheduledCompleted) + t.Log("Scheduled: releasing lock") updateLock <- v default: @@ -665,8 +675,13 @@ func TestConcurrentScheduledAndAPIUpdate(t *testing.T) { } }() + // Wait for scheduled update to start + <-scheduledStarted + // Simulate API update request go func() { + t.Log("API: creating request") + req, err := http.NewRequestWithContext( context.Background(), http.MethodPost, @@ -680,12 +695,12 @@ func TestConcurrentScheduledAndAPIUpdate(t *testing.T) { } w := httptest.NewRecorder() + + t.Log("API: calling handler.Handle") handler.Handle(w, req) + t.Log("API: handler.Handle completed") }() - // Wait for scheduled update to start - <-scheduledStarted - // Verify API update has not started yet (should be blocked by lock) select { case <-apiStarted: @@ -747,7 +762,21 @@ func TestUpdateOnStartTriggersImmediateUpdate(t *testing.T) { filterDesc := testFilterDesc // The function should trigger immediate update and then start scheduler - err = runUpgradesOnSchedule(ctx, cmd, filter, filterDesc, updateLock, false) + err = scheduling.RunUpgradesOnSchedule( + ctx, + cmd, + filter, + filterDesc, + updateLock, + false, + "", + logging.WriteStartupMessage, + runUpdatesWithNotifications, + nil, + "", + nil, + "", + ) // Should not return an error (context cancellation is expected) require.NoError(t, err) @@ -816,7 +845,21 @@ func TestUpdateOnStartIntegratesWithCronScheduling(t *testing.T) { filterDesc := testFilterDesc startTime := time.Now() - err = runUpgradesOnSchedule(ctx, cmd, filter, filterDesc, updateLock, false) + err = scheduling.RunUpgradesOnSchedule( + ctx, + cmd, + filter, + filterDesc, + updateLock, + false, + "", + logging.WriteStartupMessage, + runUpdatesWithNotifications, + nil, + "", + nil, + "", + ) // Should not return an error (context cancellation is expected) require.NoError(t, err) @@ -888,7 +931,21 @@ func TestUpdateOnStartLockingBehavior(t *testing.T) { filter := func(_ types.FilterableContainer) bool { return false } filterDesc := testFilterDesc - err = runUpgradesOnSchedule(ctx, cmd, filter, filterDesc, updateLock, false) + err = scheduling.RunUpgradesOnSchedule( + ctx, + cmd, + filter, + filterDesc, + updateLock, + false, + "", + logging.WriteStartupMessage, + runUpdatesWithNotifications, + nil, + "", + nil, + "", + ) // Should not return an error require.NoError(t, err) @@ -939,7 +996,21 @@ func TestUpdateOnStartSelfUpdateScenario(t *testing.T) { filter := func(_ types.FilterableContainer) bool { return true } filterDesc := testFilterDesc - err = runUpgradesOnSchedule(ctx, cmd, filter, filterDesc, updateLock, false) + err = 
scheduling.RunUpgradesOnSchedule( + ctx, + cmd, + filter, + filterDesc, + updateLock, + false, + "", + logging.WriteStartupMessage, + runUpdatesWithNotifications, + nil, + "", + nil, + "", + ) // Should not return an error require.NoError(t, err) @@ -1001,7 +1072,21 @@ func TestUpdateOnStartMultiInstanceScenario(t *testing.T) { filter := func(_ types.FilterableContainer) bool { return false } filterDesc := "instance1" - err := runUpgradesOnSchedule(ctx, cmd1, filter, filterDesc, updateLock, false) + err := scheduling.RunUpgradesOnSchedule( + ctx, + cmd1, + filter, + filterDesc, + updateLock, + false, + "", + logging.WriteStartupMessage, + runUpdatesWithNotifications, + nil, + "", + nil, + "", + ) assert.NoError(t, err) atomic.AddInt32(&completed, 1) close(instance1Called) @@ -1014,7 +1099,21 @@ func TestUpdateOnStartMultiInstanceScenario(t *testing.T) { filter := func(_ types.FilterableContainer) bool { return false } filterDesc := "instance2" - err := runUpgradesOnSchedule(ctx, cmd2, filter, filterDesc, updateLock, false) + err := scheduling.RunUpgradesOnSchedule( + ctx, + cmd2, + filter, + filterDesc, + updateLock, + false, + "", + logging.WriteStartupMessage, + runUpdatesWithNotifications, + nil, + "", + nil, + "", + ) assert.NoError(t, err) atomic.AddInt32(&completed, 1) close(instance2Called) @@ -1059,7 +1158,7 @@ func TestWaitForRunningUpdate_NoUpdateRunning(t *testing.T) { ctx := context.Background() start := time.Now() - waitForRunningUpdate(ctx, lock) + scheduling.WaitForRunningUpdate(ctx, lock) elapsed := time.Since(start) @@ -1077,7 +1176,7 @@ func TestWaitForRunningUpdate_UpdateRunning(t *testing.T) { waitCompleted := make(chan bool, 1) go func() { - waitForRunningUpdate(ctx, lock) + scheduling.WaitForRunningUpdate(ctx, lock) waitCompleted <- true }() @@ -1141,7 +1240,21 @@ func TestRunUpgradesOnSchedule_ShutdownWaitsForRunningUpdate(t *testing.T) { filterDesc := testFilterDesc // This should start and wait for context cancellation - err := runUpgradesOnSchedule(ctx, cmd, filter, filterDesc, updateLock, false) + err := scheduling.RunUpgradesOnSchedule( + ctx, + cmd, + filter, + filterDesc, + updateLock, + false, + "", + logging.WriteStartupMessage, + runUpdatesWithNotifications, + nil, + "", + nil, + "", + ) assert.NoError(t, err) shutdownCompleted <- true diff --git a/internal/actions/actions.go b/internal/actions/actions.go new file mode 100644 index 000000000..7871544e7 --- /dev/null +++ b/internal/actions/actions.go @@ -0,0 +1,176 @@ +package actions + +import ( + "strings" + "time" + + "github.com/sirupsen/logrus" + + "github.com/nicholas-fedor/watchtower/pkg/container" + "github.com/nicholas-fedor/watchtower/pkg/metrics" + "github.com/nicholas-fedor/watchtower/pkg/notifications" + "github.com/nicholas-fedor/watchtower/pkg/session" + "github.com/nicholas-fedor/watchtower/pkg/types" +) + +// RunUpdatesWithNotifications performs container updates and sends notifications about the results. +// +// It executes the update action with configured parameters, batches notifications, and returns a metric +// summarizing the session for monitoring purposes, ensuring users are informed of update outcomes. +// +// Parameters: +// - client: The Docker client instance used for container operations. +// - notifier: The notification system instance for sending update status messages. +// - notificationSplitByContainer: Boolean flag enabling separate notifications for each updated container. +// - notificationReport: Boolean flag enabling report-based notifications. 
+// - filter: The types.Filter determining which containers are targeted for updates. +// - cleanup: Boolean indicating whether to remove old images after updates. +// - noRestart: Boolean flag preventing containers from being restarted after updates. +// - timeout: Maximum duration allowed for container stop operations during updates. +// - monitorOnly: Boolean flag enabling mode where Watchtower monitors without updating. +// - lifecycleHooks: Boolean flag enabling execution of pre- and post-update lifecycle hooks. +// - rollingRestart: Boolean flag enabling rolling restarts for sequential updates. +// - labelPrecedence: Boolean flag giving container label settings priority over global flags. +// - noPull: Boolean flag skipping image pulls during updates. +// - lifecycleUID: Default UID for running lifecycle hooks. +// - lifecycleGID: Default GID for running lifecycle hooks. +// - cpuCopyMode: Specifies how CPU settings are handled during container recreation. +// +// Returns: +// - *metrics.Metric: A pointer to a metric object summarizing the update session (scanned, updated, failed counts). +func RunUpdatesWithNotifications( + client container.Client, + notifier types.Notifier, + notificationSplitByContainer, notificationReport bool, + filter types.Filter, + cleanup, noRestart, monitorOnly, lifecycleHooks, rollingRestart, labelPrecedence, noPull bool, + timeout time.Duration, + lifecycleUID, lifecycleGID int, + cpuCopyMode string, +) *metrics.Metric { + // Start batching notifications to group update messages, if notifier is initialized + if notifier != nil { + notifier.StartNotification() + } else { + logrus.Warn("Notifier is nil, skipping notification batching") + } + + // Configure update parameters based on provided flags and settings. + updateParams := types.UpdateParams{ + Filter: filter, + Cleanup: cleanup, + NoRestart: noRestart, + Timeout: timeout, + MonitorOnly: monitorOnly, + LifecycleHooks: lifecycleHooks, + RollingRestart: rollingRestart, + LabelPrecedence: labelPrecedence, + NoPull: noPull, + LifecycleUID: lifecycleUID, + LifecycleGID: lifecycleGID, + CPUCopyMode: cpuCopyMode, + } + + // Execute the update action, capturing results and image IDs for cleanup. + result, cleanupImageIDs, err := Update(client, updateParams) + if err != nil { + logrus.WithError(err).Error("Update execution failed") + + return &metrics.Metric{ + Scanned: 0, + Updated: 0, + Failed: 0, + } + } + + // Perform deferred image cleanup if enabled. + if cleanup { + CleanupImages(client, cleanupImageIDs) + } + + // Log update report for debugging. + updatedNames := make([]string, 0, len(result.Updated())) + for _, r := range result.Updated() { + updatedNames = append(updatedNames, r.Name()) + } + + logrus.WithFields(logrus.Fields{ + "scanned": len(result.Scanned()), + "updated": len(result.Updated()), + "failed": len(result.Failed()), + "updated_names": updatedNames, + }).Debug("Report before notification") + + // Send the batched notification with update results, if notifier and result are initialized + if notifier != nil && result != nil { + if notificationSplitByContainer { + // Notification splitting by container is enabled - send separate notifications for each container + // instead of a single grouped notification. This provides more granular notifications when + // multiple containers are updated simultaneously. + if notificationReport && len(result.Updated()) > 0 { + // In report mode: Send separate notifications for each updated container. 
+ // Each notification contains a SingleContainerReport with the specific container + // as the primary updated item, but includes all other containers for context + // (failed, skipped, stale, fresh) to provide complete session information. + for _, updatedContainer := range result.Updated() { + // Create a minimal report focused on this specific updated container, + // but include all other session results for comprehensive context. + singleContainerReport := &session.SingleContainerReport{ + UpdatedReports: []types.ContainerReport{updatedContainer}, + ScannedReports: []types.ContainerReport{ + updatedContainer, + }, // Include all scanned for context + FailedReports: result.Failed(), // Include all failed for context + SkippedReports: result.Skipped(), // Include all skipped for context + StaleReports: result.Stale(), // Include all stale for context + FreshReports: result.Fresh(), // Include all fresh for context + } + notifier.SendNotification(singleContainerReport) + } + } else if !notificationReport { + // In log mode: Send separate notifications for each container that had "Found new image" logs. + // This handles cases where containers may not have been updated (e.g., monitor-only mode) + // but still triggered relevant log entries that should be notified separately. + entries := notifier.GetEntries() + containerNames := make(map[string]bool) + + // Extract unique container names from log entries that indicate new images were found. + // This ensures we only send notifications for containers that actually had update activity. + for _, entry := range entries { + if strings.Contains(entry.Message, "Found new image") { + if containerName, ok := entry.Data["container"].(string); ok { + containerNames[containerName] = true + } + } + } + + // For each container with update activity, filter and send only its relevant log entries. + // This prevents mixing logs from different containers in the same notification. + for containerName := range containerNames { + filteredEntries := make([]*logrus.Entry, 0) + + for _, entry := range entries { + if cn, ok := entry.Data["container"].(string); ok && cn == containerName { + filteredEntries = append(filteredEntries, entry) + } + } + + notifier.SendFilteredEntries(filteredEntries, nil) + } + } + } else { + // Standard behavior: Send a single grouped notification containing all session results + notifier.SendNotification(result) + } + } + + // Generate and log a metric summarizing the update session. 
+ metricResults := metrics.NewMetric(result) + notifications.LocalLog.WithFields(logrus.Fields{ + "scanned": metricResults.Scanned, + "updated": metricResults.Updated, + "failed": metricResults.Failed, + }).Info("Update session completed") + + return metricResults +} diff --git a/internal/actions/actions_test.go b/internal/actions/actions_test.go new file mode 100644 index 000000000..4b9c76ab8 --- /dev/null +++ b/internal/actions/actions_test.go @@ -0,0 +1,214 @@ +package actions_test + +import ( + "errors" + "time" + + "github.com/docker/docker/api/types/container" + "github.com/onsi/ginkgo/v2" + "github.com/onsi/gomega" + "github.com/stretchr/testify/mock" + + "github.com/nicholas-fedor/watchtower/internal/actions" + actionMocks "github.com/nicholas-fedor/watchtower/internal/actions/mocks" + "github.com/nicholas-fedor/watchtower/pkg/filters" + "github.com/nicholas-fedor/watchtower/pkg/types" + "github.com/nicholas-fedor/watchtower/pkg/types/mocks" +) + +var _ = ginkgo.Describe("RunUpdatesWithNotifications", func() { + var ( + client actionMocks.MockClient + notifier *mocks.MockNotifier + filter types.Filter + ) + + ginkgo.BeforeEach(func() { + filter = filters.NoFilter + }) + + ginkgo.When("notifier is nil", func() { + ginkgo.It("should not start notification batching", func() { + client = actionMocks.CreateMockClient( + &actionMocks.TestData{ + Containers: []types.Container{ + actionMocks.CreateMockContainer( + "test-container", + "test-container", + "image:latest", + time.Now(), + ), + }, + }, + false, + false, + ) + + metric := actions.RunUpdatesWithNotifications( + client, + nil, // nil notifier + false, false, filter, false, false, false, false, false, false, false, + time.Minute, 1000, 1001, "auto", + ) + + gomega.Expect(metric).NotTo(gomega.BeNil()) + }) + }) + + ginkgo.When("notifier is provided", func() { + ginkgo.BeforeEach(func() { + notifier = mocks.NewMockNotifier(ginkgo.GinkgoT()) + }) + + ginkgo.It("should start notification batching", func() { + client = actionMocks.CreateMockClient( + &actionMocks.TestData{ + Containers: []types.Container{ + actionMocks.CreateMockContainer( + "test-container", + "test-container", + "image:latest", + time.Now(), + ), + }, + }, + false, + false, + ) + + notifier.EXPECT().StartNotification().Return() + notifier.EXPECT().SendNotification(mock.Anything).Return() + + metric := actions.RunUpdatesWithNotifications( + client, + notifier, + false, false, filter, false, false, false, false, false, false, false, + time.Minute, 1000, 1001, "auto", + ) + + gomega.Expect(metric).NotTo(gomega.BeNil()) + }) + + ginkgo.It("should handle notification split by container", func() { + client = actionMocks.CreateMockClient( + &actionMocks.TestData{ + Containers: []types.Container{ + actionMocks.CreateMockContainerWithConfig( + "test-container", + "test-container", + "image:latest", + true, + false, + time.Now().Add(-time.Hour), + &container.Config{}, + ), + }, + Staleness: map[string]bool{"test-container": true}, + }, + false, + false, + ) + + notifier.EXPECT().StartNotification().Return() + notifier.EXPECT().SendNotification(mock.Anything).Return() + + metric := actions.RunUpdatesWithNotifications( + client, + notifier, + true, true, filter, false, false, false, false, false, false, false, + time.Minute, 1000, 1001, "auto", + ) + + gomega.Expect(metric).NotTo(gomega.BeNil()) + }) + + ginkgo.It("should handle standard grouped notifications", func() { + client = actionMocks.CreateMockClient( + &actionMocks.TestData{ + Containers: []types.Container{ + 
actionMocks.CreateMockContainer( + "test-container", + "test-container", + "image:latest", + time.Now(), + ), + }, + }, + false, + false, + ) + + notifier.EXPECT().StartNotification().Return() + notifier.EXPECT().SendNotification(mock.Anything).Return() + + metric := actions.RunUpdatesWithNotifications( + client, + notifier, + false, false, filter, false, false, false, false, false, false, false, + time.Minute, 1000, 1001, "auto", + ) + + gomega.Expect(metric).NotTo(gomega.BeNil()) + }) + }) + + ginkgo.When("cleanup is enabled", func() { + ginkgo.It("should call CleanupImages", func() { + client = actionMocks.CreateMockClient( + &actionMocks.TestData{ + Containers: []types.Container{ + actionMocks.CreateMockContainer( + "test-container", + "test-container", + "image:latest", + time.Now(), + ), + }, + }, + false, + false, + ) + + metric := actions.RunUpdatesWithNotifications( + client, + nil, + false, false, filter, true, false, false, false, false, false, false, + time.Minute, 1000, 1001, "auto", + ) + + gomega.Expect(metric).NotTo(gomega.BeNil()) + // CleanupImages is called internally by Update, so we check that cleanupImageIDs is processed + }) + }) + + ginkgo.When("update fails", func() { + ginkgo.It("should return zero metric on error", func() { + client = actionMocks.CreateMockClient( + &actionMocks.TestData{ + Containers: []types.Container{ + actionMocks.CreateMockContainer( + "test-container", + "test-container", + "image:latest", + time.Now(), + ), + }, + IsContainerStaleError: errors.New("mock error"), + }, + false, + false, + ) + + metric := actions.RunUpdatesWithNotifications( + client, + nil, + false, false, filter, false, false, false, false, false, false, false, + time.Minute, 1000, 1001, "auto", + ) + + gomega.Expect(metric.Scanned).To(gomega.Equal(0)) + gomega.Expect(metric.Updated).To(gomega.Equal(0)) + gomega.Expect(metric.Failed).To(gomega.Equal(0)) + }) + }) +}) diff --git a/internal/actions/check_test.go b/internal/actions/check_test.go new file mode 100644 index 000000000..0eb775868 --- /dev/null +++ b/internal/actions/check_test.go @@ -0,0 +1,248 @@ +package actions_test + +import ( + "time" + + "github.com/docker/docker/api/types/container" + "github.com/onsi/ginkgo/v2" + "github.com/onsi/gomega" + + "github.com/nicholas-fedor/watchtower/internal/actions" + "github.com/nicholas-fedor/watchtower/internal/actions/mocks" + "github.com/nicholas-fedor/watchtower/pkg/filters" + "github.com/nicholas-fedor/watchtower/pkg/types" +) + +var _ = ginkgo.Describe("CheckForSanity", func() { + ginkgo.When("rolling restarts are disabled", func() { + ginkgo.It("should return nil without checking containers", func() { + client := mocks.CreateMockClient(&mocks.TestData{}, false, false) + + err := actions.CheckForSanity(client, filters.NoFilter, false) + + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + }) + }) + + ginkgo.When("rolling restarts are enabled", func() { + ginkgo.It("should return nil when no containers have links", func() { + client := mocks.CreateMockClient( + &mocks.TestData{ + Containers: []types.Container{ + mocks.CreateMockContainer( + "container1", + "container1", + "image:latest", + time.Now(), + ), + mocks.CreateMockContainer( + "container2", + "container2", + "image:latest", + time.Now(), + ), + }, + }, + false, + false, + ) + + err := actions.CheckForSanity(client, filters.NoFilter, true) + + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + }) + + ginkgo.It("should return error when container has links", func() { + containerWithLinks := 
mocks.CreateMockContainerWithLinks( + "container1", + "container1", + "image:latest", + time.Now(), + []string{"container2"}, + mocks.CreateMockImageInfo("image:latest"), + ) + + client := mocks.CreateMockClient( + &mocks.TestData{ + Containers: []types.Container{ + containerWithLinks, + mocks.CreateMockContainer( + "container2", + "container2", + "image:latest", + time.Now(), + ), + }, + }, + false, + false, + ) + + err := actions.CheckForSanity(client, filters.NoFilter, true) + + gomega.Expect(err).To(gomega.HaveOccurred()) + gomega.Expect(err.Error()). + To(gomega.ContainSubstring("incompatible with rolling restarts")) + }) + }) +}) + +var _ = ginkgo.Describe("CheckForMultipleWatchtowerInstances", func() { + ginkgo.When("no scope is specified", func() { + ginkgo.It("should return nil when only one instance exists", func() { + client := mocks.CreateMockClient( + &mocks.TestData{ + Containers: []types.Container{ + mocks.CreateMockContainerWithConfig( + "watchtower", + "watchtower", + "watchtower:latest", + true, + false, + time.Now(), + &container.Config{ + Labels: map[string]string{ + "com.centurylinklabs.watchtower": "true", + }, + }, + ), + }, + }, + false, + false, + ) + + cleanupImageIDs := make(map[types.ImageID]bool) + err := actions.CheckForMultipleWatchtowerInstances(client, false, "", cleanupImageIDs) + + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + gomega.Expect(cleanupImageIDs).To(gomega.BeEmpty()) + }) + + ginkgo.It( + "should stop excess instances and collect image IDs when cleanup enabled", + func() { + client := mocks.CreateMockClient( + &mocks.TestData{ + Containers: []types.Container{ + mocks.CreateMockContainerWithConfig( + "watchtower-old", + "watchtower-old", + "watchtower:old", + true, + false, + time.Now().Add(-time.Hour), + &container.Config{ + Labels: map[string]string{ + "com.centurylinklabs.watchtower": "true", + }, + }, + ), + mocks.CreateMockContainerWithConfig( + "watchtower-new", + "watchtower-new", + "watchtower:new", + true, + false, + time.Now(), + &container.Config{ + Labels: map[string]string{ + "com.centurylinklabs.watchtower": "true", + }, + }, + ), + }, + }, + false, + false, + ) + + cleanupImageIDs := make(map[types.ImageID]bool) + err := actions.CheckForMultipleWatchtowerInstances( + client, + true, + "", + cleanupImageIDs, + ) + + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + gomega.Expect(client.TestData.StopContainerCount).To(gomega.Equal(1)) + gomega.Expect(cleanupImageIDs).To(gomega.HaveLen(1)) + }, + ) + }) + + ginkgo.When("scope is specified", func() { + ginkgo.It("should only clean up instances in the same scope", func() { + client := mocks.CreateMockClient( + &mocks.TestData{ + Containers: []types.Container{ + mocks.CreateMockContainerWithConfig( + "watchtower-scoped", + "watchtower-scoped", + "watchtower:latest", + true, + false, + time.Now().Add(-time.Hour), + &container.Config{ + Labels: map[string]string{ + "com.centurylinklabs.watchtower": "true", + "com.centurylinklabs.watchtower.scope": "prod", + }, + }, + ), + mocks.CreateMockContainerWithConfig( + "watchtower-unscoped", + "watchtower-unscoped", + "watchtower:latest", + true, + false, + time.Now(), + &container.Config{ + Labels: map[string]string{ + "com.centurylinklabs.watchtower": "true", + }, + }, + ), + }, + }, + false, + false, + ) + + cleanupImageIDs := make(map[types.ImageID]bool) + err := actions.CheckForMultipleWatchtowerInstances( + client, + true, + "prod", + cleanupImageIDs, + ) + + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + 
gomega.Expect(client.TestData.StopContainerCount).To(gomega.Equal(0)) + gomega.Expect(cleanupImageIDs).To(gomega.BeEmpty()) + }) + }) +}) + +var _ = ginkgo.Describe("CleanupImages", func() { + ginkgo.It("should do nothing when no images are provided", func() { + client := mocks.CreateMockClient(&mocks.TestData{}, false, false) + + actions.CleanupImages(client, nil) + gomega.Expect(client.TestData.TriedToRemoveImageCount).To(gomega.Equal(0)) + }) + + ginkgo.It("should attempt to remove each image ID", func() { + client := mocks.CreateMockClient(&mocks.TestData{}, false, false) + + imageIDs := map[types.ImageID]bool{ + "image1": true, + "image2": true, + "": true, // empty ID should be skipped + } + + actions.CleanupImages(client, imageIDs) + gomega.Expect(client.TestData.TriedToRemoveImageCount).To(gomega.Equal(2)) + }) +}) diff --git a/internal/api/api.go b/internal/api/api.go new file mode 100644 index 000000000..3a34b0308 --- /dev/null +++ b/internal/api/api.go @@ -0,0 +1,134 @@ +// Package api provides application-specific HTTP API orchestration for Watchtower, coordinating the setup and management of API endpoints with business logic integration. +package api + +import ( + "context" + "errors" + "fmt" + "net" + "net/http" + "strings" + "time" + + "github.com/sirupsen/logrus" + "github.com/spf13/cobra" + + pkgApi "github.com/nicholas-fedor/watchtower/pkg/api" + metricsAPI "github.com/nicholas-fedor/watchtower/pkg/api/metrics" + "github.com/nicholas-fedor/watchtower/pkg/api/update" + "github.com/nicholas-fedor/watchtower/pkg/container" + "github.com/nicholas-fedor/watchtower/pkg/metrics" + "github.com/nicholas-fedor/watchtower/pkg/types" +) + +// GetAPIAddr formats the API address string based on host and port. +func GetAPIAddr(host, port string) string { + address := host + ":" + port + if host != "" && strings.Contains(host, ":") && net.ParseIP(host) != nil { + address = "[" + host + "]:" + port + } + + return address +} + +// SetupAndStartAPI configures and launches the HTTP API if enabled by configuration flags. +// +// It sets up update and metrics endpoints, starts the API server in blocking or non-blocking mode, +// and handles startup errors, ensuring the API integrates seamlessly with Watchtower's update workflow. +// +// Parameters: +// - ctx: The context controlling the API's lifecycle, enabling graceful shutdown on cancellation. +// - apiHost: The host to bind the HTTP API to. +// - apiPort: The port for the HTTP API server. +// - apiToken: The authentication token for HTTP API access. +// - enableUpdateAPI: Enables the HTTP update API endpoint. +// - enableMetricsAPI: Enables the HTTP metrics API endpoint. +// - unblockHTTPAPI: Allows periodic polling alongside the HTTP API. +// - filter: The types.Filter determining which containers are targeted for updates. +// - command: The cobra.Command instance representing the executed command. +// - filterDesc: A human-readable description of the applied filter. +// - updateLock: A channel ensuring only one update runs at a time, shared with the scheduler. +// - cleanup: Boolean indicating whether to remove old images after updates. +// - client: Container client for Docker operations. +// - notifier: Notification system instance. +// - scope: Operational scope for Watchtower. +// - version: Version string. +// - runUpdatesWithNotifications: Function to run updates with notifications. +// - filterByImage: Function to filter by images. +// - defaultMetrics: Function to get default metrics. 
+// - writeStartupMessage: Function to write startup message. +// +// Returns: +// - error: An error if the API fails to start (excluding clean shutdown), nil otherwise. +func SetupAndStartAPI( + ctx context.Context, + apiHost, apiPort, apiToken string, + enableUpdateAPI, enableMetricsAPI, unblockHTTPAPI bool, + filter types.Filter, + command *cobra.Command, + filterDesc string, + updateLock chan bool, + cleanup bool, + client container.Client, + notifier types.Notifier, + scope string, + version string, + runUpdatesWithNotifications func(types.Filter, bool) *metrics.Metric, + filterByImage func([]string, types.Filter) types.Filter, + defaultMetrics func() *metrics.Metrics, + writeStartupMessage func(*cobra.Command, time.Time, string, string, container.Client, types.Notifier, string), + server ...pkgApi.HTTPServer, +) error { + // Get the formatted HTTP api address string. + address := GetAPIAddr(apiHost, apiPort) + + // Initialize the HTTP API with the configured authentication token and address. + var httpAPI *pkgApi.API + if len(server) > 0 { + httpAPI = pkgApi.New(apiToken, address, server[0]) + } else { + httpAPI = pkgApi.New(apiToken, address) + } + + // Register the update API endpoint if enabled, linking it to the update handler. + if enableUpdateAPI { + updateHandler := update.New(func(images []string) *metrics.Metric { + metric := runUpdatesWithNotifications( + filterByImage(images, filter), + cleanup, + ) + defaultMetrics().RegisterScan(metric) + + return metric + }, updateLock) + httpAPI.RegisterFunc(updateHandler.Path, updateHandler.Handle) + + if !unblockHTTPAPI { + writeStartupMessage( + command, + time.Time{}, + filterDesc, + scope, + client, + notifier, + version, + ) + } + } + + // Register the metrics API endpoint if enabled, providing access to update metrics. + if enableMetricsAPI { + metricsHandler := metricsAPI.New() + httpAPI.RegisterHandler(metricsHandler.Path, metricsHandler.Handle) + } + + // Start the API server, logging errors unless it's a clean shutdown. + if err := httpAPI.Start(ctx, enableUpdateAPI && !unblockHTTPAPI); err != nil && + !errors.Is(err, http.ErrServerClosed) { + logrus.WithError(err).Error("Failed to start API") + + return fmt.Errorf("failed to start HTTP API: %w", err) + } + + return nil +} diff --git a/internal/api/api_test.go b/internal/api/api_test.go new file mode 100644 index 000000000..4a5953e8d --- /dev/null +++ b/internal/api/api_test.go @@ -0,0 +1,217 @@ +package api_test + +import ( + "context" + "testing" + "time" + + "github.com/onsi/ginkgo/v2" + "github.com/onsi/gomega" + "github.com/spf13/cobra" + + actionMocks "github.com/nicholas-fedor/watchtower/internal/actions/mocks" + apiPkg "github.com/nicholas-fedor/watchtower/internal/api" + apiMocks "github.com/nicholas-fedor/watchtower/pkg/api/mocks" + "github.com/nicholas-fedor/watchtower/pkg/container" + "github.com/nicholas-fedor/watchtower/pkg/filters" + "github.com/nicholas-fedor/watchtower/pkg/metrics" + "github.com/nicholas-fedor/watchtower/pkg/types" + typeMocks "github.com/nicholas-fedor/watchtower/pkg/types/mocks" +) + +// TestAPI runs the Ginkgo test suite for the internal API package. 
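+// It registers the Gomega fail handler and runs the GetAPIAddr and SetupAndStartAPI specs below.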
+func TestAPI(t *testing.T) { + gomega.RegisterFailHandler(ginkgo.Fail) + ginkgo.RunSpecs(t, "Internal API Suite") +} + +var _ = ginkgo.Describe("GetAPIAddr", func() { + ginkgo.It("should format address without brackets for non-IPv6", func() { + addr := apiPkg.GetAPIAddr("localhost", "8080") + gomega.Expect(addr).To(gomega.Equal("localhost:8080")) + }) + + ginkgo.It("should format address with brackets for IPv6", func() { + addr := apiPkg.GetAPIAddr("::1", "8080") + gomega.Expect(addr).To(gomega.Equal("[::1]:8080")) + }) + + ginkgo.It("should handle empty host", func() { + addr := apiPkg.GetAPIAddr("", "8080") + gomega.Expect(addr).To(gomega.Equal(":8080")) + }) +}) + +var _ = ginkgo.Describe("SetupAndStartAPI", func() { + var ( + cmd *cobra.Command + client actionMocks.MockClient + ) + + ginkgo.BeforeEach(func() { + cmd = &cobra.Command{} + client = actionMocks.CreateMockClient(&actionMocks.TestData{}, false, false) + }) + + ginkgo.When("update API is enabled", func() { + ginkgo.It("should start API server successfully", func() { + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + + cmd.Flags().Bool("http-api-update", true, "") + cmd.Flags().Bool("http-api-metrics", false, "") + cmd.Flags().Bool("http-api-periodic-polls", false, "") + cmd.Flags().String("http-api-host", "", "") + cmd.Flags().String("http-api-port", "8080", "") + cmd.Flags().String("http-api-token", "test-token", "") + + notifier := typeMocks.NewMockNotifier(ginkgo.GinkgoT()) + + // Mock the runUpdatesWithNotifications function + runUpdatesWithNotifications := func(_ types.Filter, _ bool) *metrics.Metric { + return &metrics.Metric{Scanned: 1, Updated: 1, Failed: 0} + } + + // Mock other required functions + filterByImage := func(_ []string, filter types.Filter) types.Filter { + return filter + } + defaultMetrics := metrics.Default + writeStartupMessage := func(*cobra.Command, time.Time, string, string, container.Client, types.Notifier, string) {} + + // Create mock HTTP server to avoid binding to real ports + mockServer := apiMocks.NewMockHTTPServer(ginkgo.GinkgoT()) + mockServer.EXPECT().ListenAndServe().Return(nil) + + // Use a timeout context to avoid blocking indefinitely + timeoutCtx, timeoutCancel := context.WithTimeout(ctx, 100*time.Millisecond) + defer timeoutCancel() + + err := apiPkg.SetupAndStartAPI( + timeoutCtx, + "", "0", "test-token", + true, false, false, + filters.NoFilter, + cmd, + "test filter", + nil, // updateLock + false, // cleanup + client, + notifier, + "", // scope + "v1.0.0", + runUpdatesWithNotifications, + filterByImage, + defaultMetrics, + writeStartupMessage, + mockServer, + ) + + // Should complete without error when context times out (clean shutdown) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + }) + }) + + ginkgo.When("metrics API is enabled", func() { + ginkgo.It("should register metrics handler", func() { + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + + cmd.Flags().Bool("http-api-update", true, "") + cmd.Flags().Bool("http-api-metrics", true, "") + cmd.Flags().Bool("http-api-periodic-polls", false, "") + cmd.Flags().String("http-api-host", "", "") + cmd.Flags().String("http-api-port", "8080", "") + cmd.Flags().String("http-api-token", "test-token", "") + + notifier := typeMocks.NewMockNotifier(ginkgo.GinkgoT()) + + // Mock functions + runUpdatesWithNotifications := func(_ types.Filter, _ bool) *metrics.Metric { + return &metrics.Metric{Scanned: 0, Updated: 0, Failed: 0} + } + filterByImage := func(_ []string, filter 
types.Filter) types.Filter { + return filter + } + defaultMetrics := metrics.Default + writeStartupMessage := func(*cobra.Command, time.Time, string, string, container.Client, types.Notifier, string) {} + + // Create mock HTTP server to avoid binding to real ports + mockServer := apiMocks.NewMockHTTPServer(ginkgo.GinkgoT()) + mockServer.EXPECT().ListenAndServe().Return(nil) + + // Use a timeout context to avoid blocking + timeoutCtx, timeoutCancel := context.WithTimeout(ctx, 100*time.Millisecond) + defer timeoutCancel() + + err := apiPkg.SetupAndStartAPI( + timeoutCtx, + "", "0", "test-token", + true, true, false, + filters.NoFilter, + cmd, + "test filter", + nil, + false, + client, + notifier, + "", + "v1.0.0", + runUpdatesWithNotifications, + filterByImage, + defaultMetrics, + writeStartupMessage, + mockServer, + ) + + // Should complete without error when context times out (clean shutdown) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + }) + }) + + ginkgo.When("no APIs are enabled", func() { + ginkgo.It("should return without starting server", func() { + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + + cmd.Flags().Bool("http-api-update", false, "") + cmd.Flags().Bool("http-api-metrics", false, "") + cmd.Flags().Bool("http-api-periodic-polls", false, "") + cmd.Flags().String("http-api-host", "", "") + cmd.Flags().String("http-api-port", "8080", "") + cmd.Flags().String("http-api-token", "test-token", "") + + notifier := typeMocks.NewMockNotifier(ginkgo.GinkgoT()) + + runUpdatesWithNotifications := func(_ types.Filter, _ bool) *metrics.Metric { + return &metrics.Metric{Scanned: 0, Updated: 0, Failed: 0} + } + filterByImage := func(_ []string, filter types.Filter) types.Filter { + return filter + } + defaultMetrics := metrics.Default + writeStartupMessage := func(*cobra.Command, time.Time, string, string, container.Client, types.Notifier, string) {} + + err := apiPkg.SetupAndStartAPI( + ctx, + "", "0", "test-token", + false, false, false, + filters.NoFilter, + cmd, + "test filter", + nil, + false, + client, + notifier, + "", + "v1.0.0", + runUpdatesWithNotifications, + filterByImage, + defaultMetrics, + writeStartupMessage, + ) + + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + }) + }) +}) diff --git a/internal/config/config.go b/internal/config/config.go new file mode 100644 index 000000000..5a158e93a --- /dev/null +++ b/internal/config/config.go @@ -0,0 +1,42 @@ +// Package config provides configuration structures for Watchtower's core operations. +// It defines types that encapsulate settings and parameters used throughout the application, +// ensuring consistent and type-safe configuration management. +package config + +import ( + "github.com/spf13/cobra" + + "github.com/nicholas-fedor/watchtower/pkg/types" +) + +// RunConfig encapsulates the configuration parameters for the runMain function. +// +// It aggregates command-line flags and derived settings into a single structure, providing a cohesive way +// to pass configuration data through the CLI execution flow, ensuring all necessary parameters are accessible +// for update operations, API setup, and scheduling. +type RunConfig struct { + // Command is the cobra.Command instance representing the executed command, providing access to parsed flags. + Command *cobra.Command + // Names is a slice of container names explicitly provided as positional arguments, used for filtering. + Names []string + // Filter is the types.Filter function determining which containers are processed during updates. 
+ Filter types.Filter + // FilterDesc is a human-readable description of the applied filter, used in logging and notifications. + FilterDesc string + // RunOnce indicates whether to perform a single update and exit, set via the --run-once flag. + RunOnce bool + // UpdateOnStart enables an immediate update check on startup, then continues with periodic updates, set via the --update-on-start flag. + UpdateOnStart bool + // EnableUpdateAPI enables the HTTP update API endpoint, set via the --http-api-update flag. + EnableUpdateAPI bool + // EnableMetricsAPI enables the HTTP metrics API endpoint, set via the --http-api-metrics flag. + EnableMetricsAPI bool + // UnblockHTTPAPI allows periodic polling alongside the HTTP API, set via the --http-api-periodic-polls flag. + UnblockHTTPAPI bool + // APIToken is the authentication token for HTTP API access, set via the --http-api-token flag. + APIToken string + // APIHost is the host to bind the HTTP API to, set via the --http-api-host flag (defaults to empty string). + APIHost string + // APIPort is the port for the HTTP API server, set via the --http-api-port flag (defaults to "8080"). + APIPort string +} diff --git a/internal/logging/startup.go b/internal/logging/startup.go new file mode 100644 index 000000000..ee7678808 --- /dev/null +++ b/internal/logging/startup.go @@ -0,0 +1,199 @@ +// Package logging provides functions for logging startup information and configuring startup logging in Watchtower. +// It handles the initialization messages, notifier setup logging, and schedule information display. +package logging + +import ( + "fmt" + "strings" + "time" + + "github.com/sirupsen/logrus" + "github.com/spf13/cobra" + + "github.com/nicholas-fedor/watchtower/internal/util" + "github.com/nicholas-fedor/watchtower/pkg/container" + "github.com/nicholas-fedor/watchtower/pkg/notifications" + "github.com/nicholas-fedor/watchtower/pkg/types" +) + +// WriteStartupMessage logs or notifies startup information based on configuration flags. +// +// It reports Watchtower's version, notification setup, container filtering details, scheduling information, +// and HTTP API status, providing users with a comprehensive overview of the application's initial state. +// +// Parameters: +// - c: The cobra.Command instance, providing access to flags like --no-startup-message. +// - sched: The time.Time of the first scheduled run, or zero if no schedule is set. +// - filtering: A string describing the container filter applied (e.g., "Watching all containers"). +// - scope: The scope name for structured logging, empty string if no scope is set. +// - client: The Docker client instance used to retrieve API version information. +// - notifier: The notification system instance for sending startup messages. +// - watchtowerVersion: The version string of Watchtower to include in startup messages. +func WriteStartupMessage( + c *cobra.Command, + sched time.Time, + filtering string, + scope string, + client container.Client, + notifier types.Notifier, + watchtowerVersion string, +) { + // Retrieve flags controlling startup message behavior and API setup. 
+ noStartupMessage, _ := c.PersistentFlags().GetBool("no-startup-message") + enableUpdateAPI, _ := c.PersistentFlags().GetBool("http-api-update") + + apiListenAddr, _ := c.PersistentFlags().GetString("http-api-host") + + apiPort, _ := c.PersistentFlags().GetString("http-api-port") + if apiPort == "" { + apiPort = "8080" + } + + if apiListenAddr == "" { + apiListenAddr = ":" + apiPort + } else { + apiListenAddr = apiListenAddr + ":" + apiPort + } + + // If startup messages are suppressed, skip all logging + if noStartupMessage { + return + } + + // Configure the logger based on whether startup messages should be suppressed. + startupLog := SetupStartupLogger(noStartupMessage, notifier) + + var apiVersion string + if client != nil { + apiVersion = client.GetVersion() + } + + startupLog.Info("Watchtower ", watchtowerVersion, " using Docker API v", apiVersion) + + // Log details about configured notifiers or lack thereof. + var notifierNames []string + if notifier != nil { + notifierNames = notifier.GetNames() + } + + LogNotifierInfo(startupLog, notifierNames) + + // Log filtering information, using structured logging for scope when set + if scope != "" { + startupLog.WithField("scope", scope).Info("Only checking containers in scope") + } else { + startupLog.Debug(filtering) + } + + // Log scheduling or run mode information based on configuration. + LogScheduleInfo(startupLog, c, sched) + + // Report HTTP API status if enabled. + if enableUpdateAPI { + startupLog.Info(fmt.Sprintf("The HTTP API is enabled at %s.", apiListenAddr)) + } + + // Send batched notifications if not suppressed, ensuring startup info reaches users. + if !noStartupMessage && notifier != nil { + notifier.SendNotification(nil) + } + + // Warn about trace-level logging if enabled, as it may expose sensitive data. + if logrus.IsLevelEnabled(logrus.TraceLevel) { + startupLog.Warn( + "Trace level enabled: log will include sensitive information as credentials and tokens", + ) + } +} + +// SetupStartupLogger configures the logger for startup messages based on message suppression settings. +// +// It uses a local log entry if messages are suppressed (--no-startup-message), otherwise batches messages +// via the notifier for consolidated delivery, ensuring flexibility in how startup info is presented. +// +// Parameters: +// - noStartupMessage: A boolean indicating whether startup messages should be logged locally only. +// - notifier: The notification system instance for batching messages. +// +// Returns: +// - *logrus.Entry: A configured log entry for writing startup messages. +func SetupStartupLogger(noStartupMessage bool, notifier types.Notifier) *logrus.Entry { + if noStartupMessage { + return notifications.LocalLog + } + + log := logrus.NewEntry(logrus.StandardLogger()) + + if notifier != nil { + notifier.StartNotification() + } + + return log +} + +// LogNotifierInfo logs details about the notification setup for Watchtower. +// +// It reports the list of configured notifier names (e.g., "email, slack") or indicates no notifications +// are set up, providing visibility into how update statuses will be communicated. +// +// Parameters: +// - log: The logrus.Entry used to write the notification information. +// - notifierNames: A slice of strings representing the names of configured notifiers. 
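+//
+// A minimal usage sketch (the entry and notifier names are illustrative, not part of this change):
+//
+//	log := logrus.NewEntry(logrus.StandardLogger())
+//	LogNotifierInfo(log, []string{"email", "slack"}) // logs "Using notifications: email, slack"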
+func LogNotifierInfo(log *logrus.Entry, notifierNames []string) { + if len(notifierNames) > 0 { + log.Info("Using notifications: " + strings.Join(notifierNames, ", ")) + } else { + log.Info("Using no notifications") + } +} + +// LogScheduleInfo logs information about the scheduling or run mode configuration. +// +// It handles scheduled runs with timing details, one-time updates, or indicates no periodic runs, +// ensuring users understand when and how updates will occur. +// +// Parameters: +// - log: The logrus.Entry used to write the schedule information. +// - c: The cobra.Command instance, providing access to flags like --run-once. +// - sched: The time.Time of the first scheduled run, or zero if no schedule is set. +func LogScheduleInfo(log *logrus.Entry, c *cobra.Command, sched time.Time) { + switch { + case !sched.IsZero(): // scheduled runs + until := util.FormatDuration(time.Until(sched)) + log.Info("Scheduling next run: " + sched.Format("2006-01-02 15:04:05 -0700 MST")) + log.Info("Note that the next check will be performed in " + until) + + case func() bool { // one-time updates + v, _ := c.PersistentFlags().GetBool("run-once") + + return v + }(): + log.Info("Running a one time update.") + + case func() bool { // update on start + v, _ := c.PersistentFlags().GetBool("update-on-start") + + return v + }(): + log.Info("Running update on start, then scheduling periodic updates.") + + case func() bool { // HTTP API without periodic polling + a, _ := c.PersistentFlags().GetBool("http-api-update") + b, _ := c.PersistentFlags().GetBool("http-api-periodic-polls") + + return a && !b + }(): + log.Info("Updates via HTTP API enabled. Periodic updates are not enabled.") + + case func() bool { // HTTP API with periodic polling + a, _ := c.PersistentFlags().GetBool("http-api-update") + b, _ := c.PersistentFlags().GetBool("http-api-periodic-polls") + + return a && b + }(): + log.Info("Updates via HTTP API enabled. Periodic updates are also enabled.") + + default: // default periodic + log.Info("Periodic updates are enabled with default schedule.") + } +} diff --git a/internal/logging/startup_test.go b/internal/logging/startup_test.go new file mode 100644 index 000000000..2051b7cb4 --- /dev/null +++ b/internal/logging/startup_test.go @@ -0,0 +1,245 @@ +package logging_test + +import ( + "bytes" + "testing" + "time" + + "github.com/onsi/ginkgo/v2" + "github.com/onsi/gomega" + "github.com/sirupsen/logrus" + "github.com/spf13/cobra" + + actionMocks "github.com/nicholas-fedor/watchtower/internal/actions/mocks" + "github.com/nicholas-fedor/watchtower/internal/logging" +) + +// TestStartupLogging runs the Ginkgo test suite for the internal logging startup package. 
+func TestStartupLogging(t *testing.T) { + gomega.RegisterFailHandler(ginkgo.Fail) + ginkgo.RunSpecs(t, "Internal Logging Startup Suite") +} + +var _ = ginkgo.Describe("WriteStartupMessage", func() { + var ( + cmd *cobra.Command + client actionMocks.MockClient + buffer *bytes.Buffer + ) + + ginkgo.BeforeEach(func() { + cmd = &cobra.Command{} + client = actionMocks.CreateMockClient(&actionMocks.TestData{}, false, false) + buffer = &bytes.Buffer{} + logrus.SetOutput(buffer) + }) + + ginkgo.AfterEach(func() { + logrus.SetOutput(logrus.StandardLogger().Out) + }) + + ginkgo.It("should log startup information with no notifier", func() { + cmd.PersistentFlags().Bool("no-startup-message", false, "") + cmd.PersistentFlags().Bool("http-api-update", true, "") + cmd.PersistentFlags().String("http-api-host", "", "") + cmd.PersistentFlags().String("http-api-port", "8080", "") + + logging.WriteStartupMessage( + cmd, + time.Time{}, // no schedule + "Watching all containers", + "", // no scope + client, + nil, // no notifier + "v1.0.0", + ) + + output := buffer.String() + gomega.Expect(output).To(gomega.ContainSubstring("Watchtower v1.0.0")) + gomega.Expect(output).To(gomega.ContainSubstring("Using no notifications")) + gomega.Expect(output).To(gomega.ContainSubstring("The HTTP API is enabled")) + }) + + ginkgo.It("should suppress startup messages when flag is set", func() { + cmd.PersistentFlags().Bool("no-startup-message", true, "") + cmd.PersistentFlags().Bool("http-api-update", false, "") + + logging.WriteStartupMessage( + cmd, + time.Time{}, + "Watching all containers", + "", + client, + nil, + "v1.0.0", + ) + + // Should not log to buffer when suppressed + gomega.Expect(buffer.String()).To(gomega.BeEmpty()) + }) + + ginkgo.It("should log scope information when provided", func() { + cmd.PersistentFlags().Bool("no-startup-message", false, "") + cmd.PersistentFlags().Bool("http-api-update", false, "") + + logging.WriteStartupMessage( + cmd, + time.Time{}, + "Watching all containers", + "prod", + client, + nil, + "v1.0.0", + ) + + output := buffer.String() + gomega.Expect(output).To(gomega.ContainSubstring("Only checking containers in scope")) + }) + + ginkgo.It("should warn about trace logging", func() { + originalLevel := logrus.GetLevel() + logrus.SetLevel(logrus.TraceLevel) + defer logrus.SetLevel(originalLevel) + + cmd.PersistentFlags().Bool("no-startup-message", false, "") + cmd.PersistentFlags().Bool("http-api-update", false, "") + + logging.WriteStartupMessage( + cmd, + time.Time{}, + "Watching all containers", + "", + client, + nil, + "v1.0.0", + ) + + output := buffer.String() + gomega.Expect(output).To(gomega.ContainSubstring("Trace level enabled")) + }) +}) + +var _ = ginkgo.Describe("SetupStartupLogger", func() { + ginkgo.It("should return local log when startup messages are suppressed", func() { + logger := logging.SetupStartupLogger(true, nil) + gomega.Expect(logger).NotTo(gomega.BeNil()) + }) + + ginkgo.It("should return logger when not suppressed", func() { + logger := logging.SetupStartupLogger(false, nil) + gomega.Expect(logger).NotTo(gomega.BeNil()) + }) +}) + +var _ = ginkgo.Describe("LogNotifierInfo", func() { + var buffer *bytes.Buffer + + ginkgo.BeforeEach(func() { + buffer = &bytes.Buffer{} + logrus.SetOutput(buffer) + }) + + ginkgo.AfterEach(func() { + logrus.SetOutput(logrus.StandardLogger().Out) + }) + + ginkgo.It("should log multiple notifiers", func() { + logger := logrus.NewEntry(logrus.StandardLogger()) + logging.LogNotifierInfo(logger, []string{"slack", "email", 
"webhook"}) + + output := buffer.String() + gomega.Expect(output). + To(gomega.ContainSubstring("Using notifications: slack, email, webhook")) + }) + + ginkgo.It("should log no notifications when empty", func() { + logger := logrus.NewEntry(logrus.StandardLogger()) + logging.LogNotifierInfo(logger, []string{}) + + output := buffer.String() + gomega.Expect(output).To(gomega.ContainSubstring("Using no notifications")) + }) +}) + +var _ = ginkgo.Describe("LogScheduleInfo", func() { + var ( + cmd *cobra.Command + buffer *bytes.Buffer + ) + + ginkgo.BeforeEach(func() { + cmd = &cobra.Command{} + buffer = &bytes.Buffer{} + logrus.SetOutput(buffer) + }) + + ginkgo.AfterEach(func() { + logrus.SetOutput(logrus.StandardLogger().Out) + }) + + ginkgo.It("should log scheduled run information", func() { + logger := logrus.NewEntry(logrus.StandardLogger()) + sched := time.Now().Add(time.Hour) + + logging.LogScheduleInfo(logger, cmd, sched) + + output := buffer.String() + gomega.Expect(output).To(gomega.ContainSubstring("Scheduling next run")) + gomega.Expect(output). + To(gomega.ContainSubstring("Note that the next check will be performed in")) + }) + + ginkgo.It("should log one-time update", func() { + cmd.PersistentFlags().Bool("run-once", true, "") + logger := logrus.NewEntry(logrus.StandardLogger()) + + logging.LogScheduleInfo(logger, cmd, time.Time{}) + + output := buffer.String() + gomega.Expect(output).To(gomega.ContainSubstring("Running a one time update")) + }) + + ginkgo.It("should log update on start", func() { + cmd.PersistentFlags().Bool("update-on-start", true, "") + logger := logrus.NewEntry(logrus.StandardLogger()) + + logging.LogScheduleInfo(logger, cmd, time.Time{}) + + output := buffer.String() + gomega.Expect(output).To(gomega.ContainSubstring("Running update on start")) + }) + + ginkgo.It("should log HTTP API without periodic polls", func() { + cmd.PersistentFlags().Bool("http-api-update", true, "") + cmd.PersistentFlags().Bool("http-api-periodic-polls", false, "") + logger := logrus.NewEntry(logrus.StandardLogger()) + + logging.LogScheduleInfo(logger, cmd, time.Time{}) + + output := buffer.String() + gomega.Expect(output). + To(gomega.ContainSubstring("Updates via HTTP API enabled. Periodic updates are not enabled")) + }) + + ginkgo.It("should log HTTP API with periodic polls", func() { + cmd.PersistentFlags().Bool("http-api-update", true, "") + cmd.PersistentFlags().Bool("http-api-periodic-polls", true, "") + logger := logrus.NewEntry(logrus.StandardLogger()) + + logging.LogScheduleInfo(logger, cmd, time.Time{}) + + output := buffer.String() + gomega.Expect(output). + To(gomega.ContainSubstring("Updates via HTTP API enabled. Periodic updates are also enabled")) + }) + + ginkgo.It("should log default periodic updates", func() { + logger := logrus.NewEntry(logrus.StandardLogger()) + + logging.LogScheduleInfo(logger, cmd, time.Time{}) + + output := buffer.String() + gomega.Expect(output). + To(gomega.ContainSubstring("Periodic updates are enabled with default schedule")) + }) +}) diff --git a/internal/scheduling/scheduling.go b/internal/scheduling/scheduling.go new file mode 100644 index 000000000..27f90e841 --- /dev/null +++ b/internal/scheduling/scheduling.go @@ -0,0 +1,163 @@ +// Package scheduling provides functionality for scheduling and executing container updates in Watchtower. +// It handles periodic scheduling using cron specifications, manages update concurrency, and ensures +// graceful shutdown of scheduled operations. 
+package scheduling + +import ( + "context" + "fmt" + "os" + "os/signal" + "syscall" + "time" + + "github.com/robfig/cron" + "github.com/sirupsen/logrus" + "github.com/spf13/cobra" + + "github.com/nicholas-fedor/watchtower/pkg/container" + "github.com/nicholas-fedor/watchtower/pkg/metrics" + "github.com/nicholas-fedor/watchtower/pkg/types" +) + +// WaitForRunningUpdate waits for any currently running update to complete before proceeding with shutdown. +// It checks the lock channel status and blocks with a timeout if an update is in progress. +// Parameters: +// - ctx: The context for cancellation, allowing early shutdown on context timeout. +// - lock: The channel used to synchronize updates, ensuring only one runs at a time. +func WaitForRunningUpdate(ctx context.Context, lock chan bool) { + const updateWaitTimeout = 30 * time.Second + + logrus.Debug("Checking lock status before shutdown.") + + if len(lock) == 0 { + select { + case <-lock: + logrus.Debug("Lock acquired, update finished.") + case <-time.After(updateWaitTimeout): + logrus.Warn("Timeout waiting for running update to finish, proceeding with shutdown.") + case <-ctx.Done(): + logrus.Debug("Context cancelled, proceeding with shutdown.") + } + } else { + logrus.Debug("No update running, lock available.") + } + + logrus.Debug("Lock check completed.") +} + +// RunUpgradesOnSchedule schedules and executes periodic container updates according to the cron specification. +// +// It sets up a cron scheduler, runs updates at specified intervals, and ensures graceful shutdown on interrupt +// signals (SIGINT, SIGTERM) or context cancellation, handling concurrency with a lock channel. +// If update-on-start is enabled, it triggers the first update immediately before starting the scheduler. +// +// Parameters: +// - ctx: The context controlling the scheduler's lifecycle, enabling shutdown on cancellation. +// - c: The cobra.Command instance, providing access to flags for startup messaging. +// - filter: The types.Filter determining which containers are updated. +// - filtering: A string describing the filter, used in startup messaging. +// - lock: A channel ensuring only one update runs at a time, or nil to create a new one. +// - cleanup: Boolean indicating whether to remove old images after updates. +// - scheduleSpec: The cron-formatted schedule string that dictates when periodic container updates occur. +// - writeStartupMessage: Function to write the startup message with scheduling information. +// - runUpdatesWithNotifications: Function to perform container updates and send notifications. +// - client: The Docker client instance used for container operations. +// - scope: Defines a specific operational scope for Watchtower, limiting updates to containers matching this scope. +// - notifier: The notification system instance responsible for sending update status messages. +// - metaVersion: The version string for Watchtower, used in startup messaging. +// +// Returns: +// - error: An error if scheduling fails (e.g., invalid cron spec), nil on successful shutdown. 
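+//
+// A minimal invocation sketch (rootCmd, runUpdates, and the argument values are illustrative
+// assumptions, not defaults from this change):
+//
+//	lock := make(chan bool, 1)
+//	lock <- true
+//	err := RunUpgradesOnSchedule(ctx, rootCmd, filter, "Watching all containers", lock,
+//		true, "@every 1h", logging.WriteStartupMessage, runUpdates, client, "", notifier, meta.Version)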
+func RunUpgradesOnSchedule( + ctx context.Context, + c *cobra.Command, + filter types.Filter, + filtering string, + lock chan bool, + cleanup bool, + scheduleSpec string, + writeStartupMessage func(*cobra.Command, time.Time, string, string, container.Client, types.Notifier, string), + runUpdatesWithNotifications func(types.Filter, bool) *metrics.Metric, + client container.Client, + scope string, + notifier types.Notifier, + metaVersion string, +) error { + // Initialize lock if not provided, ensuring single-update concurrency. + if lock == nil { + lock = make(chan bool, 1) + lock <- true + } + + // Create a new cron scheduler for managing periodic updates. + scheduler := cron.New() + + // Define the update function to be used both for scheduled runs and immediate execution. + updateFunc := func() { + select { + case v := <-lock: + defer func() { lock <- v }() + + metric := runUpdatesWithNotifications(filter, cleanup) + metrics.Default().RegisterScan(metric) + default: + metrics.Default().RegisterScan(nil) + logrus.Debug("Skipped another update already running.") + } + + nextRuns := scheduler.Entries() + if len(nextRuns) > 0 { + logrus.Debug("Scheduled next run: " + nextRuns[0].Next.String()) + } + } + + // Add the update function to the cron schedule, handling concurrency and metrics. + if scheduleSpec != "" { + if err := scheduler.AddFunc( + scheduleSpec, + updateFunc); err != nil { + return fmt.Errorf("failed to schedule updates: %w", err) + } + } + + // Log startup message with the first scheduled run time. + var nextRun time.Time + if len(scheduler.Entries()) > 0 { + nextRun = scheduler.Entries()[0].Schedule.Next(time.Now()) + } + + writeStartupMessage(c, nextRun, filtering, scope, client, notifier, metaVersion) + + // Check if update-on-start is enabled and trigger immediate update if so. + updateOnStart, _ := c.PersistentFlags().GetBool("update-on-start") + if updateOnStart { + logrus.Info("Update on startup enabled - performing immediate check") + updateFunc() + } + + // Start the scheduler to begin periodic execution. + scheduler.Start() + + // Set up signal handling for graceful shutdown. + interrupt := make(chan os.Signal, 1) + signal.Notify(interrupt, os.Interrupt, syscall.SIGTERM) + + // Wait for shutdown signal or context cancellation. + select { + case <-ctx.Done(): + logrus.Debug("Context canceled, stopping scheduler...") + case <-interrupt: + logrus.Debug("Received interrupt signal, stopping scheduler...") + } + + // Stop the scheduler and wait for any running update to complete. + scheduler.Stop() + logrus.Debug("Waiting for running update to be finished...") + + WaitForRunningUpdate(ctx, lock) + + logrus.Debug("Scheduler stopped and update completed.") + + return nil +} diff --git a/internal/scheduling/scheduling_test.go b/internal/scheduling/scheduling_test.go new file mode 100644 index 000000000..9df024978 --- /dev/null +++ b/internal/scheduling/scheduling_test.go @@ -0,0 +1,202 @@ +package scheduling_test + +import ( + "context" + "testing" + "time" + + "github.com/onsi/ginkgo/v2" + "github.com/onsi/gomega" + "github.com/spf13/cobra" + + actionMocks "github.com/nicholas-fedor/watchtower/internal/actions/mocks" + "github.com/nicholas-fedor/watchtower/internal/scheduling" + "github.com/nicholas-fedor/watchtower/pkg/container" + "github.com/nicholas-fedor/watchtower/pkg/filters" + "github.com/nicholas-fedor/watchtower/pkg/metrics" + "github.com/nicholas-fedor/watchtower/pkg/types" +) + +// TestScheduling runs the Ginkgo test suite for the internal scheduling package. 
+func TestScheduling(t *testing.T) { + gomega.RegisterFailHandler(ginkgo.Fail) + ginkgo.RunSpecs(t, "Internal Scheduling Suite") +} + +var _ = ginkgo.Describe("WaitForRunningUpdate", func() { + ginkgo.It("should return immediately when no update is running", func() { + ctx := context.Background() + lock := make(chan bool, 1) + lock <- true // lock is available + + start := time.Now() + scheduling.WaitForRunningUpdate(ctx, lock) + elapsed := time.Since(start) + + gomega.Expect(elapsed).To(gomega.BeNumerically("<", 10*time.Millisecond)) + }) + + ginkgo.It("should wait for running update to complete", func() { + ctx, cancel := context.WithTimeout(context.Background(), 50*time.Millisecond) + defer cancel() + + lock := make(chan bool, 1) // lock is taken (no value in channel) + + start := time.Now() + scheduling.WaitForRunningUpdate(ctx, lock) + elapsed := time.Since(start) + + // Should have waited for the timeout + gomega.Expect(elapsed).To(gomega.BeNumerically(">=", 40*time.Millisecond)) + }) +}) + +var _ = ginkgo.Describe("RunUpgradesOnSchedule", func() { + var ( + cmd *cobra.Command + client actionMocks.MockClient + ) + + ginkgo.BeforeEach(func() { + cmd = &cobra.Command{} + client = actionMocks.CreateMockClient(&actionMocks.TestData{}, false, false) + }) + + ginkgo.It("should handle empty schedule spec", func() { + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + + cmd.Flags().Bool("update-on-start", false, "") + + runUpdatesWithNotifications := func(_ types.Filter, _ bool) *metrics.Metric { + return &metrics.Metric{Scanned: 1, Updated: 0, Failed: 0} + } + + writeStartupMessage := func(*cobra.Command, time.Time, string, string, container.Client, types.Notifier, string) {} + + // Use timeout to avoid hanging + timeoutCtx, timeoutCancel := context.WithTimeout(ctx, 10*time.Millisecond) + defer timeoutCancel() + + err := scheduling.RunUpgradesOnSchedule( + timeoutCtx, + cmd, + filters.NoFilter, + "test filter", + nil, // no lock + false, // cleanup + "", // empty schedule + writeStartupMessage, + runUpdatesWithNotifications, + client, + "", // scope + nil, // no notifier + "v1.0.0", + ) + + // Should complete without error when context times out (clean cancellation) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + }) + + ginkgo.It("should handle invalid cron spec", func() { + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + + runUpdatesWithNotifications := func(_ types.Filter, _ bool) *metrics.Metric { + return &metrics.Metric{Scanned: 0, Updated: 0, Failed: 0} + } + + writeStartupMessage := func(*cobra.Command, time.Time, string, string, container.Client, types.Notifier, string) {} + + err := scheduling.RunUpgradesOnSchedule( + ctx, + cmd, + filters.NoFilter, + "test filter", + nil, + false, + "invalid cron spec", + writeStartupMessage, + runUpdatesWithNotifications, + client, + "", + nil, + "v1.0.0", + ) + + gomega.Expect(err).To(gomega.HaveOccurred()) + gomega.Expect(err.Error()).To(gomega.ContainSubstring("failed to schedule updates")) + }) + + ginkgo.It("should trigger update on start when enabled", func() { + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + + cmd.PersistentFlags().Bool("update-on-start", true, "") + + updateCalled := false + runUpdatesWithNotifications := func(_ types.Filter, _ bool) *metrics.Metric { + updateCalled = true + + return &metrics.Metric{Scanned: 1, Updated: 1, Failed: 0} + } + + writeStartupMessage := func(*cobra.Command, time.Time, string, string, container.Client, 
types.Notifier, string) {} + + // Use timeout to avoid hanging + timeoutCtx, timeoutCancel := context.WithTimeout(ctx, 10*time.Millisecond) + defer timeoutCancel() + + err := scheduling.RunUpgradesOnSchedule( + timeoutCtx, + cmd, + filters.NoFilter, + "test filter", + nil, + false, + "", // no schedule + writeStartupMessage, + runUpdatesWithNotifications, + client, + "", + nil, + "v1.0.0", + ) + + gomega.Expect(err).NotTo(gomega.HaveOccurred()) // clean timeout + gomega.Expect(updateCalled).To(gomega.BeTrue()) + }) + + ginkgo.It("should handle context cancellation", func() { + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + + runUpdatesWithNotifications := func(_ types.Filter, _ bool) *metrics.Metric { + return &metrics.Metric{Scanned: 0, Updated: 0, Failed: 0} + } + + writeStartupMessage := func(*cobra.Command, time.Time, string, string, container.Client, types.Notifier, string) {} + + // Cancel immediately + cancelledCtx, cancelFunc := context.WithCancel(ctx) + cancelFunc() + + err := scheduling.RunUpgradesOnSchedule( + cancelledCtx, + cmd, + filters.NoFilter, + "test filter", + nil, + false, + "", // no schedule + writeStartupMessage, + runUpdatesWithNotifications, + client, + "", + nil, + "v1.0.0", + ) + + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + }) +}) diff --git a/internal/util/util.go b/internal/util/util.go index c0825c50a..89d9230b4 100644 --- a/internal/util/util.go +++ b/internal/util/util.go @@ -1,7 +1,20 @@ // Package util provides utility functions for Watchtower operations. package util -import "slices" +import ( + "fmt" + "math" + "slices" + "strings" + "time" +) + +// timeUnit represents a single unit of time (hours, minutes, or seconds) with its value and labels. +type timeUnit struct { + value int64 // The numeric value of the unit (e.g., 2 for 2 hours) + singular string // The singular form of the unit (e.g., "hour") + plural string // The plural form of the unit (e.g., "hours") +} // SliceEqual checks if two string slices are identical. // @@ -106,3 +119,97 @@ func StructMapSubtract(map1, map2 map[string]struct{}) map[string]struct{} { return result } + +// FormatDuration converts a time.Duration into a human-readable string representation. +// +// It breaks down the duration into hours, minutes, and seconds, formatting each unit with appropriate +// grammar (singular or plural) and returning a string like "1 hour, 2 minutes, 3 seconds" or "0 seconds" +// if the duration is zero, ensuring a user-friendly output for logs and notifications. +// +// Parameters: +// - duration: The time.Duration to convert into a readable string. +// +// Returns: +// - string: A formatted string representing the duration, always including at least "0 seconds". +func FormatDuration(duration time.Duration) string { + const ( + minutesPerHour = 60 // Number of minutes in an hour for duration breakdown + secondsPerMinute = 60 // Number of seconds in a minute for duration breakdown + timeUnitCount = 3 // Number of time units (hours, minutes, seconds) for pre-allocation + ) + + // Define units with calculated values from the duration, preserving order for display. + units := []timeUnit{ + {int64(duration.Hours()), "hour", "hours"}, + {int64(math.Mod(duration.Minutes(), minutesPerHour)), "minute", "minutes"}, + {int64(math.Mod(duration.Seconds(), secondsPerMinute)), "second", "seconds"}, + } + + parts := make([]string, 0, timeUnitCount) + // Format each unit, forcing inclusion of seconds if no other parts exist to avoid empty output. 
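+	// Worked example: 1h15m45s breaks down into "1 hour, 15 minutes, 45 seconds",
+	// while a zero duration skips every unit and falls back to "0 seconds" below.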
+ for i, unit := range units { + parts = append( + parts, + FormatTimeUnit( + unit.value, + unit.singular, + unit.plural, + i == len(units)-1 && len(parts) == 0, + ), + ) + } + + // Join non-empty parts, ensuring a readable output with proper separators. + joined := strings.Join(FilterEmpty(parts), ", ") + if joined == "" { + return "0 seconds" // Default output when duration is zero or all units are skipped. + } + + return joined +} + +// FormatTimeUnit formats a single time unit into a string based on its value and context. +// +// It applies singular or plural grammar, skipping leading zeros unless forced (e.g., for seconds as the last unit), +// returning an empty string for skippable zeros to maintain a concise output. +// +// Parameters: +// - value: The numeric value of the unit (e.g., 2 for 2 hours). +// - singular: The singular form of the unit (e.g., "hour"). +// - plural: The plural form of the unit (e.g., "hours"). +// - forceInclude: A boolean indicating whether to include the unit even if zero (e.g., for seconds as fallback). +// +// Returns: +// - string: The formatted unit (e.g., "1 hour", "2 minutes") or empty string if skipped. +func FormatTimeUnit(value int64, singular, plural string, forceInclude bool) string { + switch { + case value == 1: + return "1 " + singular + case value > 1 || forceInclude: + return fmt.Sprintf("%d %s", value, plural) + default: + return "" // Skip zero values unless forced. + } +} + +// FilterEmpty removes empty strings from a slice, returning only non-empty elements. +// +// It ensures the final formatted duration string excludes unnecessary parts, maintaining readability +// by filtering out zero-value units that were not explicitly included. +// +// Parameters: +// - parts: A slice of strings representing formatted time units (e.g., "1 hour", ""). +// +// Returns: +// - []string: A new slice containing only the non-empty strings from the input. +func FilterEmpty(parts []string) []string { + var filtered []string + + for _, part := range parts { + if part != "" { + filtered = append(filtered, part) + } + } + + return filtered +} diff --git a/internal/util/util_test.go b/internal/util/util_test.go index 8a3c7d5e4..82d05328b 100644 --- a/internal/util/util_test.go +++ b/internal/util/util_test.go @@ -3,6 +3,7 @@ package util import ( "testing" + "time" "github.com/stretchr/testify/assert" ) @@ -171,3 +172,128 @@ func TestMinInt_Zero(t *testing.T) { result := MinInt(0, 5) assert.Equal(t, 0, result) } + +// TestFormatDuration_Zero verifies that FormatDuration returns "0 seconds" for zero duration. +func TestFormatDuration_Zero(t *testing.T) { + t.Parallel() + + result := FormatDuration(0) + assert.Equal(t, "0 seconds", result) +} + +// TestFormatDuration_SecondsOnly verifies that FormatDuration formats seconds correctly. +func TestFormatDuration_SecondsOnly(t *testing.T) { + t.Parallel() + + result := FormatDuration(45 * time.Second) + assert.Equal(t, "45 seconds", result) +} + +// TestFormatDuration_MinutesAndSeconds verifies that FormatDuration formats minutes and seconds correctly. +func TestFormatDuration_MinutesAndSeconds(t *testing.T) { + t.Parallel() + + result := FormatDuration(2*time.Minute + 30*time.Second) + assert.Equal(t, "2 minutes, 30 seconds", result) +} + +// TestFormatDuration_HoursMinutesSeconds verifies that FormatDuration formats hours, minutes, and seconds correctly. 
+func TestFormatDuration_HoursMinutesSeconds(t *testing.T) { + t.Parallel() + + result := FormatDuration(1*time.Hour + 15*time.Minute + 45*time.Second) + assert.Equal(t, "1 hour, 15 minutes, 45 seconds", result) +} + +// TestFormatDuration_SingleValues verifies that FormatDuration uses singular forms for single units. +func TestFormatDuration_SingleValues(t *testing.T) { + t.Parallel() + + result := FormatDuration(1*time.Hour + 1*time.Minute + 1*time.Second) + assert.Equal(t, "1 hour, 1 minute, 1 second", result) +} + +// TestFormatDuration_LargeDuration verifies that FormatDuration handles large durations correctly. +func TestFormatDuration_LargeDuration(t *testing.T) { + t.Parallel() + + result := FormatDuration(25*time.Hour + 30*time.Minute) + assert.Equal(t, "25 hours, 30 minutes", result) +} + +// TestFormatTimeUnit_SingleValues verifies that FormatTimeUnit uses singular forms for single units. +func TestFormatTimeUnit_SingleValues(t *testing.T) { + t.Parallel() + + result := FormatTimeUnit(1, "hour", "hours", false) + assert.Equal(t, "1 hour", result) + + result = FormatTimeUnit(1, "minute", "minutes", false) + assert.Equal(t, "1 minute", result) + + result = FormatTimeUnit(1, "second", "seconds", false) + assert.Equal(t, "1 second", result) +} + +// TestFormatTimeUnit_PluralValues verifies that FormatTimeUnit uses plural forms for multiple units. +func TestFormatTimeUnit_PluralValues(t *testing.T) { + t.Parallel() + + result := FormatTimeUnit(2, "hour", "hours", false) + assert.Equal(t, "2 hours", result) + + result = FormatTimeUnit(5, "minute", "minutes", false) + assert.Equal(t, "5 minutes", result) +} + +// TestFormatTimeUnit_ZeroNotForced verifies that FormatTimeUnit returns empty string for zero values when not forced. +func TestFormatTimeUnit_ZeroNotForced(t *testing.T) { + t.Parallel() + + result := FormatTimeUnit(0, "hour", "hours", false) + assert.Empty(t, result) +} + +// TestFormatTimeUnit_ZeroForced verifies that FormatTimeUnit returns formatted string for zero values when forced. +func TestFormatTimeUnit_ZeroForced(t *testing.T) { + t.Parallel() + + result := FormatTimeUnit(0, "second", "seconds", true) + assert.Equal(t, "0 seconds", result) +} + +// TestFilterEmpty_Mixed verifies that FilterEmpty removes empty strings and keeps non-empty ones. +func TestFilterEmpty_Mixed(t *testing.T) { + t.Parallel() + + input := []string{"1 hour", "", "30 minutes", "", "45 seconds"} + result := FilterEmpty(input) + assert.Equal(t, []string{"1 hour", "30 minutes", "45 seconds"}, result) +} + +// TestFilterEmpty_AllEmpty verifies that FilterEmpty returns empty slice when all inputs are empty. +func TestFilterEmpty_AllEmpty(t *testing.T) { + t.Parallel() + + input := []string{"", "", ""} + result := FilterEmpty(input) + assert.Equal(t, []string(nil), result) +} + +// TestFilterEmpty_NoEmpty verifies that FilterEmpty returns all elements when none are empty. +func TestFilterEmpty_NoEmpty(t *testing.T) { + t.Parallel() + + input := []string{"1 hour", "30 minutes", "45 seconds"} + result := FilterEmpty(input) + assert.Equal(t, []string{"1 hour", "30 minutes", "45 seconds"}, result) +} + +// TestFilterEmpty_EmptyInput verifies that FilterEmpty returns empty slice for empty input. 
+func TestFilterEmpty_EmptyInput(t *testing.T) { + t.Parallel() + + input := []string{} + result := FilterEmpty(input) + assert.Equal(t, []string(nil), result) +} diff --git a/pkg/api/api.go b/pkg/api/api.go index 51afc9e97..0e41addb8 100644 --- a/pkg/api/api.go +++ b/pkg/api/api.go @@ -49,15 +49,23 @@ type API struct { Addr string // Set dynamically from flags hasHandlers bool mux *http.ServeMux // Custom mux to avoid global collisions + server HTTPServer // Optional injected server for testing } // New is a factory function creating a new API instance. -func New(token, addr string) *API { +// The server parameter is optional and allows dependency injection for testing. +func New(token, addr string, server ...HTTPServer) *API { + var injectedServer HTTPServer + if len(server) > 0 { + injectedServer = server[0] + } + api := &API{ Token: token, Addr: addr, hasHandlers: false, mux: http.NewServeMux(), + server: injectedServer, } logrus.WithFields(logrus.Fields{ "addr": api.Addr, @@ -114,17 +122,24 @@ func (api *API) Start(ctx context.Context, block bool) error { logrus.WithField("addr", api.Addr).Fatal("API token is empty or unset") } - server := &http.Server{ - Addr: api.Addr, - Handler: api.mux, - ReadTimeout: serverReadTimeout, - WriteTimeout: serverWriteTimeout, - IdleTimeout: serverIdleTimeout, - ReadHeaderTimeout: serverReadTimeout, - MaxHeaderBytes: 1 << serverMaxHeaderShift, - TLSConfig: nil, - TLSNextProto: make(map[string]func(*http.Server, *tls.Conn, http.Handler)), - BaseContext: func(_ net.Listener) context.Context { return ctx }, + var server HTTPServer + if api.server != nil { + // Use injected server for testing + server = api.server + } else { + // Create real server for production + server = &http.Server{ + Addr: api.Addr, + Handler: api.mux, + ReadTimeout: serverReadTimeout, + WriteTimeout: serverWriteTimeout, + IdleTimeout: serverIdleTimeout, + ReadHeaderTimeout: serverReadTimeout, + MaxHeaderBytes: 1 << serverMaxHeaderShift, + TLSConfig: nil, + TLSNextProto: make(map[string]func(*http.Server, *tls.Conn, http.Handler)), + BaseContext: func(_ net.Listener) context.Context { return ctx }, + } } logrus.WithField("addr", api.Addr).Info("Starting HTTP API server") diff --git a/pkg/api/update/update.go b/pkg/api/update/update.go index b9eb63cdd..d65031d59 100644 --- a/pkg/api/update/update.go +++ b/pkg/api/update/update.go @@ -93,9 +93,17 @@ func (handle *Handler) Handle(w http.ResponseWriter, r *http.Request) { } // Acquire lock, blocking if another update is in progress (requests will queue). 
+ logrus.Debug("Handler: trying to acquire lock") + chanValue := <-handle.lock - defer func() { handle.lock <- chanValue }() + logrus.Debug("Handler: acquired lock") + + defer func() { + logrus.Debug("Handler: releasing lock") + + handle.lock <- chanValue + }() if len(images) > 0 { logrus.WithField("images", images).Info("Executing targeted update") @@ -104,10 +112,14 @@ func (handle *Handler) Handle(w http.ResponseWriter, r *http.Request) { } // Execute update and get results + logrus.Debug("Handler: executing update function") + startTime := time.Now() metric := handle.fn(images) duration := time.Since(startTime) + logrus.Debug("Handler: update function completed") + // Set content type to JSON w.Header().Set("Content-Type", "application/json") w.WriteHeader(http.StatusOK) diff --git a/pkg/notifications/email.go b/pkg/notifications/email.go index ca377b9cf..34d28fbc2 100644 --- a/pkg/notifications/email.go +++ b/pkg/notifications/email.go @@ -170,3 +170,20 @@ func (e *emailTypeNotifier) GetDelay() time.Duration { return e.delay } + +// GetEntries returns nil for legacy notifiers. +// +// Returns: +// - []*logrus.Entry: Always nil. +func (e *emailTypeNotifier) GetEntries() []*logrus.Entry { + return nil +} + +// SendFilteredEntries does nothing for legacy notifiers. +// +// Parameters: +// - entries: Ignored. +// - report: Ignored. +func (e *emailTypeNotifier) SendFilteredEntries(_ []*logrus.Entry, _ types.Report) { + // Legacy notifiers do not support filtered entries. +} diff --git a/pkg/notifications/gotify.go b/pkg/notifications/gotify.go index 0f0e58657..8c678458e 100644 --- a/pkg/notifications/gotify.go +++ b/pkg/notifications/gotify.go @@ -155,3 +155,20 @@ func (n *gotifyTypeNotifier) GetURL(_ *cobra.Command) (string, error) { return urlStr, nil } + +// GetEntries returns nil for legacy notifiers. +// +// Returns: +// - []*logrus.Entry: Always nil. +func (n *gotifyTypeNotifier) GetEntries() []*logrus.Entry { + return nil +} + +// SendFilteredEntries does nothing for legacy notifiers. +// +// Parameters: +// - entries: Ignored. +// - report: Ignored. +func (n *gotifyTypeNotifier) SendFilteredEntries(_ []*logrus.Entry, _ types.Report) { + // Legacy notifiers do not support filtered entries. +} diff --git a/pkg/notifications/msteams.go b/pkg/notifications/msteams.go index be98983cb..1dd23f0e8 100644 --- a/pkg/notifications/msteams.go +++ b/pkg/notifications/msteams.go @@ -102,3 +102,20 @@ func (n *msTeamsTypeNotifier) GetURL(_ *cobra.Command) (string, error) { return urlStr, nil } + +// GetEntries returns nil for legacy notifiers. +// +// Returns: +// - []*logrus.Entry: Always nil. +func (n *msTeamsTypeNotifier) GetEntries() []*logrus.Entry { + return nil +} + +// SendFilteredEntries does nothing for legacy notifiers. +// +// Parameters: +// - entries: Ignored. +// - report: Ignored. +func (n *msTeamsTypeNotifier) SendFilteredEntries(_ []*logrus.Entry, _ types.Report) { + // Legacy notifiers do not support filtered entries. 
+} diff --git a/pkg/notifications/shoutrrr.go b/pkg/notifications/shoutrrr.go index 68a9d2812..e4b51eab8 100644 --- a/pkg/notifications/shoutrrr.go +++ b/pkg/notifications/shoutrrr.go @@ -287,7 +287,6 @@ func (n *shoutrrrTypeNotifier) StartNotification() { func (n *shoutrrrTypeNotifier) SendNotification(report types.Report) { n.entriesMutex.Lock() entries := n.entries - n.entries = nil n.entriesMutex.Unlock() n.sendEntries(entries, report) } @@ -303,6 +302,29 @@ func (n *shoutrrrTypeNotifier) Close() { <-n.done } +// GetEntries returns a copy of the queued log entries. +// +// Returns: +// - []*logrus.Entry: Copy of queued entries. +func (n *shoutrrrTypeNotifier) GetEntries() []*logrus.Entry { + n.entriesMutex.RLock() + defer n.entriesMutex.RUnlock() + + entries := make([]*logrus.Entry, len(n.entries)) + copy(entries, n.entries) + + return entries +} + +// SendFilteredEntries sends filtered log entries with an optional report. +// +// Parameters: +// - entries: Log entries to send. +// - report: Optional scan report. +func (n *shoutrrrTypeNotifier) SendFilteredEntries(entries []*logrus.Entry, report types.Report) { + n.sendEntries(entries, report) +} + // Levels returns log levels that trigger notifications. // // Returns: diff --git a/pkg/notifications/slack.go b/pkg/notifications/slack.go index 7ab834622..69f3828b6 100644 --- a/pkg/notifications/slack.go +++ b/pkg/notifications/slack.go @@ -132,3 +132,20 @@ func (s *slackTypeNotifier) GetURL(_ *cobra.Command) (string, error) { return urlStr, nil } + +// GetEntries returns nil for legacy notifiers. +// +// Returns: +// - []*logrus.Entry: Always nil. +func (s *slackTypeNotifier) GetEntries() []*logrus.Entry { + return nil +} + +// SendFilteredEntries does nothing for legacy notifiers. +// +// Parameters: +// - entries: Ignored. +// - report: Ignored. +func (s *slackTypeNotifier) SendFilteredEntries(_ []*logrus.Entry, _ types.Report) { + // Legacy notifiers do not support filtered entries. +} diff --git a/pkg/session/report.go b/pkg/session/report.go index 1d6975717..a3255d324 100644 --- a/pkg/session/report.go +++ b/pkg/session/report.go @@ -16,6 +16,25 @@ type report struct { fresh []types.ContainerReport // Fresh containers. } +// SingleContainerReport implements types.Report for individual container notifications. +// +// This struct is used when notification splitting by container is enabled (--notification-split-by-container). +// Unlike the standard report which groups all containers from a session, SingleContainerReport focuses +// on a specific container while providing context from all other containers in the session. +// This allows notifications to be sent separately for each updated container while maintaining +// awareness of the overall session state (failed, skipped, stale, fresh containers). +type SingleContainerReport struct { + UpdatedReports []types.ContainerReport // Primary container(s) that were updated in this notification + ScannedReports []types.ContainerReport // All containers scanned during the session (for context) + FailedReports []types.ContainerReport // All containers that failed to update (for context) + SkippedReports []types.ContainerReport // All containers that were skipped (for context) + StaleReports []types.ContainerReport // All containers with stale images (for context) + FreshReports []types.ContainerReport // All containers with fresh images (for context) +} + +// SortableContainers implements sort.Interface for reports. 
+type SortableContainers []types.ContainerReport + // Scanned returns scanned containers. // // Returns: @@ -64,52 +83,62 @@ func (r *report) Fresh() []types.ContainerReport { return r.fresh } -// All returns deduplicated containers, prioritized by state. +// allFromSlices returns deduplicated containers from the provided slices, prioritized by state. +// +// This function ensures that each container appears only once in the final result, with priority +// given to containers in more significant states (updated > failed > skipped > stale > fresh > scanned). +// The priority order reflects the importance of the container's update status for notification purposes. +// +// Parameters: +// - scanned, updated, failed, skipped, stale, fresh: Slices of container reports categorized by their update state. // // Returns: -// - []types.ContainerReport: Sorted, unique list. -func (r *report) All() []types.ContainerReport { - // Calculate total capacity for all containers. - allLen := len( - r.scanned, - ) + len( - r.updated, - ) + len( - r.failed, - ) + len( - r.skipped, - ) + len( - r.stale, - ) + len( - r.fresh, - ) +// - []types.ContainerReport: Sorted, unique list with containers prioritized by their most significant state. +func allFromSlices( + scanned, updated, failed, skipped, stale, fresh []types.ContainerReport, +) []types.ContainerReport { + // Calculate total capacity for all containers to pre-allocate slice efficiently. + allLen := len(scanned) + len(updated) + len(failed) + len(skipped) + len(stale) + len(fresh) all := make([]types.ContainerReport, 0, allLen) - presentIDs := map[types.ContainerID][]string{} + presentIDs := map[types.ContainerID][]string{} // Track container IDs to prevent duplicates - // Append unique containers in priority order. + // appendUnique adds containers from a slice only if they haven't been added before. + // This ensures deduplication while maintaining the priority order defined by the calling sequence. appendUnique := func(reports []types.ContainerReport) { for _, report := range reports { if _, found := presentIDs[report.ID()]; found { - continue + continue // Skip containers already added from higher-priority categories } all = append(all, report) - presentIDs[report.ID()] = nil + presentIDs[report.ID()] = nil // Mark this container ID as processed } } - appendUnique(r.updated) - appendUnique(r.failed) - appendUnique(r.skipped) - appendUnique(r.stale) - appendUnique(r.fresh) - appendUnique(r.scanned) + // Add containers in priority order: updated containers get highest priority, + // followed by failed, skipped, stale, fresh, and finally scanned (lowest priority). + // This ensures that if a container appears in multiple categories, only the most + // significant state representation is included in the final list. + appendUnique(updated) // Highest priority - containers that were successfully updated + appendUnique(failed) // Containers that failed to update + appendUnique(skipped) // Containers that were intentionally skipped + appendUnique(stale) // Containers with stale images available + appendUnique(fresh) // Containers with fresh images (no update needed) + appendUnique(scanned) // Lowest priority - all containers that were scanned - sort.Sort(sortableContainers(all)) + sort.Sort(SortableContainers(all)) // Sort final list by container ID for consistent ordering return all } +// All returns deduplicated containers, prioritized by state. +// +// Returns: +// - []types.ContainerReport: Sorted, unique list. 
+func (r *report) All() []types.ContainerReport { + return allFromSlices(r.scanned, r.updated, r.failed, r.skipped, r.stale, r.fresh) +} + // NewReport creates a report from progress data. // // Parameters: @@ -181,22 +210,19 @@ func categorizeContainer(report *report, update *ContainerStatus) { // Parameters: // - report: Report to sort. func sortCategories(report *report) { - sort.Sort(sortableContainers(report.scanned)) - sort.Sort(sortableContainers(report.updated)) - sort.Sort(sortableContainers(report.failed)) - sort.Sort(sortableContainers(report.skipped)) - sort.Sort(sortableContainers(report.stale)) - sort.Sort(sortableContainers(report.fresh)) + sort.Sort(SortableContainers(report.scanned)) + sort.Sort(SortableContainers(report.updated)) + sort.Sort(SortableContainers(report.failed)) + sort.Sort(SortableContainers(report.skipped)) + sort.Sort(SortableContainers(report.stale)) + sort.Sort(SortableContainers(report.fresh)) } -// sortableContainers implements sort.Interface for reports. -type sortableContainers []types.ContainerReport - // Len returns the slice length. // // Returns: // - int: Number of reports. -func (s sortableContainers) Len() int { +func (s SortableContainers) Len() int { return len(s) } @@ -207,14 +233,47 @@ func (s sortableContainers) Len() int { // // Returns: // - bool: True if i’s ID is less than j’s. -func (s sortableContainers) Less(i, j int) bool { +func (s SortableContainers) Less(i, j int) bool { return s[i].ID() < s[j].ID() } +// Scanned returns scanned containers. +func (r *SingleContainerReport) Scanned() []types.ContainerReport { return r.ScannedReports } + +// Updated returns updated containers (only one for split notifications). +func (r *SingleContainerReport) Updated() []types.ContainerReport { return r.UpdatedReports } + +// Failed returns failed containers. +func (r *SingleContainerReport) Failed() []types.ContainerReport { return r.FailedReports } + +// Skipped returns skipped containers. +func (r *SingleContainerReport) Skipped() []types.ContainerReport { return r.SkippedReports } + +// Stale returns stale containers. +func (r *SingleContainerReport) Stale() []types.ContainerReport { return r.StaleReports } + +// Fresh returns fresh containers. +func (r *SingleContainerReport) Fresh() []types.ContainerReport { return r.FreshReports } + +// All returns deduplicated containers, prioritized by state. +// +// Returns: +// - []types.ContainerReport: Sorted, unique list. +func (r *SingleContainerReport) All() []types.ContainerReport { + return allFromSlices( + r.ScannedReports, + r.UpdatedReports, + r.FailedReports, + r.SkippedReports, + r.StaleReports, + r.FreshReports, + ) +} + // Swap exchanges two reports. // // Parameters: // - i, j: Indices to swap. 
-func (s sortableContainers) Swap(i, j int) { +func (s SortableContainers) Swap(i, j int) { s[i], s[j] = s[j], s[i] } diff --git a/pkg/session/report_test.go b/pkg/session/report_test.go index f20bca4ff..ff5cd144f 100644 --- a/pkg/session/report_test.go +++ b/pkg/session/report_test.go @@ -831,20 +831,20 @@ func Test_sortCategories(t *testing.T) { } } -func Test_sortableContainers_Len(t *testing.T) { +func Test_SortableContainers_Len(t *testing.T) { tests := []struct { name string - s sortableContainers + s SortableContainers want int }{ { name: "empty slice", - s: sortableContainers{}, + s: SortableContainers{}, want: 0, }, { name: "two elements", - s: sortableContainers{ + s: SortableContainers{ mocks.NewMockContainerReport(t), mocks.NewMockContainerReport(t), }, @@ -854,13 +854,13 @@ func Test_sortableContainers_Len(t *testing.T) { for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { if got := tt.s.Len(); got != tt.want { - t.Errorf("sortableContainers.Len() = %v, want %v", got, tt.want) + t.Errorf("SortableContainers.Len() = %v, want %v", got, tt.want) } }) } } -func Test_sortableContainers_Less(t *testing.T) { +func Test_SortableContainers_Less(t *testing.T) { type args struct { i int j int @@ -868,13 +868,13 @@ func Test_sortableContainers_Less(t *testing.T) { tests := []struct { name string - s sortableContainers + s SortableContainers args args want bool }{ { name: "lower ID first", - s: sortableContainers{ + s: SortableContainers{ func() types.ContainerReport { mock := mocks.NewMockContainerReport(t) mock.EXPECT().ID().Return(types.ContainerID("cont1")) @@ -893,7 +893,7 @@ func Test_sortableContainers_Less(t *testing.T) { }, { name: "higher ID first", - s: sortableContainers{ + s: SortableContainers{ func() types.ContainerReport { mock := mocks.NewMockContainerReport(t) mock.EXPECT().ID().Return(types.ContainerID("cont2")) @@ -914,13 +914,13 @@ func Test_sortableContainers_Less(t *testing.T) { for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { if got := tt.s.Less(tt.args.i, tt.args.j); got != tt.want { - t.Errorf("sortableContainers.Less() = %v, want %v", got, tt.want) + t.Errorf("SortableContainers.Less() = %v, want %v", got, tt.want) } }) } } -func Test_sortableContainers_Swap(t *testing.T) { +func Test_SortableContainers_Swap(t *testing.T) { type args struct { i int j int @@ -928,13 +928,13 @@ func Test_sortableContainers_Swap(t *testing.T) { tests := []struct { name string - s sortableContainers + s SortableContainers args args want []string // Use IDs directly instead of mocks for want }{ { name: "swap first and second", - s: sortableContainers{ + s: SortableContainers{ func() types.ContainerReport { mock := mocks.NewMockContainerReport(t) // Expect 1 call in the comparison loop after swap @@ -958,7 +958,7 @@ func Test_sortableContainers_Swap(t *testing.T) { tt.s.Swap(tt.args.i, tt.args.j) if len(tt.s) != len(tt.want) { - t.Errorf("sortableContainers.Swap() length = %d, want %d", len(tt.s), len(tt.want)) + t.Errorf("SortableContainers.Swap() length = %d, want %d", len(tt.s), len(tt.want)) return } @@ -969,7 +969,7 @@ func Test_sortableContainers_Swap(t *testing.T) { gotID := tt.s[i].ID() if gotID != types.ContainerID(tt.want[i]) { t.Errorf( - "sortableContainers.Swap()[%d].ID() = %v, want %v", + "SortableContainers.Swap()[%d].ID() = %v, want %v", i, gotID, tt.want[i], diff --git a/pkg/types/mocks/Notifier.go b/pkg/types/mocks/Notifier.go index 280b77490..387874aa1 100644 --- a/pkg/types/mocks/Notifier.go +++ b/pkg/types/mocks/Notifier.go @@ -6,6 +6,7 @@ 
package mocks import ( "github.com/nicholas-fedor/watchtower/pkg/types" + "github.com/sirupsen/logrus" mock "github.com/stretchr/testify/mock" ) @@ -102,6 +103,52 @@ func (_c *MockNotifier_Close_Call) RunAndReturn(run func()) *MockNotifier_Close_ return _c } +// GetEntries provides a mock function for the type MockNotifier +func (_mock *MockNotifier) GetEntries() []*logrus.Entry { + ret := _mock.Called() + + if len(ret) == 0 { + panic("no return value specified for GetEntries") + } + + var r0 []*logrus.Entry + if returnFunc, ok := ret.Get(0).(func() []*logrus.Entry); ok { + r0 = returnFunc() + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).([]*logrus.Entry) + } + } + return r0 +} + +// MockNotifier_GetEntries_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'GetEntries' +type MockNotifier_GetEntries_Call struct { + *mock.Call +} + +// GetEntries is a helper method to define mock.On call +func (_e *MockNotifier_Expecter) GetEntries() *MockNotifier_GetEntries_Call { + return &MockNotifier_GetEntries_Call{Call: _e.mock.On("GetEntries")} +} + +func (_c *MockNotifier_GetEntries_Call) Run(run func()) *MockNotifier_GetEntries_Call { + _c.Call.Run(func(args mock.Arguments) { + run() + }) + return _c +} + +func (_c *MockNotifier_GetEntries_Call) Return(entrys []*logrus.Entry) *MockNotifier_GetEntries_Call { + _c.Call.Return(entrys) + return _c +} + +func (_c *MockNotifier_GetEntries_Call) RunAndReturn(run func() []*logrus.Entry) *MockNotifier_GetEntries_Call { + _c.Call.Return(run) + return _c +} + // GetNames provides a mock function for the type MockNotifier func (_mock *MockNotifier) GetNames() []string { ret := _mock.Called() @@ -194,6 +241,52 @@ func (_c *MockNotifier_GetURLs_Call) RunAndReturn(run func() []string) *MockNoti return _c } +// SendFilteredEntries provides a mock function for the type MockNotifier +func (_mock *MockNotifier) SendFilteredEntries(entries []*logrus.Entry, report types.Report) { + _mock.Called(entries, report) + return +} + +// MockNotifier_SendFilteredEntries_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'SendFilteredEntries' +type MockNotifier_SendFilteredEntries_Call struct { + *mock.Call +} + +// SendFilteredEntries is a helper method to define mock.On call +// - entries []*logrus.Entry +// - report types.Report +func (_e *MockNotifier_Expecter) SendFilteredEntries(entries interface{}, report interface{}) *MockNotifier_SendFilteredEntries_Call { + return &MockNotifier_SendFilteredEntries_Call{Call: _e.mock.On("SendFilteredEntries", entries, report)} +} + +func (_c *MockNotifier_SendFilteredEntries_Call) Run(run func(entries []*logrus.Entry, report types.Report)) *MockNotifier_SendFilteredEntries_Call { + _c.Call.Run(func(args mock.Arguments) { + var arg0 []*logrus.Entry + if args[0] != nil { + arg0 = args[0].([]*logrus.Entry) + } + var arg1 types.Report + if args[1] != nil { + arg1 = args[1].(types.Report) + } + run( + arg0, + arg1, + ) + }) + return _c +} + +func (_c *MockNotifier_SendFilteredEntries_Call) Return() *MockNotifier_SendFilteredEntries_Call { + _c.Call.Return() + return _c +} + +func (_c *MockNotifier_SendFilteredEntries_Call) RunAndReturn(run func(entries []*logrus.Entry, report types.Report)) *MockNotifier_SendFilteredEntries_Call { + _c.Run(run) + return _c +} + // SendNotification provides a mock function for the type MockNotifier func (_mock *MockNotifier) SendNotification(reportType types.Report) { _mock.Called(reportType) diff --git 
a/pkg/types/notifier.go b/pkg/types/notifier.go index f05a8868b..dee3ab013 100644 --- a/pkg/types/notifier.go +++ b/pkg/types/notifier.go @@ -1,5 +1,7 @@ package types +import "github.com/sirupsen/logrus" + // Notifier defines the common interface for notification services. type Notifier interface { StartNotification() // Begin queuing messages. @@ -8,4 +10,18 @@ type Notifier interface { GetNames() []string // Service names. GetURLs() []string // Service URLs. Close() // Stop and flush notifications. + + // GetEntries returns all queued logrus entries that have been captured during the session. + // This is used for notification splitting by container in log mode, allowing notifiers + // to filter and send entries specific to individual containers rather than all entries together. + GetEntries() []*logrus.Entry + + // SendFilteredEntries sends a subset of log entries with an optional report. + // This method enables fine-grained notifications where only entries relevant to specific + // containers are sent, supporting the --notification-split-by-container feature in log mode. + // The report parameter may be nil when sending filtered log entries without session context. + SendFilteredEntries( + entries []*logrus.Entry, + report Report, + ) }
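
A minimal sketch (not part of the diff) of how a caller could combine the new GetEntries and SendFilteredEntries methods with session.SingleContainerReport to emit one notification per updated container. The splitByContainer helper, the "container" log-field key, and the use of ContainerReport.Name() are illustrative assumptions; the actual split logic lives elsewhere in Watchtower.

package example

import (
	"github.com/sirupsen/logrus"

	"github.com/nicholas-fedor/watchtower/pkg/session"
	"github.com/nicholas-fedor/watchtower/pkg/types"
)

// splitByContainer (hypothetical helper) groups queued log entries by an
// assumed "container" log field and sends one filtered notification per
// updated container, keeping the rest of the report as session context.
func splitByContainer(notifier types.Notifier, report types.Report) {
	// Copy of everything queued during the session (shoutrrr notifier);
	// legacy notifiers return nil here, so nothing is split for them.
	entries := notifier.GetEntries()

	// Index entries by container name so each notification only carries
	// the log lines relevant to that container.
	byContainer := make(map[string][]*logrus.Entry)
	for _, entry := range entries {
		if name, ok := entry.Data["container"].(string); ok { // field key is an assumption
			byContainer[name] = append(byContainer[name], entry)
		}
	}

	for _, updated := range report.Updated() {
		// One primary container per notification, with the remaining
		// categories preserved for context, as SingleContainerReport intends.
		single := &session.SingleContainerReport{
			UpdatedReports: []types.ContainerReport{updated},
			ScannedReports: report.Scanned(),
			FailedReports:  report.Failed(),
			SkippedReports: report.Skipped(),
			StaleReports:   report.Stale(),
			FreshReports:   report.Fresh(),
		}
		notifier.SendFilteredEntries(byContainer[updated.Name()], single)
	}
}

In this sketch each updated container gets its own report while the failed, skipped, stale, and fresh slices are shared across notifications, which matches the context-preserving intent described in the SingleContainerReport doc comment above.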