Issue Description
If you have a Dockerfile with multiple stages that is used by multiple services, like this:
Repository: https://gitlab.com/chocolateimage/multistage
compose.yaml
services:
  service1:
    build:
      target: service1
      dockerfile: Dockerfile
  service2:
    build:
      target: service2
      dockerfile: Dockerfile
  service3:
    build:
      target: service3
      dockerfile: Dockerfile
  service4:
    build:
      target: service4
      dockerfile: Dockerfile
And the Dockerfile:
FROM alpine:latest AS base
WORKDIR /app
FROM base AS deps
COPY dependency-generator.sh /app/
RUN [ "./dependency-generator.sh" ]
FROM base AS service-base
COPY service-runner.sh /app
# Pretend all of the services below do different things
FROM service-base AS service1
COPY --from=deps /app/deps /app/deps
ENTRYPOINT [ "./service-runner.sh", "service1" ]
FROM service-base AS service2
COPY --from=deps /app/deps /app/deps
ENTRYPOINT [ "./service-runner.sh", "service2" ]
FROM service-base AS service3
COPY --from=deps /app/deps /app/deps
ENTRYPOINT [ "./service-runner.sh", "service3" ]
FROM service-base AS service4
COPY --from=deps /app/deps /app/deps
ENTRYPOINT [ "./service-runner.sh", "service4" ]
The deps stage gets executed multiple times; this does not happen with Docker itself. In this example the stage does not do much, but in a real setting, where it would download dependencies and do CPU-intensive work, this can slow down the build process by a large amount.
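A rough way to see this is to count how often the script's output shows up in the build log (assuming a fresh checkout with no previously cached layers; grep -c just counts matching lines):

podman-compose build 2>&1 | grep -c 'Generating'

Here the count comes out greater than 1, roughly one hit per service, whereas a single hit is what you would expect.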
Steps to reproduce the issue
1. git clone https://gitlab.com/chocolateimage/multistage.git
2. podman-compose up
3. The line "Generating" your dependencies gets printed multiple times (the script that prints it is sketched below).
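For context, dependency-generator.sh is the script that prints that line; it only pretends to generate dependencies. Something along the lines of the following sketch is enough to show the symptom (the file has to be executable, chmod +x, for the exec-form RUN in the Dockerfile to work):

#!/bin/sh
# Minimal stand-in for dependency-generator.sh: print a marker line and create
# /app/deps so the later COPY --from=deps steps have something to copy.
echo '"Generating" your dependencies'
mkdir -p /app/deps
date > /app/deps/generated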
Describe the results you received
The line "Generating" your dependencies from the deps stage gets printed multiple times.
Describe the results you expected
I expected that, just like with Docker, the deps stage would be built only once and its result reused by the individual services.
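For comparison, recent Docker Compose versions can produce the same count (--progress=plain keeps the RUN output visible); on a first, uncached build the line should appear only once, because the shared deps stage is built a single time and then reused by every target:

docker compose build --progress=plain 2>&1 | grep -c 'Generating'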
podman info output
host:
  arch: amd64
  buildahVersion: 1.40.0
  cgroupControllers:
  - cpu
  - memory
  - pids
  cgroupManager: systemd
  cgroupVersion: v2
  conmon:
    package: conmon-1:2.1.13-1
    path: /usr/bin/conmon
    version: 'conmon version 2.1.13, commit: 82de887596ed8ee6d9b2ee85e4f167f307bb569b'
  cpuUtilization:
    idlePercent: 99.52
    systemPercent: 0.2
    userPercent: 0.28
  cpus: 4
  databaseBackend: sqlite
  distribution:
    distribution: arch
    version: unknown
  eventLogger: journald
  freeLocks: 2043
  hostname: container-test
  idMappings:
    gidmap:
    - container_id: 0
      host_id: 1000
      size: 1
    - container_id: 1
      host_id: 100000
      size: 65536
    uidmap:
    - container_id: 0
      host_id: 1000
      size: 1
    - container_id: 1
      host_id: 100000
      size: 65536
  kernel: 6.12.31-1-lts
  linkmode: dynamic
  logDriver: journald
  memFree: 3378524160
  memTotal: 4084711424
  networkBackend: netavark
  networkBackendInfo:
    backend: netavark
    dns:
      package: aardvark-dns-1.15.0-1
      path: /usr/lib/podman/aardvark-dns
      version: aardvark-dns 1.15.0
    package: netavark-1.15.0-1
    path: /usr/lib/podman/netavark
    version: netavark 1.15.0
  ociRuntime:
    name: crun
    package: crun-1.21-1
    path: /usr/bin/crun
    version: |-
      crun version 1.21
      commit: 10269840aa07fb7e6b7e1acff6198692d8ff5c88
      rundir: /run/user/1000/crun
      spec: 1.0.0
      +SYSTEMD +SELINUX +APPARMOR +CAP +SECCOMP +EBPF +CRIU +YAJL
  os: linux
  pasta:
    executable: /usr/bin/pasta
    package: passt-2025_05_12.8ec1341-1
    version: ""
  remoteSocket:
    exists: true
    path: /run/user/1000/podman/podman.sock
  rootlessNetworkCmd: pasta
  security:
    apparmorEnabled: false
    capabilities: CAP_CHOWN,CAP_DAC_OVERRIDE,CAP_FOWNER,CAP_FSETID,CAP_KILL,CAP_NET_BIND_SERVICE,CAP_SETFCAP,CAP_SETGID,CAP_SETPCAP,CAP_SETUID,CAP_SYS_CHROOT
    rootless: true
    seccompEnabled: true
    seccompProfilePath: /etc/containers/seccomp.json
    selinuxEnabled: false
  serviceIsRemote: false
  slirp4netns:
    executable: ""
    package: ""
    version: ""
  swapFree: 2042097664
  swapTotal: 2042097664
  uptime: 0h 9m 45.00s
  variant: ""
plugins:
  authorization: null
  log:
  - k8s-file
  - none
  - passthrough
  - journald
  network:
  - bridge
  - macvlan
  - ipvlan
  volume:
  - local
registries: {}
store:
  configFile: /home/user/.config/containers/storage.conf
  containerStore:
    number: 4
    paused: 0
    running: 0
    stopped: 4
  graphDriverName: overlay
  graphOptions: {}
  graphRoot: /home/user/.local/share/containers/storage
  graphRootAllocated: 51462569984
  graphRootUsed: 2777255936
  graphStatus:
    Backing Filesystem: extfs
    Native Overlay Diff: "true"
    Supports d_type: "true"
    Supports shifting: "false"
    Supports volatile: "true"
    Using metacopy: "false"
  imageCopyTmpDir: /var/tmp
  imageStore:
    number: 26
  runRoot: /run/user/1000/containers
  transientStore: false
  volumePath: /home/user/.local/share/containers/storage/volumes
version:
  APIVersion: 5.5.0
  Built: 1747641392
  BuiltTime: Mon May 19 09:56:32 2025
  GitCommit: 0dbcb51477ee7ab8d3b47d30facf71fc38bb0c98
  GoVersion: go1.24.3
  Os: linux
  OsArch: linux/amd64
  Version: 5.5.0
Podman in a container
No
Privileged Or Rootless
None
Upstream Latest Release
Yes
Additional environment details
In a fresh libvirt/QEMU machine with Arch Linux
Additional information
I uploaded the example code to GitLab: https://gitlab.com/chocolateimage/multistage