
Commit 01c5810

feat: add directory backed UserVolumes
Resolves #11848

Signed-off-by: Mateusz Urbanek <[email protected]>
1 parent b66482c commit 01c5810

23 files changed: +647 −46 lines changed

api/resource/definitions/block/block.proto

Lines changed: 1 addition & 0 deletions
@@ -135,6 +135,7 @@ message MountSpec {
   int64 uid = 6;
   int64 gid = 7;
   bool recursive_relabel = 8;
+  string bind_target = 9;
 }
 
 // MountStatusSpec is the spec for MountStatus.

hack/release.toml

Lines changed: 17 additions & 1 deletion
@@ -130,11 +130,27 @@ When using Factory or Imager supply as `-module.sig_enfore module.sig_enforce=0`
 [notes.grub]
 title = "GRUB"
 description = """\
-Talos Linux introduces new machine configuration option `.machine.install.grubUseUKICmdline` to control whether GRUB should use the kernel command line
+Talos Linux introduces new machine configuration option `.machine.install.grubUseUKICmdline` to control whether GRUB should use the kernel command line
 provided by the boot assets (UKI) or to use the command line constructed by Talos itself (legacy behavior).
 
 This option defaults to `true` for new installations, which means that GRUB will use the command line from the UKI, making it easier to customize kernel parameters via boot asset generation.
 For existing installations upgrading to v1.12, this option will default to `false` to preserve the legacy behavior.
+"""
+
+[notes.directory-user-volumes]
+title = "New User Volume type - bind"
+description = """\
+New field in UserVolumeConfig - `volumeType` that defaults to `partition`, but can be set to `directory`.
+When set to `directory`, provisioning and filesystem operations are skipped and a directory is created under `/var/mnt/<name>`.
+
+The `directory` type enables lightweight storage volumes backed by a host directory, instead of requiring a full block device partition.
+
+When `volumeType = "directory"`:
+- A directory is created at `/var/mnt/<metadata.name>`;
+- `provisioning`, `filesystem` and `encryption` are prohibited.
+
+Note: this mode does not provide filesystem-level isolation and inherits the EPHEMERAL partition capacity limits.
+It should not be used for workloads requiring predictable storage quotas.
 """
 
 [make_deps]
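
As a usage illustration of the release note above: the new `volumeType` knob can be exercised the same way the updated unit test later in this commit does. A minimal sketch, assuming the `blockcfg` (machinery config document types) and `block` (machinery resources) import paths used elsewhere in this repository:

package main

import (
	"fmt"

	"github.com/siderolabs/go-pointer"

	// Assumed import paths, matching the aliases used in the test diff below.
	blockcfg "github.com/siderolabs/talos/pkg/machinery/config/types/block"
	"github.com/siderolabs/talos/pkg/machinery/resources/block"
)

func main() {
	// Declare a directory-backed user volume; leaving VolumeType unset keeps
	// the default "partition" behaviour.
	uv := blockcfg.NewUserVolumeConfigV1Alpha1()
	uv.MetaName = "data" // hypothetical volume name
	uv.VolumeType = pointer.To(block.VolumeTypeDirectory)

	// Per the release notes, the volume then materializes as the directory
	// /var/mnt/data, with no provisioning, filesystem, or encryption involved.
	fmt.Println(uv.Name())
}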

internal/app/machined/pkg/controllers/block/internal/volumes/close.go

Lines changed: 1 addition & 1 deletion
@@ -19,7 +19,7 @@ import (
 func Close(ctx context.Context, logger *zap.Logger, volumeContext ManagerContext) error {
     switch volumeContext.Cfg.TypedSpec().Type {
     case block.VolumeTypeTmpfs, block.VolumeTypeDirectory, block.VolumeTypeSymlink, block.VolumeTypeOverlay:
-        // tmpfs, directory, symlink and overlay volumes can be always closed
+        // volume types can be always closed
         volumeContext.Status.Phase = block.VolumePhaseClosed
 
         return nil

internal/app/machined/pkg/controllers/block/internal/volumes/locate.go

Lines changed: 1 addition & 1 deletion
@@ -30,7 +30,7 @@ func LocateAndProvision(ctx context.Context, logger *zap.Logger, volumeContext M
 
     switch volumeType {
     case block.VolumeTypeTmpfs, block.VolumeTypeDirectory, block.VolumeTypeSymlink, block.VolumeTypeOverlay:
-        // tmpfs, directory, symlink and overlays volumes are always ready
+        // volume types above are always ready
         volumeContext.Status.Phase = block.VolumePhaseReady
 
         return nil

internal/app/machined/pkg/controllers/block/mount.go

Lines changed: 113 additions & 2 deletions
@@ -293,7 +293,7 @@ func (ctrl *MountController) handleMountOperation(
 ) error {
     switch volumeStatus.TypedSpec().Type {
     case block.VolumeTypeDirectory:
-        return ctrl.handleDirectoryMountOperation(rootPath, mountTarget, volumeStatus)
+        return ctrl.handleDirectoryMountOperation(logger, rootPath, mountTarget, mountRequest, volumeStatus)
     case block.VolumeTypeOverlay:
         return ctrl.handleOverlayMountOperation(logger, filepath.Join(rootPath, mountTarget), mountRequest, volumeStatus)
     case block.VolumeTypeSymlink:
@@ -312,8 +312,10 @@ func (ctrl *MountController) handleMountOperation(
 }
 
 func (ctrl *MountController) handleDirectoryMountOperation(
+    logger *zap.Logger,
     rootPath string,
     target string,
+    mountRequest *block.MountRequest,
     volumeStatus *block.VolumeStatus,
 ) error {
     targetPath := filepath.Join(rootPath, target)
@@ -333,9 +335,93 @@ func (ctrl *MountController) handleDirectoryMountOperation(
         }
     }
 
+    if volumeStatus.TypedSpec().MountSpec.BindTarget != nil {
+        if err := ctrl.handleBindMountOperation(
+            logger,
+            rootPath, target, *volumeStatus.TypedSpec().MountSpec.BindTarget,
+            mountRequest, volumeStatus,
+        ); err != nil {
+            return fmt.Errorf("target path %q is not a directory", targetPath)
+        }
+    }
+
     return ctrl.updateTargetSettings(targetPath, volumeStatus.TypedSpec().MountSpec)
 }
 
+func (ctrl *MountController) handleBindMountOperation(
+    logger *zap.Logger,
+    rootPath string,
+    source string,
+    bindTarget string,
+    mountRequest *block.MountRequest,
+    volumeStatus *block.VolumeStatus,
+) error {
+    _, ok := ctrl.activeMounts[mountRequest.Metadata().ID()]
+
+    // mount hasn't been done yet
+    if !ok {
+        mountSource := filepath.Join(rootPath, source)
+        mountTarget := filepath.Join(rootPath, bindTarget)
+
+        if err := os.Mkdir(mountTarget, volumeStatus.TypedSpec().MountSpec.FileMode); err != nil {
+            if !os.IsExist(err) {
+                return fmt.Errorf("failed to create target path: %w", err)
+            }
+
+            st, err := os.Stat(mountTarget)
+            if err != nil {
+                return fmt.Errorf("failed to stat target path: %w", err)
+            }
+
+            if !st.IsDir() {
+                return fmt.Errorf("target path %q is not a directory", mountTarget)
+            }
+        }
+
+        var opts []mount.ManagerOption
+
+        opts = append(opts,
+            mount.WithSelinuxLabel(volumeStatus.TypedSpec().MountSpec.SelinuxLabel),
+        )
+
+        manager := mount.NewManager(slices.Concat(
+            []mount.ManagerOption{
+                mount.WithTarget(mountTarget),
+                mount.WithOpentreeFromPath(mountSource),
+                mount.WithPrinter(logger.Sugar().Infof),
+            },
+            opts,
+        )...)
+
+        mountpoint, err := manager.Mount()
+        if err != nil {
+            return fmt.Errorf("failed to mount %q: %w", mountRequest.Metadata().ID(), err)
+        }
+
+        if !mountRequest.TypedSpec().ReadOnly && !mountRequest.TypedSpec().Detached {
+            if err = ctrl.updateTargetSettings(mountTarget, volumeStatus.TypedSpec().MountSpec); err != nil {
+                manager.Unmount() //nolint:errcheck
+
+                return fmt.Errorf("failed to update target settings %q: %w", mountRequest.Metadata().ID(), err)
+            }
+        }
+
+        logger.Info("bind mount",
+            zap.String("volume", volumeStatus.Metadata().ID()),
+            zap.String("source", mountSource),
+            zap.String("target", mountTarget),
+        )
+
+        ctrl.activeMounts[mountRequest.Metadata().ID()] = &mountContext{
+            point:     mountpoint,
+            readOnly:  mountRequest.TypedSpec().ReadOnly,
+            unmounter: manager.Unmount,
+        }
+    }
+
+    return nil
+}
+
 //nolint:gocyclo
 func (ctrl *MountController) handleSymlinkMountOperation(
     logger *zap.Logger,
@@ -645,7 +731,7 @@ func (ctrl *MountController) handleUnmountOperation(
 ) error {
     switch volumeStatus.TypedSpec().Type {
     case block.VolumeTypeDirectory:
-        return nil
+        return ctrl.handleDirectoryUnmountOperation(logger, mountRequest, volumeStatus)
     case block.VolumeTypeTmpfs:
         return fmt.Errorf("not implemented yet")
     case block.VolumeTypeDisk, block.VolumeTypePartition, block.VolumeTypeOverlay:
@@ -687,6 +773,31 @@ func (ctrl *MountController) handleDiskUnmountOperation(
     return nil
 }
 
+func (ctrl *MountController) handleDirectoryUnmountOperation(
+    logger *zap.Logger,
+    mountRequest *block.MountRequest,
+    _ *block.VolumeStatus,
+) error {
+    mountCtx, ok := ctrl.activeMounts[mountRequest.Metadata().ID()]
+    if !ok {
+        return nil
+    }
+
+    if err := mountCtx.unmounter(); err != nil {
+        return fmt.Errorf("failed to unmount %q: %w", mountRequest.Metadata().ID(), err)
+    }
+
+    delete(ctrl.activeMounts, mountRequest.Metadata().ID())
+
+    logger.Info("volume unmount",
+        zap.String("volume", mountRequest.Metadata().ID()),
+        zap.String("source", mountCtx.point.Source()),
+        zap.String("target", mountCtx.point.Target()),
+    )
+
+    return nil
+}
+
 func (ctrl *MountController) handleSymlinkUmountOperation(
     mountRequest *block.MountRequest,
 ) error {
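
The core of the new mount path is handleBindMountOperation above: the directory backing the volume is re-attached at its bind target through the mount manager (WithOpentreeFromPath plus WithTarget), and the resulting mountpoint is tracked in activeMounts so the new handleDirectoryUnmountOperation can detach it later. As a rough standalone illustration of the same bind-mount idea — a hedged sketch using the plain MS_BIND flag from golang.org/x/sys/unix rather than the mount.Manager API used by this controller, with hypothetical paths:

package main

import (
	"fmt"
	"os"

	"golang.org/x/sys/unix"
)

// bindMount makes the contents of source visible at target, roughly what the
// controller achieves with mount.WithOpentreeFromPath + mount.WithTarget.
func bindMount(source, target string, mode os.FileMode) error {
	// Create the target directory if it does not exist yet, mirroring the
	// os.Mkdir / os.Stat checks in handleBindMountOperation.
	if err := os.Mkdir(target, mode); err != nil && !os.IsExist(err) {
		return fmt.Errorf("failed to create target path: %w", err)
	}

	// MS_BIND attaches an existing directory tree at a second location;
	// unmounting the target later detaches it without touching the source.
	if err := unix.Mount(source, target, "", unix.MS_BIND, ""); err != nil {
		return fmt.Errorf("failed to bind mount %q onto %q: %w", source, target, err)
	}

	return nil
}

func main() {
	// Hypothetical paths: a directory user volume named "data" bound to a second location.
	if err := bindMount("/var/mnt/data", "/var/mnt/bind-target", 0o755); err != nil {
		fmt.Fprintln(os.Stderr, err)
		os.Exit(1)
	}
}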

internal/app/machined/pkg/controllers/block/user_volume_config.go

Lines changed: 39 additions & 0 deletions
@@ -16,6 +16,7 @@ import (
     "github.com/siderolabs/gen/optional"
     "github.com/siderolabs/gen/xerrors"
     "github.com/siderolabs/gen/xslices"
+    "github.com/siderolabs/go-pointer"
     "go.uber.org/zap"
 
     "github.com/siderolabs/talos/internal/pkg/partition"
@@ -301,6 +302,26 @@ func (ctrl *UserVolumeConfigController) handleUserVolumeConfig(
     userVolumeConfig configconfig.UserVolumeConfig,
     v *block.VolumeConfig,
     volumeID string,
+) error {
+    switch userVolumeConfig.Type().ValueOr(block.VolumeTypePartition) {
+    case block.VolumeTypePartition:
+        return ctrl.handlePartitionUserVolumeConfig(userVolumeConfig, v, volumeID)
+
+    case block.VolumeTypeDirectory:
+        return ctrl.handleDirectoryUserVolumeConfig(userVolumeConfig, v)
+
+    case block.VolumeTypeDisk, block.VolumeTypeTmpfs, block.VolumeTypeSymlink, block.VolumeTypeOverlay:
+        fallthrough
+
+    default:
+        return fmt.Errorf("unsupported volume type %q", userVolumeConfig.Type().ValueOr(block.VolumeTypePartition).String())
+    }
+}
+
+func (ctrl *UserVolumeConfigController) handlePartitionUserVolumeConfig(
+    userVolumeConfig configconfig.UserVolumeConfig,
+    v *block.VolumeConfig,
+    volumeID string,
 ) error {
     diskSelector, ok := userVolumeConfig.Provisioning().DiskSelector().Get()
     if !ok {
@@ -343,6 +364,24 @@ func (ctrl *UserVolumeConfigController) handleUserVolumeConfig(
     return nil
 }
 
+func (ctrl *UserVolumeConfigController) handleDirectoryUserVolumeConfig(
+    userVolumeConfig configconfig.UserVolumeConfig,
+    v *block.VolumeConfig,
+) error {
+    v.TypedSpec().Type = block.VolumeTypeDirectory
+    v.TypedSpec().Mount = block.MountSpec{
+        TargetPath:   userVolumeConfig.Name(),
+        ParentID:     constants.UserVolumeMountPoint,
+        SelinuxLabel: constants.EphemeralSelinuxLabel,
+        FileMode:     0o755,
+        UID:          0,
+        GID:          0,
+        BindTarget:   pointer.To(userVolumeConfig.Name()),
+    }
+
+    return nil
+}
+
 //nolint:dupl
 func (ctrl *UserVolumeConfigController) handleRawVolumeConfig(
     rawVolumeConfig configconfig.RawVolumeConfig,
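
handleDirectoryUserVolumeConfig above skips disk selection and provisioning entirely: it only fills in a MountSpec whose ParentID is the user volume mount point and whose TargetPath and BindTarget are the volume name, so the volume materializes as a plain directory. A small sketch of how the final path is composed, assuming the user volume mount point resolves to /var/mnt as stated in the release notes:

package main

import (
	"fmt"
	"path/filepath"
)

// Assumption: constants.UserVolumeMountPoint resolves to /var/mnt (per the release notes).
const userVolumeMountPoint = "/var/mnt"

func main() {
	name := "data" // hypothetical volume name, i.e. UserVolumeConfig.Name()

	// ParentID + TargetPath give the directory backing the volume.
	fmt.Println(filepath.Join(userVolumeMountPoint, name)) // /var/mnt/data
}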

internal/app/machined/pkg/controllers/block/user_volume_config_test.go

Lines changed: 30 additions & 22 deletions
@@ -9,6 +9,7 @@ import (
     "time"
 
     "github.com/cosi-project/runtime/pkg/resource"
+    "github.com/siderolabs/go-pointer"
     "github.com/stretchr/testify/assert"
     "github.com/stretchr/testify/suite"
 
@@ -64,12 +65,16 @@ func (suite *UserVolumeConfigSuite) TestReconcileUserVolumesSwapVolumes() {
         },
     }
 
+    uv3 := blockcfg.NewUserVolumeConfigV1Alpha1()
+    uv3.MetaName = "data3"
+    uv3.VolumeType = pointer.To(block.VolumeTypeDirectory)
+
     sv1 := blockcfg.NewSwapVolumeConfigV1Alpha1()
     sv1.MetaName = "swap"
     suite.Require().NoError(sv1.ProvisioningSpec.DiskSelectorSpec.Match.UnmarshalText([]byte(`disk.transport == "nvme"`)))
     sv1.ProvisioningSpec.ProvisioningMaxSize = blockcfg.MustByteSize("2GiB")
 
-    ctr, err := container.New(uv1, uv2, sv1)
+    ctr, err := container.New(uv1, uv2, uv3, sv1)
     suite.Require().NoError(err)
 
     cfg := config.NewMachineConfig(ctr)
@@ -78,20 +83,28 @@ func (suite *UserVolumeConfigSuite) TestReconcileUserVolumesSwapVolumes() {
     userVolumes := []string{
         constants.UserVolumePrefix + "data1",
         constants.UserVolumePrefix + "data2",
+        constants.UserVolumePrefix + "data3",
     }
 
     ctest.AssertResources(suite, userVolumes, func(vc *block.VolumeConfig, asrt *assert.Assertions) {
         asrt.Contains(vc.Metadata().Labels().Raw(), block.UserVolumeLabel)
 
-        asrt.Equal(block.VolumeTypePartition, vc.TypedSpec().Type)
-        asrt.Contains(userVolumes, vc.TypedSpec().Provisioning.PartitionSpec.Label)
+        switch vc.Metadata().ID() {
+        case userVolumes[0], userVolumes[1]:
+            asrt.Equal(block.VolumeTypePartition, vc.TypedSpec().Type)
 
-        locator, err := vc.TypedSpec().Locator.Match.MarshalText()
-        asrt.NoError(err)
+            asrt.Contains(userVolumes, vc.TypedSpec().Provisioning.PartitionSpec.Label)
 
-        asrt.Contains(string(locator), vc.TypedSpec().Provisioning.PartitionSpec.Label)
+            locator, err := vc.TypedSpec().Locator.Match.MarshalText()
+            asrt.NoError(err)
 
-        asrt.Contains([]string{"data1", "data2"}, vc.TypedSpec().Mount.TargetPath)
+            asrt.Contains(string(locator), vc.TypedSpec().Provisioning.PartitionSpec.Label)
+
+        case userVolumes[2]:
+            asrt.Equal(block.VolumeTypeDirectory, vc.TypedSpec().Type)
+        }
+
+        asrt.Contains([]string{"data1", "data2", "data3"}, vc.TypedSpec().Mount.TargetPath)
         asrt.Equal(constants.UserVolumeMountPoint, vc.TypedSpec().Mount.ParentID)
 
         switch vc.Metadata().ID() {
@@ -138,39 +151,34 @@ func (suite *UserVolumeConfigSuite) TestReconcileUserVolumesSwapVolumes() {
     newCfg.Metadata().SetVersion(cfg.Metadata().Version())
     suite.Update(newCfg)
 
-    // controller should tear down removed volumes
+    // controller should tear down removed resources
     ctest.AssertResources(suite, userVolumes, func(vc *block.VolumeConfig, asrt *assert.Assertions) {
-        if vc.Metadata().ID() == userVolumes[0] {
-            asrt.Equal(resource.PhaseTearingDown, vc.Metadata().Phase())
-        } else {
+        if vc.Metadata().ID() == userVolumes[1] {
            asrt.Equal(resource.PhaseRunning, vc.Metadata().Phase())
-        }
-    })
-
-    // controller should tear down removed volume resources
-    ctest.AssertResources(suite, userVolumes, func(vc *block.VolumeConfig, asrt *assert.Assertions) {
-        if vc.Metadata().ID() == userVolumes[0] {
-            asrt.Equal(resource.PhaseTearingDown, vc.Metadata().Phase())
        } else {
-            asrt.Equal(resource.PhaseRunning, vc.Metadata().Phase())
+            asrt.Equal(resource.PhaseTearingDown, vc.Metadata().Phase())
        }
    })
 
    ctest.AssertResources(suite, userVolumes, func(vmr *block.VolumeMountRequest, asrt *assert.Assertions) {
-        if vmr.Metadata().ID() == userVolumes[0] {
-            asrt.Equal(resource.PhaseTearingDown, vmr.Metadata().Phase())
-        } else {
+        if vmr.Metadata().ID() == userVolumes[1] {
            asrt.Equal(resource.PhaseRunning, vmr.Metadata().Phase())
+        } else {
+            asrt.Equal(resource.PhaseTearingDown, vmr.Metadata().Phase())
        }
    })
 
    // remove finalizers
    suite.RemoveFinalizer(block.NewVolumeConfig(block.NamespaceName, userVolumes[0]).Metadata(), "test")
    suite.RemoveFinalizer(block.NewVolumeMountRequest(block.NamespaceName, userVolumes[0]).Metadata(), "test")
+    suite.RemoveFinalizer(block.NewVolumeConfig(block.NamespaceName, userVolumes[2]).Metadata(), "test")
+    suite.RemoveFinalizer(block.NewVolumeMountRequest(block.NamespaceName, userVolumes[2]).Metadata(), "test")
 
    // now the resources should be removed
    ctest.AssertNoResource[*block.VolumeConfig](suite, userVolumes[0])
    ctest.AssertNoResource[*block.VolumeMountRequest](suite, userVolumes[0])
+    ctest.AssertNoResource[*block.VolumeConfig](suite, userVolumes[2])
+    ctest.AssertNoResource[*block.VolumeMountRequest](suite, userVolumes[2])
 }
 
 func (suite *UserVolumeConfigSuite) TestReconcileRawVolumes() {
