10 changes: 10 additions & 0 deletions hack/release.toml
@@ -153,6 +153,16 @@ When `volumeType = "directory"`:

Note: this mode does not provide filesystem-level isolation and inherits the EPHEMERAL partition capacity limits.
It should not be used for workloads requiring predictable storage quotas.
"""

[notes.disk-user-volumes]
title = "New User Volume type - disk"
description = """\
`volumeType` in UserVolumeConfig can be set to `disk`.
When set to `disk`, a full block device is used for the volume.

When `volumeType = "disk"`:
- Size-specific settings (`minSize`, `maxSize`, `grow`) are not allowed in the provisioning block.
"""

[make_deps]
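As a minimal sketch, this is how a disk-type user volume could be declared through the v1alpha1 config API, mirroring the test changes later in this diff; the import paths and exact field names are assumptions based on the machinery packages imported there:

```go
package main

import (
	"fmt"

	"github.com/siderolabs/go-pointer"

	blockcfg "github.com/siderolabs/talos/pkg/machinery/config/types/block"
	"github.com/siderolabs/talos/pkg/machinery/resources/block"
)

func main() {
	uv := blockcfg.NewUserVolumeConfigV1Alpha1()
	uv.MetaName = "data-disk1"
	uv.VolumeType = pointer.To(block.VolumeTypeDisk) // new in this release

	// The disk selector is a CEL expression over disks; with volumeType = "disk"
	// the whole matched device is consumed, so minSize/maxSize/grow stay unset.
	if err := uv.ProvisioningSpec.DiskSelectorSpec.Match.UnmarshalText([]byte(`!system_disk`)); err != nil {
		panic(err)
	}

	uv.FilesystemSpec.FilesystemType = block.FilesystemTypeXFS

	fmt.Println("configured disk volume:", uv.MetaName)
}
```

Since the volume consumes the entire matched device, the size knobs have nothing left to control, which is the rationale behind the validation rule stated in the release note above.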
@@ -115,7 +115,7 @@ func Format(ctx context.Context, logger *zap.Logger, volumeContext ManagerContex
makefsOptions = append(makefsOptions, makefs.WithConfigFile(quirks.New("").XFSMkfsConfig()))

if err = makefs.XFS(volumeContext.Status.MountLocation, makefsOptions...); err != nil {
return fmt.Errorf("error formatting XFS: %w", err)
return xerrors.NewTaggedf[Retryable]("error formatting XFS: %w", err)
}
case block.FilesystemTypeEXT4:
var makefsOptions []makefs.Option
@@ -125,14 +125,14 @@
}

if err = makefs.Ext4(volumeContext.Status.MountLocation, makefsOptions...); err != nil {
return fmt.Errorf("error formatting ext4: %w", err)
return xerrors.NewTaggedf[Retryable]("error formatting ext4: %w", err)
}
case block.FilesystemTypeSwap:
if err = swap.Format(volumeContext.Status.MountLocation, swap.FormatOptions{
Label: volumeContext.Cfg.TypedSpec().Provisioning.FilesystemSpec.Label,
UUID: uuid.New(),
}); err != nil {
return fmt.Errorf("error formatting swap: %w", err)
return xerrors.NewTaggedf[Retryable]("error formatting swap: %w", err)
}
default:
return fmt.Errorf("unsupported filesystem type: %s", volumeContext.Cfg.TypedSpec().Provisioning.FilesystemSpec.Type)
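The hunks above swap plain `fmt.Errorf` wrapping for tagged errors, so transient mkfs failures can be retried instead of failing the volume outright. A hedged sketch of the mechanism (`Retryable` here is a stand-in tag type; the real one is defined in the volumes package):

```go
package main

import (
	"errors"
	"fmt"

	"github.com/siderolabs/gen/xerrors"
)

// Retryable is a stand-in for the volume manager's tag type.
type Retryable struct{}

func format() error {
	// NewTaggedf wraps like fmt.Errorf but attaches the Retryable tag.
	return xerrors.NewTaggedf[Retryable]("error formatting XFS: %w", errors.New("device busy"))
}

func main() {
	err := format()

	// Callers can branch on the tag instead of parsing the message.
	fmt.Println(xerrors.TagIs[Retryable](err)) // true
	fmt.Println(err)                           // error formatting XFS: device busy
}
```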
@@ -8,6 +8,7 @@ import (
"context"
"fmt"

"github.com/google/cel-go/cel"
"github.com/siderolabs/gen/value"
"github.com/siderolabs/gen/xerrors"
"github.com/siderolabs/go-blockdevice/v2/partitioning"
@@ -44,8 +45,18 @@ func LocateAndProvision(ctx context.Context, logger *zap.Logger, volumeContext M

// attempt to discover the volume
for _, dv := range volumeContext.DiscoveredVolumes {
matchContext := map[string]any{
"volume": dv,
var locator *cel.Env

matchContext := map[string]any{}

switch volumeType { //nolint:exhaustive // we do not need to repeat exhaustive check here
case block.VolumeTypeDisk:
locator = celenv.DiskLocator()

case block.VolumeTypePartition:
locator = celenv.VolumeLocator()

matchContext["volume"] = dv
}

// add disk to the context, so we can use it in CEL expressions
@@ -63,7 +74,7 @@ func LocateAndProvision(ctx context.Context, logger *zap.Logger, volumeContext M
}
}

matches, err := volumeContext.Cfg.TypedSpec().Locator.Match.EvalBool(celenv.VolumeLocator(), matchContext)
matches, err := volumeContext.Cfg.TypedSpec().Locator.Match.EvalBool(locator, matchContext)
if err != nil {
return fmt.Errorf("error evaluating volume locator: %w", err)
}
@@ -127,6 +138,10 @@ func LocateAndProvision(ctx context.Context, logger *zap.Logger, volumeContext M
return fmt.Errorf("no disks matched selector for volume")
}

if volumeType == block.VolumeTypeDisk && len(matchedDisks) > 1 {
return fmt.Errorf("multiple disks matched selector for disk volume; matched disks: %v", matchedDisks)
}

logger.Debug("matched disks", zap.Strings("disks", matchedDisks))

// analyze each disk, until we find the one which is the best fit
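In short, the controller now picks the CEL environment per volume type: disk volumes evaluate their selector in the disk environment, while partition volumes keep the volume environment (and get the discovered volume added to the context). A condensed sketch of that selection, assuming the celenv and machinery cel packages as imported in this diff; the match-context keys are illustrative:

```go
package main

import (
	"fmt"

	celgo "github.com/google/cel-go/cel"

	"github.com/siderolabs/talos/pkg/machinery/cel"
	"github.com/siderolabs/talos/pkg/machinery/cel/celenv"
	"github.com/siderolabs/talos/pkg/machinery/resources/block"
)

// locatorFor mirrors the switch added in LocateAndProvision.
func locatorFor(volumeType block.VolumeType) *celgo.Env {
	if volumeType == block.VolumeTypeDisk {
		return celenv.DiskLocator() // declares disk attributes such as system_disk
	}

	return celenv.VolumeLocator() // declares volume (plus disk) attributes
}

func main() {
	env := locatorFor(block.VolumeTypeDisk)

	// The selector as it would appear in machine config.
	expr, err := cel.ParseBooleanExpression(`system_disk`, env)
	if err != nil {
		panic(err)
	}

	// In the controller the context is built from discovered disks/volumes;
	// here it is filled in by hand for illustration.
	matches, err := expr.EvalBool(env, map[string]any{"system_disk": true})

	fmt.Println(matches, err)
}
```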
53 changes: 49 additions & 4 deletions internal/app/machined/pkg/controllers/block/user_volume_config.go
@@ -304,13 +304,16 @@ func (ctrl *UserVolumeConfigController) handleUserVolumeConfig(
volumeID string,
) error {
switch userVolumeConfig.Type().ValueOr(block.VolumeTypePartition) {
case block.VolumeTypePartition:
return ctrl.handlePartitionUserVolumeConfig(userVolumeConfig, v, volumeID)

case block.VolumeTypeDirectory:
return ctrl.handleDirectoryUserVolumeConfig(userVolumeConfig, v)

case block.VolumeTypeDisk, block.VolumeTypeTmpfs, block.VolumeTypeSymlink, block.VolumeTypeOverlay:
case block.VolumeTypeDisk:
return ctrl.handleDiskUserVolumeConfig(userVolumeConfig, v, volumeID)

case block.VolumeTypePartition:
return ctrl.handlePartitionUserVolumeConfig(userVolumeConfig, v, volumeID)

case block.VolumeTypeTmpfs, block.VolumeTypeSymlink, block.VolumeTypeOverlay:
fallthrough

default:
@@ -364,6 +367,48 @@ func (ctrl *UserVolumeConfigController) handlePartitionUserVolumeConfig(
return nil
}

func (ctrl *UserVolumeConfigController) handleDiskUserVolumeConfig(
userVolumeConfig configconfig.UserVolumeConfig,
v *block.VolumeConfig,
volumeID string,
) error {
diskSelector, ok := userVolumeConfig.Provisioning().DiskSelector().Get()
if !ok {
// this shouldn't happen due to validation
return fmt.Errorf("disk selector not found for volume %q", volumeID)
}

v.TypedSpec().Type = block.VolumeTypeDisk
v.TypedSpec().Locator.Match = diskSelector
Member (reviewer) commented:
This does work, but somewhat by chance (only with a specific form of disk selector).

We have a problem here: the disk selector is evaluated in

https://github.com/shanduur/talos/blob/0d8c0228615d4353f9f5ded9d8b682200ee7e342/pkg/machinery/cel/celenv/celenv.go#L34-L61

while the volume locator is evaluated in

https://github.com/shanduur/talos/blob/0d8c0228615d4353f9f5ded9d8b682200ee7e342/pkg/machinery/cel/celenv/celenv.go#L62-L86

So anything that uses a simple `disk.<something>` expression works, as both environments declare the same `disk` variable. But e.g. `system_disk` is not available in the volume locator; the volume locator also runs for partitions, so it might match one by accident.

So I think we need a separate `locator.DiskMatch`, or something close to that, for disk volumes.

Open to any other ideas as well.

Member (PR author) replied:
Let me know if the changes I made make sense. I've added a different `*cel.Env` that is selected based on the volume type: https://github.com/siderolabs/talos/pull/12104/files#diff-29f79fc583c81df9b1167eb597fb3642587aa952b153ee5c3c5190005577bd6bR54-R61

v.TypedSpec().Provisioning = block.ProvisioningSpec{
Wave: block.WaveUserVolumes,
DiskSelector: block.DiskSelector{
Match: diskSelector,
},
PartitionSpec: block.PartitionSpec{
TypeUUID: partition.LinuxFilesystemData,
},
FilesystemSpec: block.FilesystemSpec{
Type: userVolumeConfig.Filesystem().Type(),
},
}
v.TypedSpec().Mount = block.MountSpec{
TargetPath: userVolumeConfig.Name(),
ParentID: constants.UserVolumeMountPoint,
SelinuxLabel: constants.EphemeralSelinuxLabel,
FileMode: 0o755,
UID: 0,
GID: 0,
ProjectQuotaSupport: userVolumeConfig.Filesystem().ProjectQuotaSupport(),
}

if err := convertEncryptionConfiguration(userVolumeConfig.Encryption(), v.TypedSpec()); err != nil {
return fmt.Errorf("error apply encryption configuration: %w", err)
}

return nil
}

func (ctrl *UserVolumeConfigController) handleDirectoryUserVolumeConfig(
userVolumeConfig configconfig.UserVolumeConfig,
v *block.VolumeConfig,
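The review thread above boils down to a parse-time mismatch: `system_disk` type-checks in the disk locator environment but not in the volume locator one, which is why the locator environment is now chosen per volume type. A small sketch of that check, under the same API assumptions as the earlier snippet:

```go
package main

import (
	"fmt"

	"github.com/siderolabs/talos/pkg/machinery/cel"
	"github.com/siderolabs/talos/pkg/machinery/cel/celenv"
)

func main() {
	// Parses fine: the disk environment declares system_disk.
	if _, err := cel.ParseBooleanExpression(`system_disk`, celenv.DiskLocator()); err != nil {
		fmt.Println("disk locator:", err)
	} else {
		fmt.Println("disk locator: ok")
	}

	// Expected to fail: the volume environment has no system_disk variable.
	if _, err := cel.ParseBooleanExpression(`system_disk`, celenv.VolumeLocator()); err != nil {
		fmt.Println("volume locator:", err)
	}
}
```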
@@ -9,6 +9,7 @@ import (
"time"

"github.com/cosi-project/runtime/pkg/resource"
"github.com/siderolabs/gen/xslices"
"github.com/siderolabs/go-pointer"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/suite"
@@ -40,18 +41,26 @@ func TestUserVolumeConfigSuite(t *testing.T) {
}

func (suite *UserVolumeConfigSuite) TestReconcileUserVolumesSwapVolumes() {
uv1 := blockcfg.NewUserVolumeConfigV1Alpha1()
uv1.MetaName = "data1"
suite.Require().NoError(uv1.ProvisioningSpec.DiskSelectorSpec.Match.UnmarshalText([]byte(`system_disk`)))
uv1.ProvisioningSpec.ProvisioningMinSize = blockcfg.MustByteSize("10GiB")
uv1.ProvisioningSpec.ProvisioningMaxSize = blockcfg.MustByteSize("100GiB")
uv1.FilesystemSpec.FilesystemType = block.FilesystemTypeXFS

uv2 := blockcfg.NewUserVolumeConfigV1Alpha1()
uv2.MetaName = "data2"
suite.Require().NoError(uv2.ProvisioningSpec.DiskSelectorSpec.Match.UnmarshalText([]byte(`!system_disk`)))
uv2.ProvisioningSpec.ProvisioningMaxSize = blockcfg.MustByteSize("1TiB")
uv2.EncryptionSpec = blockcfg.EncryptionSpec{
userVolumeNames := []string{
"data-part1",
"data-part2",
"data-dir1",
"data-disk1",
}

uvPart1 := blockcfg.NewUserVolumeConfigV1Alpha1()
uvPart1.MetaName = userVolumeNames[0]
suite.Require().NoError(uvPart1.ProvisioningSpec.DiskSelectorSpec.Match.UnmarshalText([]byte(`system_disk`)))
uvPart1.ProvisioningSpec.ProvisioningMinSize = blockcfg.MustByteSize("10GiB")
uvPart1.ProvisioningSpec.ProvisioningMaxSize = blockcfg.MustByteSize("100GiB")
uvPart1.FilesystemSpec.FilesystemType = block.FilesystemTypeXFS

uvPart2 := blockcfg.NewUserVolumeConfigV1Alpha1()
uvPart2.MetaName = userVolumeNames[1]
uvPart2.VolumeType = pointer.To(block.VolumeTypePartition)
suite.Require().NoError(uvPart2.ProvisioningSpec.DiskSelectorSpec.Match.UnmarshalText([]byte(`!system_disk`)))
uvPart2.ProvisioningSpec.ProvisioningMaxSize = blockcfg.MustByteSize("1TiB")
uvPart2.EncryptionSpec = blockcfg.EncryptionSpec{
EncryptionProvider: block.EncryptionProviderLUKS2,
EncryptionKeys: []blockcfg.EncryptionKey{
{
@@ -65,32 +74,45 @@ func (suite *UserVolumeConfigSuite) TestReconcileUserVolumesSwapVolumes() {
},
}

uv3 := blockcfg.NewUserVolumeConfigV1Alpha1()
uv3.MetaName = "data3"
uv3.VolumeType = pointer.To(block.VolumeTypeDirectory)
uvDir1 := blockcfg.NewUserVolumeConfigV1Alpha1()
uvDir1.MetaName = userVolumeNames[2]
uvDir1.VolumeType = pointer.To(block.VolumeTypeDirectory)

uvDisk1 := blockcfg.NewUserVolumeConfigV1Alpha1()
uvDisk1.MetaName = userVolumeNames[3]
suite.Require().NoError(uvDisk1.ProvisioningSpec.DiskSelectorSpec.Match.UnmarshalText([]byte(`!system_disk`)))
uvDisk1.EncryptionSpec = blockcfg.EncryptionSpec{
EncryptionProvider: block.EncryptionProviderLUKS2,
EncryptionKeys: []blockcfg.EncryptionKey{
{
KeySlot: 0,
KeyTPM: &blockcfg.EncryptionKeyTPM{},
},
{
KeySlot: 1,
KeyStatic: &blockcfg.EncryptionKeyStatic{KeyData: "secret"},
},
},
}

sv1 := blockcfg.NewSwapVolumeConfigV1Alpha1()
sv1.MetaName = "swap"
suite.Require().NoError(sv1.ProvisioningSpec.DiskSelectorSpec.Match.UnmarshalText([]byte(`disk.transport == "nvme"`)))
sv1.ProvisioningSpec.ProvisioningMaxSize = blockcfg.MustByteSize("2GiB")

ctr, err := container.New(uv1, uv2, uv3, sv1)
ctr, err := container.New(uvPart1, uvPart2, uvDir1, uvDisk1, sv1)
suite.Require().NoError(err)

cfg := config.NewMachineConfig(ctr)
suite.Create(cfg)

userVolumes := []string{
constants.UserVolumePrefix + "data1",
constants.UserVolumePrefix + "data2",
constants.UserVolumePrefix + "data3",
}
userVolumes := xslices.Map(userVolumeNames, func(in string) string { return constants.UserVolumePrefix + in })

ctest.AssertResources(suite, userVolumes, func(vc *block.VolumeConfig, asrt *assert.Assertions) {
asrt.Contains(vc.Metadata().Labels().Raw(), block.UserVolumeLabel)

switch vc.Metadata().ID() {
case userVolumes[0], userVolumes[1]:
case userVolumes[0], userVolumes[1], userVolumes[3]:
asrt.Equal(block.VolumeTypePartition, vc.TypedSpec().Type)

asrt.Contains(userVolumes, vc.TypedSpec().Provisioning.PartitionSpec.Label)
@@ -104,7 +126,7 @@ func (suite *UserVolumeConfigSuite) TestReconcileUserVolumesSwapVolumes() {
asrt.Equal(block.VolumeTypeDirectory, vc.TypedSpec().Type)
}

asrt.Contains([]string{"data1", "data2", "data3"}, vc.TypedSpec().Mount.TargetPath)
asrt.Contains(userVolumeNames, vc.TypedSpec().Mount.TargetPath)
asrt.Equal(constants.UserVolumeMountPoint, vc.TypedSpec().Mount.ParentID)

switch vc.Metadata().ID() {
@@ -143,8 +165,8 @@ func (suite *UserVolumeConfigSuite) TestReconcileUserVolumesSwapVolumes() {
suite.AddFinalizer(block.NewVolumeMountRequest(block.NamespaceName, volumeID).Metadata(), "test")
}

// drop the first volume
ctr, err = container.New(uv2)
// keep only the first volume
ctr, err = container.New(uvPart1)
suite.Require().NoError(err)

newCfg := config.NewMachineConfig(ctr)
@@ -153,32 +175,32 @@

// controller should tear down removed resources
ctest.AssertResources(suite, userVolumes, func(vc *block.VolumeConfig, asrt *assert.Assertions) {
if vc.Metadata().ID() == userVolumes[1] {
if vc.Metadata().ID() == userVolumes[0] {
asrt.Equal(resource.PhaseRunning, vc.Metadata().Phase())
} else {
asrt.Equal(resource.PhaseTearingDown, vc.Metadata().Phase())
}
})

ctest.AssertResources(suite, userVolumes, func(vmr *block.VolumeMountRequest, asrt *assert.Assertions) {
if vmr.Metadata().ID() == userVolumes[1] {
if vmr.Metadata().ID() == userVolumes[0] {
asrt.Equal(resource.PhaseRunning, vmr.Metadata().Phase())
} else {
asrt.Equal(resource.PhaseTearingDown, vmr.Metadata().Phase())
}
})

// remove finalizers
suite.RemoveFinalizer(block.NewVolumeConfig(block.NamespaceName, userVolumes[0]).Metadata(), "test")
suite.RemoveFinalizer(block.NewVolumeMountRequest(block.NamespaceName, userVolumes[0]).Metadata(), "test")
suite.RemoveFinalizer(block.NewVolumeConfig(block.NamespaceName, userVolumes[2]).Metadata(), "test")
suite.RemoveFinalizer(block.NewVolumeMountRequest(block.NamespaceName, userVolumes[2]).Metadata(), "test")
for _, userVolume := range userVolumes[1:] {
suite.RemoveFinalizer(block.NewVolumeConfig(block.NamespaceName, userVolume).Metadata(), "test")
suite.RemoveFinalizer(block.NewVolumeMountRequest(block.NamespaceName, userVolume).Metadata(), "test")
}

// now the resources should be removed
ctest.AssertNoResource[*block.VolumeConfig](suite, userVolumes[0])
ctest.AssertNoResource[*block.VolumeMountRequest](suite, userVolumes[0])
ctest.AssertNoResource[*block.VolumeConfig](suite, userVolumes[2])
ctest.AssertNoResource[*block.VolumeMountRequest](suite, userVolumes[2])
for _, userVolume := range userVolumes[1:] {
ctest.AssertNoResource[*block.VolumeConfig](suite, userVolume)
ctest.AssertNoResource[*block.VolumeMountRequest](suite, userVolume)
}
}

func (suite *UserVolumeConfigSuite) TestReconcileRawVolumes() {