Commit d346247

fix(vm): fix migration with images, hotplugs, local disks, and when changing the storage class (#1518)
Fixed:
- The VM no longer reboots when a VMBDA is removed.
- The PVC with data is no longer deleted when migrating a VM with local disks.
- Images no longer block VM migration.
- It is now possible to hotplug images to a VM again.
- VMs with hotplugged images can be migrated again.
- The VM no longer loses events related to virtual disks and reconciles faster.
- Added a restriction on migrating VMs with local disks if the VM has hotplugs.
- Fixed a panic in the test for storage class migration.
- The VMBDA interface no longer disappears from the status of a stopped VM.

Signed-off-by: Isteb4k <[email protected]>
1 parent d3737fa commit d346247

21 files changed: +367 −240 lines

build/components/versions.yml

Lines changed: 1 addition & 1 deletion
@@ -3,7 +3,7 @@ firmware:
   libvirt: v10.9.0
   edk2: stable202411
 core:
-  3p-kubevirt: v1.3.1-v12n.14
+  3p-kubevirt: v1.3.1-v12n.15
   3p-containerized-data-importer: v1.60.3-v12n.10
   distribution: 2.8.3
 package:

images/virtualization-artifact/pkg/controller/kvbuilder/kvvm_utils.go

Lines changed: 29 additions & 9 deletions
@@ -83,6 +83,7 @@ func GenerateSerial(input string) string {
 type HotPlugDeviceSettings struct {
 	VolumeName     string
 	PVCName        string
+	Image          string
 	DataVolumeName string
 }
 
@@ -91,6 +92,7 @@ func ApplyVirtualMachineSpec(
 	vdByName map[string]*v1alpha2.VirtualDisk,
 	viByName map[string]*v1alpha2.VirtualImage,
 	cviByName map[string]*v1alpha2.ClusterVirtualImage,
+	vmbdas map[v1alpha2.VMBDAObjectRef][]*v1alpha2.VirtualMachineBlockDeviceAttachment,
 	class *v1alpha2.VirtualMachineClass,
 	ipAddress string,
 	networkSpec network.InterfaceSpecList,
@@ -130,11 +132,11 @@ func ApplyVirtualMachineSpec(
 				PVCName:    volume.PersistentVolumeClaim.ClaimName,
 			})
 		}
-		// FIXME(VM): not used, now only supports PVC
-		if volume.DataVolume != nil && volume.DataVolume.Hotpluggable {
+
+		if volume.ContainerDisk != nil && isHotplugged(volume, vm, vmbdas) {
 			hotpluggedDevices = append(hotpluggedDevices, HotPlugDeviceSettings{
-				VolumeName:     volume.Name,
-				DataVolumeName: volume.DataVolume.Name,
+				VolumeName: volume.Name,
+				Image:      volume.ContainerDisk.Image,
 			})
 		}
 	}
@@ -218,6 +220,10 @@ func ApplyVirtualMachineSpec(
 		}
 	}
 
+	if err := kvvm.SetProvisioning(vm.Spec.Provisioning); err != nil {
+		return err
+	}
+
 	for _, device := range hotpluggedDevices {
 		switch {
 		case device.PVCName != "":
@@ -227,13 +233,15 @@ func ApplyVirtualMachineSpec(
 			}); err != nil {
 				return err
 			}
-		// FIXME(VM): not used, now only supports PVC
-		case device.DataVolumeName != "":
+		case device.Image != "":
+			if err := kvvm.SetDisk(device.VolumeName, SetDiskOptions{
+				ContainerDisk: pointer.GetPointer(device.Image),
+				IsHotplugged:  true,
+			}); err != nil {
+				return err
+			}
 		}
 	}
-	if err := kvvm.SetProvisioning(vm.Spec.Provisioning); err != nil {
-		return err
-	}
 
 	kvvm.SetOwnerRef(vm, schema.GroupVersionKind{
 		Group: v1alpha2.SchemeGroupVersion.Group,
@@ -313,3 +321,15 @@ func setNetworksAnnotation(kvvm *KVVM, networkSpec network.InterfaceSpecList) er
 	kvvm.SetKVVMIAnnotation(annotations.AnnNetworksSpec, networkConfigStr)
 	return nil
 }
+
+func isHotplugged(volume virtv1.Volume, vm *v1alpha2.VirtualMachine, vmbdas map[v1alpha2.VMBDAObjectRef][]*v1alpha2.VirtualMachineBlockDeviceAttachment) bool {
+	name, kind := GetOriginalDiskName(volume.Name)
+	for _, bdRef := range vm.Spec.BlockDeviceRefs {
+		if bdRef.Name == name && bdRef.Kind == kind {
+			return false
+		}
+	}
+
+	_, ok := vmbdas[v1alpha2.VMBDAObjectRef{Name: name, Kind: v1alpha2.VMBDAObjectRefKind(kind)}]
+	return ok
+}
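The core of this change is the decision rule in the new `isHotplugged` helper: a container-disk volume is treated as hotplugged only when its block device is absent from `spec.blockDeviceRefs` but is referenced by a VMBDA. Below is a minimal, self-contained sketch of that rule with simplified stand-in types (the `ObjectRef` struct and string kinds here are illustrative, not the project's real `v1alpha2` API):

```go
package main

import "fmt"

// ObjectRef is a simplified stand-in for the kind/name key used by the real code.
type ObjectRef struct {
	Kind string
	Name string
}

// isHotplugged (simplified): a volume counts as hotplugged only if its block device
// is NOT listed in the VM spec (spec.blockDeviceRefs) but IS referenced by a VMBDA.
func isHotplugged(ref ObjectRef, specRefs []ObjectRef, vmbdas map[ObjectRef]bool) bool {
	for _, specRef := range specRefs {
		if specRef == ref {
			return false // statically attached via spec, not a hotplug
		}
	}
	return vmbdas[ref]
}

func main() {
	specRefs := []ObjectRef{{Kind: "VirtualDisk", Name: "root"}}
	vmbdas := map[ObjectRef]bool{
		{Kind: "VirtualImage", Name: "tools"}: true,
	}

	fmt.Println(isHotplugged(ObjectRef{"VirtualDisk", "root"}, specRefs, vmbdas))   // false: in spec
	fmt.Println(isHotplugged(ObjectRef{"VirtualImage", "tools"}, specRefs, vmbdas)) // true: attached via VMBDA only
	fmt.Println(isHotplugged(ObjectRef{"VirtualImage", "gone"}, specRefs, vmbdas))  // false: no VMBDA for it
}
```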

images/virtualization-artifact/pkg/controller/vd/internal/migration.go

Lines changed: 19 additions & 5 deletions
@@ -18,6 +18,7 @@ package internal
 
 import (
 	"context"
+	"errors"
 	"fmt"
 	"log/slog"
 
@@ -306,6 +307,7 @@ func (h MigrationHandler) handleMigratePrepareTarget(ctx context.Context, vd *v1
 	}
 
 	// Reset migration info
+	targetPVCName := vd.Status.MigrationState.TargetPVC
 	vd.Status.MigrationState = v1alpha2.VirtualDiskMigrationState{}
 
 	var targetStorageClass *storagev1.StorageClass
@@ -398,11 +400,20 @@ func (h MigrationHandler) handleMigratePrepareTarget(ctx context.Context, vd *v1
 	}
 
 	log.Info("Start creating target PersistentVolumeClaim", slog.String("storageClass", targetStorageClass.Name), slog.String("capacity", size.String()))
-	pvc, err := h.createTargetPersistentVolumeClaim(ctx, vd, targetStorageClass, size)
+	pvc, err := h.createTargetPersistentVolumeClaim(ctx, vd, targetStorageClass, size, targetPVCName, vd.Status.Target.PersistentVolumeClaim)
 	if err != nil {
 		return err
 	}
-	log.Info("Target PersistentVolumeClaim was created or was already exists", slog.String("pvc.name", pvc.Name), slog.String("pvc.namespace", pvc.Namespace))
+
+	log.Info(
+		"The target PersistentVolumeClaim has been created or already exists",
+		slog.String("state.source.pvc", vd.Status.Target.PersistentVolumeClaim),
+		slog.String("state.target.pvc", pvc.Name),
+	)
+
+	if vd.Status.Target.PersistentVolumeClaim == pvc.Name {
+		return errors.New("the target PersistentVolumeClaim name matched the source PersistentVolumeClaim name, please report a bug")
+	}
 
 	vd.Status.MigrationState = v1alpha2.VirtualDiskMigrationState{
 		SourcePVC: vd.Status.Target.PersistentVolumeClaim,
@@ -479,7 +490,10 @@ func (h MigrationHandler) handleMigrateSync(ctx context.Context, vd *v1alpha2.Vi
 func (h MigrationHandler) handleRevert(ctx context.Context, vd *v1alpha2.VirtualDisk) error {
 	log := logger.FromContext(ctx)
 	log.Info("Start reverting...")
-	log.Info("Delete target PersistentVolumeClaim", slog.String("pvc.name", vd.Status.MigrationState.TargetPVC), slog.String("pvc.namespace", vd.Namespace))
+
+	if vd.Status.MigrationState.TargetPVC == vd.Status.Target.PersistentVolumeClaim {
+		return errors.New("cannot revert: the target PersistentVolumeClaim name matched the source PersistentVolumeClaim name, please report a bug")
+	}
 
 	err := h.deleteTargetPersistentVolumeClaim(ctx, vd)
 	if err != nil {
@@ -573,7 +587,7 @@ func (h MigrationHandler) getInProgressMigratingVMOP(ctx context.Context, vm *v1
 	return nil, nil
 }
 
-func (h MigrationHandler) createTargetPersistentVolumeClaim(ctx context.Context, vd *v1alpha2.VirtualDisk, sc *storagev1.StorageClass, size resource.Quantity) (*corev1.PersistentVolumeClaim, error) {
+func (h MigrationHandler) createTargetPersistentVolumeClaim(ctx context.Context, vd *v1alpha2.VirtualDisk, sc *storagev1.StorageClass, size resource.Quantity, targetPVCName, sourcePVCName string) (*corev1.PersistentVolumeClaim, error) {
 	pvcs, err := listPersistentVolumeClaims(ctx, vd, h.client)
 	if err != nil {
 		return nil, err
@@ -584,7 +598,7 @@ func (h MigrationHandler) createTargetPersistentVolumeClaim(ctx context.Context,
 	for _, pvc := range pvcs {
 		// If TargetPVC is empty, that means previous reconciliation failed and not updated TargetPVC in status.
 		// So, we should use pvc, that is not equal to SourcePVC.
-		if pvc.Name == vd.Status.MigrationState.TargetPVC || pvc.Name != vd.Status.MigrationState.SourcePVC {
+		if pvc.Name == targetPVCName || pvc.Name != sourcePVCName {
 			return &pvc, nil
 		}
 	}
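The point of this hunk is that the previously chosen target PVC name is captured before `MigrationState` is zeroed, and the handler refuses to proceed (or revert) if the resolved target equals the current source PVC, which is the scenario that previously deleted the PVC still holding the data. A rough, self-contained sketch of that selection and guard, with hypothetical names and simplified types rather than the controller's real API:

```go
package main

import (
	"errors"
	"fmt"
)

// migrationState is a simplified stand-in for the disk's migration status.
type migrationState struct {
	SourcePVC string
	TargetPVC string
}

// pickTargetPVC mimics the selection rule from the diff: reuse the previously chosen
// target PVC if it is known, otherwise take any PVC that is not the source.
func pickTargetPVC(candidates []string, targetPVCName, sourcePVCName string) (string, error) {
	for _, name := range candidates {
		if name == targetPVCName || name != sourcePVCName {
			// Guard in the spirit of the commit: the target must never resolve to the
			// source, otherwise a later revert would delete the PVC with the data.
			if name == sourcePVCName {
				return "", errors.New("target PVC matched source PVC, refusing to continue")
			}
			return name, nil
		}
	}
	return "", errors.New("no suitable target PVC found")
}

func main() {
	state := migrationState{SourcePVC: "vd-root-1"}
	// TargetPVC is remembered before the state is reset, so a repeated reconcile
	// finds the same PVC again instead of treating the source as a new target.
	target, err := pickTargetPVC([]string{"vd-root-1", "vd-root-2"}, state.TargetPVC, state.SourcePVC)
	fmt.Println(target, err) // vd-root-2 <nil>
}
```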

images/virtualization-artifact/pkg/controller/vd/internal/validator/spec_changes_validator.go

Lines changed: 2 additions & 2 deletions
@@ -93,8 +93,8 @@ func (v *SpecChangesValidator) ValidateUpdate(ctx context.Context, oldVD, newVD
 		}
 
 		for _, bd := range vm.Status.BlockDeviceRefs {
-			if bd.Kind == v1alpha2.DiskDevice && bd.Name == oldVD.Name && bd.Hotplugged {
-				return nil, errors.New("storage class cannot be changed if the VirtualDisk is hotplugged to a running virtual machine")
+			if bd.Hotplugged {
+				return nil, errors.New("for now, changing the storage class is not allowed if the virtual machine has hot-plugged block devices")
 			}
 		}
 	} else {

images/virtualization-artifact/pkg/controller/vm/internal/block_device_handler.go

Lines changed: 8 additions & 76 deletions
@@ -202,94 +202,26 @@ func (h *BlockDeviceHandler) getBlockDeviceWarnings(ctx context.Context, s state
 		return "", err
 	}
 
-	hotplugsByName := make(map[string]struct{})
-
-	for _, vmbdas := range vmbdasByBlockDevice {
-		for _, vmbda := range vmbdas {
-			switch vmbda.Status.Phase {
-			case v1alpha2.BlockDeviceAttachmentPhaseInProgress,
-				v1alpha2.BlockDeviceAttachmentPhaseAttached:
-			default:
-				continue
-			}
-
-			var (
-				cvi         *v1alpha2.ClusterVirtualImage
-				vi          *v1alpha2.VirtualImage
-				vd          *v1alpha2.VirtualDisk
-				bdStatusRef v1alpha2.BlockDeviceStatusRef
-			)
-
-			switch vmbda.Spec.BlockDeviceRef.Kind {
-			case v1alpha2.VMBDAObjectRefKindVirtualDisk:
-				vd, err = s.VirtualDisk(ctx, vmbda.Spec.BlockDeviceRef.Name)
-				if err != nil {
-					return "", err
-				}
-
-				if vd == nil {
-					continue
-				}
-
-				bdStatusRef = h.getBlockDeviceStatusRef(v1alpha2.DiskDevice, vmbda.Spec.BlockDeviceRef.Name)
-				bdStatusRef.Size = vd.Status.Capacity
-			case v1alpha2.VMBDAObjectRefKindVirtualImage:
-				vi, err = s.VirtualImage(ctx, vmbda.Spec.BlockDeviceRef.Name)
-				if err != nil {
-					return "", err
-				}
-
-				if vi == nil {
-					continue
-				}
-
-				bdStatusRef = h.getBlockDeviceStatusRef(v1alpha2.ImageDevice, vmbda.Spec.BlockDeviceRef.Name)
-				bdStatusRef.Size = vi.Status.Size.Unpacked
-
-			case v1alpha2.VMBDAObjectRefKindClusterVirtualImage:
-				cvi, err = s.ClusterVirtualImage(ctx, vmbda.Spec.BlockDeviceRef.Name)
-				if err != nil {
-					return "", err
-				}
-
-				if cvi == nil {
-					continue
-				}
-
-				bdStatusRef = h.getBlockDeviceStatusRef(v1alpha2.ClusterImageDevice, vmbda.Spec.BlockDeviceRef.Name)
-				bdStatusRef.Size = cvi.Status.Size.Unpacked
-			default:
-				return "", fmt.Errorf("unacceptable `Kind` of `BlockDeviceRef`: %s", vmbda.Spec.BlockDeviceRef.Kind)
-			}
-
-			hotplugsByName[bdStatusRef.Name] = struct{}{}
-		}
-	}
-
 	var conflictedRefs []string
-	vm := s.VirtualMachine().Current()
 
-	for _, bdSpecRef := range vm.Spec.BlockDeviceRefs {
+	for _, bdSpecRef := range s.VirtualMachine().Current().Spec.BlockDeviceRefs {
 		// It is a precaution to not apply changes in spec.blockDeviceRefs if disk is already
 		// hotplugged using the VMBDA resource.
 		// spec check is done by VirtualDisk status
 		// the reverse check is done by the vmbda-controller.
-		if bdSpecRef.Kind == v1alpha2.DiskDevice {
-			if _, conflict := hotplugsByName[bdSpecRef.Name]; conflict {
-				conflictedRefs = append(conflictedRefs, bdSpecRef.Name)
-				continue
-			}
-		}
-
-		if _, conflict := hotplugsByName[bdSpecRef.Name]; conflict {
-			conflictedRefs = append(conflictedRefs, bdSpecRef.Name)
+		_, conflict := vmbdasByBlockDevice[v1alpha2.VMBDAObjectRef{
+			Kind: v1alpha2.VMBDAObjectRefKind(bdSpecRef.Kind),
+			Name: bdSpecRef.Name,
+		}]
+		if conflict {
+			conflictedRefs = append(conflictedRefs, fmt.Sprintf("%s/%s", strings.ToLower(string(bdSpecRef.Kind)), bdSpecRef.Name))
 			continue
 		}
 	}
 
 	var warning string
 	if len(conflictedRefs) > 0 {
-		warning = fmt.Sprintf("spec.blockDeviceRefs field contains hotplugged disks (%s): unplug or remove them from spec to continue.", strings.Join(conflictedRefs, ", "))
+		warning = fmt.Sprintf("spec.blockDeviceRefs field contains block devices to hotplug (%s): unplug or remove them from spec to continue.", strings.Join(conflictedRefs, ", "))
 	}
 
 	return warning, nil
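The long phase-based scan over VMBDAs is replaced by a direct lookup: each `spec.blockDeviceRefs` entry conflicts as soon as any VMBDA exists for the same kind/name key. A short, self-contained sketch of that lookup with simplified stand-in types (the real code keys the map with `v1alpha2.VMBDAObjectRef`):

```go
package main

import (
	"fmt"
	"strings"
)

// objectRef is a simplified stand-in for the kind/name map key.
type objectRef struct {
	Kind string
	Name string
}

// conflictWarning mimics the new check: a spec.blockDeviceRefs entry conflicts as soon
// as any VMBDA exists for the same kind/name, regardless of the attachment phase.
func conflictWarning(specRefs []objectRef, vmbdas map[objectRef][]string) string {
	var conflicted []string
	for _, ref := range specRefs {
		if _, ok := vmbdas[ref]; ok {
			conflicted = append(conflicted, fmt.Sprintf("%s/%s", strings.ToLower(ref.Kind), ref.Name))
		}
	}
	if len(conflicted) == 0 {
		return ""
	}
	return fmt.Sprintf("spec.blockDeviceRefs field contains block devices to hotplug (%s): unplug or remove them from spec to continue.", strings.Join(conflicted, ", "))
}

func main() {
	specRefs := []objectRef{{"VirtualDisk", "data"}, {"VirtualImage", "tools"}}
	vmbdas := map[objectRef][]string{{"VirtualImage", "tools"}: {"vmbda-tools"}}
	fmt.Println(conflictWarning(specRefs, vmbdas))
	// spec.blockDeviceRefs field contains block devices to hotplug (virtualimage/tools): ...
}
```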

images/virtualization-artifact/pkg/controller/vm/internal/service/migration_volumes.go

Lines changed: 44 additions & 4 deletions
@@ -110,7 +110,13 @@ func (s MigrationVolumesService) SyncVolumes(ctx context.Context, vmState state.
 		return reconcile.Result{}, nil
 	}
 
-	kvvmiSynced := equality.Semantic.DeepEqual(kvvmInCluster.Spec.Template.Spec.Volumes, kvvmiInCluster.Spec.Volumes)
+	// The pull policy for container disks are only set on the VMI spec and not on the VM spec.
+	// In order to correctly compare the volumes set, we need to set the pull policy on the VM spec as well.
+	kvvmInClusterCopy := kvvmInCluster.DeepCopy()
+	s.fillContainerDiskImagePullPolicies(kvvmInClusterCopy, kvvmiInCluster)
+	s.fillContainerDiskImagePullPolicies(builtKVVM, kvvmiInCluster)
+
+	kvvmiSynced := equality.Semantic.DeepEqual(kvvmInClusterCopy.Spec.Template.Spec.Volumes, kvvmiInCluster.Spec.Volumes)
 	if !kvvmiSynced {
 		// kubevirt does not sync volumes with kvvmi yet
 		log.Info("kvvmi volumes are not synced yet, skip volume migration.")
@@ -199,6 +205,14 @@
 	return reconcile.Result{}, nil
 }
 
+func getVolumesByName(vmiSpec *virtv1.VirtualMachineInstanceSpec) map[string]*virtv1.Volume {
+	volumes := map[string]*virtv1.Volume{}
+	for _, vol := range vmiSpec.Volumes {
+		volumes[vol.Name] = vol.DeepCopy()
+	}
+	return volumes
+}
+
 // if any volume in kvvmi is not exists in readWriteOnceDisks or storageClassChangedDisks,
 // this indicates that
 func (s MigrationVolumesService) shouldRevert(kvvmi *virtv1.VirtualMachineInstance, readWriteOnceDisks, storageClassChangedDisks map[string]*v1alpha2.VirtualDisk) bool {
@@ -230,7 +244,7 @@ func (s MigrationVolumesService) patchVolumes(ctx context.Context, kvvm *virtv1.
 		return err
 	}
 
-	logger.FromContext(ctx).Debug("Patch kvvm with migration volumes.", slog.String("patch", string(patchBytes)))
+	logger.FromContext(ctx).Info("The volume migration is detected: patch volumes", slog.String("patch", string(patchBytes)))
 
 	err = s.client.Patch(ctx, kvvm, client.RawPatch(types.JSONPatchType, patchBytes))
 	return err
@@ -261,10 +275,15 @@ func (s MigrationVolumesService) VolumesSynced(ctx context.Context, vmState stat
 		return false, nil
 	}
 
-	kvvmiSynced := equality.Semantic.DeepEqual(kvvmInCluster.Spec.Template.Spec.Volumes, kvvmiInCluster.Spec.Volumes)
+	// The pull policy for container disks are only set on the VMI spec and not on the VM spec.
+	// In order to correctly compare the volumes set, we need to set the pull policy on the VM spec as well.
+	kvvmInClusterCopy := kvvmInCluster.DeepCopy()
+	s.fillContainerDiskImagePullPolicies(kvvmInClusterCopy, kvvmiInCluster)
+
+	kvvmiSynced := equality.Semantic.DeepEqual(kvvmInClusterCopy.Spec.Template.Spec.Volumes, kvvmiInCluster.Spec.Volumes)
 	if !kvvmiSynced {
 		log.Info("kvvmi volumes are not synced yet")
-		log.Debug("", slog.Any("kvvmi", kvvmInCluster.Spec.Template.Spec.Volumes), slog.Any("kvvmi", kvvmiInCluster.Spec.Volumes))
+		log.Debug("", slog.Any("kvvmi", kvvmInClusterCopy.Spec.Template.Spec.Volumes), slog.Any("kvvmi", kvvmiInCluster.Spec.Volumes))
 		return false, nil
 	}
 
@@ -440,21 +459,42 @@ func (s MigrationVolumesService) getReadyTargetPVCs(ctx context.Context, disks m
 	return targetPVCs, nil
 }
 
+func (s MigrationVolumesService) fillContainerDiskImagePullPolicies(kvvm *virtv1.VirtualMachine, kvvmi *virtv1.VirtualMachineInstance) {
+	volsVMI := getVolumesByName(&kvvmi.Spec)
+	for i, volume := range kvvm.Spec.Template.Spec.Volumes {
+		if volume.ContainerDisk == nil {
+			continue
+		}
+		vmiVol, ok := volsVMI[volume.Name]
+		if !ok {
+			continue
+		}
+		if vmiVol.ContainerDisk == nil {
+			continue
+		}
+		kvvm.Spec.Template.Spec.Volumes[i].ContainerDisk.ImagePullPolicy = vmiVol.ContainerDisk.ImagePullPolicy
+	}
+}
+
 func (s MigrationVolumesService) makeKVVMFromVirtualMachineSpec(ctx context.Context, vmState state.VirtualMachineState) (*virtv1.VirtualMachine, *virtv1.VirtualMachine, error) {
 	kvvm, err := s.makeKVVMFromSpec(ctx, vmState)
 	if err != nil {
 		return nil, nil, err
 	}
+
 	kvvmBuilder := kvbuilder.NewKVVM(kvvm.DeepCopy(), kvbuilder.DefaultOptions(vmState.VirtualMachine().Current()))
 	vdByName, err := vmState.VirtualDisksByName(ctx)
 	if err != nil {
 		return nil, nil, err
 	}
+
 	err = kvbuilder.ApplyMigrationVolumes(kvvmBuilder, vmState.VirtualMachine().Changed(), vdByName)
 	if err != nil {
 		return nil, nil, err
 	}
+
 	kvvmWithMigrationVolumes := kvvmBuilder.GetResource()
+
 	return kvvm, kvvmWithMigrationVolumes, nil
 }
 
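The pull-policy normalization exists because the VMI carries a defaulted `imagePullPolicy` on container-disk volumes while the VM template does not, so a plain deep-equality check of the two volume lists never reports "synced" once a hotplugged image is present. A minimal sketch of the problem and the fix, using simplified local structs and `reflect.DeepEqual` instead of the real KubeVirt types and `equality.Semantic`:

```go
package main

import (
	"fmt"
	"reflect"
)

// Simplified stand-ins for virtv1.Volume / ContainerDiskSource.
type containerDisk struct {
	Image           string
	ImagePullPolicy string
}

type volume struct {
	Name          string
	ContainerDisk *containerDisk
}

// fillPullPolicies copies the pull policy that the VMI defaulted back onto the
// VM-side copy, so the two volume lists can be compared field by field.
func fillPullPolicies(vmVols, vmiVols []volume) {
	byName := map[string]*containerDisk{}
	for _, v := range vmiVols {
		if v.ContainerDisk != nil {
			byName[v.Name] = v.ContainerDisk
		}
	}
	for i := range vmVols {
		if vmVols[i].ContainerDisk == nil {
			continue
		}
		if src, ok := byName[vmVols[i].Name]; ok {
			vmVols[i].ContainerDisk.ImagePullPolicy = src.ImagePullPolicy
		}
	}
}

func main() {
	vmVols := []volume{{Name: "vi-tools", ContainerDisk: &containerDisk{Image: "registry/tools:v1"}}}
	vmiVols := []volume{{Name: "vi-tools", ContainerDisk: &containerDisk{Image: "registry/tools:v1", ImagePullPolicy: "IfNotPresent"}}}

	fmt.Println(reflect.DeepEqual(vmVols, vmiVols)) // false: the VMI has a defaulted pull policy

	fillPullPolicies(vmVols, vmiVols)
	fmt.Println(reflect.DeepEqual(vmVols, vmiVols)) // true: volumes now compare as synced
}
```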

images/virtualization-artifact/pkg/controller/vm/internal/sync_kvvm.go

Lines changed: 6 additions & 1 deletion
@@ -420,8 +420,13 @@ func MakeKVVMFromVMSpec(ctx context.Context, s state.VirtualMachineState) (*virt
 
 	networkSpec := network.CreateNetworkSpec(current, vmmacs)
 
+	vmbdas, err := s.VirtualMachineBlockDeviceAttachments(ctx)
+	if err != nil {
+		return nil, fmt.Errorf("get vmbdas: %w", err)
+	}
+
 	// Create kubevirt VirtualMachine resource from d8 VirtualMachine spec.
-	err = kvbuilder.ApplyVirtualMachineSpec(kvvmBuilder, current, bdState.VDByName, bdState.VIByName, bdState.CVIByName, class, ip.Status.Address, networkSpec)
+	err = kvbuilder.ApplyVirtualMachineSpec(kvvmBuilder, current, bdState.VDByName, bdState.VIByName, bdState.CVIByName, vmbdas, class, ip.Status.Address, networkSpec)
 	if err != nil {
 		return nil, err
 	}
