
Commit 43fb96a

sean-jc authored and bonzini committed
KVM: x86/mmu: Ensure NX huge page recovery thread is alive before waking
When waking a VM's NX huge page recovery thread, ensure the thread is
actually alive before trying to wake it.  Now that the thread is spawned
on-demand during KVM_RUN, a VM without a recovery thread is reachable via
the related module params.

  BUG: kernel NULL pointer dereference, address: 0000000000000040
  #PF: supervisor read access in kernel mode
  #PF: error_code(0x0000) - not-present page
  Hardware name: QEMU Standard PC (Q35 + ICH9, 2009), BIOS 0.0.0 02/06/2015
  RIP: 0010:vhost_task_wake+0x5/0x10
  Call Trace:
   <TASK>
   set_nx_huge_pages+0xcc/0x1e0 [kvm]
   param_attr_store+0x8a/0xd0
   module_attr_store+0x1a/0x30
   kernfs_fop_write_iter+0x12f/0x1e0
   vfs_write+0x233/0x3e0
   ksys_write+0x60/0xd0
   do_syscall_64+0x5b/0x160
   entry_SYSCALL_64_after_hwframe+0x4b/0x53
  RIP: 0033:0x7f3b52710104
   </TASK>
  Modules linked in: kvm_intel kvm
  CR2: 0000000000000040

Fixes: 931656b ("kvm: defer huge page recovery vhost task to later")
Cc: [email protected]
Cc: Keith Busch <[email protected]>
Signed-off-by: Sean Christopherson <[email protected]>
Message-ID: <[email protected]>
Signed-off-by: Paolo Bonzini <[email protected]>
1 parent 6f61269 commit 43fb96a

File tree

1 file changed

+26 -7 lines changed


Diff for: arch/x86/kvm/mmu/mmu.c

@@ -7120,6 +7120,19 @@ static void mmu_destroy_caches(void)
 	kmem_cache_destroy(mmu_page_header_cache);
 }
 
+static void kvm_wake_nx_recovery_thread(struct kvm *kvm)
+{
+	/*
+	 * The NX recovery thread is spawned on-demand at the first KVM_RUN and
+	 * may not be valid even though the VM is globally visible.  Do nothing,
+	 * as such a VM can't have any possible NX huge pages.
+	 */
+	struct vhost_task *nx_thread = READ_ONCE(kvm->arch.nx_huge_page_recovery_thread);
+
+	if (nx_thread)
+		vhost_task_wake(nx_thread);
+}
+
 static int get_nx_huge_pages(char *buffer, const struct kernel_param *kp)
 {
 	if (nx_hugepage_mitigation_hard_disabled)
@@ -7180,7 +7193,7 @@ static int set_nx_huge_pages(const char *val, const struct kernel_param *kp)
 			kvm_mmu_zap_all_fast(kvm);
 			mutex_unlock(&kvm->slots_lock);
 
-			vhost_task_wake(kvm->arch.nx_huge_page_recovery_thread);
+			kvm_wake_nx_recovery_thread(kvm);
 		}
 		mutex_unlock(&kvm_lock);
 	}
@@ -7315,7 +7328,7 @@ static int set_nx_huge_pages_recovery_param(const char *val, const struct kernel_param *kp)
 		mutex_lock(&kvm_lock);
 
 		list_for_each_entry(kvm, &vm_list, vm_list)
-			vhost_task_wake(kvm->arch.nx_huge_page_recovery_thread);
+			kvm_wake_nx_recovery_thread(kvm);
 
 		mutex_unlock(&kvm_lock);
 	}
@@ -7451,14 +7464,20 @@ static void kvm_mmu_start_lpage_recovery(struct once *once)
 {
 	struct kvm_arch *ka = container_of(once, struct kvm_arch, nx_once);
 	struct kvm *kvm = container_of(ka, struct kvm, arch);
+	struct vhost_task *nx_thread;
 
 	kvm->arch.nx_huge_page_last = get_jiffies_64();
-	kvm->arch.nx_huge_page_recovery_thread = vhost_task_create(
-		kvm_nx_huge_page_recovery_worker, kvm_nx_huge_page_recovery_worker_kill,
-		kvm, "kvm-nx-lpage-recovery");
+	nx_thread = vhost_task_create(kvm_nx_huge_page_recovery_worker,
+				      kvm_nx_huge_page_recovery_worker_kill,
+				      kvm, "kvm-nx-lpage-recovery");
 
-	if (kvm->arch.nx_huge_page_recovery_thread)
-		vhost_task_start(kvm->arch.nx_huge_page_recovery_thread);
+	if (!nx_thread)
+		return;
+
+	vhost_task_start(nx_thread);
+
+	/* Make the task visible only once it is fully started. */
+	WRITE_ONCE(kvm->arch.nx_huge_page_recovery_thread, nx_thread);
 }
 
 int kvm_mmu_post_init_vm(struct kvm *kvm)
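For readers who want to experiment with the ordering outside the kernel, below is a minimal userspace sketch of the same pattern in C11: an atomic pointer plays the role of the READ_ONCE()/WRITE_ONCE() accesses on kvm->arch.nx_huge_page_recovery_thread, and a pthread condition variable stands in for vhost_task_wake(). All names here (struct worker, wake_recovery_worker(), start_recovery_worker()) are invented for illustration; they are not kernel or vhost APIs.

/* Userspace sketch of the publish-after-start pattern; names are hypothetical. */
#include <pthread.h>
#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>
#include <stdlib.h>

/* Hypothetical stand-in for the kernel's struct vhost_task. */
struct worker {
	pthread_t thread;
	pthread_mutex_t lock;
	pthread_cond_t wake;
	bool kick;
};

/* Stays NULL until the worker is fully started (cf. nx_huge_page_recovery_thread). */
static _Atomic(struct worker *) recovery_worker;

static void *worker_fn(void *arg)
{
	struct worker *w = arg;

	/* Sleep until someone kicks us; the real thread runs a recovery loop. */
	pthread_mutex_lock(&w->lock);
	while (!w->kick)
		pthread_cond_wait(&w->wake, &w->lock);
	pthread_mutex_unlock(&w->lock);

	printf("recovery worker woken\n");
	return NULL;
}

/* Mirrors kvm_wake_nx_recovery_thread(): a VM with no worker is a no-op. */
static void wake_recovery_worker(void)
{
	struct worker *w = atomic_load_explicit(&recovery_worker,
						memory_order_acquire);

	if (!w)
		return;		/* never spawned; nothing to wake */

	pthread_mutex_lock(&w->lock);
	w->kick = true;
	pthread_cond_signal(&w->wake);
	pthread_mutex_unlock(&w->lock);
}

/* Mirrors kvm_mmu_start_lpage_recovery(): publish only after starting. */
static void start_recovery_worker(void)
{
	struct worker *w = calloc(1, sizeof(*w));

	if (!w)
		return;

	pthread_mutex_init(&w->lock, NULL);
	pthread_cond_init(&w->wake, NULL);

	if (pthread_create(&w->thread, NULL, worker_fn, w)) {
		free(w);
		return;
	}

	/* Make the worker visible only once it is fully started. */
	atomic_store_explicit(&recovery_worker, w, memory_order_release);
}

int main(void)
{
	wake_recovery_worker();		/* safe no-op: worker not spawned yet */
	start_recovery_worker();
	wake_recovery_worker();		/* actually wakes the worker */

	struct worker *w = atomic_load(&recovery_worker);
	if (w)
		pthread_join(w->thread, NULL);
	return 0;
}

As in the patch, the waker tolerates a NULL pointer, and the pointer is published with release semantics only after the thread is fully started, so a concurrent waker can never observe a half-initialized task.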
