Skip to content

Commit 9358931

Browse files
Steven Price authored and WangYuli committed
arm64: RME: handle RIPAS changes before kvm_rec_enter
community inclusion category: bugfix bugzilla: https://gitee.com/openeuler/kernel/issues/ICX7FX?from=project-issue Reference: https://patchew.org/linux/[email protected]/[email protected] ------------------------ Each page within the protected region of the realm guest can be marked as either RAM or EMPTY. Allow the VMM to control this before the guest has started and provide the equivalent functions to change this (with the guest's approval) at runtime. When transitioning from RIPAS RAM (1) to RIPAS EMPTY (0) the memory is unmapped from the guest and undelegated allowing the memory to be reused by the host. When transitioning to RIPAS RAM the actual population of the leaf RTTs is done later on stage 2 fault, however it may be necessary to allocate additional RTTs to allow the RMM to track the RIPAS for the requested range. When freeing a block mapping it is necessary to temporarily unfold the RTT which requires delegating an extra page to the RMM, this page can then be recovered once the contents of the block mapping have been freed. Fixes: 4afc64441759 ("[v8-15-43]arm64: RME: Allow VMM to set RIPAS") Signed-off-by: Steven Price <[email protected]> Signed-off-by: Xu Raoqing <[email protected]> Signed-off-by: WangYuli <[email protected]>
1 parent f6b0b18 commit 9358931

File tree

1 file changed

+67
-65
lines changed

1 file changed

+67
-65
lines changed

arch/arm64/kvm/rme.c

Lines changed: 67 additions & 65 deletions
Original file line numberDiff line numberDiff line change
@@ -213,6 +213,7 @@ static int realm_rtt_fold(struct realm *realm,
213213
unsigned long out_rtt;
214214
int ret;
215215

216+
addr = ALIGN_DOWN(addr, rme_rtt_level_mapsize(level - 1));
216217
ret = rmi_rtt_fold(virt_to_phys(realm->rd), addr, level, &out_rtt);
217218

218219
if (RMI_RETURN_STATUS(ret) == RMI_SUCCESS && rtt_granule)
@@ -283,6 +284,61 @@ static int realm_unmap_private_page(struct realm *realm,
283284
return 0;
284285
}
285286

287+
/*
288+
* Returns 0 on successful fold, a negative value on error, a positive value if
289+
* we were not able to fold all tables at this level.
290+
*/
291+
static int realm_fold_rtt_level(struct realm *realm, int level,
292+
unsigned long start, unsigned long end)
293+
{
294+
int not_folded = 0;
295+
ssize_t map_size;
296+
unsigned long addr, next_addr;
297+
298+
if (WARN_ON(level > RMM_RTT_MAX_LEVEL))
299+
return -EINVAL;
300+
301+
map_size = rme_rtt_level_mapsize(level - 1);
302+
303+
for (addr = start; addr < end; addr = next_addr) {
304+
phys_addr_t rtt_granule;
305+
int ret;
306+
unsigned long align_addr = ALIGN(addr, map_size);
307+
308+
next_addr = ALIGN(addr + 1, map_size);
309+
310+
ret = realm_rtt_fold(realm, align_addr, level, &rtt_granule);
311+
312+
switch (RMI_RETURN_STATUS(ret)) {
313+
case RMI_SUCCESS:
314+
free_delegated_granule(rtt_granule);
315+
break;
316+
case RMI_ERROR_RTT:
317+
if (level == RMM_RTT_MAX_LEVEL ||
318+
RMI_RETURN_INDEX(ret) < level) {
319+
not_folded++;
320+
break;
321+
}
322+
/* Recurse a level deeper */
323+
ret = realm_fold_rtt_level(realm,
324+
level + 1,
325+
addr,
326+
next_addr);
327+
if (ret < 0)
328+
return ret;
329+
else if (ret == 0)
330+
/* Try again at this level */
331+
next_addr = addr;
332+
break;
333+
default:
334+
WARN_ON(1);
335+
return -ENXIO;
336+
}
337+
}
338+
339+
return not_folded;
340+
}
341+
286342
static void realm_unmap_shared_range(struct kvm *kvm,
287343
int level,
288344
unsigned long start,
@@ -339,6 +395,7 @@ static void realm_unmap_shared_range(struct kvm *kvm,
339395

340396
cond_resched_rwlock_write(&kvm->mmu_lock);
341397
}
398+
realm_fold_rtt_level(realm, get_start_level(realm) + 1, start, end);
342399
}
343400

344401
static int realm_init_sve_param(struct kvm *kvm, struct realm_params *params)
@@ -522,6 +579,7 @@ static int realm_create_rtt_levels(struct realm *realm,
522579
if (RMI_RETURN_STATUS(ret) == RMI_ERROR_RTT &&
523580
RMI_RETURN_INDEX(ret) == level - 1) {
524581
/* The RTT already exists, continue */
582+
free_delegated_granule(rtt);
525583
continue;
526584
}
527585
if (ret) {
@@ -625,61 +683,6 @@ static int realm_tear_down_rtt_range(struct realm *realm,
625683
start, end);
626684
}
627685

628-
/*
629-
* Returns 0 on successful fold, a negative value on error, a positive value if
630-
* we were not able to fold all tables at this level.
631-
*/
632-
static int realm_fold_rtt_level(struct realm *realm, int level,
633-
unsigned long start, unsigned long end)
634-
{
635-
int not_folded = 0;
636-
ssize_t map_size;
637-
unsigned long addr, next_addr;
638-
639-
if (WARN_ON(level > RMM_RTT_MAX_LEVEL))
640-
return -EINVAL;
641-
642-
map_size = rme_rtt_level_mapsize(level - 1);
643-
644-
for (addr = start; addr < end; addr = next_addr) {
645-
phys_addr_t rtt_granule;
646-
int ret;
647-
unsigned long align_addr = ALIGN(addr, map_size);
648-
649-
next_addr = ALIGN(addr + 1, map_size);
650-
651-
ret = realm_rtt_fold(realm, align_addr, level, &rtt_granule);
652-
653-
switch (RMI_RETURN_STATUS(ret)) {
654-
case RMI_SUCCESS:
655-
free_delegated_granule(rtt_granule);
656-
break;
657-
case RMI_ERROR_RTT:
658-
if (level == RMM_RTT_MAX_LEVEL ||
659-
RMI_RETURN_INDEX(ret) < level) {
660-
not_folded++;
661-
break;
662-
}
663-
/* Recurse a level deeper */
664-
ret = realm_fold_rtt_level(realm,
665-
level + 1,
666-
addr,
667-
next_addr);
668-
if (ret < 0)
669-
return ret;
670-
else if (ret == 0)
671-
/* Try again at this level */
672-
next_addr = addr;
673-
break;
674-
default:
675-
WARN_ON(1);
676-
return -ENXIO;
677-
}
678-
}
679-
680-
return not_folded;
681-
}
682-
683686
void kvm_realm_destroy_rtts(struct kvm *kvm, u32 ia_bits)
684687
{
685688
struct realm *realm = &kvm->arch.realm;
@@ -1146,18 +1149,16 @@ static int realm_set_ipa_state(struct kvm_vcpu *vcpu,
11461149
* If the RMM walk ended early then more tables are
11471150
* needed to reach the required depth to set the RIPAS.
11481151
*/
1149-
if (walk_level < level) {
1150-
ret = realm_create_rtt_levels(realm, ipa,
1152+
if (walk_level >= level)
1153+
return -EINVAL;
1154+
1155+
ret = realm_create_rtt_levels(realm, ipa,
11511156
walk_level,
11521157
level,
11531158
memcache);
1154-
/* Retry with RTTs created */
1155-
if (!ret)
1156-
continue;
1157-
} else {
1158-
ret = -EINVAL;
1159-
}
1160-
1159+
if (ret)
1160+
return ret;
1161+
/* Retry with the RTT levels in place */
11611162
break;
11621163
} else {
11631164
WARN(1, "Unexpected error in %s: %#x\n", __func__,
@@ -1466,7 +1467,8 @@ static void kvm_complete_ripas_change(struct kvm_vcpu *vcpu)
14661467
break;
14671468

14681469
base = top_ipa;
1469-
} while (top_ipa < top);
1470+
} while (base < top);
1471+
rec->run->exit.ripas_base = base;
14701472
}
14711473

14721474
/*

0 commit comments

Comments
 (0)