
Commit 38f791a

arm64: KVM: Implement 48 VA support for KVM EL2 and Stage-2
This patch adds the necessary support for all host kernel PGSIZE and VA_SPACE configuration options for both EL2 and the Stage-2 page tables.

However, for 40-bit and 42-bit PARange systems, the architecture mandates that VTCR_EL2.SL0 is at most 1, resulting in fewer levels of Stage-2 page tables than levels of host kernel page tables. At the same time, for systems with a PARange > 42 bits, we limit the IPA range by always setting VTCR_EL2.T0SZ to 24 (i.e. a 40-bit IPA space).

To handle Stage-2 translation using a different number of page-table levels than the host kernel, we allocate a dummy PGD with pointers to our actual initial-level Stage-2 page table, which lets us reuse the kernel's pgtable manipulation primitives. Reproducing all of this in KVM does not look pretty and would unnecessarily complicate the 32-bit side.

Systems with a PARange < 40 bits are not yet supported.

[ I have reworked this patch from its original form submitted by Jungseok to take the architecture constraints into consideration. There were too many changes from the original patch for me to preserve the authorship. Thanks to Catalin Marinas for his help in figuring out a good solution to this challenge. I have also fixed various bugs and missing error code handling from the original patch. - Christoffer ]

Reviewed-by: Catalin Marinas <catalin.marinas@arm.com>
Acked-by: Marc Zyngier <marc.zyngier@arm.com>
Signed-off-by: Jungseok Lee <jungseoklee85@gmail.com>
Signed-off-by: Christoffer Dall <christoffer.dall@linaro.org>
1 parent 8eef912 commit 38f791a
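The dummy-PGD arrangement described in the commit message can be pictured as a software-only top level whose entries simply point into the block the hardware actually walks: the generic kernel page-table walkers descend through the fake level, while the VTTBR-style base register is loaded with the real table. The standalone C sketch below models only that indirection; every name and size in it (toy_stage2, toy_prealloc_hwpgd, toy_get_hwpgd, the entry counts) is made up for illustration and is not taken from the kernel sources. In the diff itself the corresponding pieces are kvm_prealloc_hwpgd(), kvm_get_hwpgd() and the KVM_PREALLOC_LEVEL check in kvm_alloc_stage2_pgd(), with update_vttbr() programming VTTBR from kvm_get_hwpgd().

/*
 * Standalone illustration (not kernel code): a software-only "fake" top
 * level whose entries point into one pre-allocated block standing in for
 * the table the hardware would actually walk.  All names and sizes are
 * hypothetical.
 */
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

#define FAKE_PGD_ENTRIES	16	/* entries in the dummy top level */
#define HW_TABLE_ENTRIES	512	/* entries per real first-level table */

struct toy_stage2 {
	uint64_t **fake_pgd;	/* walked only by software helpers */
	uint64_t *hw_table;	/* what a VTTBR-like register would point at */
};

/* Pre-allocate the real first level and wire the fake top level to it. */
static int toy_prealloc_hwpgd(struct toy_stage2 *s2)
{
	size_t i;

	s2->hw_table = calloc((size_t)FAKE_PGD_ENTRIES * HW_TABLE_ENTRIES,
			      sizeof(*s2->hw_table));
	if (!s2->hw_table)
		return -1;

	/*
	 * Each fake entry points at its slice of the real block, so a
	 * generic top-down walker can descend through the fake level.
	 */
	for (i = 0; i < FAKE_PGD_ENTRIES; i++)
		s2->fake_pgd[i] = s2->hw_table + i * HW_TABLE_ENTRIES;
	return 0;
}

/* The hardware base register must see the real table, never the fake one. */
static uint64_t *toy_get_hwpgd(struct toy_stage2 *s2)
{
	return s2->hw_table;
}

int main(void)
{
	struct toy_stage2 s2;

	s2.fake_pgd = calloc(FAKE_PGD_ENTRIES, sizeof(*s2.fake_pgd));
	if (!s2.fake_pgd || toy_prealloc_hwpgd(&s2))
		return 1;

	printf("fake pgd at %p, hardware table at %p\n",
	       (void *)s2.fake_pgd, (void *)toy_get_hwpgd(&s2));

	free(s2.hw_table);
	free(s2.fake_pgd);
	return 0;
}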

4 files changed (+249 −40 lines)

arch/arm/include/asm/kvm_mmu.h

+26 −3
@@ -37,6 +37,11 @@
  */
 #define TRAMPOLINE_VA		UL(CONFIG_VECTORS_BASE)
 
+/*
+ * KVM_MMU_CACHE_MIN_PAGES is the number of stage2 page table translation levels.
+ */
+#define KVM_MMU_CACHE_MIN_PAGES	2
+
 #ifndef __ASSEMBLY__
 
 #include <asm/cacheflush.h>
@@ -83,6 +88,11 @@ static inline void kvm_clean_pgd(pgd_t *pgd)
 	clean_dcache_area(pgd, PTRS_PER_S2_PGD * sizeof(pgd_t));
 }
 
+static inline void kvm_clean_pmd(pmd_t *pmd)
+{
+	clean_dcache_area(pmd, PTRS_PER_PMD * sizeof(pmd_t));
+}
+
 static inline void kvm_clean_pmd_entry(pmd_t *pmd)
 {
 	clean_pmd_entry(pmd);
@@ -123,10 +133,23 @@ static inline bool kvm_page_empty(void *ptr)
 }
 
 
-#define kvm_pte_table_empty(ptep) kvm_page_empty(ptep)
-#define kvm_pmd_table_empty(pmdp) kvm_page_empty(pmdp)
-#define kvm_pud_table_empty(pudp) (0)
+#define kvm_pte_table_empty(kvm, ptep) kvm_page_empty(ptep)
+#define kvm_pmd_table_empty(kvm, pmdp) kvm_page_empty(pmdp)
+#define kvm_pud_table_empty(kvm, pudp) (0)
+
+#define KVM_PREALLOC_LEVEL	0
 
+static inline int kvm_prealloc_hwpgd(struct kvm *kvm, pgd_t *pgd)
+{
+	return 0;
+}
+
+static inline void kvm_free_hwpgd(struct kvm *kvm) { }
+
+static inline void *kvm_get_hwpgd(struct kvm *kvm)
+{
+	return kvm->arch.pgd;
+}
 
 struct kvm;
 
arch/arm/kvm/arm.c

+1 −1
@@ -409,7 +409,7 @@ static void update_vttbr(struct kvm *kvm)
 	kvm_next_vmid++;
 
 	/* update vttbr to be used with the new vmid */
-	pgd_phys = virt_to_phys(kvm->arch.pgd);
+	pgd_phys = virt_to_phys(kvm_get_hwpgd(kvm));
 	BUG_ON(pgd_phys & ~VTTBR_BADDR_MASK);
 	vmid = ((u64)(kvm->arch.vmid) << VTTBR_VMID_SHIFT) & VTTBR_VMID_MASK;
 	kvm->arch.vttbr = pgd_phys | vmid;
arch/arm/kvm/mmu.c

+106 −27
@@ -42,7 +42,7 @@ static unsigned long hyp_idmap_start;
 static unsigned long hyp_idmap_end;
 static phys_addr_t hyp_idmap_vector;
 
-#define pgd_order get_order(PTRS_PER_PGD * sizeof(pgd_t))
+#define hyp_pgd_order get_order(PTRS_PER_PGD * sizeof(pgd_t))
 
 #define kvm_pmd_huge(_x)	(pmd_huge(_x) || pmd_trans_huge(_x))
 
@@ -134,7 +134,7 @@ static void unmap_ptes(struct kvm *kvm, pmd_t *pmd,
 		}
 	} while (pte++, addr += PAGE_SIZE, addr != end);
 
-	if (kvm_pte_table_empty(start_pte))
+	if (kvm_pte_table_empty(kvm, start_pte))
 		clear_pmd_entry(kvm, pmd, start_addr);
 }
 
@@ -158,7 +158,7 @@ static void unmap_pmds(struct kvm *kvm, pud_t *pud,
 		}
 	} while (pmd++, addr = next, addr != end);
 
-	if (kvm_pmd_table_empty(start_pmd))
+	if (kvm_pmd_table_empty(kvm, start_pmd))
 		clear_pud_entry(kvm, pud, start_addr);
 }
 
@@ -182,7 +182,7 @@ static void unmap_puds(struct kvm *kvm, pgd_t *pgd,
 		}
 	} while (pud++, addr = next, addr != end);
 
-	if (kvm_pud_table_empty(start_pud))
+	if (kvm_pud_table_empty(kvm, start_pud))
 		clear_pgd_entry(kvm, pgd, start_addr);
 }
 
@@ -306,7 +306,7 @@ void free_boot_hyp_pgd(void)
 	if (boot_hyp_pgd) {
 		unmap_range(NULL, boot_hyp_pgd, hyp_idmap_start, PAGE_SIZE);
 		unmap_range(NULL, boot_hyp_pgd, TRAMPOLINE_VA, PAGE_SIZE);
-		free_pages((unsigned long)boot_hyp_pgd, pgd_order);
+		free_pages((unsigned long)boot_hyp_pgd, hyp_pgd_order);
 		boot_hyp_pgd = NULL;
 	}
 
@@ -343,7 +343,7 @@ void free_hyp_pgds(void)
 		for (addr = VMALLOC_START; is_vmalloc_addr((void*)addr); addr += PGDIR_SIZE)
 			unmap_range(NULL, hyp_pgd, KERN_TO_HYP(addr), PGDIR_SIZE);
 
-		free_pages((unsigned long)hyp_pgd, pgd_order);
+		free_pages((unsigned long)hyp_pgd, hyp_pgd_order);
 		hyp_pgd = NULL;
 	}
 
@@ -401,13 +401,46 @@ static int create_hyp_pmd_mappings(pud_t *pud, unsigned long start,
 	return 0;
 }
 
+static int create_hyp_pud_mappings(pgd_t *pgd, unsigned long start,
+				   unsigned long end, unsigned long pfn,
+				   pgprot_t prot)
+{
+	pud_t *pud;
+	pmd_t *pmd;
+	unsigned long addr, next;
+	int ret;
+
+	addr = start;
+	do {
+		pud = pud_offset(pgd, addr);
+
+		if (pud_none_or_clear_bad(pud)) {
+			pmd = pmd_alloc_one(NULL, addr);
+			if (!pmd) {
+				kvm_err("Cannot allocate Hyp pmd\n");
+				return -ENOMEM;
+			}
+			pud_populate(NULL, pud, pmd);
+			get_page(virt_to_page(pud));
+			kvm_flush_dcache_to_poc(pud, sizeof(*pud));
+		}
+
+		next = pud_addr_end(addr, end);
+		ret = create_hyp_pmd_mappings(pud, addr, next, pfn, prot);
+		if (ret)
+			return ret;
+		pfn += (next - addr) >> PAGE_SHIFT;
+	} while (addr = next, addr != end);
+
+	return 0;
+}
+
 static int __create_hyp_mappings(pgd_t *pgdp,
 				 unsigned long start, unsigned long end,
 				 unsigned long pfn, pgprot_t prot)
 {
 	pgd_t *pgd;
 	pud_t *pud;
-	pmd_t *pmd;
 	unsigned long addr, next;
 	int err = 0;
 
@@ -416,22 +449,21 @@ static int __create_hyp_mappings(pgd_t *pgdp,
 	end = PAGE_ALIGN(end);
 	do {
 		pgd = pgdp + pgd_index(addr);
-		pud = pud_offset(pgd, addr);
 
-		if (pud_none_or_clear_bad(pud)) {
-			pmd = pmd_alloc_one(NULL, addr);
-			if (!pmd) {
-				kvm_err("Cannot allocate Hyp pmd\n");
+		if (pgd_none(*pgd)) {
+			pud = pud_alloc_one(NULL, addr);
+			if (!pud) {
+				kvm_err("Cannot allocate Hyp pud\n");
 				err = -ENOMEM;
 				goto out;
 			}
-			pud_populate(NULL, pud, pmd);
-			get_page(virt_to_page(pud));
-			kvm_flush_dcache_to_poc(pud, sizeof(*pud));
+			pgd_populate(NULL, pgd, pud);
+			get_page(virt_to_page(pgd));
+			kvm_flush_dcache_to_poc(pgd, sizeof(*pgd));
		}
 
 		next = pgd_addr_end(addr, end);
-		err = create_hyp_pmd_mappings(pud, addr, next, pfn, prot);
+		err = create_hyp_pud_mappings(pgd, addr, next, pfn, prot);
 		if (err)
 			goto out;
 		pfn += (next - addr) >> PAGE_SHIFT;
@@ -521,21 +553,46 @@ int create_hyp_io_mappings(void *from, void *to, phys_addr_t phys_addr)
  */
 int kvm_alloc_stage2_pgd(struct kvm *kvm)
 {
+	int ret;
 	pgd_t *pgd;
 
 	if (kvm->arch.pgd != NULL) {
 		kvm_err("kvm_arch already initialized?\n");
 		return -EINVAL;
 	}
 
-	pgd = (pgd_t *)__get_free_pages(GFP_KERNEL | __GFP_ZERO, S2_PGD_ORDER);
+	if (KVM_PREALLOC_LEVEL > 0) {
+		/*
+		 * Allocate fake pgd for the page table manipulation macros to
+		 * work.  This is not used by the hardware and we have no
+		 * alignment requirement for this allocation.
+		 */
+		pgd = (pgd_t *)kmalloc(PTRS_PER_S2_PGD * sizeof(pgd_t),
+				       GFP_KERNEL | __GFP_ZERO);
+	} else {
+		/*
+		 * Allocate actual first-level Stage-2 page table used by the
+		 * hardware for Stage-2 page table walks.
+		 */
+		pgd = (pgd_t *)__get_free_pages(GFP_KERNEL | __GFP_ZERO, S2_PGD_ORDER);
+	}
+
 	if (!pgd)
 		return -ENOMEM;
 
+	ret = kvm_prealloc_hwpgd(kvm, pgd);
+	if (ret)
+		goto out_err;
+
 	kvm_clean_pgd(pgd);
 	kvm->arch.pgd = pgd;
-
 	return 0;
+out_err:
+	if (KVM_PREALLOC_LEVEL > 0)
+		kfree(pgd);
+	else
+		free_pages((unsigned long)pgd, S2_PGD_ORDER);
+	return ret;
 }
 
 /**
@@ -571,19 +628,39 @@ void kvm_free_stage2_pgd(struct kvm *kvm)
 		return;
 
 	unmap_stage2_range(kvm, 0, KVM_PHYS_SIZE);
-	free_pages((unsigned long)kvm->arch.pgd, S2_PGD_ORDER);
+	kvm_free_hwpgd(kvm);
+	if (KVM_PREALLOC_LEVEL > 0)
+		kfree(kvm->arch.pgd);
+	else
+		free_pages((unsigned long)kvm->arch.pgd, S2_PGD_ORDER);
 	kvm->arch.pgd = NULL;
 }
 
-static pmd_t *stage2_get_pmd(struct kvm *kvm, struct kvm_mmu_memory_cache *cache,
+static pud_t *stage2_get_pud(struct kvm *kvm, struct kvm_mmu_memory_cache *cache,
 			     phys_addr_t addr)
 {
 	pgd_t *pgd;
 	pud_t *pud;
-	pmd_t *pmd;
 
 	pgd = kvm->arch.pgd + pgd_index(addr);
-	pud = pud_offset(pgd, addr);
+	if (WARN_ON(pgd_none(*pgd))) {
+		if (!cache)
+			return NULL;
+		pud = mmu_memory_cache_alloc(cache);
+		pgd_populate(NULL, pgd, pud);
+		get_page(virt_to_page(pgd));
+	}
+
+	return pud_offset(pgd, addr);
+}
+
+static pmd_t *stage2_get_pmd(struct kvm *kvm, struct kvm_mmu_memory_cache *cache,
+			     phys_addr_t addr)
+{
+	pud_t *pud;
+	pmd_t *pmd;
+
+	pud = stage2_get_pud(kvm, cache, addr);
 	if (pud_none(*pud)) {
 		if (!cache)
 			return NULL;
@@ -629,7 +706,7 @@ static int stage2_set_pte(struct kvm *kvm, struct kvm_mmu_memory_cache *cache,
 	pmd_t *pmd;
 	pte_t *pte, old_pte;
 
-	/* Create stage-2 page table mapping - Level 1 */
+	/* Create stage-2 page table mapping - Levels 0 and 1 */
 	pmd = stage2_get_pmd(kvm, cache, addr);
 	if (!pmd) {
 		/*
@@ -690,7 +767,8 @@ int kvm_phys_addr_ioremap(struct kvm *kvm, phys_addr_t guest_ipa,
 		if (writable)
 			kvm_set_s2pte_writable(&pte);
 
-		ret = mmu_topup_memory_cache(&cache, 2, 2);
+		ret = mmu_topup_memory_cache(&cache, KVM_MMU_CACHE_MIN_PAGES,
+						KVM_NR_MEM_OBJS);
 		if (ret)
 			goto out;
 		spin_lock(&kvm->mmu_lock);
@@ -805,7 +883,8 @@ static int user_mem_abort(struct kvm_vcpu *vcpu, phys_addr_t fault_ipa,
 	up_read(&current->mm->mmap_sem);
 
 	/* We need minimum second+third level pages */
-	ret = mmu_topup_memory_cache(memcache, 2, KVM_NR_MEM_OBJS);
+	ret = mmu_topup_memory_cache(memcache, KVM_MMU_CACHE_MIN_PAGES,
+				     KVM_NR_MEM_OBJS);
 	if (ret)
 		return ret;
 
@@ -1080,8 +1159,8 @@ int kvm_mmu_init(void)
 			 (unsigned long)phys_base);
 	}
 
-	hyp_pgd = (pgd_t *)__get_free_pages(GFP_KERNEL | __GFP_ZERO, pgd_order);
-	boot_hyp_pgd = (pgd_t *)__get_free_pages(GFP_KERNEL | __GFP_ZERO, pgd_order);
+	hyp_pgd = (pgd_t *)__get_free_pages(GFP_KERNEL | __GFP_ZERO, hyp_pgd_order);
+	boot_hyp_pgd = (pgd_t *)__get_free_pages(GFP_KERNEL | __GFP_ZERO, hyp_pgd_order);
 
 	if (!hyp_pgd || !boot_hyp_pgd) {
 		kvm_err("Hyp mode PGD not allocated\n");
