| author | Ryan Roberts <[email protected]> | 2025-04-22 08:18:18 +0000 |
|---|---|---|
| committer | Will Deacon <[email protected]> | 2025-05-09 12:43:07 +0000 |
| commit | 44562c71e2cfc9eed39bfcd98af63d5da8b1b5bf | |
| parent | arm64/mm: Support huge pte-mapped pages in vmap | |
mm/vmalloc: Enter lazy mmu mode while manipulating vmalloc ptes
Wrap vmalloc's pte table manipulation loops with
arch_enter_lazy_mmu_mode() / arch_leave_lazy_mmu_mode(). This provides
the arch code with the opportunity to optimize the pte manipulations.
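
In outline, each affected loop is bracketed like this (a minimal sketch condensed from the vmap_pages_pte_range() hunk in the diff below; example_pte_loop() is a hypothetical name, and the real loops also handle huge mappings and error cases):

	/*
	 * Enter lazy mmu mode once, perform all the pte writes, then
	 * leave. An arch that implements the hooks can defer and batch
	 * whatever per-pte synchronization it would otherwise do in
	 * set_pte_at().
	 */
	static int example_pte_loop(pte_t *pte, unsigned long addr,
				    unsigned long end, struct page **pages,
				    int *nr, pgprot_t prot)
	{
		arch_enter_lazy_mmu_mode();
		do {
			set_pte_at(&init_mm, addr, pte,
				   mk_pte(pages[*nr], prot));
			(*nr)++;
		} while (pte++, addr += PAGE_SIZE, addr != end);
		arch_leave_lazy_mmu_mode();
		return 0;
	}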
Note that vmap_pfn() already uses lazy mmu mode, since it delegates to
apply_to_page_range(), which enters lazy mmu mode for both user and
kernel mappings.
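
For comparison, the pte loop in apply_to_pte_range() in mm/memory.c already follows the same shape, which is how vmap_pfn() picks up lazy mmu mode today (condensed here for illustration; pte table mapping, locking and error paths elided):

	arch_enter_lazy_mmu_mode();
	do {
		/* fn is the caller's callback, e.g. vmap_pfn_apply(). */
		err = fn(pte, addr, data);
		if (err)
			break;
	} while (addr += PAGE_SIZE, pte++, addr != end);
	arch_leave_lazy_mmu_mode();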
These hooks will shortly be used by arm64 to improve vmalloc
performance.
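
The hooks are no-ops on architectures that do not implement them, so the change is only observable where an arch opts in. Purely as an illustrative sketch (not the actual arm64 code; TIF_LAZY_MMU is a hypothetical flag here), an arch could defer its per-pte barriers until the batch completes:

	static inline void arch_enter_lazy_mmu_mode(void)
	{
		/* Tell set_pte_at() et al. to skip their per-pte barriers. */
		set_thread_flag(TIF_LAZY_MMU);
	}

	static inline void arch_leave_lazy_mmu_mode(void)
	{
		clear_thread_flag(TIF_LAZY_MMU);
		/* One barrier now publishes the whole batch of pte updates. */
		dsb(ishst);
		isb();
	}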
Reviewed-by: Uladzislau Rezki (Sony) <[email protected]>
Reviewed-by: Catalin Marinas <[email protected]>
Reviewed-by: Anshuman Khandual <[email protected]>
Signed-off-by: Ryan Roberts <[email protected]>
Tested-by: Luiz Capitulino <[email protected]>
Link: https://lore.kernel.org/r/[email protected]
Signed-off-by: Will Deacon <[email protected]>
Diffstat (limited to 'mm/vmalloc.c')
| -rw-r--r-- | mm/vmalloc.c | 14 ++++++++++++++ |
1 file changed, 14 insertions(+), 0 deletions(-)
diff --git a/mm/vmalloc.c b/mm/vmalloc.c
index fe2e2cc8da94..24430160b37f 100644
--- a/mm/vmalloc.c
+++ b/mm/vmalloc.c
@@ -104,6 +104,9 @@ static int vmap_pte_range(pmd_t *pmd, unsigned long addr, unsigned long end,
 	pte = pte_alloc_kernel_track(pmd, addr, mask);
 	if (!pte)
 		return -ENOMEM;
+
+	arch_enter_lazy_mmu_mode();
+
 	do {
 		if (unlikely(!pte_none(ptep_get(pte)))) {
 			if (pfn_valid(pfn)) {
@@ -127,6 +130,8 @@ static int vmap_pte_range(pmd_t *pmd, unsigned long addr, unsigned long end,
 		set_pte_at(&init_mm, addr, pte, pfn_pte(pfn, prot));
 		pfn++;
 	} while (pte += PFN_DOWN(size), addr += size, addr != end);
+
+	arch_leave_lazy_mmu_mode();
 	*mask |= PGTBL_PTE_MODIFIED;
 	return 0;
 }
@@ -354,6 +359,8 @@ static void vunmap_pte_range(pmd_t *pmd, unsigned long addr, unsigned long end,
 	unsigned long size = PAGE_SIZE;
 
 	pte = pte_offset_kernel(pmd, addr);
+	arch_enter_lazy_mmu_mode();
+
 	do {
 #ifdef CONFIG_HUGETLB_PAGE
 		size = arch_vmap_pte_range_unmap_size(addr, pte);
@@ -370,6 +377,8 @@ static void vunmap_pte_range(pmd_t *pmd, unsigned long addr, unsigned long end,
 		ptent = ptep_get_and_clear(&init_mm, addr, pte);
 		WARN_ON(!pte_none(ptent) && !pte_present(ptent));
 	} while (pte += (size >> PAGE_SHIFT), addr += size, addr != end);
+
+	arch_leave_lazy_mmu_mode();
 	*mask |= PGTBL_PTE_MODIFIED;
 }
 
@@ -515,6 +524,9 @@ static int vmap_pages_pte_range(pmd_t *pmd, unsigned long addr,
 	pte = pte_alloc_kernel_track(pmd, addr, mask);
 	if (!pte)
 		return -ENOMEM;
+
+	arch_enter_lazy_mmu_mode();
+
 	do {
 		struct page *page = pages[*nr];
 
@@ -528,6 +540,8 @@ static int vmap_pages_pte_range(pmd_t *pmd, unsigned long addr,
 		set_pte_at(&init_mm, addr, pte, mk_pte(page, prot));
 		(*nr)++;
 	} while (pte++, addr += PAGE_SIZE, addr != end);
+
+	arch_leave_lazy_mmu_mode();
 	*mask |= PGTBL_PTE_MODIFIED;
 	return 0;
 }
