| author | Feiyang Chen <[email protected]> | 2022-10-27 12:52:52 +0000 |
|---|---|---|
| committer | Andrew Morton <[email protected]> | 2022-12-12 02:12:12 +0000 |
| commit | 2045a3b8911b6ee64dd9b522d61abc468ecdcdb5 | |
| tree | 03aa29f89f39a9c7f0e556ec0c63238c71cb33e4 /mm/sparse-vmemmap.c | |
| parent | LoongArch: add sparse memory vmemmap support | |
mm/sparse-vmemmap: generalise vmemmap_populate_hugepages()
Generalise vmemmap_populate_hugepages() so that arm64, x86 and LoongArch can
share its implementation. The architecture-specific parts are factored out
into two weak hooks, vmemmap_set_pmd() and vmemmap_check_pmd(), which an
architecture overrides to program and recognise its huge PMD entries.
Link: https://lkml.kernel.org/r/[email protected]
Signed-off-by: Feiyang Chen <[email protected]>
Signed-off-by: Huacai Chen <[email protected]>
Acked-by: Will Deacon <[email protected]>
Acked-by: Dave Hansen <[email protected]>
Reviewed-by: Arnd Bergmann <[email protected]>
Cc: Andy Lutomirski <[email protected]>
Cc: Catalin Marinas <[email protected]>
Cc: Dinh Nguyen <[email protected]>
Cc: Guo Ren <[email protected]>
Cc: Jiaxun Yang <[email protected]>
Cc: Min Zhou <[email protected]>
Cc: Peter Zijlstra <[email protected]>
Cc: Philippe Mathieu-Daudé <[email protected]>
Cc: Thomas Bogendoerfer <[email protected]>
Cc: Xuefeng Li <[email protected]>
Cc: Xuerui Wang <[email protected]>
Cc: Muchun Song <[email protected]>
Signed-off-by: Andrew Morton <[email protected]>
Diffstat (limited to 'mm/sparse-vmemmap.c')
| -rw-r--r-- | mm/sparse-vmemmap.c | 63 |
|---|---|---|

1 file changed, 63 insertions, 0 deletions
```diff
diff --git a/mm/sparse-vmemmap.c b/mm/sparse-vmemmap.c
index 797b30e9050c..c5398a5960d0 100644
--- a/mm/sparse-vmemmap.c
+++ b/mm/sparse-vmemmap.c
@@ -295,6 +295,69 @@ int __meminit vmemmap_populate_basepages(unsigned long start, unsigned long end,
 	return vmemmap_populate_range(start, end, node, altmap, NULL);
 }
 
+void __weak __meminit vmemmap_set_pmd(pmd_t *pmd, void *p, int node,
+				      unsigned long addr, unsigned long next)
+{
+}
+
+int __weak __meminit vmemmap_check_pmd(pmd_t *pmd, int node,
+				       unsigned long addr, unsigned long next)
+{
+	return 0;
+}
+
+int __meminit vmemmap_populate_hugepages(unsigned long start, unsigned long end,
+					 int node, struct vmem_altmap *altmap)
+{
+	unsigned long addr;
+	unsigned long next;
+	pgd_t *pgd;
+	p4d_t *p4d;
+	pud_t *pud;
+	pmd_t *pmd;
+
+	for (addr = start; addr < end; addr = next) {
+		next = pmd_addr_end(addr, end);
+
+		pgd = vmemmap_pgd_populate(addr, node);
+		if (!pgd)
+			return -ENOMEM;
+
+		p4d = vmemmap_p4d_populate(pgd, addr, node);
+		if (!p4d)
+			return -ENOMEM;
+
+		pud = vmemmap_pud_populate(p4d, addr, node);
+		if (!pud)
+			return -ENOMEM;
+
+		pmd = pmd_offset(pud, addr);
+		if (pmd_none(READ_ONCE(*pmd))) {
+			void *p;
+
+			p = vmemmap_alloc_block_buf(PMD_SIZE, node, altmap);
+			if (p) {
+				vmemmap_set_pmd(pmd, p, node, addr, next);
+				continue;
+			} else if (altmap) {
+				/*
+				 * No fallback: In any case we care about, the
+				 * altmap should be reasonably sized and aligned
+				 * such that vmemmap_alloc_block_buf() will always
+				 * succeed. For consistency with the PTE case,
+				 * return an error here as failure could indicate
+				 * a configuration issue with the size of the altmap.
+				 */
+				return -ENOMEM;
+			}
+		} else if (vmemmap_check_pmd(pmd, node, addr, next))
+			continue;
+		if (vmemmap_populate_basepages(addr, next, node, altmap))
+			return -ENOMEM;
+	}
+	return 0;
+}
+
 /*
  * For compound pages bigger than section size (e.g. x86 1G compound
  * pages with 2M subsection size) fill the rest of sections as tail
```
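For context, an architecture that opts in to this shared walker only has to
supply strong definitions of the two hooks. The sketch below shows roughly
what such an override looks like, loosely modeled on the LoongArch side of
this series; the flag names (_PAGE_HUGE, _PAGE_HGLOBAL) and PMD helpers are
architecture-specific assumptions, not part of this patch.

```c
/*
 * Arch-side override of the weak hooks: a sketch loosely modeled on the
 * LoongArch implementation from this series. _PAGE_HUGE/_PAGE_HGLOBAL and
 * the PMD helpers used here are architecture-specific assumptions.
 */
void __meminit vmemmap_set_pmd(pmd_t *pmd, void *p, int node,
			       unsigned long addr, unsigned long next)
{
	pmd_t entry;

	/* Turn the freshly allocated PMD_SIZE block into a huge mapping. */
	entry = pfn_pmd(virt_to_pfn(p), PAGE_KERNEL);
	pmd_val(entry) |= _PAGE_HUGE | _PAGE_HGLOBAL;
	set_pmd_at(&init_mm, addr, pmd, entry);
}

int __meminit vmemmap_check_pmd(pmd_t *pmd, int node,
				unsigned long addr, unsigned long next)
{
	int huge = pmd_val(*pmd) & _PAGE_HUGE;

	/*
	 * A non-zero return tells the generic walker that this PMD already
	 * holds a huge mapping, so it must not fall back to base pages.
	 */
	if (huge)
		vmemmap_verify((pte_t *)pmd, node, addr, next);

	return huge;
}
```

With these in place, the architecture's vmemmap_populate() can reduce to a
call to vmemmap_populate_hugepages(), while the generic code owns the
pgd/p4d/pud walk, the altmap handling and the base-page fallback.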
