author	Mike Rapoport (Microsoft) <[email protected]>	2024-10-23 16:27:05 +0000
committer	Andrew Morton <[email protected]>	2024-11-07 22:25:15 +0000
commit	c82be0be957631b7eaa4b84ba458e1826484e60d (patch)
tree	d46d2176bb85fad2f268962abfa6f65cb0bb4c35 /mm/vmalloc.c
parent	mm: vmalloc: group declarations depending on CONFIG_MMU together (diff)
mm: vmalloc: don't account for number of nodes for HUGE_VMAP allocations
vmalloc allocations with VM_ALLOW_HUGE_VMAP that do not explicitly specify a node ID will use huge pages only if size_per_node is larger than a huge page. However, the allocated memory is not actually distributed between nodes, so there is no advantage in this approach. On the contrary, BPF allocates SZ_2M * num_possible_nodes() for each new bpf_prog_pack, while it could do with a single huge page per pack.

Don't account for the number of nodes for VM_ALLOW_HUGE_VMAP allocations with NUMA_NO_NODE, and use huge pages whenever the requested allocation size is larger than a huge page.

Link: https://lkml.kernel.org/r/[email protected]
Signed-off-by: Mike Rapoport (Microsoft) <[email protected]>
Reviewed-by: Christoph Hellwig <[email protected]>
Reviewed-by: Uladzislau Rezki (Sony) <[email protected]>
Reviewed-by: Luis Chamberlain <[email protected]>
Tested-by: kdevops <[email protected]>
Cc: Andreas Larsson <[email protected]>
Cc: Andy Lutomirski <[email protected]>
Cc: Ard Biesheuvel <[email protected]>
Cc: Arnd Bergmann <[email protected]>
Cc: Borislav Petkov (AMD) <[email protected]>
Cc: Brian Cain <[email protected]>
Cc: Catalin Marinas <[email protected]>
Cc: Christophe Leroy <[email protected]>
Cc: Dave Hansen <[email protected]>
Cc: Dinh Nguyen <[email protected]>
Cc: Geert Uytterhoeven <[email protected]>
Cc: Guo Ren <[email protected]>
Cc: Helge Deller <[email protected]>
Cc: Huacai Chen <[email protected]>
Cc: Ingo Molnar <[email protected]>
Cc: Johannes Berg <[email protected]>
Cc: John Paul Adrian Glaubitz <[email protected]>
Cc: Kent Overstreet <[email protected]>
Cc: Liam R. Howlett <[email protected]>
Cc: Mark Rutland <[email protected]>
Cc: Masami Hiramatsu (Google) <[email protected]>
Cc: Matt Turner <[email protected]>
Cc: Max Filippov <[email protected]>
Cc: Michael Ellerman <[email protected]>
Cc: Michal Simek <[email protected]>
Cc: Oleg Nesterov <[email protected]>
Cc: Palmer Dabbelt <[email protected]>
Cc: Peter Zijlstra <[email protected]>
Cc: Richard Weinberger <[email protected]>
Cc: Russell King <[email protected]>
Cc: Song Liu <[email protected]>
Cc: Stafford Horne <[email protected]>
Cc: Steven Rostedt (Google) <[email protected]>
Cc: Suren Baghdasaryan <[email protected]>
Cc: Thomas Bogendoerfer <[email protected]>
Cc: Thomas Gleixner <[email protected]>
Cc: Vineet Gupta <[email protected]>
Cc: Will Deacon <[email protected]>
Signed-off-by: Andrew Morton <[email protected]>
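To illustrate the behavioral change, here is a minimal userspace sketch (not from the kernel tree) contrasting the two heuristics; PMD_SIZE is assumed to be 2 MB as on x86-64, and the node count is illustrative:

/* Minimal sketch contrasting the old and new huge-page heuristics.
 * Assumptions: PMD_SIZE == 2 MB (as on x86-64); node count is made up.
 */
#include <stdio.h>
#include <stdbool.h>

#define PMD_SIZE	(2UL << 20)	/* 2 MB */
#define SZ_2M		(2UL << 20)

/* Old heuristic: spread the request across online nodes first. */
static bool old_heuristic(unsigned long size, unsigned long nr_online_nodes)
{
	unsigned long size_per_node = size / nr_online_nodes;

	return size_per_node >= PMD_SIZE;
}

/* New heuristic: compare the whole request against a huge page. */
static bool new_heuristic(unsigned long size)
{
	return size >= PMD_SIZE;
}

int main(void)
{
	/* A single 2 MB bpf_prog_pack request on a 2-node machine. */
	printf("old: huge=%d\n", old_heuristic(SZ_2M, 2));	/* huge=0 */
	printf("new: huge=%d\n", new_heuristic(SZ_2M));		/* huge=1 */
	return 0;
}

On a 2-node machine the old heuristic turned a single 2 MB request into two 1 MB per-node shares, which failed the PMD_SIZE check; the new heuristic keeps the full 2 MB and can map it with a single PMD.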
Diffstat (limited to 'mm/vmalloc.c')
-rw-r--r--	mm/vmalloc.c	9
1 file changed, 2 insertions(+), 7 deletions(-)
diff --git a/mm/vmalloc.c b/mm/vmalloc.c
index 5480b77f4167..5c0ea4e2b17d 100644
--- a/mm/vmalloc.c
+++ b/mm/vmalloc.c
@@ -3779,8 +3779,6 @@ void *__vmalloc_node_range_noprof(unsigned long size, unsigned long align,
 	}
 
 	if (vmap_allow_huge && (vm_flags & VM_ALLOW_HUGE_VMAP)) {
-		unsigned long size_per_node;
-
 		/*
 		 * Try huge pages. Only try for PAGE_KERNEL allocations,
 		 * others like modules don't yet expect huge pages in
@@ -3788,13 +3786,10 @@ void *__vmalloc_node_range_noprof(unsigned long size, unsigned long align,
 		 * supporting them.
 		 */
-		size_per_node = size;
-		if (node == NUMA_NO_NODE)
-			size_per_node /= num_online_nodes();
-		if (arch_vmap_pmd_supported(prot) && size_per_node >= PMD_SIZE)
+		if (arch_vmap_pmd_supported(prot) && size >= PMD_SIZE)
 			shift = PMD_SHIFT;
 		else
-			shift = arch_vmap_pte_supported_shift(size_per_node);
+			shift = arch_vmap_pte_supported_shift(size);
 
 		align = max(real_align, 1UL << shift);
 		size = ALIGN(real_size, 1UL << shift);
 	}
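As a worked example of the resulting sizing logic, the sketch below replays the post-patch computation in userspace; the PAGE_SHIFT/PMD_SHIFT values are x86-64 assumptions, and the stubbed arch_vmap_pte_supported_shift() mirrors the generic fallback that returns PAGE_SHIFT:

/* Userspace replay of the post-patch shift/align/size computation.
 * Assumed values: PAGE_SHIFT=12, PMD_SHIFT=21 (x86-64); the stub below
 * mirrors the generic arch_vmap_pte_supported_shift() fallback.
 */
#include <stdio.h>

#define PAGE_SHIFT	12
#define PMD_SHIFT	21
#define PMD_SIZE	(1UL << PMD_SHIFT)
#define ALIGN(x, a)	(((x) + (a) - 1) & ~((a) - 1))

static unsigned long arch_vmap_pte_supported_shift(unsigned long size)
{
	return PAGE_SHIFT;	/* generic fallback: base pages only */
}

int main(void)
{
	unsigned long real_size = 3UL << 20;	/* a 3 MB request */
	unsigned long real_align = 1;		/* no caller alignment */
	unsigned long shift, align, size;
	int pmd_supported = 1;	/* stand-in for arch_vmap_pmd_supported(prot) */

	if (pmd_supported && real_size >= PMD_SIZE)
		shift = PMD_SHIFT;
	else
		shift = arch_vmap_pte_supported_shift(real_size);

	align = real_align > (1UL << shift) ? real_align : (1UL << shift);
	size = ALIGN(real_size, 1UL << shift);

	/* 3 MB rounds up to 4 MB, aligned and mapped with 2 MB entries. */
	printf("shift=%lu align=0x%lx size=0x%lx\n", shift, align, size);
	return 0;
}

Note that the align and size computations are untouched by the patch; only the input to the shift selection changes, from size_per_node to the full requested size.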