aboutsummaryrefslogtreecommitdiffstats
path: root/mm/mm_init.c
diff options
context:
space:
mode:
authorDavid Hildenbrand <[email protected]>2024-06-07 09:09:36 +0000
committerAndrew Morton <[email protected]>2024-07-04 02:30:18 +0000
commit13c526540b316937a16946e75d459e011be0ce2e (patch)
treeffd3888cdcf9a87b41f622ccfb673ac050f19fc7 /mm/mm_init.c
parentmm: remove page_mkclean() (diff)
downloadkernel-13c526540b316937a16946e75d459e011be0ce2e.tar.gz
kernel-13c526540b316937a16946e75d459e011be0ce2e.zip
mm: pass meminit_context to __free_pages_core()
Patch series "mm/memory_hotplug: use PageOffline() instead of PageReserved() for !ZONE_DEVICE". This can be a considered a long-overdue follow-up to some parts of [1]. The patches are based on [2], but they are not strictly required -- just makes it clearer why we can use adjust_managed_page_count() for memory hotplug without going into details about highmem. We stop initializing pages with PageReserved() in memory hotplug code -- except when dealing with ZONE_DEVICE for now. Instead, we use PageOffline(): all pages are initialized to PageOffline() when onlining a memory section, and only the ones actually getting exposed to the system/page allocator will get PageOffline cleared. This way, we enlighten memory hotplug more about PageOffline() pages and can cleanup some hacks we have in virtio-mem code. What about ZONE_DEVICE? PageOffline() is wrong, but we might just stop using PageReserved() for them later by simply checking for is_zone_device_page() at suitable places. That will be a separate patch set / proposal. This primarily affects virtio-mem, HV-balloon and XEN balloon. I only briefly tested with virtio-mem, which benefits most from these cleanups. [1] https://lore.kernel.org/all/[email protected]/ [2] https://lkml.kernel.org/r/[email protected] This patch (of 3): In preparation for further changes, let's teach __free_pages_core() about the differences of memory hotplug handling. Move the memory hotplug specific handling from generic_online_page() to __free_pages_core(), use adjust_managed_page_count() on the memory hotplug path, and spell out why memory freed via memblock cannot currently use adjust_managed_page_count(). 
[[email protected]: add missed CONFIG_DEFERRED_STRUCT_PAGE_INIT] Link: https://lkml.kernel.org/r/[email protected] [[email protected]: fix up the memblock comment, per Oscar] Link: https://lkml.kernel.org/r/[email protected] [[email protected]: add the parameter name also in the declaration] Link: https://lkml.kernel.org/r/[email protected] Link: https://lkml.kernel.org/r/[email protected] Link: https://lkml.kernel.org/r/[email protected] Signed-off-by: David Hildenbrand <[email protected]> Cc: Alexander Potapenko <[email protected]> Cc: Dexuan Cui <[email protected]> Cc: Dmitry Vyukov <[email protected]> Cc: Eugenio PĂ©rez <[email protected]> Cc: Haiyang Zhang <[email protected]> Cc: Jason Wang <[email protected]> Cc: Juergen Gross <[email protected]> Cc: "K. Y. Srinivasan" <[email protected]> Cc: Marco Elver <[email protected]> Cc: Michael S. Tsirkin <[email protected]> Cc: Mike Rapoport (IBM) <[email protected]> Cc: Oleksandr Tyshchenko <[email protected]> Cc: Oscar Salvador <[email protected]> Cc: Stefano Stabellini <[email protected]> Cc: Wei Liu <[email protected]> Cc: Xuan Zhuo <[email protected]> Signed-off-by: Andrew Morton <[email protected]>
Diffstat (limited to 'mm/mm_init.c')
-rw-r--r--mm/mm_init.c6
1 file changed, 3 insertions, 3 deletions
diff --git a/mm/mm_init.c b/mm/mm_init.c
index 5d150d412920..03874f624b32 100644
--- a/mm/mm_init.c
+++ b/mm/mm_init.c
@@ -1931,7 +1931,7 @@ static void __init deferred_free_pages(unsigned long pfn,
if (nr_pages == MAX_ORDER_NR_PAGES && IS_MAX_ORDER_ALIGNED(pfn)) {
for (i = 0; i < nr_pages; i += pageblock_nr_pages)
set_pageblock_migratetype(page + i, MIGRATE_MOVABLE);
- __free_pages_core(page, MAX_PAGE_ORDER);
+ __free_pages_core(page, MAX_PAGE_ORDER, MEMINIT_EARLY);
return;
}
@@ -1941,7 +1941,7 @@ static void __init deferred_free_pages(unsigned long pfn,
for (i = 0; i < nr_pages; i++, page++, pfn++) {
if (pageblock_aligned(pfn))
set_pageblock_migratetype(page, MIGRATE_MOVABLE);
- __free_pages_core(page, 0);
+ __free_pages_core(page, 0, MEMINIT_EARLY);
}
}
@@ -2471,7 +2471,7 @@ void __init memblock_free_pages(struct page *page, unsigned long pfn,
}
}
- __free_pages_core(page, order);
+ __free_pages_core(page, order, MEMINIT_EARLY);
}
DEFINE_STATIC_KEY_MAYBE(CONFIG_INIT_ON_ALLOC_DEFAULT_ON, init_on_alloc);