diff options
| author | Jinjiang Tu <[email protected]> | 2025-07-24 09:09:57 +0000 |
|---|---|---|
| committer | Andrew Morton <[email protected]> | 2025-08-05 20:38:39 +0000 |
| commit | aa5a10b070690225317ed4d85413d144abfff750 (patch) | |
| tree | 333eceec567ccfd9b4fccf3105acff8efed117ff | |
| parent | mm/smaps: fix race between smaps_hugetlb_range and migration (diff) | |
| download | kernel-aa5a10b070690225317ed4d85413d144abfff750.tar.gz kernel-aa5a10b070690225317ed4d85413d144abfff750.zip | |
fs/proc/task_mmu: hold PTL in pagemap_hugetlb_range and gather_hugetlb_stats
Hold PTL in pagemap_hugetlb_range() and gather_hugetlb_stats() to avoid
operating on a stale page, as pagemap_pmd_range() and gather_pte_stats()
already do.
Link: https://lkml.kernel.org/r/[email protected]
Signed-off-by: Jinjiang Tu <[email protected]>
Acked-by: David Hildenbrand <[email protected]>
Cc: Andrei Vagin <[email protected]>
Cc: Andrii Nakryiko <[email protected]>
Cc: Baolin Wang <[email protected]>
Cc: Brahmajit Das <[email protected]>
Cc: Catalin Marinas <[email protected]>
Cc: Christophe Leroy <[email protected]>
Cc: David Rientjes <[email protected]>
Cc: Dev Jain <[email protected]>
Cc: Hugh Dickins <[email protected]>
Cc: Joern Engel <[email protected]>
Cc: Kefeng Wang <[email protected]>
Cc: Lorenzo Stoakes <[email protected]>
Cc: Michal Hocko <[email protected]>
Cc: Ryan Roberts <[email protected]>
Cc: Thiago Jung Bauermann <[email protected]>
Signed-off-by: Andrew Morton <[email protected]>
| -rw-r--r-- | fs/proc/task_mmu.c | 14 |
1 file changed, 11 insertions, 3 deletions
diff --git a/fs/proc/task_mmu.c b/fs/proc/task_mmu.c index 55bab10bc779..ee1e4ccd33bd 100644 --- a/fs/proc/task_mmu.c +++ b/fs/proc/task_mmu.c @@ -2021,12 +2021,14 @@ static int pagemap_hugetlb_range(pte_t *ptep, unsigned long hmask, struct pagemapread *pm = walk->private; struct vm_area_struct *vma = walk->vma; u64 flags = 0, frame = 0; + spinlock_t *ptl; int err = 0; pte_t pte; if (vma->vm_flags & VM_SOFTDIRTY) flags |= PM_SOFT_DIRTY; + ptl = huge_pte_lock(hstate_vma(vma), walk->mm, ptep); pte = huge_ptep_get(walk->mm, addr, ptep); if (pte_present(pte)) { struct folio *folio = page_folio(pte_page(pte)); @@ -2054,11 +2056,12 @@ static int pagemap_hugetlb_range(pte_t *ptep, unsigned long hmask, err = add_to_pagemap(&pme, pm); if (err) - return err; + break; if (pm->show_pfn && (flags & PM_PRESENT)) frame++; } + spin_unlock(ptl); cond_resched(); return err; @@ -3132,17 +3135,22 @@ static int gather_pte_stats(pmd_t *pmd, unsigned long addr, static int gather_hugetlb_stats(pte_t *pte, unsigned long hmask, unsigned long addr, unsigned long end, struct mm_walk *walk) { - pte_t huge_pte = huge_ptep_get(walk->mm, addr, pte); + pte_t huge_pte; struct numa_maps *md; struct page *page; + spinlock_t *ptl; + ptl = huge_pte_lock(hstate_vma(walk->vma), walk->mm, pte); + huge_pte = huge_ptep_get(walk->mm, addr, pte); if (!pte_present(huge_pte)) - return 0; + goto out; page = pte_page(huge_pte); md = walk->private; gather_stats(page, md, pte_dirty(huge_pte), 1); +out: + spin_unlock(ptl); return 0; } |
