author    Matthew Wilcox (Oracle) <[email protected]>  2023-07-24 18:54:01 +0000
committer Andrew Morton <[email protected]>              2023-08-18 17:12:50 +0000
commit    284e05920498788c5df1a7dd6424adb426498e1c (patch)
tree      0d7ba33616091d40551d1c74acde80ca25664afb /arch/powerpc/mm/fault.c
parent    mm/mmap: change vma iteration order in do_vmi_align_munmap() (diff)
mm: remove CONFIG_PER_VMA_LOCK ifdefs
Patch series "Handle most file-backed faults under the VMA lock", v3.

This patchset adds the ability to handle page faults on parts of files
which are already in the page cache without taking the mmap lock.

This patch (of 10):

Provide lock_vma_under_rcu() when CONFIG_PER_VMA_LOCK is not defined to
eliminate ifdefs in the users.

Link: https://lkml.kernel.org/r/[email protected]
Link: https://lkml.kernel.org/r/[email protected]
Signed-off-by: Matthew Wilcox (Oracle) <[email protected]>
Reviewed-by: Suren Baghdasaryan <[email protected]>
Cc: Punit Agrawal <[email protected]>
Cc: Arjun Roy <[email protected]>
Cc: Eric Dumazet <[email protected]>
Signed-off-by: Andrew Morton <[email protected]>
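As a hedged illustration only: the include/linux/mm.h side of the series is not part of this file-limited view, so the exact hunk is assumed. The ifdefs become removable because the header can provide a no-op fallback along these lines, letting callers invoke lock_vma_under_rcu() unconditionally:

#ifndef CONFIG_PER_VMA_LOCK
/*
 * Sketch of the assumed fallback: with per-VMA locking compiled out,
 * lock_vma_under_rcu() always fails, so callers simply fall through to
 * the mmap-lock path.
 */
static inline struct vm_area_struct *lock_vma_under_rcu(struct mm_struct *mm,
							 unsigned long address)
{
	return NULL;
}
#endif /* CONFIG_PER_VMA_LOCK */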
Diffstat (limited to 'arch/powerpc/mm/fault.c')
-rw-r--r--  arch/powerpc/mm/fault.c | 4 ----
1 file changed, 0 insertions(+), 4 deletions(-)
diff --git a/arch/powerpc/mm/fault.c b/arch/powerpc/mm/fault.c
index 5bfdf6ecfa96..fafce6bdeff0 100644
--- a/arch/powerpc/mm/fault.c
+++ b/arch/powerpc/mm/fault.c
@@ -469,7 +469,6 @@ static int ___do_page_fault(struct pt_regs *regs, unsigned long address,
 	if (is_exec)
 		flags |= FAULT_FLAG_INSTRUCTION;
 
-#ifdef CONFIG_PER_VMA_LOCK
 	if (!(flags & FAULT_FLAG_USER))
 		goto lock_mmap;
 
@@ -501,7 +500,6 @@ static int ___do_page_fault(struct pt_regs *regs, unsigned long address,
 		return user_mode(regs) ? 0 : SIGBUS;
 
 lock_mmap:
-#endif /* CONFIG_PER_VMA_LOCK */
 
 	/* When running in the kernel we expect faults to occur only to
 	 * addresses in user space.  All other faults represent errors in the
@@ -551,9 +549,7 @@ retry:
 
 	mmap_read_unlock(current->mm);
 
-#ifdef CONFIG_PER_VMA_LOCK
 done:
-#endif
 	if (unlikely(fault & VM_FAULT_ERROR))
 		return mm_fault_error(regs, address, fault);
 
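With the guards gone, the per-VMA-lock attempt at the top of ___do_page_fault() is compiled unconditionally. A condensed paraphrase of the resulting control flow (simplified, not the verbatim kernel source; the powerpc-specific pkey check is omitted):

	/*
	 * Condensed sketch: try the lockless per-VMA path first; any
	 * failure (including the NULL return from the stub when the
	 * feature is compiled out) drops to the mmap-lock slow path.
	 */
	vma = lock_vma_under_rcu(mm, address);
	if (!vma)
		goto lock_mmap;

	fault = handle_mm_fault(vma, address, flags | FAULT_FLAG_VMA_LOCK, regs);
	if (!(fault & (VM_FAULT_RETRY | VM_FAULT_COMPLETED)))
		vma_end_read(vma);

	if (!(fault & VM_FAULT_RETRY))
		goto done;	/* handled without taking mmap_lock */

lock_mmap:
	/* ... existing mmap_read_lock()-based path, ending at done: ... */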