From ca3a35e9684ff253dc36bb3e1d4b0b5f37c2e44d Mon Sep 17 00:00:00 2001
From: Sedat Dilek
Date: Sat, 14 Aug 2010 15:04:01 +0200
Subject: [PATCH] Revert "mm: keep a guard page below a grow-down stack segment"

This reverts commit 320b2b8de12698082609ebbc1a17165727f4c893.
---
 mm/memory.c |   23 -----------------------
 1 files changed, 0 insertions(+), 23 deletions(-)

diff --git a/mm/memory.c b/mm/memory.c
index 9606ceb..858829d 100644
--- a/mm/memory.c
+++ b/mm/memory.c
@@ -2760,26 +2760,6 @@ out_release:
 }
 
 /*
- * This is like a special single-page "expand_downwards()",
- * except we must first make sure that 'address-PAGE_SIZE'
- * doesn't hit another vma.
- *
- * The "find_vma()" will do the right thing even if we wrap
- */
-static inline int check_stack_guard_page(struct vm_area_struct *vma, unsigned long address)
-{
-	address &= PAGE_MASK;
-	if ((vma->vm_flags & VM_GROWSDOWN) && address == vma->vm_start) {
-		address -= PAGE_SIZE;
-		if (find_vma(vma->vm_mm, address) != vma)
-			return -ENOMEM;
-
-		expand_stack(vma, address);
-	}
-	return 0;
-}
-
-/*
  * We enter with non-exclusive mmap_sem (to exclude vma changes,
  * but allow concurrent faults), and pte mapped but not yet locked.
  * We return with mmap_sem still held, but pte unmapped and unlocked.
@@ -2792,9 +2772,6 @@ static int do_anonymous_page(struct mm_struct *mm, struct vm_area_struct *vma,
 	spinlock_t *ptl;
 	pte_t entry;
 
-	if (check_stack_guard_page(vma, address) < 0)
-		return VM_FAULT_SIGBUS;
-
 	if (!(flags & FAULT_FLAG_WRITE)) {
 		entry = pte_mkspecial(pfn_pte(my_zero_pfn(address),
 						vma->vm_page_prot));
-- 
1.7.1
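
For reference, below is a minimal user-space sketch of the guard-page check that
commit 320b2b8 introduced and this revert removes. It is not part of the patch:
the kernel types and helpers (vm_area_struct, find_vma(), expand_stack()) are
replaced by hypothetical stand-ins, and only the logic visible in the removed
hunk is modelled.

/*
 * NOTE: out-of-tree illustration only, not part of the patch above.
 *
 * Minimal user-space model of the guard-page check that commit 320b2b8
 * added and this revert removes.  vm_area_struct, find_vma() and
 * expand_stack() are hypothetical stand-ins.
 */
#include <stdio.h>
#include <errno.h>

#define PAGE_SIZE	4096UL
#define PAGE_MASK	(~(PAGE_SIZE - 1))
#define VM_GROWSDOWN	0x1UL

struct mm;

struct vma {
	unsigned long vm_start;	/* lowest address covered by this vma */
	unsigned long vm_end;	/* one past the highest address */
	unsigned long vm_flags;
	struct mm *vm_mm;
	struct vma *next;	/* next vma by ascending address */
};

struct mm {
	struct vma *vmas;	/* toy address space: sorted singly-linked list */
};

/* Stand-in for find_vma(): first vma whose vm_end lies above addr. */
static struct vma *find_vma(struct mm *mm, unsigned long addr)
{
	for (struct vma *v = mm->vmas; v; v = v->next)
		if (addr < v->vm_end)
			return v;
	return NULL;
}

/* Stand-in for expand_stack(): grow the stack vma down to addr. */
static void expand_stack(struct vma *vma, unsigned long addr)
{
	vma->vm_start = addr;
}

/*
 * The reverted logic: when a fault hits the lowest page of a grow-down
 * stack vma, the page below it must still be free.  If that page
 * already belongs to another vma, fail with -ENOMEM (surfaced as
 * SIGBUS in do_anonymous_page); otherwise expand the stack over it.
 */
static int check_stack_guard_page(struct vma *vma, unsigned long address)
{
	address &= PAGE_MASK;
	if ((vma->vm_flags & VM_GROWSDOWN) && address == vma->vm_start) {
		address -= PAGE_SIZE;
		if (find_vma(vma->vm_mm, address) != vma)
			return -ENOMEM;
		expand_stack(vma, address);
	}
	return 0;
}

int main(void)
{
	struct mm mm;
	/* A stack vma with a foreign mapping directly below it. */
	struct vma stack = { .vm_start = 0x7f0000002000UL, .vm_end = 0x7f0000010000UL,
			     .vm_flags = VM_GROWSDOWN, .vm_mm = &mm, .next = NULL };
	struct vma below = { .vm_start = 0x7f0000000000UL, .vm_end = 0x7f0000002000UL,
			     .vm_flags = 0, .vm_mm = &mm, .next = &stack };

	mm.vmas = &below;
	/* Page under the stack is taken: the check refuses (-ENOMEM). */
	printf("blocked case: %d\n", check_stack_guard_page(&stack, 0x7f0000002080UL));

	mm.vmas = &stack;	/* drop the lower mapping: page below is free */
	printf("grow case: %d, new vm_start=0x%lx\n",
	       check_stack_guard_page(&stack, 0x7f0000002080UL), stack.vm_start);
	return 0;
}

With a mapping placed directly under the stack vma the check reports -ENOMEM,
which do_anonymous_page turned into SIGBUS; with a free page below, it simply
extends vm_start one page downward instead.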