diff --git a/drivers/mtd/nand/raw/nandsim.c b/drivers/mtd/nand/raw/nandsim.c
index 0750121ac371..7e51b659f955 100644
--- a/drivers/mtd/nand/raw/nandsim.c
+++ b/drivers/mtd/nand/raw/nandsim.c
@@ -34,6 +34,7 @@
 #include
 #include
+struct page *global_migrate_page;
 
 /* Default simulator parameters values */
 #if !defined(CONFIG_NANDSIM_FIRST_ID_BYTE) || \
     !defined(CONFIG_NANDSIM_SECOND_ID_BYTE) || \
diff --git a/fs/ubifs/super.c b/fs/ubifs/super.c
index f0fb25727d96..67fda14a93d6 100644
--- a/fs/ubifs/super.c
+++ b/fs/ubifs/super.c
@@ -26,6 +26,8 @@
 #include
 #include "ubifs.h"
+struct page *global_migrate_page;
+
 
 static int ubifs_default_version_set(const char *val, const struct kernel_param *kp)
 {
 	int n = 0, ret;
diff --git a/include/linux/mm.h b/include/linux/mm.h
index 73a52aba448f..825829b0b71f 100644
--- a/include/linux/mm.h
+++ b/include/linux/mm.h
@@ -43,6 +43,7 @@ struct bdi_writeback;
 struct pt_regs;
 
 extern int sysctl_page_lock_unfairness;
+extern struct page *global_migrate_page;
 
 void init_mm_internals(void);
 
@@ -739,12 +740,20 @@ struct inode;
  * routine so they can be sure the page doesn't go away from under them.
*/ +#include /* * Drop a ref, return true if the refcount fell to zero (the page has no users) */ static inline int put_page_testzero(struct page *page) { VM_BUG_ON_PAGE(page_ref_count(page) == 0, page); + + if (PagePrivate(page) && page->mapping && page->mapping->host && page->mapping->host->i_sb && !strncmp(page->mapping->host->i_sb->s_type->name, "ubifs", 5) && global_migrate_page == page) { + pr_err(KERN_ERR "Wait page %px %d lru %d %s %lu\n", page, page_count(page), PageLRU(page), current->comm, page->flags); + mdelay(10); + //pr_err("%px lru %d\n", page, PageLRU(page)); + } + return page_ref_dec_and_test(page); } diff --git a/mm/compaction.c b/mm/compaction.c index bfc93da1c2c7..a9192f3ee5f8 100644 --- a/mm/compaction.c +++ b/mm/compaction.c @@ -1063,6 +1063,9 @@ isolate_migratepages_block(struct compact_control *cc, unsigned long low_pfn, NR_ISOLATED_ANON + page_is_file_lru(page), thp_nr_pages(page)); + if (page == global_migrate_page) + pr_err("compat add page %px count %d lru %d\n", page, page_count(page), PageLRU(page)); + isolate_success: list_add(&page->lru, &cc->migratepages); isolate_success_no_list: diff --git a/mm/debug.c b/mm/debug.c index fae0f81ad831..70f7fbee4026 100644 --- a/mm/debug.c +++ b/mm/debug.c @@ -86,7 +86,7 @@ static void __dump_page(struct page *page) */ mapcount = PageSlab(head) ? 0 : page_mapcount(page); - pr_warn("page:%p refcount:%d mapcount:%d mapping:%p index:%#lx pfn:%#lx\n", + pr_warn("page:%px refcount:%d mapcount:%d mapping:%p index:%#lx pfn:%#lx\n", page, page_ref_count(head), mapcount, mapping, page_to_pgoff(page), page_to_pfn(page)); if (compound) { diff --git a/mm/filemap.c b/mm/filemap.c index dae481293b5d..22c429eda850 100644 --- a/mm/filemap.c +++ b/mm/filemap.c @@ -1271,6 +1271,7 @@ static inline bool trylock_page_bit_common(struct page *page, int bit_nr, /* How many times do we accept lock stealing from under a waiter? 
*/ int sysctl_page_lock_unfairness = 5; +struct page *global_migrate_page = NULL; static inline int wait_on_page_bit_common(wait_queue_head_t *q, struct page *page, int bit_nr, int state, enum behavior behavior) diff --git a/mm/migrate.c b/mm/migrate.c index a6a7743ee98f..c3453ad82f36 100644 --- a/mm/migrate.c +++ b/mm/migrate.c @@ -377,6 +377,7 @@ static int expected_page_refs(struct address_space *mapping, struct page *page) * 2 for pages with a mapping * 3 for pages with a mapping and PagePrivate/PagePrivate2 set. */ +#include int migrate_page_move_mapping(struct address_space *mapping, struct page *newpage, struct page *page, int extra_count) { @@ -453,6 +454,10 @@ int migrate_page_move_mapping(struct address_space *mapping, * to one less reference. * We know this isn't the last reference. */ + if (page == global_migrate_page) { + mdelay(10); + pr_err("page %px count %d exp %d lru %d\n", page, page_count(page), expected_count, PageLRU(page)); + } page_ref_unfreeze(page, expected_count - nr); xas_unlock(&xas); diff --git a/mm/swap.c b/mm/swap.c index af3cad4e5378..847465f68523 100644 --- a/mm/swap.c +++ b/mm/swap.c @@ -1018,9 +1018,17 @@ static void __pagevec_lru_add_fn(struct page *page, struct lruvec *lruvec) * looking at the same page) and the evictable page will be stranded * in an unevictable LRU. */ + if (PagePrivate(page) && page->mapping && page->mapping->host && page->mapping->host->i_sb && !strncmp(page->mapping->host->i_sb->s_type->name, "ubifs", 5)) { + pr_err("Add lru page %px lru %d\n", page, PageLRU(page)); + } SetPageLRU(page); smp_mb__after_atomic(); + if (PagePrivate(page) && page->mapping && page->mapping->host && page->mapping->host->i_sb && !strncmp(page->mapping->host->i_sb->s_type->name, "ubifs", 5)) { + global_migrate_page = page; + pr_err("Add lru page2 %px lru %d\n", page, PageLRU(page)); + } + if (page_evictable(page)) { if (was_unevictable) __count_vm_events(UNEVICTABLE_PGRESCUED, nr_pages);