diff --git a/include/linux/shmem_fs.h b/include/linux/shmem_fs.h index 17e9969db499..51b1dcfb5022 100644 --- a/include/linux/shmem_fs.h +++ b/include/linux/shmem_fs.h @@ -85,6 +85,7 @@ extern bool shmem_huge_enabled(struct vm_area_struct *vma); extern unsigned long shmem_swap_usage(struct vm_area_struct *vma); extern unsigned long shmem_partial_swap_usage(struct address_space *mapping, pgoff_t start, pgoff_t end); +extern void shmem_mark_page_lazyfree(struct page *page); /* Flag allocation requirements to shmem_getpage */ enum sgp_type { diff --git a/include/linux/swap.h b/include/linux/swap.h index e68ae91643f3..8ed2f6e51ae7 100644 --- a/include/linux/swap.h +++ b/include/linux/swap.h @@ -363,6 +363,7 @@ extern void rotate_reclaimable_page(struct page *page); extern void deactivate_file_page(struct page *page); extern void deactivate_page(struct page *page); extern void mark_page_lazyfree(struct page *page); +extern void mark_page_lazyfree_movetail(struct page *page); extern void swap_setup(void); extern void __lru_cache_add_inactive_or_unevictable(struct page *page, diff --git a/mm/shmem.c b/mm/shmem.c index d8f45f30b656..a4b5012b1267 100644 --- a/mm/shmem.c +++ b/mm/shmem.c @@ -4284,3 +4284,9 @@ struct page *shmem_read_mapping_page_gfp(struct address_space *mapping, #endif } EXPORT_SYMBOL_GPL(shmem_read_mapping_page_gfp); + +void shmem_mark_page_lazyfree(struct page *page) +{ + mark_page_lazyfree_movetail(page); +} +EXPORT_SYMBOL_GPL(shmem_mark_page_lazyfree); diff --git a/mm/swap.c b/mm/swap.c index b6c5e44e49f0..171213a64202 100644 --- a/mm/swap.c +++ b/mm/swap.c @@ -65,6 +65,7 @@ struct lru_pvecs { struct pagevec lru_deactivate_file; struct pagevec lru_deactivate; struct pagevec lru_lazyfree; + struct pagevec lru_lazyfree_movetail; #ifdef CONFIG_SMP struct pagevec activate_page; #endif @@ -630,6 +631,21 @@ static void lru_lazyfree_fn(struct page *page, struct lruvec *lruvec, } } +static void lru_lazyfree_movetail_fn(struct page *page, struct lruvec *lruvec, + 
void *arg) +{ + if (PageLRU(page) && !PageUnevictable(page) && PageSwapBacked(page) && + !PageSwapCache(page)) { + bool active = PageActive(page); + + del_page_from_lru_list(page, lruvec, + LRU_INACTIVE_ANON + active); + ClearPageActive(page); + ClearPageReferenced(page); + add_page_to_lru_list_tail(page, lruvec, LRU_INACTIVE_FILE); + } +} + /* * Drain pages out of the cpu's pagevecs. * Either "cpu" is the current CPU, and preemption has already been @@ -665,6 +681,10 @@ void lru_add_drain_cpu(int cpu) if (pagevec_count(pvec)) pagevec_lru_move_fn(pvec, lru_lazyfree_fn, NULL); + pvec = &per_cpu(lru_pvecs.lru_lazyfree_movetail, cpu); + if (pagevec_count(pvec)) + pagevec_lru_move_fn(pvec, lru_lazyfree_movetail_fn, NULL); + activate_page_drain(cpu); invalidate_bh_lrus_cpu(cpu); } @@ -742,6 +762,29 @@ void mark_page_lazyfree(struct page *page) } } + +/** + * mark_page_lazyfree_movetail - move a swap-backed page to the inactive file list tail + * @page: the page to move + * + * mark_page_lazyfree_movetail() moves @page to the tail of the inactive file list. + * This is done to accelerate the reclaim of @page. 
+ */ +void mark_page_lazyfree_movetail(struct page *page) +{ + if (PageLRU(page) && !PageUnevictable(page) && PageSwapBacked(page) && + !PageSwapCache(page)) { + struct pagevec *pvec; + + local_lock(&lru_pvecs.lock); + pvec = this_cpu_ptr(&lru_pvecs.lru_lazyfree_movetail); + get_page(page); + if (pagevec_add_and_need_flush(pvec, page)) + pagevec_lru_move_fn(pvec, + lru_lazyfree_movetail_fn, NULL); + local_unlock(&lru_pvecs.lock); + } +} + void lru_add_drain(void) { local_lock(&lru_pvecs.lock); @@ -854,6 +897,7 @@ inline void __lru_add_drain_all(bool force_all_cpus) pagevec_count(&per_cpu(lru_pvecs.lru_deactivate_file, cpu)) || pagevec_count(&per_cpu(lru_pvecs.lru_deactivate, cpu)) || pagevec_count(&per_cpu(lru_pvecs.lru_lazyfree, cpu)) || + pagevec_count(&per_cpu(lru_pvecs.lru_lazyfree_movetail, cpu)) || need_activate_page_drain(cpu) || has_bh_in_lru(cpu, NULL)) { INIT_WORK(work, lru_add_drain_per_cpu);