From 77fe7f136a7312954b1b8b7eeb4bc91fc3c14a3f Mon Sep 17 00:00:00 2001 From: Mel Gorman Date: Tue, 22 Mar 2022 14:44:00 -0700 Subject: [PATCH] mm/page_alloc: check high-order pages for corruption during PCP operations Eric Dumazet pointed out that commit 44042b449872 ("mm/page_alloc: allow high-order pages to be stored on the per-cpu lists") only checks the head page during PCP refill and allocation operations. This was an oversight and all pages should be checked. This will incur a small performance penalty but it's necessary for correctness. Link: https://lkml.kernel.org/r/20220310092456.GJ15701@techsingularity.net Fixes: 44042b449872 ("mm/page_alloc: allow high-order pages to be stored on the per-cpu lists") Signed-off-by: Mel Gorman Reported-by: Eric Dumazet Acked-by: Eric Dumazet Reviewed-by: Shakeel Butt Acked-by: Vlastimil Babka Acked-by: David Rientjes Cc: Michal Hocko Cc: Wei Xu Cc: Greg Thelen Cc: Hugh Dickins Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- mm/page_alloc.c | 78 ++++++++++++++++++++++++------------------------- 1 file changed, 39 insertions(+), 39 deletions(-) diff --git a/mm/page_alloc.c b/mm/page_alloc.c index 5d126853e239..e36d7631a64c 100644 --- a/mm/page_alloc.c +++ b/mm/page_alloc.c @@ -2291,43 +2291,6 @@ static inline int check_new_page(struct page *page) return 1; } -#ifdef CONFIG_DEBUG_VM -/* - * With DEBUG_VM enabled, order-0 pages are checked for expected state when - * being allocated from pcp lists. With debug_pagealloc also enabled, they are - * also checked when pcp lists are refilled from the free lists. - */ -static inline bool check_pcp_refill(struct page *page) -{ - if (debug_pagealloc_enabled_static()) - return check_new_page(page); - else - return false; -} - -static inline bool check_new_pcp(struct page *page) -{ - return check_new_page(page); -} -#else -/* - * With DEBUG_VM disabled, free order-0 pages are checked for expected state - * when pcp lists are being refilled from the free lists.
With debug_pagealloc - * enabled, they are also checked when being allocated from the pcp lists. - */ -static inline bool check_pcp_refill(struct page *page) -{ - return check_new_page(page); -} -static inline bool check_new_pcp(struct page *page) -{ - if (debug_pagealloc_enabled_static()) - return check_new_page(page); - else - return false; -} -#endif /* CONFIG_DEBUG_VM */ - static bool check_new_pages(struct page *page, unsigned int order) { int i; @@ -2341,6 +2304,43 @@ static bool check_new_pages(struct page *page, unsigned int order) return false; } +#ifdef CONFIG_DEBUG_VM +/* + * With DEBUG_VM enabled, order-0 pages are checked for expected state when + * being allocated from pcp lists. With debug_pagealloc also enabled, they are + * also checked when pcp lists are refilled from the free lists. + */ +static inline bool check_pcp_refill(struct page *page, unsigned int order) +{ + if (debug_pagealloc_enabled_static()) + return check_new_pages(page, order); + else + return false; +} + +static inline bool check_new_pcp(struct page *page, unsigned int order) +{ + return check_new_pages(page, order); +} +#else +/* + * With DEBUG_VM disabled, free order-0 pages are checked for expected state + * when pcp lists are being refilled from the free lists. With debug_pagealloc + * enabled, they are also checked when being allocated from the pcp lists.
+ */ +static inline bool check_pcp_refill(struct page *page, unsigned int order) +{ + return check_new_pages(page, order); +} +static inline bool check_new_pcp(struct page *page, unsigned int order) +{ + if (debug_pagealloc_enabled_static()) + return check_new_pages(page, order); + else + return false; +} +#endif /* CONFIG_DEBUG_VM */ + inline void post_alloc_hook(struct page *page, unsigned int order, gfp_t gfp_flags) { @@ -2982,7 +2982,7 @@ static int rmqueue_bulk(struct zone *zone, unsigned int order, if (unlikely(page == NULL)) break; - if (unlikely(check_pcp_refill(page))) + if (unlikely(check_pcp_refill(page, order))) continue; /* @@ -3600,7 +3600,7 @@ struct page *__rmqueue_pcplist(struct zone *zone, unsigned int order, page = list_first_entry(list, struct page, lru); list_del(&page->lru); pcp->count -= 1 << order; - } while (check_new_pcp(page)); + } while (check_new_pcp(page, order)); return page; }