mm: refactor folio_undo_large_rmappable()
commit 593a10dabe upstream.

Folios of order <= 1 are not put on the deferred list. The order check
was added to folio_undo_large_rmappable() by commit 8897277acf ("mm:
support order-1 folios in the page cache"), but the small-folio
(order 0) check is now repeated on every call, so keep only the
folio_order() check inside the function.

In addition, move all the checks into the header file to save a function
call for folios that are not large-rmappable or whose deferred_list is
empty.

Link: https://lkml.kernel.org/r/20240521130315.46072-1-wangkefeng.wang@huawei.com
Signed-off-by: Kefeng Wang <wangkefeng.wang@huawei.com>
Reviewed-by: David Hildenbrand <david@redhat.com>
Reviewed-by: Vishal Moola (Oracle) <vishal.moola@gmail.com>
Cc: Johannes Weiner <hannes@cmpxchg.org>
Cc: Lance Yang <ioworker0@gmail.com>
Cc: Matthew Wilcox (Oracle) <willy@infradead.org>
Cc: Michal Hocko <mhocko@kernel.org>
Cc: Muchun Song <muchun.song@linux.dev>
Cc: Roman Gushchin <roman.gushchin@linux.dev>
Cc: Shakeel Butt <shakeel.butt@linux.dev>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
[ Upstream commit itself does not apply cleanly, because there
  are fewer calls to folio_undo_large_rmappable() in this tree. ]
Signed-off-by: Hugh Dickins <hughd@google.com>
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
commit eb6b6d3e1f (parent 0275e4021b), committed by Greg Kroah-Hartman
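The shape of the refactor is the familiar split between an inlined fast
path and an out-of-line slow path: the cheap early-exit checks move into
the header so every caller inlines them, and a function call is paid
only when there is real work to do. A minimal standalone sketch of that
pattern follows; the struct and the undo_fast()/__undo_slow() names are
illustrative only, not kernel code:

#include <stdbool.h>

struct obj {
	int  order;		/* folio_order() analogue */
	bool rmappable;		/* folio_test_large_rmappable() analogue */
	bool deferred;		/* "deferred list is non-empty" analogue */
};

void __undo_slow(struct obj *o);	/* out of line: locks and unlinks */

/* Inlined into every caller: most objects bail out before any call. */
static inline void undo_fast(struct obj *o)
{
	if (o->order <= 1 || !o->rmappable)
		return;
	if (!o->deferred)	/* racy peek; slow path rechecks under lock */
		return;
	__undo_slow(o);
}

Keeping the unlikely branch out of line also keeps the inlined code
small at every call site.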
mm/huge_memory.c | +1 -12

@@ -2767,22 +2767,11 @@ out:
 	return ret;
 }
 
-void folio_undo_large_rmappable(struct folio *folio)
+void __folio_undo_large_rmappable(struct folio *folio)
 {
 	struct deferred_split *ds_queue;
 	unsigned long flags;
 
-	if (folio_order(folio) <= 1)
-		return;
-
-	/*
-	 * At this point, there is no one trying to add the folio to
-	 * deferred_list. If folio is not in deferred_list, it's safe
-	 * to check without acquiring the split_queue_lock.
-	 */
-	if (data_race(list_empty(&folio->_deferred_list)))
-		return;
-
 	ds_queue = get_deferred_split_queue(folio);
 	spin_lock_irqsave(&ds_queue->split_queue_lock, flags);
 	if (!list_empty(&folio->_deferred_list)) {
mm/internal.h | +16 -1

@@ -413,7 +413,22 @@ static inline void folio_set_order(struct folio *folio, unsigned int order)
 #endif
 }
 
-void folio_undo_large_rmappable(struct folio *folio);
+void __folio_undo_large_rmappable(struct folio *folio);
+static inline void folio_undo_large_rmappable(struct folio *folio)
+{
+	if (folio_order(folio) <= 1 || !folio_test_large_rmappable(folio))
+		return;
+
+	/*
+	 * At this point, there is no one trying to add the folio to
+	 * deferred_list. If folio is not in deferred_list, it's safe
+	 * to check without acquiring the split_queue_lock.
+	 */
+	if (data_race(list_empty(&folio->_deferred_list)))
+		return;
+
+	__folio_undo_large_rmappable(folio);
+}
 
 static inline struct folio *page_rmappable_folio(struct page *page)
 {
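The data_race(list_empty(...)) peek in the inline wrapper pairs with the
list_empty() recheck taken under split_queue_lock in
__folio_undo_large_rmappable(). The comment above the peek records the
invariant that makes it safe: nobody can be adding the folio to the
deferred list concurrently, so a stale non-empty answer only costs one
unnecessary lock round-trip before the locked recheck decides for real.
A minimal userspace sketch of this check-then-recheck-under-lock
pattern, using pthreads and illustrative names (maybe_dequeue, node and
queue_lock are not kernel APIs):

#include <pthread.h>
#include <stdbool.h>

struct node {
	bool queued;			/* stands in for !list_empty() */
};

static pthread_mutex_t queue_lock = PTHREAD_MUTEX_INITIALIZER;

/* Unlocked peek first; only the recheck under the lock decides. */
static void maybe_dequeue(struct node *n)
{
	if (!n->queued)			/* kernel wraps this read in data_race() */
		return;

	pthread_mutex_lock(&queue_lock);
	if (n->queued) {		/* recheck now that we hold the lock */
		n->queued = false;	/* ...unlink from the queue here... */
	}
	pthread_mutex_unlock(&queue_lock);
}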
mm/page_alloc.c | +1 -3

@@ -600,9 +600,7 @@ void destroy_large_folio(struct folio *folio)
 		return;
 	}
 
-	if (folio_test_large_rmappable(folio))
-		folio_undo_large_rmappable(folio);
-
+	folio_undo_large_rmappable(folio);
 	mem_cgroup_uncharge(folio);
 	free_the_page(&folio->page, folio_order(folio));
 }