btrfs: convert try_release_subpage_extent_buffer() to take a folio
The old page API is being gradually replaced and converted to use folios, improving code readability and avoiding repeated conversions between pages and folios. Use folio_pos() instead of page_offset(), which is more consistent with folio usage. Likewise, folio_test_private() can operate on the folio directly, without first converting from a page. Signed-off-by: Li Zetao <lizetao1@huawei.com> Reviewed-by: David Sterba <dsterba@suse.com> Signed-off-by: David Sterba <dsterba@suse.com>
This commit is contained in:
@@ -4077,11 +4077,11 @@ out:
 	return found;
 }
 
-static int try_release_subpage_extent_buffer(struct page *page)
+static int try_release_subpage_extent_buffer(struct folio *folio)
 {
-	struct btrfs_fs_info *fs_info = page_to_fs_info(page);
-	u64 cur = page_offset(page);
-	const u64 end = page_offset(page) + PAGE_SIZE;
+	struct btrfs_fs_info *fs_info = folio_to_fs_info(folio);
+	u64 cur = folio_pos(folio);
+	const u64 end = cur + PAGE_SIZE;
 	int ret;
 
 	while (cur < end) {
@@ -4096,7 +4096,7 @@ static int try_release_subpage_extent_buffer(struct page *page)
 		 * with spinlock rather than RCU.
 		 */
 		spin_lock(&fs_info->buffer_lock);
-		eb = get_next_extent_buffer(fs_info, page_folio(page), cur);
+		eb = get_next_extent_buffer(fs_info, folio, cur);
 		if (!eb) {
 			/* No more eb in the page range after or at cur */
 			spin_unlock(&fs_info->buffer_lock);
@@ -4137,12 +4137,12 @@ static int try_release_subpage_extent_buffer(struct page *page)
 	 * Finally to check if we have cleared folio private, as if we have
 	 * released all ebs in the page, the folio private should be cleared now.
 	 */
-	spin_lock(&page->mapping->i_private_lock);
-	if (!folio_test_private(page_folio(page)))
+	spin_lock(&folio->mapping->i_private_lock);
+	if (!folio_test_private(folio))
 		ret = 1;
 	else
 		ret = 0;
-	spin_unlock(&page->mapping->i_private_lock);
+	spin_unlock(&folio->mapping->i_private_lock);
 	return ret;
 }
@@ -4153,7 +4153,7 @@ int try_release_extent_buffer(struct page *page)
 	struct extent_buffer *eb;
 
 	if (page_to_fs_info(page)->nodesize < PAGE_SIZE)
-		return try_release_subpage_extent_buffer(page);
+		return try_release_subpage_extent_buffer(page_folio(page));
 
 	/*
	 * We need to make sure nobody is changing folio private, as we rely on
Reference in New Issue
Block a user