erofs: fix erofs_insert_workgroup() lockref usage
As Linus pointed out [1], lockref_put_return() is fundamentally
designed to be something that can fail. It behaves as a fastpath-only
thing, and the failure case needs to be handled anyway.
Actually, since the new pcluster was just allocated without being
populated, it won't be accessed by others until it is inserted into
the XArray, so the lockref helpers are actually unneeded here.
Let's just set the proper reference count when initializing.
[1] https://lore.kernel.org/r/CAHk-=whCga8BeQnJ3ZBh_Hfm9ctba_wpF444LpwRybVNMzO6Dw@mail.gmail.com
Fixes: 7674a42f35 ("erofs: use struct lockref to replace handcrafted approach")
Reviewed-by: Chao Yu <chao@kernel.org>
Link: https://lore.kernel.org/r/20231031060524.1103921-1-hsiangkao@linux.alibaba.com
Signed-off-by: Gao Xiang <hsiangkao@linux.alibaba.com>
This commit is contained in:
+1
-7
@@ -77,12 +77,7 @@ struct erofs_workgroup *erofs_insert_workgroup(struct super_block *sb,
|
||||
struct erofs_sb_info *const sbi = EROFS_SB(sb);
|
||||
struct erofs_workgroup *pre;
|
||||
|
||||
/*
|
||||
* Bump up before making this visible to others for the XArray in order
|
||||
* to avoid potential UAF without serialized by xa_lock.
|
||||
*/
|
||||
lockref_get(&grp->lockref);
|
||||
|
||||
DBG_BUGON(grp->lockref.count < 1);
|
||||
repeat:
|
||||
xa_lock(&sbi->managed_pslots);
|
||||
pre = __xa_cmpxchg(&sbi->managed_pslots, grp->index,
|
||||
@@ -96,7 +91,6 @@ repeat:
|
||||
cond_resched();
|
||||
goto repeat;
|
||||
}
|
||||
lockref_put_return(&grp->lockref);
|
||||
grp = pre;
|
||||
}
|
||||
xa_unlock(&sbi->managed_pslots);
|
||||
|
||||
@@ -796,6 +796,7 @@ static int z_erofs_register_pcluster(struct z_erofs_decompress_frontend *fe)
|
||||
return PTR_ERR(pcl);
|
||||
|
||||
spin_lock_init(&pcl->obj.lockref.lock);
|
||||
pcl->obj.lockref.count = 1; /* one ref for this request */
|
||||
pcl->algorithmformat = map->m_algorithmformat;
|
||||
pcl->length = 0;
|
||||
pcl->partial = true;
|
||||
|
||||
Reference in New Issue
Block a user