io_uring-6.15-20250424

-----BEGIN PGP SIGNATURE-----
 
 iQJEBAABCAAuFiEEwPw5LcreJtl1+l5K99NY+ylx4KYFAmgK7tUQHGF4Ym9lQGtl
 cm5lbC5kawAKCRD301j7KXHgpimDD/0VhVviLWT/29v4fDdjqK7uFcnAa1QgT36B
 CyezvPtvY3XB3dwUXflwLjEmBz+j8yPRmJrOmG0B9KfdVgxI8nCIdszKE89oGP1E
 Kkja4o0N2p+Mozgdpj5pDM0oPKHXM8i/SABH9+Ize077E8uH47+gFz0iVkq6sn0+
 fmXzg6yhDaTB1IWrAN/nezpg14auFBbJB8dEE0LY/whGlOaX21WvpvCfaOPxwtRd
 78GI9jAulQZuNZ8YG+kQCXA477zeu3fxQ/VqFrDbYj97J8Z/M4U7ZtjS31tBiyJn
 iPyh/A/gjs+cq++zKhca1eK/eHabL1Rf5E6KCyvR+nEzUwmBsIO9GwWUH5QDWo2N
 r5RmRmag7eYlhOskf8W4RbgV+5EEORyqySivsEN0lveHUWOxWUreoiUdZBUYxrYP
 BeB5A+K+22mdIHWAAHZnDhbnv43VhU+Tr5oYHwcvdKI9dmnQPJBFGgQ41wk9GPN+
 eTLMYFqWVGtljXIVraIE/CIl7iYHQJw98UpMfWAlqTLWaT2s3iyux+uYblpK5F6R
 syLqZhHlMvFHqEJpd26973AX0MFwJCCBtgl5Bwcu7D0G47WV1mqKLbHjBtbEDwCJ
 ffCcM9y1Apj7CR1RL6aWXbjVHUxsyJUujgj2FUesO4xxRxKxkoF7SYpipE0BK5LD
 IAA/qp3WfA==
 =piqO
 -----END PGP SIGNATURE-----

Merge tag 'io_uring-6.15-20250424' of git://git.kernel.dk/linux

Pull io_uring fixes from Jens Axboe:

 - Fix an older bug for handling of fallback task_work, when the task is
   exiting. Found by code inspection while reworking cancelation.

 - Fix duplicate flushing in one of the CQE posting helpers.

* tag 'io_uring-6.15-20250424' of git://git.kernel.dk/linux:
  io_uring: fix 'sync' handling of io_fallback_tw()
  io_uring: don't duplicate flushing in io_req_post_cqe
This commit is contained in:
Linus Torvalds 2025-04-25 11:31:47 -07:00
commit 0537fbb6ec

@@ -872,10 +872,15 @@ bool io_req_post_cqe(struct io_kiocb *req, s32 res, u32 cflags)
 	lockdep_assert(!io_wq_current_is_worker());
 	lockdep_assert_held(&ctx->uring_lock);
 
-	__io_cq_lock(ctx);
-	posted = io_fill_cqe_aux(ctx, req->cqe.user_data, res, cflags);
+	if (!ctx->lockless_cq) {
+		spin_lock(&ctx->completion_lock);
+		posted = io_fill_cqe_aux(ctx, req->cqe.user_data, res, cflags);
+		spin_unlock(&ctx->completion_lock);
+	} else {
+		posted = io_fill_cqe_aux(ctx, req->cqe.user_data, res, cflags);
+	}
+
 	ctx->submit_state.cq_flush = true;
-	__io_cq_unlock_post(ctx);
 	return posted;
 }
@@ -1078,21 +1083,22 @@ static __cold void __io_fallback_tw(struct llist_node *node, bool sync)
 	while (node) {
 		req = container_of(node, struct io_kiocb, io_task_work.node);
 		node = node->next;
-		if (sync && last_ctx != req->ctx) {
+		if (last_ctx != req->ctx) {
 			if (last_ctx) {
-				flush_delayed_work(&last_ctx->fallback_work);
+				if (sync)
+					flush_delayed_work(&last_ctx->fallback_work);
 				percpu_ref_put(&last_ctx->refs);
 			}
 			last_ctx = req->ctx;
 			percpu_ref_get(&last_ctx->refs);
 		}
-		if (llist_add(&req->io_task_work.node,
-			      &req->ctx->fallback_llist))
-			schedule_delayed_work(&req->ctx->fallback_work, 1);
+		if (llist_add(&req->io_task_work.node, &last_ctx->fallback_llist))
+			schedule_delayed_work(&last_ctx->fallback_work, 1);
 	}
 
 	if (last_ctx) {
-		flush_delayed_work(&last_ctx->fallback_work);
+		if (sync)
+			flush_delayed_work(&last_ctx->fallback_work);
 		percpu_ref_put(&last_ctx->refs);
 	}
 }