dma: k3-udma: fix deferred work in udma_ring_irq_handler()

Due to realtime constraints it is not acceptable to use workqueues,
specifically delayed_work items, to check after some time if the pending
DMA TX request is really done. Although schedule_delayed_work() is called
with delay=0 from the irq handler, the work item is not executed directly
and instead will be executed later in the context of a normal-priority
kworker thread.

So in order to fix this the workqueue mechanism will be replaced with a
tasklet, which is guaranteed to be scheduled within the context of the
high priority kthread for the irq handler. Small delays within the
callback udma_check_tx_completion() are done with udelay() and longer
delays are implemented with a kernel timer (struct timer_list).

In future the tasklets can be replaced with a new type of workqueue
WQ_BH [1] where its work items will be run in softirq context [2].

[1] https://lwn.net/Articles/960041/
[2] https://lwn.net/ml/linux-kernel/20240130091300.2968534-1-tj@kernel.org/

Signed-off-by: Tobias Biehl <tobias.biehl@wago.com>
This commit is contained in:
Tobias Biehl
2024-08-15 15:51:48 +02:00
parent bcdcba7cad
commit fe05209529
+41 -27
View File
@@ -241,7 +241,8 @@ enum udma_chan_state {
};
struct udma_tx_drain {
struct delayed_work work;
struct tasklet_struct tasklet;
struct timer_list timer;
ktime_t tstamp;
u32 residue;
};
@@ -1083,11 +1084,9 @@ static bool udma_is_desc_really_done(struct udma_chan *uc, struct udma_desc *d)
return true;
}
static void udma_check_tx_completion(struct work_struct *work)
static void udma_check_tx_completion(struct tasklet_struct *t)
{
struct udma_chan *uc = container_of(work, typeof(*uc),
tx_drain.work.work);
bool desc_done = true;
struct udma_chan *uc = container_of(t, typeof(*uc), tx_drain.tasklet);
u32 residue_diff;
ktime_t time_diff;
unsigned long delay;
@@ -1101,10 +1100,14 @@ static void udma_check_tx_completion(struct work_struct *work)
* Get current residue and time stamp or see if
* transfer is complete
*/
desc_done = udma_is_desc_really_done(uc, uc->desc);
}
if (udma_is_desc_really_done(uc, uc->desc)) {
struct udma_desc *d = uc->desc;
if (!desc_done) {
udma_decrement_byte_counters(uc, d->residue);
udma_start(uc);
vchan_cookie_complete(&d->vd);
break;
}
/*
* Find the time delta and residue delta w.r.t
* previous poll
@@ -1121,27 +1124,36 @@ static void udma_check_tx_completion(struct work_struct *work)
*/
delay = (time_diff / residue_diff) *
uc->tx_drain.residue;
udelay(ktime_to_us(delay));
} else {
/* No progress, check again in 1 second */
schedule_delayed_work(&uc->tx_drain.work, HZ);
mod_timer(&uc->tx_drain.timer, jiffies + 1*HZ);
break;
}
usleep_range(ktime_to_us(delay),
ktime_to_us(delay) + 10);
continue;
}
}
}
if (uc->desc) {
/*
 * udma_delayed_check - timer callback for a TX transfer that showed no
 * progress when udma_check_tx_completion() last inspected it.
 *
 * Runs in timer (softirq) context and therefore must not sleep.  If the
 * pending descriptor has completed in the meantime, finish it off and
 * restart the channel; otherwise warn once and re-arm the timer to check
 * again in one second.
 *
 * Fix vs. shown text: the stray "break;" statements sat outside any
 * loop/switch (residue of the old workqueue loop body) and would not
 * compile; they are removed here.
 */
static void udma_delayed_check(struct timer_list *t)
{
	struct udma_chan *uc = container_of(t, typeof(*uc), tx_drain.timer);

	if (uc->desc) {
		/*
		 * Get current residue and time stamp or see if
		 * transfer is complete
		 */
		if (udma_is_desc_really_done(uc, uc->desc)) {
			struct udma_desc *d = uc->desc;

			udma_decrement_byte_counters(uc, d->residue);
			udma_start(uc);
			vchan_cookie_complete(&d->vd);
		} else {
			/* Still no progress after 1 second */
			WARN_ON_ONCE(1);
			mod_timer(&uc->tx_drain.timer, jiffies + 1*HZ);
		}
	}
}
@@ -1192,8 +1204,7 @@ static irqreturn_t udma_ring_irq_handler(int irq, void *data)
udma_start(uc);
vchan_cookie_complete(&d->vd);
} else {
schedule_delayed_work(&uc->tx_drain.work,
0);
tasklet_schedule(&uc->tx_drain.tasklet);
}
}
} else {
@@ -2545,8 +2556,9 @@ static int bcdma_alloc_chan_resources(struct dma_chan *chan)
udma_reset_rings(uc);
INIT_DELAYED_WORK_ONSTACK(&uc->tx_drain.work,
udma_check_tx_completion);
timer_setup(&uc->tx_drain.timer, udma_delayed_check, 0);
tasklet_setup(&uc->tx_drain.tasklet, udma_check_tx_completion);
return 0;
err_irq_free:
@@ -2709,8 +2721,8 @@ static int pktdma_alloc_chan_resources(struct dma_chan *chan)
udma_reset_rings(uc);
INIT_DELAYED_WORK_ONSTACK(&uc->tx_drain.work,
udma_check_tx_completion);
timer_setup(&uc->tx_drain.timer, udma_delayed_check, 0);
tasklet_setup(&uc->tx_drain.tasklet, udma_check_tx_completion);
if (uc->tchan)
dev_dbg(ud->dev,
@@ -3924,7 +3936,8 @@ static int udma_terminate_all(struct dma_chan *chan)
uc->terminated_desc = uc->desc;
uc->desc = NULL;
uc->terminated_desc->terminated = true;
cancel_delayed_work(&uc->tx_drain.work);
del_timer(&uc->tx_drain.timer);
tasklet_kill(&uc->tx_drain.tasklet);
}
uc->paused = false;
@@ -3958,7 +3971,8 @@ static void udma_synchronize(struct dma_chan *chan)
if (udma_is_chan_running(uc))
dev_warn(uc->ud->dev, "chan%d refused to stop!\n", uc->id);
cancel_delayed_work_sync(&uc->tx_drain.work);
del_timer_sync(&uc->tx_drain.timer);
tasklet_kill(&uc->tx_drain.tasklet);
udma_reset_rings(uc);
}
@@ -4046,7 +4060,8 @@ static void udma_free_chan_resources(struct dma_chan *chan)
udma_reset_rings(uc);
}
cancel_delayed_work_sync(&uc->tx_drain.work);
del_timer_sync(&uc->tx_drain.timer);
tasklet_kill(&uc->tx_drain.tasklet);
if (uc->irq_num_ring > 0) {
free_irq(uc->irq_num_ring, uc);
@@ -5528,7 +5543,6 @@ static int udma_probe(struct platform_device *pdev)
/* Use custom vchan completion handling */
tasklet_setup(&uc->vc.task, udma_vchan_complete);
init_completion(&uc->teardown_completed);
INIT_DELAYED_WORK(&uc->tx_drain.work, udma_check_tx_completion);
}
/* Configure the copy_align to the maximum burst size the device supports */