media: rockchip: hdmirx: add dma-fence support for low-latency mode

Signed-off-by: Dingxian Wen <shawn.wen@rock-chips.com>
Change-Id: I1d86ace09f73e7794b6e682a1438ad691a24ab59
This commit is contained in:
Dingxian Wen
2023-07-19 15:06:15 +08:00
parent 6d8b5b33d5
commit 588c040ed1
@@ -9,6 +9,7 @@
#include <linux/cpufreq.h>
#include <linux/debugfs.h>
#include <linux/delay.h>
#include <linux/dma-fence.h>
#include <linux/dma-mapping.h>
#include <linux/extcon-provider.h>
#include <linux/fs.h>
@@ -29,6 +30,7 @@
#include <linux/rk_hdmirx_config.h>
#include <linux/rockchip/rockchip_sip.h>
#include <linux/seq_file.h>
#include <linux/sync_file.h>
#include <linux/v4l2-dv-timings.h>
#include <linux/workqueue.h>
#include <media/cec.h>
@@ -52,7 +54,7 @@
static int debug;
module_param(debug, int, 0644);
MODULE_PARM_DESC(debug, "debug level (0-3)");
MODULE_PARM_DESC(debug, "debug level (0-4)");
static bool low_latency;
module_param(low_latency, bool, 0644);
@@ -147,6 +149,12 @@ struct hdmirx_reg_table {
enum hdmirx_reg_attr attr;
};
/* Per-device fence timeline: allocates and numbers driver dma-fences. */
struct hdmirx_fence_context {
	u64 context;	/* timeline id from dma_fence_context_alloc() */
	u64 seqno;	/* last sequence number handed out (monotonically increasing) */
	spinlock_t spinlock;	/* fence lock handed to dma_fence_init() */
};
struct hdmirx_buffer {
struct vb2_v4l2_buffer vb;
struct list_head queue;
@@ -181,6 +189,12 @@ struct hdmirx_stream {
u32 irq_stat;
};
/* Tracks one allocated fence: list linkage, the fence itself, and its fd. */
struct hdmirx_fence {
	struct list_head fence_list;	/* entry in qbuf_fence_list_head or done_fence_list_head */
	struct dma_fence *fence;
	int fence_fd;	/* fd installed via sync_file, handed to userspace */
};
struct rk_hdmirx_dev {
struct cec_notifier *cec_notifier;
struct cpufreq_policy *policy;
@@ -207,6 +221,7 @@ struct rk_hdmirx_dev {
struct hdmirx_audiostate audio_state;
struct extcon_dev *extcon;
struct hdmirx_cec *cec;
struct hdmirx_fence_context fence_ctx;
struct mutex stream_lock;
struct mutex work_lock;
struct pm_qos_request pm_qos;
@@ -218,6 +233,9 @@ struct rk_hdmirx_dev {
struct regmap *grf;
struct regmap *vo1_grf;
struct rk_hdmirx_hdcp *hdcp;
struct hdmirx_fence *hdmirx_fence;
struct list_head qbuf_fence_list_head;
struct list_head done_fence_list_head;
void __iomem *regs;
int edid_version;
int audio_present;
@@ -255,6 +273,7 @@ struct rk_hdmirx_dev {
u8 edid[EDID_BLOCK_SIZE * 2];
hdmi_codec_plugged_cb plugged_cb;
spinlock_t rst_lock;
spinlock_t fence_lock;
};
static const unsigned int hdmirx_extcon_cable[] = {
@@ -417,6 +436,64 @@ static u32 hdmirx_readl(struct rk_hdmirx_dev *hdmirx_dev, int reg)
return val;
}
/*
 * dma_fence_ops callback used for both the driver name and the timeline
 * name: every hdmirx fence simply reports the driver name; no per-fence
 * state is consulted.
 */
static const char *hdmirx_fence_get_name(struct dma_fence *fence)
{
	return RK_HDMIRX_DRVNAME;
}
/*
 * Minimal fence ops: only the name callbacks are provided; fences are
 * signalled explicitly by the driver (dma_fence_signal), so no
 * enable_signaling/release hooks are needed.
 */
static const struct dma_fence_ops hdmirx_fence_ops = {
	.get_driver_name = hdmirx_fence_get_name,
	.get_timeline_name = hdmirx_fence_get_name,
};
/*
 * One-time setup of the device fence timeline: initialize the fence lock
 * and reserve a fresh dma-fence context id.  seqno starts at zero from
 * the zeroed device structure.
 */
static void hdmirx_fence_context_init(struct hdmirx_fence_context *fence_ctx)
{
	spin_lock_init(&fence_ctx->spinlock);
	fence_ctx->context = dma_fence_context_alloc(1);
}
/*
 * Allocate and initialize one dma-fence on the device timeline.
 *
 * Returns the new fence, or ERR_PTR(-EINVAL) for a NULL context /
 * ERR_PTR(-ENOMEM) on allocation failure.  The caller owns the initial
 * reference and must drop it with dma_fence_put().
 */
static struct dma_fence *hdmirx_dma_fence_alloc(struct hdmirx_fence_context *fence_ctx)
{
	struct dma_fence *new_fence;

	if (!fence_ctx) {
		pr_err("fence_context is NULL!\n");
		return ERR_PTR(-EINVAL);
	}

	new_fence = kzalloc(sizeof(*new_fence), GFP_KERNEL);
	if (new_fence == NULL)
		return ERR_PTR(-ENOMEM);

	/* Next sequence number on this timeline; fence_lock serializes waiters. */
	dma_fence_init(new_fence, &hdmirx_fence_ops, &fence_ctx->spinlock,
		       fence_ctx->context, ++fence_ctx->seqno);

	return new_fence;
}
/*
 * Wrap a fence in a sync_file and install it as a process-local fd.
 *
 * Returns the installed fd (>= 0) on success, -EINVAL for a NULL fence,
 * or a negative errno from fd allocation / -ENOMEM from sync_file
 * creation.  On success the fd owns a sync_file reference to the fence;
 * userspace releases it with close().
 */
static int hdmirx_dma_fence_get_fd(struct dma_fence *fence)
{
	struct sync_file *file;
	int fd;

	if (fence == NULL)
		return -EINVAL;

	fd = get_unused_fd_flags(O_CLOEXEC);
	if (fd < 0)
		return fd;

	file = sync_file_create(fence);
	if (file == NULL) {
		/* fd was never installed, so releasing the slot is correct here. */
		put_unused_fd(fd);
		return -ENOMEM;
	}

	fd_install(fd, file->file);

	return fd;
}
static void hdmirx_reset_dma(struct rk_hdmirx_dev *hdmirx_dev)
{
unsigned long lock_flags = 0;
@@ -1939,6 +2016,39 @@ static int hdmirx_queue_setup(struct vb2_queue *queue,
return 0;
}
/*
 * Allocate one fence + fd per queued buffer (low-latency mode) and append
 * the tracking node to qbuf_fence_list_head; line_flag_int_handler later
 * moves it to the done list when the matching frame completes.
 *
 * Failures are logged and the qbuf proceeds without a fence.
 */
static void hdmirx_qbuf_alloc_fence(struct rk_hdmirx_dev *hdmirx_dev)
{
	struct v4l2_device *v4l2_dev = &hdmirx_dev->v4l2_dev;
	struct hdmirx_fence *hdmirx_fence;
	struct dma_fence *fence;
	unsigned long lock_flags = 0;
	int fence_fd;

	fence = hdmirx_dma_fence_alloc(&hdmirx_dev->fence_ctx);
	if (IS_ERR(fence)) {
		v4l2_err(v4l2_dev, "%s: alloc fence failed!\n", __func__);
		return;
	}

	/*
	 * Allocate the tracking node BEFORE installing an fd: once
	 * fd_install() has run, the fd belongs to the process and an
	 * allocation failure could no longer be unwound (the original
	 * code leaked an orphaned, never-signalled fd in that case).
	 */
	hdmirx_fence = kzalloc(sizeof(struct hdmirx_fence), GFP_KERNEL);
	if (!hdmirx_fence) {
		dma_fence_put(fence);
		v4l2_err(v4l2_dev, "%s: failed to alloc hdmirx_fence!\n", __func__);
		return;
	}

	fence_fd = hdmirx_dma_fence_get_fd(fence);
	if (fence_fd < 0) {
		kfree(hdmirx_fence);
		dma_fence_put(fence);
		v4l2_err(v4l2_dev, "%s: failed to get fence fd!\n", __func__);
		return;
	}

	hdmirx_fence->fence = fence;
	hdmirx_fence->fence_fd = fence_fd;

	spin_lock_irqsave(&hdmirx_dev->fence_lock, lock_flags);
	list_add_tail(&hdmirx_fence->fence_list, &hdmirx_dev->qbuf_fence_list_head);
	spin_unlock_irqrestore(&hdmirx_dev->fence_lock, lock_flags);

	v4l2_dbg(3, debug, v4l2_dev, "%s: fence:%p, fence_fd:%d\n",
		 __func__, fence, fence_fd);
}
/*
* The vb2_buffer are stored in hdmirx_buffer, in order to unify
* mplane buffer and none-mplane buffer.
@@ -1953,6 +2063,8 @@ static void hdmirx_buf_queue(struct vb2_buffer *vb)
const struct hdmirx_output_fmt *out_fmt;
unsigned long lock_flags = 0;
int i;
struct rk_hdmirx_dev *hdmirx_dev;
struct v4l2_device *v4l2_dev;
if (vb == NULL) {
pr_err("%s: vb null pointer err!\n", __func__);
@@ -1966,6 +2078,9 @@ static void hdmirx_buf_queue(struct vb2_buffer *vb)
pixm = &stream->pixm;
out_fmt = stream->out_fmt;
hdmirx_dev = stream->hdmirx_dev;
v4l2_dev = &hdmirx_dev->v4l2_dev;
memset(hdmirx_buf->buff_addr, 0, sizeof(hdmirx_buf->buff_addr));
/*
* If mplanes > 1, every c-plane has its own m-plane,
@@ -1987,9 +2102,58 @@ static void hdmirx_buf_queue(struct vb2_buffer *vb)
}
}
v4l2_dbg(4, debug, v4l2_dev, "qbuf fd:%d\n", vb->planes[0].m.fd);
spin_lock_irqsave(&stream->vbq_lock, lock_flags);
list_add_tail(&hdmirx_buf->queue, &stream->buf_head);
spin_unlock_irqrestore(&stream->vbq_lock, lock_flags);
if (low_latency)
hdmirx_qbuf_alloc_fence(hdmirx_dev);
}
/*
 * Release all fence bookkeeping: signal and free the "current" fence
 * exposed to userspace, then free every node left on the qbuf and done
 * lists.  Called from return_all_buffers() when streaming stops.
 */
static void hdmirx_free_fence(struct rk_hdmirx_dev *hdmirx_dev)
{
	unsigned long lock_flags = 0;
	struct hdmirx_fence *vb_fence, *done_fence;
	struct v4l2_device *v4l2_dev = &hdmirx_dev->v4l2_dev;
	LIST_HEAD(local_list);

	/* Wake any waiter on the currently-exposed fence before freeing it. */
	spin_lock_irqsave(&hdmirx_dev->fence_lock, lock_flags);
	if (hdmirx_dev->hdmirx_fence) {
		v4l2_dbg(2, debug, v4l2_dev, "%s: signal hdmirx_fence fd:%d\n",
			 __func__, hdmirx_dev->hdmirx_fence->fence_fd);
		dma_fence_signal(hdmirx_dev->hdmirx_fence->fence);
		dma_fence_put(hdmirx_dev->hdmirx_fence->fence);
		kfree(hdmirx_dev->hdmirx_fence);
		hdmirx_dev->hdmirx_fence = NULL;
	}
	/* Detach the whole qbuf list under the lock, then free it unlocked. */
	list_replace_init(&hdmirx_dev->qbuf_fence_list_head, &local_list);
	spin_unlock_irqrestore(&hdmirx_dev->fence_lock, lock_flags);

	while (!list_empty(&local_list)) {
		vb_fence = list_first_entry(&local_list, struct hdmirx_fence, fence_list);
		list_del(&vb_fence->fence_list);
		v4l2_dbg(2, debug, v4l2_dev, "%s: free qbuf_fence fd:%d\n",
			 __func__, vb_fence->fence_fd);
		/*
		 * NOTE(review): these fences are freed without being
		 * signalled, so a userspace waiter on the fd would block
		 * indefinitely; also put_unused_fd() is normally only valid
		 * for fds that were never fd_install()ed, which these were
		 * in hdmirx_dma_fence_get_fd() -- TODO confirm intended
		 * cleanup (an installed fd is released by close()).
		 */
		dma_fence_put(vb_fence->fence);
		put_unused_fd(vb_fence->fence_fd);
		kfree(vb_fence);
	}

	/* Same detach-then-free pattern for fences already marked done. */
	spin_lock_irqsave(&hdmirx_dev->fence_lock, lock_flags);
	list_replace_init(&hdmirx_dev->done_fence_list_head, &local_list);
	spin_unlock_irqrestore(&hdmirx_dev->fence_lock, lock_flags);

	while (!list_empty(&local_list)) {
		done_fence = list_first_entry(&local_list, struct hdmirx_fence, fence_list);
		list_del(&done_fence->fence_list);
		v4l2_dbg(2, debug, v4l2_dev, "%s: free done_fence fd:%d\n",
			 __func__, done_fence->fence_fd);
		/* NOTE(review): same unsignalled-free / put_unused_fd concern as above. */
		dma_fence_put(done_fence->fence);
		put_unused_fd(done_fence->fence_fd);
		kfree(done_fence);
	}
}
static void return_all_buffers(struct hdmirx_stream *stream,
@@ -1997,6 +2161,7 @@ static void return_all_buffers(struct hdmirx_stream *stream,
{
struct hdmirx_buffer *buf;
unsigned long flags;
struct rk_hdmirx_dev *hdmirx_dev = stream->hdmirx_dev;
spin_lock_irqsave(&stream->vbq_lock, flags);
if (stream->curr_buf)
@@ -2015,6 +2180,8 @@ static void return_all_buffers(struct hdmirx_stream *stream,
spin_lock_irqsave(&stream->vbq_lock, flags);
}
spin_unlock_irqrestore(&stream->vbq_lock, flags);
hdmirx_free_fence(hdmirx_dev);
}
static void hdmirx_stop_streaming(struct vb2_queue *queue)
@@ -2188,6 +2355,57 @@ static int hdmirx_get_hdcp_auth_status(struct rk_hdmirx_dev *hdmirx_dev)
return val ? 1 : 0;
}
/*
 * Promote the oldest done-list fence to be the device's "current" fence
 * (hdmirx_dev->hdmirx_fence).  If a previous current fence was never
 * signalled by the irq path, signal and free it first so its waiters do
 * not hang.  Called after DQBUF; no-op when the done list is empty.
 */
static void hdmirx_dqbuf_get_done_fence(struct rk_hdmirx_dev *hdmirx_dev)
{
	unsigned long lock_flags = 0;
	struct hdmirx_fence *done_fence;
	struct v4l2_device *v4l2_dev = &hdmirx_dev->v4l2_dev;

	/* Pop the oldest done fence, if any. */
	spin_lock_irqsave(&hdmirx_dev->fence_lock, lock_flags);
	if (!list_empty(&hdmirx_dev->done_fence_list_head)) {
		done_fence = list_first_entry(&hdmirx_dev->done_fence_list_head,
					      struct hdmirx_fence, fence_list);
		list_del(&done_fence->fence_list);
	} else {
		done_fence = NULL;
	}
	spin_unlock_irqrestore(&hdmirx_dev->fence_lock, lock_flags);

	if (done_fence) {
		/*
		 * NOTE(review): the lock is dropped and re-taken between the
		 * list_del() above and the promotion below, so a concurrent
		 * caller could interleave here -- presumably DQBUF paths are
		 * serialized by the vb2 queue lock; confirm.
		 */
		spin_lock_irqsave(&hdmirx_dev->fence_lock, lock_flags);
		if (hdmirx_dev->hdmirx_fence) {
			/* Previous fence missed its irq signal: signal it now. */
			v4l2_err(v4l2_dev, "%s: last fence not signal, signal now!\n", __func__);
			dma_fence_signal(hdmirx_dev->hdmirx_fence->fence);
			dma_fence_put(hdmirx_dev->hdmirx_fence->fence);
			v4l2_dbg(2, debug, v4l2_dev, "%s: signal fence:%p, old_fd:%d\n",
				 __func__,
				 hdmirx_dev->hdmirx_fence->fence,
				 hdmirx_dev->hdmirx_fence->fence_fd);
			kfree(hdmirx_dev->hdmirx_fence);
			hdmirx_dev->hdmirx_fence = NULL;
		}
		hdmirx_dev->hdmirx_fence = done_fence;
		spin_unlock_irqrestore(&hdmirx_dev->fence_lock, lock_flags);
		v4l2_dbg(3, debug, v4l2_dev, "%s: fence:%p, fence_fd:%d\n",
			 __func__, done_fence->fence, done_fence->fence_fd);
	}
}
/*
 * VIDIOC_DQBUF handler: rejects dequeue attempts while no timing is
 * locked, delegates to vb2, and then advances the fence bookkeeping.
 */
static int hdmirx_dqbuf(struct file *file, void *priv, struct v4l2_buffer *p)
{
	int ret;
	struct hdmirx_stream *stream = video_drvdata(file);
	struct rk_hdmirx_dev *hdmirx_dev = stream->hdmirx_dev;

	if (!hdmirx_dev->get_timing)
		return -EINVAL;

	ret = vb2_ioctl_dqbuf(file, priv, p);
	/*
	 * Only rotate the done-fence state when a buffer was actually
	 * dequeued; on failure (e.g. -EAGAIN in non-blocking mode) the
	 * fence must stay queued for the next successful DQBUF.  The
	 * original code advanced it unconditionally.
	 */
	if (!ret)
		hdmirx_dqbuf_get_done_fence(hdmirx_dev);

	return ret;
}
static long hdmirx_ioctl_default(struct file *file, void *fh,
bool valid_prio, unsigned int cmd, void *arg)
{
@@ -2306,7 +2524,7 @@ static const struct v4l2_ioctl_ops hdmirx_v4l2_ioctl_ops = {
.vidioc_create_bufs = vb2_ioctl_create_bufs,
.vidioc_qbuf = vb2_ioctl_qbuf,
.vidioc_expbuf = vb2_ioctl_expbuf,
.vidioc_dqbuf = vb2_ioctl_dqbuf,
.vidioc_dqbuf = hdmirx_dqbuf,
.vidioc_prepare_buf = vb2_ioctl_prepare_buf,
.vidioc_streamon = vb2_ioctl_streamon,
.vidioc_streamoff = vb2_ioctl_streamoff,
@@ -2380,10 +2598,12 @@ static void process_audio_change(struct rk_hdmirx_dev *hdmirx_dev)
static void process_signal_change(struct rk_hdmirx_dev *hdmirx_dev)
{
unsigned long lock_flags = 0;
struct hdmirx_stream *stream = &hdmirx_dev->stream;
const struct v4l2_event evt_signal_lost = {
.type = RK_HDMIRX_V4L2_EVENT_SIGNAL_LOST,
};
struct v4l2_device *v4l2_dev = &hdmirx_dev->v4l2_dev;
hdmirx_dev->get_timing = false;
sip_hdmirx_config(HDMIRX_INFO_NOTIFY, 0, DMA_CONFIG6, 0);
@@ -2401,6 +2621,18 @@ static void process_signal_change(struct rk_hdmirx_dev *hdmirx_dev)
v4l2_event_queue(&stream->vdev, &evt_signal_lost);
if (hdmirx_dev->hdcp && hdmirx_dev->hdcp->hdcp_stop)
hdmirx_dev->hdcp->hdcp_stop(hdmirx_dev->hdcp);
spin_lock_irqsave(&hdmirx_dev->fence_lock, lock_flags);
if (hdmirx_dev->hdmirx_fence) {
dma_fence_signal(hdmirx_dev->hdmirx_fence->fence);
dma_fence_put(hdmirx_dev->hdmirx_fence->fence);
v4l2_dbg(2, debug, v4l2_dev, "%s: signal fence:%p, old_fd:%d\n",
__func__,
hdmirx_dev->hdmirx_fence->fence,
hdmirx_dev->hdmirx_fence->fence_fd);
kfree(hdmirx_dev->hdmirx_fence);
hdmirx_dev->hdmirx_fence = NULL;
}
spin_unlock_irqrestore(&hdmirx_dev->fence_lock, lock_flags);
schedule_delayed_work_on(hdmirx_dev->bound_cpu,
&hdmirx_dev->delayed_work_res_change,
msecs_to_jiffies(1000));
@@ -2633,6 +2865,8 @@ static void hdmirx_vb_done(struct hdmirx_stream *stream,
{
const struct hdmirx_output_fmt *fmt = stream->out_fmt;
u32 i;
struct rk_hdmirx_dev *hdmirx_dev = stream->hdmirx_dev;
struct v4l2_device *v4l2_dev = &hdmirx_dev->v4l2_dev;
/* Dequeue a filled buffer */
for (i = 0; i < fmt->mplanes; i++) {
@@ -2642,10 +2876,12 @@ static void hdmirx_vb_done(struct hdmirx_stream *stream,
vb_done->vb2_buf.timestamp = ktime_get_ns();
vb2_buffer_done(&vb_done->vb2_buf, VB2_BUF_STATE_DONE);
v4l2_dbg(4, debug, v4l2_dev, "vb_done fd:%d", vb_done->vb2_buf.planes[0].m.fd);
}
static void dma_idle_int_handler(struct rk_hdmirx_dev *hdmirx_dev, bool *handled)
{
unsigned long lock_flags = 0;
struct hdmirx_stream *stream = &hdmirx_dev->stream;
struct v4l2_device *v4l2_dev = &hdmirx_dev->v4l2_dev;
struct v4l2_dv_timings timings = hdmirx_dev->timings;
@@ -2656,8 +2892,21 @@ static void dma_idle_int_handler(struct rk_hdmirx_dev *hdmirx_dev, bool *handled
v4l2_dbg(1, debug, v4l2_dev,
"%s: last time have no line_flag_irq\n", __func__);
if (low_latency)
if (low_latency) {
spin_lock_irqsave(&hdmirx_dev->fence_lock, lock_flags);
if (hdmirx_dev->hdmirx_fence) {
dma_fence_signal(hdmirx_dev->hdmirx_fence->fence);
dma_fence_put(hdmirx_dev->hdmirx_fence->fence);
v4l2_dbg(3, debug, v4l2_dev, "%s: signal fence:%p, old_fd:%d\n",
__func__,
hdmirx_dev->hdmirx_fence->fence,
hdmirx_dev->hdmirx_fence->fence_fd);
kfree(hdmirx_dev->hdmirx_fence);
hdmirx_dev->hdmirx_fence = NULL;
}
spin_unlock_irqrestore(&hdmirx_dev->fence_lock, lock_flags);
goto DMA_IDLE_OUT;
}
if (stream->line_flag_int_cnt <= FILTER_FRAME_CNT)
goto DMA_IDLE_OUT;
@@ -2671,6 +2920,9 @@ static void dma_idle_int_handler(struct rk_hdmirx_dev *hdmirx_dev, bool *handled
if (vb_done) {
vb_done->vb2_buf.timestamp = ktime_get_ns();
vb_done->sequence = stream->frame_idx;
/* config userbits 0 or 0xffffffff as invalid fence_fd*/
memset(vb_done->timecode.userbits, 0xff,
sizeof(vb_done->timecode.userbits));
hdmirx_vb_done(stream, vb_done);
stream->frame_idx++;
if (stream->frame_idx == 30)
@@ -2692,8 +2944,44 @@ DMA_IDLE_OUT:
*handled = true;
}
/*
 * Move the oldest qbuf fence to the done list and publish its fd to
 * userspace in the returned buffer's timecode.userbits.  If no fence is
 * queued, userbits is set to 0xffffffff to mark an invalid fence_fd.
 */
static void hdmirx_add_fence_to_vb_done(struct hdmirx_stream *stream,
					struct vb2_v4l2_buffer *vb_done)
{
	unsigned long lock_flags = 0;
	struct hdmirx_fence *vb_fence = NULL;
	struct rk_hdmirx_dev *hdmirx_dev = stream->hdmirx_dev;
	struct v4l2_device *v4l2_dev = &hdmirx_dev->v4l2_dev;

	spin_lock_irqsave(&hdmirx_dev->fence_lock, lock_flags);
	if (!list_empty(&hdmirx_dev->qbuf_fence_list_head)) {
		vb_fence = list_first_entry(&hdmirx_dev->qbuf_fence_list_head,
					    struct hdmirx_fence, fence_list);
		/* Transfer qbuf list -> done list atomically under the lock. */
		list_move_tail(&vb_fence->fence_list,
			       &hdmirx_dev->done_fence_list_head);
	}
	spin_unlock_irqrestore(&hdmirx_dev->fence_lock, lock_flags);

	if (vb_fence) {
		/*
		 * Pass the fence_fd to userspace through timecode.userbits.
		 * userbits is a kernel-space u8[4] inside vb2_v4l2_buffer,
		 * so it must be written with memcpy(): the original
		 * put_user() treated it as a __user pointer and, with a u8
		 * target, would have copied at most one byte of the int fd.
		 */
		memcpy(vb_done->timecode.userbits, &vb_fence->fence_fd,
		       sizeof(vb_fence->fence_fd));
		v4l2_dbg(3, debug, v4l2_dev, "%s: fence:%p, fence_fd:%d\n",
			 __func__, vb_fence->fence, vb_fence->fence_fd);
	} else {
		/* config userbits 0 or 0xffffffff as invalid fence_fd */
		memset(vb_done->timecode.userbits, 0xff,
		       sizeof(vb_done->timecode.userbits));
		v4l2_err(v4l2_dev, "%s: failed to get fence fd!\n", __func__);
	}
}
static void line_flag_int_handler(struct rk_hdmirx_dev *hdmirx_dev, bool *handled)
{
unsigned long lock_flags = 0;
struct hdmirx_stream *stream = &hdmirx_dev->stream;
struct v4l2_device *v4l2_dev = &hdmirx_dev->v4l2_dev;
struct v4l2_dv_timings timings = hdmirx_dev->timings;
@@ -2716,6 +3004,19 @@ static void line_flag_int_handler(struct rk_hdmirx_dev *hdmirx_dev, bool *handle
if ((bt->interlaced != V4L2_DV_INTERLACED) ||
(stream->line_flag_int_cnt % 2 == 0)) {
spin_lock_irqsave(&hdmirx_dev->fence_lock, lock_flags);
if (hdmirx_dev->hdmirx_fence) {
dma_fence_signal(hdmirx_dev->hdmirx_fence->fence);
dma_fence_put(hdmirx_dev->hdmirx_fence->fence);
v4l2_dbg(2, debug, v4l2_dev, "%s: signal last fence:%p, old_fd:%d\n",
__func__,
hdmirx_dev->hdmirx_fence->fence,
hdmirx_dev->hdmirx_fence->fence_fd);
kfree(hdmirx_dev->hdmirx_fence);
hdmirx_dev->hdmirx_fence = NULL;
}
spin_unlock_irqrestore(&hdmirx_dev->fence_lock, lock_flags);
if (!stream->next_buf) {
spin_lock(&stream->vbq_lock);
if (!list_empty(&stream->buf_head)) {
@@ -2740,6 +3041,7 @@ static void line_flag_int_handler(struct rk_hdmirx_dev *hdmirx_dev, bool *handle
vb_done = &stream->curr_buf->vb;
if (vb_done) {
hdmirx_add_fence_to_vb_done(stream, vb_done);
vb_done->vb2_buf.timestamp = ktime_get_ns();
vb_done->sequence = stream->frame_idx;
hdmirx_vb_done(stream, vb_done);
@@ -2751,6 +3053,19 @@ static void line_flag_int_handler(struct rk_hdmirx_dev *hdmirx_dev, bool *handle
stream->curr_buf = stream->next_buf;
stream->next_buf = NULL;
}
} else {
v4l2_dbg(3, debug, v4l2_dev,
"%s: next_buf NULL, drop the frame!\n", __func__);
}
if (stream->curr_buf) {
v4l2_dbg(4, debug, v4l2_dev, "%s: curr_fd:%d\n",
__func__, stream->curr_buf->vb.vb2_buf.planes[0].m.fd);
}
if (stream->next_buf) {
v4l2_dbg(4, debug, v4l2_dev, "%s: next_fd:%d\n",
__func__, stream->next_buf->vb.vb2_buf.planes[0].m.fd);
}
} else {
v4l2_dbg(3, debug, v4l2_dev, "%s: interlace:%d, line_flag_int_cnt:%d\n",
@@ -4324,6 +4639,9 @@ static int hdmirx_probe(struct platform_device *pdev)
mutex_init(&hdmirx_dev->stream_lock);
mutex_init(&hdmirx_dev->work_lock);
spin_lock_init(&hdmirx_dev->rst_lock);
spin_lock_init(&hdmirx_dev->fence_lock);
INIT_LIST_HEAD(&hdmirx_dev->qbuf_fence_list_head);
INIT_LIST_HEAD(&hdmirx_dev->done_fence_list_head);
INIT_WORK(&hdmirx_dev->work_wdt_config,
hdmirx_work_wdt_config);
INIT_DELAYED_WORK(&hdmirx_dev->delayed_work_hotplug,
@@ -4507,7 +4825,8 @@ static int hdmirx_probe(struct platform_device *pdev)
hdmirx_register_hdcp(dev, hdmirx_dev, hdmirx_dev->hdcp_enable);
hdmirx_register_debugfs(hdmirx_dev->dev, hdmirx_dev);
hdmirx_fence_context_init(&hdmirx_dev->fence_ctx);
hdmirx_dev->hdmirx_fence = NULL;
hdmirx_dev->initialized = true;
dev_info(dev, "%s driver probe ok!\n", dev_name(dev));
@@ -4538,7 +4857,6 @@ static int hdmirx_remove(struct platform_device *pdev)
struct rk_hdmirx_dev *hdmirx_dev = dev_get_drvdata(dev);
debugfs_remove_recursive(hdmirx_dev->debugfs_dir);
cpu_latency_qos_remove_request(&hdmirx_dev->pm_qos);
cancel_delayed_work(&hdmirx_dev->delayed_work_hotplug);
cancel_delayed_work(&hdmirx_dev->delayed_work_res_change);