dma-buf: support to cache dma-buf-attachment

This patch tries to fix this issue by caching the dma-buf attachments and
storing the cache list in the dtor_data field of the dma-buf structure.
A dma-buf attach with caching enabled first looks for an existing cached
attachment and returns that instance if one is found.

This patch also registers the detach operation as the dtor of the dma-buf
structure via dma_buf_set_destructor.

Change-Id: I4778c3328825f6c04f5d2608994e62fe3478bf1b
Signed-off-by: Jianqun Xu <jay.xu@rock-chips.com>
This commit is contained in:
Jianqun Xu
2021-07-09 10:00:21 +08:00
committed by Tao Huang
parent 9900aebfb0
commit 97516c0fee
4 changed files with 212 additions and 0 deletions
+8
View File
@@ -1,6 +1,14 @@
# SPDX-License-Identifier: GPL-2.0-only
menu "DMABUF options"
config DMABUF_CACHE
bool "DMABUF cache attachment"
default ARCH_ROCKCHIP
depends on NO_GKI
help
This option enables storing dma-buf attachments in a cache list and
destroying them from a destructor callback registered on the dma-buf.
config SYNC_FILE
bool "Explicit Synchronization Framework"
default n
+1
View File
@@ -3,6 +3,7 @@ obj-y := dma-buf.o dma-fence.o dma-fence-array.o dma-fence-chain.o \
dma-resv.o seqno-fence.o
obj-$(CONFIG_DMABUF_HEAPS) += dma-heap.o
obj-$(CONFIG_DMABUF_HEAPS) += heaps/
obj-$(CONFIG_DMABUF_CACHE) += dma-cache.o
obj-$(CONFIG_SYNC_FILE) += sync_file.o
obj-$(CONFIG_SW_SYNC) += sw_sync.o
obj-$(CONFIG_SW_SYNC_DEBUG) += sync_debug.o
+171
View File
@@ -0,0 +1,171 @@
// SPDX-License-Identifier: GPL-2.0
/*
* Copyright (c) 2021 Rockchip Electronics Co. Ltd.
*/
#include <linux/slab.h>
#include <linux/dma-buf.h>
#undef CONFIG_DMABUF_CACHE
#include <linux/dma-cache.h>
/*
 * Per-dma-buf cache container, stored in dmabuf->dtor_data.
 * Holds every cached attachment for one buffer.
 */
struct dma_buf_cache_list {
	struct list_head head;	/* list of struct dma_buf_cache entries */
	struct mutex lock;	/* protects @head */
};
/*
 * One cached attachment: the attachment itself plus (optionally) its
 * cached mapping. @sg_table is NULL until the first map request.
 */
struct dma_buf_cache {
	struct list_head list;			/* linked into dma_buf_cache_list.head */
	struct dma_buf_attachment *attach;	/* the cached attachment */
	enum dma_data_direction direction;	/* direction of the cached mapping */
	struct sg_table *sg_table;		/* cached mapping, NULL if unmapped */
};
/*
 * dma-buf destructor: tear down every cached attachment for @dmabuf.
 * Unmaps any cached sg_table, detaches, and frees the cache list itself.
 * Runs when the dma-buf is released, so deferred detach/unmap happen here.
 */
static int dma_buf_cache_destructor(struct dma_buf *dmabuf, void *dtor_data)
{
	struct dma_buf_cache_list *data = dmabuf->dtor_data;
	struct dma_buf_cache *entry, *next;

	mutex_lock(&data->lock);
	list_for_each_entry_safe(entry, next, &data->head, list) {
		if (entry->sg_table)
			dma_buf_unmap_attachment(entry->attach,
						 entry->sg_table,
						 entry->direction);
		dma_buf_detach(dmabuf, entry->attach);
		list_del(&entry->list);
		kfree(entry);
	}
	mutex_unlock(&data->lock);

	kfree(data);
	return 0;
}
/*
 * Look up the cache entry for @attach on its dma-buf.
 * Returns NULL when the buffer is not managed by our destructor
 * (i.e. it has no cache list) or when @attach is not cached.
 */
static struct dma_buf_cache *
dma_buf_cache_get_cache(struct dma_buf_attachment *attach)
{
	struct dma_buf *dmabuf = attach->dmabuf;
	struct dma_buf_cache_list *data;
	struct dma_buf_cache *entry;
	struct dma_buf_cache *found = NULL;

	/* Only buffers using our destructor carry a cache list. */
	if (dmabuf->dtor != dma_buf_cache_destructor)
		return NULL;

	data = dmabuf->dtor_data;

	mutex_lock(&data->lock);
	list_for_each_entry(entry, &data->head, list) {
		if (entry->attach == attach) {
			found = entry;
			break;
		}
	}
	mutex_unlock(&data->lock);

	return found;
}
/*
 * Detach @attach from @dmabuf unless it is cached.
 * Cached attachments are kept alive and only released by the
 * buffer's destructor, so this is a no-op for them.
 */
void dma_buf_cache_detach(struct dma_buf *dmabuf,
			  struct dma_buf_attachment *attach)
{
	if (!dma_buf_cache_get_cache(attach))
		dma_buf_detach(dmabuf, attach);
}
EXPORT_SYMBOL(dma_buf_cache_detach);
EXPORT_SYMBOL(dma_buf_cache_detach);
/*
 * Attach @dev to @dmabuf, reusing a cached attachment when one exists.
 *
 * On first use the cache list is allocated and installed as the buffer's
 * destructor data. If the buffer already has a foreign destructor, caching
 * is impossible and we fall back to a plain dma_buf_attach().
 *
 * Returns the (possibly cached) attachment or an ERR_PTR on failure.
 */
struct dma_buf_attachment *dma_buf_cache_attach(struct dma_buf *dmabuf,
						struct device *dev)
{
	struct dma_buf_cache_list *data;
	struct dma_buf_cache *cache;
	struct dma_buf_attachment *attach;

	if (!dmabuf->dtor) {
		data = kzalloc(sizeof(*data), GFP_KERNEL);
		if (!data)
			return ERR_PTR(-ENOMEM);

		mutex_init(&data->lock);
		INIT_LIST_HEAD(&data->head);
		dma_buf_set_destructor(dmabuf, dma_buf_cache_destructor, data);
	}

	/* Foreign destructor in place: cannot cache, attach directly. */
	if (dmabuf->dtor && dmabuf->dtor != dma_buf_cache_destructor)
		return dma_buf_attach(dmabuf, dev);

	data = dmabuf->dtor_data;

	mutex_lock(&data->lock);
	list_for_each_entry(cache, &data->head, list) {
		if (cache->attach->dev == dev) {
			/* Already attached */
			mutex_unlock(&data->lock);
			return cache->attach;
		}
	}
	mutex_unlock(&data->lock);

	cache = kzalloc(sizeof(*cache), GFP_KERNEL);
	if (!cache)
		return ERR_PTR(-ENOMEM);

	attach = dma_buf_attach(dmabuf, dev);
	if (IS_ERR(attach)) {
		/*
		 * Fix: dma_buf_attach() returns an ERR_PTR on failure.
		 * Previously the error pointer was cached unchecked, the
		 * cache entry leaked on the error path, and the destructor
		 * would later hand the ERR_PTR to dma_buf_detach().
		 */
		kfree(cache);
		return attach;
	}

	/* Cache attachment */
	cache->attach = attach;

	mutex_lock(&data->lock);
	list_add(&cache->list, &data->head);
	mutex_unlock(&data->lock);

	return cache->attach;
}
EXPORT_SYMBOL(dma_buf_cache_attach);
/*
 * Unmap @sg_table from @attach unless the attachment is cached.
 * Cached mappings stay mapped; the buffer's destructor unmaps them.
 */
void dma_buf_cache_unmap_attachment(struct dma_buf_attachment *attach,
				    struct sg_table *sg_table,
				    enum dma_data_direction direction)
{
	if (!dma_buf_cache_get_cache(attach))
		dma_buf_unmap_attachment(attach, sg_table, direction);
}
EXPORT_SYMBOL(dma_buf_cache_unmap_attachment);
struct sg_table *dma_buf_cache_map_attachment(struct dma_buf_attachment *attach,
enum dma_data_direction direction)
{
struct dma_buf_cache *cache;
cache = dma_buf_cache_get_cache(attach);
if (!cache)
return dma_buf_map_attachment(attach, direction);
if (cache->sg_table) {
/* Already mapped */
if (cache->direction == direction)
return cache->sg_table;
/* Different directions */
dma_buf_unmap_attachment(attach, cache->sg_table,
cache->direction);
}
/* Cache map */
cache->sg_table = dma_buf_map_attachment(attach, direction);
cache->direction = direction;
if (!cache->sg_table)
return ERR_PTR(-ENOMEM);
return cache->sg_table;
}
EXPORT_SYMBOL(dma_buf_cache_map_attachment);
+32
View File
@@ -0,0 +1,32 @@
/* SPDX-License-Identifier: GPL-2.0 */
/*
 * Copyright (c) 2021 Rockchip Electronics Co. Ltd.
 *
 * Cached dma-buf attachment API. Attachments and mappings are kept in a
 * per-buffer cache and torn down by the dma-buf destructor instead of on
 * every detach/unmap (see drivers/dma-buf/dma-cache.c).
 */
#ifndef _LINUX_DMA_BUF_CACHE_H
#define _LINUX_DMA_BUF_CACHE_H
#include <linux/dma-buf.h>
/* No-op for cached attachments; plain dma_buf_detach() otherwise. */
extern void dma_buf_cache_detach(struct dma_buf *dmabuf,
				 struct dma_buf_attachment *attach);
/* No-op for cached mappings; plain dma_buf_unmap_attachment() otherwise. */
extern void dma_buf_cache_unmap_attachment(struct dma_buf_attachment *attach,
					   struct sg_table *sg_table,
					   enum dma_data_direction direction);
/* Attach @dev, reusing a cached attachment for the same device if any. */
extern struct dma_buf_attachment *
dma_buf_cache_attach(struct dma_buf *dmabuf, struct device *dev);
/* Map @attach, reusing a cached mapping for the same direction if any. */
extern struct sg_table *
dma_buf_cache_map_attachment(struct dma_buf_attachment *attach,
			     enum dma_data_direction direction);
#ifdef CONFIG_DMABUF_CACHE
/*
 * Replace dma-buf apis to cached apis.
 * NOTE: dma-cache.c must #undef CONFIG_DMABUF_CACHE before including this
 * header so its own calls resolve to the real dma-buf functions.
 */
#define dma_buf_attach dma_buf_cache_attach
#define dma_buf_detach dma_buf_cache_detach
#define dma_buf_map_attachment dma_buf_cache_map_attachment
#define dma_buf_unmap_attachment dma_buf_cache_unmap_attachment
#endif
#endif /* _LINUX_DMA_BUF_CACHE_H */