dma-buf: rk_heaps: drop early fixup for rk_dma_cma

The early fixup for CMA reserved memory performs the DMA contiguous
memory remap, which makes the pages from the CMA area eligible to be
activated into the buddy system. But the buddy system requires that a
page belong to a single pageblock for possible migrate or compact
operations; the pageblock size is usually more than 1 MiB, so the CMA
base and size must be at least 1 MiB aligned.
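
For illustration only (not part of this commit), a minimal userspace
sketch of the alignment mm/cma.c enforces for a buddy-activated CMA
area, assuming 4 KiB pages and the common MAX_ORDER = 11 /
pageblock_order = 9 configuration:

  /* hypothetical standalone demo; names mirror the kernel macros */
  #include <stdio.h>

  #define PAGE_SHIFT	12
  #define PAGE_SIZE	(1UL << PAGE_SHIFT)
  #define MAX_ORDER	11	/* assumed kernel config value */
  #define PAGEBLOCK_ORDER	9	/* assumed pageblock_order */

  int main(void)
  {
  	unsigned long order = (MAX_ORDER - 1 > PAGEBLOCK_ORDER) ?
  			(MAX_ORDER - 1) : PAGEBLOCK_ORDER;
  	unsigned long align = PAGE_SIZE << order;

  	/* PAGE_SIZE << max(MAX_ORDER - 1, pageblock_order) = 4 MiB here */
  	printf("buddy CMA alignment: %lu MiB\n", align >> 20);
  	return 0;
  }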

On the mini kernel, CMA_INACTIVE keeps the CMA area from being
activated into the buddy system, so no migrate or compact will be
applied to pages from the CMA area. In this case it is possible to
drop the early fixup and align the CMA base and size to PAGE_SIZE
only. This also saves the several pages that used to hold the page
tables (PTEs) built for these pages by the remap.
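
As a rough illustration of that page-table saving, a sketch assuming
4 KiB pages and 8-byte PTEs (512 PTEs per table page, so one PTE page
maps 2 MiB); the 16 MiB area size is a made-up example:

  #include <stdio.h>

  #define PAGE_SIZE	4096UL
  #define PTE_SIZE	8UL
  /* memory covered by one page full of PTEs: 512 * 4 KiB = 2 MiB */
  #define PTE_PAGE_COVERS	((PAGE_SIZE / PTE_SIZE) * PAGE_SIZE)

  int main(void)
  {
  	unsigned long cma_size = 16UL << 20;
  	unsigned long pte_pages = (cma_size + PTE_PAGE_COVERS - 1) /
  			PTE_PAGE_COVERS;

  	/* pages of PTE memory the remap would have consumed: 8 */
  	printf("PTE pages saved: %lu\n", pte_pages);
  	return 0;
  }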

Reviewed-by: Simon Xue <xxm@rock-chips.com>
Tested-by: Zhichao Yu <zhichao.yu@rock-chips.com>
Signed-off-by: Jianqun Xu <jay.xu@rock-chips.com>
Change-Id: I307e27aaa6e9bcb9cd86414ae7de0f7cfef7b706
Author: Jianqun Xu
Date: 2022-03-30 12:07:15 +08:00
Parent: aa0f99d430
Commit: a0dfa37898
2 changed files with 20 additions and 5 deletions

File 1 (+5 -5):
@@ -7,10 +7,7 @@
  */
 #include <linux/cma.h>
 #include <linux/device.h>
 #include <linux/dma-map-ops.h>
 #include <linux/err.h>
 #include <linux/syscalls.h>
 #include "rk-dma-heap.h"
@@ -59,14 +56,17 @@ int __init rk_dma_heap_cma_setup(void)
 	if (rk_dma_heap_base)
 		fix = true;
 
-	ret = cma_declare_contiguous(rk_dma_heap_base, size, 0x0, 0, 0, fix,
-				     "rk-dma-heap-cma", &rk_dma_heap_cma);
+	ret = cma_declare_contiguous(rk_dma_heap_base, PAGE_ALIGN(size), 0x0,
+				     PAGE_SIZE, 0, fix, "rk-dma-heap-cma",
+				     &rk_dma_heap_cma);
 	if (ret)
 		return ret;
 
+#if !IS_ENABLED(CONFIG_CMA_INACTIVE)
 	/* Architecture specific contiguous memory fixup. */
 	dma_contiguous_early_fixup(cma_get_base(rk_dma_heap_cma),
 				   cma_get_size(rk_dma_heap_cma));
+#endif
 
 	return 0;
 }
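
For reference, the new call's arguments against the upstream
cma_declare_contiguous() prototype (base, size, limit, alignment,
order_per_bit, fixed, name, res_cma); the annotations are added here
and are not part of the diff:

  ret = cma_declare_contiguous(rk_dma_heap_base,	/* base              */
  			     PAGE_ALIGN(size),		/* size              */
  			     0x0,			/* limit: none       */
  			     PAGE_SIZE,			/* alignment (was 0) */
  			     0,				/* order_per_bit     */
  			     fix,			/* fixed base?       */
  			     "rk-dma-heap-cma",		/* name              */
  			     &rk_dma_heap_cma);		/* res_cma           */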

File 2 (+15):
@@ -181,7 +181,9 @@ int __init cma_init_reserved_mem(phys_addr_t base, phys_addr_t size,
 				 struct cma **res_cma)
 {
 	struct cma *cma;
+#if !IS_ENABLED(CONFIG_CMA_INACTIVE)
 	phys_addr_t alignment;
+#endif
 
 	/* Sanity checks */
 	if (cma_area_count == ARRAY_SIZE(cma_areas)) {
@@ -192,6 +194,7 @@ int __init cma_init_reserved_mem(phys_addr_t base, phys_addr_t size,
 	if (!size || !memblock_is_region_reserved(base, size))
 		return -EINVAL;
 
+#if !IS_ENABLED(CONFIG_CMA_INACTIVE)
 	/* ensure minimal alignment required by mm core */
 	alignment = PAGE_SIZE <<
 			max_t(unsigned long, MAX_ORDER - 1, pageblock_order);
@@ -202,6 +205,7 @@ int __init cma_init_reserved_mem(phys_addr_t base, phys_addr_t size,
 	if (ALIGN(base, alignment) != base || ALIGN(size, alignment) != size)
 		return -EINVAL;
+#endif
 
 	/*
 	 * Each reserved area must be initialised later, when more kernel
@@ -275,6 +279,7 @@ int __init cma_declare_contiguous_nid(phys_addr_t base,
 	if (alignment && !is_power_of_2(alignment))
 		return -EINVAL;
 
+#if !IS_ENABLED(CONFIG_CMA_INACTIVE)
 	/*
 	 * Sanitise input arguments.
 	 * Pages both ends in CMA area could be merged into adjacent unmovable
@@ -289,6 +294,7 @@ int __init cma_declare_contiguous_nid(phys_addr_t base,
 			&base, &alignment);
 		goto err;
 	}
+#endif
 	base = ALIGN(base, alignment);
 	size = ALIGN(size, alignment);
 	limit &= ~(alignment - 1);
@@ -386,14 +392,23 @@ int __init cma_declare_contiguous_nid(phys_addr_t base,
 	if (ret)
 		goto free_mem;
 
+#if !IS_ENABLED(CONFIG_CMA_INACTIVE)
 	pr_info("Reserved %ld MiB at %pa\n", (unsigned long)size / SZ_1M,
 		&base);
+#else
+	pr_info("Reserved %ld KiB at %pa\n", (unsigned long)size / SZ_1K,
+		&base);
+#endif
 	return 0;
 
 free_mem:
 	memblock_free(base, size);
 err:
+#if !IS_ENABLED(CONFIG_CMA_INACTIVE)
 	pr_err("Failed to reserve %ld MiB\n", (unsigned long)size / SZ_1M);
+#else
+	pr_err("Failed to reserve %ld KiB\n", (unsigned long)size / SZ_1K);
+#endif
 	return ret;
 }
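
A small userspace sketch (assuming 4 KiB pages) of what the now
conditional alignment check in cma_init_reserved_mem() buys: a 1 MiB
reservation at a 1 MiB boundary fails the buddy-derived 4 MiB
alignment but passes the PAGE_SIZE-only check kept under CMA_INACTIVE:

  #include <stdio.h>

  #define PAGE_SIZE	4096UL
  #define ALIGN(x, a)	(((x) + ((a) - 1)) & ~((a) - 1))

  /* mirrors the -EINVAL alignment test in cma_init_reserved_mem() */
  static int aligned_ok(unsigned long base, unsigned long size,
  		      unsigned long a)
  {
  	return ALIGN(base, a) == base && ALIGN(size, a) == size;
  }

  int main(void)
  {
  	unsigned long base = 1UL << 20, size = 1UL << 20;

  	printf("buddy (4 MiB): %d\n", aligned_ok(base, size, PAGE_SIZE << 10));
  	printf("inactive (4 KiB): %d\n", aligned_ok(base, size, PAGE_SIZE));
  	return 0;
  }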