zram: switch to new zsmalloc object mapping API
Use the new read/write zsmalloc object API. For cases when an RO-mapped
object spans two physical pages (which requires a temporary buffer),
compression streams now carry around one extra physical page.

Link: https://lkml.kernel.org/r/20250303022425.285971-16-senozhatsky@chromium.org
Signed-off-by: Sergey Senozhatsky <senozhatsky@chromium.org>
Cc: Hillf Danton <hdanton@sina.com>
Cc: Kairui Song <ryncsn@gmail.com>
Cc: Minchan Kim <minchan@kernel.org>
Cc: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
Cc: Yosry Ahmed <yosry.ahmed@linux.dev>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
This commit is contained in:
Committed by: Andrew Morton
Parent: 44f7641349
Commit: 82f91900c7
@@ -45,6 +45,7 @@ static const struct zcomp_ops *backends[] = {
|
||||
static void zcomp_strm_free(struct zcomp *comp, struct zcomp_strm *zstrm)
|
||||
{
|
||||
comp->ops->destroy_ctx(&zstrm->ctx);
|
||||
vfree(zstrm->local_copy);
|
||||
vfree(zstrm->buffer);
|
||||
zstrm->buffer = NULL;
|
||||
}
|
||||
@@ -57,12 +58,13 @@ static int zcomp_strm_init(struct zcomp *comp, struct zcomp_strm *zstrm)
|
||||
if (ret)
|
||||
return ret;
|
||||
|
||||
zstrm->local_copy = vzalloc(PAGE_SIZE);
|
||||
/*
|
||||
* allocate 2 pages. 1 for compressed data, plus 1 extra for the
|
||||
* case when compressed size is larger than the original one
|
||||
*/
|
||||
zstrm->buffer = vzalloc(2 * PAGE_SIZE);
|
||||
if (!zstrm->buffer) {
|
||||
if (!zstrm->buffer || !zstrm->local_copy) {
|
||||
zcomp_strm_free(comp, zstrm);
|
||||
return -ENOMEM;
|
||||
}
|
||||
|
||||
@@ -34,6 +34,8 @@ struct zcomp_strm {
|
||||
struct mutex lock;
|
||||
/* compression buffer */
|
||||
void *buffer;
|
||||
/* local copy of handle memory */
|
||||
void *local_copy;
|
||||
struct zcomp_ctx ctx;
|
||||
};
|
||||
|
||||
|
||||
@@ -1561,11 +1561,11 @@ static int read_incompressible_page(struct zram *zram, struct page *page,
|
||||
void *src, *dst;
|
||||
|
||||
handle = zram_get_handle(zram, index);
|
||||
src = zs_map_object(zram->mem_pool, handle, ZS_MM_RO);
|
||||
src = zs_obj_read_begin(zram->mem_pool, handle, NULL);
|
||||
dst = kmap_local_page(page);
|
||||
copy_page(dst, src);
|
||||
kunmap_local(dst);
|
||||
zs_unmap_object(zram->mem_pool, handle);
|
||||
zs_obj_read_end(zram->mem_pool, handle, src);
|
||||
|
||||
return 0;
|
||||
}
|
||||
@@ -1583,11 +1583,11 @@ static int read_compressed_page(struct zram *zram, struct page *page, u32 index)
|
||||
prio = zram_get_priority(zram, index);
|
||||
|
||||
zstrm = zcomp_stream_get(zram->comps[prio]);
|
||||
src = zs_map_object(zram->mem_pool, handle, ZS_MM_RO);
|
||||
src = zs_obj_read_begin(zram->mem_pool, handle, zstrm->local_copy);
|
||||
dst = kmap_local_page(page);
|
||||
ret = zcomp_decompress(zram->comps[prio], zstrm, src, size, dst);
|
||||
kunmap_local(dst);
|
||||
zs_unmap_object(zram->mem_pool, handle);
|
||||
zs_obj_read_end(zram->mem_pool, handle, src);
|
||||
zcomp_stream_put(zstrm);
|
||||
|
||||
return ret;
|
||||
@@ -1683,7 +1683,7 @@ static int write_incompressible_page(struct zram *zram, struct page *page,
|
||||
u32 index)
|
||||
{
|
||||
unsigned long handle;
|
||||
void *src, *dst;
|
||||
void *src;
|
||||
|
||||
/*
|
||||
* This function is called from preemptible context so we don't need
|
||||
@@ -1701,11 +1701,9 @@ static int write_incompressible_page(struct zram *zram, struct page *page,
|
||||
return -ENOMEM;
|
||||
}
|
||||
|
||||
dst = zs_map_object(zram->mem_pool, handle, ZS_MM_WO);
|
||||
src = kmap_local_page(page);
|
||||
memcpy(dst, src, PAGE_SIZE);
|
||||
zs_obj_write(zram->mem_pool, handle, src, PAGE_SIZE);
|
||||
kunmap_local(src);
|
||||
zs_unmap_object(zram->mem_pool, handle);
|
||||
|
||||
zram_slot_lock(zram, index);
|
||||
zram_set_flag(zram, index, ZRAM_HUGE);
|
||||
@@ -1726,7 +1724,7 @@ static int zram_write_page(struct zram *zram, struct page *page, u32 index)
|
||||
int ret = 0;
|
||||
unsigned long handle;
|
||||
unsigned int comp_len;
|
||||
void *dst, *mem;
|
||||
void *mem;
|
||||
struct zcomp_strm *zstrm;
|
||||
unsigned long element;
|
||||
bool same_filled;
|
||||
@@ -1773,11 +1771,8 @@ static int zram_write_page(struct zram *zram, struct page *page, u32 index)
|
||||
return -ENOMEM;
|
||||
}
|
||||
|
||||
dst = zs_map_object(zram->mem_pool, handle, ZS_MM_WO);
|
||||
|
||||
memcpy(dst, zstrm->buffer, comp_len);
|
||||
zs_obj_write(zram->mem_pool, handle, zstrm->buffer, comp_len);
|
||||
zcomp_stream_put(zstrm);
|
||||
zs_unmap_object(zram->mem_pool, handle);
|
||||
|
||||
zram_slot_lock(zram, index);
|
||||
zram_set_handle(zram, index, handle);
|
||||
@@ -1882,7 +1877,7 @@ static int recompress_slot(struct zram *zram, u32 index, struct page *page,
|
||||
unsigned int comp_len_new;
|
||||
unsigned int class_index_old;
|
||||
unsigned int class_index_new;
|
||||
void *src, *dst;
|
||||
void *src;
|
||||
int ret = 0;
|
||||
|
||||
handle_old = zram_get_handle(zram, index);
|
||||
@@ -1993,12 +1988,9 @@ static int recompress_slot(struct zram *zram, u32 index, struct page *page,
|
||||
return PTR_ERR((void *)handle_new);
|
||||
}
|
||||
|
||||
dst = zs_map_object(zram->mem_pool, handle_new, ZS_MM_WO);
|
||||
memcpy(dst, zstrm->buffer, comp_len_new);
|
||||
zs_obj_write(zram->mem_pool, handle_new, zstrm->buffer, comp_len_new);
|
||||
zcomp_stream_put(zstrm);
|
||||
|
||||
zs_unmap_object(zram->mem_pool, handle_new);
|
||||
|
||||
zram_free_page(zram, index);
|
||||
zram_set_handle(zram, index, handle_new);
|
||||
zram_set_obj_size(zram, index, comp_len_new);
|
||||
|
||||
Reference in New Issue
Block a user