7ac07a26de
Patch series "zram: Support multiple compression streams", v5.

This series adds support for multiple compression streams.  The main idea
is that different compression algorithms have different characteristics,
and zram may benefit from using a combination of them: a default algorithm
that is faster but has a lower compression ratio, and a secondary
algorithm that achieves a higher compression ratio at the price of slower
compression/decompression.

There are several use-cases for this functionality:

- huge pages re-compression: zstd or deflate can successfully compress
  huge pages (~50% of huge pages on my synthetic ChromeOS tests), IOW
  pages that lzo was not able to compress.

- idle pages re-compression: idle/cold pages sit in the memory and we may
  reduce zsmalloc memory usage if we recompress those idle pages.

Userspace has a number of ways to control the behavior and impact of zram
recompression: what type of pages should be recompressed, size watermarks,
etc.  Please refer to the documentation patch.

This patch (of 13):

This patch turns the compression streams and compressor algorithm name
members of struct zram into arrays, so that multiple compression streams
can be supported (in the following patches).

The patch uses a rather explicit API for compressor selection:

- Get primary (default) compression stream
	zcomp_stream_get(zram->comps[ZRAM_PRIMARY_COMP])
- Get secondary compression stream
	zcomp_stream_get(zram->comps[ZRAM_SECONDARY_COMP])

We use a similar API for compression stream put().

At this point we always have just one compression stream, since
CONFIG_ZRAM_MULTI_COMP is not yet defined.

Link: https://lkml.kernel.org/r/20221109115047.2921851-1-senozhatsky@chromium.org
Link: https://lkml.kernel.org/r/20221109115047.2921851-2-senozhatsky@chromium.org
Signed-off-by: Sergey Senozhatsky <senozhatsky@chromium.org>
Acked-by: Minchan Kim <minchan@kernel.org>
Cc: Minchan Kim <minchan@kernel.org>
Cc: Nitin Gupta <ngupta@vflare.org>
Cc: Suleiman Souhlal <suleiman@google.com>
Cc: Nhat Pham <nphamcs@gmail.com>
Cc: Alexey Romanov <avromanov@sberdevices.ru>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
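To illustrate the per-priority API described above, here is a minimal
sketch of a caller.  The names zcomp_stream_get(), zcomp_stream_put(),
zcomp_compress(), zram->comps[] and ZRAM_PRIMARY_COMP/ZRAM_SECONDARY_COMP
come from this series; the wrapper function itself is hypothetical:

/*
 * Hypothetical helper (not part of this patch): compress a page with the
 * primary (default) stream.  A later recompression path would do the same
 * with ZRAM_SECONDARY_COMP.  Assumes "zram_drv.h" for struct zram and the
 * ZRAM_*_COMP indices introduced by this series.  A real caller would copy
 * the result out of zstrm->buffer before putting the stream.
 */
static int example_compress_primary(struct zram *zram, const void *src,
				    unsigned int *comp_len)
{
	struct zcomp_strm *zstrm;
	int ret;

	zstrm = zcomp_stream_get(zram->comps[ZRAM_PRIMARY_COMP]);
	ret = zcomp_compress(zstrm, src, comp_len);
	zcomp_stream_put(zram->comps[ZRAM_PRIMARY_COMP]);

	return ret;
}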
// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Copyright (C) 2014 Sergey Senozhatsky.
 */

#include <linux/kernel.h>
#include <linux/string.h>
#include <linux/err.h>
#include <linux/slab.h>
#include <linux/wait.h>
#include <linux/sched.h>
#include <linux/cpu.h>
#include <linux/crypto.h>

#include "zcomp.h"

static const char * const backends[] = {
#if IS_ENABLED(CONFIG_CRYPTO_LZO)
	"lzo",
	"lzo-rle",
#endif
#if IS_ENABLED(CONFIG_CRYPTO_LZ4)
	"lz4",
#endif
#if IS_ENABLED(CONFIG_CRYPTO_LZ4HC)
	"lz4hc",
#endif
#if IS_ENABLED(CONFIG_CRYPTO_842)
	"842",
#endif
#if IS_ENABLED(CONFIG_CRYPTO_ZSTD)
	"zstd",
#endif
};

static void zcomp_strm_free(struct zcomp_strm *zstrm)
{
	if (!IS_ERR_OR_NULL(zstrm->tfm))
		crypto_free_comp(zstrm->tfm);
	free_pages((unsigned long)zstrm->buffer, 1);
	zstrm->tfm = NULL;
	zstrm->buffer = NULL;
}

/*
 * Initialize zcomp_strm structure with ->tfm initialized by backend, and
 * ->buffer. Return a negative value on error.
 */
static int zcomp_strm_init(struct zcomp_strm *zstrm, struct zcomp *comp)
{
	zstrm->tfm = crypto_alloc_comp(comp->name, 0, 0);
	/*
	 * allocate 2 pages. 1 for compressed data, plus 1 extra for the
	 * case when compressed size is larger than the original one
	 */
	zstrm->buffer = (void *)__get_free_pages(GFP_KERNEL | __GFP_ZERO, 1);
	if (IS_ERR_OR_NULL(zstrm->tfm) || !zstrm->buffer) {
		zcomp_strm_free(zstrm);
		return -ENOMEM;
	}
	return 0;
}

bool zcomp_available_algorithm(const char *comp)
{
	/*
	 * Crypto does not ignore a trailing new line symbol,
	 * so make sure you don't supply a string containing
	 * one.
	 * This also means that we permit zcomp initialisation
	 * with any compressing algorithm known to crypto api.
	 */
	return crypto_has_comp(comp, 0, 0) == 1;
}

/* show available compressors */
ssize_t zcomp_available_show(const char *comp, char *buf)
{
	bool known_algorithm = false;
	ssize_t sz = 0;
	int i;

	for (i = 0; i < ARRAY_SIZE(backends); i++) {
		if (!strcmp(comp, backends[i])) {
			known_algorithm = true;
			sz += scnprintf(buf + sz, PAGE_SIZE - sz - 2,
					"[%s] ", backends[i]);
		} else {
			sz += scnprintf(buf + sz, PAGE_SIZE - sz - 2,
					"%s ", backends[i]);
		}
	}

	/*
	 * Out-of-tree module known to crypto api or a missing
	 * entry in `backends'.
	 */
	if (!known_algorithm && crypto_has_comp(comp, 0, 0) == 1)
		sz += scnprintf(buf + sz, PAGE_SIZE - sz - 2,
				"[%s] ", comp);

	sz += scnprintf(buf + sz, PAGE_SIZE - sz, "\n");
	return sz;
}

struct zcomp_strm *zcomp_stream_get(struct zcomp *comp)
{
	local_lock(&comp->stream->lock);
	return this_cpu_ptr(comp->stream);
}

void zcomp_stream_put(struct zcomp *comp)
{
	local_unlock(&comp->stream->lock);
}

int zcomp_compress(struct zcomp_strm *zstrm,
		   const void *src, unsigned int *dst_len)
{
	/*
	 * Our dst memory (zstrm->buffer) is always `2 * PAGE_SIZE' sized
	 * because sometimes we can end up having a bigger compressed data
	 * due to various reasons: for example compression algorithms tend
	 * to add some padding to the compressed buffer. Speaking of padding,
	 * comp algorithm `842' pads the compressed length to multiple of 8
	 * and returns -ENOSPC when the dst memory is not big enough, which
	 * is not something that ZRAM wants to see. We can handle the
	 * `compressed_size > PAGE_SIZE' case easily in ZRAM, but when we
	 * receive -ERRNO from the compressing backend we can't help it
	 * anymore. To make `842' happy we need to tell the exact size of
	 * the dst buffer, zram_drv will take care of the fact that
	 * compressed buffer is too big.
	 */
	*dst_len = PAGE_SIZE * 2;

	return crypto_comp_compress(zstrm->tfm,
				    src, PAGE_SIZE,
				    zstrm->buffer, dst_len);
}

int zcomp_decompress(struct zcomp_strm *zstrm,
		     const void *src, unsigned int src_len, void *dst)
{
	unsigned int dst_len = PAGE_SIZE;

	return crypto_comp_decompress(zstrm->tfm,
				      src, src_len,
				      dst, &dst_len);
}

int zcomp_cpu_up_prepare(unsigned int cpu, struct hlist_node *node)
{
	struct zcomp *comp = hlist_entry(node, struct zcomp, node);
	struct zcomp_strm *zstrm;
	int ret;

	zstrm = per_cpu_ptr(comp->stream, cpu);
	local_lock_init(&zstrm->lock);

	ret = zcomp_strm_init(zstrm, comp);
	if (ret)
		pr_err("Can't allocate a compression stream\n");
	return ret;
}

int zcomp_cpu_dead(unsigned int cpu, struct hlist_node *node)
{
	struct zcomp *comp = hlist_entry(node, struct zcomp, node);
	struct zcomp_strm *zstrm;

	zstrm = per_cpu_ptr(comp->stream, cpu);
	zcomp_strm_free(zstrm);
	return 0;
}

static int zcomp_init(struct zcomp *comp)
{
	int ret;

	comp->stream = alloc_percpu(struct zcomp_strm);
	if (!comp->stream)
		return -ENOMEM;

	ret = cpuhp_state_add_instance(CPUHP_ZCOMP_PREPARE, &comp->node);
	if (ret < 0)
		goto cleanup;
	return 0;

cleanup:
	free_percpu(comp->stream);
	return ret;
}

void zcomp_destroy(struct zcomp *comp)
{
	cpuhp_state_remove_instance(CPUHP_ZCOMP_PREPARE, &comp->node);
	free_percpu(comp->stream);
	kfree(comp);
}

/*
 * search available compressors for requested algorithm.
 * allocate new zcomp and initialize it. return compressing
 * backend pointer or ERR_PTR if things went bad. ERR_PTR(-EINVAL)
 * if requested algorithm is not supported, ERR_PTR(-ENOMEM) in
 * case of allocation error, or any other error potentially
 * returned by zcomp_init().
 */
struct zcomp *zcomp_create(const char *alg)
{
	struct zcomp *comp;
	int error;

	/*
	 * Crypto API will execute /sbin/modprobe if the compression module
	 * is not loaded yet. We must do it here, otherwise we are about to
	 * call /sbin/modprobe under CPU hot-plug lock.
	 */
	if (!zcomp_available_algorithm(alg))
		return ERR_PTR(-EINVAL);

	comp = kzalloc(sizeof(struct zcomp), GFP_KERNEL);
	if (!comp)
		return ERR_PTR(-ENOMEM);

	comp->name = alg;
	error = zcomp_init(comp);
	if (error) {
		kfree(comp);
		return ERR_PTR(error);
	}
	return comp;
}
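
For completeness, a rough end-to-end sketch of how a driver might exercise
the API in this file, from zcomp_create() to zcomp_destroy().  The wrapper
below is hypothetical and assumes the CPUHP_ZCOMP_PREPARE hotplug state has
already been registered, as zram_drv does before creating any backend:

/*
 * Hypothetical round trip through the zcomp API above: create a backend,
 * compress one page and decompress it back.  "out" must be PAGE_SIZE bytes;
 * error handling is abbreviated.
 */
static int zcomp_roundtrip_example(const char *alg, const void *page, void *out)
{
	struct zcomp *comp;
	struct zcomp_strm *zstrm;
	unsigned int comp_len;
	int ret;

	comp = zcomp_create(alg);	/* e.g. "lzo" or "zstd" */
	if (IS_ERR(comp))
		return PTR_ERR(comp);

	zstrm = zcomp_stream_get(comp);	/* locks the per-CPU stream */
	ret = zcomp_compress(zstrm, page, &comp_len);
	if (!ret)
		ret = zcomp_decompress(zstrm, zstrm->buffer, comp_len, out);
	zcomp_stream_put(comp);

	zcomp_destroy(comp);
	return ret;
}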