1

crypto: scomp - remove memcpy if sg_nents is 1 and pages are lowmem

While sg_nents is 1 — which is always true for the current kernel,
since the only user, zswap, falls into this case — we have a chance
to remove the memcpy and thus improve performance.
Though sg_nents is 1, its buffer might cross two pages. If those
pages are in highmem, we have no cheap way to map them to a
contiguous virtual address, because kmap doesn't support more than
one page (kmap of even a single highmem page can still be expensive
due to TLB effects) and vmap is expensive.
So we also test and ensure the page is not in highmem before safely
using page_to_virt and removing the memcpy. The good news is that
in the vast majority of cases the pages are in lowmem, and they are
always in lowmem on modern and popular hardware.

Cc: Johannes Weiner <hannes@cmpxchg.org>
Cc: Nhat Pham <nphamcs@gmail.com>
Cc: Yosry Ahmed <yosryahmed@google.com>
Signed-off-by: Barry Song <v-songbaohua@oppo.com>
Tested-by: Chengming Zhou <zhouchengming@bytedance.com>
Signed-off-by: Herbert Xu <herbert@gondor.apana.org.au>
This commit is contained in:
Barry Song 2024-03-02 08:27:45 +13:00 committed by Herbert Xu
parent 43a7885ec0
commit 77292bb8ca

View File

@ -117,6 +117,7 @@ static int scomp_acomp_comp_decomp(struct acomp_req *req, int dir)
struct crypto_scomp *scomp = *tfm_ctx; struct crypto_scomp *scomp = *tfm_ctx;
void **ctx = acomp_request_ctx(req); void **ctx = acomp_request_ctx(req);
struct scomp_scratch *scratch; struct scomp_scratch *scratch;
void *src, *dst;
unsigned int dlen; unsigned int dlen;
int ret; int ret;
@ -134,13 +135,25 @@ static int scomp_acomp_comp_decomp(struct acomp_req *req, int dir)
scratch = raw_cpu_ptr(&scomp_scratch); scratch = raw_cpu_ptr(&scomp_scratch);
spin_lock(&scratch->lock); spin_lock(&scratch->lock);
scatterwalk_map_and_copy(scratch->src, req->src, 0, req->slen, 0); if (sg_nents(req->src) == 1 && !PageHighMem(sg_page(req->src))) {
if (dir) src = page_to_virt(sg_page(req->src)) + req->src->offset;
ret = crypto_scomp_compress(scomp, scratch->src, req->slen, } else {
scratch->dst, &req->dlen, *ctx); scatterwalk_map_and_copy(scratch->src, req->src, 0,
req->slen, 0);
src = scratch->src;
}
if (req->dst && sg_nents(req->dst) == 1 && !PageHighMem(sg_page(req->dst)))
dst = page_to_virt(sg_page(req->dst)) + req->dst->offset;
else else
ret = crypto_scomp_decompress(scomp, scratch->src, req->slen, dst = scratch->dst;
scratch->dst, &req->dlen, *ctx);
if (dir)
ret = crypto_scomp_compress(scomp, src, req->slen,
dst, &req->dlen, *ctx);
else
ret = crypto_scomp_decompress(scomp, src, req->slen,
dst, &req->dlen, *ctx);
if (!ret) { if (!ret) {
if (!req->dst) { if (!req->dst) {
req->dst = sgl_alloc(req->dlen, GFP_ATOMIC, NULL); req->dst = sgl_alloc(req->dlen, GFP_ATOMIC, NULL);
@ -152,8 +165,17 @@ static int scomp_acomp_comp_decomp(struct acomp_req *req, int dir)
ret = -ENOSPC; ret = -ENOSPC;
goto out; goto out;
} }
scatterwalk_map_and_copy(scratch->dst, req->dst, 0, req->dlen, if (dst == scratch->dst) {
1); scatterwalk_map_and_copy(scratch->dst, req->dst, 0,
req->dlen, 1);
} else {
int nr_pages = DIV_ROUND_UP(req->dst->offset + req->dlen, PAGE_SIZE);
int i;
struct page *dst_page = sg_page(req->dst);
for (i = 0; i < nr_pages; i++)
flush_dcache_page(dst_page + i);
}
} }
out: out:
spin_unlock(&scratch->lock); spin_unlock(&scratch->lock);