crypto: af_alg/hash: Support MSG_SPLICE_PAGES

Make AF_ALG sendmsg() support MSG_SPLICE_PAGES in the hashing code.  This
causes pages to be spliced from the source iterator if possible.

This allows ->sendpage() to be replaced by something that can handle
multiple multipage folios in a single transaction.
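
For illustration only (not part of this patch): an in-kernel caller that wants
its pages spliced rather than copied builds a BVEC iterator and sets
MSG_SPLICE_PAGES on the sendmsg() call, roughly as in the sketch below.  The
helper name splice_page_to_sock() is invented for this example;
bvec_set_page(), iov_iter_bvec(), sock_sendmsg() and MSG_SPLICE_PAGES are the
real interfaces.

	#include <linux/bvec.h>
	#include <linux/net.h>
	#include <linux/socket.h>
	#include <linux/uio.h>

	/* Hypothetical helper: hand one page to a socket for splicing. */
	static int splice_page_to_sock(struct socket *sock, struct page *page,
				       size_t offset, size_t len, bool more)
	{
		struct bio_vec bvec;
		struct msghdr msg = {
			.msg_flags = MSG_SPLICE_PAGES | (more ? MSG_MORE : 0),
		};

		/* Point the iterator at the caller's page; no data is copied. */
		bvec_set_page(&bvec, page, len, offset);
		iov_iter_bvec(&msg.msg_iter, ITER_SOURCE, &bvec, 1, len);

		return sock_sendmsg(sock, &msg);
	}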

Signed-off-by: David Howells <dhowells@redhat.com>
cc: Herbert Xu <herbert@gondor.apana.org.au>
cc: "David S. Miller" <davem@davemloft.net>
cc: Eric Dumazet <edumazet@google.com>
cc: Jakub Kicinski <kuba@kernel.org>
cc: Paolo Abeni <pabeni@redhat.com>
cc: Jens Axboe <axboe@kernel.dk>
cc: Matthew Wilcox <willy@infradead.org>
cc: linux-crypto@vger.kernel.org
cc: netdev@vger.kernel.org
Acked-by: Herbert Xu <herbert@gondor.apana.org.au>
Signed-off-by: Paolo Abeni <pabeni@redhat.com>
David Howells, 2023-06-06 14:08:56 +01:00 (committed by Paolo Abeni)
commit c662b043cd, parent fb800fa4c1
2 changed files with 73 additions and 44 deletions

crypto/af_alg.c
@@ -542,9 +542,14 @@ void af_alg_free_sg(struct af_alg_sgl *sgl)
 {
 	int i;
 
-	if (sgl->need_unpin)
-		for (i = 0; i < sgl->sgt.nents; i++)
-			unpin_user_page(sg_page(&sgl->sgt.sgl[i]));
+	if (sgl->sgt.sgl) {
+		if (sgl->need_unpin)
+			for (i = 0; i < sgl->sgt.nents; i++)
+				unpin_user_page(sg_page(&sgl->sgt.sgl[i]));
+		if (sgl->sgt.sgl != sgl->sgl)
+			kvfree(sgl->sgt.sgl);
+		sgl->sgt.sgl = NULL;
+	}
 }
 EXPORT_SYMBOL_GPL(af_alg_free_sg);

crypto/algif_hash.c
@@ -63,78 +63,102 @@ static void hash_free_result(struct sock *sk, struct hash_ctx *ctx)
 static int hash_sendmsg(struct socket *sock, struct msghdr *msg,
 			size_t ignored)
 {
-	int limit = ALG_MAX_PAGES * PAGE_SIZE;
 	struct sock *sk = sock->sk;
 	struct alg_sock *ask = alg_sk(sk);
 	struct hash_ctx *ctx = ask->private;
-	long copied = 0;
+	ssize_t copied = 0;
+	size_t len, max_pages, npages;
+	bool continuing = ctx->more, need_init = false;
 	int err;
 
-	if (limit > sk->sk_sndbuf)
-		limit = sk->sk_sndbuf;
+	max_pages = min_t(size_t, ALG_MAX_PAGES,
+			  DIV_ROUND_UP(sk->sk_sndbuf, PAGE_SIZE));
 
 	lock_sock(sk);
-	if (!ctx->more) {
+	if (!continuing) {
 		if ((msg->msg_flags & MSG_MORE))
 			hash_free_result(sk, ctx);
-
-		err = crypto_wait_req(crypto_ahash_init(&ctx->req), &ctx->wait);
-		if (err)
-			goto unlock;
+		need_init = true;
 	}
 
 	ctx->more = false;
 
 	while (msg_data_left(msg)) {
-		int len = msg_data_left(msg);
-
-		if (len > limit)
-			len = limit;
-
 		ctx->sgl.sgt.sgl = ctx->sgl.sgl;
 		ctx->sgl.sgt.nents = 0;
 		ctx->sgl.sgt.orig_nents = 0;
 
-		len = extract_iter_to_sg(&msg->msg_iter, len, &ctx->sgl.sgt,
-					 ALG_MAX_PAGES, 0);
-		if (len < 0) {
-			err = copied ? 0 : len;
-			goto unlock;
-		}
-		sg_mark_end(ctx->sgl.sgt.sgl + ctx->sgl.sgt.nents);
+		err = -EIO;
+		npages = iov_iter_npages(&msg->msg_iter, max_pages);
+		if (npages == 0)
+			goto unlock_free;
+
+		if (npages > ARRAY_SIZE(ctx->sgl.sgl)) {
+			err = -ENOMEM;
+			ctx->sgl.sgt.sgl =
+				kvmalloc(array_size(npages,
+						    sizeof(*ctx->sgl.sgt.sgl)),
+					 GFP_KERNEL);
+			if (!ctx->sgl.sgt.sgl)
+				goto unlock_free;
+		}
+		sg_init_table(ctx->sgl.sgl, npages);
 
 		ctx->sgl.need_unpin = iov_iter_extract_will_pin(&msg->msg_iter);
 
-		ahash_request_set_crypt(&ctx->req, ctx->sgl.sgt.sgl, NULL, len);
-
-		err = crypto_wait_req(crypto_ahash_update(&ctx->req),
-				      &ctx->wait);
-		af_alg_free_sg(&ctx->sgl);
-		if (err) {
-			iov_iter_revert(&msg->msg_iter, len);
-			goto unlock;
-		}
-
-		copied += len;
-	}
-
-	err = 0;
-
+		err = extract_iter_to_sg(&msg->msg_iter, LONG_MAX,
+					 &ctx->sgl.sgt, npages, 0);
+		if (err < 0)
+			goto unlock_free;
+		len = err;
+		sg_mark_end(ctx->sgl.sgt.sgl + ctx->sgl.sgt.nents - 1);
+
+		if (!msg_data_left(msg)) {
+			err = hash_alloc_result(sk, ctx);
+			if (err)
+				goto unlock_free;
+		}
+
+		ahash_request_set_crypt(&ctx->req, ctx->sgl.sgt.sgl,
+					ctx->result, len);
+
+		if (!msg_data_left(msg) && !continuing &&
+		    !(msg->msg_flags & MSG_MORE)) {
+			err = crypto_ahash_digest(&ctx->req);
+		} else {
+			if (need_init) {
+				err = crypto_wait_req(
+					crypto_ahash_init(&ctx->req),
+					&ctx->wait);
+				if (err)
+					goto unlock_free;
+				need_init = false;
+			}
+
+			if (msg_data_left(msg) || (msg->msg_flags & MSG_MORE))
+				err = crypto_ahash_update(&ctx->req);
+			else
+				err = crypto_ahash_finup(&ctx->req);
+			continuing = true;
+		}
+
+		err = crypto_wait_req(err, &ctx->wait);
+		if (err)
+			goto unlock_free;
+
+		copied += len;
+		af_alg_free_sg(&ctx->sgl);
+	}
+
 	ctx->more = msg->msg_flags & MSG_MORE;
-	if (!ctx->more) {
-		err = hash_alloc_result(sk, ctx);
-		if (err)
-			goto unlock;
-
-		ahash_request_set_crypt(&ctx->req, NULL, ctx->result, 0);
-		err = crypto_wait_req(crypto_ahash_final(&ctx->req),
-				      &ctx->wait);
-	}
-
+	err = 0;
 unlock:
 	release_sock(sk);
-
-	return err ?: copied;
+	return copied ?: err;
+
+unlock_free:
+	af_alg_free_sg(&ctx->sgl);
+	goto unlock;
 }
 
 static ssize_t hash_sendpage(struct socket *sock, struct page *page,
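
For context, a minimal userspace sketch (assumptions: a kernel providing
AF_ALG and the "sha256" hash; all error handling omitted) of the algif_hash
interface whose sendmsg() path the hunk above reworks.  MSG_SPLICE_PAGES
itself is a kernel-internal flag, so user programs reach this code through
plain send()/sendmsg() or splice() on the operation socket.

	#include <stdio.h>
	#include <unistd.h>
	#include <sys/socket.h>
	#include <linux/if_alg.h>

	int main(void)
	{
		struct sockaddr_alg sa = {
			.salg_family = AF_ALG,
			.salg_type   = "hash",
			.salg_name   = "sha256",
		};
		unsigned char digest[32];
		const char buf[] = "some data to hash";
		int tfmfd, opfd;
		size_t i;

		tfmfd = socket(AF_ALG, SOCK_SEQPACKET, 0);
		bind(tfmfd, (struct sockaddr *)&sa, sizeof(sa));
		opfd = accept(tfmfd, NULL, 0);

		/* Each send() is serviced by hash_sendmsg() in the kernel;
		 * MSG_MORE would keep the hash open across multiple sends. */
		send(opfd, buf, sizeof(buf) - 1, 0);
		read(opfd, digest, sizeof(digest));

		for (i = 0; i < sizeof(digest); i++)
			printf("%02x", digest[i]);
		printf("\n");

		close(opfd);
		close(tfmfd);
		return 0;
	}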