crypto: x86/aes-gcm - fix PREEMPT_RT issue in gcm_crypt()
On PREEMPT_RT, kfree() takes sleeping locks and must not be called with
preemption disabled. Therefore, on PREEMPT_RT, skcipher_walk_done() must
not be called from within a kernel_fpu_{begin,end}() pair, even when it
is the last call, which is guaranteed not to allocate memory.

Fix this by moving the last skcipher_walk_done() in gcm_crypt() to the
end of the function, so that it is called after kernel_fpu_end(). To
make this work cleanly, rework the data processing loop so that it
handles only the non-last data segments.
Fixes: b06affb1cb ("crypto: x86/aes-gcm - add VAES and AVX512 / AVX10 optimized AES-GCM")
Reported-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
Closes: https://lore.kernel.org/linux-crypto/20240802102333.itejxOsJ@linutronix.de
Signed-off-by: Eric Biggers <ebiggers@google.com>
Tested-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
Signed-off-by: Herbert Xu <herbert@gondor.apana.org.au>
parent 4b7acc85de
commit 001412493e
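To make the constraint concrete, here is a minimal sketch of the ordering
that is invalid on PREEMPT_RT. kernel_fpu_begin()/kernel_fpu_end() and
skcipher_walk_done() are the real kernel APIs involved; the surrounding
skeleton is illustrative only, not the actual gcm_crypt() code:

	kernel_fpu_begin();	/* disables preemption */
	/* ... SIMD processing of the final data segment ... */
	err = skcipher_walk_done(&walk, 0);	/* may kfree(); on PREEMPT_RT,
						 * kfree() takes sleeping locks,
						 * which must not be acquired
						 * with preemption disabled */
	kernel_fpu_end();

Moving the final skcipher_walk_done() after kernel_fpu_end() restores a
context in which sleeping is permitted.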
@@ -1366,6 +1366,8 @@ gcm_crypt(struct aead_request *req, int flags)
 		err = skcipher_walk_aead_encrypt(&walk, req, false);
 	else
 		err = skcipher_walk_aead_decrypt(&walk, req, false);
+	if (err)
+		return err;
 
 	/*
 	 * Since the AES-GCM assembly code requires that at least three assembly
@@ -1381,37 +1383,31 @@ gcm_crypt(struct aead_request *req, int flags)
 	gcm_process_assoc(key, ghash_acc, req->src, assoclen, flags);
 
 	/* En/decrypt the data and pass the ciphertext through GHASH. */
-	while ((nbytes = walk.nbytes) != 0) {
-		if (unlikely(nbytes < walk.total)) {
-			/*
-			 * Non-last segment.  In this case, the assembly
-			 * function requires that the length be a multiple of 16
-			 * (AES_BLOCK_SIZE) bytes.  The needed buffering of up
-			 * to 16 bytes is handled by the skcipher_walk.  Here we
-			 * just need to round down to a multiple of 16.
-			 */
-			nbytes = round_down(nbytes, AES_BLOCK_SIZE);
-			aes_gcm_update(key, le_ctr, ghash_acc,
-				       walk.src.virt.addr, walk.dst.virt.addr,
-				       nbytes, flags);
-			le_ctr[0] += nbytes / AES_BLOCK_SIZE;
-			kernel_fpu_end();
-			err = skcipher_walk_done(&walk, walk.nbytes - nbytes);
-			kernel_fpu_begin();
-		} else {
-			/* Last segment: process all remaining data. */
-			aes_gcm_update(key, le_ctr, ghash_acc,
-				       walk.src.virt.addr, walk.dst.virt.addr,
-				       nbytes, flags);
-			err = skcipher_walk_done(&walk, 0);
-			/*
-			 * The low word of the counter isn't used by the
-			 * finalize, so there's no need to increment it here.
-			 */
-		}
+	while (unlikely((nbytes = walk.nbytes) < walk.total)) {
+		/*
+		 * Non-last segment.  In this case, the assembly function
+		 * requires that the length be a multiple of 16 (AES_BLOCK_SIZE)
+		 * bytes.  The needed buffering of up to 16 bytes is handled by
+		 * the skcipher_walk.  Here we just need to round down to a
+		 * multiple of 16.
+		 */
+		nbytes = round_down(nbytes, AES_BLOCK_SIZE);
+		aes_gcm_update(key, le_ctr, ghash_acc, walk.src.virt.addr,
+			       walk.dst.virt.addr, nbytes, flags);
+		le_ctr[0] += nbytes / AES_BLOCK_SIZE;
+		kernel_fpu_end();
+		err = skcipher_walk_done(&walk, walk.nbytes - nbytes);
+		if (err)
+			return err;
+		kernel_fpu_begin();
 	}
-	if (err)
-		goto out;
+	/* Last segment: process all remaining data. */
+	aes_gcm_update(key, le_ctr, ghash_acc, walk.src.virt.addr,
+		       walk.dst.virt.addr, nbytes, flags);
+	/*
+	 * The low word of the counter isn't used by the finalize, so there's no
+	 * need to increment it here.
+	 */
 
 	/* Finalize */
 	taglen = crypto_aead_authsize(tfm);
@@ -1439,8 +1435,9 @@ gcm_crypt(struct aead_request *req, int flags)
 			    datalen, tag, taglen, flags))
 			err = -EBADMSG;
 	}
-out:
 	kernel_fpu_end();
+	if (nbytes)
+		skcipher_walk_done(&walk, 0);
 	return err;
 }
 
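Condensed to its control flow, the patched data path looks as follows (a
paraphrase of the '+' lines above, with the cryptographic details elided):

	/* All non-last segments: lengths rounded down to a multiple of 16. */
	while (unlikely((nbytes = walk.nbytes) < walk.total)) {
		/* ... aes_gcm_update() on a multiple of AES_BLOCK_SIZE ... */
		kernel_fpu_end();
		err = skcipher_walk_done(&walk, walk.nbytes - nbytes);
		if (err)
			return err;
		kernel_fpu_begin();
	}
	/* Last segment: processed unconditionally, walk_done() deferred. */
	/* ... aes_gcm_update() on the remaining nbytes, then finalize ... */
	kernel_fpu_end();
	if (nbytes)
		skcipher_walk_done(&walk, 0);	/* runs with preemption enabled */
	return err;

Since the loop now handles only non-last segments, the last segment is
processed exactly once after it, and the skcipher_walk_done() call that
releases the walk's buffers is deferred until after kernel_fpu_end(),
where kfree() may safely take sleeping locks on PREEMPT_RT.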