crypto: x86/aesni - Perform address alignment early for XTS mode

Currently, the alignment of each field in struct aesni_xts_ctx occurs
right before every access. However, it's possible to perform this
alignment ahead of time.

Introduce a helper function that converts struct crypto_skcipher *tfm
to struct aesni_xts_ctx *ctx and returns an aligned address. Utilize
this helper function at the beginning of each XTS function and then
eliminate redundant alignment code.
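
A minimal sketch of the pattern, assuming aes_align_addr() behaves as in
the parent refactor (return the pointer as-is when
crypto_tfm_ctx_alignment() already satisfies AESNI_ALIGN, otherwise round
it up with PTR_ALIGN):

static inline void *aes_align_addr(void *addr)
{
	/* The crypto API may already hand out sufficiently aligned memory. */
	if (crypto_tfm_ctx_alignment() >= AESNI_ALIGN)
		return addr;
	return PTR_ALIGN(addr, AESNI_ALIGN);
}

static inline struct aesni_xts_ctx *aes_xts_ctx(struct crypto_skcipher *tfm)
{
	/* Align once here, so callers can use the ctx fields directly. */
	return aes_align_addr(crypto_skcipher_ctx(tfm));
}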

Suggested-by: Eric Biggers <ebiggers@kernel.org>
Link: https://lore.kernel.org/all/ZFWQ4sZEVu%2FLHq+Q@gmail.com/
Signed-off-by: Chang S. Bae <chang.seok.bae@intel.com>
Cc: linux-crypto@vger.kernel.org
Cc: x86@kernel.org
Cc: linux-kernel@vger.kernel.org
Signed-off-by: Herbert Xu <herbert@gondor.apana.org.au>
arch/x86/crypto/aesni-intel_glue.c

@@ -223,6 +223,11 @@ static inline struct crypto_aes_ctx *aes_ctx(void *raw_ctx)
 	return aes_align_addr(raw_ctx);
 }
 
+static inline struct aesni_xts_ctx *aes_xts_ctx(struct crypto_skcipher *tfm)
+{
+	return aes_align_addr(crypto_skcipher_ctx(tfm));
+}
+
 static int aes_set_key_common(struct crypto_aes_ctx *ctx,
 			      const u8 *in_key, unsigned int key_len)
 {
@@ -875,7 +880,7 @@ static int helper_rfc4106_decrypt(struct aead_request *req)
 static int xts_aesni_setkey(struct crypto_skcipher *tfm, const u8 *key,
 			    unsigned int keylen)
 {
-	struct aesni_xts_ctx *ctx = crypto_skcipher_ctx(tfm);
+	struct aesni_xts_ctx *ctx = aes_xts_ctx(tfm);
 	int err;
 
 	err = xts_verify_key(tfm, key, keylen);
@@ -885,18 +890,18 @@ static int xts_aesni_setkey(struct crypto_skcipher *tfm, const u8 *key,
 	keylen /= 2;
 
 	/* first half of xts-key is for crypt */
-	err = aes_set_key_common(aes_ctx(&ctx->crypt_ctx), key, keylen);
+	err = aes_set_key_common(&ctx->crypt_ctx, key, keylen);
 	if (err)
 		return err;
 
 	/* second half of xts-key is for tweak */
-	return aes_set_key_common(aes_ctx(&ctx->tweak_ctx), key + keylen, keylen);
+	return aes_set_key_common(&ctx->tweak_ctx, key + keylen, keylen);
 }
 
 static int xts_crypt(struct skcipher_request *req, bool encrypt)
 {
 	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
-	struct aesni_xts_ctx *ctx = crypto_skcipher_ctx(tfm);
+	struct aesni_xts_ctx *ctx = aes_xts_ctx(tfm);
 	int tail = req->cryptlen % AES_BLOCK_SIZE;
 	struct skcipher_request subreq;
 	struct skcipher_walk walk;
@@ -932,7 +937,7 @@ static int xts_crypt(struct skcipher_request *req, bool encrypt)
 	kernel_fpu_begin();
 
 	/* calculate first value of T */
-	aesni_enc(aes_ctx(&ctx->tweak_ctx), walk.iv, walk.iv);
+	aesni_enc(&ctx->tweak_ctx, walk.iv, walk.iv);
 
 	while (walk.nbytes > 0) {
 		int nbytes = walk.nbytes;
@@ -941,11 +946,11 @@ static int xts_crypt(struct skcipher_request *req, bool encrypt)
 			nbytes &= ~(AES_BLOCK_SIZE - 1);
 
 		if (encrypt)
-			aesni_xts_encrypt(aes_ctx(&ctx->crypt_ctx),
+			aesni_xts_encrypt(&ctx->crypt_ctx,
 					  walk.dst.virt.addr, walk.src.virt.addr,
 					  nbytes, walk.iv);
 		else
-			aesni_xts_decrypt(aes_ctx(&ctx->crypt_ctx),
+			aesni_xts_decrypt(&ctx->crypt_ctx,
 					  walk.dst.virt.addr, walk.src.virt.addr,
 					  nbytes, walk.iv);
 		kernel_fpu_end();
@@ -973,11 +978,11 @@ static int xts_crypt(struct skcipher_request *req, bool encrypt)
 
 		kernel_fpu_begin();
 		if (encrypt)
-			aesni_xts_encrypt(aes_ctx(&ctx->crypt_ctx),
+			aesni_xts_encrypt(&ctx->crypt_ctx,
 					  walk.dst.virt.addr, walk.src.virt.addr,
 					  walk.nbytes, walk.iv);
 		else
-			aesni_xts_decrypt(aes_ctx(&ctx->crypt_ctx),
+			aesni_xts_decrypt(&ctx->crypt_ctx,
 					  walk.dst.virt.addr, walk.src.virt.addr,
 					  walk.nbytes, walk.iv);
 		kernel_fpu_end();
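
Why a single up-front alignment suffices: the field offsets inside the
context preserve the base pointer's alignment, so once ctx itself is
aligned, &ctx->tweak_ctx and &ctx->crypt_ctx are aligned as well, and the
per-access aes_ctx() wrappers removed above become redundant. A rough
sketch of the layout this relies on, assuming AESNI_ALIGN_ATTR aligns each
field to AESNI_ALIGN as in aesni-intel_glue.c:

struct aesni_xts_ctx {
	struct crypto_aes_ctx tweak_ctx AESNI_ALIGN_ATTR;
	struct crypto_aes_ctx crypt_ctx AESNI_ALIGN_ATTR;
};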