2019-06-04 01:11:33 -07:00
|
|
|
// SPDX-License-Identifier: GPL-2.0-only
|
2009-10-18 19:53:06 -07:00
|
|
|
/*
|
|
|
|
* Accelerated GHASH implementation with Intel PCLMULQDQ-NI
|
|
|
|
* instructions. This file contains glue code.
|
|
|
|
*
|
|
|
|
* Copyright (c) 2009 Intel Corp.
|
|
|
|
* Author: Huang Ying <ying.huang@intel.com>
|
|
|
|
*/
|
|
|
|
|
2010-12-15 02:58:57 -07:00
|
|
|
#include <linux/err.h>
|
2009-10-18 19:53:06 -07:00
|
|
|
#include <linux/module.h>
|
|
|
|
#include <linux/init.h>
|
|
|
|
#include <linux/kernel.h>
|
|
|
|
#include <linux/crypto.h>
|
|
|
|
#include <crypto/algapi.h>
|
|
|
|
#include <crypto/cryptd.h>
|
|
|
|
#include <crypto/gf128mul.h>
|
|
|
|
#include <crypto/internal/hash.h>
|
2019-03-12 22:12:48 -07:00
|
|
|
#include <crypto/internal/simd.h>
|
2012-01-25 16:09:06 -07:00
|
|
|
#include <asm/cpu_device_id.h>
|
2019-03-12 22:12:48 -07:00
|
|
|
#include <asm/simd.h>
|
2024-10-01 12:35:57 -07:00
|
|
|
#include <linux/unaligned.h>
|
2009-10-18 19:53:06 -07:00
|
|
|
|
|
|
|
#define GHASH_BLOCK_SIZE 16
#define GHASH_DIGEST_SIZE 16

/*
 * Low-level GHASH primitives, implemented in assembly elsewhere (no
 * definition in this glue file).  'dst' is the 16-byte running GHASH
 * state; 'shash' is the key as preprocessed by ghash_setkey().  Both
 * use PCLMULQDQ/XMM registers, so callers must wrap every call in
 * kernel_fpu_begin()/kernel_fpu_end().
 */
void clmul_ghash_mul(char *dst, const le128 *shash);

void clmul_ghash_update(char *dst, const char *src, unsigned int srclen,
			const le128 *shash);
|
2009-10-18 19:53:06 -07:00
|
|
|
|
|
|
|
/* Per-tfm context of the public async "ghash" wrapper algorithm. */
struct ghash_async_ctx {
	struct cryptd_ahash *cryptd_tfm;	/* cryptd-backed helper tfm */
};
|
|
|
|
|
|
|
|
/* Per-tfm context of the internal shash: the preprocessed hash key. */
struct ghash_ctx {
	le128 shash;	/* bitreflect(K) * x, computed in ghash_setkey() */
};
|
|
|
|
|
|
|
|
/* Per-request hashing state. */
struct ghash_desc_ctx {
	u8 buffer[GHASH_BLOCK_SIZE];	/* running digest / partial block */
	u32 bytes;	/* bytes still missing to complete the current block */
};
|
|
|
|
|
|
|
|
static int ghash_init(struct shash_desc *desc)
|
|
|
|
{
|
|
|
|
struct ghash_desc_ctx *dctx = shash_desc_ctx(desc);
|
|
|
|
|
|
|
|
memset(dctx, 0, sizeof(*dctx));
|
|
|
|
|
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
|
|
|
|
static int ghash_setkey(struct crypto_shash *tfm,
|
|
|
|
const u8 *key, unsigned int keylen)
|
|
|
|
{
|
|
|
|
struct ghash_ctx *ctx = crypto_shash_ctx(tfm);
|
2014-03-27 10:14:40 -07:00
|
|
|
u64 a, b;
|
2009-10-18 19:53:06 -07:00
|
|
|
|
crypto: remove CRYPTO_TFM_RES_BAD_KEY_LEN
The CRYPTO_TFM_RES_BAD_KEY_LEN flag was apparently meant as a way to
make the ->setkey() functions provide more information about errors.
However, no one actually checks for this flag, which makes it pointless.
Also, many algorithms fail to set this flag when given a bad length key.
Reviewing just the generic implementations, this is the case for
aes-fixed-time, cbcmac, echainiv, nhpoly1305, pcrypt, rfc3686, rfc4309,
rfc7539, rfc7539esp, salsa20, seqiv, and xcbc. But there are probably
many more in arch/*/crypto/ and drivers/crypto/.
Some algorithms can even set this flag when the key is the correct
length. For example, authenc and authencesn set it when the key payload
is malformed in any way (not just a bad length), the atmel-sha and ccree
drivers can set it if a memory allocation fails, and the chelsio driver
sets it for bad auth tag lengths, not just bad key lengths.
So even if someone actually wanted to start checking this flag (which
seems unlikely, since it's been unused for a long time), there would be
a lot of work needed to get it working correctly. But it would probably
be much better to go back to the drawing board and just define different
return values, like -EINVAL if the key is invalid for the algorithm vs.
-EKEYREJECTED if the key was rejected by a policy like "no weak keys".
That would be much simpler, less error-prone, and easier to test.
So just remove this flag.
Signed-off-by: Eric Biggers <ebiggers@google.com>
Reviewed-by: Horia Geantă <horia.geanta@nxp.com>
Signed-off-by: Herbert Xu <herbert@gondor.apana.org.au>
2019-12-30 20:19:36 -07:00
|
|
|
if (keylen != GHASH_BLOCK_SIZE)
|
2009-10-18 19:53:06 -07:00
|
|
|
return -EINVAL;
|
|
|
|
|
2022-12-19 22:40:42 -07:00
|
|
|
/*
|
|
|
|
* GHASH maps bits to polynomial coefficients backwards, which makes it
|
|
|
|
* hard to implement. But it can be shown that the GHASH multiplication
|
|
|
|
*
|
|
|
|
* D * K (mod x^128 + x^7 + x^2 + x + 1)
|
|
|
|
*
|
|
|
|
* (where D is a data block and K is the key) is equivalent to:
|
|
|
|
*
|
|
|
|
* bitreflect(D) * bitreflect(K) * x^(-127)
|
|
|
|
* (mod x^128 + x^127 + x^126 + x^121 + 1)
|
|
|
|
*
|
|
|
|
* So, the code below precomputes:
|
|
|
|
*
|
|
|
|
* bitreflect(K) * x^(-127) (mod x^128 + x^127 + x^126 + x^121 + 1)
|
|
|
|
*
|
|
|
|
* ... but in Montgomery form (so that Montgomery multiplication can be
|
|
|
|
* used), i.e. with an extra x^128 factor, which means actually:
|
|
|
|
*
|
|
|
|
* bitreflect(K) * x (mod x^128 + x^127 + x^126 + x^121 + 1)
|
|
|
|
*
|
|
|
|
* The within-a-byte part of bitreflect() cancels out GHASH's built-in
|
|
|
|
* reflection, and thus bitreflect() is actually a byteswap.
|
|
|
|
*/
|
2022-12-19 22:40:40 -07:00
|
|
|
a = get_unaligned_be64(key);
|
|
|
|
b = get_unaligned_be64(key + 8);
|
2022-12-19 22:40:41 -07:00
|
|
|
ctx->shash.a = cpu_to_le64((a << 1) | (b >> 63));
|
|
|
|
ctx->shash.b = cpu_to_le64((b << 1) | (a >> 63));
|
2014-03-27 10:14:40 -07:00
|
|
|
if (a >> 63)
|
2022-12-19 22:40:41 -07:00
|
|
|
ctx->shash.a ^= cpu_to_le64((u64)0xc2 << 56);
|
2009-10-18 19:53:06 -07:00
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
|
|
|
|
/*
 * Fold 'srclen' bytes from 'src' into the running GHASH state.
 *
 * Any partial block buffered by a previous call is completed first; a new
 * trailing fragment (< GHASH_BLOCK_SIZE bytes) is XOR-buffered for the
 * next update or for ghash_flush().  Always returns 0.
 */
static int ghash_update(struct shash_desc *desc,
			 const u8 *src, unsigned int srclen)
{
	struct ghash_desc_ctx *dctx = shash_desc_ctx(desc);
	struct ghash_ctx *ctx = crypto_shash_ctx(desc->tfm);
	u8 *dst = dctx->buffer;

	kernel_fpu_begin();
	if (dctx->bytes) {
		/* Finish the previously buffered partial block. */
		int n = min(srclen, dctx->bytes);
		u8 *pos = dst + (GHASH_BLOCK_SIZE - dctx->bytes);

		dctx->bytes -= n;
		srclen -= n;

		/* GHASH absorbs data by XOR into the running digest. */
		while (n--)
			*pos++ ^= *src++;

		/* Block complete: multiply the state by the key. */
		if (!dctx->bytes)
			clmul_ghash_mul(dst, &ctx->shash);
	}

	/*
	 * Process the full blocks in bulk.  NOTE(review): this relies on
	 * the asm routine consuming only complete 16-byte blocks and
	 * ignoring the sub-block tail, which is re-buffered below —
	 * contract with the assembly implementation.
	 */
	clmul_ghash_update(dst, src, srclen, &ctx->shash);
	kernel_fpu_end();

	if (srclen & 0xf) {
		/* Buffer the trailing fragment (XORed into the digest). */
		src += srclen - (srclen & 0xf);
		srclen &= 0xf;
		dctx->bytes = GHASH_BLOCK_SIZE - srclen;
		while (srclen--)
			*dst++ ^= *src++;
	}

	return 0;
}
|
|
|
|
|
|
|
|
static void ghash_flush(struct ghash_ctx *ctx, struct ghash_desc_ctx *dctx)
|
|
|
|
{
|
|
|
|
u8 *dst = dctx->buffer;
|
|
|
|
|
|
|
|
if (dctx->bytes) {
|
|
|
|
u8 *tmp = dst + (GHASH_BLOCK_SIZE - dctx->bytes);
|
|
|
|
|
|
|
|
while (dctx->bytes--)
|
|
|
|
*tmp++ ^= 0;
|
|
|
|
|
|
|
|
kernel_fpu_begin();
|
|
|
|
clmul_ghash_mul(dst, &ctx->shash);
|
|
|
|
kernel_fpu_end();
|
|
|
|
}
|
|
|
|
|
|
|
|
dctx->bytes = 0;
|
|
|
|
}
|
|
|
|
|
|
|
|
/*
 * Finalize the hash: flush any buffered partial block, then copy the
 * 16-byte digest out.  Always returns 0.
 */
static int ghash_final(struct shash_desc *desc, u8 *dst)
{
	struct ghash_desc_ctx *dctx = shash_desc_ctx(desc);
	struct ghash_ctx *ctx = crypto_shash_ctx(desc->tfm);

	ghash_flush(ctx, dctx);
	memcpy(dst, dctx->buffer, GHASH_BLOCK_SIZE);

	return 0;
}
|
|
|
|
|
|
|
|
/*
 * The internal synchronous GHASH implementation.  CRYPTO_ALG_INTERNAL
 * (and priority 0) keeps it off the public algorithm list: it uses
 * FPU/SIMD state and is meant to be driven through the async wrapper
 * below, which checks crypto_simd_usable() first.
 */
static struct shash_alg ghash_alg = {
	.digestsize	= GHASH_DIGEST_SIZE,
	.init		= ghash_init,
	.update		= ghash_update,
	.final		= ghash_final,
	.setkey		= ghash_setkey,
	.descsize	= sizeof(struct ghash_desc_ctx),
	.base		= {
		.cra_name		= "__ghash",
		.cra_driver_name	= "__ghash-pclmulqdqni",
		.cra_priority		= 0,
		.cra_flags		= CRYPTO_ALG_INTERNAL,
		.cra_blocksize		= GHASH_BLOCK_SIZE,
		.cra_ctxsize		= sizeof(struct ghash_ctx),
		.cra_module		= THIS_MODULE,
	},
};
|
|
|
|
|
|
|
|
static int ghash_async_init(struct ahash_request *req)
|
|
|
|
{
|
|
|
|
struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
|
|
|
|
struct ghash_async_ctx *ctx = crypto_ahash_ctx(tfm);
|
|
|
|
struct ahash_request *cryptd_req = ahash_request_ctx(req);
|
|
|
|
struct cryptd_ahash *cryptd_tfm = ctx->cryptd_tfm;
|
2016-06-21 01:55:16 -07:00
|
|
|
struct shash_desc *desc = cryptd_shash_desc(cryptd_req);
|
|
|
|
struct crypto_shash *child = cryptd_ahash_child(cryptd_tfm);
|
2009-10-18 19:53:06 -07:00
|
|
|
|
2016-06-21 01:55:16 -07:00
|
|
|
desc->tfm = child;
|
|
|
|
return crypto_shash_init(desc);
|
2009-10-18 19:53:06 -07:00
|
|
|
}
|
|
|
|
|
|
|
|
static int ghash_async_update(struct ahash_request *req)
|
|
|
|
{
|
|
|
|
struct ahash_request *cryptd_req = ahash_request_ctx(req);
|
2016-06-21 01:55:16 -07:00
|
|
|
struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
|
|
|
|
struct ghash_async_ctx *ctx = crypto_ahash_ctx(tfm);
|
|
|
|
struct cryptd_ahash *cryptd_tfm = ctx->cryptd_tfm;
|
2009-10-18 19:53:06 -07:00
|
|
|
|
2019-03-12 22:12:48 -07:00
|
|
|
if (!crypto_simd_usable() ||
|
2016-06-21 01:55:16 -07:00
|
|
|
(in_atomic() && cryptd_ahash_queued(cryptd_tfm))) {
|
2009-10-18 19:53:06 -07:00
|
|
|
memcpy(cryptd_req, req, sizeof(*req));
|
|
|
|
ahash_request_set_tfm(cryptd_req, &cryptd_tfm->base);
|
|
|
|
return crypto_ahash_update(cryptd_req);
|
|
|
|
} else {
|
|
|
|
struct shash_desc *desc = cryptd_shash_desc(cryptd_req);
|
|
|
|
return shash_ahash_update(req, desc);
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
static int ghash_async_final(struct ahash_request *req)
|
|
|
|
{
|
|
|
|
struct ahash_request *cryptd_req = ahash_request_ctx(req);
|
2016-06-21 01:55:16 -07:00
|
|
|
struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
|
|
|
|
struct ghash_async_ctx *ctx = crypto_ahash_ctx(tfm);
|
|
|
|
struct cryptd_ahash *cryptd_tfm = ctx->cryptd_tfm;
|
2009-10-18 19:53:06 -07:00
|
|
|
|
2019-03-12 22:12:48 -07:00
|
|
|
if (!crypto_simd_usable() ||
|
2016-06-21 01:55:16 -07:00
|
|
|
(in_atomic() && cryptd_ahash_queued(cryptd_tfm))) {
|
2009-10-18 19:53:06 -07:00
|
|
|
memcpy(cryptd_req, req, sizeof(*req));
|
|
|
|
ahash_request_set_tfm(cryptd_req, &cryptd_tfm->base);
|
|
|
|
return crypto_ahash_final(cryptd_req);
|
|
|
|
} else {
|
|
|
|
struct shash_desc *desc = cryptd_shash_desc(cryptd_req);
|
|
|
|
return crypto_shash_final(desc, req->result);
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2015-11-29 07:45:33 -07:00
|
|
|
static int ghash_async_import(struct ahash_request *req, const void *in)
|
|
|
|
{
|
|
|
|
struct ahash_request *cryptd_req = ahash_request_ctx(req);
|
|
|
|
struct shash_desc *desc = cryptd_shash_desc(cryptd_req);
|
|
|
|
struct ghash_desc_ctx *dctx = shash_desc_ctx(desc);
|
|
|
|
|
|
|
|
ghash_async_init(req);
|
|
|
|
memcpy(dctx, in, sizeof(*dctx));
|
|
|
|
return 0;
|
|
|
|
|
|
|
|
}
|
|
|
|
|
|
|
|
static int ghash_async_export(struct ahash_request *req, void *out)
|
|
|
|
{
|
|
|
|
struct ahash_request *cryptd_req = ahash_request_ctx(req);
|
|
|
|
struct shash_desc *desc = cryptd_shash_desc(cryptd_req);
|
|
|
|
struct ghash_desc_ctx *dctx = shash_desc_ctx(desc);
|
|
|
|
|
|
|
|
memcpy(out, dctx, sizeof(*dctx));
|
|
|
|
return 0;
|
|
|
|
|
|
|
|
}
|
|
|
|
|
2009-10-18 19:53:06 -07:00
|
|
|
static int ghash_async_digest(struct ahash_request *req)
|
|
|
|
{
|
|
|
|
struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
|
|
|
|
struct ghash_async_ctx *ctx = crypto_ahash_ctx(tfm);
|
|
|
|
struct ahash_request *cryptd_req = ahash_request_ctx(req);
|
|
|
|
struct cryptd_ahash *cryptd_tfm = ctx->cryptd_tfm;
|
|
|
|
|
2019-03-12 22:12:48 -07:00
|
|
|
if (!crypto_simd_usable() ||
|
2016-06-21 01:55:16 -07:00
|
|
|
(in_atomic() && cryptd_ahash_queued(cryptd_tfm))) {
|
2009-10-18 19:53:06 -07:00
|
|
|
memcpy(cryptd_req, req, sizeof(*req));
|
|
|
|
ahash_request_set_tfm(cryptd_req, &cryptd_tfm->base);
|
|
|
|
return crypto_ahash_digest(cryptd_req);
|
|
|
|
} else {
|
|
|
|
struct shash_desc *desc = cryptd_shash_desc(cryptd_req);
|
|
|
|
struct crypto_shash *child = cryptd_ahash_child(cryptd_tfm);
|
|
|
|
|
|
|
|
desc->tfm = child;
|
|
|
|
return shash_ahash_digest(req, desc);
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
static int ghash_async_setkey(struct crypto_ahash *tfm, const u8 *key,
|
|
|
|
unsigned int keylen)
|
|
|
|
{
|
|
|
|
struct ghash_async_ctx *ctx = crypto_ahash_ctx(tfm);
|
|
|
|
struct crypto_ahash *child = &ctx->cryptd_tfm->base;
|
|
|
|
|
|
|
|
crypto_ahash_clear_flags(child, CRYPTO_TFM_REQ_MASK);
|
|
|
|
crypto_ahash_set_flags(child, crypto_ahash_get_flags(tfm)
|
|
|
|
& CRYPTO_TFM_REQ_MASK);
|
2019-12-30 20:19:38 -07:00
|
|
|
return crypto_ahash_setkey(child, key, keylen);
|
2009-10-18 19:53:06 -07:00
|
|
|
}
|
|
|
|
|
|
|
|
static int ghash_async_init_tfm(struct crypto_tfm *tfm)
|
|
|
|
{
|
|
|
|
struct cryptd_ahash *cryptd_tfm;
|
|
|
|
struct ghash_async_ctx *ctx = crypto_tfm_ctx(tfm);
|
|
|
|
|
2015-03-30 13:01:49 -07:00
|
|
|
cryptd_tfm = cryptd_alloc_ahash("__ghash-pclmulqdqni",
|
|
|
|
CRYPTO_ALG_INTERNAL,
|
|
|
|
CRYPTO_ALG_INTERNAL);
|
2009-10-18 19:53:06 -07:00
|
|
|
if (IS_ERR(cryptd_tfm))
|
|
|
|
return PTR_ERR(cryptd_tfm);
|
|
|
|
ctx->cryptd_tfm = cryptd_tfm;
|
|
|
|
crypto_ahash_set_reqsize(__crypto_ahash_cast(tfm),
|
|
|
|
sizeof(struct ahash_request) +
|
|
|
|
crypto_ahash_reqsize(&cryptd_tfm->base));
|
|
|
|
|
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
|
|
|
|
/* Release the cryptd helper allocated by ghash_async_init_tfm(). */
static void ghash_async_exit_tfm(struct crypto_tfm *tfm)
{
	struct ghash_async_ctx *ctx = crypto_tfm_ctx(tfm);

	cryptd_free_ahash(ctx->cryptd_tfm);
}
|
|
|
|
|
|
|
|
/*
 * The public "ghash" algorithm (driver "ghash-clmulni"): an async
 * wrapper that uses the internal shash directly when SIMD is usable and
 * falls back to cryptd otherwise.
 */
static struct ahash_alg ghash_async_alg = {
	.init		= ghash_async_init,
	.update		= ghash_async_update,
	.final		= ghash_async_final,
	.setkey		= ghash_async_setkey,
	.digest		= ghash_async_digest,
	.export		= ghash_async_export,
	.import		= ghash_async_import,
	.halg = {
		.digestsize	= GHASH_DIGEST_SIZE,
		.statesize	= sizeof(struct ghash_desc_ctx),
		.base = {
			.cra_name		= "ghash",
			.cra_driver_name	= "ghash-clmulni",
			.cra_priority		= 400,
			.cra_ctxsize		= sizeof(struct ghash_async_ctx),
			.cra_flags		= CRYPTO_ALG_ASYNC,
			.cra_blocksize		= GHASH_BLOCK_SIZE,
			.cra_module		= THIS_MODULE,
			.cra_init		= ghash_async_init_tfm,
			.cra_exit		= ghash_async_exit_tfm,
		},
	},
};
|
|
|
|
|
2012-01-25 16:09:06 -07:00
|
|
|
/* Only load on CPUs that implement the PCLMULQDQ instruction. */
static const struct x86_cpu_id pcmul_cpu_id[] = {
	X86_MATCH_FEATURE(X86_FEATURE_PCLMULQDQ, NULL), /* Pickle-Mickle-Duck */
	{}
};
MODULE_DEVICE_TABLE(x86cpu, pcmul_cpu_id);
|
|
|
|
|
2009-10-18 19:53:06 -07:00
|
|
|
/*
 * Module init: bail out on CPUs without PCLMULQDQ, then register the
 * internal shash followed by the public async wrapper.  On failure of
 * the second registration the first is rolled back.
 */
static int __init ghash_pclmulqdqni_mod_init(void)
{
	int err;

	if (!x86_match_cpu(pcmul_cpu_id))
		return -ENODEV;

	err = crypto_register_shash(&ghash_alg);
	if (err)
		return err;

	err = crypto_register_ahash(&ghash_async_alg);
	if (err)
		crypto_unregister_shash(&ghash_alg);

	return err;
}
|
|
|
|
|
|
|
|
/* Module exit: unregister in reverse order of registration. */
static void __exit ghash_pclmulqdqni_mod_exit(void)
{
	crypto_unregister_ahash(&ghash_async_alg);
	crypto_unregister_shash(&ghash_alg);
}
|
|
|
|
|
|
|
|
module_init(ghash_pclmulqdqni_mod_init);
module_exit(ghash_pclmulqdqni_mod_exit);

MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("GHASH hash function, accelerated by PCLMULQDQ-NI");
/* Allow auto-loading when userspace requests the "ghash" algorithm. */
MODULE_ALIAS_CRYPTO("ghash");
|