Merge tag 'v6.12-p1' of git://git.kernel.org/pub/scm/linux/kernel/git/herbert/crypto-2.6

Pull crypto update from Herbert Xu:
 "API:
   - Make self-test asynchronous

  Algorithms:
   - Remove MPI functions added for SM3
   - Add allocation error checks to remaining MPI functions (introduced
     for SM3)
   - Set default Jitter RNG OSR to 3

  Drivers:
   - Add hwrng driver for Rockchip RK3568 SoC
   - Allow disabling SR-IOV VFs through sysfs in qat
   - Fix device reset bugs in hisilicon
   - Fix authenc key parsing by using generic helper in octeontx*

  Others:
   - Fix xor benchmarking on parisc"

* tag 'v6.12-p1' of git://git.kernel.org/pub/scm/linux/kernel/git/herbert/crypto-2.6: (96 commits)
  crypto: n2 - Set err to EINVAL if snprintf fails for hmac
  crypto: caam/qi - Use ERR_CAST() to return error-valued pointer
  crypto: mips/crc32 - Clean up useless assignment operations
  crypto: qcom-rng - rename *_of_data to *_match_data
  crypto: qcom-rng - fix support for ACPI-based systems
  dt-bindings: crypto: qcom,prng: document support for SA8255p
  crypto: aegis128 - Fix indentation issue in crypto_aegis128_process_crypt()
  crypto: octeontx* - Select CRYPTO_AUTHENC
  crypto: testmgr - Hide ENOENT errors
  crypto: qat - Remove trailing space after \n newline
  crypto: hisilicon/sec - Remove trailing space after \n newline
  crypto: algboss - Pass instance creation error up
  crypto: api - Fix generic algorithm self-test races
  crypto: hisilicon/qm - inject error before stopping queue
  crypto: hisilicon/hpre - mask cluster timeout error
  crypto: hisilicon/qm - reset device before enabling it
  crypto: hisilicon/trng - modifying the order of header files
  crypto: hisilicon - add a lock for the qp send operation
  crypto: hisilicon - fix missed error branch
  crypto: ccp - do not request interrupt on cmd completion when irqs disabled
  ...
Committed by Linus Torvalds on 2024-09-16 06:28:28 +02:00 as commit 85ffc6e4ed; 105 changed files, 1423 additions, 3900 deletions.

@ -137,7 +137,10 @@ patternProperties:
- const: fsl,sec-v4.0-rtic
reg:
maxItems: 1
items:
- description: RTIC control and status register space.
- description: RTIC recoverable error indication register space.
minItems: 1
ranges:
maxItems: 1


@ -17,6 +17,7 @@ properties:
- qcom,prng-ee # 8996 and later using EE
- items:
- enum:
- qcom,sa8255p-trng
- qcom,sa8775p-trng
- qcom,sc7280-trng
- qcom,sm8450-trng


@ -0,0 +1,61 @@
# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
%YAML 1.2
---
$id: http://devicetree.org/schemas/rng/rockchip,rk3568-rng.yaml#
$schema: http://devicetree.org/meta-schemas/core.yaml#
title: Rockchip RK3568 TRNG
description: True Random Number Generator on Rockchip RK3568 SoC
maintainers:
- Aurelien Jarno <aurelien@aurel32.net>
- Daniel Golle <daniel@makrotopia.org>
properties:
compatible:
enum:
- rockchip,rk3568-rng
reg:
maxItems: 1
clocks:
items:
- description: TRNG clock
- description: TRNG AHB clock
clock-names:
items:
- const: core
- const: ahb
resets:
maxItems: 1
required:
- compatible
- reg
- clocks
- clock-names
- resets
additionalProperties: false
examples:
- |
#include <dt-bindings/clock/rk3568-cru.h>
bus {
#address-cells = <2>;
#size-cells = <2>;
rng@fe388000 {
compatible = "rockchip,rk3568-rng";
reg = <0x0 0xfe388000 0x0 0x4000>;
clocks = <&cru CLK_TRNG_NS>, <&cru HCLK_TRNG_NS>;
clock-names = "core", "ahb";
resets = <&cru SRST_TRNG_NS>;
};
};
...


@ -19807,6 +19807,13 @@ F: Documentation/userspace-api/media/v4l/metafmt-rkisp1.rst
F: drivers/media/platform/rockchip/rkisp1
F: include/uapi/linux/rkisp1-config.h
ROCKCHIP RK3568 RANDOM NUMBER GENERATOR SUPPORT
M: Daniel Golle <daniel@makrotopia.org>
M: Aurelien Jarno <aurelien@aurel32.net>
S: Maintained
F: Documentation/devicetree/bindings/rng/rockchip,rk3568-rng.yaml
F: drivers/char/hw_random/rockchip-rng.c
ROCKCHIP RASTER 2D GRAPHIC ACCELERATION UNIT DRIVER
M: Jacob Chen <jacob-chen@iotwrt.com>
M: Ezequiel Garcia <ezequiel@vanguardiasur.com.ar>


@ -166,10 +166,9 @@ config CRYPTO_AES_ARM
config CRYPTO_AES_ARM_BS
tristate "Ciphers: AES, modes: ECB/CBC/CTR/XTS (bit-sliced NEON)"
depends on KERNEL_MODE_NEON
select CRYPTO_AES_ARM
select CRYPTO_SKCIPHER
select CRYPTO_LIB_AES
select CRYPTO_AES
select CRYPTO_CBC
select CRYPTO_SIMD
help
Length-preserving ciphers: AES cipher algorithms (FIPS-197)
@ -183,8 +182,15 @@ config CRYPTO_AES_ARM_BS
Bit sliced AES gives around 45% speedup on Cortex-A15 for CTR mode
and for XTS mode encryption, CBC and XTS mode decryption speedup is
around 25%. (CBC encryption speed is not affected by this driver.)
This implementation does not rely on any lookup tables so it is
believed to be invulnerable to cache timing attacks.
The bit sliced AES code does not use lookup tables, so it is believed
to be invulnerable to cache timing attacks. However, since the bit
sliced AES code cannot process single blocks efficiently, in certain
cases table-based code with some countermeasures against cache timing
attacks will still be used as a fallback method; specifically CBC
encryption (not CBC decryption), the encryption of XTS tweaks, XTS
ciphertext stealing when the message isn't a multiple of 16 bytes, and
CTR when invoked in a context in which NEON instructions are unusable.
config CRYPTO_AES_ARM_CE
tristate "Ciphers: AES, modes: ECB/CBC/CTS/CTR/XTS (ARMv8 Crypto Extensions)"


@ -711,7 +711,7 @@ static int __init aes_init(void)
algname = aes_algs[i].base.cra_name + 2;
drvname = aes_algs[i].base.cra_driver_name + 2;
basename = aes_algs[i].base.cra_driver_name;
simd = simd_skcipher_create_compat(algname, drvname, basename);
simd = simd_skcipher_create_compat(aes_algs + i, algname, drvname, basename);
err = PTR_ERR(simd);
if (IS_ERR(simd))
goto unregister_simds;
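
Two details worth calling out in this hunk: simd_skcipher_create_compat() now takes the internal algorithm descriptor directly instead of allocating a throwaway tfm just to look it up, and the "+ 2" offsets strip the "__" prefix that marks internal-only algorithm names. Spelled out for one entry, using the "__ctr(aes)"/"__ctr-aes-neonbs" names that appear later in this series:

/* the + 2 skips the "__" internal-only prefix */
algname  = aes_algs[i].base.cra_name + 2;	  /* "ctr(aes)"         */
drvname  = aes_algs[i].base.cra_driver_name + 2;  /* "ctr-aes-neonbs"   */
basename = aes_algs[i].base.cra_driver_name;	  /* "__ctr-aes-neonbs" */
simd = simd_skcipher_create_compat(aes_algs + i, algname, drvname, basename);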


@ -9,9 +9,10 @@
#include <crypto/aes.h>
#include <crypto/algapi.h>
#include <linux/module.h>
#include "aes-cipher.h"
asmlinkage void __aes_arm_encrypt(u32 *rk, int rounds, const u8 *in, u8 *out);
asmlinkage void __aes_arm_decrypt(u32 *rk, int rounds, const u8 *in, u8 *out);
EXPORT_SYMBOL_GPL(__aes_arm_encrypt);
EXPORT_SYMBOL_GPL(__aes_arm_decrypt);
static void aes_arm_encrypt(struct crypto_tfm *tfm, u8 *out, const u8 *in)
{


@ -0,0 +1,13 @@
/* SPDX-License-Identifier: GPL-2.0-only */
#ifndef ARM_CRYPTO_AES_CIPHER_H
#define ARM_CRYPTO_AES_CIPHER_H
#include <linux/linkage.h>
#include <linux/types.h>
asmlinkage void __aes_arm_encrypt(const u32 rk[], int rounds,
const u8 *in, u8 *out);
asmlinkage void __aes_arm_decrypt(const u32 rk[], int rounds,
const u8 *in, u8 *out);
#endif /* ARM_CRYPTO_AES_CIPHER_H */


@ -9,24 +9,22 @@
#include <asm/simd.h>
#include <crypto/aes.h>
#include <crypto/ctr.h>
#include <crypto/internal/cipher.h>
#include <crypto/internal/simd.h>
#include <crypto/internal/skcipher.h>
#include <crypto/scatterwalk.h>
#include <crypto/xts.h>
#include <linux/module.h>
#include "aes-cipher.h"
MODULE_AUTHOR("Ard Biesheuvel <ard.biesheuvel@linaro.org>");
MODULE_DESCRIPTION("Bit sliced AES using NEON instructions");
MODULE_LICENSE("GPL v2");
MODULE_ALIAS_CRYPTO("ecb(aes)");
MODULE_ALIAS_CRYPTO("cbc(aes)-all");
MODULE_ALIAS_CRYPTO("cbc(aes)");
MODULE_ALIAS_CRYPTO("ctr(aes)");
MODULE_ALIAS_CRYPTO("xts(aes)");
MODULE_IMPORT_NS(CRYPTO_INTERNAL);
asmlinkage void aesbs_convert_key(u8 out[], u32 const rk[], int rounds);
asmlinkage void aesbs_ecb_encrypt(u8 out[], u8 const in[], u8 const rk[],
@ -52,13 +50,13 @@ struct aesbs_ctx {
struct aesbs_cbc_ctx {
struct aesbs_ctx key;
struct crypto_skcipher *enc_tfm;
struct crypto_aes_ctx fallback;
};
struct aesbs_xts_ctx {
struct aesbs_ctx key;
struct crypto_cipher *cts_tfm;
struct crypto_cipher *tweak_tfm;
struct crypto_aes_ctx fallback;
struct crypto_aes_ctx tweak_key;
};
struct aesbs_ctr_ctx {
@ -129,37 +127,49 @@ static int aesbs_cbc_setkey(struct crypto_skcipher *tfm, const u8 *in_key,
unsigned int key_len)
{
struct aesbs_cbc_ctx *ctx = crypto_skcipher_ctx(tfm);
struct crypto_aes_ctx rk;
int err;
err = aes_expandkey(&rk, in_key, key_len);
err = aes_expandkey(&ctx->fallback, in_key, key_len);
if (err)
return err;
ctx->key.rounds = 6 + key_len / 4;
kernel_neon_begin();
aesbs_convert_key(ctx->key.rk, rk.key_enc, ctx->key.rounds);
aesbs_convert_key(ctx->key.rk, ctx->fallback.key_enc, ctx->key.rounds);
kernel_neon_end();
memzero_explicit(&rk, sizeof(rk));
return crypto_skcipher_setkey(ctx->enc_tfm, in_key, key_len);
return 0;
}
static int cbc_encrypt(struct skcipher_request *req)
{
struct skcipher_request *subreq = skcipher_request_ctx(req);
struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
struct aesbs_cbc_ctx *ctx = crypto_skcipher_ctx(tfm);
const struct aesbs_cbc_ctx *ctx = crypto_skcipher_ctx(tfm);
struct skcipher_walk walk;
unsigned int nbytes;
int err;
skcipher_request_set_tfm(subreq, ctx->enc_tfm);
skcipher_request_set_callback(subreq,
skcipher_request_flags(req),
NULL, NULL);
skcipher_request_set_crypt(subreq, req->src, req->dst,
req->cryptlen, req->iv);
err = skcipher_walk_virt(&walk, req, false);
return crypto_skcipher_encrypt(subreq);
while ((nbytes = walk.nbytes) >= AES_BLOCK_SIZE) {
const u8 *src = walk.src.virt.addr;
u8 *dst = walk.dst.virt.addr;
u8 *prev = walk.iv;
do {
crypto_xor_cpy(dst, src, prev, AES_BLOCK_SIZE);
__aes_arm_encrypt(ctx->fallback.key_enc,
ctx->key.rounds, dst, dst);
prev = dst;
src += AES_BLOCK_SIZE;
dst += AES_BLOCK_SIZE;
nbytes -= AES_BLOCK_SIZE;
} while (nbytes >= AES_BLOCK_SIZE);
memcpy(walk.iv, prev, AES_BLOCK_SIZE);
err = skcipher_walk_done(&walk, nbytes);
}
return err;
}
static int cbc_decrypt(struct skcipher_request *req)
@ -190,30 +200,6 @@ static int cbc_decrypt(struct skcipher_request *req)
return err;
}
static int cbc_init(struct crypto_skcipher *tfm)
{
struct aesbs_cbc_ctx *ctx = crypto_skcipher_ctx(tfm);
unsigned int reqsize;
ctx->enc_tfm = crypto_alloc_skcipher("cbc(aes)", 0, CRYPTO_ALG_ASYNC |
CRYPTO_ALG_NEED_FALLBACK);
if (IS_ERR(ctx->enc_tfm))
return PTR_ERR(ctx->enc_tfm);
reqsize = sizeof(struct skcipher_request);
reqsize += crypto_skcipher_reqsize(ctx->enc_tfm);
crypto_skcipher_set_reqsize(tfm, reqsize);
return 0;
}
static void cbc_exit(struct crypto_skcipher *tfm)
{
struct aesbs_cbc_ctx *ctx = crypto_skcipher_ctx(tfm);
crypto_free_skcipher(ctx->enc_tfm);
}
static int aesbs_ctr_setkey_sync(struct crypto_skcipher *tfm, const u8 *in_key,
unsigned int key_len)
{
@ -271,16 +257,8 @@ static int ctr_encrypt(struct skcipher_request *req)
static void ctr_encrypt_one(struct crypto_skcipher *tfm, const u8 *src, u8 *dst)
{
struct aesbs_ctr_ctx *ctx = crypto_skcipher_ctx(tfm);
unsigned long flags;
/*
* Temporarily disable interrupts to avoid races where
* cachelines are evicted when the CPU is interrupted
* to do something else.
*/
local_irq_save(flags);
aes_encrypt(&ctx->fallback, dst, src);
local_irq_restore(flags);
__aes_arm_encrypt(ctx->fallback.key_enc, ctx->key.rounds, src, dst);
}
static int ctr_encrypt_sync(struct skcipher_request *req)
@ -302,45 +280,23 @@ static int aesbs_xts_setkey(struct crypto_skcipher *tfm, const u8 *in_key,
return err;
key_len /= 2;
err = crypto_cipher_setkey(ctx->cts_tfm, in_key, key_len);
err = aes_expandkey(&ctx->fallback, in_key, key_len);
if (err)
return err;
err = crypto_cipher_setkey(ctx->tweak_tfm, in_key + key_len, key_len);
err = aes_expandkey(&ctx->tweak_key, in_key + key_len, key_len);
if (err)
return err;
return aesbs_setkey(tfm, in_key, key_len);
}
static int xts_init(struct crypto_skcipher *tfm)
{
struct aesbs_xts_ctx *ctx = crypto_skcipher_ctx(tfm);
ctx->cts_tfm = crypto_alloc_cipher("aes", 0, 0);
if (IS_ERR(ctx->cts_tfm))
return PTR_ERR(ctx->cts_tfm);
ctx->tweak_tfm = crypto_alloc_cipher("aes", 0, 0);
if (IS_ERR(ctx->tweak_tfm))
crypto_free_cipher(ctx->cts_tfm);
return PTR_ERR_OR_ZERO(ctx->tweak_tfm);
}
static void xts_exit(struct crypto_skcipher *tfm)
{
struct aesbs_xts_ctx *ctx = crypto_skcipher_ctx(tfm);
crypto_free_cipher(ctx->tweak_tfm);
crypto_free_cipher(ctx->cts_tfm);
}
static int __xts_crypt(struct skcipher_request *req, bool encrypt,
void (*fn)(u8 out[], u8 const in[], u8 const rk[],
int rounds, int blocks, u8 iv[], int))
{
struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
struct aesbs_xts_ctx *ctx = crypto_skcipher_ctx(tfm);
const int rounds = ctx->key.rounds;
int tail = req->cryptlen % AES_BLOCK_SIZE;
struct skcipher_request subreq;
u8 buf[2 * AES_BLOCK_SIZE];
@ -364,7 +320,7 @@ static int __xts_crypt(struct skcipher_request *req, bool encrypt,
if (err)
return err;
crypto_cipher_encrypt_one(ctx->tweak_tfm, walk.iv, walk.iv);
__aes_arm_encrypt(ctx->tweak_key.key_enc, rounds, walk.iv, walk.iv);
while (walk.nbytes >= AES_BLOCK_SIZE) {
unsigned int blocks = walk.nbytes / AES_BLOCK_SIZE;
@ -378,7 +334,7 @@ static int __xts_crypt(struct skcipher_request *req, bool encrypt,
kernel_neon_begin();
fn(walk.dst.virt.addr, walk.src.virt.addr, ctx->key.rk,
ctx->key.rounds, blocks, walk.iv, reorder_last_tweak);
rounds, blocks, walk.iv, reorder_last_tweak);
kernel_neon_end();
err = skcipher_walk_done(&walk,
walk.nbytes - blocks * AES_BLOCK_SIZE);
@ -396,9 +352,9 @@ static int __xts_crypt(struct skcipher_request *req, bool encrypt,
crypto_xor(buf, req->iv, AES_BLOCK_SIZE);
if (encrypt)
crypto_cipher_encrypt_one(ctx->cts_tfm, buf, buf);
__aes_arm_encrypt(ctx->fallback.key_enc, rounds, buf, buf);
else
crypto_cipher_decrypt_one(ctx->cts_tfm, buf, buf);
__aes_arm_decrypt(ctx->fallback.key_dec, rounds, buf, buf);
crypto_xor(buf, req->iv, AES_BLOCK_SIZE);
@ -439,8 +395,7 @@ static struct skcipher_alg aes_algs[] = { {
.base.cra_blocksize = AES_BLOCK_SIZE,
.base.cra_ctxsize = sizeof(struct aesbs_cbc_ctx),
.base.cra_module = THIS_MODULE,
.base.cra_flags = CRYPTO_ALG_INTERNAL |
CRYPTO_ALG_NEED_FALLBACK,
.base.cra_flags = CRYPTO_ALG_INTERNAL,
.min_keysize = AES_MIN_KEY_SIZE,
.max_keysize = AES_MAX_KEY_SIZE,
@ -449,8 +404,6 @@ static struct skcipher_alg aes_algs[] = { {
.setkey = aesbs_cbc_setkey,
.encrypt = cbc_encrypt,
.decrypt = cbc_decrypt,
.init = cbc_init,
.exit = cbc_exit,
}, {
.base.cra_name = "__ctr(aes)",
.base.cra_driver_name = "__ctr-aes-neonbs",
@ -500,8 +453,6 @@ static struct skcipher_alg aes_algs[] = { {
.setkey = aesbs_xts_setkey,
.encrypt = xts_encrypt,
.decrypt = xts_decrypt,
.init = xts_init,
.exit = xts_exit,
} };
static struct simd_skcipher_alg *aes_simd_algs[ARRAY_SIZE(aes_algs)];
@ -540,7 +491,7 @@ static int __init aes_init(void)
algname = aes_algs[i].base.cra_name + 2;
drvname = aes_algs[i].base.cra_driver_name + 2;
basename = aes_algs[i].base.cra_driver_name;
simd = simd_skcipher_create_compat(algname, drvname, basename);
simd = simd_skcipher_create_compat(aes_algs + i, algname, drvname, basename);
err = PTR_ERR(simd);
if (IS_ERR(simd))
goto unregister_simds;


@ -473,7 +473,8 @@ poly1305_blocks_neon:
subs $len,$len,#64
ldp x9,x13,[$inp,#48]
add $in2,$inp,#96
adr $zeros,.Lzeros
adrp $zeros,.Lzeros
add $zeros,$zeros,#:lo12:.Lzeros
lsl $padbit,$padbit,#24
add x15,$ctx,#48
@ -885,10 +886,13 @@ poly1305_blocks_neon:
ret
.size poly1305_blocks_neon,.-poly1305_blocks_neon
.pushsection .rodata
.align 5
.Lzeros:
.long 0,0,0,0,0,0,0,0
.asciz "Poly1305 for ARMv8, CRYPTOGAMS by \@dot-asm"
.popsection
.align 2
#if !defined(__KERNEL__) && !defined(_WIN64)
.comm OPENSSL_armcap_P,4,4


@ -77,24 +77,26 @@ static u32 crc32_mips_le_hw(u32 crc_, const u8 *p, unsigned int len)
{
u32 crc = crc_;
#ifdef CONFIG_64BIT
while (len >= sizeof(u64)) {
u64 value = get_unaligned_le64(p);
if (IS_ENABLED(CONFIG_64BIT)) {
for (; len >= sizeof(u64); p += sizeof(u64), len -= sizeof(u64)) {
u64 value = get_unaligned_le64(p);
CRC32(crc, value, d);
p += sizeof(u64);
len -= sizeof(u64);
}
CRC32(crc, value, d);
}
if (len & sizeof(u32)) {
#else /* !CONFIG_64BIT */
while (len >= sizeof(u32)) {
#endif
u32 value = get_unaligned_le32(p);
if (len & sizeof(u32)) {
u32 value = get_unaligned_le32(p);
CRC32(crc, value, w);
p += sizeof(u32);
len -= sizeof(u32);
CRC32(crc, value, w);
p += sizeof(u32);
}
} else {
for (; len >= sizeof(u32); len -= sizeof(u32)) {
u32 value = get_unaligned_le32(p);
CRC32(crc, value, w);
p += sizeof(u32);
}
}
if (len & sizeof(u16)) {
@ -117,24 +119,26 @@ static u32 crc32c_mips_le_hw(u32 crc_, const u8 *p, unsigned int len)
{
u32 crc = crc_;
#ifdef CONFIG_64BIT
while (len >= sizeof(u64)) {
u64 value = get_unaligned_le64(p);
if (IS_ENABLED(CONFIG_64BIT)) {
for (; len >= sizeof(u64); p += sizeof(u64), len -= sizeof(u64)) {
u64 value = get_unaligned_le64(p);
CRC32C(crc, value, d);
p += sizeof(u64);
len -= sizeof(u64);
}
CRC32(crc, value, d);
}
if (len & sizeof(u32)) {
#else /* !CONFIG_64BIT */
while (len >= sizeof(u32)) {
#endif
u32 value = get_unaligned_le32(p);
if (len & sizeof(u32)) {
u32 value = get_unaligned_le32(p);
CRC32C(crc, value, w);
p += sizeof(u32);
len -= sizeof(u32);
CRC32(crc, value, w);
p += sizeof(u32);
}
} else {
for (; len >= sizeof(u32); len -= sizeof(u32)) {
u32 value = get_unaligned_le32(p);
CRC32(crc, value, w);
p += sizeof(u32);
}
}
if (len & sizeof(u16)) {


@ -295,5 +295,6 @@ module_exit(curve25519_mod_exit);
MODULE_ALIAS_CRYPTO("curve25519");
MODULE_ALIAS_CRYPTO("curve25519-ppc64le");
MODULE_DESCRIPTION("PPC64le Curve25519 scalar multiplication with 51 bits limbs");
MODULE_LICENSE("GPL v2");
MODULE_AUTHOR("Danny Tsen <dtsen@us.ibm.com>");


@ -14,7 +14,7 @@ config CRYPTO_CURVE25519_X86
- ADX (large integer arithmetic)
config CRYPTO_AES_NI_INTEL
tristate "Ciphers: AES, modes: ECB, CBC, CTS, CTR, XTR, XTS, GCM (AES-NI)"
tristate "Ciphers: AES, modes: ECB, CBC, CTS, CTR, XCTR, XTS, GCM (AES-NI/VAES)"
depends on X86
select CRYPTO_AEAD
select CRYPTO_LIB_AES
@ -25,10 +25,14 @@ config CRYPTO_AES_NI_INTEL
help
Block cipher: AES cipher algorithms
AEAD cipher: AES with GCM
Length-preserving ciphers: AES with ECB, CBC, CTS, CTR, XTR, XTS
Length-preserving ciphers: AES with ECB, CBC, CTS, CTR, XCTR, XTS
Architecture: x86 (32-bit and 64-bit) using:
- AES-NI (AES new instructions)
- VAES (Vector AES)
Some algorithm implementations are supported only in 64-bit builds,
and some have additional prerequisites such as AVX2 or AVX512.
config CRYPTO_BLOWFISH_X86_64
tristate "Ciphers: Blowfish, modes: ECB, CBC"


@ -1366,6 +1366,8 @@ gcm_crypt(struct aead_request *req, int flags)
err = skcipher_walk_aead_encrypt(&walk, req, false);
else
err = skcipher_walk_aead_decrypt(&walk, req, false);
if (err)
return err;
/*
* Since the AES-GCM assembly code requires that at least three assembly
@ -1381,37 +1383,31 @@ gcm_crypt(struct aead_request *req, int flags)
gcm_process_assoc(key, ghash_acc, req->src, assoclen, flags);
/* En/decrypt the data and pass the ciphertext through GHASH. */
while ((nbytes = walk.nbytes) != 0) {
if (unlikely(nbytes < walk.total)) {
/*
* Non-last segment. In this case, the assembly
* function requires that the length be a multiple of 16
* (AES_BLOCK_SIZE) bytes. The needed buffering of up
* to 16 bytes is handled by the skcipher_walk. Here we
* just need to round down to a multiple of 16.
*/
nbytes = round_down(nbytes, AES_BLOCK_SIZE);
aes_gcm_update(key, le_ctr, ghash_acc,
walk.src.virt.addr, walk.dst.virt.addr,
nbytes, flags);
le_ctr[0] += nbytes / AES_BLOCK_SIZE;
kernel_fpu_end();
err = skcipher_walk_done(&walk, walk.nbytes - nbytes);
kernel_fpu_begin();
} else {
/* Last segment: process all remaining data. */
aes_gcm_update(key, le_ctr, ghash_acc,
walk.src.virt.addr, walk.dst.virt.addr,
nbytes, flags);
err = skcipher_walk_done(&walk, 0);
/*
* The low word of the counter isn't used by the
* finalize, so there's no need to increment it here.
*/
}
while (unlikely((nbytes = walk.nbytes) < walk.total)) {
/*
* Non-last segment. In this case, the assembly function
* requires that the length be a multiple of 16 (AES_BLOCK_SIZE)
* bytes. The needed buffering of up to 16 bytes is handled by
* the skcipher_walk. Here we just need to round down to a
* multiple of 16.
*/
nbytes = round_down(nbytes, AES_BLOCK_SIZE);
aes_gcm_update(key, le_ctr, ghash_acc, walk.src.virt.addr,
walk.dst.virt.addr, nbytes, flags);
le_ctr[0] += nbytes / AES_BLOCK_SIZE;
kernel_fpu_end();
err = skcipher_walk_done(&walk, walk.nbytes - nbytes);
if (err)
return err;
kernel_fpu_begin();
}
if (err)
goto out;
/* Last segment: process all remaining data. */
aes_gcm_update(key, le_ctr, ghash_acc, walk.src.virt.addr,
walk.dst.virt.addr, nbytes, flags);
/*
* The low word of the counter isn't used by the finalize, so there's no
* need to increment it here.
*/
/* Finalize */
taglen = crypto_aead_authsize(tfm);
@ -1439,8 +1435,9 @@ gcm_crypt(struct aead_request *req, int flags)
datalen, tag, taglen, flags))
err = -EBADMSG;
}
out:
kernel_fpu_end();
if (nbytes)
skcipher_walk_done(&walk, 0);
return err;
}
@ -1753,6 +1750,6 @@ static void __exit aesni_exit(void)
late_initcall(aesni_init);
module_exit(aesni_exit);
MODULE_DESCRIPTION("Rijndael (AES) Cipher Algorithm, Intel AES-NI instructions optimized");
MODULE_DESCRIPTION("AES cipher and modes, optimized with AES-NI or VAES instructions");
MODULE_LICENSE("GPL");
MODULE_ALIAS_CRYPTO("aes");


@ -592,22 +592,22 @@ SYM_TYPED_FUNC_START(sha256_transform_rorx)
leaq K256+0*32(%rip), INP ## reuse INP as scratch reg
vpaddd (INP, SRND), X0, XFER
vmovdqa XFER, 0*32+_XFER(%rsp, SRND)
FOUR_ROUNDS_AND_SCHED _XFER + 0*32
FOUR_ROUNDS_AND_SCHED (_XFER + 0*32)
leaq K256+1*32(%rip), INP
vpaddd (INP, SRND), X0, XFER
vmovdqa XFER, 1*32+_XFER(%rsp, SRND)
FOUR_ROUNDS_AND_SCHED _XFER + 1*32
FOUR_ROUNDS_AND_SCHED (_XFER + 1*32)
leaq K256+2*32(%rip), INP
vpaddd (INP, SRND), X0, XFER
vmovdqa XFER, 2*32+_XFER(%rsp, SRND)
FOUR_ROUNDS_AND_SCHED _XFER + 2*32
FOUR_ROUNDS_AND_SCHED (_XFER + 2*32)
leaq K256+3*32(%rip), INP
vpaddd (INP, SRND), X0, XFER
vmovdqa XFER, 3*32+_XFER(%rsp, SRND)
FOUR_ROUNDS_AND_SCHED _XFER + 3*32
FOUR_ROUNDS_AND_SCHED (_XFER + 3*32)
add $4*32, SRND
cmp $3*4*32, SRND
@ -618,12 +618,12 @@ SYM_TYPED_FUNC_START(sha256_transform_rorx)
leaq K256+0*32(%rip), INP
vpaddd (INP, SRND), X0, XFER
vmovdqa XFER, 0*32+_XFER(%rsp, SRND)
DO_4ROUNDS _XFER + 0*32
DO_4ROUNDS (_XFER + 0*32)
leaq K256+1*32(%rip), INP
vpaddd (INP, SRND), X1, XFER
vmovdqa XFER, 1*32+_XFER(%rsp, SRND)
DO_4ROUNDS _XFER + 1*32
DO_4ROUNDS (_XFER + 1*32)
add $2*32, SRND
vmovdqa X2, X0
@ -651,8 +651,8 @@ SYM_TYPED_FUNC_START(sha256_transform_rorx)
xor SRND, SRND
.align 16
.Lloop3:
DO_4ROUNDS _XFER + 0*32 + 16
DO_4ROUNDS _XFER + 1*32 + 16
DO_4ROUNDS (_XFER + 0*32 + 16)
DO_4ROUNDS (_XFER + 1*32 + 16)
add $2*32, SRND
cmp $4*4*32, SRND
jb .Lloop3


@ -1305,7 +1305,7 @@ config CRYPTO_JITTERENTROPY_MEMORY_BLOCKSIZE
config CRYPTO_JITTERENTROPY_OSR
int "CPU Jitter RNG Oversampling Rate"
range 1 15
default 1
default 3
help
The Jitter RNG allows the specification of an oversampling rate (OSR).
The Jitter RNG operation requires a fixed amount of timing
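
As a rough cost model for the new default (an illustrative assumption, not taken from the patch): the Jitter RNG gathers on the order of one timing sample per output bit per unit of OSR, so OSR=3 needs roughly three times the sampling work, and seeding time, of OSR=1 in exchange for a more conservative per-sample entropy claim:

#include <linux/types.h>

/* illustrative only: approximate samples per 256-bit output block, with
 * the 64-bit safety factor applied in FIPS mode (cf. the
 * JENT_ENTROPY_SAFETY_FACTOR definition in jitterentropy.c) */
static unsigned int jent_samples_per_block(unsigned int osr, bool fips)
{
	unsigned int bits = 256 + (fips ? 64 : 0);

	return bits * osr;	/* osr=3 is ~3x the work of osr=1 */
}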


@ -323,8 +323,9 @@ static __always_inline
int crypto_aegis128_process_crypt(struct aegis_state *state,
struct skcipher_walk *walk,
void (*crypt)(struct aegis_state *state,
u8 *dst, const u8 *src,
unsigned int size))
u8 *dst,
const u8 *src,
unsigned int size))
{
int err = 0;


@ -235,7 +235,6 @@ void crypto_remove_spawns(struct crypto_alg *alg, struct list_head *list,
EXPORT_SYMBOL_GPL(crypto_remove_spawns);
static void crypto_alg_finish_registration(struct crypto_alg *alg,
bool fulfill_requests,
struct list_head *algs_to_put)
{
struct crypto_alg *q;
@ -247,30 +246,8 @@ static void crypto_alg_finish_registration(struct crypto_alg *alg,
if (crypto_is_moribund(q))
continue;
if (crypto_is_larval(q)) {
struct crypto_larval *larval = (void *)q;
/*
* Check to see if either our generic name or
* specific name can satisfy the name requested
* by the larval entry q.
*/
if (strcmp(alg->cra_name, q->cra_name) &&
strcmp(alg->cra_driver_name, q->cra_name))
continue;
if (larval->adult)
continue;
if ((q->cra_flags ^ alg->cra_flags) & larval->mask)
continue;
if (fulfill_requests && crypto_mod_get(alg))
larval->adult = alg;
else
larval->adult = ERR_PTR(-EAGAIN);
if (crypto_is_larval(q))
continue;
}
if (strcmp(alg->cra_name, q->cra_name))
continue;
@ -359,7 +336,7 @@ __crypto_register_alg(struct crypto_alg *alg, struct list_head *algs_to_put)
list_add(&larval->alg.cra_list, &crypto_alg_list);
} else {
alg->cra_flags |= CRYPTO_ALG_TESTED;
crypto_alg_finish_registration(alg, true, algs_to_put);
crypto_alg_finish_registration(alg, algs_to_put);
}
out:
@ -376,7 +353,6 @@ void crypto_alg_tested(const char *name, int err)
struct crypto_alg *alg;
struct crypto_alg *q;
LIST_HEAD(list);
bool best;
down_write(&crypto_alg_sem);
list_for_each_entry(q, &crypto_alg_list, cra_list) {
@ -390,7 +366,8 @@ void crypto_alg_tested(const char *name, int err)
}
pr_err("alg: Unexpected test result for %s: %d\n", name, err);
goto unlock;
up_write(&crypto_alg_sem);
return;
found:
q->cra_flags |= CRYPTO_ALG_DEAD;
@ -408,32 +385,15 @@ found:
alg->cra_flags |= CRYPTO_ALG_TESTED;
/*
* If a higher-priority implementation of the same algorithm is
* currently being tested, then don't fulfill request larvals.
*/
best = true;
list_for_each_entry(q, &crypto_alg_list, cra_list) {
if (crypto_is_moribund(q) || !crypto_is_larval(q))
continue;
if (strcmp(alg->cra_name, q->cra_name))
continue;
if (q->cra_priority > alg->cra_priority) {
best = false;
break;
}
}
crypto_alg_finish_registration(alg, best, &list);
crypto_alg_finish_registration(alg, &list);
complete:
list_del_init(&test->alg.cra_list);
complete_all(&test->completion);
unlock:
up_write(&crypto_alg_sem);
crypto_alg_put(&test->alg);
crypto_remove_final(&list);
}
EXPORT_SYMBOL_GPL(crypto_alg_tested);
@ -454,7 +414,6 @@ int crypto_register_alg(struct crypto_alg *alg)
{
struct crypto_larval *larval;
LIST_HEAD(algs_to_put);
bool test_started = false;
int err;
alg->cra_flags &= ~CRYPTO_ALG_DEAD;
@ -465,15 +424,16 @@ int crypto_register_alg(struct crypto_alg *alg)
down_write(&crypto_alg_sem);
larval = __crypto_register_alg(alg, &algs_to_put);
if (!IS_ERR_OR_NULL(larval)) {
test_started = crypto_boot_test_finished();
bool test_started = crypto_boot_test_finished();
larval->test_started = test_started;
if (test_started)
crypto_schedule_test(larval);
}
up_write(&crypto_alg_sem);
if (IS_ERR(larval))
return PTR_ERR(larval);
if (test_started)
crypto_wait_for_test(larval);
crypto_remove_final(&algs_to_put);
return 0;
}
@ -688,8 +648,10 @@ int crypto_register_instance(struct crypto_template *tmpl,
larval = __crypto_register_alg(&inst->alg, &algs_to_put);
if (IS_ERR(larval))
goto unlock;
else if (larval)
else if (larval) {
larval->test_started = true;
crypto_schedule_test(larval);
}
hlist_add_head(&inst->list, &tmpl->instances);
inst->tmpl = tmpl;
@ -699,8 +661,6 @@ unlock:
if (IS_ERR(larval))
return PTR_ERR(larval);
if (larval)
crypto_wait_for_test(larval);
crypto_remove_final(&algs_to_put);
return 0;
}
@ -1084,6 +1044,7 @@ static void __init crypto_start_tests(void)
l->test_started = true;
larval = l;
crypto_schedule_test(larval);
break;
}
@ -1091,8 +1052,6 @@ static void __init crypto_start_tests(void)
if (!larval)
break;
crypto_wait_for_test(larval);
}
set_crypto_boot_test_finished();


@ -51,7 +51,7 @@ static int cryptomgr_probe(void *data)
{
struct cryptomgr_param *param = data;
struct crypto_template *tmpl;
int err;
int err = -ENOENT;
tmpl = crypto_lookup_template(param->template);
if (!tmpl)
@ -64,6 +64,8 @@ static int cryptomgr_probe(void *data)
crypto_tmpl_put(tmpl);
out:
param->larval->adult = ERR_PTR(err);
param->larval->alg.cra_flags |= CRYPTO_ALG_DEAD;
complete_all(&param->larval->completion);
crypto_alg_put(&param->larval->alg);
kfree(param);


@ -37,6 +37,8 @@ DEFINE_STATIC_KEY_FALSE(__crypto_boot_test_finished);
#endif
static struct crypto_alg *crypto_larval_wait(struct crypto_alg *alg);
static struct crypto_alg *crypto_alg_lookup(const char *name, u32 type,
u32 mask);
struct crypto_alg *crypto_mod_get(struct crypto_alg *alg)
{
@ -68,11 +70,6 @@ static struct crypto_alg *__crypto_alg_lookup(const char *name, u32 type,
if ((q->cra_flags ^ type) & mask)
continue;
if (crypto_is_larval(q) &&
!crypto_is_test_larval((struct crypto_larval *)q) &&
((struct crypto_larval *)q)->mask != mask)
continue;
exact = !strcmp(q->cra_driver_name, name);
fuzzy = !strcmp(q->cra_name, name);
if (!exact && !(fuzzy && q->cra_priority > best))
@ -111,6 +108,8 @@ struct crypto_larval *crypto_larval_alloc(const char *name, u32 type, u32 mask)
if (!larval)
return ERR_PTR(-ENOMEM);
type &= ~CRYPTO_ALG_TYPE_MASK | (mask ?: CRYPTO_ALG_TYPE_MASK);
larval->mask = mask;
larval->alg.cra_flags = CRYPTO_ALG_LARVAL | type;
larval->alg.cra_priority = -1;
@ -152,32 +151,31 @@ static struct crypto_alg *crypto_larval_add(const char *name, u32 type,
return alg;
}
void crypto_larval_kill(struct crypto_alg *alg)
static void crypto_larval_kill(struct crypto_larval *larval)
{
struct crypto_larval *larval = (void *)alg;
bool unlinked;
down_write(&crypto_alg_sem);
list_del(&alg->cra_list);
unlinked = list_empty(&larval->alg.cra_list);
if (!unlinked)
list_del_init(&larval->alg.cra_list);
up_write(&crypto_alg_sem);
complete_all(&larval->completion);
crypto_alg_put(alg);
}
EXPORT_SYMBOL_GPL(crypto_larval_kill);
void crypto_wait_for_test(struct crypto_larval *larval)
if (unlinked)
return;
complete_all(&larval->completion);
crypto_alg_put(&larval->alg);
}
void crypto_schedule_test(struct crypto_larval *larval)
{
int err;
err = crypto_probing_notify(CRYPTO_MSG_ALG_REGISTER, larval->adult);
if (WARN_ON_ONCE(err != NOTIFY_STOP))
goto out;
err = wait_for_completion_killable(&larval->completion);
WARN_ON(err);
out:
crypto_larval_kill(&larval->alg);
WARN_ON_ONCE(err != NOTIFY_STOP);
}
EXPORT_SYMBOL_GPL(crypto_wait_for_test);
EXPORT_SYMBOL_GPL(crypto_schedule_test);
static void crypto_start_test(struct crypto_larval *larval)
{
@ -196,14 +194,17 @@ static void crypto_start_test(struct crypto_larval *larval)
larval->test_started = true;
up_write(&crypto_alg_sem);
crypto_wait_for_test(larval);
crypto_schedule_test(larval);
}
static struct crypto_alg *crypto_larval_wait(struct crypto_alg *alg)
{
struct crypto_larval *larval = (void *)alg;
struct crypto_larval *larval;
long time_left;
again:
larval = container_of(alg, struct crypto_larval, alg);
if (!crypto_boot_test_finished())
crypto_start_test(larval);
@ -213,11 +214,20 @@ static struct crypto_alg *crypto_larval_wait(struct crypto_alg *alg)
alg = larval->adult;
if (time_left < 0)
alg = ERR_PTR(-EINTR);
else if (!time_left)
else if (!time_left) {
if (crypto_is_test_larval(larval))
crypto_larval_kill(larval);
alg = ERR_PTR(-ETIMEDOUT);
else if (!alg)
alg = ERR_PTR(-ENOENT);
else if (IS_ERR(alg))
} else if (!alg) {
u32 type;
u32 mask;
alg = &larval->alg;
type = alg->cra_flags & ~(CRYPTO_ALG_LARVAL | CRYPTO_ALG_DEAD);
mask = larval->mask;
alg = crypto_alg_lookup(alg->cra_name, type, mask) ?:
ERR_PTR(-EAGAIN);
} else if (IS_ERR(alg))
;
else if (crypto_is_test_larval(larval) &&
!(alg->cra_flags & CRYPTO_ALG_TESTED))
@ -228,6 +238,9 @@ static struct crypto_alg *crypto_larval_wait(struct crypto_alg *alg)
alg = ERR_PTR(-EAGAIN);
crypto_mod_put(&larval->alg);
if (!IS_ERR(alg) && crypto_is_larval(alg))
goto again;
return alg;
}
@ -292,8 +305,12 @@ static struct crypto_alg *crypto_larval_lookup(const char *name, u32 type,
if (!IS_ERR_OR_NULL(alg) && crypto_is_larval(alg))
alg = crypto_larval_wait(alg);
else if (!alg)
else if (alg)
;
else if (!(mask & CRYPTO_ALG_TESTED))
alg = crypto_larval_add(name, type, mask);
else
alg = ERR_PTR(-ENOENT);
return alg;
}
@ -340,7 +357,7 @@ struct crypto_alg *crypto_alg_mod_lookup(const char *name, u32 type, u32 mask)
crypto_mod_put(larval);
alg = ERR_PTR(-ENOENT);
}
crypto_larval_kill(larval);
crypto_larval_kill(container_of(larval, struct crypto_larval, alg));
return alg;
}
EXPORT_SYMBOL_GPL(crypto_alg_mod_lookup);


@ -27,7 +27,7 @@ struct chachapoly_ctx {
struct crypto_ahash *poly;
/* key bytes we use for the ChaCha20 IV */
unsigned int saltlen;
u8 salt[];
u8 salt[] __counted_by(saltlen);
};
struct poly_req {
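
The __counted_by() annotation added above ties the flexible salt[] array to its saltlen counter, letting fortified memory helpers and UBSAN bounds checks see the runtime size. A minimal sketch of the general pattern (struct blob and blob_alloc() are hypothetical):

#include <linux/overflow.h>	/* struct_size() */
#include <linux/slab.h>

struct blob {
	size_t len;
	u8 data[] __counted_by(len);
};

static struct blob *blob_alloc(size_t len)
{
	struct blob *b = kzalloc(struct_size(b, data, len), GFP_KERNEL);

	if (b)
		b->len = len;	/* set the counter before indexing data[] */
	return b;
}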


@ -145,9 +145,9 @@ static int dh_is_pubkey_valid(struct dh_ctx *ctx, MPI y)
* ->p is odd, so no need to explicitly subtract one
* from it before shifting to the right.
*/
mpi_rshift(q, ctx->p, 1);
ret = mpi_rshift(q, ctx->p, 1) ?:
mpi_powm(val, y, q, ctx->p);
ret = mpi_powm(val, y, q, ctx->p);
mpi_free(q);
if (ret) {
mpi_free(val);
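
The rewritten dh error handling (and the rsa hunk further down) leans on the GNU "a ?: b" extension: b is evaluated only when a is zero, so the first nonzero return short-circuits the chain and propagates unchanged. A sketch with hypothetical helpers:

struct ctx;
int step1(struct ctx *c);	/* hypothetical: each returns 0 or -errno */
int step2(struct ctx *c);
int step3(struct ctx *c);

static int do_all_steps(struct ctx *c)
{
	return step1(c) ?:
	       step2(c) ?:
	       step3(c);	/* runs only if step1 and step2 returned 0 */
}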


@ -113,8 +113,7 @@ struct crypto_alg *crypto_mod_get(struct crypto_alg *alg);
struct crypto_alg *crypto_alg_mod_lookup(const char *name, u32 type, u32 mask);
struct crypto_larval *crypto_larval_alloc(const char *name, u32 type, u32 mask);
void crypto_larval_kill(struct crypto_alg *alg);
void crypto_wait_for_test(struct crypto_larval *larval);
void crypto_schedule_test(struct crypto_larval *larval);
void crypto_alg_tested(const char *name, int err);
void crypto_remove_spawns(struct crypto_alg *alg, struct list_head *list,


@ -146,6 +146,7 @@ struct rand_data {
#define JENT_ENTROPY_SAFETY_FACTOR 64
#include <linux/fips.h>
#include <linux/minmax.h>
#include "jitterentropy.h"
/***************************************************************************
@ -638,10 +639,7 @@ int jent_read_entropy(struct rand_data *ec, unsigned char *data,
return -2;
}
if ((DATA_SIZE_BITS / 8) < len)
tocopy = (DATA_SIZE_BITS / 8);
else
tocopy = len;
tocopy = min(DATA_SIZE_BITS / 8, len);
if (jent_read_random_block(ec->hash_state, p, tocopy))
return -1;


@ -98,14 +98,13 @@ static int _rsa_dec_crt(const struct rsa_mpi_key *key, MPI m_or_m1_or_h, MPI c)
goto err_free_mpi;
/* (2iii) h = (m_1 - m_2) * qInv mod p */
mpi_sub(m12_or_qh, m_or_m1_or_h, m2);
mpi_mulm(m_or_m1_or_h, m12_or_qh, key->qinv, key->p);
ret = mpi_sub(m12_or_qh, m_or_m1_or_h, m2) ?:
mpi_mulm(m_or_m1_or_h, m12_or_qh, key->qinv, key->p);
/* (2iv) m = m_2 + q * h */
mpi_mul(m12_or_qh, key->q, m_or_m1_or_h);
mpi_addm(m_or_m1_or_h, m2, m12_or_qh, key->n);
ret = 0;
ret = ret ?:
mpi_mul(m12_or_qh, key->q, m_or_m1_or_h) ?:
mpi_addm(m_or_m1_or_h, m2, m12_or_qh, key->n);
err_free_mpi:
mpi_free(m12_or_qh);
@ -236,6 +235,7 @@ static int rsa_check_key_length(unsigned int len)
static int rsa_check_exponent_fips(MPI e)
{
MPI e_max = NULL;
int err;
/* check if odd */
if (!mpi_test_bit(e, 0)) {
@ -250,7 +250,12 @@ static int rsa_check_exponent_fips(MPI e)
e_max = mpi_alloc(0);
if (!e_max)
return -ENOMEM;
mpi_set_bit(e_max, 256);
err = mpi_set_bit(e_max, 256);
if (err) {
mpi_free(e_max);
return err;
}
if (mpi_cmp(e, e_max) >= 0) {
mpi_free(e_max);


@ -136,27 +136,19 @@ static int simd_skcipher_init(struct crypto_skcipher *tfm)
return 0;
}
struct simd_skcipher_alg *simd_skcipher_create_compat(const char *algname,
struct simd_skcipher_alg *simd_skcipher_create_compat(struct skcipher_alg *ialg,
const char *algname,
const char *drvname,
const char *basename)
{
struct simd_skcipher_alg *salg;
struct crypto_skcipher *tfm;
struct skcipher_alg *ialg;
struct skcipher_alg *alg;
int err;
tfm = crypto_alloc_skcipher(basename, CRYPTO_ALG_INTERNAL,
CRYPTO_ALG_INTERNAL | CRYPTO_ALG_ASYNC);
if (IS_ERR(tfm))
return ERR_CAST(tfm);
ialg = crypto_skcipher_alg(tfm);
salg = kzalloc(sizeof(*salg), GFP_KERNEL);
if (!salg) {
salg = ERR_PTR(-ENOMEM);
goto out_put_tfm;
goto out;
}
salg->ialg_name = basename;
@ -195,30 +187,16 @@ struct simd_skcipher_alg *simd_skcipher_create_compat(const char *algname,
if (err)
goto out_free_salg;
out_put_tfm:
crypto_free_skcipher(tfm);
out:
return salg;
out_free_salg:
kfree(salg);
salg = ERR_PTR(err);
goto out_put_tfm;
goto out;
}
EXPORT_SYMBOL_GPL(simd_skcipher_create_compat);
struct simd_skcipher_alg *simd_skcipher_create(const char *algname,
const char *basename)
{
char drvname[CRYPTO_MAX_ALG_NAME];
if (snprintf(drvname, CRYPTO_MAX_ALG_NAME, "simd-%s", basename) >=
CRYPTO_MAX_ALG_NAME)
return ERR_PTR(-ENAMETOOLONG);
return simd_skcipher_create_compat(algname, drvname, basename);
}
EXPORT_SYMBOL_GPL(simd_skcipher_create);
void simd_skcipher_free(struct simd_skcipher_alg *salg)
{
crypto_unregister_skcipher(&salg->alg);
@ -246,7 +224,7 @@ int simd_register_skciphers_compat(struct skcipher_alg *algs, int count,
algname = algs[i].base.cra_name + 2;
drvname = algs[i].base.cra_driver_name + 2;
basename = algs[i].base.cra_driver_name;
simd = simd_skcipher_create_compat(algname, drvname, basename);
simd = simd_skcipher_create_compat(algs + i, algname, drvname, basename);
err = PTR_ERR(simd);
if (IS_ERR(simd))
goto err_unregister;
@ -383,27 +361,19 @@ static int simd_aead_init(struct crypto_aead *tfm)
return 0;
}
struct simd_aead_alg *simd_aead_create_compat(const char *algname,
const char *drvname,
const char *basename)
static struct simd_aead_alg *simd_aead_create_compat(struct aead_alg *ialg,
const char *algname,
const char *drvname,
const char *basename)
{
struct simd_aead_alg *salg;
struct crypto_aead *tfm;
struct aead_alg *ialg;
struct aead_alg *alg;
int err;
tfm = crypto_alloc_aead(basename, CRYPTO_ALG_INTERNAL,
CRYPTO_ALG_INTERNAL | CRYPTO_ALG_ASYNC);
if (IS_ERR(tfm))
return ERR_CAST(tfm);
ialg = crypto_aead_alg(tfm);
salg = kzalloc(sizeof(*salg), GFP_KERNEL);
if (!salg) {
salg = ERR_PTR(-ENOMEM);
goto out_put_tfm;
goto out;
}
salg->ialg_name = basename;
@ -442,36 +412,20 @@ struct simd_aead_alg *simd_aead_create_compat(const char *algname,
if (err)
goto out_free_salg;
out_put_tfm:
crypto_free_aead(tfm);
out:
return salg;
out_free_salg:
kfree(salg);
salg = ERR_PTR(err);
goto out_put_tfm;
goto out;
}
EXPORT_SYMBOL_GPL(simd_aead_create_compat);
struct simd_aead_alg *simd_aead_create(const char *algname,
const char *basename)
{
char drvname[CRYPTO_MAX_ALG_NAME];
if (snprintf(drvname, CRYPTO_MAX_ALG_NAME, "simd-%s", basename) >=
CRYPTO_MAX_ALG_NAME)
return ERR_PTR(-ENAMETOOLONG);
return simd_aead_create_compat(algname, drvname, basename);
}
EXPORT_SYMBOL_GPL(simd_aead_create);
void simd_aead_free(struct simd_aead_alg *salg)
static void simd_aead_free(struct simd_aead_alg *salg)
{
crypto_unregister_aead(&salg->alg);
kfree(salg);
}
EXPORT_SYMBOL_GPL(simd_aead_free);
int simd_register_aeads_compat(struct aead_alg *algs, int count,
struct simd_aead_alg **simd_algs)
@ -493,7 +447,7 @@ int simd_register_aeads_compat(struct aead_alg *algs, int count,
algname = algs[i].base.cra_name + 2;
drvname = algs[i].base.cra_driver_name + 2;
basename = algs[i].base.cra_driver_name;
simd = simd_aead_create_compat(algname, drvname, basename);
simd = simd_aead_create_compat(algs + i, algname, drvname, basename);
err = PTR_ERR(simd);
if (IS_ERR(simd))
goto err_unregister;


@ -1939,6 +1939,8 @@ static int __alg_test_hash(const struct hash_testvec *vecs,
atfm = crypto_alloc_ahash(driver, type, mask);
if (IS_ERR(atfm)) {
if (PTR_ERR(atfm) == -ENOENT)
return -ENOENT;
pr_err("alg: hash: failed to allocate transform for %s: %ld\n",
driver, PTR_ERR(atfm));
return PTR_ERR(atfm);
@ -2703,6 +2705,8 @@ static int alg_test_aead(const struct alg_test_desc *desc, const char *driver,
tfm = crypto_alloc_aead(driver, type, mask);
if (IS_ERR(tfm)) {
if (PTR_ERR(tfm) == -ENOENT)
return -ENOENT;
pr_err("alg: aead: failed to allocate transform for %s: %ld\n",
driver, PTR_ERR(tfm));
return PTR_ERR(tfm);
@ -3280,6 +3284,8 @@ static int alg_test_skcipher(const struct alg_test_desc *desc,
tfm = crypto_alloc_skcipher(driver, type, mask);
if (IS_ERR(tfm)) {
if (PTR_ERR(tfm) == -ENOENT)
return -ENOENT;
pr_err("alg: skcipher: failed to allocate transform for %s: %ld\n",
driver, PTR_ERR(tfm));
return PTR_ERR(tfm);
@ -3693,6 +3699,8 @@ static int alg_test_cipher(const struct alg_test_desc *desc,
tfm = crypto_alloc_cipher(driver, type, mask);
if (IS_ERR(tfm)) {
if (PTR_ERR(tfm) == -ENOENT)
return -ENOENT;
printk(KERN_ERR "alg: cipher: Failed to load transform for "
"%s: %ld\n", driver, PTR_ERR(tfm));
return PTR_ERR(tfm);
@ -3717,6 +3725,8 @@ static int alg_test_comp(const struct alg_test_desc *desc, const char *driver,
if (algo_type == CRYPTO_ALG_TYPE_ACOMPRESS) {
acomp = crypto_alloc_acomp(driver, type, mask);
if (IS_ERR(acomp)) {
if (PTR_ERR(acomp) == -ENOENT)
return -ENOENT;
pr_err("alg: acomp: Failed to load transform for %s: %ld\n",
driver, PTR_ERR(acomp));
return PTR_ERR(acomp);
@ -3729,6 +3739,8 @@ static int alg_test_comp(const struct alg_test_desc *desc, const char *driver,
} else {
comp = crypto_alloc_comp(driver, type, mask);
if (IS_ERR(comp)) {
if (PTR_ERR(comp) == -ENOENT)
return -ENOENT;
pr_err("alg: comp: Failed to load transform for %s: %ld\n",
driver, PTR_ERR(comp));
return PTR_ERR(comp);
@ -3805,6 +3817,8 @@ static int alg_test_cprng(const struct alg_test_desc *desc, const char *driver,
rng = crypto_alloc_rng(driver, type, mask);
if (IS_ERR(rng)) {
if (PTR_ERR(rng) == -ENOENT)
return -ENOENT;
printk(KERN_ERR "alg: cprng: Failed to load transform for %s: "
"%ld\n", driver, PTR_ERR(rng));
return PTR_ERR(rng);
@ -3832,10 +3846,13 @@ static int drbg_cavs_test(const struct drbg_testvec *test, int pr,
drng = crypto_alloc_rng(driver, type, mask);
if (IS_ERR(drng)) {
if (PTR_ERR(drng) == -ENOENT)
goto out_no_rng;
printk(KERN_ERR "alg: drbg: could not allocate DRNG handle for "
"%s\n", driver);
out_no_rng:
kfree_sensitive(buf);
return -ENOMEM;
return PTR_ERR(drng);
}
test_data.testentropy = &testentropy;
@ -4077,6 +4094,8 @@ static int alg_test_kpp(const struct alg_test_desc *desc, const char *driver,
tfm = crypto_alloc_kpp(driver, type, mask);
if (IS_ERR(tfm)) {
if (PTR_ERR(tfm) == -ENOENT)
return -ENOENT;
pr_err("alg: kpp: Failed to load tfm for %s: %ld\n",
driver, PTR_ERR(tfm));
return PTR_ERR(tfm);
@ -4305,6 +4324,8 @@ static int alg_test_akcipher(const struct alg_test_desc *desc,
tfm = crypto_alloc_akcipher(driver, type, mask);
if (IS_ERR(tfm)) {
if (PTR_ERR(tfm) == -ENOENT)
return -ENOENT;
pr_err("alg: akcipher: Failed to load tfm for %s: %ld\n",
driver, PTR_ERR(tfm));
return PTR_ERR(tfm);


@ -83,33 +83,30 @@ static void __init
do_xor_speed(struct xor_block_template *tmpl, void *b1, void *b2)
{
int speed;
int i, j;
ktime_t min, start, diff;
unsigned long reps;
ktime_t min, start, t0;
tmpl->next = template_list;
template_list = tmpl;
preempt_disable();
min = (ktime_t)S64_MAX;
for (i = 0; i < 3; i++) {
start = ktime_get();
for (j = 0; j < REPS; j++) {
mb(); /* prevent loop optimization */
tmpl->do_2(BENCH_SIZE, b1, b2);
mb();
}
diff = ktime_sub(ktime_get(), start);
if (diff < min)
min = diff;
}
reps = 0;
t0 = ktime_get();
/* delay start until time has advanced */
while ((start = ktime_get()) == t0)
cpu_relax();
do {
mb(); /* prevent loop optimization */
tmpl->do_2(BENCH_SIZE, b1, b2);
mb();
} while (reps++ < REPS || (t0 = ktime_get()) == start);
min = ktime_sub(t0, start);
preempt_enable();
// bytes/ns == GB/s, multiply by 1000 to get MB/s [not MiB/s]
if (!min)
min = 1;
speed = (1000 * REPS * BENCH_SIZE) / (unsigned int)ktime_to_ns(min);
speed = (1000 * reps * BENCH_SIZE) / (unsigned int)ktime_to_ns(min);
tmpl->speed = speed;
pr_info(" %-16s: %5d MB/sec\n", tmpl->name, speed);


@ -555,6 +555,7 @@ config HW_RANDOM_ARM_SMCCC_TRNG
config HW_RANDOM_CN10K
tristate "Marvell CN10K Random Number Generator support"
depends on HW_RANDOM && PCI && (ARM64 || (64BIT && COMPILE_TEST))
default HW_RANDOM if ARCH_THUNDER
help
This driver provides support for the True Random Number
generator available in Marvell CN10K SoCs.
@ -572,6 +573,20 @@ config HW_RANDOM_JH7110
To compile this driver as a module, choose M here.
The module will be called jh7110-trng.
config HW_RANDOM_ROCKCHIP
tristate "Rockchip True Random Number Generator"
depends on HW_RANDOM && (ARCH_ROCKCHIP || COMPILE_TEST)
depends on HAS_IOMEM
default HW_RANDOM
help
This driver provides kernel-side support for the True Random Number
Generator hardware found on some Rockchip SoC like RK3566 or RK3568.
To compile this driver as a module, choose M here: the
module will be called rockchip-rng.
If unsure, say Y.
endif # HW_RANDOM
config UML_RANDOM


@ -48,4 +48,5 @@ obj-$(CONFIG_HW_RANDOM_XIPHERA) += xiphera-trng.o
obj-$(CONFIG_HW_RANDOM_ARM_SMCCC_TRNG) += arm_smccc_trng.o
obj-$(CONFIG_HW_RANDOM_CN10K) += cn10k-rng.o
obj-$(CONFIG_HW_RANDOM_POLARFIRE_SOC) += mpfs-rng.o
obj-$(CONFIG_HW_RANDOM_ROCKCHIP) += rockchip-rng.o
obj-$(CONFIG_HW_RANDOM_JH7110) += jh7110-trng.o


@ -94,8 +94,10 @@ static int bcm2835_rng_init(struct hwrng *rng)
return ret;
ret = reset_control_reset(priv->reset);
if (ret)
if (ret) {
clk_disable_unprepare(priv->clk);
return ret;
}
if (priv->mask_interrupts) {
/* mask the interrupt */


@ -622,6 +622,7 @@ static int __maybe_unused cctrng_resume(struct device *dev)
/* wait for Cryptocell reset completion */
if (!cctrng_wait_for_reset_completion(drvdata)) {
dev_err(dev, "Cryptocell reset not completed");
clk_disable_unprepare(drvdata->clk);
return -EBUSY;
}


@ -142,7 +142,7 @@ static int mtk_rng_probe(struct platform_device *pdev)
dev_set_drvdata(&pdev->dev, priv);
pm_runtime_set_autosuspend_delay(&pdev->dev, RNG_AUTOSUSPEND_TIMEOUT);
pm_runtime_use_autosuspend(&pdev->dev);
pm_runtime_enable(&pdev->dev);
devm_pm_runtime_enable(&pdev->dev);
dev_info(&pdev->dev, "registered RNG driver\n");


@ -147,33 +147,25 @@ static int mxc_rnga_probe(struct platform_device *pdev)
mxc_rng->rng.data_present = mxc_rnga_data_present;
mxc_rng->rng.data_read = mxc_rnga_data_read;
mxc_rng->clk = devm_clk_get(&pdev->dev, NULL);
mxc_rng->clk = devm_clk_get_enabled(&pdev->dev, NULL);
if (IS_ERR(mxc_rng->clk)) {
dev_err(&pdev->dev, "Could not get rng_clk!\n");
return PTR_ERR(mxc_rng->clk);
}
err = clk_prepare_enable(mxc_rng->clk);
if (err)
return err;
mxc_rng->mem = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(mxc_rng->mem)) {
err = PTR_ERR(mxc_rng->mem);
goto err_ioremap;
return err;
}
err = hwrng_register(&mxc_rng->rng);
if (err) {
dev_err(&pdev->dev, "MXC RNGA registering failed (%d)\n", err);
goto err_ioremap;
return err;
}
return 0;
err_ioremap:
clk_disable_unprepare(mxc_rng->clk);
return err;
}
static void mxc_rnga_remove(struct platform_device *pdev)
@ -181,8 +173,6 @@ static void mxc_rnga_remove(struct platform_device *pdev)
struct mxc_rng *mxc_rng = platform_get_drvdata(pdev);
hwrng_unregister(&mxc_rng->rng);
clk_disable_unprepare(mxc_rng->clk);
}
static const struct of_device_id mxc_rnga_of_match[] = {


@ -0,0 +1,228 @@
// SPDX-License-Identifier: GPL-2.0
/*
* rockchip-rng.c True Random Number Generator driver for Rockchip RK3568 SoC
*
* Copyright (c) 2018, Fuzhou Rockchip Electronics Co., Ltd.
* Copyright (c) 2022, Aurelien Jarno
* Authors:
* Lin Jinhan <troy.lin@rock-chips.com>
* Aurelien Jarno <aurelien@aurel32.net>
*/
#include <linux/clk.h>
#include <linux/hw_random.h>
#include <linux/io.h>
#include <linux/iopoll.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/platform_device.h>
#include <linux/pm_runtime.h>
#include <linux/reset.h>
#include <linux/slab.h>
#define RK_RNG_AUTOSUSPEND_DELAY 100
#define RK_RNG_MAX_BYTE 32
#define RK_RNG_POLL_PERIOD_US 100
#define RK_RNG_POLL_TIMEOUT_US 10000
/*
* TRNG collects osc ring output bit every RK_RNG_SAMPLE_CNT time. The value is
* a tradeoff between speed and quality and has been adjusted to get a quality
* of ~900 (~87.5% of FIPS 140-2 successes).
*/
#define RK_RNG_SAMPLE_CNT 1000
/* TRNG registers from RK3568 TRM-Part2, section 5.4.1 */
#define TRNG_RST_CTL 0x0004
#define TRNG_RNG_CTL 0x0400
#define TRNG_RNG_CTL_LEN_64_BIT (0x00 << 4)
#define TRNG_RNG_CTL_LEN_128_BIT (0x01 << 4)
#define TRNG_RNG_CTL_LEN_192_BIT (0x02 << 4)
#define TRNG_RNG_CTL_LEN_256_BIT (0x03 << 4)
#define TRNG_RNG_CTL_OSC_RING_SPEED_0 (0x00 << 2)
#define TRNG_RNG_CTL_OSC_RING_SPEED_1 (0x01 << 2)
#define TRNG_RNG_CTL_OSC_RING_SPEED_2 (0x02 << 2)
#define TRNG_RNG_CTL_OSC_RING_SPEED_3 (0x03 << 2)
#define TRNG_RNG_CTL_MASK GENMASK(15, 0)
#define TRNG_RNG_CTL_ENABLE BIT(1)
#define TRNG_RNG_CTL_START BIT(0)
#define TRNG_RNG_SAMPLE_CNT 0x0404
#define TRNG_RNG_DOUT 0x0410
struct rk_rng {
struct hwrng rng;
void __iomem *base;
int clk_num;
struct clk_bulk_data *clk_bulks;
};
/* The mask in the upper 16 bits determines the bits that are updated */
static void rk_rng_write_ctl(struct rk_rng *rng, u32 val, u32 mask)
{
writel((mask << 16) | val, rng->base + TRNG_RNG_CTL);
}
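/*
 * Worked example of the write-mask convention (illustrative):
 * rk_rng_write_ctl(rng, TRNG_RNG_CTL_START, TRNG_RNG_CTL_START) writes
 * 0x00010001, so only bit 0 (START) is updated and every other control
 * bit keeps its current value.
 */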
static int rk_rng_init(struct hwrng *rng)
{
struct rk_rng *rk_rng = container_of(rng, struct rk_rng, rng);
int ret;
/* start clocks */
ret = clk_bulk_prepare_enable(rk_rng->clk_num, rk_rng->clk_bulks);
if (ret < 0) {
dev_err((struct device *) rk_rng->rng.priv,
"Failed to enable clks %d\n", ret);
return ret;
}
/* set the sample period */
writel(RK_RNG_SAMPLE_CNT, rk_rng->base + TRNG_RNG_SAMPLE_CNT);
/* set osc ring speed and enable it */
rk_rng_write_ctl(rk_rng, TRNG_RNG_CTL_LEN_256_BIT |
TRNG_RNG_CTL_OSC_RING_SPEED_0 |
TRNG_RNG_CTL_ENABLE,
TRNG_RNG_CTL_MASK);
return 0;
}
static void rk_rng_cleanup(struct hwrng *rng)
{
struct rk_rng *rk_rng = container_of(rng, struct rk_rng, rng);
/* stop TRNG */
rk_rng_write_ctl(rk_rng, 0, TRNG_RNG_CTL_MASK);
/* stop clocks */
clk_bulk_disable_unprepare(rk_rng->clk_num, rk_rng->clk_bulks);
}
static int rk_rng_read(struct hwrng *rng, void *buf, size_t max, bool wait)
{
struct rk_rng *rk_rng = container_of(rng, struct rk_rng, rng);
size_t to_read = min_t(size_t, max, RK_RNG_MAX_BYTE);
u32 reg;
int ret = 0;
ret = pm_runtime_resume_and_get((struct device *) rk_rng->rng.priv);
if (ret < 0)
return ret;
/* Start collecting random data */
rk_rng_write_ctl(rk_rng, TRNG_RNG_CTL_START, TRNG_RNG_CTL_START);
ret = readl_poll_timeout(rk_rng->base + TRNG_RNG_CTL, reg,
!(reg & TRNG_RNG_CTL_START),
RK_RNG_POLL_PERIOD_US,
RK_RNG_POLL_TIMEOUT_US);
if (ret < 0)
goto out;
/* Read random data stored in the registers */
memcpy_fromio(buf, rk_rng->base + TRNG_RNG_DOUT, to_read);
out:
pm_runtime_mark_last_busy((struct device *) rk_rng->rng.priv);
pm_runtime_put_sync_autosuspend((struct device *) rk_rng->rng.priv);
return (ret < 0) ? ret : to_read;
}
static int rk_rng_probe(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
struct reset_control *rst;
struct rk_rng *rk_rng;
int ret;
rk_rng = devm_kzalloc(dev, sizeof(*rk_rng), GFP_KERNEL);
if (!rk_rng)
return -ENOMEM;
rk_rng->base = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(rk_rng->base))
return PTR_ERR(rk_rng->base);
rk_rng->clk_num = devm_clk_bulk_get_all(dev, &rk_rng->clk_bulks);
if (rk_rng->clk_num < 0)
return dev_err_probe(dev, rk_rng->clk_num,
"Failed to get clks property\n");
rst = devm_reset_control_array_get_exclusive(&pdev->dev);
if (IS_ERR(rst))
return dev_err_probe(dev, PTR_ERR(rst), "Failed to get reset property\n");
reset_control_assert(rst);
udelay(2);
reset_control_deassert(rst);
platform_set_drvdata(pdev, rk_rng);
rk_rng->rng.name = dev_driver_string(dev);
if (!IS_ENABLED(CONFIG_PM)) {
rk_rng->rng.init = rk_rng_init;
rk_rng->rng.cleanup = rk_rng_cleanup;
}
rk_rng->rng.read = rk_rng_read;
rk_rng->rng.priv = (unsigned long) dev;
rk_rng->rng.quality = 900;
pm_runtime_set_autosuspend_delay(dev, RK_RNG_AUTOSUSPEND_DELAY);
pm_runtime_use_autosuspend(dev);
ret = devm_pm_runtime_enable(dev);
if (ret)
return dev_err_probe(&pdev->dev, ret, "Runtime pm activation failed.\n");
ret = devm_hwrng_register(dev, &rk_rng->rng);
if (ret)
return dev_err_probe(&pdev->dev, ret, "Failed to register Rockchip hwrng\n");
return 0;
}
static int __maybe_unused rk_rng_runtime_suspend(struct device *dev)
{
struct rk_rng *rk_rng = dev_get_drvdata(dev);
rk_rng_cleanup(&rk_rng->rng);
return 0;
}
static int __maybe_unused rk_rng_runtime_resume(struct device *dev)
{
struct rk_rng *rk_rng = dev_get_drvdata(dev);
return rk_rng_init(&rk_rng->rng);
}
static const struct dev_pm_ops rk_rng_pm_ops = {
SET_RUNTIME_PM_OPS(rk_rng_runtime_suspend,
rk_rng_runtime_resume, NULL)
SET_SYSTEM_SLEEP_PM_OPS(pm_runtime_force_suspend,
pm_runtime_force_resume)
};
static const struct of_device_id rk_rng_dt_match[] = {
{ .compatible = "rockchip,rk3568-rng", },
{ /* sentinel */ },
};
MODULE_DEVICE_TABLE(of, rk_rng_dt_match);
static struct platform_driver rk_rng_driver = {
.driver = {
.name = "rockchip-rng",
.pm = &rk_rng_pm_ops,
.of_match_table = rk_rng_dt_match,
},
.probe = rk_rng_probe,
};
module_platform_driver(rk_rng_driver);
MODULE_DESCRIPTION("Rockchip RK3568 True Random Number Generator driver");
MODULE_AUTHOR("Lin Jinhan <troy.lin@rock-chips.com>");
MODULE_AUTHOR("Aurelien Jarno <aurelien@aurel32.net>");
MODULE_AUTHOR("Daniel Golle <daniel@makrotopia.org>");
MODULE_LICENSE("GPL");


@ -149,7 +149,6 @@ struct crypto4xx_alg {
int crypto4xx_alloc_sa(struct crypto4xx_ctx *ctx, u32 size);
void crypto4xx_free_sa(struct crypto4xx_ctx *ctx);
void crypto4xx_free_ctx(struct crypto4xx_ctx *ctx);
int crypto4xx_build_pd(struct crypto_async_request *req,
struct crypto4xx_ctx *ctx,
struct scatterlist *src,


@ -150,8 +150,6 @@ struct meson_alg_template {
#endif
};
int meson_enqueue(struct crypto_async_request *areq, u32 type);
int meson_aes_setkey(struct crypto_skcipher *tfm, const u8 *key,
unsigned int keylen);
int meson_cipher_init(struct crypto_tfm *tfm);


@ -2376,33 +2376,29 @@ static int atmel_aes_probe(struct platform_device *pdev)
}
/* Initializing the clock */
aes_dd->iclk = devm_clk_get(&pdev->dev, "aes_clk");
aes_dd->iclk = devm_clk_get_prepared(&pdev->dev, "aes_clk");
if (IS_ERR(aes_dd->iclk)) {
dev_err(dev, "clock initialization failed.\n");
err = PTR_ERR(aes_dd->iclk);
goto err_tasklet_kill;
}
err = clk_prepare(aes_dd->iclk);
if (err)
goto err_tasklet_kill;
err = atmel_aes_hw_version_init(aes_dd);
if (err)
goto err_iclk_unprepare;
goto err_tasklet_kill;
atmel_aes_get_cap(aes_dd);
#if IS_ENABLED(CONFIG_CRYPTO_DEV_ATMEL_AUTHENC)
if (aes_dd->caps.has_authenc && !atmel_sha_authenc_is_ready()) {
err = -EPROBE_DEFER;
goto err_iclk_unprepare;
goto err_tasklet_kill;
}
#endif
err = atmel_aes_buff_init(aes_dd);
if (err)
goto err_iclk_unprepare;
goto err_tasklet_kill;
err = atmel_aes_dma_init(aes_dd);
if (err)
@ -2429,8 +2425,6 @@ err_algs:
atmel_aes_dma_cleanup(aes_dd);
err_buff_cleanup:
atmel_aes_buff_cleanup(aes_dd);
err_iclk_unprepare:
clk_unprepare(aes_dd->iclk);
err_tasklet_kill:
tasklet_kill(&aes_dd->done_task);
tasklet_kill(&aes_dd->queue_task);
@ -2455,8 +2449,6 @@ static void atmel_aes_remove(struct platform_device *pdev)
atmel_aes_dma_cleanup(aes_dd);
atmel_aes_buff_cleanup(aes_dd);
clk_unprepare(aes_dd->iclk);
}
static struct platform_driver atmel_aes_driver = {


@ -2623,27 +2623,23 @@ static int atmel_sha_probe(struct platform_device *pdev)
}
/* Initializing the clock */
sha_dd->iclk = devm_clk_get(&pdev->dev, "sha_clk");
sha_dd->iclk = devm_clk_get_prepared(&pdev->dev, "sha_clk");
if (IS_ERR(sha_dd->iclk)) {
dev_err(dev, "clock initialization failed.\n");
err = PTR_ERR(sha_dd->iclk);
goto err_tasklet_kill;
}
err = clk_prepare(sha_dd->iclk);
if (err)
goto err_tasklet_kill;
err = atmel_sha_hw_version_init(sha_dd);
if (err)
goto err_iclk_unprepare;
goto err_tasklet_kill;
atmel_sha_get_cap(sha_dd);
if (sha_dd->caps.has_dma) {
err = atmel_sha_dma_init(sha_dd);
if (err)
goto err_iclk_unprepare;
goto err_tasklet_kill;
dev_info(dev, "using %s for DMA transfers\n",
dma_chan_name(sha_dd->dma_lch_in.chan));
@ -2669,8 +2665,6 @@ err_algs:
spin_unlock(&atmel_sha.lock);
if (sha_dd->caps.has_dma)
atmel_sha_dma_cleanup(sha_dd);
err_iclk_unprepare:
clk_unprepare(sha_dd->iclk);
err_tasklet_kill:
tasklet_kill(&sha_dd->queue_task);
tasklet_kill(&sha_dd->done_task);
@ -2693,8 +2687,6 @@ static void atmel_sha_remove(struct platform_device *pdev)
if (sha_dd->caps.has_dma)
atmel_sha_dma_cleanup(sha_dd);
clk_unprepare(sha_dd->iclk);
}
static struct platform_driver atmel_sha_driver = {


@ -961,7 +961,7 @@ static struct aead_edesc *aead_edesc_alloc(struct aead_request *req,
drv_ctx = get_drv_ctx(ctx, encrypt ? ENCRYPT : DECRYPT);
if (IS_ERR(drv_ctx))
return (struct aead_edesc *)drv_ctx;
return ERR_CAST(drv_ctx);
/* allocate space for base edesc and hw desc commands, link tables */
edesc = qi_cache_alloc(flags);
@ -1271,7 +1271,7 @@ static struct skcipher_edesc *skcipher_edesc_alloc(struct skcipher_request *req,
drv_ctx = get_drv_ctx(ctx, encrypt ? ENCRYPT : DECRYPT);
if (IS_ERR(drv_ctx))
return (struct skcipher_edesc *)drv_ctx;
return ERR_CAST(drv_ctx);
src_nents = sg_nents_for_len(req->src, req->cryptlen);
if (unlikely(src_nents < 0)) {

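ERR_CAST() exists precisely for the pattern fixed here: forwarding an error-valued pointer of one type as another. The open-coded cast compiles, but it hides intent and keeps compiling silently if either type changes. The idiom, sketched with hypothetical names:

	struct widget *widget_create(struct ctx *ctx)
	{
		struct helper *h = helper_get(ctx);	/* may return ERR_PTR(...) */

		if (IS_ERR(h))
			return ERR_CAST(h);	/* not: (struct widget *)h */
		return make_widget(h);		/* hypothetical */
	}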

@ -5006,10 +5006,14 @@ static int __cold dpaa2_dpseci_setup(struct fsl_mc_device *ls_dev)
struct device *dev = &ls_dev->dev;
struct dpaa2_caam_priv *priv;
struct dpaa2_caam_priv_per_cpu *ppriv;
cpumask_t clean_mask;
cpumask_var_t clean_mask;
int err, cpu;
u8 i;
err = -ENOMEM;
if (!zalloc_cpumask_var(&clean_mask, GFP_KERNEL))
goto err_cpumask;
priv = dev_get_drvdata(dev);
priv->dev = dev;
@ -5085,7 +5089,6 @@ static int __cold dpaa2_dpseci_setup(struct fsl_mc_device *ls_dev)
}
}
cpumask_clear(&clean_mask);
i = 0;
for_each_online_cpu(cpu) {
u8 j;
@ -5114,7 +5117,7 @@ static int __cold dpaa2_dpseci_setup(struct fsl_mc_device *ls_dev)
err = -ENOMEM;
goto err_alloc_netdev;
}
cpumask_set_cpu(cpu, &clean_mask);
cpumask_set_cpu(cpu, clean_mask);
ppriv->net_dev->dev = *dev;
netif_napi_add_tx_weight(ppriv->net_dev, &ppriv->napi,
@ -5122,15 +5125,19 @@ static int __cold dpaa2_dpseci_setup(struct fsl_mc_device *ls_dev)
DPAA2_CAAM_NAPI_WEIGHT);
}
return 0;
err = 0;
goto free_cpumask;
err_alloc_netdev:
free_dpaa2_pcpu_netdev(priv, &clean_mask);
free_dpaa2_pcpu_netdev(priv, clean_mask);
err_get_rx_queue:
dpaa2_dpseci_congestion_free(priv);
err_get_vers:
dpseci_close(priv->mc_io, 0, ls_dev->mc_handle);
err_open:
free_cpumask:
free_cpumask_var(clean_mask);
err_cpumask:
return err;
}


@ -736,7 +736,11 @@ int caam_qi_init(struct platform_device *caam_pdev)
struct device *ctrldev = &caam_pdev->dev, *qidev;
struct caam_drv_private *ctrlpriv;
const cpumask_t *cpus = qman_affine_cpus();
cpumask_t clean_mask;
cpumask_var_t clean_mask;
err = -ENOMEM;
if (!zalloc_cpumask_var(&clean_mask, GFP_KERNEL))
goto fail_cpumask;
ctrlpriv = dev_get_drvdata(ctrldev);
qidev = ctrldev;
@ -745,19 +749,16 @@ int caam_qi_init(struct platform_device *caam_pdev)
err = init_cgr(qidev);
if (err) {
dev_err(qidev, "CGR initialization failed: %d\n", err);
return err;
goto fail_cgr;
}
/* Initialise response FQs */
err = alloc_rsp_fqs(qidev);
if (err) {
dev_err(qidev, "Can't allocate CAAM response FQs: %d\n", err);
free_rsp_fqs();
return err;
goto fail_fqs;
}
cpumask_clear(&clean_mask);
/*
* Enable the NAPI contexts on each of the core which has an affine
* portal.
@ -773,7 +774,7 @@ int caam_qi_init(struct platform_device *caam_pdev)
err = -ENOMEM;
goto fail;
}
cpumask_set_cpu(i, &clean_mask);
cpumask_set_cpu(i, clean_mask);
priv->net_dev = net_dev;
net_dev->dev = *qidev;
@ -788,7 +789,7 @@ int caam_qi_init(struct platform_device *caam_pdev)
if (!qi_cache) {
dev_err(qidev, "Can't allocate CAAM cache\n");
err = -ENOMEM;
goto fail2;
goto fail;
}
caam_debugfs_qi_init(ctrlpriv);
@ -798,11 +799,19 @@ int caam_qi_init(struct platform_device *caam_pdev)
goto fail2;
dev_info(qidev, "Linux CAAM Queue I/F driver initialised\n");
return 0;
goto free_cpumask;
fail2:
free_rsp_fqs();
kmem_cache_destroy(qi_cache);
fail:
free_caam_qi_pcpu_netdev(&clean_mask);
free_caam_qi_pcpu_netdev(clean_mask);
fail_fqs:
free_rsp_fqs();
qman_delete_cgr_safe(&qipriv.cgr);
qman_release_cgrid(qipriv.cgr.cgrid);
fail_cgr:
free_cpumask:
free_cpumask_var(clean_mask);
fail_cpumask:
return err;
}

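Both CAAM hunks make the same fix: with CONFIG_CPUMASK_OFFSTACK=y a cpumask_t is NR_CPUS bits wide and too large for the stack, so the on-stack mask becomes a cpumask_var_t allocated with zalloc_cpumask_var() (which also replaces the explicit cpumask_clear()). The resulting shape, reduced to its essentials with hypothetical helpers:

	cpumask_var_t clean_mask;
	int cpu, err;

	if (!zalloc_cpumask_var(&clean_mask, GFP_KERNEL))
		return -ENOMEM;

	for_each_online_cpu(cpu) {
		if (setup_percpu(cpu)) {		/* hypothetical per-cpu setup */
			err = -ENOMEM;
			goto fail;
		}
		cpumask_set_cpu(cpu, clean_mask);	/* record what to undo */
	}
	err = 0;
	goto free_cpumask;

fail:
	teardown_percpu(clean_mask);			/* hypothetical cleanup */
free_cpumask:
	free_cpumask_var(clean_mask);			/* on success and failure alike */
	return err;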

@ -910,7 +910,18 @@ static int __sev_do_cmd_locked(int cmd, void *data, int *psp_ret)
sev->int_rcvd = 0;
reg = FIELD_PREP(SEV_CMDRESP_CMD, cmd) | SEV_CMDRESP_IOC;
reg = FIELD_PREP(SEV_CMDRESP_CMD, cmd);
/*
* If invoked during panic handling, local interrupts are disabled so
* the PSP command completion interrupt can't be used.
* sev_wait_cmd_ioc() already checks for interrupts disabled and
* polls for PSP command completion. Ensure we do not request an
* interrupt from the PSP if irqs disabled.
*/
if (!irqs_disabled())
reg |= SEV_CMDRESP_IOC;
iowrite32(reg, sev->io_regs + sev->vdata->cmdresp_reg);
/* wait for command completion */
@ -1629,8 +1640,6 @@ static int sev_update_firmware(struct device *dev)
if (ret)
dev_dbg(dev, "Failed to update SEV firmware: %#x\n", error);
else
dev_info(dev, "SEV firmware update successful\n");
__free_pages(p, order);
@ -2382,6 +2391,7 @@ void sev_pci_init(void)
{
struct sev_device *sev = psp_master->sev_data;
struct sev_platform_init_args args = {0};
u8 api_major, api_minor, build;
int rc;
if (!sev)
@ -2392,9 +2402,19 @@ void sev_pci_init(void)
if (sev_get_api_version())
goto err;
api_major = sev->api_major;
api_minor = sev->api_minor;
build = sev->build;
if (sev_update_firmware(sev->dev) == 0)
sev_get_api_version();
if (api_major != sev->api_major || api_minor != sev->api_minor ||
build != sev->build)
dev_info(sev->dev, "SEV firmware updated from %d.%d.%d to %d.%d.%d\n",
api_major, api_minor, build,
sev->api_major, sev->api_minor, sev->build);
/* Initialize the platform */
args.probe = true;
rc = sev_platform_init(&args);
@ -2410,6 +2430,8 @@ void sev_pci_init(void)
return;
err:
sev_dev_destroy(psp_master);
psp_master->sev_data = NULL;
}

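The guard added to __sev_do_cmd_locked() matters during panic handling: local interrupts are disabled, the completion IRQ can never be delivered, and sev_wait_cmd_ioc() already falls back to polling in that case, so asking the PSP for an interrupt would be pointless. The resulting control flow, distilled:

	reg = FIELD_PREP(SEV_CMDRESP_CMD, cmd);
	if (!irqs_disabled())			/* polling path otherwise */
		reg |= SEV_CMDRESP_IOC;		/* request the IRQ only when usable */
	iowrite32(reg, sev->io_regs + sev->vdata->cmdresp_reg);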

@ -138,7 +138,6 @@ struct sp_device *sp_alloc_struct(struct device *dev);
int sp_init(struct sp_device *sp);
void sp_destroy(struct sp_device *sp);
struct sp_device *sp_get_master(void);
int sp_suspend(struct sp_device *sp);
int sp_resume(struct sp_device *sp);


@ -326,8 +326,6 @@ struct sl3516_ce_alg_template {
unsigned long stat_bytes;
};
int sl3516_ce_enqueue(struct crypto_async_request *areq, u32 type);
int sl3516_ce_aes_setkey(struct crypto_skcipher *tfm, const u8 *key,
unsigned int keylen);
int sl3516_ce_cipher_init(struct crypto_tfm *tfm);


@ -575,7 +575,9 @@ static int hpre_send(struct hpre_ctx *ctx, struct hpre_sqe *msg)
do {
atomic64_inc(&dfx[HPRE_SEND_CNT].value);
spin_lock_bh(&ctx->req_lock);
ret = hisi_qp_send(ctx->qp, msg);
spin_unlock_bh(&ctx->req_lock);
if (ret != -EBUSY)
break;
atomic64_inc(&dfx[HPRE_SEND_BUSY_CNT].value);

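hisi_qp_send() updates shared queue state, so concurrent senders on one qp can presumably race; the fix serializes the send under the context's req_lock, using the _bh variant since completions run in softirq context. The hisi_zip change further down applies the same treatment:

	spin_lock_bh(&ctx->req_lock);
	ret = hisi_qp_send(ctx->qp, msg);
	spin_unlock_bh(&ctx->req_lock);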

@ -13,9 +13,7 @@
#include <linux/uacce.h>
#include "hpre.h"
#define HPRE_QM_ABNML_INT_MASK 0x100004
#define HPRE_CTRL_CNT_CLR_CE_BIT BIT(0)
#define HPRE_COMM_CNT_CLR_CE 0x0
#define HPRE_CTRL_CNT_CLR_CE 0x301000
#define HPRE_FSM_MAX_CNT 0x301008
#define HPRE_VFG_AXQOS 0x30100c
@ -42,7 +40,6 @@
#define HPRE_HAC_INT_SET 0x301500
#define HPRE_RNG_TIMEOUT_NUM 0x301A34
#define HPRE_CORE_INT_ENABLE 0
#define HPRE_CORE_INT_DISABLE GENMASK(21, 0)
#define HPRE_RDCHN_INI_ST 0x301a00
#define HPRE_CLSTR_BASE 0x302000
#define HPRE_CORE_EN_OFFSET 0x04
@ -66,7 +63,6 @@
#define HPRE_CLSTR_ADDR_INTRVL 0x1000
#define HPRE_CLUSTER_INQURY 0x100
#define HPRE_CLSTR_ADDR_INQRY_RSLT 0x104
#define HPRE_TIMEOUT_ABNML_BIT 6
#define HPRE_PASID_EN_BIT 9
#define HPRE_REG_RD_INTVRL_US 10
#define HPRE_REG_RD_TMOUT_US 1000
@ -203,9 +199,9 @@ static const struct hisi_qm_cap_info hpre_basic_info[] = {
{HPRE_QM_RESET_MASK_CAP, 0x3128, 0, GENMASK(31, 0), 0x0, 0xC37, 0x6C37},
{HPRE_QM_OOO_SHUTDOWN_MASK_CAP, 0x3128, 0, GENMASK(31, 0), 0x0, 0x4, 0x6C37},
{HPRE_QM_CE_MASK_CAP, 0x312C, 0, GENMASK(31, 0), 0x0, 0x8, 0x8},
{HPRE_NFE_MASK_CAP, 0x3130, 0, GENMASK(31, 0), 0x0, 0x3FFFFE, 0x1FFFFFE},
{HPRE_RESET_MASK_CAP, 0x3134, 0, GENMASK(31, 0), 0x0, 0x3FFFFE, 0xBFFFFE},
{HPRE_OOO_SHUTDOWN_MASK_CAP, 0x3134, 0, GENMASK(31, 0), 0x0, 0x22, 0xBFFFFE},
{HPRE_NFE_MASK_CAP, 0x3130, 0, GENMASK(31, 0), 0x0, 0x3FFFFE, 0x1FFFC3E},
{HPRE_RESET_MASK_CAP, 0x3134, 0, GENMASK(31, 0), 0x0, 0x3FFFFE, 0xBFFC3E},
{HPRE_OOO_SHUTDOWN_MASK_CAP, 0x3134, 0, GENMASK(31, 0), 0x0, 0x22, 0xBFFC3E},
{HPRE_CE_MASK_CAP, 0x3138, 0, GENMASK(31, 0), 0x0, 0x1, 0x1},
{HPRE_CLUSTER_NUM_CAP, 0x313c, 20, GENMASK(3, 0), 0x0, 0x4, 0x1},
{HPRE_CORE_TYPE_NUM_CAP, 0x313c, 16, GENMASK(3, 0), 0x0, 0x2, 0x2},
@ -358,6 +354,8 @@ static struct dfx_diff_registers hpre_diff_regs[] = {
},
};
static const struct hisi_qm_err_ini hpre_err_ini;
bool hpre_check_alg_support(struct hisi_qm *qm, u32 alg)
{
u32 cap_val;
@ -654,11 +652,6 @@ static int hpre_set_user_domain_and_cache(struct hisi_qm *qm)
writel(HPRE_QM_USR_CFG_MASK, qm->io_base + QM_AWUSER_M_CFG_ENABLE);
writel_relaxed(HPRE_QM_AXI_CFG_MASK, qm->io_base + QM_AXI_M_CFG);
/* HPRE need more time, we close this interrupt */
val = readl_relaxed(qm->io_base + HPRE_QM_ABNML_INT_MASK);
val |= BIT(HPRE_TIMEOUT_ABNML_BIT);
writel_relaxed(val, qm->io_base + HPRE_QM_ABNML_INT_MASK);
if (qm->ver >= QM_HW_V3)
writel(HPRE_RSA_ENB | HPRE_ECC_ENB,
qm->io_base + HPRE_TYPES_ENB);
@ -667,9 +660,7 @@ static int hpre_set_user_domain_and_cache(struct hisi_qm *qm)
writel(HPRE_QM_VFG_AX_MASK, qm->io_base + HPRE_VFG_AXCACHE);
writel(0x0, qm->io_base + HPRE_BD_ENDIAN);
writel(0x0, qm->io_base + HPRE_INT_MASK);
writel(0x0, qm->io_base + HPRE_POISON_BYPASS);
writel(0x0, qm->io_base + HPRE_COMM_CNT_CLR_CE);
writel(0x0, qm->io_base + HPRE_ECC_BYPASS);
writel(HPRE_BD_USR_MASK, qm->io_base + HPRE_BD_ARUSR_CFG);
@ -759,7 +750,7 @@ static void hpre_hw_error_disable(struct hisi_qm *qm)
static void hpre_hw_error_enable(struct hisi_qm *qm)
{
u32 ce, nfe;
u32 ce, nfe, err_en;
ce = hisi_qm_get_hw_info(qm, hpre_basic_info, HPRE_CE_MASK_CAP, qm->cap_ver);
nfe = hisi_qm_get_hw_info(qm, hpre_basic_info, HPRE_NFE_MASK_CAP, qm->cap_ver);
@ -776,7 +767,8 @@ static void hpre_hw_error_enable(struct hisi_qm *qm)
hpre_master_ooo_ctrl(qm, true);
/* enable hpre hw error interrupts */
writel(HPRE_CORE_INT_ENABLE, qm->io_base + HPRE_INT_MASK);
err_en = ce | nfe | HPRE_HAC_RAS_FE_ENABLE;
writel(~err_en, qm->io_base + HPRE_INT_MASK);
}
static inline struct hisi_qm *hpre_file_to_qm(struct hpre_debugfs_file *file)
@ -1161,6 +1153,7 @@ static int hpre_qm_init(struct hisi_qm *qm, struct pci_dev *pdev)
qm->qp_num = pf_q_num;
qm->debug.curr_qm_qp_num = pf_q_num;
qm->qm_list = &hpre_devices;
qm->err_ini = &hpre_err_ini;
if (pf_q_num_flag)
set_bit(QM_MODULE_PARAM, &qm->misc_ctl);
}
@ -1350,8 +1343,6 @@ static int hpre_pf_probe_init(struct hpre *hpre)
hpre_open_sva_prefetch(qm);
qm->err_ini = &hpre_err_ini;
qm->err_ini->err_info_init(qm);
hisi_qm_dev_err_init(qm);
ret = hpre_show_last_regs_init(qm);
if (ret)
@ -1380,6 +1371,18 @@ static int hpre_probe_init(struct hpre *hpre)
return 0;
}
static void hpre_probe_uninit(struct hisi_qm *qm)
{
if (qm->fun_type == QM_HW_VF)
return;
hpre_cnt_regs_clear(qm);
qm->debug.curr_qm_qp_num = 0;
hpre_show_last_regs_uninit(qm);
hpre_close_sva_prefetch(qm);
hisi_qm_dev_err_uninit(qm);
}
static int hpre_probe(struct pci_dev *pdev, const struct pci_device_id *id)
{
struct hisi_qm *qm;
@ -1405,7 +1408,7 @@ static int hpre_probe(struct pci_dev *pdev, const struct pci_device_id *id)
ret = hisi_qm_start(qm);
if (ret)
goto err_with_err_init;
goto err_with_probe_init;
ret = hpre_debugfs_init(qm);
if (ret)
@ -1444,9 +1447,8 @@ err_qm_del_list:
hpre_debugfs_exit(qm);
hisi_qm_stop(qm, QM_NORMAL);
err_with_err_init:
hpre_show_last_regs_uninit(qm);
hisi_qm_dev_err_uninit(qm);
err_with_probe_init:
hpre_probe_uninit(qm);
err_with_qm_init:
hisi_qm_uninit(qm);
@ -1468,13 +1470,7 @@ static void hpre_remove(struct pci_dev *pdev)
hpre_debugfs_exit(qm);
hisi_qm_stop(qm, QM_NORMAL);
if (qm->fun_type == QM_HW_PF) {
hpre_cnt_regs_clear(qm);
qm->debug.curr_qm_qp_num = 0;
hpre_show_last_regs_uninit(qm);
hisi_qm_dev_err_uninit(qm);
}
hpre_probe_uninit(qm);
hisi_qm_uninit(qm);
}


@ -450,6 +450,7 @@ static struct qm_typical_qos_table shaper_cbs_s[] = {
};
static void qm_irqs_unregister(struct hisi_qm *qm);
static int qm_reset_device(struct hisi_qm *qm);
static u32 qm_get_hw_error_status(struct hisi_qm *qm)
{
@ -4014,6 +4015,28 @@ static int qm_set_vf_mse(struct hisi_qm *qm, bool set)
return -ETIMEDOUT;
}
static void qm_dev_ecc_mbit_handle(struct hisi_qm *qm)
{
u32 nfe_enb = 0;
/* Kunpeng930 hardware automatically close master ooo when NFE occurs */
if (qm->ver >= QM_HW_V3)
return;
if (!qm->err_status.is_dev_ecc_mbit &&
qm->err_status.is_qm_ecc_mbit &&
qm->err_ini->close_axi_master_ooo) {
qm->err_ini->close_axi_master_ooo(qm);
} else if (qm->err_status.is_dev_ecc_mbit &&
!qm->err_status.is_qm_ecc_mbit &&
!qm->err_ini->close_axi_master_ooo) {
nfe_enb = readl(qm->io_base + QM_RAS_NFE_ENABLE);
writel(nfe_enb & QM_RAS_NFE_MBIT_DISABLE,
qm->io_base + QM_RAS_NFE_ENABLE);
writel(QM_ECC_MBIT, qm->io_base + QM_ABNORMAL_INT_SET);
}
}
static int qm_vf_reset_prepare(struct hisi_qm *qm,
enum qm_stop_reason stop_reason)
{
@ -4078,6 +4101,8 @@ static int qm_controller_reset_prepare(struct hisi_qm *qm)
return ret;
}
qm_dev_ecc_mbit_handle(qm);
/* PF obtains the information of VF by querying the register. */
qm_cmd_uninit(qm);
@ -4108,33 +4133,26 @@ static int qm_controller_reset_prepare(struct hisi_qm *qm)
return 0;
}
static void qm_dev_ecc_mbit_handle(struct hisi_qm *qm)
static int qm_master_ooo_check(struct hisi_qm *qm)
{
u32 nfe_enb = 0;
u32 val;
int ret;
/* Kunpeng930 hardware automatically close master ooo when NFE occurs */
if (qm->ver >= QM_HW_V3)
return;
/* Check the ooo register of the device before resetting the device. */
writel(ACC_MASTER_GLOBAL_CTRL_SHUTDOWN, qm->io_base + ACC_MASTER_GLOBAL_CTRL);
ret = readl_relaxed_poll_timeout(qm->io_base + ACC_MASTER_TRANS_RETURN,
val, (val == ACC_MASTER_TRANS_RETURN_RW),
POLL_PERIOD, POLL_TIMEOUT);
if (ret)
pci_warn(qm->pdev, "Bus lock! Please reset system.\n");
if (!qm->err_status.is_dev_ecc_mbit &&
qm->err_status.is_qm_ecc_mbit &&
qm->err_ini->close_axi_master_ooo) {
qm->err_ini->close_axi_master_ooo(qm);
} else if (qm->err_status.is_dev_ecc_mbit &&
!qm->err_status.is_qm_ecc_mbit &&
!qm->err_ini->close_axi_master_ooo) {
nfe_enb = readl(qm->io_base + QM_RAS_NFE_ENABLE);
writel(nfe_enb & QM_RAS_NFE_MBIT_DISABLE,
qm->io_base + QM_RAS_NFE_ENABLE);
writel(QM_ECC_MBIT, qm->io_base + QM_ABNORMAL_INT_SET);
}
return ret;
}
static int qm_soft_reset(struct hisi_qm *qm)
static int qm_soft_reset_prepare(struct hisi_qm *qm)
{
struct pci_dev *pdev = qm->pdev;
int ret;
u32 val;
/* Ensure all doorbells and mailboxes received by QM */
ret = qm_check_req_recv(qm);
@ -4155,30 +4173,23 @@ static int qm_soft_reset(struct hisi_qm *qm)
return ret;
}
qm_dev_ecc_mbit_handle(qm);
/* OOO register set and check */
writel(ACC_MASTER_GLOBAL_CTRL_SHUTDOWN,
qm->io_base + ACC_MASTER_GLOBAL_CTRL);
/* If bus lock, reset chip */
ret = readl_relaxed_poll_timeout(qm->io_base + ACC_MASTER_TRANS_RETURN,
val,
(val == ACC_MASTER_TRANS_RETURN_RW),
POLL_PERIOD, POLL_TIMEOUT);
if (ret) {
pci_emerg(pdev, "Bus lock! Please reset system.\n");
ret = qm_master_ooo_check(qm);
if (ret)
return ret;
}
if (qm->err_ini->close_sva_prefetch)
qm->err_ini->close_sva_prefetch(qm);
ret = qm_set_pf_mse(qm, false);
if (ret) {
if (ret)
pci_err(pdev, "Fails to disable pf MSE bit.\n");
return ret;
}
return ret;
}
static int qm_reset_device(struct hisi_qm *qm)
{
struct pci_dev *pdev = qm->pdev;
/* The reset related sub-control registers are not in PCI BAR */
if (ACPI_HANDLE(&pdev->dev)) {
@ -4197,12 +4208,23 @@ static int qm_soft_reset(struct hisi_qm *qm)
pci_err(pdev, "Reset step %llu failed!\n", value);
return -EIO;
}
} else {
pci_err(pdev, "No reset method!\n");
return -EINVAL;
return 0;
}
return 0;
pci_err(pdev, "No reset method!\n");
return -EINVAL;
}
static int qm_soft_reset(struct hisi_qm *qm)
{
int ret;
ret = qm_soft_reset_prepare(qm);
if (ret)
return ret;
return qm_reset_device(qm);
}
static int qm_vf_reset_done(struct hisi_qm *qm)
@ -5155,6 +5177,35 @@ err_request_mem_regions:
return ret;
}
static int qm_clear_device(struct hisi_qm *qm)
{
acpi_handle handle = ACPI_HANDLE(&qm->pdev->dev);
int ret;
if (qm->fun_type == QM_HW_VF)
return 0;
/* Device does not support reset, return */
if (!qm->err_ini->err_info_init)
return 0;
qm->err_ini->err_info_init(qm);
if (!handle)
return 0;
/* No reset method, return */
if (!acpi_has_method(handle, qm->err_info.acpi_rst))
return 0;
ret = qm_master_ooo_check(qm);
if (ret) {
writel(0x0, qm->io_base + ACC_MASTER_GLOBAL_CTRL);
return ret;
}
return qm_reset_device(qm);
}
static int hisi_qm_pci_init(struct hisi_qm *qm)
{
struct pci_dev *pdev = qm->pdev;
@ -5184,8 +5235,14 @@ static int hisi_qm_pci_init(struct hisi_qm *qm)
goto err_get_pci_res;
}
ret = qm_clear_device(qm);
if (ret)
goto err_free_vectors;
return 0;
err_free_vectors:
pci_free_irq_vectors(pdev);
err_get_pci_res:
qm_put_pci_res(qm);
err_disable_pcidev:
@ -5486,7 +5543,6 @@ static int qm_prepare_for_suspend(struct hisi_qm *qm)
{
struct pci_dev *pdev = qm->pdev;
int ret;
u32 val;
ret = qm->ops->set_msi(qm, false);
if (ret) {
@ -5494,18 +5550,9 @@ static int qm_prepare_for_suspend(struct hisi_qm *qm)
return ret;
}
/* shutdown OOO register */
writel(ACC_MASTER_GLOBAL_CTRL_SHUTDOWN,
qm->io_base + ACC_MASTER_GLOBAL_CTRL);
ret = readl_relaxed_poll_timeout(qm->io_base + ACC_MASTER_TRANS_RETURN,
val,
(val == ACC_MASTER_TRANS_RETURN_RW),
POLL_PERIOD, POLL_TIMEOUT);
if (ret) {
pci_emerg(pdev, "Bus lock! Please reset system.\n");
ret = qm_master_ooo_check(qm);
if (ret)
return ret;
}
ret = qm_set_pf_mse(qm, false);
if (ret)

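The refactor above extracts the shutdown-and-poll sequence into qm_master_ooo_check() so that soft reset, suspend, and the new probe-time qm_clear_device() all share it. Its workhorse is readl_relaxed_poll_timeout(), which rereads a register until a condition holds or a deadline passes. In general form (addr, EXPECTED, POLL_PERIOD, POLL_TIMEOUT, and dev are placeholders):

	u32 val;
	int ret;

	/* re-read 'addr' into 'val' every POLL_PERIOD microseconds until the
	 * condition holds, giving up with -ETIMEDOUT after POLL_TIMEOUT us */
	ret = readl_relaxed_poll_timeout(addr, val, val == EXPECTED,
					 POLL_PERIOD, POLL_TIMEOUT);
	if (ret)
		dev_warn(dev, "register never reached the expected state\n");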

@ -458,7 +458,7 @@ static void sec_ipv6_hashmask(struct sec_dev_info *info, u32 hash_mask[])
static int sec_ipv4_hashmask(struct sec_dev_info *info, u32 hash_mask)
{
if (hash_mask & SEC_HASH_IPV4_MASK) {
dev_err(info->dev, "Sec Ipv4 Hash Mask Input Error!\n ");
dev_err(info->dev, "Sec Ipv4 Hash Mask Input Error!\n");
return -EINVAL;
}


@ -1065,9 +1065,6 @@ static int sec_pf_probe_init(struct sec_dev *sec)
struct hisi_qm *qm = &sec->qm;
int ret;
qm->err_ini = &sec_err_ini;
qm->err_ini->err_info_init(qm);
ret = sec_set_user_domain_and_cache(qm);
if (ret)
return ret;
@ -1122,6 +1119,7 @@ static int sec_qm_init(struct hisi_qm *qm, struct pci_dev *pdev)
qm->qp_num = pf_q_num;
qm->debug.curr_qm_qp_num = pf_q_num;
qm->qm_list = &sec_devices;
qm->err_ini = &sec_err_ini;
if (pf_q_num_flag)
set_bit(QM_MODULE_PARAM, &qm->misc_ctl);
} else if (qm->fun_type == QM_HW_VF && qm->ver == QM_HW_V1) {
@ -1186,6 +1184,12 @@ static int sec_probe_init(struct sec_dev *sec)
static void sec_probe_uninit(struct hisi_qm *qm)
{
if (qm->fun_type == QM_HW_VF)
return;
sec_debug_regs_clear(qm);
sec_show_last_regs_uninit(qm);
sec_close_sva_prefetch(qm);
hisi_qm_dev_err_uninit(qm);
}
@ -1274,7 +1278,6 @@ err_qm_del_list:
sec_debugfs_exit(qm);
hisi_qm_stop(qm, QM_NORMAL);
err_probe_uninit:
sec_show_last_regs_uninit(qm);
sec_probe_uninit(qm);
err_qm_uninit:
sec_qm_uninit(qm);
@ -1296,11 +1299,6 @@ static void sec_remove(struct pci_dev *pdev)
sec_debugfs_exit(qm);
(void)hisi_qm_stop(qm, QM_NORMAL);
if (qm->fun_type == QM_HW_PF)
sec_debug_regs_clear(qm);
sec_show_last_regs_uninit(qm);
sec_probe_uninit(qm);
sec_qm_uninit(qm);


@ -225,7 +225,7 @@ hisi_acc_sg_buf_map_to_hw_sgl(struct device *dev,
dma_addr_t curr_sgl_dma = 0;
struct acc_hw_sge *curr_hw_sge;
struct scatterlist *sg;
int sg_n;
int sg_n, ret;
if (!dev || !sgl || !pool || !hw_sgl_dma || index >= pool->count)
return ERR_PTR(-EINVAL);
@ -240,14 +240,15 @@ hisi_acc_sg_buf_map_to_hw_sgl(struct device *dev,
if (sg_n_mapped > pool->sge_nr) {
dev_err(dev, "the number of entries in input scatterlist is bigger than SGL pool setting.\n");
return ERR_PTR(-EINVAL);
ret = -EINVAL;
goto err_unmap;
}
curr_hw_sgl = acc_get_sgl(pool, index, &curr_sgl_dma);
if (IS_ERR(curr_hw_sgl)) {
dev_err(dev, "Get SGL error!\n");
dma_unmap_sg(dev, sgl, sg_n, DMA_BIDIRECTIONAL);
return ERR_PTR(-ENOMEM);
ret = -ENOMEM;
goto err_unmap;
}
curr_hw_sgl->entry_length_in_sgl = cpu_to_le16(pool->sge_nr);
curr_hw_sge = curr_hw_sgl->sge_entries;
@ -262,6 +263,11 @@ hisi_acc_sg_buf_map_to_hw_sgl(struct device *dev,
*hw_sgl_dma = curr_sgl_dma;
return curr_hw_sgl;
err_unmap:
dma_unmap_sg(dev, sgl, sg_n, DMA_BIDIRECTIONAL);
return ERR_PTR(ret);
}
EXPORT_SYMBOL_GPL(hisi_acc_sg_buf_map_to_hw_sgl);

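The SGL fix converts two early returns into a single err_unmap label so the scatterlist mapped at the top of the function is unmapped on every failure path, not just one. The classic goto-unwind idiom it lands on, sketched:

	sg_n_mapped = dma_map_sg(dev, sgl, sg_n, DMA_BIDIRECTIONAL);
	if (!sg_n_mapped)
		return ERR_PTR(-EINVAL);	/* nothing to undo yet */

	if (sg_n_mapped > pool->sge_nr) {
		ret = -EINVAL;
		goto err_unmap;			/* all later failures funnel here */
	}
	/* ... build and return the hardware SGL ... */

err_unmap:
	dma_unmap_sg(dev, sgl, sg_n, DMA_BIDIRECTIONAL);
	return ERR_PTR(ret);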

@ -1,6 +1,7 @@
// SPDX-License-Identifier: GPL-2.0
/* Copyright (c) 2019 HiSilicon Limited. */
#include <crypto/internal/rng.h>
#include <linux/acpi.h>
#include <linux/crypto.h>
#include <linux/err.h>
@ -13,7 +14,6 @@
#include <linux/mutex.h>
#include <linux/platform_device.h>
#include <linux/random.h>
#include <crypto/internal/rng.h>
#define HISI_TRNG_REG 0x00F0
#define HISI_TRNG_BYTES 4
@ -121,7 +121,7 @@ static int hisi_trng_generate(struct crypto_rng *tfm, const u8 *src,
u32 i;
if (dlen > SW_DRBG_BLOCKS_NUM * SW_DRBG_BYTES || dlen == 0) {
pr_err("dlen(%d) exceeds limit(%d)!\n", dlen,
pr_err("dlen(%u) exceeds limit(%d)!\n", dlen,
SW_DRBG_BLOCKS_NUM * SW_DRBG_BYTES);
return -EINVAL;
}


@ -54,7 +54,7 @@ struct hisi_zip_req {
struct hisi_zip_req_q {
struct hisi_zip_req *q;
unsigned long *req_bitmap;
rwlock_t req_lock;
spinlock_t req_lock;
u16 size;
};
@ -116,17 +116,17 @@ static struct hisi_zip_req *hisi_zip_create_req(struct hisi_zip_qp_ctx *qp_ctx,
struct hisi_zip_req *req_cache;
int req_id;
write_lock(&req_q->req_lock);
spin_lock(&req_q->req_lock);
req_id = find_first_zero_bit(req_q->req_bitmap, req_q->size);
if (req_id >= req_q->size) {
write_unlock(&req_q->req_lock);
spin_unlock(&req_q->req_lock);
dev_dbg(&qp_ctx->qp->qm->pdev->dev, "req cache is full!\n");
return ERR_PTR(-EAGAIN);
}
set_bit(req_id, req_q->req_bitmap);
write_unlock(&req_q->req_lock);
spin_unlock(&req_q->req_lock);
req_cache = q + req_id;
req_cache->req_id = req_id;
@ -140,9 +140,9 @@ static void hisi_zip_remove_req(struct hisi_zip_qp_ctx *qp_ctx,
{
struct hisi_zip_req_q *req_q = &qp_ctx->req_q;
write_lock(&req_q->req_lock);
spin_lock(&req_q->req_lock);
clear_bit(req->req_id, req_q->req_bitmap);
write_unlock(&req_q->req_lock);
spin_unlock(&req_q->req_lock);
}
static void hisi_zip_fill_addr(struct hisi_zip_sqe *sqe, struct hisi_zip_req *req)
@ -213,6 +213,7 @@ static int hisi_zip_do_work(struct hisi_zip_qp_ctx *qp_ctx,
{
struct hisi_acc_sgl_pool *pool = qp_ctx->sgl_pool;
struct hisi_zip_dfx *dfx = &qp_ctx->zip_dev->dfx;
struct hisi_zip_req_q *req_q = &qp_ctx->req_q;
struct acomp_req *a_req = req->req;
struct hisi_qp *qp = qp_ctx->qp;
struct device *dev = &qp->qm->pdev->dev;
@ -244,7 +245,9 @@ static int hisi_zip_do_work(struct hisi_zip_qp_ctx *qp_ctx,
/* send command to start a task */
atomic64_inc(&dfx->send_cnt);
spin_lock_bh(&req_q->req_lock);
ret = hisi_qp_send(qp, &zip_sqe);
spin_unlock_bh(&req_q->req_lock);
if (unlikely(ret < 0)) {
atomic64_inc(&dfx->send_busy_cnt);
ret = -EAGAIN;
@ -456,7 +459,7 @@ static int hisi_zip_create_req_q(struct hisi_zip_ctx *ctx)
goto err_free_comp_q;
}
rwlock_init(&req_q->req_lock);
spin_lock_init(&req_q->req_lock);
req_q->q = kcalloc(req_q->size, sizeof(struct hisi_zip_req),
GFP_KERNEL);


@ -1141,8 +1141,6 @@ static int hisi_zip_pf_probe_init(struct hisi_zip *hisi_zip)
hisi_zip->ctrl = ctrl;
ctrl->hisi_zip = hisi_zip;
qm->err_ini = &hisi_zip_err_ini;
qm->err_ini->err_info_init(qm);
ret = hisi_zip_set_user_domain_and_cache(qm);
if (ret)
@ -1203,6 +1201,7 @@ static int hisi_zip_qm_init(struct hisi_qm *qm, struct pci_dev *pdev)
qm->qp_num = pf_q_num;
qm->debug.curr_qm_qp_num = pf_q_num;
qm->qm_list = &zip_devices;
qm->err_ini = &hisi_zip_err_ini;
if (pf_q_num_flag)
set_bit(QM_MODULE_PARAM, &qm->misc_ctl);
} else if (qm->fun_type == QM_HW_VF && qm->ver == QM_HW_V1) {
@ -1269,6 +1268,16 @@ static int hisi_zip_probe_init(struct hisi_zip *hisi_zip)
return 0;
}
static void hisi_zip_probe_uninit(struct hisi_qm *qm)
{
if (qm->fun_type == QM_HW_VF)
return;
hisi_zip_show_last_regs_uninit(qm);
hisi_zip_close_sva_prefetch(qm);
hisi_qm_dev_err_uninit(qm);
}
static int hisi_zip_probe(struct pci_dev *pdev, const struct pci_device_id *id)
{
struct hisi_zip *hisi_zip;
@ -1295,7 +1304,7 @@ static int hisi_zip_probe(struct pci_dev *pdev, const struct pci_device_id *id)
ret = hisi_qm_start(qm);
if (ret)
goto err_dev_err_uninit;
goto err_probe_uninit;
ret = hisi_zip_debugfs_init(qm);
if (ret)
@ -1334,9 +1343,8 @@ err_qm_del_list:
hisi_zip_debugfs_exit(qm);
hisi_qm_stop(qm, QM_NORMAL);
err_dev_err_uninit:
hisi_zip_show_last_regs_uninit(qm);
hisi_qm_dev_err_uninit(qm);
err_probe_uninit:
hisi_zip_probe_uninit(qm);
err_qm_uninit:
hisi_zip_qm_uninit(qm);
@ -1358,8 +1366,7 @@ static void hisi_zip_remove(struct pci_dev *pdev)
hisi_zip_debugfs_exit(qm);
hisi_qm_stop(qm, QM_NORMAL);
hisi_zip_show_last_regs_uninit(qm);
hisi_qm_dev_err_uninit(qm);
hisi_zip_probe_uninit(qm);
hisi_zip_qm_uninit(qm);
}


@ -987,31 +987,23 @@ static int img_hash_probe(struct platform_device *pdev)
}
dev_dbg(dev, "using IRQ channel %d\n", irq);
hdev->hash_clk = devm_clk_get(&pdev->dev, "hash");
hdev->hash_clk = devm_clk_get_enabled(&pdev->dev, "hash");
if (IS_ERR(hdev->hash_clk)) {
dev_err(dev, "clock initialization failed.\n");
err = PTR_ERR(hdev->hash_clk);
goto res_err;
}
hdev->sys_clk = devm_clk_get(&pdev->dev, "sys");
hdev->sys_clk = devm_clk_get_enabled(&pdev->dev, "sys");
if (IS_ERR(hdev->sys_clk)) {
dev_err(dev, "clock initialization failed.\n");
err = PTR_ERR(hdev->sys_clk);
goto res_err;
}
err = clk_prepare_enable(hdev->hash_clk);
if (err)
goto res_err;
err = clk_prepare_enable(hdev->sys_clk);
if (err)
goto clk_err;
err = img_hash_dma_init(hdev);
if (err)
goto dma_err;
goto res_err;
dev_dbg(dev, "using %s for DMA transfers\n",
dma_chan_name(hdev->dma_lch));
@ -1032,10 +1024,6 @@ err_algs:
list_del(&hdev->list);
spin_unlock(&img_hash.lock);
dma_release_channel(hdev->dma_lch);
dma_err:
clk_disable_unprepare(hdev->sys_clk);
clk_err:
clk_disable_unprepare(hdev->hash_clk);
res_err:
tasklet_kill(&hdev->done_task);
tasklet_kill(&hdev->dma_task);
@ -1058,9 +1046,6 @@ static void img_hash_remove(struct platform_device *pdev)
tasklet_kill(&hdev->dma_task);
dma_release_channel(hdev->dma_lch);
clk_disable_unprepare(hdev->hash_clk);
clk_disable_unprepare(hdev->sys_clk);
}
#ifdef CONFIG_PM_SLEEP


@ -897,7 +897,6 @@ int safexcel_init_ring_descriptors(struct safexcel_crypto_priv *priv,
int safexcel_select_ring(struct safexcel_crypto_priv *priv);
void *safexcel_ring_next_rptr(struct safexcel_crypto_priv *priv,
struct safexcel_desc_ring *ring);
void *safexcel_ring_first_rptr(struct safexcel_crypto_priv *priv, int ring);
void safexcel_ring_rollback_wptr(struct safexcel_crypto_priv *priv,
struct safexcel_desc_ring *ring);
struct safexcel_command_desc *safexcel_add_cdesc(struct safexcel_crypto_priv *priv,


@ -495,10 +495,10 @@ static void remove_device_compression_modes(struct iaa_device *iaa_device)
if (!device_mode)
continue;
free_device_compression_mode(iaa_device, device_mode);
iaa_device->compression_modes[i] = NULL;
if (iaa_compression_modes[i]->free)
iaa_compression_modes[i]->free(device_mode);
free_device_compression_mode(iaa_device, device_mode);
iaa_device->compression_modes[i] = NULL;
}
}

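The IAA hunk reads as a use-after-free fix: free_device_compression_mode() frees device_mode itself, so the mode's ->free() callback, which takes device_mode as its argument, has to run first. The ordering rule, distilled:

	/* release the users of an object before the object itself */
	if (iaa_compression_modes[i]->free)
		iaa_compression_modes[i]->free(device_mode);
	free_device_compression_mode(iaa_device, device_mode);
	iaa_device->compression_modes[i] = NULL;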

@ -1150,6 +1150,7 @@ static const struct of_device_id kmb_ocs_hcu_of_match[] = {
},
{}
};
MODULE_DEVICE_TABLE(of, kmb_ocs_hcu_of_match);
static void kmb_ocs_hcu_remove(struct platform_device *pdev)
{

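Without MODULE_DEVICE_TABLE(of, ...) the OF match table never reaches the module alias list, so userspace cannot autoload the driver when the matching device-tree node appears; the one-line addition fixes that. The minimal pairing (compatible string assumed for illustration):

	static const struct of_device_id kmb_ocs_hcu_of_match[] = {
		{ .compatible = "intel,keembay-ocs-hcu" },
		{ }
	};
	MODULE_DEVICE_TABLE(of, kmb_ocs_hcu_of_match);	/* emits MODULE_ALIAS entries */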

@ -163,7 +163,7 @@ static int adf_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
return ret;
out_err_dev_stop:
adf_dev_down(accel_dev, false);
adf_dev_down(accel_dev);
out_err:
adf_cleanup_accel(accel_dev);
return ret;
@ -177,7 +177,7 @@ static void adf_remove(struct pci_dev *pdev)
pr_err("QAT: Driver removal failed\n");
return;
}
adf_dev_down(accel_dev, false);
adf_dev_down(accel_dev);
adf_cleanup_accel(accel_dev);
}


@ -165,7 +165,7 @@ static int adf_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
return ret;
out_err_dev_stop:
adf_dev_down(accel_dev, false);
adf_dev_down(accel_dev);
out_err:
adf_cleanup_accel(accel_dev);
return ret;
@ -179,7 +179,7 @@ static void adf_remove(struct pci_dev *pdev)
pr_err("QAT: Driver removal failed\n");
return;
}
adf_dev_down(accel_dev, false);
adf_dev_down(accel_dev);
adf_cleanup_accel(accel_dev);
}


@ -202,7 +202,7 @@ static int adf_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
return ret;
out_err_dev_stop:
adf_dev_down(accel_dev, false);
adf_dev_down(accel_dev);
out_err_free_reg:
pci_release_regions(accel_pci_dev->pci_dev);
out_err_disable:
@ -221,7 +221,7 @@ static void adf_remove(struct pci_dev *pdev)
pr_err("QAT: Driver removal failed\n");
return;
}
adf_dev_down(accel_dev, false);
adf_dev_down(accel_dev);
adf_cleanup_accel(accel_dev);
adf_cleanup_pci_dev(accel_dev);
kfree(accel_dev);


@ -176,7 +176,7 @@ static int adf_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
return ret;
out_err_dev_stop:
adf_dev_down(accel_dev, false);
adf_dev_down(accel_dev);
out_err_free_reg:
pci_release_regions(accel_pci_dev->pci_dev);
out_err_disable:
@ -196,7 +196,7 @@ static void adf_remove(struct pci_dev *pdev)
return;
}
adf_flush_vf_wq(accel_dev);
adf_dev_down(accel_dev, false);
adf_dev_down(accel_dev);
adf_cleanup_accel(accel_dev);
adf_cleanup_pci_dev(accel_dev);
kfree(accel_dev);


@ -202,7 +202,7 @@ static int adf_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
return ret;
out_err_dev_stop:
adf_dev_down(accel_dev, false);
adf_dev_down(accel_dev);
out_err_free_reg:
pci_release_regions(accel_pci_dev->pci_dev);
out_err_disable:
@ -221,7 +221,7 @@ static void adf_remove(struct pci_dev *pdev)
pr_err("QAT: Driver removal failed\n");
return;
}
adf_dev_down(accel_dev, false);
adf_dev_down(accel_dev);
adf_cleanup_accel(accel_dev);
adf_cleanup_pci_dev(accel_dev);
kfree(accel_dev);


@ -176,7 +176,7 @@ static int adf_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
return ret;
out_err_dev_stop:
adf_dev_down(accel_dev, false);
adf_dev_down(accel_dev);
out_err_free_reg:
pci_release_regions(accel_pci_dev->pci_dev);
out_err_disable:
@ -196,7 +196,7 @@ static void adf_remove(struct pci_dev *pdev)
return;
}
adf_flush_vf_wq(accel_dev);
adf_dev_down(accel_dev, false);
adf_dev_down(accel_dev);
adf_cleanup_accel(accel_dev);
adf_cleanup_pci_dev(accel_dev);
kfree(accel_dev);


@ -44,7 +44,7 @@ static pci_ers_result_t adf_error_detected(struct pci_dev *pdev,
adf_pf2vf_notify_restarting(accel_dev);
adf_pf2vf_wait_for_restarting_complete(accel_dev);
pci_clear_master(pdev);
adf_dev_down(accel_dev, false);
adf_dev_down(accel_dev);
return PCI_ERS_RESULT_NEED_RESET;
}


@ -100,6 +100,8 @@ void adf_cfg_dev_dbgfs_rm(struct adf_accel_dev *accel_dev)
}
static void adf_cfg_section_del_all(struct list_head *head);
static void adf_cfg_section_del_all_except(struct list_head *head,
const char *section_name);
void adf_cfg_del_all(struct adf_accel_dev *accel_dev)
{
@ -111,6 +113,17 @@ void adf_cfg_del_all(struct adf_accel_dev *accel_dev)
clear_bit(ADF_STATUS_CONFIGURED, &accel_dev->status);
}
void adf_cfg_del_all_except(struct adf_accel_dev *accel_dev,
const char *section_name)
{
struct adf_cfg_device_data *dev_cfg_data = accel_dev->cfg;
down_write(&dev_cfg_data->lock);
adf_cfg_section_del_all_except(&dev_cfg_data->sec_list, section_name);
up_write(&dev_cfg_data->lock);
clear_bit(ADF_STATUS_CONFIGURED, &accel_dev->status);
}
/**
* adf_cfg_dev_remove() - Clears acceleration device configuration table.
* @accel_dev: Pointer to acceleration device.
@ -185,6 +198,22 @@ static void adf_cfg_section_del_all(struct list_head *head)
}
}
static void adf_cfg_section_del_all_except(struct list_head *head,
const char *section_name)
{
struct list_head *list, *tmp;
struct adf_cfg_section *ptr;
list_for_each_prev_safe(list, tmp, head) {
ptr = list_entry(list, struct adf_cfg_section, list);
if (!strcmp(ptr->name, section_name))
continue;
adf_cfg_keyval_del_all(&ptr->param_head);
list_del(list);
kfree(ptr);
}
}
static struct adf_cfg_key_val *adf_cfg_key_value_find(struct adf_cfg_section *s,
const char *key)
{

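adf_cfg_section_del_all_except() walks the section list with the _safe iterator because it deletes nodes mid-walk; the plain iterator would step through freed memory. The idiom in general form, with a hypothetical element type and keep test:

	struct list_head *pos, *tmp;
	struct item *it;

	list_for_each_prev_safe(pos, tmp, head) {	/* tmp survives list_del(pos) */
		it = list_entry(pos, struct item, list);
		if (!strcmp(it->name, keep_name))	/* hypothetical keep test */
			continue;
		list_del(pos);
		kfree(it);
	}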

@ -35,6 +35,8 @@ void adf_cfg_dev_dbgfs_add(struct adf_accel_dev *accel_dev);
void adf_cfg_dev_dbgfs_rm(struct adf_accel_dev *accel_dev);
int adf_cfg_section_add(struct adf_accel_dev *accel_dev, const char *name);
void adf_cfg_del_all(struct adf_accel_dev *accel_dev);
void adf_cfg_del_all_except(struct adf_accel_dev *accel_dev,
const char *section_name);
int adf_cfg_add_key_value_param(struct adf_accel_dev *accel_dev,
const char *section_name,
const char *key, const void *val,


@ -56,7 +56,7 @@ int adf_service_register(struct service_hndl *service);
int adf_service_unregister(struct service_hndl *service);
int adf_dev_up(struct adf_accel_dev *accel_dev, bool init_config);
int adf_dev_down(struct adf_accel_dev *accel_dev, bool cache_config);
int adf_dev_down(struct adf_accel_dev *accel_dev);
int adf_dev_restart(struct adf_accel_dev *accel_dev);
void adf_devmgr_update_class_index(struct adf_hw_device_data *hw_data);


@ -247,7 +247,7 @@ static void adf_ctl_stop_devices(u32 id)
if (!accel_dev->is_vf)
continue;
adf_dev_down(accel_dev, false);
adf_dev_down(accel_dev);
}
}
@ -256,7 +256,7 @@ static void adf_ctl_stop_devices(u32 id)
if (!adf_dev_started(accel_dev))
continue;
adf_dev_down(accel_dev, false);
adf_dev_down(accel_dev);
}
}
}
@ -319,7 +319,7 @@ static int adf_ctl_ioctl_dev_start(struct file *fp, unsigned int cmd,
if (ret) {
dev_err(&GET_DEV(accel_dev), "Failed to start qat_dev%d\n",
ctl_data->device_id);
adf_dev_down(accel_dev, false);
adf_dev_down(accel_dev);
}
out:
kfree(ctl_data);


@ -83,7 +83,7 @@
#define ADF_WQM_CSR_RPRESETSTS(bank) (ADF_WQM_CSR_RPRESETCTL(bank) + 4)
/* Ring interrupt */
#define ADF_RP_INT_SRC_SEL_F_RISE_MASK BIT(2)
#define ADF_RP_INT_SRC_SEL_F_RISE_MASK GENMASK(1, 0)
#define ADF_RP_INT_SRC_SEL_F_FALL_MASK GENMASK(2, 0)
#define ADF_RP_INT_SRC_SEL_RANGE_WIDTH 4
#define ADF_COALESCED_POLL_TIMEOUT_US (1 * USEC_PER_SEC)


@ -323,6 +323,8 @@ static void adf_dev_stop(struct adf_accel_dev *accel_dev)
if (hw_data->stop_timer)
hw_data->stop_timer(accel_dev);
hw_data->disable_iov(accel_dev);
if (wait)
msleep(100);
@ -386,16 +388,14 @@ static void adf_dev_shutdown(struct adf_accel_dev *accel_dev)
adf_tl_shutdown(accel_dev);
hw_data->disable_iov(accel_dev);
if (test_bit(ADF_STATUS_IRQ_ALLOCATED, &accel_dev->status)) {
hw_data->free_irq(accel_dev);
clear_bit(ADF_STATUS_IRQ_ALLOCATED, &accel_dev->status);
}
/* Delete configuration only if not restarting */
/* If not restarting, delete all cfg sections except for GENERAL */
if (!test_bit(ADF_STATUS_RESTARTING, &accel_dev->status))
adf_cfg_del_all(accel_dev);
adf_cfg_del_all_except(accel_dev, ADF_GENERAL_SEC);
if (hw_data->exit_arb)
hw_data->exit_arb(accel_dev);
@ -445,33 +445,7 @@ void adf_error_notifier(struct adf_accel_dev *accel_dev)
}
}
static int adf_dev_shutdown_cache_cfg(struct adf_accel_dev *accel_dev)
{
char services[ADF_CFG_MAX_VAL_LEN_IN_BYTES] = {0};
int ret;
ret = adf_cfg_get_param_value(accel_dev, ADF_GENERAL_SEC,
ADF_SERVICES_ENABLED, services);
adf_dev_stop(accel_dev);
adf_dev_shutdown(accel_dev);
if (!ret) {
ret = adf_cfg_section_add(accel_dev, ADF_GENERAL_SEC);
if (ret)
return ret;
ret = adf_cfg_add_key_value_param(accel_dev, ADF_GENERAL_SEC,
ADF_SERVICES_ENABLED,
services, ADF_STR);
if (ret)
return ret;
}
return 0;
}
int adf_dev_down(struct adf_accel_dev *accel_dev, bool reconfig)
int adf_dev_down(struct adf_accel_dev *accel_dev)
{
int ret = 0;
@ -480,15 +454,9 @@ int adf_dev_down(struct adf_accel_dev *accel_dev, bool reconfig)
mutex_lock(&accel_dev->state_lock);
if (reconfig) {
ret = adf_dev_shutdown_cache_cfg(accel_dev);
goto out;
}
adf_dev_stop(accel_dev);
adf_dev_shutdown(accel_dev);
out:
mutex_unlock(&accel_dev->state_lock);
return ret;
}
@ -535,7 +503,7 @@ int adf_dev_restart(struct adf_accel_dev *accel_dev)
if (!accel_dev)
return -EFAULT;
adf_dev_down(accel_dev, false);
adf_dev_down(accel_dev);
ret = adf_dev_up(accel_dev, false);
/* if device is already up return success*/

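With the GENERAL section now preserved by adf_cfg_del_all_except(), the save-and-restore dance in adf_dev_shutdown_cache_cfg() becomes dead weight and adf_dev_down() drops its reconfig/cache_config flag. Every call site across the qat drivers changes the same mechanical way:

	/* before */
	ret = adf_dev_down(accel_dev, false);	/* or true */
	/* after: configuration retention is no longer the caller's concern */
	ret = adf_dev_down(accel_dev);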

@ -18,14 +18,17 @@ void adf_pf2vf_notify_restarting(struct adf_accel_dev *accel_dev)
dev_dbg(&GET_DEV(accel_dev), "pf2vf notify restarting\n");
for (i = 0, vf = accel_dev->pf.vf_info; i < num_vfs; i++, vf++) {
vf->restarting = false;
if (vf->init && vf->vf_compat_ver >= ADF_PFVF_COMPAT_FALLBACK)
vf->restarting = true;
else
vf->restarting = false;
if (!vf->init)
continue;
if (adf_send_pf2vf_msg(accel_dev, i, msg))
dev_err(&GET_DEV(accel_dev),
"Failed to send restarting msg to VF%d\n", i);
else if (vf->vf_compat_ver >= ADF_PFVF_COMPAT_FALLBACK)
vf->restarting = true;
}
}


@ -48,6 +48,20 @@ void adf_vf2pf_notify_shutdown(struct adf_accel_dev *accel_dev)
}
EXPORT_SYMBOL_GPL(adf_vf2pf_notify_shutdown);
void adf_vf2pf_notify_restart_complete(struct adf_accel_dev *accel_dev)
{
struct pfvf_message msg = { .type = ADF_VF2PF_MSGTYPE_RESTARTING_COMPLETE };
/* Check compatibility version */
if (accel_dev->vf.pf_compat_ver < ADF_PFVF_COMPAT_FALLBACK)
return;
if (adf_send_vf2pf_msg(accel_dev, msg))
dev_err(&GET_DEV(accel_dev),
"Failed to send Restarting complete event to PF\n");
}
EXPORT_SYMBOL_GPL(adf_vf2pf_notify_restart_complete);
int adf_vf2pf_request_version(struct adf_accel_dev *accel_dev)
{
u8 pf_version;


@ -6,6 +6,7 @@
#if defined(CONFIG_PCI_IOV)
int adf_vf2pf_notify_init(struct adf_accel_dev *accel_dev);
void adf_vf2pf_notify_shutdown(struct adf_accel_dev *accel_dev);
void adf_vf2pf_notify_restart_complete(struct adf_accel_dev *accel_dev);
int adf_vf2pf_request_version(struct adf_accel_dev *accel_dev);
int adf_vf2pf_get_capabilities(struct adf_accel_dev *accel_dev);
int adf_vf2pf_get_ring_to_svc(struct adf_accel_dev *accel_dev);


@ -86,11 +86,133 @@ static int adf_enable_sriov(struct adf_accel_dev *accel_dev)
return pci_enable_sriov(pdev, totalvfs);
}
static int adf_add_sriov_configuration(struct adf_accel_dev *accel_dev)
{
unsigned long val = 0;
int ret;
ret = adf_cfg_section_add(accel_dev, ADF_KERNEL_SEC);
if (ret)
return ret;
ret = adf_cfg_add_key_value_param(accel_dev, ADF_KERNEL_SEC, ADF_NUM_CY,
&val, ADF_DEC);
if (ret)
return ret;
ret = adf_cfg_add_key_value_param(accel_dev, ADF_KERNEL_SEC, ADF_NUM_DC,
&val, ADF_DEC);
if (ret)
return ret;
set_bit(ADF_STATUS_CONFIGURED, &accel_dev->status);
return ret;
}
static int adf_do_disable_sriov(struct adf_accel_dev *accel_dev)
{
int ret;
if (adf_dev_in_use(accel_dev)) {
dev_err(&GET_DEV(accel_dev),
"Cannot disable SR-IOV, device in use\n");
return -EBUSY;
}
if (adf_dev_started(accel_dev)) {
if (adf_devmgr_in_reset(accel_dev)) {
dev_err(&GET_DEV(accel_dev),
"Cannot disable SR-IOV, device in reset\n");
return -EBUSY;
}
ret = adf_dev_down(accel_dev);
if (ret)
goto err_del_cfg;
}
adf_disable_sriov(accel_dev);
ret = adf_dev_up(accel_dev, true);
if (ret)
goto err_del_cfg;
return 0;
err_del_cfg:
adf_cfg_del_all_except(accel_dev, ADF_GENERAL_SEC);
return ret;
}
static int adf_do_enable_sriov(struct adf_accel_dev *accel_dev)
{
struct pci_dev *pdev = accel_to_pci_dev(accel_dev);
int totalvfs = pci_sriov_get_totalvfs(pdev);
unsigned long val;
int ret;
if (!device_iommu_mapped(&GET_DEV(accel_dev))) {
dev_warn(&GET_DEV(accel_dev),
"IOMMU should be enabled for SR-IOV to work correctly\n");
return -EINVAL;
}
if (adf_dev_started(accel_dev)) {
if (adf_devmgr_in_reset(accel_dev) || adf_dev_in_use(accel_dev)) {
dev_err(&GET_DEV(accel_dev), "Device busy\n");
return -EBUSY;
}
ret = adf_dev_down(accel_dev);
if (ret)
return ret;
}
ret = adf_add_sriov_configuration(accel_dev);
if (ret)
goto err_del_cfg;
/* Allocate memory for VF info structs */
accel_dev->pf.vf_info = kcalloc(totalvfs, sizeof(struct adf_accel_vf_info),
GFP_KERNEL);
ret = -ENOMEM;
if (!accel_dev->pf.vf_info)
goto err_del_cfg;
ret = adf_dev_up(accel_dev, false);
if (ret) {
dev_err(&GET_DEV(accel_dev), "Failed to start qat_dev%d\n",
accel_dev->accel_id);
goto err_free_vf_info;
}
ret = adf_enable_sriov(accel_dev);
if (ret)
goto err_free_vf_info;
val = 1;
ret = adf_cfg_add_key_value_param(accel_dev, ADF_GENERAL_SEC, ADF_SRIOV_ENABLED,
&val, ADF_DEC);
if (ret)
goto err_free_vf_info;
return totalvfs;
err_free_vf_info:
adf_dev_down(accel_dev);
kfree(accel_dev->pf.vf_info);
accel_dev->pf.vf_info = NULL;
return ret;
err_del_cfg:
adf_cfg_del_all_except(accel_dev, ADF_GENERAL_SEC);
return ret;
}
void adf_reenable_sriov(struct adf_accel_dev *accel_dev)
{
struct pci_dev *pdev = accel_to_pci_dev(accel_dev);
char cfg[ADF_CFG_MAX_VAL_LEN_IN_BYTES] = {0};
unsigned long val = 0;
if (adf_cfg_get_param_value(accel_dev, ADF_GENERAL_SEC,
ADF_SRIOV_ENABLED, cfg))
@ -99,15 +221,9 @@ void adf_reenable_sriov(struct adf_accel_dev *accel_dev)
if (!accel_dev->pf.vf_info)
return;
if (adf_cfg_add_key_value_param(accel_dev, ADF_KERNEL_SEC, ADF_NUM_CY,
&val, ADF_DEC))
if (adf_add_sriov_configuration(accel_dev))
return;
if (adf_cfg_add_key_value_param(accel_dev, ADF_KERNEL_SEC, ADF_NUM_DC,
&val, ADF_DEC))
return;
set_bit(ADF_STATUS_CONFIGURED, &accel_dev->status);
dev_dbg(&pdev->dev, "Re-enabling SRIOV\n");
adf_enable_sriov(accel_dev);
}
@ -168,70 +284,16 @@ EXPORT_SYMBOL_GPL(adf_disable_sriov);
int adf_sriov_configure(struct pci_dev *pdev, int numvfs)
{
struct adf_accel_dev *accel_dev = adf_devmgr_pci_to_accel_dev(pdev);
int totalvfs = pci_sriov_get_totalvfs(pdev);
unsigned long val;
int ret;
if (!accel_dev) {
dev_err(&pdev->dev, "Failed to find accel_dev\n");
return -EFAULT;
}
if (!device_iommu_mapped(&pdev->dev))
dev_warn(&pdev->dev, "IOMMU should be enabled for SR-IOV to work correctly\n");
if (accel_dev->pf.vf_info) {
dev_info(&pdev->dev, "Already enabled for this device\n");
return -EINVAL;
}
if (adf_dev_started(accel_dev)) {
if (adf_devmgr_in_reset(accel_dev) ||
adf_dev_in_use(accel_dev)) {
dev_err(&GET_DEV(accel_dev), "Device busy\n");
return -EBUSY;
}
ret = adf_dev_down(accel_dev, true);
if (ret)
return ret;
}
if (adf_cfg_section_add(accel_dev, ADF_KERNEL_SEC))
return -EFAULT;
val = 0;
if (adf_cfg_add_key_value_param(accel_dev, ADF_KERNEL_SEC,
ADF_NUM_CY, (void *)&val, ADF_DEC))
return -EFAULT;
ret = adf_cfg_add_key_value_param(accel_dev, ADF_KERNEL_SEC, ADF_NUM_DC,
&val, ADF_DEC);
if (ret)
return ret;
set_bit(ADF_STATUS_CONFIGURED, &accel_dev->status);
/* Allocate memory for VF info structs */
accel_dev->pf.vf_info = kcalloc(totalvfs,
sizeof(struct adf_accel_vf_info),
GFP_KERNEL);
if (!accel_dev->pf.vf_info)
return -ENOMEM;
if (adf_dev_up(accel_dev, false)) {
dev_err(&GET_DEV(accel_dev), "Failed to start qat_dev%d\n",
accel_dev->accel_id);
return -EFAULT;
}
ret = adf_enable_sriov(accel_dev);
if (ret)
return ret;
val = 1;
adf_cfg_add_key_value_param(accel_dev, ADF_GENERAL_SEC, ADF_SRIOV_ENABLED,
&val, ADF_DEC);
return numvfs;
if (numvfs)
return adf_do_enable_sriov(accel_dev);
else
return adf_do_disable_sriov(accel_dev);
}
EXPORT_SYMBOL_GPL(adf_sriov_configure);

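adf_sriov_configure() is the pci_driver ->sriov_configure hook: the PCI core passes the requested VF count, with 0 meaning disable. The rework routes both directions explicitly, which is what makes disabling VFs through sysfs work at all. The dispatch, distilled:

	/* reached via: echo N > /sys/bus/pci/devices/<bdf>/sriov_numvfs */
	int adf_sriov_configure(struct pci_dev *pdev, int numvfs)
	{
		struct adf_accel_dev *accel_dev = adf_devmgr_pci_to_accel_dev(pdev);

		if (!accel_dev)
			return -EFAULT;
		return numvfs ? adf_do_enable_sriov(accel_dev)
			      : adf_do_disable_sriov(accel_dev);
	}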

@ -62,7 +62,7 @@ static ssize_t state_store(struct device *dev, struct device_attribute *attr,
break;
}
ret = adf_dev_down(accel_dev, true);
ret = adf_dev_down(accel_dev);
if (ret)
return ret;
@ -76,7 +76,7 @@ static ssize_t state_store(struct device *dev, struct device_attribute *attr,
} else if (ret) {
dev_err(dev, "Failed to start device qat_dev%d\n",
accel_id);
adf_dev_down(accel_dev, true);
adf_dev_down(accel_dev);
return ret;
}
break;


@ -13,6 +13,7 @@
#include "adf_cfg.h"
#include "adf_cfg_strings.h"
#include "adf_cfg_common.h"
#include "adf_pfvf_vf_msg.h"
#include "adf_transport_access_macros.h"
#include "adf_transport_internal.h"
@ -71,10 +72,11 @@ static void adf_dev_stop_async(struct work_struct *work)
struct adf_accel_dev *accel_dev = stop_data->accel_dev;
adf_dev_restarting_notify(accel_dev);
adf_dev_down(accel_dev, false);
adf_dev_down(accel_dev);
/* Re-enable PF2VF interrupts */
adf_enable_pf2vf_interrupts(accel_dev);
adf_vf2pf_notify_restart_complete(accel_dev);
kfree(stop_data);
}


@ -23,6 +23,8 @@ struct qat_alg_buf_list {
);
struct qat_alg_buf buffers[];
} __packed;
static_assert(offsetof(struct qat_alg_buf_list, buffers) == sizeof(struct qat_alg_buf_list_hdr),
"struct member likely outside of __struct_group()");
struct qat_alg_fixed_buf_list {
struct qat_alg_buf_list_hdr sgl_hdr;

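__struct_group() only works if the standalone header struct and the grouped members share a layout; the added static_assert() turns any divergence (a new member landing outside the group, unexpected padding before the flexible array) into a compile-time error. The technique in general form, with hypothetical types:

	struct pkt_hdr {
		u16 len;
		u16 flags;
	};

	struct pkt {
		u16 len;
		u16 flags;
		u8 data[];	/* must begin exactly where pkt_hdr ends */
	};

	static_assert(offsetof(struct pkt, data) == sizeof(struct pkt_hdr),
		      "header/payload layout mismatch");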

@ -58,7 +58,7 @@ static int qat_uclo_free_ae_data(struct icp_qat_uclo_aedata *ae_data)
unsigned int i;
if (!ae_data) {
pr_err("QAT: bad argument, ae_data is NULL\n ");
pr_err("QAT: bad argument, ae_data is NULL\n");
return -EINVAL;
}


@ -202,7 +202,7 @@ static int adf_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
return ret;
out_err_dev_stop:
adf_dev_down(accel_dev, false);
adf_dev_down(accel_dev);
out_err_free_reg:
pci_release_regions(accel_pci_dev->pci_dev);
out_err_disable:
@ -221,7 +221,7 @@ static void adf_remove(struct pci_dev *pdev)
pr_err("QAT: Driver removal failed\n");
return;
}
adf_dev_down(accel_dev, false);
adf_dev_down(accel_dev);
adf_cleanup_accel(accel_dev);
adf_cleanup_pci_dev(accel_dev);
kfree(accel_dev);


@ -176,7 +176,7 @@ static int adf_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
return ret;
out_err_dev_stop:
adf_dev_down(accel_dev, false);
adf_dev_down(accel_dev);
out_err_free_reg:
pci_release_regions(accel_pci_dev->pci_dev);
out_err_disable:
@ -196,7 +196,7 @@ static void adf_remove(struct pci_dev *pdev)
return;
}
adf_flush_vf_wq(accel_dev);
adf_dev_down(accel_dev, false);
adf_dev_down(accel_dev);
adf_cleanup_accel(accel_dev);
adf_cleanup_pci_dev(accel_dev);
kfree(accel_dev);


@ -28,6 +28,7 @@ config CRYPTO_DEV_OCTEONTX_CPT
select CRYPTO_SKCIPHER
select CRYPTO_HASH
select CRYPTO_AEAD
select CRYPTO_AUTHENC
select CRYPTO_DEV_MARVELL
help
This driver allows you to utilize the Marvell Cryptographic
@ -47,6 +48,7 @@ config CRYPTO_DEV_OCTEONTX2_CPT
select CRYPTO_SKCIPHER
select CRYPTO_HASH
select CRYPTO_AEAD
select CRYPTO_AUTHENC
select NET_DEVLINK
help
This driver allows you to utilize the Marvell Cryptographic


@ -17,7 +17,6 @@
#include <crypto/sha2.h>
#include <crypto/xts.h>
#include <crypto/scatterwalk.h>
#include <linux/rtnetlink.h>
#include <linux/sort.h>
#include <linux/module.h>
#include "otx_cptvf.h"
@ -66,6 +65,8 @@ static struct cpt_device_table ae_devices = {
.count = ATOMIC_INIT(0)
};
static struct otx_cpt_sdesc *alloc_sdesc(struct crypto_shash *alg);
static inline int get_se_device(struct pci_dev **pdev, int *cpu_num)
{
int count, ret = 0;
@ -509,44 +510,61 @@ static int cpt_aead_init(struct crypto_aead *tfm, u8 cipher_type, u8 mac_type)
ctx->cipher_type = cipher_type;
ctx->mac_type = mac_type;
switch (ctx->mac_type) {
case OTX_CPT_SHA1:
ctx->hashalg = crypto_alloc_shash("sha1", 0, 0);
break;
case OTX_CPT_SHA256:
ctx->hashalg = crypto_alloc_shash("sha256", 0, 0);
break;
case OTX_CPT_SHA384:
ctx->hashalg = crypto_alloc_shash("sha384", 0, 0);
break;
case OTX_CPT_SHA512:
ctx->hashalg = crypto_alloc_shash("sha512", 0, 0);
break;
}
if (IS_ERR(ctx->hashalg))
return PTR_ERR(ctx->hashalg);
crypto_aead_set_reqsize_dma(tfm, sizeof(struct otx_cpt_req_ctx));
if (!ctx->hashalg)
return 0;
/*
* When selected cipher is NULL we use HMAC opcode instead of
* FLEXICRYPTO opcode therefore we don't need to use HASH algorithms
* for calculating ipad and opad
*/
if (ctx->cipher_type != OTX_CPT_CIPHER_NULL) {
switch (ctx->mac_type) {
case OTX_CPT_SHA1:
ctx->hashalg = crypto_alloc_shash("sha1", 0,
CRYPTO_ALG_ASYNC);
if (IS_ERR(ctx->hashalg))
return PTR_ERR(ctx->hashalg);
break;
int ss = crypto_shash_statesize(ctx->hashalg);
case OTX_CPT_SHA256:
ctx->hashalg = crypto_alloc_shash("sha256", 0,
CRYPTO_ALG_ASYNC);
if (IS_ERR(ctx->hashalg))
return PTR_ERR(ctx->hashalg);
break;
ctx->ipad = kzalloc(ss, GFP_KERNEL);
if (!ctx->ipad) {
crypto_free_shash(ctx->hashalg);
return -ENOMEM;
}
case OTX_CPT_SHA384:
ctx->hashalg = crypto_alloc_shash("sha384", 0,
CRYPTO_ALG_ASYNC);
if (IS_ERR(ctx->hashalg))
return PTR_ERR(ctx->hashalg);
break;
case OTX_CPT_SHA512:
ctx->hashalg = crypto_alloc_shash("sha512", 0,
CRYPTO_ALG_ASYNC);
if (IS_ERR(ctx->hashalg))
return PTR_ERR(ctx->hashalg);
break;
ctx->opad = kzalloc(ss, GFP_KERNEL);
if (!ctx->opad) {
kfree(ctx->ipad);
crypto_free_shash(ctx->hashalg);
return -ENOMEM;
}
}
crypto_aead_set_reqsize_dma(tfm, sizeof(struct otx_cpt_req_ctx));
ctx->sdesc = alloc_sdesc(ctx->hashalg);
if (!ctx->sdesc) {
kfree(ctx->opad);
kfree(ctx->ipad);
crypto_free_shash(ctx->hashalg);
return -ENOMEM;
}
return 0;
}
@ -602,8 +620,7 @@ static void otx_cpt_aead_exit(struct crypto_aead *tfm)
kfree(ctx->ipad);
kfree(ctx->opad);
if (ctx->hashalg)
crypto_free_shash(ctx->hashalg);
crypto_free_shash(ctx->hashalg);
kfree(ctx->sdesc);
}
@ -699,7 +716,7 @@ static inline void swap_data64(void *buf, u32 len)
*dst = cpu_to_be64p(src);
}
static int copy_pad(u8 mac_type, u8 *out_pad, u8 *in_pad)
static int swap_pad(u8 mac_type, u8 *pad)
{
struct sha512_state *sha512;
struct sha256_state *sha256;
@ -707,22 +724,19 @@ static int copy_pad(u8 mac_type, u8 *out_pad, u8 *in_pad)
switch (mac_type) {
case OTX_CPT_SHA1:
sha1 = (struct sha1_state *) in_pad;
sha1 = (struct sha1_state *)pad;
swap_data32(sha1->state, SHA1_DIGEST_SIZE);
memcpy(out_pad, &sha1->state, SHA1_DIGEST_SIZE);
break;
case OTX_CPT_SHA256:
sha256 = (struct sha256_state *) in_pad;
sha256 = (struct sha256_state *)pad;
swap_data32(sha256->state, SHA256_DIGEST_SIZE);
memcpy(out_pad, &sha256->state, SHA256_DIGEST_SIZE);
break;
case OTX_CPT_SHA384:
case OTX_CPT_SHA512:
sha512 = (struct sha512_state *) in_pad;
sha512 = (struct sha512_state *)pad;
swap_data64(sha512->state, SHA512_DIGEST_SIZE);
memcpy(out_pad, &sha512->state, SHA512_DIGEST_SIZE);
break;
default:
@ -732,55 +746,53 @@ static int copy_pad(u8 mac_type, u8 *out_pad, u8 *in_pad)
return 0;
}
static int aead_hmac_init(struct crypto_aead *cipher)
static int aead_hmac_init(struct crypto_aead *cipher,
struct crypto_authenc_keys *keys)
{
struct otx_cpt_aead_ctx *ctx = crypto_aead_ctx_dma(cipher);
int state_size = crypto_shash_statesize(ctx->hashalg);
int ds = crypto_shash_digestsize(ctx->hashalg);
int bs = crypto_shash_blocksize(ctx->hashalg);
int authkeylen = ctx->auth_key_len;
int authkeylen = keys->authkeylen;
u8 *ipad = NULL, *opad = NULL;
int ret = 0, icount = 0;
ctx->sdesc = alloc_sdesc(ctx->hashalg);
if (!ctx->sdesc)
return -ENOMEM;
ctx->ipad = kzalloc(bs, GFP_KERNEL);
if (!ctx->ipad) {
ret = -ENOMEM;
goto calc_fail;
}
ctx->opad = kzalloc(bs, GFP_KERNEL);
if (!ctx->opad) {
ret = -ENOMEM;
goto calc_fail;
}
ipad = kzalloc(state_size, GFP_KERNEL);
if (!ipad) {
ret = -ENOMEM;
goto calc_fail;
}
opad = kzalloc(state_size, GFP_KERNEL);
if (!opad) {
ret = -ENOMEM;
goto calc_fail;
}
int icount = 0;
int ret;
if (authkeylen > bs) {
ret = crypto_shash_digest(&ctx->sdesc->shash, ctx->key,
authkeylen, ipad);
ret = crypto_shash_digest(&ctx->sdesc->shash, keys->authkey,
authkeylen, ctx->key);
if (ret)
goto calc_fail;
return ret;
authkeylen = ds;
} else {
memcpy(ipad, ctx->key, authkeylen);
} else
memcpy(ctx->key, keys->authkey, authkeylen);
ctx->enc_key_len = keys->enckeylen;
ctx->auth_key_len = authkeylen;
if (ctx->cipher_type == OTX_CPT_CIPHER_NULL)
return keys->enckeylen ? -EINVAL : 0;
switch (keys->enckeylen) {
case AES_KEYSIZE_128:
ctx->key_type = OTX_CPT_AES_128_BIT;
break;
case AES_KEYSIZE_192:
ctx->key_type = OTX_CPT_AES_192_BIT;
break;
case AES_KEYSIZE_256:
ctx->key_type = OTX_CPT_AES_256_BIT;
break;
default:
/* Invalid key length */
return -EINVAL;
}
memcpy(ctx->key + authkeylen, keys->enckey, keys->enckeylen);
ipad = ctx->ipad;
opad = ctx->opad;
memcpy(ipad, ctx->key, authkeylen);
memset(ipad + authkeylen, 0, bs - authkeylen);
memcpy(opad, ipad, bs);
@ -798,7 +810,7 @@ static int aead_hmac_init(struct crypto_aead *cipher)
crypto_shash_init(&ctx->sdesc->shash);
crypto_shash_update(&ctx->sdesc->shash, ipad, bs);
crypto_shash_export(&ctx->sdesc->shash, ipad);
ret = copy_pad(ctx->mac_type, ctx->ipad, ipad);
ret = swap_pad(ctx->mac_type, ipad);
if (ret)
goto calc_fail;
@ -806,25 +818,9 @@ static int aead_hmac_init(struct crypto_aead *cipher)
crypto_shash_init(&ctx->sdesc->shash);
crypto_shash_update(&ctx->sdesc->shash, opad, bs);
crypto_shash_export(&ctx->sdesc->shash, opad);
ret = copy_pad(ctx->mac_type, ctx->opad, opad);
if (ret)
goto calc_fail;
kfree(ipad);
kfree(opad);
return 0;
ret = swap_pad(ctx->mac_type, opad);
calc_fail:
kfree(ctx->ipad);
ctx->ipad = NULL;
kfree(ctx->opad);
ctx->opad = NULL;
kfree(ipad);
kfree(opad);
kfree(ctx->sdesc);
ctx->sdesc = NULL;
return ret;
}
@ -832,57 +828,15 @@ static int otx_cpt_aead_cbc_aes_sha_setkey(struct crypto_aead *cipher,
const unsigned char *key,
unsigned int keylen)
{
struct otx_cpt_aead_ctx *ctx = crypto_aead_ctx_dma(cipher);
struct crypto_authenc_key_param *param;
int enckeylen = 0, authkeylen = 0;
struct rtattr *rta = (void *)key;
int status = -EINVAL;
struct crypto_authenc_keys authenc_keys;
int status;
if (!RTA_OK(rta, keylen))
goto badkey;
if (rta->rta_type != CRYPTO_AUTHENC_KEYA_PARAM)
goto badkey;
if (RTA_PAYLOAD(rta) < sizeof(*param))
goto badkey;
param = RTA_DATA(rta);
enckeylen = be32_to_cpu(param->enckeylen);
key += RTA_ALIGN(rta->rta_len);
keylen -= RTA_ALIGN(rta->rta_len);
if (keylen < enckeylen)
goto badkey;
if (keylen > OTX_CPT_MAX_KEY_SIZE)
goto badkey;
authkeylen = keylen - enckeylen;
memcpy(ctx->key, key, keylen);
switch (enckeylen) {
case AES_KEYSIZE_128:
ctx->key_type = OTX_CPT_AES_128_BIT;
break;
case AES_KEYSIZE_192:
ctx->key_type = OTX_CPT_AES_192_BIT;
break;
case AES_KEYSIZE_256:
ctx->key_type = OTX_CPT_AES_256_BIT;
break;
default:
/* Invalid key length */
goto badkey;
}
ctx->enc_key_len = enckeylen;
ctx->auth_key_len = authkeylen;
status = aead_hmac_init(cipher);
status = crypto_authenc_extractkeys(&authenc_keys, key, keylen);
if (status)
goto badkey;
return 0;
status = aead_hmac_init(cipher, &authenc_keys);
badkey:
return status;
}
@ -891,36 +845,7 @@ static int otx_cpt_aead_ecb_null_sha_setkey(struct crypto_aead *cipher,
const unsigned char *key,
unsigned int keylen)
{
struct otx_cpt_aead_ctx *ctx = crypto_aead_ctx_dma(cipher);
struct crypto_authenc_key_param *param;
struct rtattr *rta = (void *)key;
int enckeylen = 0;
if (!RTA_OK(rta, keylen))
goto badkey;
if (rta->rta_type != CRYPTO_AUTHENC_KEYA_PARAM)
goto badkey;
if (RTA_PAYLOAD(rta) < sizeof(*param))
goto badkey;
param = RTA_DATA(rta);
enckeylen = be32_to_cpu(param->enckeylen);
key += RTA_ALIGN(rta->rta_len);
keylen -= RTA_ALIGN(rta->rta_len);
if (enckeylen != 0)
goto badkey;
if (keylen > OTX_CPT_MAX_KEY_SIZE)
goto badkey;
memcpy(ctx->key, key, keylen);
ctx->enc_key_len = enckeylen;
ctx->auth_key_len = keylen;
return 0;
badkey:
return -EINVAL;
return otx_cpt_aead_cbc_aes_sha_setkey(cipher, key, keylen);
}
static int otx_cpt_aead_gcm_aes_setkey(struct crypto_aead *cipher,
@ -1613,14 +1538,6 @@ static int compare_func(const void *lptr, const void *rptr)
return 0;
}
static void swap_func(void *lptr, void *rptr, int size)
{
struct cpt_device_desc *ldesc = (struct cpt_device_desc *) lptr;
struct cpt_device_desc *rdesc = (struct cpt_device_desc *) rptr;
swap(*ldesc, *rdesc);
}
int otx_cpt_crypto_init(struct pci_dev *pdev, struct module *mod,
enum otx_cptpf_type pf_type,
enum otx_cptvf_type engine_type,
@ -1655,7 +1572,7 @@ int otx_cpt_crypto_init(struct pci_dev *pdev, struct module *mod,
is_crypto_registered = true;
}
sort(se_devices.desc, count, sizeof(struct cpt_device_desc),
compare_func, swap_func);
compare_func, NULL);
break;
case OTX_CPT_AE_TYPES:
@ -1670,7 +1587,7 @@ int otx_cpt_crypto_init(struct pci_dev *pdev, struct module *mod,
ae_devices.desc[count++].dev = pdev;
atomic_inc(&ae_devices.count);
sort(ae_devices.desc, count, sizeof(struct cpt_device_desc),
compare_func, swap_func);
compare_func, NULL);
break;
default:

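This is the headline octeontx fix: both drivers stop hand-parsing the rtattr-encoded authenc key blob (the source of the parsing bug) and call the generic helper instead, which is also why the Kconfig hunks above add select CRYPTO_AUTHENC. A setkey built on the helper, with hypothetical driver plumbing:

	static int my_aead_setkey(struct crypto_aead *tfm, const u8 *key,
				  unsigned int keylen)
	{
		struct crypto_authenc_keys keys;

		if (crypto_authenc_extractkeys(&keys, key, keylen))
			return -EINVAL;
		/* keys.authkey/authkeylen: auth key; keys.enckey/enckeylen: cipher key */
		return program_hw_keys(tfm, &keys);	/* hypothetical */
	}

The ecb(null) setkey collapses into the cbc(aes) one for the same reason: once the blob is parsed generically, the null-cipher case is just enckeylen == 0.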

@ -185,6 +185,5 @@ int otx_cpt_crypto_init(struct pci_dev *pdev, struct module *mod,
int num_queues, int num_devices);
void otx_cpt_crypto_exit(struct pci_dev *pdev, struct module *mod,
enum otx_cptvf_type engine_type);
void otx_cpt_callback(int status, void *arg, void *req);
#endif /* __OTX_CPT_ALGS_H */


@ -11,7 +11,6 @@
#include <crypto/xts.h>
#include <crypto/gcm.h>
#include <crypto/scatterwalk.h>
#include <linux/rtnetlink.h>
#include <linux/sort.h>
#include <linux/module.h>
#include "otx2_cptvf.h"
@ -55,6 +54,8 @@ static struct cpt_device_table se_devices = {
.count = ATOMIC_INIT(0)
};
static struct otx2_cpt_sdesc *alloc_sdesc(struct crypto_shash *alg);
static inline int get_se_device(struct pci_dev **pdev, int *cpu_num)
{
int count;
@ -598,40 +599,56 @@ static int cpt_aead_init(struct crypto_aead *atfm, u8 cipher_type, u8 mac_type)
ctx->cipher_type = cipher_type;
ctx->mac_type = mac_type;
switch (ctx->mac_type) {
case OTX2_CPT_SHA1:
ctx->hashalg = crypto_alloc_shash("sha1", 0, 0);
break;
case OTX2_CPT_SHA256:
ctx->hashalg = crypto_alloc_shash("sha256", 0, 0);
break;
case OTX2_CPT_SHA384:
ctx->hashalg = crypto_alloc_shash("sha384", 0, 0);
break;
case OTX2_CPT_SHA512:
ctx->hashalg = crypto_alloc_shash("sha512", 0, 0);
break;
}
if (IS_ERR(ctx->hashalg))
return PTR_ERR(ctx->hashalg);
if (ctx->hashalg) {
ctx->sdesc = alloc_sdesc(ctx->hashalg);
if (!ctx->sdesc) {
crypto_free_shash(ctx->hashalg);
return -ENOMEM;
}
}
/*
* When selected cipher is NULL we use HMAC opcode instead of
* FLEXICRYPTO opcode therefore we don't need to use HASH algorithms
* for calculating ipad and opad
*/
if (ctx->cipher_type != OTX2_CPT_CIPHER_NULL) {
switch (ctx->mac_type) {
case OTX2_CPT_SHA1:
ctx->hashalg = crypto_alloc_shash("sha1", 0,
CRYPTO_ALG_ASYNC);
if (IS_ERR(ctx->hashalg))
return PTR_ERR(ctx->hashalg);
break;
if (ctx->cipher_type != OTX2_CPT_CIPHER_NULL && ctx->hashalg) {
int ss = crypto_shash_statesize(ctx->hashalg);
case OTX2_CPT_SHA256:
ctx->hashalg = crypto_alloc_shash("sha256", 0,
CRYPTO_ALG_ASYNC);
if (IS_ERR(ctx->hashalg))
return PTR_ERR(ctx->hashalg);
break;
ctx->ipad = kzalloc(ss, GFP_KERNEL);
if (!ctx->ipad) {
kfree(ctx->sdesc);
crypto_free_shash(ctx->hashalg);
return -ENOMEM;
}
case OTX2_CPT_SHA384:
ctx->hashalg = crypto_alloc_shash("sha384", 0,
CRYPTO_ALG_ASYNC);
if (IS_ERR(ctx->hashalg))
return PTR_ERR(ctx->hashalg);
break;
case OTX2_CPT_SHA512:
ctx->hashalg = crypto_alloc_shash("sha512", 0,
CRYPTO_ALG_ASYNC);
if (IS_ERR(ctx->hashalg))
return PTR_ERR(ctx->hashalg);
break;
ctx->opad = kzalloc(ss, GFP_KERNEL);
if (!ctx->opad) {
kfree(ctx->ipad);
kfree(ctx->sdesc);
crypto_free_shash(ctx->hashalg);
return -ENOMEM;
}
}
switch (ctx->cipher_type) {
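The interleaved hunk above is hard to read in this rendering; as I read it, the net effect is that the per-mac_type shash allocation drops the CRYPTO_ALG_ASYNC mask and the duplicated per-case IS_ERR() checks, and the sdesc/ipad/opad buffers move from setkey time to init time, sized by crypto_shash_statesize(). A condensed, non-verbatim reading:

	ctx->hashalg = crypto_alloc_shash(name, 0, 0);	/* name per mac_type */
	if (IS_ERR(ctx->hashalg))
		return PTR_ERR(ctx->hashalg);

	if (ctx->hashalg) {
		ctx->sdesc = alloc_sdesc(ctx->hashalg);
		if (!ctx->sdesc)
			goto err_free_shash;		/* -ENOMEM */
	}

	if (ctx->cipher_type != OTX2_CPT_CIPHER_NULL && ctx->hashalg) {
		int ss = crypto_shash_statesize(ctx->hashalg);

		ctx->ipad = kzalloc(ss, GFP_KERNEL);	/* exported midstates */
		ctx->opad = kzalloc(ss, GFP_KERNEL);
		/* on failure: free what was allocated, return -ENOMEM */
	}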
@ -713,8 +730,7 @@ static void otx2_cpt_aead_exit(struct crypto_aead *tfm)
kfree(ctx->ipad);
kfree(ctx->opad);
if (ctx->hashalg)
crypto_free_shash(ctx->hashalg);
crypto_free_shash(ctx->hashalg);
kfree(ctx->sdesc);
if (ctx->fbk_cipher) {
@ -788,7 +804,7 @@ static inline void swap_data64(void *buf, u32 len)
cpu_to_be64s(src);
}
static int copy_pad(u8 mac_type, u8 *out_pad, u8 *in_pad)
static int swap_pad(u8 mac_type, u8 *pad)
{
struct sha512_state *sha512;
struct sha256_state *sha256;
@ -796,22 +812,19 @@ static int copy_pad(u8 mac_type, u8 *out_pad, u8 *in_pad)
switch (mac_type) {
case OTX2_CPT_SHA1:
sha1 = (struct sha1_state *) in_pad;
sha1 = (struct sha1_state *)pad;
swap_data32(sha1->state, SHA1_DIGEST_SIZE);
memcpy(out_pad, &sha1->state, SHA1_DIGEST_SIZE);
break;
case OTX2_CPT_SHA256:
sha256 = (struct sha256_state *) in_pad;
sha256 = (struct sha256_state *)pad;
swap_data32(sha256->state, SHA256_DIGEST_SIZE);
memcpy(out_pad, &sha256->state, SHA256_DIGEST_SIZE);
break;
case OTX2_CPT_SHA384:
case OTX2_CPT_SHA512:
sha512 = (struct sha512_state *) in_pad;
sha512 = (struct sha512_state *)pad;
swap_data64(sha512->state, SHA512_DIGEST_SIZE);
memcpy(out_pad, &sha512->state, SHA512_DIGEST_SIZE);
break;
default:
@ -821,55 +834,54 @@ static int copy_pad(u8 mac_type, u8 *out_pad, u8 *in_pad)
return 0;
}
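copy_pad() became swap_pad(): the exported hash state is converted to big endian in place rather than swapped into a scratch buffer and copied out. swap_data64()'s tail is visible above (cpu_to_be64s()); assuming the 32-bit variant mirrors it, the core idea is:

/* Sketch: convert an array of CPU-endian 32-bit words to big endian
 * in place, as swap_data32() appears to do for the SHA-1/SHA-256
 * state; the 64-bit variant is analogous with cpu_to_be64s(). */
static void swap_words32(void *buf, unsigned int len)
{
	u32 *word = buf;
	unsigned int i;

	for (i = 0; i < len / sizeof(*word); i++)
		cpu_to_be32s(&word[i]);
}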
static int aead_hmac_init(struct crypto_aead *cipher)
static int aead_hmac_init(struct crypto_aead *cipher,
struct crypto_authenc_keys *keys)
{
struct otx2_cpt_aead_ctx *ctx = crypto_aead_ctx_dma(cipher);
int state_size = crypto_shash_statesize(ctx->hashalg);
int ds = crypto_shash_digestsize(ctx->hashalg);
int bs = crypto_shash_blocksize(ctx->hashalg);
int authkeylen = ctx->auth_key_len;
int authkeylen = keys->authkeylen;
u8 *ipad = NULL, *opad = NULL;
int ret = 0, icount = 0;
ctx->sdesc = alloc_sdesc(ctx->hashalg);
if (!ctx->sdesc)
return -ENOMEM;
ctx->ipad = kzalloc(bs, GFP_KERNEL);
if (!ctx->ipad) {
ret = -ENOMEM;
goto calc_fail;
}
ctx->opad = kzalloc(bs, GFP_KERNEL);
if (!ctx->opad) {
ret = -ENOMEM;
goto calc_fail;
}
ipad = kzalloc(state_size, GFP_KERNEL);
if (!ipad) {
ret = -ENOMEM;
goto calc_fail;
}
opad = kzalloc(state_size, GFP_KERNEL);
if (!opad) {
ret = -ENOMEM;
goto calc_fail;
}
int icount = 0;
int ret;
if (authkeylen > bs) {
ret = crypto_shash_digest(&ctx->sdesc->shash, ctx->key,
authkeylen, ipad);
ret = crypto_shash_digest(&ctx->sdesc->shash, keys->authkey,
authkeylen, ctx->key);
if (ret)
goto calc_fail;
authkeylen = ds;
} else {
memcpy(ipad, ctx->key, authkeylen);
} else
memcpy(ctx->key, keys->authkey, authkeylen);
ctx->enc_key_len = keys->enckeylen;
ctx->auth_key_len = authkeylen;
if (ctx->cipher_type == OTX2_CPT_CIPHER_NULL)
return keys->enckeylen ? -EINVAL : 0;
switch (keys->enckeylen) {
case AES_KEYSIZE_128:
ctx->key_type = OTX2_CPT_AES_128_BIT;
break;
case AES_KEYSIZE_192:
ctx->key_type = OTX2_CPT_AES_192_BIT;
break;
case AES_KEYSIZE_256:
ctx->key_type = OTX2_CPT_AES_256_BIT;
break;
default:
/* Invalid key length */
return -EINVAL;
}
memcpy(ctx->key + authkeylen, keys->enckey, keys->enckeylen);
ipad = ctx->ipad;
opad = ctx->opad;
memcpy(ipad, ctx->key, authkeylen);
memset(ipad + authkeylen, 0, bs - authkeylen);
memcpy(opad, ipad, bs);
@ -887,7 +899,7 @@ static int aead_hmac_init(struct crypto_aead *cipher)
crypto_shash_init(&ctx->sdesc->shash);
crypto_shash_update(&ctx->sdesc->shash, ipad, bs);
crypto_shash_export(&ctx->sdesc->shash, ipad);
ret = copy_pad(ctx->mac_type, ctx->ipad, ipad);
ret = swap_pad(ctx->mac_type, ipad);
if (ret)
goto calc_fail;
@ -895,25 +907,9 @@ static int aead_hmac_init(struct crypto_aead *cipher)
crypto_shash_init(&ctx->sdesc->shash);
crypto_shash_update(&ctx->sdesc->shash, opad, bs);
crypto_shash_export(&ctx->sdesc->shash, opad);
ret = copy_pad(ctx->mac_type, ctx->opad, opad);
if (ret)
goto calc_fail;
kfree(ipad);
kfree(opad);
return 0;
ret = swap_pad(ctx->mac_type, opad);
calc_fail:
kfree(ctx->ipad);
ctx->ipad = NULL;
kfree(ctx->opad);
ctx->opad = NULL;
kfree(ipad);
kfree(opad);
kfree(ctx->sdesc);
ctx->sdesc = NULL;
return ret;
}
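For orientation, the ipad/opad computation above is standard HMAC (RFC 2104) key preprocessing: the (possibly pre-digested) key is padded to the block size, XORed with the two pad bytes, one block of each pad is hashed, and the midstate is exported for the hardware to resume from. A generic sketch, not the driver code:

	/* ipad = (key || zero padding) XOR 0x36...  over the block size
	 * opad = (key || zero padding) XOR 0x5c...
	 * Keys longer than the block size are digested first (see above). */
	memcpy(ipad, key, keylen);
	memset(ipad + keylen, 0, bs - keylen);
	memcpy(opad, ipad, bs);
	for (i = 0; i < bs; i++) {
		ipad[i] ^= 0x36;
		opad[i] ^= 0x5c;
	}
	/* Hash exactly one block of each pad and export the midstate;
	 * swap_pad() then converts the exported words to big endian. */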
@ -921,87 +917,17 @@ static int otx2_cpt_aead_cbc_aes_sha_setkey(struct crypto_aead *cipher,
const unsigned char *key,
unsigned int keylen)
{
struct otx2_cpt_aead_ctx *ctx = crypto_aead_ctx_dma(cipher);
struct crypto_authenc_key_param *param;
int enckeylen = 0, authkeylen = 0;
struct rtattr *rta = (void *)key;
struct crypto_authenc_keys authenc_keys;
if (!RTA_OK(rta, keylen))
return -EINVAL;
if (rta->rta_type != CRYPTO_AUTHENC_KEYA_PARAM)
return -EINVAL;
if (RTA_PAYLOAD(rta) < sizeof(*param))
return -EINVAL;
param = RTA_DATA(rta);
enckeylen = be32_to_cpu(param->enckeylen);
key += RTA_ALIGN(rta->rta_len);
keylen -= RTA_ALIGN(rta->rta_len);
if (keylen < enckeylen)
return -EINVAL;
if (keylen > OTX2_CPT_MAX_KEY_SIZE)
return -EINVAL;
authkeylen = keylen - enckeylen;
memcpy(ctx->key, key, keylen);
switch (enckeylen) {
case AES_KEYSIZE_128:
ctx->key_type = OTX2_CPT_AES_128_BIT;
break;
case AES_KEYSIZE_192:
ctx->key_type = OTX2_CPT_AES_192_BIT;
break;
case AES_KEYSIZE_256:
ctx->key_type = OTX2_CPT_AES_256_BIT;
break;
default:
/* Invalid key length */
return -EINVAL;
}
ctx->enc_key_len = enckeylen;
ctx->auth_key_len = authkeylen;
return aead_hmac_init(cipher);
return crypto_authenc_extractkeys(&authenc_keys, key, keylen) ?:
aead_hmac_init(cipher, &authenc_keys);
}
static int otx2_cpt_aead_ecb_null_sha_setkey(struct crypto_aead *cipher,
const unsigned char *key,
unsigned int keylen)
{
struct otx2_cpt_aead_ctx *ctx = crypto_aead_ctx_dma(cipher);
struct crypto_authenc_key_param *param;
struct rtattr *rta = (void *)key;
int enckeylen = 0;
if (!RTA_OK(rta, keylen))
return -EINVAL;
if (rta->rta_type != CRYPTO_AUTHENC_KEYA_PARAM)
return -EINVAL;
if (RTA_PAYLOAD(rta) < sizeof(*param))
return -EINVAL;
param = RTA_DATA(rta);
enckeylen = be32_to_cpu(param->enckeylen);
key += RTA_ALIGN(rta->rta_len);
keylen -= RTA_ALIGN(rta->rta_len);
if (enckeylen != 0)
return -EINVAL;
if (keylen > OTX2_CPT_MAX_KEY_SIZE)
return -EINVAL;
memcpy(ctx->key, key, keylen);
ctx->enc_key_len = enckeylen;
ctx->auth_key_len = keylen;
return 0;
return otx2_cpt_aead_cbc_aes_sha_setkey(cipher, key, keylen);
}
static int otx2_cpt_aead_gcm_aes_setkey(struct crypto_aead *cipher,
@ -1702,14 +1628,6 @@ static int compare_func(const void *lptr, const void *rptr)
return 0;
}
static void swap_func(void *lptr, void *rptr, int size)
{
struct cpt_device_desc *ldesc = lptr;
struct cpt_device_desc *rdesc = rptr;
swap(*ldesc, *rdesc);
}
int otx2_cpt_crypto_init(struct pci_dev *pdev, struct module *mod,
int num_queues, int num_devices)
{
@ -1739,7 +1657,7 @@ int otx2_cpt_crypto_init(struct pci_dev *pdev, struct module *mod,
is_crypto_registered = true;
}
sort(se_devices.desc, count, sizeof(struct cpt_device_desc),
compare_func, swap_func);
compare_func, NULL);
unlock:
mutex_unlock(&mutex);


@ -1353,6 +1353,7 @@ static int __n2_register_one_hmac(struct n2_ahash_alg *n2ahash)
ahash->setkey = n2_hmac_async_setkey;
base = &ahash->halg.base;
err = -EINVAL;
if (snprintf(base->cra_name, CRYPTO_MAX_ALG_NAME, "hmac(%s)",
p->child_alg) >= CRYPTO_MAX_ALG_NAME)
goto out_free_p;
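err is preseeded to -EINVAL because the snprintf() check jumps straight to the shared error path; snprintf() returns the length the formatted string would have had, so a return value >= the buffer size signals truncation. A tiny illustration with a hypothetical buffer:

	char buf[8];

	/* "hmac(sha3-512)" needs 14 characters plus NUL, so snprintf()
	 * returns 14 >= sizeof(buf): the name was truncated, reject it. */
	if (snprintf(buf, sizeof(buf), "hmac(%s)", "sha3-512") >= sizeof(buf))
		return -EINVAL;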


@ -157,6 +157,7 @@ struct nx842_crypto_header_group {
} __packed;
struct nx842_crypto_header {
/* New members MUST be added within the struct_group() macro below. */
struct_group_tagged(nx842_crypto_header_hdr, hdr,
__be16 magic; /* NX842_CRYPTO_MAGIC */
__be16 ignore; /* decompressed end bytes to ignore */
@ -164,6 +165,8 @@ struct nx842_crypto_header {
);
struct nx842_crypto_header_group group[];
} __packed;
static_assert(offsetof(struct nx842_crypto_header, group) == sizeof(struct nx842_crypto_header_hdr),
"struct member likely outside of struct_group_tagged()");
#define NX842_CRYPTO_GROUP_MAX (0x20)


@ -36,14 +36,14 @@ struct qcom_rng {
void __iomem *base;
struct clk *clk;
struct hwrng hwrng;
struct qcom_rng_of_data *of_data;
struct qcom_rng_match_data *match_data;
};
struct qcom_rng_ctx {
struct qcom_rng *rng;
};
struct qcom_rng_of_data {
struct qcom_rng_match_data {
bool skip_init;
bool hwrng_support;
};
@ -155,7 +155,7 @@ static int qcom_rng_init(struct crypto_tfm *tfm)
ctx->rng = qcom_rng_dev;
if (!ctx->rng->of_data->skip_init)
if (!ctx->rng->match_data->skip_init)
return qcom_rng_enable(ctx->rng);
return 0;
@ -196,7 +196,7 @@ static int qcom_rng_probe(struct platform_device *pdev)
if (IS_ERR(rng->clk))
return PTR_ERR(rng->clk);
rng->of_data = (struct qcom_rng_of_data *)of_device_get_match_data(&pdev->dev);
rng->match_data = (struct qcom_rng_match_data *)device_get_match_data(&pdev->dev);
qcom_rng_dev = rng;
ret = crypto_register_rng(&qcom_rng_alg);
@ -206,7 +206,7 @@ static int qcom_rng_probe(struct platform_device *pdev)
return ret;
}
if (rng->of_data->hwrng_support) {
if (rng->match_data->hwrng_support) {
rng->hwrng.name = "qcom_hwrng";
rng->hwrng.read = qcom_hwrng_read;
rng->hwrng.quality = QCOM_TRNG_QUALITY;
@ -231,31 +231,31 @@ static void qcom_rng_remove(struct platform_device *pdev)
qcom_rng_dev = NULL;
}
static struct qcom_rng_of_data qcom_prng_of_data = {
static struct qcom_rng_match_data qcom_prng_match_data = {
.skip_init = false,
.hwrng_support = false,
};
static struct qcom_rng_of_data qcom_prng_ee_of_data = {
static struct qcom_rng_match_data qcom_prng_ee_match_data = {
.skip_init = true,
.hwrng_support = false,
};
static struct qcom_rng_of_data qcom_trng_of_data = {
static struct qcom_rng_match_data qcom_trng_match_data = {
.skip_init = true,
.hwrng_support = true,
};
static const struct acpi_device_id __maybe_unused qcom_rng_acpi_match[] = {
{ .id = "QCOM8160", .driver_data = 1 },
{ .id = "QCOM8160", .driver_data = (kernel_ulong_t)&qcom_prng_ee_match_data },
{}
};
MODULE_DEVICE_TABLE(acpi, qcom_rng_acpi_match);
static const struct of_device_id __maybe_unused qcom_rng_of_match[] = {
{ .compatible = "qcom,prng", .data = &qcom_prng_of_data },
{ .compatible = "qcom,prng-ee", .data = &qcom_prng_ee_of_data },
{ .compatible = "qcom,trng", .data = &qcom_trng_of_data },
{ .compatible = "qcom,prng", .data = &qcom_prng_match_data },
{ .compatible = "qcom,prng-ee", .data = &qcom_prng_ee_match_data },
{ .compatible = "qcom,trng", .data = &qcom_trng_match_data },
{}
};
MODULE_DEVICE_TABLE(of, qcom_rng_of_match);
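The of_* to device_* rename is what fixes ACPI: device_get_match_data() resolves match data for both firmware interfaces, provided the ACPI table carries a real pointer in .driver_data, as the QCOM8160 entry above now does. The general pattern, sketched with a hypothetical driver:

#include <linux/mod_devicetable.h>
#include <linux/platform_device.h>
#include <linux/property.h>

struct my_match_data { bool skip_init; };

static const struct my_match_data my_data = { .skip_init = true };

static const struct of_device_id my_of_match[] = {
	{ .compatible = "vendor,dev", .data = &my_data },
	{}
};

static const struct acpi_device_id my_acpi_match[] = {
	/* driver_data must carry the same kind of pointer as .data */
	{ .id = "ABCD0001", .driver_data = (kernel_ulong_t)&my_data },
	{}
};

static int my_probe(struct platform_device *pdev)
{
	const struct my_match_data *md = device_get_match_data(&pdev->dev);

	if (!md)
		return -ENODEV;	/* matched neither OF nor ACPI data */
	return 0;
}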


@ -14,11 +14,10 @@
struct simd_skcipher_alg;
struct skcipher_alg;
struct simd_skcipher_alg *simd_skcipher_create_compat(const char *algname,
struct simd_skcipher_alg *simd_skcipher_create_compat(struct skcipher_alg *ialg,
const char *algname,
const char *drvname,
const char *basename);
struct simd_skcipher_alg *simd_skcipher_create(const char *algname,
const char *basename);
void simd_skcipher_free(struct simd_skcipher_alg *alg);
int simd_register_skciphers_compat(struct skcipher_alg *algs, int count,
@ -32,13 +31,6 @@ void simd_unregister_skciphers(struct skcipher_alg *algs, int count,
struct simd_aead_alg;
struct aead_alg;
struct simd_aead_alg *simd_aead_create_compat(const char *algname,
const char *drvname,
const char *basename);
struct simd_aead_alg *simd_aead_create(const char *algname,
const char *basename);
void simd_aead_free(struct simd_aead_alg *alg);
int simd_register_aeads_compat(struct aead_alg *algs, int count,
struct simd_aead_alg **simd_algs);


@ -40,79 +40,26 @@ struct gcry_mpi {
typedef struct gcry_mpi *MPI;
#define mpi_get_nlimbs(a) ((a)->nlimbs)
#define mpi_has_sign(a) ((a)->sign)
/*-- mpiutil.c --*/
MPI mpi_alloc(unsigned nlimbs);
void mpi_clear(MPI a);
void mpi_free(MPI a);
int mpi_resize(MPI a, unsigned nlimbs);
static inline MPI mpi_new(unsigned int nbits)
{
return mpi_alloc((nbits + BITS_PER_MPI_LIMB - 1) / BITS_PER_MPI_LIMB);
}
MPI mpi_copy(MPI a);
MPI mpi_alloc_like(MPI a);
void mpi_snatch(MPI w, MPI u);
MPI mpi_set(MPI w, MPI u);
MPI mpi_set_ui(MPI w, unsigned long u);
MPI mpi_alloc_set_ui(unsigned long u);
void mpi_swap_cond(MPI a, MPI b, unsigned long swap);
/* Constants used to return constant MPIs. See mpi_init if you
* want to add more constants.
*/
#define MPI_NUMBER_OF_CONSTANTS 6
enum gcry_mpi_constants {
MPI_C_ZERO,
MPI_C_ONE,
MPI_C_TWO,
MPI_C_THREE,
MPI_C_FOUR,
MPI_C_EIGHT
};
MPI mpi_const(enum gcry_mpi_constants no);
/*-- mpicoder.c --*/
/* Different formats of external big integer representation. */
enum gcry_mpi_format {
GCRYMPI_FMT_NONE = 0,
GCRYMPI_FMT_STD = 1, /* Twos complement stored without length. */
GCRYMPI_FMT_PGP = 2, /* As used by OpenPGP (unsigned only). */
GCRYMPI_FMT_SSH = 3, /* As used by SSH (like STD but with length). */
GCRYMPI_FMT_HEX = 4, /* Hex format. */
GCRYMPI_FMT_USG = 5, /* Like STD but unsigned. */
GCRYMPI_FMT_OPAQUE = 8 /* Opaque format (some functions only). */
};
MPI mpi_read_raw_data(const void *xbuffer, size_t nbytes);
MPI mpi_read_from_buffer(const void *buffer, unsigned *ret_nread);
int mpi_fromstr(MPI val, const char *str);
MPI mpi_scanval(const char *string);
MPI mpi_read_raw_from_sgl(struct scatterlist *sgl, unsigned int len);
void *mpi_get_buffer(MPI a, unsigned *nbytes, int *sign);
int mpi_read_buffer(MPI a, uint8_t *buf, unsigned buf_len, unsigned *nbytes,
int *sign);
int mpi_write_to_sgl(MPI a, struct scatterlist *sg, unsigned nbytes,
int *sign);
int mpi_print(enum gcry_mpi_format format, unsigned char *buffer,
size_t buflen, size_t *nwritten, MPI a);
/*-- mpi-mod.c --*/
void mpi_mod(MPI rem, MPI dividend, MPI divisor);
/* Context used with Barrett reduction. */
struct barrett_ctx_s;
typedef struct barrett_ctx_s *mpi_barrett_t;
mpi_barrett_t mpi_barrett_init(MPI m, int copy);
void mpi_barrett_free(mpi_barrett_t ctx);
void mpi_mod_barrett(MPI r, MPI x, mpi_barrett_t ctx);
void mpi_mul_barrett(MPI w, MPI u, MPI v, mpi_barrett_t ctx);
int mpi_mod(MPI rem, MPI dividend, MPI divisor);
/*-- mpi-pow.c --*/
int mpi_powm(MPI res, MPI base, MPI exp, MPI mod);
@ -120,7 +67,6 @@ int mpi_powm(MPI res, MPI base, MPI exp, MPI mod);
/*-- mpi-cmp.c --*/
int mpi_cmp_ui(MPI u, ulong v);
int mpi_cmp(MPI u, MPI v);
int mpi_cmpabs(MPI u, MPI v);
/*-- mpi-sub-ui.c --*/
int mpi_sub_ui(MPI w, MPI u, unsigned long vval);
@ -129,138 +75,22 @@ int mpi_sub_ui(MPI w, MPI u, unsigned long vval);
void mpi_normalize(MPI a);
unsigned mpi_get_nbits(MPI a);
int mpi_test_bit(MPI a, unsigned int n);
void mpi_set_bit(MPI a, unsigned int n);
void mpi_set_highbit(MPI a, unsigned int n);
void mpi_clear_highbit(MPI a, unsigned int n);
void mpi_clear_bit(MPI a, unsigned int n);
void mpi_rshift_limbs(MPI a, unsigned int count);
void mpi_rshift(MPI x, MPI a, unsigned int n);
void mpi_lshift_limbs(MPI a, unsigned int count);
void mpi_lshift(MPI x, MPI a, unsigned int n);
int mpi_set_bit(MPI a, unsigned int n);
int mpi_rshift(MPI x, MPI a, unsigned int n);
/*-- mpi-add.c --*/
void mpi_add_ui(MPI w, MPI u, unsigned long v);
void mpi_add(MPI w, MPI u, MPI v);
void mpi_sub(MPI w, MPI u, MPI v);
void mpi_addm(MPI w, MPI u, MPI v, MPI m);
void mpi_subm(MPI w, MPI u, MPI v, MPI m);
int mpi_add(MPI w, MPI u, MPI v);
int mpi_sub(MPI w, MPI u, MPI v);
int mpi_addm(MPI w, MPI u, MPI v, MPI m);
int mpi_subm(MPI w, MPI u, MPI v, MPI m);
/*-- mpi-mul.c --*/
void mpi_mul(MPI w, MPI u, MPI v);
void mpi_mulm(MPI w, MPI u, MPI v, MPI m);
int mpi_mul(MPI w, MPI u, MPI v);
int mpi_mulm(MPI w, MPI u, MPI v, MPI m);
/*-- mpi-div.c --*/
void mpi_tdiv_r(MPI rem, MPI num, MPI den);
void mpi_fdiv_r(MPI rem, MPI dividend, MPI divisor);
void mpi_fdiv_q(MPI quot, MPI dividend, MPI divisor);
/*-- mpi-inv.c --*/
int mpi_invm(MPI x, MPI a, MPI n);
/*-- ec.c --*/
/* Object to represent a point in projective coordinates */
struct gcry_mpi_point {
MPI x;
MPI y;
MPI z;
};
typedef struct gcry_mpi_point *MPI_POINT;
/* Models describing an elliptic curve */
enum gcry_mpi_ec_models {
/* The Short Weierstrass equation is
* y^2 = x^3 + ax + b
*/
MPI_EC_WEIERSTRASS = 0,
/* The Montgomery equation is
* by^2 = x^3 + ax^2 + x
*/
MPI_EC_MONTGOMERY,
/* The Twisted Edwards equation is
* ax^2 + y^2 = 1 + bx^2y^2
* Note that we use 'b' instead of the commonly used 'd'.
*/
MPI_EC_EDWARDS
};
/* Dialects used with elliptic curves */
enum ecc_dialects {
ECC_DIALECT_STANDARD = 0,
ECC_DIALECT_ED25519,
ECC_DIALECT_SAFECURVE
};
/* This context is used with all our EC functions. */
struct mpi_ec_ctx {
enum gcry_mpi_ec_models model; /* The model describing this curve. */
enum ecc_dialects dialect; /* The ECC dialect used with the curve. */
int flags; /* Public key flags (not always used). */
unsigned int nbits; /* Number of bits. */
/* Domain parameters. Note that they may not all be set and if set
* the MPIs may be flagged as constant.
*/
MPI p; /* Prime specifying the field GF(p). */
MPI a; /* First coefficient of the Weierstrass equation. */
MPI b; /* Second coefficient of the Weierstrass equation. */
MPI_POINT G; /* Base point (generator). */
MPI n; /* Order of G. */
unsigned int h; /* Cofactor. */
/* The actual key. May not be set. */
MPI_POINT Q; /* Public key. */
MPI d; /* Private key. */
const char *name; /* Name of the curve. */
/* This structure is private to mpi/ec.c! */
struct {
struct {
unsigned int a_is_pminus3:1;
unsigned int two_inv_p:1;
} valid; /* Flags to help setting the helper vars below. */
int a_is_pminus3; /* True if A = P - 3. */
MPI two_inv_p;
mpi_barrett_t p_barrett;
/* Scratch variables. */
MPI scratch[11];
/* Helper for fast reduction. */
/* int nist_nbits; /\* If this is a NIST curve, the # of bits. *\/ */
/* MPI s[10]; */
/* MPI c; */
} t;
/* Curve specific computation routines for the field. */
void (*addm)(MPI w, MPI u, MPI v, struct mpi_ec_ctx *ctx);
void (*subm)(MPI w, MPI u, MPI v, struct mpi_ec_ctx *ec);
void (*mulm)(MPI w, MPI u, MPI v, struct mpi_ec_ctx *ctx);
void (*pow2)(MPI w, const MPI b, struct mpi_ec_ctx *ctx);
void (*mul2)(MPI w, MPI u, struct mpi_ec_ctx *ctx);
};
void mpi_ec_init(struct mpi_ec_ctx *ctx, enum gcry_mpi_ec_models model,
enum ecc_dialects dialect,
int flags, MPI p, MPI a, MPI b);
void mpi_ec_deinit(struct mpi_ec_ctx *ctx);
MPI_POINT mpi_point_new(unsigned int nbits);
void mpi_point_release(MPI_POINT p);
void mpi_point_init(MPI_POINT p);
void mpi_point_free_parts(MPI_POINT p);
int mpi_ec_get_affine(MPI x, MPI y, MPI_POINT point, struct mpi_ec_ctx *ctx);
void mpi_ec_add_points(MPI_POINT result,
MPI_POINT p1, MPI_POINT p2,
struct mpi_ec_ctx *ctx);
void mpi_ec_mul_point(MPI_POINT result,
MPI scalar, MPI_POINT point,
struct mpi_ec_ctx *ctx);
int mpi_ec_curve_point(MPI_POINT point, struct mpi_ec_ctx *ctx);
int mpi_tdiv_r(MPI rem, MPI num, MPI den);
int mpi_fdiv_r(MPI rem, MPI dividend, MPI divisor);
/* inline functions */


@ -404,7 +404,8 @@ void padata_do_serial(struct padata_priv *padata)
/* Sort in ascending order of sequence number. */
list_for_each_prev(pos, &reorder->list) {
cur = list_entry(pos, struct padata_priv, list);
if (cur->seq_nr < padata->seq_nr)
/* Compare by difference to consider integer wrap around */
if ((signed int)(cur->seq_nr - padata->seq_nr) < 0)
break;
}
list_add(&padata->list, pos);
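The cast-to-signed difference is the same serial-number idiom used by time_before(): it stays correct across a 32-bit wrap as long as the two sequence numbers are within 2^31 of each other. A worked example:

	/* cur->seq_nr    = 0xfffffffe   (just before the wrap)
	 * padata->seq_nr = 0x00000001   (just after the wrap)
	 *
	 * cur->seq_nr - padata->seq_nr = 0xfffffffd   (unsigned)
	 * (signed int)0xfffffffd       = -3 < 0  ->  cur is "earlier"
	 *
	 * The old `cur->seq_nr < padata->seq_nr` test would have ordered
	 * these two the wrong way around. */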
@ -512,9 +513,12 @@ void __init padata_do_multithreaded(struct padata_mt_job *job)
* thread function. Load balance large jobs between threads by
* increasing the number of chunks, guarantee at least the minimum
* chunk size from the caller, and honor the caller's alignment.
* Ensure chunk_size is at least 1 to prevent divide-by-0
* panic in padata_mt_helper().
*/
ps.chunk_size = job->size / (ps.nworks * load_balance_factor);
ps.chunk_size = max(ps.chunk_size, job->min_chunk);
ps.chunk_size = max(ps.chunk_size, 1ul);
ps.chunk_size = roundup(ps.chunk_size, job->align);
/*
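Concretely, with hypothetical numbers showing why the extra clamp is needed:

	/* job->size = 8, ps.nworks = 4, load_balance_factor = 4,
	 * job->min_chunk = 0, job->align = 8:
	 *
	 *   8 / (4 * 4)            -> 0   (integer division)
	 *   max(0, job->min_chunk) -> 0
	 *   max(0, 1ul)            -> 1   (the new clamp)
	 *   roundup(1, 8)          -> 8
	 *
	 * Without the 1ul clamp, chunk_size stays 0 and, per the comment
	 * above, padata_mt_helper() would divide by it. */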


@ -13,14 +13,12 @@ mpi-y = \
generic_mpih-rshift.o \
generic_mpih-sub1.o \
generic_mpih-add1.o \
ec.o \
mpicoder.o \
mpi-add.o \
mpi-bit.o \
mpi-cmp.o \
mpi-sub-ui.o \
mpi-div.o \
mpi-inv.o \
mpi-mod.o \
mpi-mul.o \
mpih-cmp.o \

File diff suppressed because it is too large


@ -13,62 +13,12 @@
#include "mpi-internal.h"
/****************
* Add the unsigned integer V to the mpi-integer U and store the
* result in W. U and V may be the same.
*/
void mpi_add_ui(MPI w, MPI u, unsigned long v)
{
mpi_ptr_t wp, up;
mpi_size_t usize, wsize;
int usign, wsign;
usize = u->nlimbs;
usign = u->sign;
wsign = 0;
/* If not space for W (and possible carry), increase space. */
wsize = usize + 1;
if (w->alloced < wsize)
mpi_resize(w, wsize);
/* These must be after realloc (U may be the same as W). */
up = u->d;
wp = w->d;
if (!usize) { /* simple */
wp[0] = v;
wsize = v ? 1:0;
} else if (!usign) { /* mpi is not negative */
mpi_limb_t cy;
cy = mpihelp_add_1(wp, up, usize, v);
wp[usize] = cy;
wsize = usize + cy;
} else {
/* The signs are different. Need exact comparison to determine
* which operand to subtract from which.
*/
if (usize == 1 && up[0] < v) {
wp[0] = v - up[0];
wsize = 1;
} else {
mpihelp_sub_1(wp, up, usize, v);
/* Size can decrease with at most one limb. */
wsize = usize - (wp[usize-1] == 0);
wsign = 1;
}
}
w->nlimbs = wsize;
w->sign = wsign;
}
void mpi_add(MPI w, MPI u, MPI v)
int mpi_add(MPI w, MPI u, MPI v)
{
mpi_ptr_t wp, up, vp;
mpi_size_t usize, vsize, wsize;
int usign, vsign, wsign;
int err;
if (u->nlimbs < v->nlimbs) { /* Swap U and V. */
usize = v->nlimbs;
@ -76,7 +26,9 @@ void mpi_add(MPI w, MPI u, MPI v)
vsize = u->nlimbs;
vsign = u->sign;
wsize = usize + 1;
RESIZE_IF_NEEDED(w, wsize);
err = RESIZE_IF_NEEDED(w, wsize);
if (err)
return err;
/* These must be after realloc (u or v may be the same as w). */
up = v->d;
vp = u->d;
@ -86,7 +38,9 @@ void mpi_add(MPI w, MPI u, MPI v)
vsize = v->nlimbs;
vsign = v->sign;
wsize = usize + 1;
RESIZE_IF_NEEDED(w, wsize);
err = RESIZE_IF_NEEDED(w, wsize);
if (err)
return err;
/* These must be after realloc (u or v may be the same as w). */
up = u->d;
vp = v->d;
@ -128,28 +82,37 @@ void mpi_add(MPI w, MPI u, MPI v)
w->nlimbs = wsize;
w->sign = wsign;
return 0;
}
EXPORT_SYMBOL_GPL(mpi_add);
void mpi_sub(MPI w, MPI u, MPI v)
int mpi_sub(MPI w, MPI u, MPI v)
{
MPI vv = mpi_copy(v);
int err;
MPI vv;
vv = mpi_copy(v);
if (!vv)
return -ENOMEM;
vv->sign = !vv->sign;
mpi_add(w, u, vv);
err = mpi_add(w, u, vv);
mpi_free(vv);
return err;
}
EXPORT_SYMBOL_GPL(mpi_sub);
void mpi_addm(MPI w, MPI u, MPI v, MPI m)
int mpi_addm(MPI w, MPI u, MPI v, MPI m)
{
mpi_add(w, u, v);
mpi_mod(w, w, m);
return mpi_add(w, u, v) ?:
mpi_mod(w, w, m);
}
EXPORT_SYMBOL_GPL(mpi_addm);
void mpi_subm(MPI w, MPI u, MPI v, MPI m)
int mpi_subm(MPI w, MPI u, MPI v, MPI m)
{
mpi_sub(w, u, v);
mpi_mod(w, w, m);
return mpi_sub(w, u, v) ?:
mpi_mod(w, w, m);
}
EXPORT_SYMBOL_GPL(mpi_subm);
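The `?:` in these wrappers is GCC's Elvis operator: `a ?: b` yields a when a is nonzero, otherwise b, so each wrapper returns the first nonzero error code. The longhand equivalent of the new mpi_addm():

int mpi_addm(MPI w, MPI u, MPI v, MPI m)
{
	int err;

	err = mpi_add(w, u, v);
	if (err)
		return err;	/* first failure wins */
	return mpi_mod(w, w, m);
}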


@ -32,7 +32,6 @@ void mpi_normalize(MPI a)
for (; a->nlimbs && !a->d[a->nlimbs - 1]; a->nlimbs--)
;
}
EXPORT_SYMBOL_GPL(mpi_normalize);
/****************
* Return the number of bits in A.
@ -77,9 +76,10 @@ EXPORT_SYMBOL_GPL(mpi_test_bit);
/****************
* Set bit N of A.
*/
void mpi_set_bit(MPI a, unsigned int n)
int mpi_set_bit(MPI a, unsigned int n)
{
unsigned int i, limbno, bitno;
int err;
limbno = n / BITS_PER_MPI_LIMB;
bitno = n % BITS_PER_MPI_LIMB;
@ -87,106 +87,31 @@ void mpi_set_bit(MPI a, unsigned int n)
if (limbno >= a->nlimbs) {
for (i = a->nlimbs; i < a->alloced; i++)
a->d[i] = 0;
mpi_resize(a, limbno+1);
err = mpi_resize(a, limbno+1);
if (err)
return err;
a->nlimbs = limbno+1;
}
a->d[limbno] |= (A_LIMB_1<<bitno);
}
/****************
* Set bit N of A. and clear all bits above
*/
void mpi_set_highbit(MPI a, unsigned int n)
{
unsigned int i, limbno, bitno;
limbno = n / BITS_PER_MPI_LIMB;
bitno = n % BITS_PER_MPI_LIMB;
if (limbno >= a->nlimbs) {
for (i = a->nlimbs; i < a->alloced; i++)
a->d[i] = 0;
mpi_resize(a, limbno+1);
a->nlimbs = limbno+1;
}
a->d[limbno] |= (A_LIMB_1<<bitno);
for (bitno++; bitno < BITS_PER_MPI_LIMB; bitno++)
a->d[limbno] &= ~(A_LIMB_1 << bitno);
a->nlimbs = limbno+1;
}
EXPORT_SYMBOL_GPL(mpi_set_highbit);
/****************
* clear bit N of A and all bits above
*/
void mpi_clear_highbit(MPI a, unsigned int n)
{
unsigned int limbno, bitno;
limbno = n / BITS_PER_MPI_LIMB;
bitno = n % BITS_PER_MPI_LIMB;
if (limbno >= a->nlimbs)
return; /* not allocated, therefore no need to clear bits :-) */
for ( ; bitno < BITS_PER_MPI_LIMB; bitno++)
a->d[limbno] &= ~(A_LIMB_1 << bitno);
a->nlimbs = limbno+1;
}
/****************
* Clear bit N of A.
*/
void mpi_clear_bit(MPI a, unsigned int n)
{
unsigned int limbno, bitno;
limbno = n / BITS_PER_MPI_LIMB;
bitno = n % BITS_PER_MPI_LIMB;
if (limbno >= a->nlimbs)
return; /* Don't need to clear this bit, it's far too left. */
a->d[limbno] &= ~(A_LIMB_1 << bitno);
}
EXPORT_SYMBOL_GPL(mpi_clear_bit);
/****************
* Shift A by COUNT limbs to the right
* This is used only within the MPI library
*/
void mpi_rshift_limbs(MPI a, unsigned int count)
{
mpi_ptr_t ap = a->d;
mpi_size_t n = a->nlimbs;
unsigned int i;
if (count >= n) {
a->nlimbs = 0;
return;
}
for (i = 0; i < n - count; i++)
ap[i] = ap[i+count];
ap[i] = 0;
a->nlimbs -= count;
return 0;
}
/*
* Shift A by N bits to the right.
*/
void mpi_rshift(MPI x, MPI a, unsigned int n)
int mpi_rshift(MPI x, MPI a, unsigned int n)
{
mpi_size_t xsize;
unsigned int i;
unsigned int nlimbs = (n/BITS_PER_MPI_LIMB);
unsigned int nbits = (n%BITS_PER_MPI_LIMB);
int err;
if (x == a) {
/* In-place operation. */
if (nlimbs >= x->nlimbs) {
x->nlimbs = 0;
return;
return 0;
}
if (nlimbs) {
@ -201,7 +126,9 @@ void mpi_rshift(MPI x, MPI a, unsigned int n)
/* Copy and shift by more or equal bits than in a limb. */
xsize = a->nlimbs;
x->sign = a->sign;
RESIZE_IF_NEEDED(x, xsize);
err = RESIZE_IF_NEEDED(x, xsize);
if (err)
return err;
x->nlimbs = xsize;
for (i = 0; i < a->nlimbs; i++)
x->d[i] = a->d[i];
@ -209,7 +136,7 @@ void mpi_rshift(MPI x, MPI a, unsigned int n)
if (nlimbs >= x->nlimbs) {
x->nlimbs = 0;
return;
return 0;
}
for (i = 0; i < x->nlimbs - nlimbs; i++)
@ -223,7 +150,9 @@ void mpi_rshift(MPI x, MPI a, unsigned int n)
/* Copy and shift by less than bits in a limb. */
xsize = a->nlimbs;
x->sign = a->sign;
RESIZE_IF_NEEDED(x, xsize);
err = RESIZE_IF_NEEDED(x, xsize);
if (err)
return err;
x->nlimbs = xsize;
if (xsize) {
@ -239,68 +168,7 @@ void mpi_rshift(MPI x, MPI a, unsigned int n)
}
}
MPN_NORMALIZE(x->d, x->nlimbs);
return 0;
}
EXPORT_SYMBOL_GPL(mpi_rshift);
/****************
* Shift A by COUNT limbs to the left
* This is used only within the MPI library
*/
void mpi_lshift_limbs(MPI a, unsigned int count)
{
mpi_ptr_t ap;
int n = a->nlimbs;
int i;
if (!count || !n)
return;
RESIZE_IF_NEEDED(a, n+count);
ap = a->d;
for (i = n-1; i >= 0; i--)
ap[i+count] = ap[i];
for (i = 0; i < count; i++)
ap[i] = 0;
a->nlimbs += count;
}
/*
* Shift A by N bits to the left.
*/
void mpi_lshift(MPI x, MPI a, unsigned int n)
{
unsigned int nlimbs = (n/BITS_PER_MPI_LIMB);
unsigned int nbits = (n%BITS_PER_MPI_LIMB);
if (x == a && !n)
return; /* In-place shift with an amount of zero. */
if (x != a) {
/* Copy A to X. */
unsigned int alimbs = a->nlimbs;
int asign = a->sign;
mpi_ptr_t xp, ap;
RESIZE_IF_NEEDED(x, alimbs+nlimbs+1);
xp = x->d;
ap = a->d;
MPN_COPY(xp, ap, alimbs);
x->nlimbs = alimbs;
x->flags = a->flags;
x->sign = asign;
}
if (nlimbs && !nbits) {
/* Shift a full number of limbs. */
mpi_lshift_limbs(x, nlimbs);
} else if (n) {
/* We use a very dumb approach: Shift left by the number of
* limbs plus one and then fix it up by an rshift.
*/
mpi_lshift_limbs(x, nlimbs+1);
mpi_rshift(x, x, BITS_PER_MPI_LIMB - nbits);
}
MPN_NORMALIZE(x->d, x->nlimbs);
}


@ -45,54 +45,28 @@ int mpi_cmp_ui(MPI u, unsigned long v)
}
EXPORT_SYMBOL_GPL(mpi_cmp_ui);
static int do_mpi_cmp(MPI u, MPI v, int absmode)
int mpi_cmp(MPI u, MPI v)
{
mpi_size_t usize;
mpi_size_t vsize;
int usign;
int vsign;
mpi_size_t usize, vsize;
int cmp;
mpi_normalize(u);
mpi_normalize(v);
usize = u->nlimbs;
vsize = v->nlimbs;
usign = absmode ? 0 : u->sign;
vsign = absmode ? 0 : v->sign;
/* Compare sign bits. */
if (!usign && vsign)
if (!u->sign && v->sign)
return 1;
if (usign && !vsign)
if (u->sign && !v->sign)
return -1;
/* U and V are either both positive or both negative. */
if (usize != vsize && !usign && !vsign)
if (usize != vsize && !u->sign && !v->sign)
return usize - vsize;
if (usize != vsize && usign && vsign)
return vsize + usize;
if (usize != vsize && u->sign && v->sign)
return vsize - usize;
if (!usize)
return 0;
cmp = mpihelp_cmp(u->d, v->d, usize);
if (!cmp)
return 0;
if ((cmp < 0?1:0) == (usign?1:0))
return 1;
return -1;
}
int mpi_cmp(MPI u, MPI v)
{
return do_mpi_cmp(u, v, 0);
if (u->sign)
return -cmp;
return cmp;
}
EXPORT_SYMBOL_GPL(mpi_cmp);
int mpi_cmpabs(MPI u, MPI v)
{
return do_mpi_cmp(u, v, 1);
}
EXPORT_SYMBOL_GPL(mpi_cmpabs);
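Besides dropping the absmode indirection, the rewrite fixes the negative/negative size comparison: the old `return vsize + usize` is always positive, so it claimed u > v whenever both operands were negative with differing limb counts. A worked example, assuming 64-bit limbs:

	/* u = -(2^130) -> usize = 3 limbs
	 * v = -1       -> vsize = 1 limb
	 *
	 * old: vsize + usize = 4 > 0, i.e. "u > v"  -- wrong, -(2^130) < -1
	 * new: vsize - usize = -2 < 0, i.e. "u < v" -- correct: with equal
	 *      signs, more limbs means larger magnitude, which for negative
	 *      numbers means the smaller value. */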


@ -14,13 +14,13 @@
#include "mpi-internal.h"
#include "longlong.h"
void mpi_tdiv_qr(MPI quot, MPI rem, MPI num, MPI den);
void mpi_fdiv_qr(MPI quot, MPI rem, MPI dividend, MPI divisor);
int mpi_tdiv_qr(MPI quot, MPI rem, MPI num, MPI den);
void mpi_fdiv_r(MPI rem, MPI dividend, MPI divisor)
int mpi_fdiv_r(MPI rem, MPI dividend, MPI divisor)
{
int divisor_sign = divisor->sign;
MPI temp_divisor = NULL;
int err;
/* We need the original value of the divisor after the remainder has been
* preliminarily calculated. We have to copy it to temporary space if it's
@ -28,44 +28,22 @@ void mpi_fdiv_r(MPI rem, MPI dividend, MPI divisor)
*/
if (rem == divisor) {
temp_divisor = mpi_copy(divisor);
if (!temp_divisor)
return -ENOMEM;
divisor = temp_divisor;
}
mpi_tdiv_r(rem, dividend, divisor);
err = mpi_tdiv_r(rem, dividend, divisor);
if (err)
goto free_temp_divisor;
if (((divisor_sign?1:0) ^ (dividend->sign?1:0)) && rem->nlimbs)
mpi_add(rem, rem, divisor);
err = mpi_add(rem, rem, divisor);
if (temp_divisor)
mpi_free(temp_divisor);
}
free_temp_divisor:
mpi_free(temp_divisor);
void mpi_fdiv_q(MPI quot, MPI dividend, MPI divisor)
{
MPI tmp = mpi_alloc(mpi_get_nlimbs(quot));
mpi_fdiv_qr(quot, tmp, dividend, divisor);
mpi_free(tmp);
}
void mpi_fdiv_qr(MPI quot, MPI rem, MPI dividend, MPI divisor)
{
int divisor_sign = divisor->sign;
MPI temp_divisor = NULL;
if (quot == divisor || rem == divisor) {
temp_divisor = mpi_copy(divisor);
divisor = temp_divisor;
}
mpi_tdiv_qr(quot, rem, dividend, divisor);
if ((divisor_sign ^ dividend->sign) && rem->nlimbs) {
mpi_sub_ui(quot, quot, 1);
mpi_add(rem, rem, divisor);
}
if (temp_divisor)
mpi_free(temp_divisor);
return err;
}
/* If den == quot, den needs temporary storage.
@ -75,12 +53,12 @@ void mpi_fdiv_qr(MPI quot, MPI rem, MPI dividend, MPI divisor)
* i.e. no extra storage should be allocated.
*/
void mpi_tdiv_r(MPI rem, MPI num, MPI den)
int mpi_tdiv_r(MPI rem, MPI num, MPI den)
{
mpi_tdiv_qr(NULL, rem, num, den);
return mpi_tdiv_qr(NULL, rem, num, den);
}
void mpi_tdiv_qr(MPI quot, MPI rem, MPI num, MPI den)
int mpi_tdiv_qr(MPI quot, MPI rem, MPI num, MPI den)
{
mpi_ptr_t np, dp;
mpi_ptr_t qp, rp;
@ -93,13 +71,16 @@ void mpi_tdiv_qr(MPI quot, MPI rem, MPI num, MPI den)
mpi_limb_t q_limb;
mpi_ptr_t marker[5];
int markidx = 0;
int err;
/* Ensure space is enough for quotient and remainder.
* We need space for an extra limb in the remainder, because it's
* up-shifted (normalized) below.
*/
rsize = nsize + 1;
mpi_resize(rem, rsize);
err = mpi_resize(rem, rsize);
if (err)
return err;
qsize = rsize - dsize; /* qsize cannot be bigger than this. */
if (qsize <= 0) {
@ -115,11 +96,14 @@ void mpi_tdiv_qr(MPI quot, MPI rem, MPI num, MPI den)
quot->nlimbs = 0;
quot->sign = 0;
}
return;
return 0;
}
if (quot)
mpi_resize(quot, qsize);
if (quot) {
err = mpi_resize(quot, qsize);
if (err)
return err;
}
/* Read pointers here, when reallocation is finished. */
np = num->d;
@ -141,10 +125,10 @@ void mpi_tdiv_qr(MPI quot, MPI rem, MPI num, MPI den)
rsize = rlimb != 0?1:0;
rem->nlimbs = rsize;
rem->sign = sign_remainder;
return;
return 0;
}
err = -ENOMEM;
if (quot) {
qp = quot->d;
/* Make sure QP and NP point to different objects. Otherwise the
@ -152,6 +136,8 @@ void mpi_tdiv_qr(MPI quot, MPI rem, MPI num, MPI den)
*/
if (qp == np) { /* Copy NP object to temporary space. */
np = marker[markidx++] = mpi_alloc_limb_space(nsize);
if (!np)
goto out_free_marker;
MPN_COPY(np, qp, nsize);
}
} else /* Put quotient at top of remainder. */
@ -172,6 +158,8 @@ void mpi_tdiv_qr(MPI quot, MPI rem, MPI num, MPI den)
* the original contents of the denominator.
*/
tp = marker[markidx++] = mpi_alloc_limb_space(dsize);
if (!tp)
goto out_free_marker;
mpihelp_lshift(tp, dp, dsize, normalization_steps);
dp = tp;
@ -193,6 +181,8 @@ void mpi_tdiv_qr(MPI quot, MPI rem, MPI num, MPI den)
mpi_ptr_t tp;
tp = marker[markidx++] = mpi_alloc_limb_space(dsize);
if (!tp)
goto out_free_marker;
MPN_COPY(tp, dp, dsize);
dp = tp;
}
@ -227,8 +217,14 @@ void mpi_tdiv_qr(MPI quot, MPI rem, MPI num, MPI den)
rem->nlimbs = rsize;
rem->sign = sign_remainder;
err = 0;
out_free_marker:
while (markidx) {
markidx--;
mpi_free_limb_space(marker[markidx]);
}
return err;
}


@ -52,11 +52,12 @@
typedef mpi_limb_t *mpi_ptr_t; /* pointer to a limb */
typedef int mpi_size_t; /* (must be a signed type) */
#define RESIZE_IF_NEEDED(a, b) \
do { \
if ((a)->alloced < (b)) \
mpi_resize((a), (b)); \
} while (0)
static inline int RESIZE_IF_NEEDED(MPI a, unsigned b)
{
if (a->alloced < b)
return mpi_resize(a, b);
return 0;
}
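The old do { } while (0) macro swallowed mpi_resize()'s return value, so an -ENOMEM was silently ignored; a static inline can return it, which is what enables the `err = RESIZE_IF_NEEDED(...)` checks threaded through the callers above. Schematically, the caller-side change:

	/* before: an allocation failure inside the macro was lost */
	RESIZE_IF_NEEDED(w, wsize);

	/* after: the failure propagates */
	err = RESIZE_IF_NEEDED(w, wsize);
	if (err)
		return err;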
/* Copy N limbs from S to D. */
#define MPN_COPY(d, s, n) \
@ -66,14 +67,6 @@ typedef int mpi_size_t; /* (must be a signed type) */
(d)[_i] = (s)[_i]; \
} while (0)
#define MPN_COPY_INCR(d, s, n) \
do { \
mpi_size_t _i; \
for (_i = 0; _i < (n); _i++) \
(d)[_i] = (s)[_i]; \
} while (0)
#define MPN_COPY_DECR(d, s, n) \
do { \
mpi_size_t _i; \
@ -181,8 +174,6 @@ int mpihelp_mul(mpi_ptr_t prodp, mpi_ptr_t up, mpi_size_t usize,
void mpih_sqr_n_basecase(mpi_ptr_t prodp, mpi_ptr_t up, mpi_size_t size);
void mpih_sqr_n(mpi_ptr_t prodp, mpi_ptr_t up, mpi_size_t size,
mpi_ptr_t tspace);
void mpihelp_mul_n(mpi_ptr_t prodp,
mpi_ptr_t up, mpi_ptr_t vp, mpi_size_t size);
int mpihelp_mul_karatsuba_case(mpi_ptr_t prodp,
mpi_ptr_t up, mpi_size_t usize,


@ -1,143 +0,0 @@
/* mpi-inv.c - MPI functions
* Copyright (C) 1998, 2001, 2002, 2003 Free Software Foundation, Inc.
*
* This file is part of Libgcrypt.
*
* Libgcrypt is free software; you can redistribute it and/or modify
* it under the terms of the GNU Lesser General Public License as
* published by the Free Software Foundation; either version 2.1 of
* the License, or (at your option) any later version.
*
* Libgcrypt is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with this program; if not, see <http://www.gnu.org/licenses/>.
*/
#include "mpi-internal.h"
/****************
* Calculate the multiplicative inverse X of A mod N
* That is: Find the solution x for
* 1 = (a*x) mod n
*/
int mpi_invm(MPI x, MPI a, MPI n)
{
/* Extended Euclid's algorithm (See TAOCP Vol II, 4.5.2, Alg X)
* modified according to Michael Penk's solution for Exercise 35
* with further enhancement
*/
MPI u, v, u1, u2 = NULL, u3, v1, v2 = NULL, v3, t1, t2 = NULL, t3;
unsigned int k;
int sign;
int odd;
if (!mpi_cmp_ui(a, 0))
return 0; /* Inverse does not exist. */
if (!mpi_cmp_ui(n, 1))
return 0; /* Inverse does not exist. */
u = mpi_copy(a);
v = mpi_copy(n);
for (k = 0; !mpi_test_bit(u, 0) && !mpi_test_bit(v, 0); k++) {
mpi_rshift(u, u, 1);
mpi_rshift(v, v, 1);
}
odd = mpi_test_bit(v, 0);
u1 = mpi_alloc_set_ui(1);
if (!odd)
u2 = mpi_alloc_set_ui(0);
u3 = mpi_copy(u);
v1 = mpi_copy(v);
if (!odd) {
v2 = mpi_alloc(mpi_get_nlimbs(u));
mpi_sub(v2, u1, u); /* U is used as const 1 */
}
v3 = mpi_copy(v);
if (mpi_test_bit(u, 0)) { /* u is odd */
t1 = mpi_alloc_set_ui(0);
if (!odd) {
t2 = mpi_alloc_set_ui(1);
t2->sign = 1;
}
t3 = mpi_copy(v);
t3->sign = !t3->sign;
goto Y4;
} else {
t1 = mpi_alloc_set_ui(1);
if (!odd)
t2 = mpi_alloc_set_ui(0);
t3 = mpi_copy(u);
}
do {
do {
if (!odd) {
if (mpi_test_bit(t1, 0) || mpi_test_bit(t2, 0)) {
/* one is odd */
mpi_add(t1, t1, v);
mpi_sub(t2, t2, u);
}
mpi_rshift(t1, t1, 1);
mpi_rshift(t2, t2, 1);
mpi_rshift(t3, t3, 1);
} else {
if (mpi_test_bit(t1, 0))
mpi_add(t1, t1, v);
mpi_rshift(t1, t1, 1);
mpi_rshift(t3, t3, 1);
}
Y4:
;
} while (!mpi_test_bit(t3, 0)); /* while t3 is even */
if (!t3->sign) {
mpi_set(u1, t1);
if (!odd)
mpi_set(u2, t2);
mpi_set(u3, t3);
} else {
mpi_sub(v1, v, t1);
sign = u->sign; u->sign = !u->sign;
if (!odd)
mpi_sub(v2, u, t2);
u->sign = sign;
sign = t3->sign; t3->sign = !t3->sign;
mpi_set(v3, t3);
t3->sign = sign;
}
mpi_sub(t1, u1, v1);
if (!odd)
mpi_sub(t2, u2, v2);
mpi_sub(t3, u3, v3);
if (t1->sign) {
mpi_add(t1, t1, v);
if (!odd)
mpi_sub(t2, t2, u);
}
} while (mpi_cmp_ui(t3, 0)); /* while t3 != 0 */
/* mpi_lshift( u3, k ); */
mpi_set(x, u1);
mpi_free(u1);
mpi_free(v1);
mpi_free(t1);
if (!odd) {
mpi_free(u2);
mpi_free(v2);
mpi_free(t2);
}
mpi_free(u3);
mpi_free(v3);
mpi_free(t3);
mpi_free(u);
mpi_free(v);
return 1;
}
EXPORT_SYMBOL_GPL(mpi_invm);


@ -5,153 +5,9 @@
* This file is part of Libgcrypt.
*/
#include "mpi-internal.h"
#include "longlong.h"
/* Context used with Barrett reduction. */
struct barrett_ctx_s {
MPI m; /* The modulus - may not be modified. */
int m_copied; /* If true, M needs to be released. */
int k;
MPI y;
MPI r1; /* Helper MPI. */
MPI r2; /* Helper MPI. */
MPI r3; /* Helper MPI allocated on demand. */
};
void mpi_mod(MPI rem, MPI dividend, MPI divisor)
int mpi_mod(MPI rem, MPI dividend, MPI divisor)
{
mpi_fdiv_r(rem, dividend, divisor);
}
/* This function returns a new context for Barrett based operations on
* the modulus M. This context needs to be released using
* _gcry_mpi_barrett_free. If COPY is true M will be transferred to
* the context and the user may change M. If COPY is false, M may not
* be changed until gcry_mpi_barrett_free has been called.
*/
mpi_barrett_t mpi_barrett_init(MPI m, int copy)
{
mpi_barrett_t ctx;
MPI tmp;
mpi_normalize(m);
ctx = kcalloc(1, sizeof(*ctx), GFP_KERNEL);
if (!ctx)
return NULL;
if (copy) {
ctx->m = mpi_copy(m);
ctx->m_copied = 1;
} else
ctx->m = m;
ctx->k = mpi_get_nlimbs(m);
tmp = mpi_alloc(ctx->k + 1);
/* Barrett precalculation: y = floor(b^(2k) / m). */
mpi_set_ui(tmp, 1);
mpi_lshift_limbs(tmp, 2 * ctx->k);
mpi_fdiv_q(tmp, tmp, m);
ctx->y = tmp;
ctx->r1 = mpi_alloc(2 * ctx->k + 1);
ctx->r2 = mpi_alloc(2 * ctx->k + 1);
return ctx;
}
void mpi_barrett_free(mpi_barrett_t ctx)
{
if (ctx) {
mpi_free(ctx->y);
mpi_free(ctx->r1);
mpi_free(ctx->r2);
if (ctx->r3)
mpi_free(ctx->r3);
if (ctx->m_copied)
mpi_free(ctx->m);
kfree(ctx);
}
}
/* R = X mod M
*
* Using Barrett reduction. Before using this function
* _gcry_mpi_barrett_init must have been called to do the
* precalculations. CTX is the context created by this precalculation
* and also conveys M. If the Barrett reduction could not be done, a
* straightforward reduction method is used.
*
* We assume that these conditions are met:
* Input: x =(x_2k-1 ...x_0)_b
* m =(m_k-1 ....m_0)_b with m_k-1 != 0
* Output: r = x mod m
*/
void mpi_mod_barrett(MPI r, MPI x, mpi_barrett_t ctx)
{
MPI m = ctx->m;
int k = ctx->k;
MPI y = ctx->y;
MPI r1 = ctx->r1;
MPI r2 = ctx->r2;
int sign;
mpi_normalize(x);
if (mpi_get_nlimbs(x) > 2*k) {
mpi_mod(r, x, m);
return;
}
sign = x->sign;
x->sign = 0;
/* 1. q1 = floor( x / b^k-1)
* q2 = q1 * y
* q3 = floor( q2 / b^k+1 )
* Actually, we don't need qx, we can work directly on r2
*/
mpi_set(r2, x);
mpi_rshift_limbs(r2, k-1);
mpi_mul(r2, r2, y);
mpi_rshift_limbs(r2, k+1);
/* 2. r1 = x mod b^k+1
* r2 = q3 * m mod b^k+1
* r = r1 - r2
* 3. if r < 0 then r = r + b^k+1
*/
mpi_set(r1, x);
if (r1->nlimbs > k+1) /* Quick modulo operation. */
r1->nlimbs = k+1;
mpi_mul(r2, r2, m);
if (r2->nlimbs > k+1) /* Quick modulo operation. */
r2->nlimbs = k+1;
mpi_sub(r, r1, r2);
if (mpi_has_sign(r)) {
if (!ctx->r3) {
ctx->r3 = mpi_alloc(k + 2);
mpi_set_ui(ctx->r3, 1);
mpi_lshift_limbs(ctx->r3, k + 1);
}
mpi_add(r, r, ctx->r3);
}
/* 4. while r >= m do r = r - m */
while (mpi_cmp(r, m) >= 0)
mpi_sub(r, r, m);
x->sign = sign;
}
void mpi_mul_barrett(MPI w, MPI u, MPI v, mpi_barrett_t ctx)
{
mpi_mul(w, u, v);
mpi_mod_barrett(w, w, ctx);
return mpi_fdiv_r(rem, dividend, divisor);
}

Some files were not shown because too many files have changed in this diff