linux/crypto/ctr.c
Herbert Xu 0971eb0de9 [CRYPTO] ctr: Fix multi-page processing
When the data spans a page boundary, CTR may incorrectly process a
partial block in the middle of the stream, because the blkcipher walking
code may supply partial blocks mid-stream as long as the total length of
the supplied data is more than a block.  CTR is supposed to return any
unused partial block to the walker in that case.

This patch fixes this by doing exactly that: partial blocks are returned
to the walker unless we received less than a block's worth of data to
start with.

This also allows us to optimise the bulk of the processing, since we no
longer have to worry about partial blocks until the very end (see the
standalone sketch below).

Thanks to Tan Swee Heng for fixes and actually testing this :)

Signed-off-by: Herbert Xu <herbert@gondor.apana.org.au>
2008-01-11 08:16:24 +11:00
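
As an illustration of the block/tail split this change establishes, here
is a minimal standalone sketch in userspace C.  The toy_encrypt(),
ctr_inc() and ctr_crypt() helpers are hypothetical stand-ins written for
this illustration, not the kernel API; only the loop structure mirrors
crypto_ctr_crypt() in the file below.

#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define BSIZE 16

/* Toy stand-in for a real block cipher -- NOT secure, illustration only. */
static void toy_encrypt(uint8_t dst[BSIZE], const uint8_t src[BSIZE])
{
	for (int i = 0; i < BSIZE; i++)
		dst[i] = (uint8_t)(src[i] ^ 0xAA ^ i);
}

/* Big-endian counter increment, analogous to the kernel's crypto_inc(). */
static void ctr_inc(uint8_t *ctr, unsigned int size)
{
	for (int i = (int)size - 1; i >= 0; i--)
		if (++ctr[i] != 0)
			break;
}

/*
 * CTR with the same structure as the patched kernel code: whole blocks
 * in the bulk loop, at most one sub-block tail at the very end.
 */
static void ctr_crypt(uint8_t ctrblk[BSIZE], uint8_t *dst,
		      const uint8_t *src, size_t nbytes)
{
	uint8_t keystream[BSIZE];

	while (nbytes >= BSIZE) {	/* bulk: full blocks only */
		toy_encrypt(keystream, ctrblk);
		for (int i = 0; i < BSIZE; i++)
			dst[i] = src[i] ^ keystream[i];
		ctr_inc(ctrblk, BSIZE);
		src += BSIZE;
		dst += BSIZE;
		nbytes -= BSIZE;
	}
	if (nbytes) {			/* final partial block */
		toy_encrypt(keystream, ctrblk);
		for (size_t i = 0; i < nbytes; i++)
			dst[i] = src[i] ^ keystream[i];
	}
}

int main(void)
{
	const uint8_t msg[37] = "spans more than two 16-byte blocks!!";
	uint8_t ctr[BSIZE] = { 0 }, enc[37], dec[37];

	ctr_crypt(ctr, enc, msg, sizeof(msg));	/* encrypt */
	memset(ctr, 0, sizeof(ctr));		/* reset counter */
	ctr_crypt(ctr, dec, enc, sizeof(enc));	/* decrypt == encrypt */
	printf("round trip %s\n",
	       memcmp(msg, dec, sizeof(msg)) ? "FAILED" : "ok");
	return 0;
}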

/*
 * CTR: Counter mode
 *
 * (C) Copyright IBM Corp. 2007 - Joy Latten <latten@us.ibm.com>
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by the Free
 * Software Foundation; either version 2 of the License, or (at your option)
 * any later version.
 *
 */

#include <crypto/algapi.h>
#include <linux/err.h>
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/random.h>
#include <linux/scatterlist.h>
#include <linux/slab.h>
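
/*
 * Per-instance parameters parsed from the template name
 * ctr(cipher,noncesize,ivsize,countersize).  The counter block is laid
 * out as nonce || IV || counter within one cipher block, with the
 * counter occupying the last countersize bytes.
 */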
struct ctr_instance_ctx {
	struct crypto_spawn alg;
	unsigned int noncesize;
	unsigned int ivsize;
	unsigned int countersize;
};

struct crypto_ctr_ctx {
	struct crypto_cipher *child;
	u8 *nonce;
};

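/*
 * The template key is the raw cipher key with the nonce appended:
 * crypto_ctr_setkey() strips the trailing noncesize bytes into
 * ctx->nonce and passes the remainder to the child cipher.
 */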
static int crypto_ctr_setkey(struct crypto_tfm *parent, const u8 *key,
			     unsigned int keylen)
{
	struct crypto_ctr_ctx *ctx = crypto_tfm_ctx(parent);
	struct crypto_cipher *child = ctx->child;
	struct ctr_instance_ctx *ictx =
		crypto_instance_ctx(crypto_tfm_alg_instance(parent));
	unsigned int noncelen = ictx->noncesize;
	int err = 0;

	/* the nonce is stored in bytes at end of key */
	if (keylen < noncelen)
		return -EINVAL;

	memcpy(ctx->nonce, key + (keylen - noncelen), noncelen);
	keylen -= noncelen;

	crypto_cipher_clear_flags(child, CRYPTO_TFM_REQ_MASK);
	crypto_cipher_set_flags(child, crypto_tfm_get_flags(parent) &
				CRYPTO_TFM_REQ_MASK);
	err = crypto_cipher_setkey(child, key, keylen);
	crypto_tfm_set_flags(parent, crypto_cipher_get_flags(child) &
			     CRYPTO_TFM_RES_MASK);

	return err;
}

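/*
 * Process the final, sub-block-sized tail: one keystream block is
 * generated into the scratch area that follows the counter block, and
 * only the remaining nbytes are XORed and copied out.  The counter is
 * not incremented, since no further data follows.
 */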
static void crypto_ctr_crypt_final(struct blkcipher_walk *walk,
				   struct crypto_cipher *tfm, u8 *ctrblk,
				   unsigned int countersize)
{
	unsigned int bsize = crypto_cipher_blocksize(tfm);
	u8 *keystream = ctrblk + bsize;
	u8 *src = walk->src.virt.addr;
	u8 *dst = walk->dst.virt.addr;
	unsigned int nbytes = walk->nbytes;

	crypto_cipher_encrypt_one(tfm, keystream, ctrblk);
	crypto_xor(keystream, src, nbytes);
	memcpy(dst, keystream, nbytes);
}

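/*
 * Bulk path for distinct source and destination: the keystream is
 * generated straight into dst and XORed with src, so no scratch buffer
 * is needed.  Returns the leftover byte count (< bsize) for the walker
 * to carry over into the next chunk.
 */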
static int crypto_ctr_crypt_segment(struct blkcipher_walk *walk,
				    struct crypto_cipher *tfm, u8 *ctrblk,
				    unsigned int countersize)
{
	void (*fn)(struct crypto_tfm *, u8 *, const u8 *) =
		crypto_cipher_alg(tfm)->cia_encrypt;
	unsigned int bsize = crypto_cipher_blocksize(tfm);
	u8 *src = walk->src.virt.addr;
	u8 *dst = walk->dst.virt.addr;
	unsigned int nbytes = walk->nbytes;

	do {
		/* create keystream */
		fn(crypto_cipher_tfm(tfm), dst, ctrblk);
		crypto_xor(dst, src, bsize);

		/* increment counter in counterblock */
		crypto_inc(ctrblk + bsize - countersize, countersize);

		src += bsize;
		dst += bsize;
	} while ((nbytes -= bsize) >= bsize);

	return nbytes;
}

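/*
 * Bulk path for in-place operation (src == dst): the keystream must be
 * generated into the scratch block after the counter block, since
 * writing it into dst directly would clobber the data it is about to
 * be XORed with.
 */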
static int crypto_ctr_crypt_inplace(struct blkcipher_walk *walk,
				    struct crypto_cipher *tfm, u8 *ctrblk,
				    unsigned int countersize)
{
	void (*fn)(struct crypto_tfm *, u8 *, const u8 *) =
		crypto_cipher_alg(tfm)->cia_encrypt;
	unsigned int bsize = crypto_cipher_blocksize(tfm);
	unsigned int nbytes = walk->nbytes;
	u8 *src = walk->src.virt.addr;
	u8 *keystream = ctrblk + bsize;

	do {
		/* create keystream */
		fn(crypto_cipher_tfm(tfm), keystream, ctrblk);
		crypto_xor(src, keystream, bsize);

		/* increment counter in counterblock */
		crypto_inc(ctrblk + bsize - countersize, countersize);

		src += bsize;
	} while ((nbytes -= bsize) >= bsize);

	return nbytes;
}

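/*
 * Top-level walk.  blkcipher_walk_virt_block(..., bsize) guarantees
 * each chunk is at least one block long until the true tail, and any
 * partial remainder from the bulk helpers is handed back to the walker
 * via blkcipher_walk_done() -- the multi-page fix described in the
 * commit message above.
 */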
static int crypto_ctr_crypt(struct blkcipher_desc *desc,
			    struct scatterlist *dst, struct scatterlist *src,
			    unsigned int nbytes)
{
	struct blkcipher_walk walk;
	struct crypto_blkcipher *tfm = desc->tfm;
	struct crypto_ctr_ctx *ctx = crypto_blkcipher_ctx(tfm);
	struct crypto_cipher *child = ctx->child;
	unsigned int bsize = crypto_cipher_blocksize(child);
	struct ctr_instance_ctx *ictx =
		crypto_instance_ctx(crypto_tfm_alg_instance(&tfm->base));
	unsigned long alignmask = crypto_cipher_alignmask(child) |
				  (__alignof__(u32) - 1);
	u8 cblk[bsize * 2 + alignmask];
	u8 *counterblk = (u8 *)ALIGN((unsigned long)cblk, alignmask + 1);
	int err;

	blkcipher_walk_init(&walk, dst, src, nbytes);
	err = blkcipher_walk_virt_block(desc, &walk, bsize);

	/* set up counter block */
	memset(counterblk, 0, bsize);
	memcpy(counterblk, ctx->nonce, ictx->noncesize);
	memcpy(counterblk + ictx->noncesize, walk.iv, ictx->ivsize);

	/* initialize counter portion of counter block */
	crypto_inc(counterblk + bsize - ictx->countersize, ictx->countersize);

	while (walk.nbytes >= bsize) {
		if (walk.src.virt.addr == walk.dst.virt.addr)
			nbytes = crypto_ctr_crypt_inplace(&walk, child,
							  counterblk,
							  ictx->countersize);
		else
			nbytes = crypto_ctr_crypt_segment(&walk, child,
							  counterblk,
							  ictx->countersize);

		err = blkcipher_walk_done(desc, &walk, nbytes);
	}

	if (walk.nbytes) {
		crypto_ctr_crypt_final(&walk, child, counterblk,
				       ictx->countersize);
		err = blkcipher_walk_done(desc, &walk, 0);
	}

	return err;
}

static int crypto_ctr_init_tfm(struct crypto_tfm *tfm)
{
	struct crypto_instance *inst = (void *)tfm->__crt_alg;
	struct ctr_instance_ctx *ictx = crypto_instance_ctx(inst);
	struct crypto_ctr_ctx *ctx = crypto_tfm_ctx(tfm);
	struct crypto_cipher *cipher;

	ctx->nonce = kzalloc(ictx->noncesize, GFP_KERNEL);
	if (!ctx->nonce)
		return -ENOMEM;

	cipher = crypto_spawn_cipher(&ictx->alg);
	if (IS_ERR(cipher)) {
		/* cra_exit is not called when cra_init fails, so free here */
		kfree(ctx->nonce);
		return PTR_ERR(cipher);
	}

	ctx->child = cipher;

	return 0;
}

static void crypto_ctr_exit_tfm(struct crypto_tfm *tfm)
{
	struct crypto_ctr_ctx *ctx = crypto_tfm_ctx(tfm);

	kfree(ctx->nonce);
	crypto_free_cipher(ctx->child);
}

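/*
 * Instantiation example (hypothetical parameter values): AES in counter
 * mode with a 4-byte nonce, 8-byte IV and 4-byte counter would be
 * requested as something like
 *
 *	crypto_alloc_blkcipher("ctr(aes,4,8,4)", 0, CRYPTO_ALG_ASYNC);
 *
 * matching the "ctr(%s,%u,%u,%u)" name constructed below.
 */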
static struct crypto_instance *crypto_ctr_alloc(struct rtattr **tb)
{
	struct crypto_instance *inst;
	struct crypto_alg *alg;
	struct ctr_instance_ctx *ictx;
	unsigned int noncesize;
	unsigned int ivsize;
	unsigned int countersize;
	int err;

	err = crypto_check_attr_type(tb, CRYPTO_ALG_TYPE_BLKCIPHER);
	if (err)
		return ERR_PTR(err);

	alg = crypto_attr_alg(tb[1], CRYPTO_ALG_TYPE_CIPHER,
			      CRYPTO_ALG_TYPE_MASK);
	if (IS_ERR(alg))
		return ERR_PTR(PTR_ERR(alg));

	err = crypto_attr_u32(tb[2], &noncesize);
	if (err)
		goto out_put_alg;

	err = crypto_attr_u32(tb[3], &ivsize);
	if (err)
		goto out_put_alg;

	err = crypto_attr_u32(tb[4], &countersize);
	if (err)
		goto out_put_alg;

	/* verify size of nonce + iv + counter
	 * counter must be >= 4 bytes.
	 */
	err = -EINVAL;
	if (((noncesize + ivsize + countersize) < alg->cra_blocksize) ||
	    ((noncesize + ivsize) > alg->cra_blocksize) ||
	    (countersize > alg->cra_blocksize) || (countersize < 4))
		goto out_put_alg;

	/* If this is false we'd fail the alignment of crypto_inc. */
	if ((alg->cra_blocksize - countersize) % 4)
		goto out_put_alg;

	inst = kzalloc(sizeof(*inst) + sizeof(*ictx), GFP_KERNEL);
	err = -ENOMEM;
	if (!inst)
		goto out_put_alg;

	err = -ENAMETOOLONG;
	if (snprintf(inst->alg.cra_name, CRYPTO_MAX_ALG_NAME,
		     "ctr(%s,%u,%u,%u)", alg->cra_name, noncesize,
		     ivsize, countersize) >= CRYPTO_MAX_ALG_NAME)
		goto err_free_inst;

	if (snprintf(inst->alg.cra_driver_name, CRYPTO_MAX_ALG_NAME,
		     "ctr(%s,%u,%u,%u)", alg->cra_driver_name, noncesize,
		     ivsize, countersize) >= CRYPTO_MAX_ALG_NAME)
		goto err_free_inst;

	ictx = crypto_instance_ctx(inst);
	ictx->noncesize = noncesize;
	ictx->ivsize = ivsize;
	ictx->countersize = countersize;

	err = crypto_init_spawn(&ictx->alg, alg, inst,
				CRYPTO_ALG_TYPE_MASK | CRYPTO_ALG_ASYNC);
	if (err)
		goto err_free_inst;

	err = 0;
	inst->alg.cra_flags = CRYPTO_ALG_TYPE_BLKCIPHER;
	inst->alg.cra_priority = alg->cra_priority;
	inst->alg.cra_blocksize = 1;
	inst->alg.cra_alignmask = alg->cra_alignmask | (__alignof__(u32) - 1);
	inst->alg.cra_type = &crypto_blkcipher_type;

	inst->alg.cra_blkcipher.ivsize = ivsize;
	inst->alg.cra_blkcipher.min_keysize = alg->cra_cipher.cia_min_keysize
					      + noncesize;
	inst->alg.cra_blkcipher.max_keysize = alg->cra_cipher.cia_max_keysize
					      + noncesize;

	inst->alg.cra_ctxsize = sizeof(struct crypto_ctr_ctx);

	inst->alg.cra_init = crypto_ctr_init_tfm;
	inst->alg.cra_exit = crypto_ctr_exit_tfm;

	inst->alg.cra_blkcipher.setkey = crypto_ctr_setkey;
	inst->alg.cra_blkcipher.encrypt = crypto_ctr_crypt;
	inst->alg.cra_blkcipher.decrypt = crypto_ctr_crypt;

err_free_inst:
	if (err)
		kfree(inst);

out_put_alg:
	crypto_mod_put(alg);

	if (err)
		inst = ERR_PTR(err);

	return inst;
}

static void crypto_ctr_free(struct crypto_instance *inst)
{
	struct ctr_instance_ctx *ictx = crypto_instance_ctx(inst);

	crypto_drop_spawn(&ictx->alg);
	kfree(inst);
}

static struct crypto_template crypto_ctr_tmpl = {
	.name = "ctr",
	.alloc = crypto_ctr_alloc,
	.free = crypto_ctr_free,
	.module = THIS_MODULE,
};

static int __init crypto_ctr_module_init(void)
{
	return crypto_register_template(&crypto_ctr_tmpl);
}

static void __exit crypto_ctr_module_exit(void)
{
	crypto_unregister_template(&crypto_ctr_tmpl);
}

module_init(crypto_ctr_module_init);
module_exit(crypto_ctr_module_exit);

MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("CTR Counter block mode");