1
mirror of https://github.com/jedisct1/libsodium.git synced 2024-12-20 02:25:14 -07:00

Remove aes128ctr

This commit is contained in:
Frank Denis 2017-09-27 15:07:54 +02:00
parent 1ddeab2a87
commit 96be673f82
14 changed files with 0 additions and 1586 deletions

View File

@ -169,16 +169,6 @@ libsodium_la_SOURCES += \
crypto_shorthash/siphash24/shorthash_siphashx24.c \
crypto_shorthash/siphash24/ref/shorthash_siphashx24_ref.c \
crypto_sign/ed25519/ref10/obsolete.c \
crypto_stream/aes128ctr/nacl/afternm_aes128ctr.c \
crypto_stream/aes128ctr/nacl/beforenm_aes128ctr.c \
crypto_stream/aes128ctr/nacl/common.h \
crypto_stream/aes128ctr/nacl/consts.h \
crypto_stream/aes128ctr/nacl/consts_aes128ctr.c \
crypto_stream/aes128ctr/nacl/int128.h \
crypto_stream/aes128ctr/nacl/int128_aes128ctr.c \
crypto_stream/aes128ctr/nacl/stream_aes128ctr_nacl.c \
crypto_stream/aes128ctr/nacl/xor_afternm_aes128ctr.c \
crypto_stream/aes128ctr/stream_aes128ctr.c \
crypto_stream/salsa2012/ref/stream_salsa2012_ref.c \
crypto_stream/salsa2012/stream_salsa2012.c \
crypto_stream/salsa208/ref/stream_salsa208_ref.c \

View File

@ -1,174 +0,0 @@
/* Author: Peter Schwabe, ported from an assembly implementation by Emilia
* Käsper
* Date: 2009-03-19
* Public domain */
#include "common.h"
#include "consts.h"
#include "crypto_stream_aes128ctr.h"
#include "int128.h"
/* Generate `len` bytes of AES-128-CTR keystream into `out`, using a
 * bitsliced (constant-time, byteslice-free) AES implementation that
 * computes 8 blocks (128 bytes) per pass.  Ported from the
 * Käsper/Schwabe assembly; the xmm0..xmm15 locals mirror the SSE
 * registers of the original code.
 *
 *   out   - destination for `len` keystream bytes
 *   len   - number of keystream bytes requested
 *   nonce - 16 bytes; the last 4 bytes act as a big-endian 32-bit
 *           block counter
 *   c     - bitsliced round-key schedule from
 *           crypto_stream_aes128ctr_beforenm()
 *
 * Always returns 0.
 */
int
crypto_stream_aes128ctr_afternm(unsigned char *out, unsigned long long len,
const unsigned char *nonce,
const unsigned char *c)
{
aes_uint128_t xmm0;
aes_uint128_t xmm1;
aes_uint128_t xmm2;
aes_uint128_t xmm3;
aes_uint128_t xmm4;
aes_uint128_t xmm5;
aes_uint128_t xmm6;
aes_uint128_t xmm7;
aes_uint128_t xmm8;
aes_uint128_t xmm9;
aes_uint128_t xmm10;
aes_uint128_t xmm11;
aes_uint128_t xmm12;
aes_uint128_t xmm13;
aes_uint128_t xmm14;
aes_uint128_t xmm15;
aes_uint128_t nonce_stack;
unsigned long long lensav;
unsigned char bl[128];
unsigned char *blp;
unsigned char *np;
unsigned char b;
uint32_t tmp;
/* Copy nonce on the stack so counter increments never touch the
 * caller's buffer. */
copy2(&nonce_stack, (const aes_uint128_t *) (nonce + 0));
np = (unsigned char *) &nonce_stack;
enc_block:
/* Build 8 consecutive counter blocks: xmm1..xmm7 are counter+1..+7
 * (the +0 block stays in xmm0).  SWAP32 converts to the byte order the
 * big-endian add expects; M0/M0SWAP put the blocks into bitslice order. */
xmm0 = *(aes_uint128_t *) (np + 0);
copy2(&xmm1, &xmm0);
shufb(&xmm1, SWAP32);
copy2(&xmm2, &xmm1);
copy2(&xmm3, &xmm1);
copy2(&xmm4, &xmm1);
copy2(&xmm5, &xmm1);
copy2(&xmm6, &xmm1);
copy2(&xmm7, &xmm1);
add_uint32_big(&xmm1, 1);
add_uint32_big(&xmm2, 2);
add_uint32_big(&xmm3, 3);
add_uint32_big(&xmm4, 4);
add_uint32_big(&xmm5, 5);
add_uint32_big(&xmm6, 6);
add_uint32_big(&xmm7, 7);
shufb(&xmm0, M0);
shufb(&xmm1, M0SWAP);
shufb(&xmm2, M0SWAP);
shufb(&xmm3, M0SWAP);
shufb(&xmm4, M0SWAP);
shufb(&xmm5, M0SWAP);
shufb(&xmm6, M0SWAP);
shufb(&xmm7, M0SWAP);
/* 10 AES rounds over the bitsliced planes; register roles alternate
 * between the xmm0-7 and xmm8-15 banks each round. */
bitslice(xmm7, xmm6, xmm5, xmm4, xmm3, xmm2, xmm1, xmm0, xmm8);
aesround(1, xmm0, xmm1, xmm2, xmm3, xmm4, xmm5, xmm6, xmm7, xmm8, xmm9,
xmm10, xmm11, xmm12, xmm13, xmm14, xmm15, c);
aesround(2, xmm8, xmm9, xmm10, xmm11, xmm12, xmm13, xmm14, xmm15, xmm0,
xmm1, xmm2, xmm3, xmm4, xmm5, xmm6, xmm7, c);
aesround(3, xmm0, xmm1, xmm2, xmm3, xmm4, xmm5, xmm6, xmm7, xmm8, xmm9,
xmm10, xmm11, xmm12, xmm13, xmm14, xmm15, c);
aesround(4, xmm8, xmm9, xmm10, xmm11, xmm12, xmm13, xmm14, xmm15, xmm0,
xmm1, xmm2, xmm3, xmm4, xmm5, xmm6, xmm7, c);
aesround(5, xmm0, xmm1, xmm2, xmm3, xmm4, xmm5, xmm6, xmm7, xmm8, xmm9,
xmm10, xmm11, xmm12, xmm13, xmm14, xmm15, c);
aesround(6, xmm8, xmm9, xmm10, xmm11, xmm12, xmm13, xmm14, xmm15, xmm0,
xmm1, xmm2, xmm3, xmm4, xmm5, xmm6, xmm7, c);
aesround(7, xmm0, xmm1, xmm2, xmm3, xmm4, xmm5, xmm6, xmm7, xmm8, xmm9,
xmm10, xmm11, xmm12, xmm13, xmm14, xmm15, c);
aesround(8, xmm8, xmm9, xmm10, xmm11, xmm12, xmm13, xmm14, xmm15, xmm0,
xmm1, xmm2, xmm3, xmm4, xmm5, xmm6, xmm7, c);
aesround(9, xmm0, xmm1, xmm2, xmm3, xmm4, xmm5, xmm6, xmm7, xmm8, xmm9,
xmm10, xmm11, xmm12, xmm13, xmm14, xmm15, c);
lastround(xmm8, xmm9, xmm10, xmm11, xmm12, xmm13, xmm14, xmm15, xmm0, xmm1,
xmm2, xmm3, xmm4, xmm5, xmm6, xmm7, c);
bitslice(xmm13, xmm10, xmm15, xmm11, xmm14, xmm12, xmm9, xmm8, xmm0);
if (len < 128) {
goto partial;
}
if (len == 128) {
goto full;
}
/* More than one batch left: advance the big-endian counter by the 8
 * blocks just produced and emit 128 bytes. */
tmp = LOAD32_BE(np + 12);
tmp += 8;
STORE32_BE(np + 12, tmp);
/* NOTE(review): 16-byte stores through an aes_uint128_t pointer assume
 * `out` is suitably aligned, as in the original assembly port. */
*(aes_uint128_t *) (out + 0) = xmm8;
*(aes_uint128_t *) (out + 16) = xmm9;
*(aes_uint128_t *) (out + 32) = xmm12;
*(aes_uint128_t *) (out + 48) = xmm14;
*(aes_uint128_t *) (out + 64) = xmm11;
*(aes_uint128_t *) (out + 80) = xmm15;
*(aes_uint128_t *) (out + 96) = xmm10;
*(aes_uint128_t *) (out + 112) = xmm13;
len -= 128;
out += 128;
goto enc_block;
partial:
/* Fewer than 128 bytes remain: stage the batch in bl[] and copy the
 * tail byte-by-byte.  The counter bump (whole 16-byte blocks only) has
 * no external effect since nonce_stack is local and we return next. */
lensav = len;
len >>= 4;
tmp = LOAD32_BE(np + 12);
tmp += len;
STORE32_BE(np + 12, tmp);
blp = bl;
*(aes_uint128_t *) (blp + 0) = xmm8;
*(aes_uint128_t *) (blp + 16) = xmm9;
*(aes_uint128_t *) (blp + 32) = xmm12;
*(aes_uint128_t *) (blp + 48) = xmm14;
*(aes_uint128_t *) (blp + 64) = xmm11;
*(aes_uint128_t *) (blp + 80) = xmm15;
*(aes_uint128_t *) (blp + 96) = xmm10;
*(aes_uint128_t *) (blp + 112) = xmm13;
bytes:
if (lensav == 0) {
goto end;
}
b = blp[0]; /* clang false positive */
*(unsigned char *) (out + 0) = b;
blp += 1;
out += 1;
lensav -= 1;
goto bytes;
full:
/* Exactly one full batch left: bump counter and emit the final 128
 * bytes directly. */
tmp = LOAD32_BE(np + 12);
tmp += 8;
STORE32_BE(np + 12, tmp);
*(aes_uint128_t *) (out + 0) = xmm8;
*(aes_uint128_t *) (out + 16) = xmm9;
*(aes_uint128_t *) (out + 32) = xmm12;
*(aes_uint128_t *) (out + 48) = xmm14;
*(aes_uint128_t *) (out + 64) = xmm11;
*(aes_uint128_t *) (out + 80) = xmm15;
*(aes_uint128_t *) (out + 96) = xmm10;
*(aes_uint128_t *) (out + 112) = xmm13;
end:
return 0;
}

View File

@ -1,66 +0,0 @@
/* Author: Peter Schwabe, ported from an assembly implementation by Emilia
* Käsper
* Date: 2009-03-19
* Public domain */
#include "common.h"
#include "consts.h"
#include "crypto_stream_aes128ctr.h"
#include "int128.h"
/* Expand the 16-byte AES-128 key `k` into the bitsliced round-key
 * schedule `c` (crypto_stream_aes128ctr_BEFORENMBYTES bytes) consumed by
 * crypto_stream_aes128ctr_afternm()/_xor_afternm().
 *
 * The odd-looking `xor_rcon(&xmmN);` fragments followed by a lone
 * `, i, c);` line are a single macro invocation: keyexpbs() takes a
 * whole STATEMENT as its `rcon` argument (the per-round round-constant
 * injection), and the formatter split the call across lines.
 *
 * Always returns 0.
 */
int
crypto_stream_aes128ctr_beforenm(unsigned char *c, const unsigned char *k)
{
aes_uint128_t xmm0;
aes_uint128_t xmm1;
aes_uint128_t xmm2;
aes_uint128_t xmm3;
aes_uint128_t xmm4;
aes_uint128_t xmm5;
aes_uint128_t xmm6;
aes_uint128_t xmm7;
aes_uint128_t xmm8;
aes_uint128_t xmm9;
aes_uint128_t xmm10;
aes_uint128_t xmm11;
aes_uint128_t xmm12;
aes_uint128_t xmm13;
aes_uint128_t xmm14;
aes_uint128_t xmm15;
aes_uint128_t t;
/* Round 0: bitslice the raw key into c[0..127]. */
bitslicekey0(k, c);
/* Rounds 1..10: bitsliced key expansion; each call reads round i-1
 * from c and writes round i. */
keyexpbs1(xmm0, xmm1, xmm2, xmm3, xmm4, xmm5, xmm6, xmm7, xmm8, xmm9, xmm10,
xmm11, xmm12, xmm13, xmm14, xmm15, c);
keyexpbs(xmm0, xmm1, xmm4, xmm6, xmm3, xmm7, xmm2, xmm5, xmm8, xmm9, xmm10,
xmm11, xmm12, xmm13, xmm14, xmm15, xor_rcon(&xmm1);
, 2, c);
keyexpbs(xmm0, xmm1, xmm3, xmm2, xmm6, xmm5, xmm4, xmm7, xmm8, xmm9, xmm10,
xmm11, xmm12, xmm13, xmm14, xmm15, xor_rcon(&xmm6);
, 3, c);
keyexpbs(xmm0, xmm1, xmm6, xmm4, xmm2, xmm7, xmm3, xmm5, xmm8, xmm9, xmm10,
xmm11, xmm12, xmm13, xmm14, xmm15, xor_rcon(&xmm3);
, 4, c);
keyexpbs(xmm0, xmm1, xmm2, xmm3, xmm4, xmm5, xmm6, xmm7, xmm8, xmm9, xmm10,
xmm11, xmm12, xmm13, xmm14, xmm15, xor_rcon(&xmm3);
, 5, c);
keyexpbs(xmm0, xmm1, xmm4, xmm6, xmm3, xmm7, xmm2, xmm5, xmm8, xmm9, xmm10,
xmm11, xmm12, xmm13, xmm14, xmm15, xor_rcon(&xmm5);
, 6, c);
keyexpbs(xmm0, xmm1, xmm3, xmm2, xmm6, xmm5, xmm4, xmm7, xmm8, xmm9, xmm10,
xmm11, xmm12, xmm13, xmm14, xmm15, xor_rcon(&xmm3);
, 7, c);
keyexpbs(xmm0, xmm1, xmm6, xmm4, xmm2, xmm7, xmm3, xmm5, xmm8, xmm9, xmm10,
xmm11, xmm12, xmm13, xmm14, xmm15, xor_rcon(&xmm7);
, 8, c);
keyexpbs(xmm0, xmm1, xmm2, xmm3, xmm4, xmm5, xmm6, xmm7, xmm8, xmm9, xmm10,
xmm11, xmm12, xmm13, xmm14, xmm15, xor_rcon(&xmm0);
xor_rcon(&xmm1); xor_rcon(&xmm6); xor_rcon(&xmm3);, 9, c);
keyexpbs10(xmm0, xmm1, xmm4, xmm6, xmm3, xmm7, xmm2, xmm5, xmm8, xmm9,
xmm10, xmm11, xmm12, xmm13, xmm14, xmm15, c);
return 0;
}

View File

@ -1,766 +0,0 @@
/* Author: Peter Schwabe, ported from an assembly implementation by Emilia
Käsper
Date: 2009-03-19
Public domain */
#ifndef aes128ctr_nacl_common_H
#define aes128ctr_nacl_common_H
#include "private/common.h"
/* Macros required only for key expansion */
/* keyexpbs1: first bitsliced AES-128 key-expansion step.  Reads the
 * bitsliced round-0 key from bskey[0..127] and writes the round-1 key to
 * bskey[128..255].  b0..b7 are the eight bitslice planes, t0..t7 scratch.
 * The rshift32/xor2 ladder replicates each expanded word across the
 * remaining words of the round key, as the AES key schedule requires.
 * NOTE(review): b0 is shuffled with EXPB0 twice (after xor_rcon and
 * again after b5); EXPB0 is idempotent (all entries are fixed points of
 * the permutation), so the repeat is harmless but looks vestigial. */
#define keyexpbs1(b0, b1, b2, b3, b4, b5, b6, b7, t0, t1, t2, t3, t4, t5, t6, \
t7, bskey) \
rotbyte(&b0); \
rotbyte(&b1); \
rotbyte(&b2); \
rotbyte(&b3); \
rotbyte(&b4); \
rotbyte(&b5); \
rotbyte(&b6); \
rotbyte(&b7); \
\
sbox(b0, b1, b2, b3, b4, b5, b6, b7, t0, t1, t2, t3, t4, t5, t6, t7); \
\
xor_rcon(&b0); \
shufb(&b0, EXPB0); \
shufb(&b1, EXPB0); \
shufb(&b4, EXPB0); \
shufb(&b6, EXPB0); \
shufb(&b3, EXPB0); \
shufb(&b7, EXPB0); \
shufb(&b2, EXPB0); \
shufb(&b5, EXPB0); \
shufb(&b0, EXPB0); \
\
t0 = *(aes_uint128_t *) (bskey + 0); \
t1 = *(aes_uint128_t *) (bskey + 16); \
t2 = *(aes_uint128_t *) (bskey + 32); \
t3 = *(aes_uint128_t *) (bskey + 48); \
t4 = *(aes_uint128_t *) (bskey + 64); \
t5 = *(aes_uint128_t *) (bskey + 80); \
t6 = *(aes_uint128_t *) (bskey + 96); \
t7 = *(aes_uint128_t *) (bskey + 112); \
\
xor2(&b0, &t0); \
xor2(&b1, &t1); \
xor2(&b4, &t2); \
xor2(&b6, &t3); \
xor2(&b3, &t4); \
xor2(&b7, &t5); \
xor2(&b2, &t6); \
xor2(&b5, &t7); \
\
rshift32_littleendian(&t0, 8); \
rshift32_littleendian(&t1, 8); \
rshift32_littleendian(&t2, 8); \
rshift32_littleendian(&t3, 8); \
rshift32_littleendian(&t4, 8); \
rshift32_littleendian(&t5, 8); \
rshift32_littleendian(&t6, 8); \
rshift32_littleendian(&t7, 8); \
\
xor2(&b0, &t0); \
xor2(&b1, &t1); \
xor2(&b4, &t2); \
xor2(&b6, &t3); \
xor2(&b3, &t4); \
xor2(&b7, &t5); \
xor2(&b2, &t6); \
xor2(&b5, &t7); \
\
rshift32_littleendian(&t0, 8); \
rshift32_littleendian(&t1, 8); \
rshift32_littleendian(&t2, 8); \
rshift32_littleendian(&t3, 8); \
rshift32_littleendian(&t4, 8); \
rshift32_littleendian(&t5, 8); \
rshift32_littleendian(&t6, 8); \
rshift32_littleendian(&t7, 8); \
\
xor2(&b0, &t0); \
xor2(&b1, &t1); \
xor2(&b4, &t2); \
xor2(&b6, &t3); \
xor2(&b3, &t4); \
xor2(&b7, &t5); \
xor2(&b2, &t6); \
xor2(&b5, &t7); \
\
rshift32_littleendian(&t0, 8); \
rshift32_littleendian(&t1, 8); \
rshift32_littleendian(&t2, 8); \
rshift32_littleendian(&t3, 8); \
rshift32_littleendian(&t4, 8); \
rshift32_littleendian(&t5, 8); \
rshift32_littleendian(&t6, 8); \
rshift32_littleendian(&t7, 8); \
\
xor2(&b0, &t0); \
xor2(&b1, &t1); \
xor2(&b4, &t2); \
xor2(&b6, &t3); \
xor2(&b3, &t4); \
xor2(&b7, &t5); \
xor2(&b2, &t6); \
xor2(&b5, &t7); \
\
*(aes_uint128_t *) (bskey + 128) = b0; \
*(aes_uint128_t *) (bskey + 144) = b1; \
*(aes_uint128_t *) (bskey + 160) = b4; \
*(aes_uint128_t *) (bskey + 176) = b6; \
*(aes_uint128_t *) (bskey + 192) = b3; \
*(aes_uint128_t *) (bskey + 208) = b7; \
*(aes_uint128_t *) (bskey + 224) = b2; \
*(aes_uint128_t *) (bskey + 240) = b5
/* keyexpbs10: final (10th) key-expansion step.  Reads round 9 from
 * bskey + 9*128, writes round 10 at bskey + 1280.  The toggle() calls
 * compensate for the NOT gates folded into the bitsliced sbox(); the
 * closing shufb(..., M0) leaves the last round key in the layout
 * lastround() expects. */
#define keyexpbs10(b0, b1, b2, b3, b4, b5, b6, b7, t0, t1, t2, t3, t4, t5, t6, \
t7, bskey) \
toggle(&b0); \
toggle(&b1); \
toggle(&b5); \
toggle(&b6); \
rotbyte(&b0); \
rotbyte(&b1); \
rotbyte(&b2); \
rotbyte(&b3); \
rotbyte(&b4); \
rotbyte(&b5); \
rotbyte(&b6); \
rotbyte(&b7); \
\
sbox(b0, b1, b2, b3, b4, b5, b6, b7, t0, t1, t2, t3, t4, t5, t6, t7); \
\
xor_rcon(&b1); \
xor_rcon(&b4); \
xor_rcon(&b3); \
xor_rcon(&b7); \
shufb(&b0, EXPB0); \
shufb(&b1, EXPB0); \
shufb(&b4, EXPB0); \
shufb(&b6, EXPB0); \
shufb(&b3, EXPB0); \
shufb(&b7, EXPB0); \
shufb(&b2, EXPB0); \
shufb(&b5, EXPB0); \
\
t0 = *(aes_uint128_t *) (bskey + 9 * 128 + 0); \
t1 = *(aes_uint128_t *) (bskey + 9 * 128 + 16); \
t2 = *(aes_uint128_t *) (bskey + 9 * 128 + 32); \
t3 = *(aes_uint128_t *) (bskey + 9 * 128 + 48); \
t4 = *(aes_uint128_t *) (bskey + 9 * 128 + 64); \
t5 = *(aes_uint128_t *) (bskey + 9 * 128 + 80); \
t6 = *(aes_uint128_t *) (bskey + 9 * 128 + 96); \
t7 = *(aes_uint128_t *) (bskey + 9 * 128 + 112); \
\
toggle(&t0); \
toggle(&t1); \
toggle(&t5); \
toggle(&t6); \
\
xor2(&b0, &t0); \
xor2(&b1, &t1); \
xor2(&b4, &t2); \
xor2(&b6, &t3); \
xor2(&b3, &t4); \
xor2(&b7, &t5); \
xor2(&b2, &t6); \
xor2(&b5, &t7); \
\
rshift32_littleendian(&t0, 8); \
rshift32_littleendian(&t1, 8); \
rshift32_littleendian(&t2, 8); \
rshift32_littleendian(&t3, 8); \
rshift32_littleendian(&t4, 8); \
rshift32_littleendian(&t5, 8); \
rshift32_littleendian(&t6, 8); \
rshift32_littleendian(&t7, 8); \
\
xor2(&b0, &t0); \
xor2(&b1, &t1); \
xor2(&b4, &t2); \
xor2(&b6, &t3); \
xor2(&b3, &t4); \
xor2(&b7, &t5); \
xor2(&b2, &t6); \
xor2(&b5, &t7); \
\
rshift32_littleendian(&t0, 8); \
rshift32_littleendian(&t1, 8); \
rshift32_littleendian(&t2, 8); \
rshift32_littleendian(&t3, 8); \
rshift32_littleendian(&t4, 8); \
rshift32_littleendian(&t5, 8); \
rshift32_littleendian(&t6, 8); \
rshift32_littleendian(&t7, 8); \
\
xor2(&b0, &t0); \
xor2(&b1, &t1); \
xor2(&b4, &t2); \
xor2(&b6, &t3); \
xor2(&b3, &t4); \
xor2(&b7, &t5); \
xor2(&b2, &t6); \
xor2(&b5, &t7); \
\
rshift32_littleendian(&t0, 8); \
rshift32_littleendian(&t1, 8); \
rshift32_littleendian(&t2, 8); \
rshift32_littleendian(&t3, 8); \
rshift32_littleendian(&t4, 8); \
rshift32_littleendian(&t5, 8); \
rshift32_littleendian(&t6, 8); \
rshift32_littleendian(&t7, 8); \
\
xor2(&b0, &t0); \
xor2(&b1, &t1); \
xor2(&b4, &t2); \
xor2(&b6, &t3); \
xor2(&b3, &t4); \
xor2(&b7, &t5); \
xor2(&b2, &t6); \
xor2(&b5, &t7); \
\
shufb(&b0, M0); \
shufb(&b1, M0); \
shufb(&b2, M0); \
shufb(&b3, M0); \
shufb(&b4, M0); \
shufb(&b5, M0); \
shufb(&b6, M0); \
shufb(&b7, M0); \
\
*(aes_uint128_t *) (bskey + 1280) = b0; \
*(aes_uint128_t *) (bskey + 1296) = b1; \
*(aes_uint128_t *) (bskey + 1312) = b4; \
*(aes_uint128_t *) (bskey + 1328) = b6; \
*(aes_uint128_t *) (bskey + 1344) = b3; \
*(aes_uint128_t *) (bskey + 1360) = b7; \
*(aes_uint128_t *) (bskey + 1376) = b2; \
*(aes_uint128_t *) (bskey + 1392) = b5
/* keyexpbs: generic key-expansion step for rounds 2..9.  Reads round
 * (i-1) from bskey + (i-1)*128, writes round i at bskey + i*128.
 * `rcon` is a full STATEMENT (one or more xor_rcon() calls) injected
 * verbatim right after sbox() -- callers pass e.g. `xor_rcon(&b3);`. */
#define keyexpbs(b0, b1, b2, b3, b4, b5, b6, b7, t0, t1, t2, t3, t4, t5, t6, \
t7, rcon, i, bskey) \
toggle(&b0); \
toggle(&b1); \
toggle(&b5); \
toggle(&b6); \
rotbyte(&b0); \
rotbyte(&b1); \
rotbyte(&b2); \
rotbyte(&b3); \
rotbyte(&b4); \
rotbyte(&b5); \
rotbyte(&b6); \
rotbyte(&b7); \
\
sbox(b0, b1, b2, b3, b4, b5, b6, b7, t0, t1, t2, t3, t4, t5, t6, t7); \
\
rcon; \
shufb(&b0, EXPB0); \
shufb(&b1, EXPB0); \
shufb(&b4, EXPB0); \
shufb(&b6, EXPB0); \
shufb(&b3, EXPB0); \
shufb(&b7, EXPB0); \
shufb(&b2, EXPB0); \
shufb(&b5, EXPB0); \
\
t0 = *(aes_uint128_t *) (bskey + (i - 1) * 128 + 0); \
t1 = *(aes_uint128_t *) (bskey + (i - 1) * 128 + 16); \
t2 = *(aes_uint128_t *) (bskey + (i - 1) * 128 + 32); \
t3 = *(aes_uint128_t *) (bskey + (i - 1) * 128 + 48); \
t4 = *(aes_uint128_t *) (bskey + (i - 1) * 128 + 64); \
t5 = *(aes_uint128_t *) (bskey + (i - 1) * 128 + 80); \
t6 = *(aes_uint128_t *) (bskey + (i - 1) * 128 + 96); \
t7 = *(aes_uint128_t *) (bskey + (i - 1) * 128 + 112); \
\
toggle(&t0); \
toggle(&t1); \
toggle(&t5); \
toggle(&t6); \
\
xor2(&b0, &t0); \
xor2(&b1, &t1); \
xor2(&b4, &t2); \
xor2(&b6, &t3); \
xor2(&b3, &t4); \
xor2(&b7, &t5); \
xor2(&b2, &t6); \
xor2(&b5, &t7); \
\
rshift32_littleendian(&t0, 8); \
rshift32_littleendian(&t1, 8); \
rshift32_littleendian(&t2, 8); \
rshift32_littleendian(&t3, 8); \
rshift32_littleendian(&t4, 8); \
rshift32_littleendian(&t5, 8); \
rshift32_littleendian(&t6, 8); \
rshift32_littleendian(&t7, 8); \
\
xor2(&b0, &t0); \
xor2(&b1, &t1); \
xor2(&b4, &t2); \
xor2(&b6, &t3); \
xor2(&b3, &t4); \
xor2(&b7, &t5); \
xor2(&b2, &t6); \
xor2(&b5, &t7); \
\
rshift32_littleendian(&t0, 8); \
rshift32_littleendian(&t1, 8); \
rshift32_littleendian(&t2, 8); \
rshift32_littleendian(&t3, 8); \
rshift32_littleendian(&t4, 8); \
rshift32_littleendian(&t5, 8); \
rshift32_littleendian(&t6, 8); \
rshift32_littleendian(&t7, 8); \
\
xor2(&b0, &t0); \
xor2(&b1, &t1); \
xor2(&b4, &t2); \
xor2(&b6, &t3); \
xor2(&b3, &t4); \
xor2(&b7, &t5); \
xor2(&b2, &t6); \
xor2(&b5, &t7); \
\
rshift32_littleendian(&t0, 8); \
rshift32_littleendian(&t1, 8); \
rshift32_littleendian(&t2, 8); \
rshift32_littleendian(&t3, 8); \
rshift32_littleendian(&t4, 8); \
rshift32_littleendian(&t5, 8); \
rshift32_littleendian(&t6, 8); \
rshift32_littleendian(&t7, 8); \
\
xor2(&b0, &t0); \
xor2(&b1, &t1); \
xor2(&b4, &t2); \
xor2(&b6, &t3); \
xor2(&b3, &t4); \
xor2(&b7, &t5); \
xor2(&b2, &t6); \
xor2(&b5, &t7); \
\
*(aes_uint128_t *) (bskey + i * 128 + 0) = b0; \
*(aes_uint128_t *) (bskey + i * 128 + 16) = b1; \
*(aes_uint128_t *) (bskey + i * 128 + 32) = b4; \
*(aes_uint128_t *) (bskey + i * 128 + 48) = b6; \
*(aes_uint128_t *) (bskey + i * 128 + 64) = b3; \
*(aes_uint128_t *) (bskey + i * 128 + 80) = b7; \
*(aes_uint128_t *) (bskey + i * 128 + 96) = b2; \
*(aes_uint128_t *) (bskey + i * 128 + 112) = b5
/* Macros used in multiple contexts */
/* bitslicekey0: bitslice the raw 16-byte key into the round-0 slot of
 * the expanded key (bskey[0..127]).  Requires xmm0..xmm7 and t to be in
 * scope at the expansion site.  The key block is replicated into all
 * eight planes and then transposed by bitslice(). */
#define bitslicekey0(key, bskey) \
xmm0 = *(const aes_uint128_t *) (key + 0); \
shufb(&xmm0, M0); \
copy2(&xmm1, &xmm0); \
copy2(&xmm2, &xmm0); \
copy2(&xmm3, &xmm0); \
copy2(&xmm4, &xmm0); \
copy2(&xmm5, &xmm0); \
copy2(&xmm6, &xmm0); \
copy2(&xmm7, &xmm0); \
\
bitslice(xmm7, xmm6, xmm5, xmm4, xmm3, xmm2, xmm1, xmm0, t); \
\
*(aes_uint128_t *) (bskey + 0) = xmm0; \
*(aes_uint128_t *) (bskey + 16) = xmm1; \
*(aes_uint128_t *) (bskey + 32) = xmm2; \
*(aes_uint128_t *) (bskey + 48) = xmm3; \
*(aes_uint128_t *) (bskey + 64) = xmm4; \
*(aes_uint128_t *) (bskey + 80) = xmm5; \
*(aes_uint128_t *) (bskey + 96) = xmm6; \
*(aes_uint128_t *) (bskey + 112) = xmm7
/* bitslicekey10: bitslice a 16-byte key block into the round-10 slot
 * (bskey + 1280..1407); the toggle() calls pre-apply the NOT gates the
 * bitsliced sbox() folds in.  Not referenced by the code in this file.
 * Fix: copy2() is declared as copy2(aes_uint128_t *, const
 * aes_uint128_t *) (see int128.h), so its arguments must be passed by
 * address -- the previous by-value calls could never have compiled if
 * this macro were expanded.  Now consistent with bitslicekey0/bitslicekey.
 * NOTE(review): unlike bitslicekey0/bitslicekey there is no
 * shufb(&xmm0, M0) here -- presumably intentional for a caller that has
 * already permuted the block; confirm before using. */
#define bitslicekey10(key, bskey) \
xmm0 = *(aes_uint128_t *) (key + 0); \
copy2(&xmm1, &xmm0); \
copy2(&xmm2, &xmm0); \
copy2(&xmm3, &xmm0); \
copy2(&xmm4, &xmm0); \
copy2(&xmm5, &xmm0); \
copy2(&xmm6, &xmm0); \
copy2(&xmm7, &xmm0); \
\
bitslice(xmm7, xmm6, xmm5, xmm4, xmm3, xmm2, xmm1, xmm0, t); \
\
toggle(&xmm6); \
toggle(&xmm5); \
toggle(&xmm1); \
toggle(&xmm0); \
\
*(aes_uint128_t *) (bskey + 0 + 1280) = xmm0; \
*(aes_uint128_t *) (bskey + 16 + 1280) = xmm1; \
*(aes_uint128_t *) (bskey + 32 + 1280) = xmm2; \
*(aes_uint128_t *) (bskey + 48 + 1280) = xmm3; \
*(aes_uint128_t *) (bskey + 64 + 1280) = xmm4; \
*(aes_uint128_t *) (bskey + 80 + 1280) = xmm5; \
*(aes_uint128_t *) (bskey + 96 + 1280) = xmm6; \
*(aes_uint128_t *) (bskey + 112 + 1280) = xmm7
/* bitslicekey: bitslice a 16-byte key block into round slot i
 * (bskey + 128*i), pre-applying the sbox NOT gates via toggle().
 * Not referenced by the code in this file. */
#define bitslicekey(i, key, bskey) \
xmm0 = *(aes_uint128_t *) (key + 0); \
shufb(&xmm0, M0); \
copy2(&xmm1, &xmm0); \
copy2(&xmm2, &xmm0); \
copy2(&xmm3, &xmm0); \
copy2(&xmm4, &xmm0); \
copy2(&xmm5, &xmm0); \
copy2(&xmm6, &xmm0); \
copy2(&xmm7, &xmm0); \
\
bitslice(xmm7, xmm6, xmm5, xmm4, xmm3, xmm2, xmm1, xmm0, t); \
\
toggle(&xmm6); \
toggle(&xmm5); \
toggle(&xmm1); \
toggle(&xmm0); \
\
*(aes_uint128_t *) (bskey + 0 + 128 * i) = xmm0; \
*(aes_uint128_t *) (bskey + 16 + 128 * i) = xmm1; \
*(aes_uint128_t *) (bskey + 32 + 128 * i) = xmm2; \
*(aes_uint128_t *) (bskey + 48 + 128 * i) = xmm3; \
*(aes_uint128_t *) (bskey + 64 + 128 * i) = xmm4; \
*(aes_uint128_t *) (bskey + 80 + 128 * i) = xmm5; \
*(aes_uint128_t *) (bskey + 96 + 128 * i) = xmm6; \
*(aes_uint128_t *) (bskey + 112 + 128 * i) = xmm7
/* bitslice: 8x transpose of the bit matrix held in x0..x7 via three
 * rounds of swapmove at distances 1, 2 and 4 (masks BS0/BS1/BS2).
 * Converts between byte-ordered and bitsliced representations; it is
 * its own inverse.  t is scratch. */
#define bitslice(x0, x1, x2, x3, x4, x5, x6, x7, t) \
swapmove(x0, x1, 1, BS0, t); \
swapmove(x2, x3, 1, BS0, t); \
swapmove(x4, x5, 1, BS0, t); \
swapmove(x6, x7, 1, BS0, t); \
\
swapmove(x0, x2, 2, BS1, t); \
swapmove(x1, x3, 2, BS1, t); \
swapmove(x4, x6, 2, BS1, t); \
swapmove(x5, x7, 2, BS1, t); \
\
swapmove(x0, x4, 4, BS2, t); \
swapmove(x1, x5, 4, BS2, t); \
swapmove(x2, x6, 4, BS2, t); \
swapmove(x3, x7, 4, BS2, t)
/* swapmove: exchange the bits of a and b selected by mask m across a
 * distance of n bit positions -- the classic bit-matrix transpose
 * primitive (t is scratch). */
#define swapmove(a, b, n, m, t) \
copy2(&t, &b); \
rshift64_littleendian(&t, n); \
xor2(&t, &a); \
and2(&t, &m); \
xor2(&a, &t); \
lshift64_littleendian(&t, n); \
xor2(&b, &t)
/* rotbyte: fixed byte permutation (ROTB mask, see consts.c) applied
 * during key expansion. */
#define rotbyte(x) shufb(x, ROTB) /* TODO: Make faster */
/* Macros used for encryption (and decryption) */
/* shiftrows: XOR in round key (i-1) from the bitsliced schedule, then
 * apply the ShiftRows byte permutation M (SR for middle rounds, SRM0
 * for the last) to every plane. */
#define shiftrows(x0, x1, x2, x3, x4, x5, x6, x7, i, M, bskey) \
xor2(&x0, (const aes_uint128_t *) (bskey + 128 * (i - 1) + 0)); \
shufb(&x0, M); \
xor2(&x1, (const aes_uint128_t *) (bskey + 128 * (i - 1) + 16)); \
shufb(&x1, M); \
xor2(&x2, (const aes_uint128_t *) (bskey + 128 * (i - 1) + 32)); \
shufb(&x2, M); \
xor2(&x3, (const aes_uint128_t *) (bskey + 128 * (i - 1) + 48)); \
shufb(&x3, M); \
xor2(&x4, (const aes_uint128_t *) (bskey + 128 * (i - 1) + 64)); \
shufb(&x4, M); \
xor2(&x5, (const aes_uint128_t *) (bskey + 128 * (i - 1) + 80)); \
shufb(&x5, M); \
xor2(&x6, (const aes_uint128_t *) (bskey + 128 * (i - 1) + 96)); \
shufb(&x6, M); \
xor2(&x7, (const aes_uint128_t *) (bskey + 128 * (i - 1) + 112)); \
shufb(&x7, M)
/* mixcolumns: bitsliced AES MixColumns.  shufd immediate 0x93 rotates
 * the four 32-bit words (3,0,1,2); 0x4e swaps the two halves (2,3,0,1).
 * Results land in t0..t7 (hence the plane re-ordering done by the
 * aesround() caller on the next round). */
#define mixcolumns(x0, x1, x2, x3, x4, x5, x6, x7, t0, t1, t2, t3, t4, t5, t6, \
t7) \
shufd(&t0, &x0, 0x93); \
shufd(&t1, &x1, 0x93); \
shufd(&t2, &x2, 0x93); \
shufd(&t3, &x3, 0x93); \
shufd(&t4, &x4, 0x93); \
shufd(&t5, &x5, 0x93); \
shufd(&t6, &x6, 0x93); \
shufd(&t7, &x7, 0x93); \
\
xor2(&x0, &t0); \
xor2(&x1, &t1); \
xor2(&x2, &t2); \
xor2(&x3, &t3); \
xor2(&x4, &t4); \
xor2(&x5, &t5); \
xor2(&x6, &t6); \
xor2(&x7, &t7); \
\
xor2(&t0, &x7); \
xor2(&t1, &x0); \
xor2(&t2, &x1); \
xor2(&t1, &x7); \
xor2(&t3, &x2); \
xor2(&t4, &x3); \
xor2(&t5, &x4); \
xor2(&t3, &x7); \
xor2(&t6, &x5); \
xor2(&t7, &x6); \
xor2(&t4, &x7); \
\
shufd(&x0, &x0, 0x4e); \
shufd(&x1, &x1, 0x4e); \
shufd(&x2, &x2, 0x4e); \
shufd(&x3, &x3, 0x4e); \
shufd(&x4, &x4, 0x4e); \
shufd(&x5, &x5, 0x4e); \
shufd(&x6, &x6, 0x4e); \
shufd(&x7, &x7, 0x4e); \
\
xor2(&t0, &x0); \
xor2(&t1, &x1); \
xor2(&t2, &x2); \
xor2(&t3, &x3); \
xor2(&t4, &x4); \
xor2(&t5, &x5); \
xor2(&t6, &x6); \
xor2(&t7, &x7)
/* aesround: one full middle round (AddRoundKey+ShiftRows, SubBytes,
 * MixColumns) on the bitsliced planes b0..b7; output ends up in t0..t7,
 * so callers alternate register banks each round. */
#define aesround(i, b0, b1, b2, b3, b4, b5, b6, b7, t0, t1, t2, t3, t4, t5, \
t6, t7, bskey) \
shiftrows(b0, b1, b2, b3, b4, b5, b6, b7, i, SR, bskey); \
sbox(b0, b1, b2, b3, b4, b5, b6, b7, t0, t1, t2, t3, t4, t5, t6, t7); \
mixcolumns(b0, b1, b4, b6, b3, b7, b2, b5, t0, t1, t2, t3, t4, t5, t6, t7)
/* lastround: round 10 -- no MixColumns; combined ShiftRows mask SRM0,
 * then XOR of the final round key (slot 10 of the schedule). */
#define lastround(b0, b1, b2, b3, b4, b5, b6, b7, t0, t1, t2, t3, t4, t5, t6, \
t7, bskey) \
shiftrows(b0, b1, b2, b3, b4, b5, b6, b7, 10, SRM0, bskey); \
sbox(b0, b1, b2, b3, b4, b5, b6, b7, t0, t1, t2, t3, t4, t5, t6, t7); \
xor2(&b0, (const aes_uint128_t *) (bskey + 128 * 10)); \
xor2(&b1, (const aes_uint128_t *) (bskey + 128 * 10 + 16)); \
xor2(&b4, (const aes_uint128_t *) (bskey + 128 * 10 + 32)); \
xor2(&b6, (const aes_uint128_t *) (bskey + 128 * 10 + 48)); \
xor2(&b3, (const aes_uint128_t *) (bskey + 128 * 10 + 64)); \
xor2(&b7, (const aes_uint128_t *) (bskey + 128 * 10 + 80)); \
xor2(&b2, (const aes_uint128_t *) (bskey + 128 * 10 + 96)); \
xor2(&b5, (const aes_uint128_t *) (bskey + 128 * 10 + 112))
/* sbox: bitsliced AES S-box as a boolean circuit -- input basis change,
 * inversion in GF(2^8) (as a tower over GF(2^4)/GF(2^2)), output basis
 * change.  t0..t3/s0..s3 are scratch planes. */
#define sbox(b0, b1, b2, b3, b4, b5, b6, b7, t0, t1, t2, t3, s0, s1, s2, s3) \
InBasisChange(b0, b1, b2, b3, b4, b5, b6, b7); \
Inv_GF256(b6, b5, b0, b3, b7, b1, b4, b2, t0, t1, t2, t3, s0, s1, s2, s3); \
OutBasisChange(b7, b1, b4, b2, b6, b5, b0, b3)
/* InBasisChange: linear map from the AES polynomial basis into the
 * basis used by the inversion circuit (XORs only). */
#define InBasisChange(b0, b1, b2, b3, b4, b5, b6, b7) \
xor2(&b5, &b6); \
xor2(&b2, &b1); \
xor2(&b5, &b0); \
xor2(&b6, &b2); \
xor2(&b3, &b0); \
\
xor2(&b6, &b3); \
xor2(&b3, &b7); \
xor2(&b3, &b4); \
xor2(&b7, &b5); \
xor2(&b3, &b1); \
\
xor2(&b4, &b5); \
xor2(&b2, &b7); \
xor2(&b1, &b5)
/* OutBasisChange: inverse linear map back to the AES basis (XORs only). */
#define OutBasisChange(b0, b1, b2, b3, b4, b5, b6, b7) \
xor2(&b0, &b6); \
xor2(&b1, &b4); \
xor2(&b2, &b0); \
xor2(&b4, &b6); \
xor2(&b6, &b1); \
\
xor2(&b1, &b5); \
xor2(&b5, &b3); \
xor2(&b2, &b5); \
xor2(&b3, &b7); \
xor2(&b7, &b5); \
\
xor2(&b4, &b7)
/* Mul_GF4: multiply (x1,x0) by (y1,y0) in GF(2^2); result replaces
 * (x1,x0).  t0 is scratch. */
#define Mul_GF4(x0, x1, y0, y1, t0) \
copy2(&t0, &y0); \
xor2(&t0, &y1); \
and2(&t0, &x0); \
xor2(&x0, &x1); \
and2(&x0, &y1); \
and2(&x1, &y0); \
xor2(&x0, &x1); \
xor2(&x1, &t0)
/* Mul_GF4_N: GF(2^2) multiply with the result complemented relative to
 * Mul_GF4 (note the swapped final XOR targets) -- absorbs NOT gates
 * from the surrounding circuit. */
#define Mul_GF4_N(x0, x1, y0, y1, t0) \
copy2(&t0, &y0); \
xor2(&t0, &y1); \
and2(&t0, &x0); \
xor2(&x0, &x1); \
and2(&x0, &y1); \
and2(&x1, &y0); \
xor2(&x1, &x0); \
xor2(&x0, &t0)
/* Mul_GF4_2: two GF(2^2) multiplications in parallel -- (x1,x0) and
 * (x3,x2) are both multiplied by the shared factor (y1,y0); t0/t1 are
 * scratch.  Used by Inv_GF16.
 * Fix: the first statement read `copy2(&t0, = y0);`, which is a syntax
 * error; restored to `copy2(&t0, &y0);`, matching the identical opening
 * step of Mul_GF4/Mul_GF4_N above. */
#define Mul_GF4_2(x0, x1, x2, x3, y0, y1, t0, t1) \
copy2(&t0, &y0); \
xor2(&t0, &y1); \
copy2(&t1, &t0); \
and2(&t0, &x0); \
and2(&t1, &x2); \
xor2(&x0, &x1); \
xor2(&x2, &x3); \
and2(&x0, &y1); \
and2(&x2, &y1); \
and2(&x1, &y0); \
and2(&x3, &y0); \
xor2(&x0, &x1); \
xor2(&x2, &x3); \
xor2(&x1, &t0); \
xor2(&x3, &t1)
/* Mul_GF16: multiply (x3..x0) by (y3..y0) in GF(2^4), built from three
 * GF(2^2) multiplications (Karatsuba); result replaces x0..x3.
 * t0..t3 are scratch; y0/y1 are clobbered. */
#define Mul_GF16(x0, x1, x2, x3, y0, y1, y2, y3, t0, t1, t2, t3) \
copy2(&t0, &x0); \
copy2(&t1, &x1); \
Mul_GF4(x0, x1, y0, y1, t2); \
xor2(&t0, &x2); \
xor2(&t1, &x3); \
xor2(&y0, &y2); \
xor2(&y1, &y3); \
Mul_GF4_N(t0, t1, y0, y1, t2); \
Mul_GF4(x2, x3, y2, y3, t3); \
\
xor2(&x0, &t0); \
xor2(&x2, &t0); \
xor2(&x1, &t1); \
xor2(&x3, &t1)
/* Mul_GF16_2: two GF(2^4) multiplications sharing the factor (y3..y0):
 * first (x3..x0), then (x7..x4).  y0..y3 are clobbered in the process
 * (restored just enough between halves via the inner XORs). */
#define Mul_GF16_2(x0, x1, x2, x3, x4, x5, x6, x7, y0, y1, y2, y3, t0, t1, t2, \
t3) \
copy2(&t0, &x0); \
copy2(&t1, &x1); \
Mul_GF4(x0, x1, y0, y1, t2); \
xor2(&t0, &x2); \
xor2(&t1, &x3); \
xor2(&y0, &y2); \
xor2(&y1, &y3); \
Mul_GF4_N(t0, t1, y0, y1, t3); \
Mul_GF4(x2, x3, y2, y3, t2); \
\
xor2(&x0, &t0); \
xor2(&x2, &t0); \
xor2(&x1, &t1); \
xor2(&x3, &t1); \
\
copy2(&t0, &x4); \
copy2(&t1, &x5); \
xor2(&t0, &x6); \
xor2(&t1, &x7); \
Mul_GF4_N(t0, t1, y0, y1, t3); \
Mul_GF4(x6, x7, y2, y3, t2); \
xor2(&y0, &y2); \
xor2(&y1, &y3); \
Mul_GF4(x4, x5, y0, y1, t3); \
\
xor2(&x4, &t0); \
xor2(&x6, &t0); \
xor2(&x5, &t1); \
xor2(&x7, &t1)
/* Inv_GF16: invert (x3..x0) in GF(2^4) using AND/OR/XOR gates followed
 * by a paired GF(2^2) multiply.  t0..t3 are scratch. */
#define Inv_GF16(x0, x1, x2, x3, t0, t1, t2, t3) \
copy2(&t0, &x1); \
copy2(&t1, &x0); \
and2(&t0, &x3); \
or2(&t1, &x2); \
copy2(&t2, &x1); \
copy2(&t3, &x0); \
or2(&t2, &x2); \
or2(&t3, &x3); \
xor2(&t2, &t3); \
\
xor2(&t0, &t2); \
xor2(&t1, &t2); \
\
Mul_GF4_2(x0, x1, x2, x3, t1, t0, t2, t3)
/* Inv_GF256: invert the bitsliced GF(2^8) element held in planes
 * x0..x7, expressed as a tower over GF(2^4) -- this is the nonlinear
 * core of the AES S-box circuit.  t0..t3 and s0..s3 are scratch;
 * gate sequence follows the Käsper-Schwabe depth-optimized circuit. */
#define Inv_GF256(x0, x1, x2, x3, x4, x5, x6, x7, t0, t1, t2, t3, s0, s1, s2, \
s3) \
copy2(&t3, &x4); \
copy2(&t2, &x5); \
copy2(&t1, &x1); \
copy2(&s1, &x7); \
copy2(&s0, &x0); \
\
xor2(&t3, &x6); \
xor2(&t2, &x7); \
xor2(&t1, &x3); \
xor2(&s1, &x6); \
xor2(&s0, &x2); \
\
copy2(&s2, &t3); \
copy2(&t0, &t2); \
copy2(&s3, &t3); \
\
or2(&t2, &t1); \
or2(&t3, &s0); \
xor2(&s3, &t0); \
and2(&s2, &s0); \
and2(&t0, &t1); \
xor2(&s0, &t1); \
and2(&s3, &s0); \
copy2(&s0, &x3); \
xor2(&s0, &x2); \
and2(&s1, &s0); \
xor2(&t3, &s1); \
xor2(&t2, &s1); \
copy2(&s1, &x4); \
xor2(&s1, &x5); \
copy2(&s0, &x1); \
copy2(&t1, &s1); \
xor2(&s0, &x0); \
or2(&t1, &s0); \
and2(&s1, &s0); \
xor2(&t0, &s1); \
xor2(&t3, &s3); \
xor2(&t2, &s2); \
xor2(&t1, &s3); \
xor2(&t0, &s2); \
xor2(&t1, &s2); \
copy2(&s0, &x7); \
copy2(&s1, &x6); \
copy2(&s2, &x5); \
copy2(&s3, &x4); \
and2(&s0, &x3); \
and2(&s1, &x2); \
and2(&s2, &x1); \
or2(&s3, &x0); \
xor2(&t3, &s0); \
xor2(&t2, &s1); \
xor2(&t1, &s2); \
xor2(&t0, &s3); \
\
copy2(&s0, &t3); \
xor2(&s0, &t2); \
and2(&t3, &t1); \
copy2(&s2, &t0); \
xor2(&s2, &t3); \
copy2(&s3, &s0); \
and2(&s3, &s2); \
xor2(&s3, &t2); \
copy2(&s1, &t1); \
xor2(&s1, &t0); \
xor2(&t3, &t2); \
and2(&s1, &t3); \
xor2(&s1, &t0); \
xor2(&t1, &s1); \
copy2(&t2, &s2); \
xor2(&t2, &s1); \
and2(&t2, &t0); \
xor2(&t1, &t2); \
xor2(&s2, &t2); \
and2(&s2, &s3); \
xor2(&s2, &s0); \
\
Mul_GF16_2(x0, x1, x2, x3, x4, x5, x6, x7, s3, s2, s1, t1, s0, t0, t2, t3)
#endif

View File

@ -1,28 +0,0 @@
#ifndef aes128ctr_nacl_consts_H
#define aes128ctr_nacl_consts_H
#include "int128.h"
/* The short names used throughout the implementation are #define'd to
 * namespaced symbols so the linker never sees generic identifiers such
 * as "M0" or "SR". */
#define ROTB crypto_stream_aes128ctr_nacl_ROTB
#define M0 crypto_stream_aes128ctr_nacl_M0
#define EXPB0 crypto_stream_aes128ctr_nacl_EXPB0
#define SWAP32 crypto_stream_aes128ctr_nacl_SWAP32
#define M0SWAP crypto_stream_aes128ctr_nacl_M0SWAP
#define SR crypto_stream_aes128ctr_nacl_SR
#define SRM0 crypto_stream_aes128ctr_nacl_SRM0
#define BS0 crypto_stream_aes128ctr_nacl_BS0
#define BS1 crypto_stream_aes128ctr_nacl_BS1
#define BS2 crypto_stream_aes128ctr_nacl_BS2
/* 16-byte shuffle masks consumed by shufb() (see consts.c). */
extern const unsigned char ROTB[16];
extern const unsigned char M0[16];
extern const unsigned char EXPB0[16];
extern const unsigned char SWAP32[16];
extern const unsigned char M0SWAP[16];
extern const unsigned char SR[16];
extern const unsigned char SRM0[16];
/* Bit-selection masks for the swapmove()/bitslice() transpose. */
extern const aes_uint128_t BS0;
extern const aes_uint128_t BS1;
extern const aes_uint128_t BS2;
#endif

View File

@ -1,28 +0,0 @@
#include "consts.h"
/* Shuffle masks: shufb(r, MASK) sets r[i] = old_r[MASK[i]], so each
 * table below is a source-index permutation of the 16 bytes. */
/* ROTB: byte permutation applied by rotbyte() during key expansion. */
const unsigned char ROTB[16] = {
0x00, 0x00, 0x00, 0x0c, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x04, 0x00, 0x00, 0x00, 0x08
};
/* M0: interleaving permutation used when entering bitsliced order. */
const unsigned char M0[16] = { 0x0f, 0x0b, 0x07, 0x03, 0x0e, 0x0a, 0x06, 0x02,
0x0d, 0x09, 0x05, 0x01, 0x0c, 0x08, 0x04, 0x00 };
/* EXPB0: broadcast of bytes 3/7/11/15 used by the key schedule; note
 * it is idempotent (every entry is a fixed point of the permutation). */
const unsigned char EXPB0[16] = { 0x03, 0x03, 0x03, 0x03, 0x07, 0x07,
0x07, 0x07, 0x0b, 0x0b, 0x0b, 0x0b,
0x0f, 0x0f, 0x0f, 0x0f };
/* SWAP32: byte-reverse each 32-bit word (endianness flip). */
const unsigned char SWAP32[16] = { 0x03, 0x02, 0x01, 0x00, 0x07, 0x06,
0x05, 0x04, 0x0b, 0x0a, 0x09, 0x08,
0x0f, 0x0e, 0x0d, 0x0c };
/* M0SWAP: M0 composed with the 32-bit byte swap (counter blocks). */
const unsigned char M0SWAP[16] = { 0x0c, 0x08, 0x04, 0x00, 0x0d, 0x09,
0x05, 0x01, 0x0e, 0x0a, 0x06, 0x02,
0x0f, 0x0b, 0x07, 0x03 };
/* SR: ShiftRows permutation for the middle rounds (bitsliced layout). */
const unsigned char SR[16] = { 0x01, 0x02, 0x03, 0x00, 0x06, 0x07, 0x04, 0x05,
0x0b, 0x08, 0x09, 0x0a, 0x0c, 0x0d, 0x0e, 0x0f };
/* SRM0: ShiftRows combined with the inverse M0 mapping, used by the
 * last round to leave output in normal byte order. */
const unsigned char SRM0[16] = {
0x0f, 0x0a, 0x05, 0x00, 0x0e, 0x09, 0x04, 0x03,
0x0d, 0x08, 0x07, 0x02, 0x0c, 0x0b, 0x06, 0x01
};
/* Bit masks selecting alternating groups of 1, 2 and 4 bits -- the
 * three swapmove() distances of the bitslice transpose. */
const aes_uint128_t BS0 = { { 0x5555555555555555ULL, 0x5555555555555555ULL } };
const aes_uint128_t BS1 = { { 0x3333333333333333ULL, 0x3333333333333333ULL } };
const aes_uint128_t BS2 = { { 0x0f0f0f0f0f0f0f0fULL, 0x0f0f0f0f0f0f0f0fULL } };

View File

@ -1,50 +0,0 @@
#ifndef aes128ctr_nacl_int128_H
#define aes128ctr_nacl_int128_H
#include <stdint.h>
#include "common.h"
/* A 128-bit value viewed as two 64-bit, four 32-bit or sixteen 8-bit
 * lanes; stands in for an SSE register in this portable port. */
typedef union {
uint64_t u64[2];
uint32_t u32[4];
uint8_t u8[16];
} aes_uint128_t;
/* Each helper's short name is #define'd to a namespaced symbol.
 * Operations write their result into the first argument. */
/* r ^= x */
#define xor2 crypto_stream_aes128ctr_nacl_xor2
void xor2(aes_uint128_t *r, const aes_uint128_t *x);
/* r &= x */
#define and2 crypto_stream_aes128ctr_nacl_and2
void and2(aes_uint128_t *r, const aes_uint128_t *x);
/* r |= x */
#define or2 crypto_stream_aes128ctr_nacl_or2
void or2(aes_uint128_t *r, const aes_uint128_t *x);
/* r = x */
#define copy2 crypto_stream_aes128ctr_nacl_copy2
void copy2(aes_uint128_t *r, const aes_uint128_t *x);
/* byte shuffle: r[i] = old_r[l[i]] (PSHUFB equivalent) */
#define shufb crypto_stream_aes128ctr_nacl_shufb
void shufb(aes_uint128_t *r, const unsigned char *l);
/* 32-bit word shuffle selected by immediate c (PSHUFD equivalent) */
#define shufd crypto_stream_aes128ctr_nacl_shufd
void shufd(aes_uint128_t *r, const aes_uint128_t *x, const unsigned int c);
/* logical right shift of each 32-bit lane by n bits */
#define rshift32_littleendian crypto_stream_aes128ctr_nacl_rshift32_littleendian
void rshift32_littleendian(aes_uint128_t *r, const unsigned int n);
/* logical right shift of each 64-bit lane by n bits */
#define rshift64_littleendian crypto_stream_aes128ctr_nacl_rshift64_littleendian
void rshift64_littleendian(aes_uint128_t *r, const unsigned int n);
/* logical left shift of each 64-bit lane by n bits */
#define lshift64_littleendian crypto_stream_aes128ctr_nacl_lshift64_littleendian
void lshift64_littleendian(aes_uint128_t *r, const unsigned int n);
/* r = ~r */
#define toggle crypto_stream_aes128ctr_nacl_toggle
void toggle(aes_uint128_t *r);
/* XOR the round-constant pattern into bytes 12..15 */
#define xor_rcon crypto_stream_aes128ctr_nacl_xor_rcon
void xor_rcon(aes_uint128_t *r);
/* add x to the 32-bit counter word held in bytes 12..15 */
#define add_uint32_big crypto_stream_aes128ctr_nacl_add_uint32_big
void add_uint32_big(aes_uint128_t *r, uint32_t x);
#endif

View File

@ -1,149 +0,0 @@
#include <stdint.h>
#include "common.h"
#include "int128.h"
/* Bitwise XOR: r ^= x, computed lane-by-lane over the two 64-bit halves. */
void
xor2(aes_uint128_t *r, const aes_uint128_t *x)
{
    int i;

    for (i = 0; i < 2; i++) {
        r->u64[i] ^= x->u64[i];
    }
}
/* Bitwise AND: r &= x, computed lane-by-lane over the two 64-bit halves. */
void
and2(aes_uint128_t *r, const aes_uint128_t *x)
{
    int i;

    for (i = 0; i < 2; i++) {
        r->u64[i] &= x->u64[i];
    }
}
/* Bitwise OR: r |= x, computed lane-by-lane over the two 64-bit halves. */
void
or2(aes_uint128_t *r, const aes_uint128_t *x)
{
    int i;

    for (i = 0; i < 2; i++) {
        r->u64[i] |= x->u64[i];
    }
}
/* 128-bit assignment: r = x, copied as two 64-bit lanes. */
void
copy2(aes_uint128_t *r, const aes_uint128_t *x)
{
    int i;

    for (i = 0; i < 2; i++) {
        r->u64[i] = x->u64[i];
    }
}
/* Byte shuffle (portable PSHUFB): r[i] = old_r[l[i]] for i in 0..15.
 * A snapshot of r is taken first so the permutation reads stable data. */
void
shufb(aes_uint128_t *r, const unsigned char *l)
{
    aes_uint128_t snapshot;
    int           i;

    copy2(&snapshot, r);
    for (i = 0; i < 16; i++) {
        r->u8[i] = snapshot.u8[l[i]];
    }
}
/* 32-bit word shuffle (portable PSHUFD): lane i of the result is lane
 * ((c >> 2*i) & 3) of x.  A temporary allows r and x to alias. */
void
shufd(aes_uint128_t *r, const aes_uint128_t *x, const unsigned int c)
{
    aes_uint128_t picked;
    int           i;

    for (i = 0; i < 4; i++) {
        picked.u32[i] = x->u32[(c >> (2 * i)) & 3];
    }
    copy2(r, &picked);
}
/* Logical right shift of each of the four 32-bit lanes by n bits,
 * going through little-endian loads/stores so the result is
 * byte-order independent. */
void
rshift32_littleendian(aes_uint128_t *r, const unsigned int n)
{
    unsigned char *rp = (unsigned char *) r;
    int            off;

    for (off = 0; off < 16; off += 4) {
        STORE32_LE(rp + off, LOAD32_LE(rp + off) >> n);
    }
}
/* Logical right shift of each of the two 64-bit lanes by n bits,
 * via little-endian loads/stores. */
void
rshift64_littleendian(aes_uint128_t *r, const unsigned int n)
{
    unsigned char *rp = (unsigned char *) r;
    int            off;

    for (off = 0; off < 16; off += 8) {
        STORE64_LE(rp + off, LOAD64_LE(rp + off) >> n);
    }
}
/* Logical left shift of each of the two 64-bit lanes by n bits,
 * via little-endian loads/stores. */
void
lshift64_littleendian(aes_uint128_t *r, const unsigned int n)
{
    unsigned char *rp = (unsigned char *) r;
    int            off;

    for (off = 0; off < 16; off += 8) {
        STORE64_LE(rp + off, LOAD64_LE(rp + off) << n);
    }
}
/* Bitwise complement of all 128 bits: r = ~r. */
void
toggle(aes_uint128_t *r)
{
    r->u64[0] = ~r->u64[0];
    r->u64[1] = ~r->u64[1];
}
/* XOR the constant 0xffffffff into the 32-bit word occupying bytes
 * 12..15 (little-endian view) -- the round-constant injection used by
 * the bitsliced key schedule. */
void
xor_rcon(aes_uint128_t *r)
{
    unsigned char *rp = (unsigned char *) r;

    STORE32_LE(rp + 12, LOAD32_LE(rp + 12) ^ 0xffffffff);
}
/* Add x to the 32-bit word occupying bytes 12..15, with wrap-around
 * (modulo 2^32).  Used to step the CTR block counter. */
void
add_uint32_big(aes_uint128_t *r, uint32_t x)
{
    unsigned char *rp = (unsigned char *) r;

    STORE32_LE(rp + 12, LOAD32_LE(rp + 12) + x);
}

View File

@ -1,31 +0,0 @@
#include "crypto_stream_aes128ctr.h"
#ifdef __GNUC__
# pragma GCC diagnostic ignored "-Wdeprecated-declarations"
#endif
/* One-shot AES-128-CTR keystream generation: expands the 16-byte key
 * k into a key schedule, then writes outlen bytes of keystream
 * (counter block n) into out. Always returns 0.
 *
 * The expanded key is wiped from the stack before returning so that
 * key-derived material does not linger in dead stack frames. */
int
crypto_stream_aes128ctr(unsigned char *out, unsigned long long outlen,
                        const unsigned char *n, const unsigned char *k)
{
    unsigned char           d[crypto_stream_aes128ctr_BEFORENMBYTES];
    volatile unsigned char *vd = d;
    size_t                  i;

    crypto_stream_aes128ctr_beforenm(d, k);
    crypto_stream_aes128ctr_afternm(out, outlen, n, d);
    /* Best-effort zeroization; the volatile-qualified stores keep the
     * compiler from eliding the wipe as a dead store. */
    for (i = 0; i < sizeof d; i++) {
        vd[i] = 0;
    }
    return 0;
}
/* One-shot AES-128-CTR encryption/decryption: expands the 16-byte key
 * k, then XORs inlen bytes of in with the keystream (counter block n)
 * into out. Always returns 0.
 *
 * The expanded key is wiped from the stack before returning so that
 * key-derived material does not linger in dead stack frames. */
int
crypto_stream_aes128ctr_xor(unsigned char *out, const unsigned char *in,
                            unsigned long long inlen, const unsigned char *n,
                            const unsigned char *k)
{
    unsigned char           d[crypto_stream_aes128ctr_BEFORENMBYTES];
    volatile unsigned char *vd = d;
    size_t                  i;

    crypto_stream_aes128ctr_beforenm(d, k);
    crypto_stream_aes128ctr_xor_afternm(out, in, inlen, n, d);
    /* Best-effort zeroization; the volatile-qualified stores keep the
     * compiler from eliding the wipe as a dead store. */
    for (i = 0; i < sizeof d; i++) {
        vd[i] = 0;
    }
    return 0;
}

View File

@ -1,195 +0,0 @@
/* Author: Peter Schwabe, ported from an assembly implementation by Emilia
* Käsper
* Date: 2009-03-19
* Public domain */
#include "common.h"
#include "consts.h"
#include "crypto_stream_aes128ctr.h"
#include "int128.h"
/*
 * Bitsliced AES-128-CTR with a precomputed key schedule: XORs `len`
 * bytes of `in` with the keystream into `out`.
 *
 * Parameters:
 *   out   - destination buffer (may produce ciphertext or plaintext)
 *   in    - source buffer, `len` bytes
 *   len   - number of bytes to process
 *   nonce - 16-byte counter block; the last 4 bytes are treated as a
 *           big-endian 32-bit block counter (see the STORE32_BE updates)
 *   c     - expanded key produced by crypto_stream_aes128ctr_beforenm()
 *
 * Processes 8 AES blocks (128 bytes) per iteration of the enc_block
 * loop; control flow uses goto labels, mirroring the assembly this was
 * ported from. Always returns 0.
 */
int
crypto_stream_aes128ctr_xor_afternm(unsigned char *out, const unsigned char *in,
                                    unsigned long long len,
                                    const unsigned char *nonce,
                                    const unsigned char *c)
{
    /* xmm0..xmm15 emulate the 16 SSE registers of the original
     * assembly implementation. */
    aes_uint128_t      xmm0;
    aes_uint128_t      xmm1;
    aes_uint128_t      xmm2;
    aes_uint128_t      xmm3;
    aes_uint128_t      xmm4;
    aes_uint128_t      xmm5;
    aes_uint128_t      xmm6;
    aes_uint128_t      xmm7;
    aes_uint128_t      xmm8;
    aes_uint128_t      xmm9;
    aes_uint128_t      xmm10;
    aes_uint128_t      xmm11;
    aes_uint128_t      xmm12;
    aes_uint128_t      xmm13;
    aes_uint128_t      xmm14;
    aes_uint128_t      xmm15;
    aes_uint128_t      nonce_stack; /* mutable working copy of the counter block */
    unsigned long long lensav;      /* bytes remaining in the final partial chunk */
    unsigned char      bl[128];     /* keystream staging buffer for the tail */
    unsigned char     *blp;
    unsigned char     *np;
    unsigned char      b;
    uint32_t           tmp;

    /* Copy nonce on the stack */
    copy2(&nonce_stack, (const aes_uint128_t *) (nonce + 0));
    np = (unsigned char *) &nonce_stack;

enc_block:

    /* Derive 8 consecutive counter blocks from the current counter. */
    xmm0 = *(aes_uint128_t *) (np + 0);
    copy2(&xmm1, &xmm0);
    shufb(&xmm1, SWAP32);
    copy2(&xmm2, &xmm1);
    copy2(&xmm3, &xmm1);
    copy2(&xmm4, &xmm1);
    copy2(&xmm5, &xmm1);
    copy2(&xmm6, &xmm1);
    copy2(&xmm7, &xmm1);

    add_uint32_big(&xmm1, 1);
    add_uint32_big(&xmm2, 2);
    add_uint32_big(&xmm3, 3);
    add_uint32_big(&xmm4, 4);
    add_uint32_big(&xmm5, 5);
    add_uint32_big(&xmm6, 6);
    add_uint32_big(&xmm7, 7);

    /* Reorder bytes into the layout expected by the bitsliced cipher. */
    shufb(&xmm0, M0);
    shufb(&xmm1, M0SWAP);
    shufb(&xmm2, M0SWAP);
    shufb(&xmm3, M0SWAP);
    shufb(&xmm4, M0SWAP);
    shufb(&xmm5, M0SWAP);
    shufb(&xmm6, M0SWAP);
    shufb(&xmm7, M0SWAP);

    /* Transpose the 8 blocks into bitsliced form, then run the 10
     * AES-128 rounds; each aesround alternates between the two
     * register sets (xmm0-7 and xmm8-15). */
    bitslice(xmm7, xmm6, xmm5, xmm4, xmm3, xmm2, xmm1, xmm0, xmm8);

    aesround(1, xmm0, xmm1, xmm2, xmm3, xmm4, xmm5, xmm6, xmm7, xmm8, xmm9,
             xmm10, xmm11, xmm12, xmm13, xmm14, xmm15, c);
    aesround(2, xmm8, xmm9, xmm10, xmm11, xmm12, xmm13, xmm14, xmm15, xmm0,
             xmm1, xmm2, xmm3, xmm4, xmm5, xmm6, xmm7, c);
    aesround(3, xmm0, xmm1, xmm2, xmm3, xmm4, xmm5, xmm6, xmm7, xmm8, xmm9,
             xmm10, xmm11, xmm12, xmm13, xmm14, xmm15, c);
    aesround(4, xmm8, xmm9, xmm10, xmm11, xmm12, xmm13, xmm14, xmm15, xmm0,
             xmm1, xmm2, xmm3, xmm4, xmm5, xmm6, xmm7, c);
    aesround(5, xmm0, xmm1, xmm2, xmm3, xmm4, xmm5, xmm6, xmm7, xmm8, xmm9,
             xmm10, xmm11, xmm12, xmm13, xmm14, xmm15, c);
    aesround(6, xmm8, xmm9, xmm10, xmm11, xmm12, xmm13, xmm14, xmm15, xmm0,
             xmm1, xmm2, xmm3, xmm4, xmm5, xmm6, xmm7, c);
    aesround(7, xmm0, xmm1, xmm2, xmm3, xmm4, xmm5, xmm6, xmm7, xmm8, xmm9,
             xmm10, xmm11, xmm12, xmm13, xmm14, xmm15, c);
    aesround(8, xmm8, xmm9, xmm10, xmm11, xmm12, xmm13, xmm14, xmm15, xmm0,
             xmm1, xmm2, xmm3, xmm4, xmm5, xmm6, xmm7, c);
    aesround(9, xmm0, xmm1, xmm2, xmm3, xmm4, xmm5, xmm6, xmm7, xmm8, xmm9,
             xmm10, xmm11, xmm12, xmm13, xmm14, xmm15, c);
    lastround(xmm8, xmm9, xmm10, xmm11, xmm12, xmm13, xmm14, xmm15, xmm0, xmm1,
              xmm2, xmm3, xmm4, xmm5, xmm6, xmm7, c);

    /* Transpose the keystream back out of bitsliced form; note the
     * permuted register order in the outputs below. */
    bitslice(xmm13, xmm10, xmm15, xmm11, xmm14, xmm12, xmm9, xmm8, xmm0);

    if (len < 128) {
        goto partial;
    }
    if (len == 128) {
        goto full;
    }
    /* More than 128 bytes remain: advance the big-endian block counter
     * by the 8 blocks just consumed, XOR, and loop. */
    tmp = LOAD32_BE(np + 12);
    tmp += 8;
    STORE32_BE(np + 12, tmp);

    xor2(&xmm8, (const aes_uint128_t *) (in + 0));
    xor2(&xmm9, (const aes_uint128_t *) (in + 16));
    xor2(&xmm12, (const aes_uint128_t *) (in + 32));
    xor2(&xmm14, (const aes_uint128_t *) (in + 48));
    xor2(&xmm11, (const aes_uint128_t *) (in + 64));
    xor2(&xmm15, (const aes_uint128_t *) (in + 80));
    xor2(&xmm10, (const aes_uint128_t *) (in + 96));
    xor2(&xmm13, (const aes_uint128_t *) (in + 112));

    *(aes_uint128_t *) (out + 0) = xmm8;
    *(aes_uint128_t *) (out + 16) = xmm9;
    *(aes_uint128_t *) (out + 32) = xmm12;
    *(aes_uint128_t *) (out + 48) = xmm14;
    *(aes_uint128_t *) (out + 64) = xmm11;
    *(aes_uint128_t *) (out + 80) = xmm15;
    *(aes_uint128_t *) (out + 96) = xmm10;
    *(aes_uint128_t *) (out + 112) = xmm13;

    len -= 128;
    in += 128;
    out += 128;

    goto enc_block;

partial:
    /* Fewer than 128 bytes remain: advance the counter by the number
     * of whole 16-byte blocks consumed, stage the keystream in bl, and
     * finish byte by byte. */
    lensav = len;
    len >>= 4;

    tmp = LOAD32_BE(np + 12);
    tmp += len;
    STORE32_BE(np + 12, tmp);

    blp = bl;
    *(aes_uint128_t *) (blp + 0) = xmm8;
    *(aes_uint128_t *) (blp + 16) = xmm9;
    *(aes_uint128_t *) (blp + 32) = xmm12;
    *(aes_uint128_t *) (blp + 48) = xmm14;
    *(aes_uint128_t *) (blp + 64) = xmm11;
    *(aes_uint128_t *) (blp + 80) = xmm15;
    *(aes_uint128_t *) (blp + 96) = xmm10;
    *(aes_uint128_t *) (blp + 112) = xmm13;

bytes:
    /* XOR the remaining lensav bytes one at a time. */
    if (lensav == 0) {
        goto end;
    }
    b = blp[0]; /* clang false positive */
    b ^= *(const unsigned char *) (in + 0);
    *(unsigned char *) (out + 0) = b;

    blp += 1;
    in += 1;
    out += 1;
    lensav -= 1;

    goto bytes;

full:
    /* Exactly 128 bytes remain: advance the counter by 8 blocks, XOR
     * the final full batch, and finish. */
    tmp = LOAD32_BE(np + 12);
    tmp += 8;
    STORE32_BE(np + 12, tmp);

    xor2(&xmm8, (const aes_uint128_t *) (in + 0));
    xor2(&xmm9, (const aes_uint128_t *) (in + 16));
    xor2(&xmm12, (const aes_uint128_t *) (in + 32));
    xor2(&xmm14, (const aes_uint128_t *) (in + 48));
    xor2(&xmm11, (const aes_uint128_t *) (in + 64));
    xor2(&xmm15, (const aes_uint128_t *) (in + 80));
    xor2(&xmm10, (const aes_uint128_t *) (in + 96));
    xor2(&xmm13, (const aes_uint128_t *) (in + 112));

    *(aes_uint128_t *) (out + 0) = xmm8;
    *(aes_uint128_t *) (out + 16) = xmm9;
    *(aes_uint128_t *) (out + 32) = xmm12;
    *(aes_uint128_t *) (out + 48) = xmm14;
    *(aes_uint128_t *) (out + 64) = xmm11;
    *(aes_uint128_t *) (out + 80) = xmm15;
    *(aes_uint128_t *) (out + 96) = xmm10;
    *(aes_uint128_t *) (out + 112) = xmm13;

end:
    return 0;
}

View File

@ -1,19 +0,0 @@
#include "crypto_stream_aes128ctr.h"
/* Return the key length in bytes (16). */
size_t
crypto_stream_aes128ctr_keybytes(void)
{
    return (size_t) crypto_stream_aes128ctr_KEYBYTES;
}
/* Return the nonce (counter block) length in bytes (16). */
size_t
crypto_stream_aes128ctr_noncebytes(void)
{
    return (size_t) crypto_stream_aes128ctr_NONCEBYTES;
}
/* Return the size in bytes of the expanded key produced by
 * crypto_stream_aes128ctr_beforenm() (1408). */
size_t
crypto_stream_aes128ctr_beforenmbytes(void)
{
    return (size_t) crypto_stream_aes128ctr_BEFORENMBYTES;
}

View File

@ -43,7 +43,6 @@ SODIUM_EXPORT = \
sodium/crypto_sign_ed25519.h \
sodium/crypto_sign_edwards25519sha512batch.h \
sodium/crypto_stream.h \
sodium/crypto_stream_aes128ctr.h \
sodium/crypto_stream_chacha20.h \
sodium/crypto_stream_salsa20.h \
sodium/crypto_stream_salsa2012.h \

View File

@ -60,7 +60,6 @@
# include "sodium/crypto_box_curve25519xchacha20poly1305.h"
# include "sodium/crypto_secretbox_xchacha20poly1305.h"
# include "sodium/crypto_pwhash_scryptsalsa208sha256.h"
# include "sodium/crypto_stream_aes128ctr.h"
# include "sodium/crypto_stream_salsa2012.h"
# include "sodium/crypto_stream_salsa208.h"
# include "sodium/crypto_stream_xchacha20.h"

View File

@ -1,68 +0,0 @@
#ifndef crypto_stream_aes128ctr_H
#define crypto_stream_aes128ctr_H

/*
 * WARNING: This is just a stream cipher. It is NOT authenticated encryption.
 * While it provides some protection against eavesdropping, it does NOT
 * provide any security against active attacks.
 * Unless you know what you're doing, what you are looking for is probably
 * the crypto_box functions.
 */

/* AES-128 in CTR mode. Every function below is marked deprecated. */

#include <stddef.h>

#include "export.h"

#ifdef __cplusplus
# ifdef __GNUC__
#  pragma GCC diagnostic ignored "-Wlong-long"
# endif
extern "C" {
#endif

/* Key length in bytes. */
#define crypto_stream_aes128ctr_KEYBYTES 16U
SODIUM_EXPORT
size_t crypto_stream_aes128ctr_keybytes(void);

/* Nonce (initial counter block) length in bytes. */
#define crypto_stream_aes128ctr_NONCEBYTES 16U
SODIUM_EXPORT
size_t crypto_stream_aes128ctr_noncebytes(void);

/* Size of the expanded key produced by _beforenm(). */
#define crypto_stream_aes128ctr_BEFORENMBYTES 1408U
SODIUM_EXPORT
size_t crypto_stream_aes128ctr_beforenmbytes(void);

/* Maximum bytes per call: 2^32 blocks of 16 bytes (the 32-bit counter
 * space), capped at SODIUM_SIZE_MAX. */
#define crypto_stream_aes128ctr_SIZE_MAX \
    SODIUM_MIN(SODIUM_SIZE_MAX, 16ULL * (1ULL << 32))

/* Write outlen bytes of keystream into out (one-shot key expansion). */
SODIUM_EXPORT
int crypto_stream_aes128ctr(unsigned char *out, unsigned long long outlen,
                            const unsigned char *n, const unsigned char *k)
            __attribute__ ((deprecated));

/* XOR inlen bytes of in with the keystream into out (encrypt/decrypt). */
SODIUM_EXPORT
int crypto_stream_aes128ctr_xor(unsigned char *out, const unsigned char *in,
                                unsigned long long inlen, const unsigned char *n,
                                const unsigned char *k)
            __attribute__ ((deprecated));

/* Precompute the expanded key schedule c from the 16-byte key k. */
SODIUM_EXPORT
int crypto_stream_aes128ctr_beforenm(unsigned char *c, const unsigned char *k)
            __attribute__ ((deprecated));

/* Keystream generation using a precomputed expanded key c. */
SODIUM_EXPORT
int crypto_stream_aes128ctr_afternm(unsigned char *out, unsigned long long len,
                                    const unsigned char *nonce, const unsigned char *c)
            __attribute__ ((deprecated));

/* XOR using a precomputed expanded key c. */
SODIUM_EXPORT
int crypto_stream_aes128ctr_xor_afternm(unsigned char *out, const unsigned char *in,
                                        unsigned long long len,
                                        const unsigned char *nonce,
                                        const unsigned char *c)
            __attribute__ ((deprecated));

#ifdef __cplusplus
}
#endif

#endif