Mirror of https://github.com/jedisct1/libsodium.git

10% speedup on AVX2 for BLAKE2b

Thanks to Shunsuke Shimizu (@grafi-tt)
Frank Denis 2019-05-03 20:14:05 +02:00
parent 8a1ac8e11f
commit 80206ada63
2 changed files with 71 additions and 71 deletions
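
The diff touches the two AVX2 BLAKE2b headers. In BLAKE2B_DIAG_V1 and BLAKE2B_UNDIAG_V1, the rows permuted between the column and diagonal half-rounds change from d/c/b to a/d/c, so row b is never moved. Row b is the last value produced by the column step and the first one consumed by the diagonal step (a = a + b + m), so leaving it in place appears to take the latency of a cross-lane _mm256_permute4x64_epi64 off the critical dependency chain, which is presumably where the roughly 10% comes from. This relabels the diagonal columns, shifted by one 64-bit lane, so the *_3 and *_4 message-load macros that feed the diagonal half-round are rewritten to deliver the same message words in the matching rotated order. A standalone sketch of the new rotation pattern follows the first file's hunk.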

blake2b-compress-avx2.h

@@ -68,16 +68,16 @@ LOADU64(const void *p)
 #define BLAKE2B_DIAG_V1(a, b, c, d) \
     do { \
-        d = _mm256_permute4x64_epi64(d, _MM_SHUFFLE(2, 1, 0, 3)); \
-        c = _mm256_permute4x64_epi64(c, _MM_SHUFFLE(1, 0, 3, 2)); \
-        b = _mm256_permute4x64_epi64(b, _MM_SHUFFLE(0, 3, 2, 1)); \
+        a = _mm256_permute4x64_epi64(a, _MM_SHUFFLE(2, 1, 0, 3)); \
+        d = _mm256_permute4x64_epi64(d, _MM_SHUFFLE(1, 0, 3, 2)); \
+        c = _mm256_permute4x64_epi64(c, _MM_SHUFFLE(0, 3, 2, 1)); \
     } while(0)
 #define BLAKE2B_UNDIAG_V1(a, b, c, d) \
     do { \
-        d = _mm256_permute4x64_epi64(d, _MM_SHUFFLE(0, 3, 2, 1)); \
-        c = _mm256_permute4x64_epi64(c, _MM_SHUFFLE(1, 0, 3, 2)); \
-        b = _mm256_permute4x64_epi64(b, _MM_SHUFFLE(2, 1, 0, 3)); \
+        a = _mm256_permute4x64_epi64(a, _MM_SHUFFLE(0, 3, 2, 1)); \
+        d = _mm256_permute4x64_epi64(d, _MM_SHUFFLE(1, 0, 3, 2)); \
+        c = _mm256_permute4x64_epi64(c, _MM_SHUFFLE(2, 1, 0, 3)); \
     } while(0)
 #include "blake2b-load-avx2.h"
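
For reference, here is a minimal standalone sketch (not part of the commit) of the rotation pattern introduced above. It applies the same _MM_SHUFFLE constants as the new BLAKE2B_DIAG_V1/BLAKE2B_UNDIAG_V1 to four rows holding distinct lane values, prints the diagonalized layout, and checks that un-diagonalizing restores the original state; row b is never permuted. The helper names and test values are illustrative only.

/* diag_demo.c -- illustrative only; build with: cc -O2 -mavx2 diag_demo.c */
#include <immintrin.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>

/* Same rotations as the new BLAKE2B_DIAG_V1: a, d and c move, b stays put. */
static void
diag(__m256i *a, __m256i *b, __m256i *c, __m256i *d)
{
    (void) b; /* row b is intentionally left in place */
    *a = _mm256_permute4x64_epi64(*a, _MM_SHUFFLE(2, 1, 0, 3));
    *d = _mm256_permute4x64_epi64(*d, _MM_SHUFFLE(1, 0, 3, 2));
    *c = _mm256_permute4x64_epi64(*c, _MM_SHUFFLE(0, 3, 2, 1));
}

/* Same rotations as the new BLAKE2B_UNDIAG_V1: the inverse of diag(). */
static void
undiag(__m256i *a, __m256i *b, __m256i *c, __m256i *d)
{
    (void) b;
    *a = _mm256_permute4x64_epi64(*a, _MM_SHUFFLE(0, 3, 2, 1));
    *d = _mm256_permute4x64_epi64(*d, _MM_SHUFFLE(1, 0, 3, 2));
    *c = _mm256_permute4x64_epi64(*c, _MM_SHUFFLE(2, 1, 0, 3));
}

int
main(void)
{
    /* Rows a..d of the BLAKE2b state, filled with the indices 0..15
       so the lane movement is visible in the output. */
    uint64_t in[4][4] = {
        {  0,  1,  2,  3 },
        {  4,  5,  6,  7 },
        {  8,  9, 10, 11 },
        { 12, 13, 14, 15 }
    };
    uint64_t out[4][4];
    __m256i  a = _mm256_loadu_si256((const __m256i *) in[0]);
    __m256i  b = _mm256_loadu_si256((const __m256i *) in[1]);
    __m256i  c = _mm256_loadu_si256((const __m256i *) in[2]);
    __m256i  d = _mm256_loadu_si256((const __m256i *) in[3]);

    diag(&a, &b, &c, &d);
    _mm256_storeu_si256((__m256i *) out[0], a);
    _mm256_storeu_si256((__m256i *) out[1], b);
    _mm256_storeu_si256((__m256i *) out[2], c);
    _mm256_storeu_si256((__m256i *) out[3], d);
    for (int r = 0; r < 4; r++) {
        printf("%2u %2u %2u %2u\n", (unsigned) out[r][0], (unsigned) out[r][1],
               (unsigned) out[r][2], (unsigned) out[r][3]);
    }

    undiag(&a, &b, &c, &d);
    _mm256_storeu_si256((__m256i *) out[0], a);
    _mm256_storeu_si256((__m256i *) out[1], b);
    _mm256_storeu_si256((__m256i *) out[2], c);
    _mm256_storeu_si256((__m256i *) out[3], d);
    printf("round trip restores the state: %s\n",
           memcmp(out, in, sizeof in) == 0 ? "yes" : "no");

    return 0;
}

Compiled with -mavx2, the first four printed rows show a, c and d rotated while b is unchanged, and the final line confirms that the UNDIAG rotations undo the DIAG rotations.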

blake2b-load-avx2.h

@@ -17,15 +17,15 @@
 #define BLAKE2B_LOAD_MSG_0_3(b0) \
     do { \
-        t0 = _mm256_unpacklo_epi64(m4, m5); \
-        t1 = _mm256_unpacklo_epi64(m6, m7); \
+        t0 = _mm256_unpacklo_epi64(m7, m4); \
+        t1 = _mm256_unpacklo_epi64(m5, m6); \
         b0 = _mm256_blend_epi32(t0, t1, 0xF0); \
     } while (0)
 #define BLAKE2B_LOAD_MSG_0_4(b0) \
     do { \
-        t0 = _mm256_unpackhi_epi64(m4, m5); \
-        t1 = _mm256_unpackhi_epi64(m6, m7); \
+        t0 = _mm256_unpackhi_epi64(m7, m4); \
+        t1 = _mm256_unpackhi_epi64(m5, m6); \
         b0 = _mm256_blend_epi32(t0, t1, 0xF0); \
     } while (0)
@@ -45,15 +45,15 @@
 #define BLAKE2B_LOAD_MSG_1_3(b0) \
     do { \
-        t0 = _mm256_shuffle_epi32(m0, _MM_SHUFFLE(1, 0, 3, 2)); \
-        t1 = _mm256_unpackhi_epi64(m5, m2); \
+        t0 = _mm256_unpackhi_epi64(m2, m0); \
+        t1 = _mm256_blend_epi32(m5, m0, 0x33); \
         b0 = _mm256_blend_epi32(t0, t1, 0xF0); \
     } while (0)
 #define BLAKE2B_LOAD_MSG_1_4(b0) \
     do { \
-        t0 = _mm256_unpacklo_epi64(m6, m1); \
-        t1 = _mm256_unpackhi_epi64(m3, m1); \
+        t0 = _mm256_alignr_epi8(m6, m1, 8); \
+        t1 = _mm256_blend_epi32(m3, m1, 0x33); \
         b0 = _mm256_blend_epi32(t0, t1, 0xF0); \
     } while (0)
@@ -73,15 +73,15 @@
 #define BLAKE2B_LOAD_MSG_2_3(b0) \
     do { \
-        t0 = _mm256_blend_epi32(m1, m5, 0x33); \
-        t1 = _mm256_unpackhi_epi64(m3, m4); \
+        t0 = _mm256_alignr_epi8(m5, m4, 8); \
+        t1 = _mm256_unpackhi_epi64(m1, m3); \
         b0 = _mm256_blend_epi32(t0, t1, 0xF0); \
     } while (0)
 #define BLAKE2B_LOAD_MSG_2_4(b0) \
     do { \
-        t0 = _mm256_unpacklo_epi64(m7, m3); \
-        t1 = _mm256_alignr_epi8(m2, m0, 8); \
+        t0 = _mm256_unpacklo_epi64(m2, m7); \
+        t1 = _mm256_blend_epi32(m0, m3, 0x33); \
         b0 = _mm256_blend_epi32(t0, t1, 0xF0); \
     } while (0)
@@ -101,15 +101,15 @@
 #define BLAKE2B_LOAD_MSG_3_3(b0) \
     do { \
-        t0 = _mm256_blend_epi32(m2, m1, 0x33); \
-        t1 = _mm256_blend_epi32(m7, m2, 0x33); \
+        t0 = _mm256_alignr_epi8(m1, m7, 8); \
+        t1 = _mm256_shuffle_epi32(m2, _MM_SHUFFLE(1, 0, 3, 2)); \
         b0 = _mm256_blend_epi32(t0, t1, 0xF0); \
     } while (0)
 #define BLAKE2B_LOAD_MSG_3_4(b0) \
     do { \
-        t0 = _mm256_unpacklo_epi64(m3, m5); \
-        t1 = _mm256_unpacklo_epi64(m0, m4); \
+        t0 = _mm256_unpacklo_epi64(m4, m3); \
+        t1 = _mm256_unpacklo_epi64(m5, m0); \
         b0 = _mm256_blend_epi32(t0, t1, 0xF0); \
     } while (0)
@@ -129,15 +129,15 @@
 #define BLAKE2B_LOAD_MSG_4_3(b0) \
     do { \
-        t0 = _mm256_blend_epi32(m5, m7, 0x33); \
-        t1 = _mm256_blend_epi32(m1, m3, 0x33); \
+        t0 = _mm256_alignr_epi8(m7, m1, 8); \
+        t1 = _mm256_alignr_epi8(m3, m5, 8); \
         b0 = _mm256_blend_epi32(t0, t1, 0xF0); \
     } while (0)
 #define BLAKE2B_LOAD_MSG_4_4(b0) \
     do { \
-        t0 = _mm256_alignr_epi8(m6, m0, 8); \
-        t1 = _mm256_blend_epi32(m6, m4, 0x33); \
+        t0 = _mm256_unpackhi_epi64(m6, m0); \
+        t1 = _mm256_unpacklo_epi64(m6, m4); \
         b0 = _mm256_blend_epi32(t0, t1, 0xF0); \
     } while (0)
@@ -157,15 +157,15 @@
 #define BLAKE2B_LOAD_MSG_5_3(b0) \
     do { \
-        t0 = _mm256_blend_epi32(m3, m2, 0x33); \
-        t1 = _mm256_unpackhi_epi64(m7, m0); \
+        t0 = _mm256_alignr_epi8(m2, m0, 8); \
+        t1 = _mm256_unpackhi_epi64(m3, m7); \
         b0 = _mm256_blend_epi32(t0, t1, 0xF0); \
     } while (0)
 #define BLAKE2B_LOAD_MSG_5_4(b0) \
     do { \
-        t0 = _mm256_unpackhi_epi64(m6, m2); \
-        t1 = _mm256_blend_epi32(m4, m7, 0x33); \
+        t0 = _mm256_unpackhi_epi64(m4, m6); \
+        t1 = _mm256_alignr_epi8(m7, m2, 8); \
         b0 = _mm256_blend_epi32(t0, t1, 0xF0); \
     } while (0)
@@ -185,15 +185,15 @@
 #define BLAKE2B_LOAD_MSG_6_3(b0) \
     do { \
-        t0 = _mm256_unpacklo_epi64(m0, m3); \
-        t1 = _mm256_shuffle_epi32(m4, _MM_SHUFFLE(1, 0, 3, 2)); \
+        t0 = _mm256_unpacklo_epi64(m4, m0); \
+        t1 = _mm256_blend_epi32(m4, m3, 0x33); \
         b0 = _mm256_blend_epi32(t0, t1, 0xF0); \
     } while (0)
 #define BLAKE2B_LOAD_MSG_6_4(b0) \
     do { \
-        t0 = _mm256_unpackhi_epi64(m3, m1); \
-        t1 = _mm256_blend_epi32(m5, m1, 0x33); \
+        t0 = _mm256_unpackhi_epi64(m5, m3); \
+        t1 = _mm256_shuffle_epi32(m1, _MM_SHUFFLE(1, 0, 3, 2)); \
         b0 = _mm256_blend_epi32(t0, t1, 0xF0); \
     } while (0)
@@ -213,15 +213,15 @@
 #define BLAKE2B_LOAD_MSG_7_3(b0) \
     do { \
-        t0 = _mm256_unpackhi_epi64(m2, m7); \
-        t1 = _mm256_unpacklo_epi64(m4, m1); \
+        t0 = _mm256_blend_epi32(m2, m1, 0x33); \
+        t1 = _mm256_alignr_epi8(m4, m7, 8); \
         b0 = _mm256_blend_epi32(t0, t1, 0xF0); \
     } while (0)
 #define BLAKE2B_LOAD_MSG_7_4(b0) \
     do { \
-        t0 = _mm256_unpacklo_epi64(m0, m2); \
-        t1 = _mm256_unpacklo_epi64(m3, m5); \
+        t0 = _mm256_unpacklo_epi64(m5, m0); \
+        t1 = _mm256_unpacklo_epi64(m2, m3); \
         b0 = _mm256_blend_epi32(t0, t1, 0xF0); \
     } while (0)
@@ -241,15 +241,15 @@
 #define BLAKE2B_LOAD_MSG_8_3(b0) \
     do { \
-        t0 = m6; \
-        t1 = _mm256_alignr_epi8(m5, m0, 8); \
+        t0 = _mm256_unpacklo_epi64(m5, m6); \
+        t1 = _mm256_unpackhi_epi64(m6, m0); \
         b0 = _mm256_blend_epi32(t0, t1, 0xF0); \
     } while (0)
 #define BLAKE2B_LOAD_MSG_8_4(b0) \
     do { \
-        t0 = _mm256_blend_epi32(m3, m1, 0x33); \
-        t1 = m2; \
+        t0 = _mm256_alignr_epi8(m1, m2, 8); \
+        t1 = _mm256_alignr_epi8(m2, m3, 8); \
         b0 = _mm256_blend_epi32(t0, t1, 0xF0); \
     } while (0)
@@ -269,15 +269,15 @@
 #define BLAKE2B_LOAD_MSG_9_3(b0) \
     do { \
-        t0 = _mm256_unpackhi_epi64(m7, m4); \
-        t1 = _mm256_unpackhi_epi64(m1, m6); \
+        t0 = _mm256_unpackhi_epi64(m6, m7); \
+        t1 = _mm256_unpackhi_epi64(m4, m1); \
         b0 = _mm256_blend_epi32(t0, t1, 0xF0); \
     } while (0)
 #define BLAKE2B_LOAD_MSG_9_4(b0) \
     do { \
-        t0 = _mm256_alignr_epi8(m7, m5, 8); \
-        t1 = _mm256_unpacklo_epi64(m6, m0); \
+        t0 = _mm256_blend_epi32(m5, m0, 0x33); \
+        t1 = _mm256_unpacklo_epi64(m7, m6); \
         b0 = _mm256_blend_epi32(t0, t1, 0xF0); \
     } while (0)
@@ -297,15 +297,15 @@
 #define BLAKE2B_LOAD_MSG_10_3(b0) \
     do { \
-        t0 = _mm256_unpacklo_epi64(m4, m5); \
-        t1 = _mm256_unpacklo_epi64(m6, m7); \
+        t0 = _mm256_unpacklo_epi64(m7, m4); \
+        t1 = _mm256_unpacklo_epi64(m5, m6); \
         b0 = _mm256_blend_epi32(t0, t1, 0xF0); \
     } while (0)
 #define BLAKE2B_LOAD_MSG_10_4(b0) \
     do { \
-        t0 = _mm256_unpackhi_epi64(m4, m5); \
-        t1 = _mm256_unpackhi_epi64(m6, m7); \
+        t0 = _mm256_unpackhi_epi64(m7, m4); \
+        t1 = _mm256_unpackhi_epi64(m5, m6); \
         b0 = _mm256_blend_epi32(t0, t1, 0xF0); \
     } while (0)
@@ -325,15 +325,15 @@
 #define BLAKE2B_LOAD_MSG_11_3(b0) \
     do { \
-        t0 = _mm256_shuffle_epi32(m0, _MM_SHUFFLE(1, 0, 3, 2)); \
-        t1 = _mm256_unpackhi_epi64(m5, m2); \
+        t0 = _mm256_unpackhi_epi64(m2, m0); \
+        t1 = _mm256_blend_epi32(m5, m0, 0x33); \
         b0 = _mm256_blend_epi32(t0, t1, 0xF0); \
     } while (0)
 #define BLAKE2B_LOAD_MSG_11_4(b0) \
     do { \
-        t0 = _mm256_unpacklo_epi64(m6, m1); \
-        t1 = _mm256_unpackhi_epi64(m3, m1); \
+        t0 = _mm256_alignr_epi8(m6, m1, 8); \
+        t1 = _mm256_blend_epi32(m3, m1, 0x33); \
        b0 = _mm256_blend_epi32(t0, t1, 0xF0); \
     } while (0)