crypto: x86/sha256 - Add parentheses around macros' single arguments

The macros FOUR_ROUNDS_AND_SCHED and DO_4ROUNDS rely on an
unexpected/undocumented behavior of the GNU assembler, which might
change in the future
(https://sourceware.org/bugzilla/show_bug.cgi?id=32073).

    M (1) (2) // 1 arg !? Future: 2 args
    M 1 + 2   // 1 arg !? Future: 3 args

    M 1 2     // 2 args

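The effect can be reproduced with a small self-contained macro (the
macro M below is a hypothetical stand-in for demonstration, not code
from the patched file); assembling it with current GNU as prints a
single argument for the first two invocations:

    .macro M p1=none, p2=none, p3=none
    .print "p1=\p1 p2=\p2 p3=\p3"   # parameters are substituted textually
    .endm

    M (1) (2)   # today: one argument; a future gas may pass two
    M 1 + 2     # today: one argument; a future gas may pass three
    M 1 2       # unambiguous: two arguments
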
Add parentheses around the single arguments to support future GNU
assembler and LLVM integrated assembler (when the IsOperator hack from
the following link is dropped).

Link: 055006475e
Signed-off-by: Fangrui Song <maskray@google.com>
Reviewed-by: Jan Beulich <jbeulich@suse.com>
Signed-off-by: Herbert Xu <herbert@gondor.apana.org.au>
Author:    Fangrui Song, 2024-08-13 21:48:02 -07:00
Committer: Herbert Xu
parent 86c85d6657
commit 3363c460ef

@@ -592,22 +592,22 @@ SYM_TYPED_FUNC_START(sha256_transform_rorx)
 	leaq	K256+0*32(%rip), INP	## reuse INP as scratch reg
 	vpaddd	(INP, SRND), X0, XFER
 	vmovdqa	XFER, 0*32+_XFER(%rsp, SRND)
-	FOUR_ROUNDS_AND_SCHED	_XFER + 0*32
+	FOUR_ROUNDS_AND_SCHED	(_XFER + 0*32)
 
 	leaq	K256+1*32(%rip), INP
 	vpaddd	(INP, SRND), X0, XFER
 	vmovdqa	XFER, 1*32+_XFER(%rsp, SRND)
-	FOUR_ROUNDS_AND_SCHED	_XFER + 1*32
+	FOUR_ROUNDS_AND_SCHED	(_XFER + 1*32)
 
 	leaq	K256+2*32(%rip), INP
 	vpaddd	(INP, SRND), X0, XFER
 	vmovdqa	XFER, 2*32+_XFER(%rsp, SRND)
-	FOUR_ROUNDS_AND_SCHED	_XFER + 2*32
+	FOUR_ROUNDS_AND_SCHED	(_XFER + 2*32)
 
 	leaq	K256+3*32(%rip), INP
 	vpaddd	(INP, SRND), X0, XFER
 	vmovdqa	XFER, 3*32+_XFER(%rsp, SRND)
-	FOUR_ROUNDS_AND_SCHED	_XFER + 3*32
+	FOUR_ROUNDS_AND_SCHED	(_XFER + 3*32)
 
 	add	$4*32, SRND
 	cmp	$3*4*32, SRND
@@ -618,12 +618,12 @@ SYM_TYPED_FUNC_START(sha256_transform_rorx)
 	leaq	K256+0*32(%rip), INP
 	vpaddd	(INP, SRND), X0, XFER
 	vmovdqa	XFER, 0*32+_XFER(%rsp, SRND)
-	DO_4ROUNDS	_XFER + 0*32
+	DO_4ROUNDS	(_XFER + 0*32)
 
 	leaq	K256+1*32(%rip), INP
 	vpaddd	(INP, SRND), X1, XFER
 	vmovdqa	XFER, 1*32+_XFER(%rsp, SRND)
-	DO_4ROUNDS	_XFER + 1*32
+	DO_4ROUNDS	(_XFER + 1*32)
 
 	add	$2*32, SRND
 	vmovdqa	X2, X0
@@ -651,8 +651,8 @@ SYM_TYPED_FUNC_START(sha256_transform_rorx)
 	xor	SRND, SRND
 .align 16
 .Lloop3:
-	DO_4ROUNDS	_XFER + 0*32 + 16
-	DO_4ROUNDS	_XFER + 1*32 + 16
+	DO_4ROUNDS	(_XFER + 0*32 + 16)
+	DO_4ROUNDS	(_XFER + 1*32 + 16)
 	add	$2*32, SRND
 	cmp	$4*4*32, SRND
 	jb	.Lloop3