Fix BN ASM for PowerPC

Author: Manuel Pégourié-Gonnard
Date:   2014-01-07 19:16:48 +01:00
parent 8b1b103418
commit 02d800c151


@@ -279,62 +279,83 @@
 #if defined(__MACH__) && defined(__APPLE__)
 #define MULADDC_INIT \
-    asm( "ld r3, %0 " :: "m" (s)); \
-    asm( "ld r4, %0 " :: "m" (d)); \
-    asm( "ld r5, %0 " :: "m" (c)); \
-    asm( "ld r6, %0 " :: "m" (b)); \
-    asm( "addi r3, r3, -8 " ); \
-    asm( "addi r4, r4, -8 " ); \
-    asm( "addic r5, r5, 0 " );
+    asm( \
+        " \
+        ld r3, %3; \
+        ld r4, %4; \
+        ld r5, %5; \
+        ld r6, %6; \
+        addi r3, r3, -8; \
+        addi r4, r4, -8; \
+        addic r5, r5, 0; \
+        "
 #define MULADDC_CORE \
-    asm( "ldu r7, 8(r3) " ); \
-    asm( "mulld r8, r7, r6 " ); \
-    asm( "mulhdu r9, r7, r6 " ); \
-    asm( "adde r8, r8, r5 " ); \
-    asm( "ld r7, 8(r4) " ); \
-    asm( "addze r5, r9 " ); \
-    asm( "addc r8, r8, r7 " ); \
-    asm( "stdu r8, 8(r4) " );
+        " \
+        ldu r7, 8(r3); \
+        mulld r8, r7, r6; \
+        mulhdu r9, r7, r6; \
+        adde r8, r8, r5; \
+        ld r7, 8(r4); \
+        addze r5, r9; \
+        addc r8, r8, r7; \
+        stdu r8, 8(r4); \
+        "
 #define MULADDC_STOP \
-    asm( "addze r5, r5 " ); \
-    asm( "addi r4, r4, 8 " ); \
-    asm( "addi r3, r3, 8 " ); \
-    asm( "std r5, %0 " : "=m" (c)); \
-    asm( "std r4, %0 " : "=m" (d)); \
-    asm( "std r3, %0 " : "=m" (s) :: \
-         "r3", "r4", "r5", "r6", "r7", "r8", "r9" );
+        " \
+        addze r5, r5; \
+        addi r4, r4, 8; \
+        addi r3, r3, 8; \
+        std r5, %0; \
+        std r4, %1; \
+        std r3, %2; \
+        " \
+        : "=m" (c), "=m" (d), "=m" (s) \
+        : "m" (s), "m" (d), "m" (c), "m" (b) \
+        : "r3", "r4", "r5", "r6", "r7", "r8", "r9" \
+    );
 #else
 #define MULADDC_INIT \
-    asm( "ld %%r3, %0 " :: "m" (s)); \
-    asm( "ld %%r4, %0 " :: "m" (d)); \
-    asm( "ld %%r5, %0 " :: "m" (c)); \
-    asm( "ld %%r6, %0 " :: "m" (b)); \
-    asm( "addi %r3, %r3, -8 " ); \
-    asm( "addi %r4, %r4, -8 " ); \
-    asm( "addic %r5, %r5, 0 " );
+    asm( \
+        " \
+        ld %%r3, %3; \
+        ld %%r4, %4; \
+        ld %%r5, %5; \
+        ld %%r6, %6; \
+        addi %%r3, %%r3, -8; \
+        addi %%r4, %%r4, -8; \
+        addic %%r5, %%r5, 0; \
+        "
 #define MULADDC_CORE \
-    asm( "ldu %r7, 8(%r3) " ); \
-    asm( "mulld %r8, %r7, %r6 " ); \
-    asm( "mulhdu %r9, %r7, %r6 " ); \
-    asm( "adde %r8, %r8, %r5 " ); \
-    asm( "ld %r7, 8(%r4) " ); \
-    asm( "addze %r5, %r9 " ); \
-    asm( "addc %r8, %r8, %r7 " ); \
-    asm( "stdu %r8, 8(%r4) " );
+        " \
+        ldu %%r7, 8(%%r3); \
+        mulld %%r8, %%r7, %%r6; \
+        mulhdu %%r9, %%r7, %%r6; \
+        adde %%r8, %%r8, %%r5; \
+        ld %%r7, 8(%%r4); \
+        addze %%r5, %%r9; \
+        addc %%r8, %%r8, %%r7; \
+        stdu %%r8, 8(%%r4); \
+        "
 #define MULADDC_STOP \
-    asm( "addze %r5, %r5 " ); \
-    asm( "addi %r4, %r4, 8 " ); \
-    asm( "addi %r3, %r3, 8 " ); \
-    asm( "std %%r5, %0 " : "=m" (c)); \
-    asm( "std %%r4, %0 " : "=m" (d)); \
-    asm( "std %%r3, %0 " : "=m" (s) :: \
-         "r3", "r4", "r5", "r6", "r7", "r8", "r9" );
+        " \
+        addze %%r5, %%r5; \
+        addi %%r4, %%r4, 8; \
+        addi %%r3, %%r3, 8; \
+        std %%r5, %0; \
+        std %%r4, %1; \
+        std %%r3, %2; \
+        " \
+        : "=m" (c), "=m" (d), "=m" (s) \
+        : "m" (s), "m" (d), "m" (c), "m" (b) \
+        : "r3", "r4", "r5", "r6", "r7", "r8", "r9" \
+    );
 #endif
@@ -343,62 +364,82 @@
 #if defined(__MACH__) && defined(__APPLE__)
 #define MULADDC_INIT \
-    asm( "lwz r3, %0 " :: "m" (s)); \
-    asm( "lwz r4, %0 " :: "m" (d)); \
-    asm( "lwz r5, %0 " :: "m" (c)); \
-    asm( "lwz r6, %0 " :: "m" (b)); \
-    asm( "addi r3, r3, -4 " ); \
-    asm( "addi r4, r4, -4 " ); \
-    asm( "addic r5, r5, 0 " );
+    asm( \
+        " \
+        lwz r3, %3; \
+        lwz r4, %4; \
+        lwz r5, %5; \
+        lwz r6, %6; \
+        addi r3, r3, -4; \
+        addi r4, r4, -4; \
+        addic r5, r5, 0; \
+        "
 #define MULADDC_CORE \
-    asm( "lwzu r7, 4(r3) " ); \
-    asm( "mullw r8, r7, r6 " ); \
-    asm( "mulhwu r9, r7, r6 " ); \
-    asm( "adde r8, r8, r5 " ); \
-    asm( "lwz r7, 4(r4) " ); \
-    asm( "addze r5, r9 " ); \
-    asm( "addc r8, r8, r7 " ); \
-    asm( "stwu r8, 4(r4) " );
+        " \
+        lwzu r7, 4(r3); \
+        mullw r8, r7, r6; \
+        mulhwu r9, r7, r6; \
+        adde r8, r8, r5; \
+        lwz r7, 4(r4); \
+        addze r5, r9; \
+        addc r8, r8, r7; \
+        stwu r8, 4(r4); \
+        "
 #define MULADDC_STOP \
-    asm( "addze r5, r5 " ); \
-    asm( "addi r4, r4, 4 " ); \
-    asm( "addi r3, r3, 4 " ); \
-    asm( "stw r5, %0 " : "=m" (c)); \
-    asm( "stw r4, %0 " : "=m" (d)); \
-    asm( "stw r3, %0 " : "=m" (s) :: \
-         "r3", "r4", "r5", "r6", "r7", "r8", "r9" );
+        " \
+        addze r5, r5; \
+        addi r4, r4, 4; \
+        addi r3, r3, 4; \
+        stw r5, %0; \
+        stw r4, %1; \
+        stw r3, %2; \
+        " \
+        : "=m" (c), "=m" (d), "=m" (s) \
+        : "m" (s), "m" (d), "m" (c), "m" (b) \
+        : "r3", "r4", "r5", "r6", "r7", "r8", "r9" \
+    );
 #else
 #define MULADDC_INIT \
-    asm( "lwz %%r3, %0 " :: "m" (s)); \
-    asm( "lwz %%r4, %0 " :: "m" (d)); \
-    asm( "lwz %%r5, %0 " :: "m" (c)); \
-    asm( "lwz %%r6, %0 " :: "m" (b)); \
-    asm( "addi %r3, %r3, -4 " ); \
-    asm( "addi %r4, %r4, -4 " ); \
-    asm( "addic %r5, %r5, 0 " );
+    asm( \
+        " \
+        lwz %%r3, %3; \
+        lwz %%r4, %4; \
+        lwz %%r5, %5; \
+        lwz %%r6, %6; \
+        addi %%r3, %%r3, -4; \
+        addi %%r4, %%r4, -4; \
+        addic %%r5, %%r5, 0; \
+        "
 #define MULADDC_CORE \
-    asm( "lwzu %r7, 4(%r3) " ); \
-    asm( "mullw %r8, %r7, %r6 " ); \
-    asm( "mulhwu %r9, %r7, %r6 " ); \
-    asm( "adde %r8, %r8, %r5 " ); \
-    asm( "lwz %r7, 4(%r4) " ); \
-    asm( "addze %r5, %r9 " ); \
-    asm( "addc %r8, %r8, %r7 " ); \
-    asm( "stwu %r8, 4(%r4) " );
+        " \
+        lwzu %%r7, 4(%%r3); \
+        mullw %%r8, %%r7, %%r6; \
+        mulhwu %%r9, %%r7, %%r6; \
+        adde %%r8, %%r8, %%r5; \
+        lwz %%r7, 4(%%r4); \
+        addze %%r5, %%r9; \
+        addc %%r8, %%r8, %%r7; \
+        stwu %%r8, 4(%%r4); \
+        "
 #define MULADDC_STOP \
-    asm( "addze %r5, %r5 " ); \
-    asm( "addi %r4, %r4, 4 " ); \
-    asm( "addi %r3, %r3, 4 " ); \
-    asm( "stw %%r5, %0 " : "=m" (c)); \
-    asm( "stw %%r4, %0 " : "=m" (d)); \
-    asm( "stw %%r3, %0 " : "=m" (s) :: \
-         "r3", "r4", "r5", "r6", "r7", "r8", "r9" );
+        " \
+        addze %%r5, %%r5; \
+        addi %%r4, %%r4, 4; \
+        addi %%r3, %%r3, 4; \
+        stw %%r5, %0; \
+        stw %%r4, %1; \
+        stw %%r3, %2; \
+        " \
+        : "=m" (c), "=m" (d), "=m" (s) \
+        : "m" (s), "m" (d), "m" (c), "m" (b) \
+        : "r3", "r4", "r5", "r6", "r7", "r8", "r9" \
+    );
 #endif
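
For context, a minimal sketch of how the MULADDC_INIT / MULADDC_CORE / MULADDC_STOP trio is consumed: the caller runs d[] += s[] * b limb by limb with a running carry in c, which is the contract the PPC asm above implements with r3 = s, r4 = d, r5 = c, r6 = b. The portable macro bodies, the mul_add() helper and the 32-bit limb type below are illustrative stand-ins for this sketch, not the library's own code.

    #include <stdint.h>
    #include <inttypes.h>
    #include <stddef.h>
    #include <stdio.h>

    /*
     * Illustrative stand-ins for the MULADDC_* macros (NOT the library's
     * portable implementation): 32-bit limbs, 64-bit intermediate product.
     * Contract, as in the asm above: *d += *s * b + c, advance s and d,
     * leave the new carry in c.
     */
    #define MULADDC_INIT                            \
        {                                           \
            uint64_t t;

    #define MULADDC_CORE                            \
            t = (uint64_t) *s++ * b + c + *d;       \
            c = (uint32_t)( t >> 32 );              \
            *d++ = (uint32_t) t;

    #define MULADDC_STOP                            \
        }

    /* d[0..n-1] += s[0..n-1] * b; returns the carry out of the top limb. */
    static uint32_t mul_add( size_t n, const uint32_t *s, uint32_t *d, uint32_t b )
    {
        uint32_t c = 0;

        while( n-- > 0 )
        {
            MULADDC_INIT
            MULADDC_CORE
            MULADDC_STOP
        }

        return( c );
    }

    int main( void )
    {
        uint32_t s[2] = { 0xFFFFFFFF, 0x00000001 };  /* value 0x1FFFFFFFF */
        uint32_t d[2] = { 0x00000001, 0x00000000 };  /* value 1           */
        uint32_t c = mul_add( 2, s, d, 2 );

        /* 1 + 0x1FFFFFFFF * 2 = 0x3FFFFFFFF -> d = { FFFFFFFF, 00000003 }, c = 0 */
        printf( "d = %08" PRIX32 " %08" PRIX32 ", carry = %08" PRIX32 "\n",
                d[1], d[0], c );
        return( 0 );
    }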