# ---------------------------------------------------------------------------
# Tail of float_dotprod_sse (x86-64 SysV, AT&T/GAS syntax).
# NOTE(review): this region contained leftover unified-diff markers
# ('-'/'+' prefixed lines) committed straight into the source, which made
# the file unassemblable.  The '+' side of that patch is applied here:
# local labels carry the GAS '.L' prefix (kept out of the object's symbol
# table) and an ELF GNU-stack note marks the stack non-executable.
#
# Register roles inferred from the visible code — the function prologue and
# parts of both loop bodies are elided from this chunk, so confirm against
# the full file:
#   %rdi, %rsi : pointers into the two float arrays
#   %rax       : remaining 4-float blocks for the alignment pre-loop
#   %rdx       : block counter for the unrolled main loop
#   %xmm4/%xmm6: running partial-sum accumulators
# ---------------------------------------------------------------------------
        jmp     .L1_test                # counter test first: loop may run 0x
        .p2align 4                      # align hot loop top to 16 bytes
.Loop1:                                 # pre-loop: one 4-float block per iter
        movaps  (%rsi), %xmm0
        mulps   (%rdi), %xmm0           # xmm0 = a[i..i+3] * b[i..i+3]
        add     $0x10, %rdi             # NOTE(review): %rsi is not advanced
                                        # in the visible lines — verify the
                                        # elided part of this loop body
        addps   %xmm0, %xmm4            # accumulate into xmm4
.L1_test:
        dec     %rax
        jge     .Loop1

        # set up for primary loop which is unrolled 4 times
        movaps  %xmm5, %xmm7
        shr     $2, %rdx                # n_4_float_blocks / 4
        je      .Lcleanup               # if zero, take short path
        # finish setup and loop priming
        # hence enter loop at top
        .p2align 4
.Loop2:                                 # main loop, unrolled x4; NOTE(review):
                                        # body is partly elided in this chunk
                                        # (no visible reload of %xmm0)
        mulps   (%rdi), %xmm0
        addps   %xmm2, %xmm6
        movaps  0x20(%rsi), %xmm2
        add     $0x40, %rdi             # advance both pointers by 16 floats
        add     $0x40, %rsi
        dec     %rdx
        jne     .Loop2

        # OK, now we've done with all the multiplies, but
        # we still need to handle the unaccumulated
        # to compute a "horizontal add" across xmm4.
        # This is a fairly nasty operation...
.Lcleanup:                              # xmm4 = d1 d2 d3 d4
        xorps   %xmm0, %xmm0            # xmm0 = 0 0 0 0 (may be unnecessary)
        movhlps %xmm4, %xmm0            # xmm0 = 0 0 d1 d2
        addps   %xmm4, %xmm0            # xmm0 = d1 d2 d1+d3 d2+d4
        FUNC_TAIL(float_dotprod_sse)    # epilogue/size directives via macro
        .ident  "Hand coded x86_64 SSE assembly"

# Mark the stack non-executable on ELF targets (silences GNU ld's
# "missing .note.GNU-stack" executable-stack warning).
# NOTE(review): the #if guard requires C preprocessing — confirm this file
# is built as a .S (preprocessed) source; in plain GAS, '#' lines are
# comments and the guard is inert.
#if defined(__linux__) && defined(__ELF__)
.section .note.GNU-stack,"",%progbits
#endif