VirtualBox

source: vbox/trunk/src/libs/openssl-3.1.0/crypto/bn/asm/x86_64-mont5.pl@ 99371

Last change on this file since 99371 was 99366, checked in by vboxsync, 23 months ago

openssl-3.1.0: Applied and adjusted our OpenSSL changes to 3.0.7. bugref:10418

  • Property svn:executable set to *
File size: 82.0 KB
 
1#! /usr/bin/env perl
2# Copyright 2011-2022 The OpenSSL Project Authors. All Rights Reserved.
3#
4# Licensed under the Apache License 2.0 (the "License"). You may not use
5# this file except in compliance with the License. You can obtain a copy
6# in the file LICENSE in the source distribution or at
7# https://www.openssl.org/source/license.html
8
9
10# ====================================================================
11# Written by Andy Polyakov <[email protected]> for the OpenSSL
12# project. The module is, however, dual licensed under OpenSSL and
13# CRYPTOGAMS licenses depending on where you obtain it. For further
14# details see http://www.openssl.org/~appro/cryptogams/.
15# ====================================================================
16
17# August 2011.
18#
19# Companion to x86_64-mont.pl that optimizes cache-timing attack
20# countermeasures. The subroutines are produced by replacing bp[i]
21# references in their x86_64-mont.pl counterparts with cache-neutral
22# references in their x86_64-mont.pl counterparts with cache-neutral
23# references to a powers table computed in BN_mod_exp_mont_consttime.
24# In addition, a subroutine that scatters elements of the powers table
25# is implemented, so that scatter-/gathering can be tuned without
25# modifying bn_exp.c.
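#
# [Editorial sketch, not part of the upstream module.] The cache-neutral gather
# described above boils down to touching every entry of the powers table and
# masking out all but the requested one. A minimal plain-Perl model of that
# idea; the sub and its argument names are illustrative and unused by this file:
sub gather_ct_sketch {
	my ($table, $idx) = @_;			# $table: ref to 32 limbs, $idx: 0..31
	my $acc = 0;
	for my $k (0 .. $#{$table}) {
		my $mask = ($k == $idx) ? ~0 : 0;	# all-ones only for the wanted entry
		$acc |= $table->[$k] & $mask;		# every entry is read regardless of $idx
	}
	return $acc;				# the selected limb, fetched with an
}						# index-independent access pattern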
26
27# August 2013.
28#
29# Add MULX/AD*X code paths and additional interfaces to optimize for
30# the branch prediction unit. For input lengths that are multiples of 8
31# the np argument is not just the modulus value, but one interleaved
32# with 0. This is to optimize post-condition...
33
34# $output is the last argument if it looks like a file (it has an extension)
35# $flavour is the first argument if it doesn't look like a file
36$output = $#ARGV >= 0 && $ARGV[$#ARGV] =~ m|\.\w+$| ? pop : undef;
37$flavour = $#ARGV >= 0 && $ARGV[0] !~ m|\.| ? shift : undef;
38
39$win64=0; $win64=1 if ($flavour =~ /[nm]asm|mingw64/ || $output =~ /\.asm$/);
40
41$0 =~ m/(.*[\/\\])[^\/\\]+$/; $dir=$1;
42( $xlate="${dir}x86_64-xlate.pl" and -f $xlate ) or
43( $xlate="${dir}../../perlasm/x86_64-xlate.pl" and -f $xlate) or
44die "can't locate x86_64-xlate.pl";
45
46open OUT,"| \"$^X\" \"$xlate\" $flavour \"$output\""
47 or die "can't call $xlate: $!";
48*STDOUT=*OUT;
49
50if (`$ENV{CC} -Wa,-v -c -o /dev/null -x assembler /dev/null 2>&1`
51 =~ /GNU assembler version ([2-9]\.[0-9]+)/) {
52 $addx = ($1>=2.23);
53}
54
55if (!$addx && $win64 && ($flavour =~ /nasm/ || $ENV{ASM} =~ /nasm/) &&
56 `nasm -v 2>&1` =~ /NASM version ([2-9]\.[0-9]+)/) {
57 $addx = ($1>=2.10);
58}
59
60if (!$addx && $win64 && ($flavour =~ /masm/ || $ENV{ASM} =~ /ml64/) &&
61 `ml64 2>&1` =~ /Version ([0-9]+)\./) {
62 $addx = ($1>=12);
63}
64
65if (!$addx && `$ENV{CC} -v 2>&1` =~ /((?:clang|LLVM) version|.*based on LLVM) ([0-9]+)\.([0-9]+)/) {
66 my $ver = $2 + $3/100.0; # 3.1->3.01, 3.10->3.10
67 $addx = ($ver>=3.03);
68}
69
70# int bn_mul_mont_gather5(
71$rp="%rdi"; # BN_ULONG *rp,
72$ap="%rsi"; # const BN_ULONG *ap,
73$bp="%rdx"; # const BN_ULONG *bp,
74$np="%rcx"; # const BN_ULONG *np,
75$n0="%r8"; # const BN_ULONG *n0,
76$num="%r9"; # int num,
77 # int idx); # 0 to 2^5-1, "index" in $bp holding
78 # pre-computed powers of a', interlaced
79 # in such manner that b[0] is $bp[idx],
80 # b[1] is [2^5+idx], etc.
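#
# [Editorial sketch.] Per the layout above, limb j of the selected power sits
# at bp[j*2^5 + idx]. A hypothetical helper (not used by this file) giving the
# corresponding byte offset:
sub power_limb_offset_sketch {
	my ($j, $idx) = @_;		# limb index within the number, power index 0..31
	return ($j*32 + $idx)*8;	# 8-byte BN_ULONGs, 32 interleaved powers per limb
}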
81$lo0="%r10";
82$hi0="%r11";
83$hi1="%r13";
84$i="%r14";
85$j="%r15";
86$m0="%rbx";
87$m1="%rbp";
88
89$code=<<___;
90.text
91
92.extern OPENSSL_ia32cap_P
93
94.globl bn_mul_mont_gather5
95.type bn_mul_mont_gather5,\@function,6
96.align 64
97bn_mul_mont_gather5:
98.cfi_startproc
99 mov ${num}d,${num}d
100 mov %rsp,%rax
101.cfi_def_cfa_register %rax
102 test \$7,${num}d
103 jnz .Lmul_enter
104___
105$code.=<<___ if ($addx);
106 mov OPENSSL_ia32cap_P+8(%rip),%r11d
107___
108$code.=<<___;
109 jmp .Lmul4x_enter
110
111.align 16
112.Lmul_enter:
113 movd `($win64?56:8)`(%rsp),%xmm5 # load 7th argument
114 push %rbx
115.cfi_push %rbx
116 push %rbp
117.cfi_push %rbp
118 push %r12
119.cfi_push %r12
120 push %r13
121.cfi_push %r13
122 push %r14
123.cfi_push %r14
124 push %r15
125.cfi_push %r15
126
127 neg $num
128 mov %rsp,%r11
129 lea -280(%rsp,$num,8),%r10 # future alloca(8*(num+2)+256+8)
130 neg $num # restore $num
131 and \$-1024,%r10 # minimize TLB usage
132
133 # An OS-agnostic version of __chkstk.
134 #
135 # Some OSes (Windows) insist on the stack being "wired" to
136 # physical memory in a strictly sequential manner, i.e. if a stack
137 # allocation spans two pages, then a reference to the farthest one
138 # can be punished with a SEGV. But page walking can do good even on
139 # other OSes, because it guarantees that a villain thread hits
140 # the guard page before it can do damage to an innocent one...
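	#
	# [Editorial sketch, assembler comment only, not from the upstream
	# source.] In C-like pseudocode the walk below is roughly:
	#
	#     p = old_rsp; while (p > new_rsp) { p -= 4096; touch(p); }
	#
	# i.e. read one word per page on the way down so the guard page is
	# always hit in address order; "touch" and the names are illustrative.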
141 sub %r10,%r11
142 and \$-4096,%r11
143 lea (%r10,%r11),%rsp
144 mov (%rsp),%r11
145 cmp %r10,%rsp
146 ja .Lmul_page_walk
147 jmp .Lmul_page_walk_done
148
149.Lmul_page_walk:
150 lea -4096(%rsp),%rsp
151 mov (%rsp),%r11
152 cmp %r10,%rsp
153 ja .Lmul_page_walk
154.Lmul_page_walk_done:
155
156 lea .Linc(%rip),%r10
157 mov %rax,8(%rsp,$num,8) # tp[num+1]=%rsp
158.cfi_cfa_expression %rsp+8,$num,8,mul,plus,deref,+8
159.Lmul_body:
160
161 lea 128($bp),%r12 # reassign $bp (+size optimization)
162___
163 $bp="%r12";
164 $STRIDE=2**5*8; # 5 is "window size"
165 $N=$STRIDE/4; # should match cache line size
166$code.=<<___;
167 movdqa 0(%r10),%xmm0 # 00000001000000010000000000000000
168 movdqa 16(%r10),%xmm1 # 00000002000000020000000200000002
169 lea 24-112(%rsp,$num,8),%r10# place the mask after tp[num+3] (+ICache optimization)
170 and \$-16,%r10
171
172 pshufd \$0,%xmm5,%xmm5 # broadcast index
173 movdqa %xmm1,%xmm4
174 movdqa %xmm1,%xmm2
175___
176########################################################################
177# calculate mask by comparing 0..31 to index and save result to stack
178#
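# [Editorial sketch.] In scalar terms the SSE2 code below writes a table of 32
# masks to the stack, mask[k] = (k == idx) ? ~0 : 0, which the gather loops then
# AND against the 32 candidate entries. A plain-Perl model with an illustrative
# name, not used by this file:
sub mask_table_sketch {
	my ($idx) = @_;					# broadcast index, 0..31
	return [ map { $_ == $idx ? ~0 : 0 } 0 .. 31 ];	# one mask per table entry
}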
179$code.=<<___;
180 paddd %xmm0,%xmm1
181 pcmpeqd %xmm5,%xmm0 # compare to 1,0
182 .byte 0x67
183 movdqa %xmm4,%xmm3
184___
185for($k=0;$k<$STRIDE/16-4;$k+=4) {
186$code.=<<___;
187 paddd %xmm1,%xmm2
188 pcmpeqd %xmm5,%xmm1 # compare to 3,2
189 movdqa %xmm0,`16*($k+0)+112`(%r10)
190 movdqa %xmm4,%xmm0
191
192 paddd %xmm2,%xmm3
193 pcmpeqd %xmm5,%xmm2 # compare to 5,4
194 movdqa %xmm1,`16*($k+1)+112`(%r10)
195 movdqa %xmm4,%xmm1
196
197 paddd %xmm3,%xmm0
198 pcmpeqd %xmm5,%xmm3 # compare to 7,6
199 movdqa %xmm2,`16*($k+2)+112`(%r10)
200 movdqa %xmm4,%xmm2
201
202 paddd %xmm0,%xmm1
203 pcmpeqd %xmm5,%xmm0
204 movdqa %xmm3,`16*($k+3)+112`(%r10)
205 movdqa %xmm4,%xmm3
206___
207}
208$code.=<<___; # last iteration can be optimized
209 paddd %xmm1,%xmm2
210 pcmpeqd %xmm5,%xmm1
211 movdqa %xmm0,`16*($k+0)+112`(%r10)
212
213 paddd %xmm2,%xmm3
214 .byte 0x67
215 pcmpeqd %xmm5,%xmm2
216 movdqa %xmm1,`16*($k+1)+112`(%r10)
217
218 pcmpeqd %xmm5,%xmm3
219 movdqa %xmm2,`16*($k+2)+112`(%r10)
220 pand `16*($k+0)-128`($bp),%xmm0 # while it's still in register
221
222 pand `16*($k+1)-128`($bp),%xmm1
223 pand `16*($k+2)-128`($bp),%xmm2
224 movdqa %xmm3,`16*($k+3)+112`(%r10)
225 pand `16*($k+3)-128`($bp),%xmm3
226 por %xmm2,%xmm0
227 por %xmm3,%xmm1
228___
229for($k=0;$k<$STRIDE/16-4;$k+=4) {
230$code.=<<___;
231 movdqa `16*($k+0)-128`($bp),%xmm4
232 movdqa `16*($k+1)-128`($bp),%xmm5
233 movdqa `16*($k+2)-128`($bp),%xmm2
234 pand `16*($k+0)+112`(%r10),%xmm4
235 movdqa `16*($k+3)-128`($bp),%xmm3
236 pand `16*($k+1)+112`(%r10),%xmm5
237 por %xmm4,%xmm0
238 pand `16*($k+2)+112`(%r10),%xmm2
239 por %xmm5,%xmm1
240 pand `16*($k+3)+112`(%r10),%xmm3
241 por %xmm2,%xmm0
242 por %xmm3,%xmm1
243___
244}
245$code.=<<___;
246 por %xmm1,%xmm0
247 pshufd \$0x4e,%xmm0,%xmm1
248 por %xmm1,%xmm0
249 lea $STRIDE($bp),$bp
250 movq %xmm0,$m0 # m0=bp[0]
251
252 mov ($n0),$n0 # pull n0[0] value
253 mov ($ap),%rax
254
255 xor $i,$i # i=0
256 xor $j,$j # j=0
257
258 mov $n0,$m1
259 mulq $m0 # ap[0]*bp[0]
260 mov %rax,$lo0
261 mov ($np),%rax
262
263 imulq $lo0,$m1 # "tp[0]"*n0
264 mov %rdx,$hi0
265
266 mulq $m1 # np[0]*m1
267 add %rax,$lo0 # discarded
268 mov 8($ap),%rax
269 adc \$0,%rdx
270 mov %rdx,$hi1
271
272 lea 1($j),$j # j++
273 jmp .L1st_enter
274
275.align 16
276.L1st:
277 add %rax,$hi1
278 mov ($ap,$j,8),%rax
279 adc \$0,%rdx
280 add $hi0,$hi1 # np[j]*m1+ap[j]*bp[0]
281 mov $lo0,$hi0
282 adc \$0,%rdx
283 mov $hi1,-16(%rsp,$j,8) # tp[j-1]
284 mov %rdx,$hi1
285
286.L1st_enter:
287 mulq $m0 # ap[j]*bp[0]
288 add %rax,$hi0
289 mov ($np,$j,8),%rax
290 adc \$0,%rdx
291 lea 1($j),$j # j++
292 mov %rdx,$lo0
293
294 mulq $m1 # np[j]*m1
295 cmp $num,$j
296 jne .L1st # note that upon exit $j==$num, so
297 # they can be used interchangeably
298
299 add %rax,$hi1
300 adc \$0,%rdx
301 add $hi0,$hi1 # np[j]*m1+ap[j]*bp[0]
302 adc \$0,%rdx
303 mov $hi1,-16(%rsp,$num,8) # tp[num-1]
304 mov %rdx,$hi1
305 mov $lo0,$hi0
306
307 xor %rdx,%rdx
308 add $hi0,$hi1
309 adc \$0,%rdx
310 mov $hi1,-8(%rsp,$num,8)
311 mov %rdx,(%rsp,$num,8) # store upmost overflow bit
312
313 lea 1($i),$i # i++
314 jmp .Louter
315.align 16
316.Louter:
317 lea 24+128(%rsp,$num,8),%rdx # where 256-byte mask is (+size optimization)
318 and \$-16,%rdx
319 pxor %xmm4,%xmm4
320 pxor %xmm5,%xmm5
321___
322for($k=0;$k<$STRIDE/16;$k+=4) {
323$code.=<<___;
324 movdqa `16*($k+0)-128`($bp),%xmm0
325 movdqa `16*($k+1)-128`($bp),%xmm1
326 movdqa `16*($k+2)-128`($bp),%xmm2
327 movdqa `16*($k+3)-128`($bp),%xmm3
328 pand `16*($k+0)-128`(%rdx),%xmm0
329 pand `16*($k+1)-128`(%rdx),%xmm1
330 por %xmm0,%xmm4
331 pand `16*($k+2)-128`(%rdx),%xmm2
332 por %xmm1,%xmm5
333 pand `16*($k+3)-128`(%rdx),%xmm3
334 por %xmm2,%xmm4
335 por %xmm3,%xmm5
336___
337}
338$code.=<<___;
339 por %xmm5,%xmm4
340 pshufd \$0x4e,%xmm4,%xmm0
341 por %xmm4,%xmm0
342 lea $STRIDE($bp),$bp
343
344 mov ($ap),%rax # ap[0]
345 movq %xmm0,$m0 # m0=bp[i]
346
347 xor $j,$j # j=0
348 mov $n0,$m1
349 mov (%rsp),$lo0
350
351 mulq $m0 # ap[0]*bp[i]
352 add %rax,$lo0 # ap[0]*bp[i]+tp[0]
353 mov ($np),%rax
354 adc \$0,%rdx
355
356 imulq $lo0,$m1 # tp[0]*n0
357 mov %rdx,$hi0
358
359 mulq $m1 # np[0]*m1
360 add %rax,$lo0 # discarded
361 mov 8($ap),%rax
362 adc \$0,%rdx
363 mov 8(%rsp),$lo0 # tp[1]
364 mov %rdx,$hi1
365
366 lea 1($j),$j # j++
367 jmp .Linner_enter
368
369.align 16
370.Linner:
371 add %rax,$hi1
372 mov ($ap,$j,8),%rax
373 adc \$0,%rdx
374 add $lo0,$hi1 # np[j]*m1+ap[j]*bp[i]+tp[j]
375 mov (%rsp,$j,8),$lo0
376 adc \$0,%rdx
377 mov $hi1,-16(%rsp,$j,8) # tp[j-1]
378 mov %rdx,$hi1
379
380.Linner_enter:
381 mulq $m0 # ap[j]*bp[i]
382 add %rax,$hi0
383 mov ($np,$j,8),%rax
384 adc \$0,%rdx
385 add $hi0,$lo0 # ap[j]*bp[i]+tp[j]
386 mov %rdx,$hi0
387 adc \$0,$hi0
388 lea 1($j),$j # j++
389
390 mulq $m1 # np[j]*m1
391 cmp $num,$j
392 jne .Linner # note that upon exit $j==$num, so
393 # they can be used interchangeably
394 add %rax,$hi1
395 adc \$0,%rdx
396 add $lo0,$hi1 # np[j]*m1+ap[j]*bp[i]+tp[j]
397 mov (%rsp,$num,8),$lo0
398 adc \$0,%rdx
399 mov $hi1,-16(%rsp,$num,8) # tp[num-1]
400 mov %rdx,$hi1
401
402 xor %rdx,%rdx
403 add $hi0,$hi1
404 adc \$0,%rdx
405 add $lo0,$hi1 # pull upmost overflow bit
406 adc \$0,%rdx
407 mov $hi1,-8(%rsp,$num,8)
408 mov %rdx,(%rsp,$num,8) # store upmost overflow bit
409
410 lea 1($i),$i # i++
411 cmp $num,$i
412 jb .Louter
413
414 xor $i,$i # i=0 and clear CF!
415 mov (%rsp),%rax # tp[0]
416 lea (%rsp),$ap # borrow ap for tp
417 mov $num,$j # j=num
418 jmp .Lsub
419.align 16
420.Lsub: sbb ($np,$i,8),%rax
421 mov %rax,($rp,$i,8) # rp[i]=tp[i]-np[i]
422 mov 8($ap,$i,8),%rax # tp[i+1]
423 lea 1($i),$i # i++
424 dec $j # doesn't affect CF!
425 jnz .Lsub
426
427 sbb \$0,%rax # handle upmost overflow bit
428 mov \$-1,%rbx
429 xor %rax,%rbx
430 xor $i,$i
431 mov $num,$j # j=num
432
433.Lcopy: # conditional copy
434 mov ($rp,$i,8),%rcx
435 mov (%rsp,$i,8),%rdx
436 and %rbx,%rcx
437 and %rax,%rdx
438 mov $i,(%rsp,$i,8) # zap temporary vector
439 or %rcx,%rdx
440 mov %rdx,($rp,$i,8) # rp[i]=tp[i]
441 lea 1($i),$i
442 sub \$1,$j
443 jnz .Lcopy
444
445 mov 8(%rsp,$num,8),%rsi # restore %rsp
446.cfi_def_cfa %rsi,8
447 mov \$1,%rax
448
449 mov -48(%rsi),%r15
450.cfi_restore %r15
451 mov -40(%rsi),%r14
452.cfi_restore %r14
453 mov -32(%rsi),%r13
454.cfi_restore %r13
455 mov -24(%rsi),%r12
456.cfi_restore %r12
457 mov -16(%rsi),%rbp
458.cfi_restore %rbp
459 mov -8(%rsi),%rbx
460.cfi_restore %rbx
461 lea (%rsi),%rsp
462.cfi_def_cfa_register %rsp
463.Lmul_epilogue:
464 ret
465.cfi_endproc
466.size bn_mul_mont_gather5,.-bn_mul_mont_gather5
467___
468{{{
469my @A=("%r10","%r11");
470my @N=("%r13","%rdi");
471$code.=<<___;
472.type bn_mul4x_mont_gather5,\@function,6
473.align 32
474bn_mul4x_mont_gather5:
475.cfi_startproc
476 .byte 0x67
477 mov %rsp,%rax
478.cfi_def_cfa_register %rax
479.Lmul4x_enter:
480___
481$code.=<<___ if ($addx);
482 and \$0x80108,%r11d
483 cmp \$0x80108,%r11d # check for AD*X+BMI2+BMI1
484 je .Lmulx4x_enter
485___
486$code.=<<___;
487 push %rbx
488.cfi_push %rbx
489 push %rbp
490.cfi_push %rbp
491 push %r12
492.cfi_push %r12
493 push %r13
494.cfi_push %r13
495 push %r14
496.cfi_push %r14
497 push %r15
498.cfi_push %r15
499.Lmul4x_prologue:
500
501 .byte 0x67
502 shl \$3,${num}d # convert $num to bytes
503 lea ($num,$num,2),%r10 # 3*$num in bytes
504 neg $num # -$num
505
506 ##############################################################
507 # Ensure that stack frame doesn't alias with $rptr+3*$num
508 # modulo 4096, which covers ret[num], am[num] and n[num]
509 # (see bn_exp.c). This is done to allow memory disambiguation
510 # logic to do its magic. [Extra [num] is allocated in order
511 # to align with bn_power5's frame, which is cleansed after
512 # completing exponentiation. Extra 256 bytes is for power mask
513 # calculated from 7th argument, the index.]
514 #
515 lea -320(%rsp,$num,2),%r11
516 mov %rsp,%rbp
517 sub $rp,%r11
518 and \$4095,%r11
519 cmp %r11,%r10
520 jb .Lmul4xsp_alt
521 sub %r11,%rbp # align with $rp
522 lea -320(%rbp,$num,2),%rbp # future alloca(frame+2*num*8+256)
523 jmp .Lmul4xsp_done
524
525.align 32
526.Lmul4xsp_alt:
527 lea 4096-320(,$num,2),%r10
528 lea -320(%rbp,$num,2),%rbp # future alloca(frame+2*num*8+256)
529 sub %r10,%r11
530 mov \$0,%r10
531 cmovc %r10,%r11
532 sub %r11,%rbp
533.Lmul4xsp_done:
534 and \$-64,%rbp
535 mov %rsp,%r11
536 sub %rbp,%r11
537 and \$-4096,%r11
538 lea (%rbp,%r11),%rsp
539 mov (%rsp),%r10
540 cmp %rbp,%rsp
541 ja .Lmul4x_page_walk
542 jmp .Lmul4x_page_walk_done
543
544.Lmul4x_page_walk:
545 lea -4096(%rsp),%rsp
546 mov (%rsp),%r10
547 cmp %rbp,%rsp
548 ja .Lmul4x_page_walk
549.Lmul4x_page_walk_done:
550
551 neg $num
552
553 mov %rax,40(%rsp)
554.cfi_cfa_expression %rsp+40,deref,+8
555.Lmul4x_body:
556
557 call mul4x_internal
558
559 mov 40(%rsp),%rsi # restore %rsp
560.cfi_def_cfa %rsi,8
561 mov \$1,%rax
562
563 mov -48(%rsi),%r15
564.cfi_restore %r15
565 mov -40(%rsi),%r14
566.cfi_restore %r14
567 mov -32(%rsi),%r13
568.cfi_restore %r13
569 mov -24(%rsi),%r12
570.cfi_restore %r12
571 mov -16(%rsi),%rbp
572.cfi_restore %rbp
573 mov -8(%rsi),%rbx
574.cfi_restore %rbx
575 lea (%rsi),%rsp
576.cfi_def_cfa_register %rsp
577.Lmul4x_epilogue:
578 ret
579.cfi_endproc
580.size bn_mul4x_mont_gather5,.-bn_mul4x_mont_gather5
581
582.type mul4x_internal,\@abi-omnipotent
583.align 32
584mul4x_internal:
585.cfi_startproc
586 shl \$5,$num # $num was in bytes
587 movd `($win64?56:8)`(%rax),%xmm5 # load 7th argument, index
588 lea .Linc(%rip),%rax
589 lea 128(%rdx,$num),%r13 # end of powers table (+size optimization)
590 shr \$5,$num # restore $num
591___
592 $bp="%r12";
593 $STRIDE=2**5*8; # 5 is "window size"
594 $N=$STRIDE/4; # should match cache line size
595 $tp=$i;
596$code.=<<___;
597 movdqa 0(%rax),%xmm0 # 00000001000000010000000000000000
598 movdqa 16(%rax),%xmm1 # 00000002000000020000000200000002
599 lea 88-112(%rsp,$num),%r10 # place the mask after tp[num+1] (+ICache optimization)
600 lea 128(%rdx),$bp # size optimization
601
602 pshufd \$0,%xmm5,%xmm5 # broadcast index
603 movdqa %xmm1,%xmm4
604 .byte 0x67,0x67
605 movdqa %xmm1,%xmm2
606___
607########################################################################
608# calculate mask by comparing 0..31 to index and save result to stack
609#
610$code.=<<___;
611 paddd %xmm0,%xmm1
612 pcmpeqd %xmm5,%xmm0 # compare to 1,0
613 .byte 0x67
614 movdqa %xmm4,%xmm3
615___
616for($i=0;$i<$STRIDE/16-4;$i+=4) {
617$code.=<<___;
618 paddd %xmm1,%xmm2
619 pcmpeqd %xmm5,%xmm1 # compare to 3,2
620 movdqa %xmm0,`16*($i+0)+112`(%r10)
621 movdqa %xmm4,%xmm0
622
623 paddd %xmm2,%xmm3
624 pcmpeqd %xmm5,%xmm2 # compare to 5,4
625 movdqa %xmm1,`16*($i+1)+112`(%r10)
626 movdqa %xmm4,%xmm1
627
628 paddd %xmm3,%xmm0
629 pcmpeqd %xmm5,%xmm3 # compare to 7,6
630 movdqa %xmm2,`16*($i+2)+112`(%r10)
631 movdqa %xmm4,%xmm2
632
633 paddd %xmm0,%xmm1
634 pcmpeqd %xmm5,%xmm0
635 movdqa %xmm3,`16*($i+3)+112`(%r10)
636 movdqa %xmm4,%xmm3
637___
638}
639$code.=<<___; # last iteration can be optimized
640 paddd %xmm1,%xmm2
641 pcmpeqd %xmm5,%xmm1
642 movdqa %xmm0,`16*($i+0)+112`(%r10)
643
644 paddd %xmm2,%xmm3
645 .byte 0x67
646 pcmpeqd %xmm5,%xmm2
647 movdqa %xmm1,`16*($i+1)+112`(%r10)
648
649 pcmpeqd %xmm5,%xmm3
650 movdqa %xmm2,`16*($i+2)+112`(%r10)
651 pand `16*($i+0)-128`($bp),%xmm0 # while it's still in register
652
653 pand `16*($i+1)-128`($bp),%xmm1
654 pand `16*($i+2)-128`($bp),%xmm2
655 movdqa %xmm3,`16*($i+3)+112`(%r10)
656 pand `16*($i+3)-128`($bp),%xmm3
657 por %xmm2,%xmm0
658 por %xmm3,%xmm1
659___
660for($i=0;$i<$STRIDE/16-4;$i+=4) {
661$code.=<<___;
662 movdqa `16*($i+0)-128`($bp),%xmm4
663 movdqa `16*($i+1)-128`($bp),%xmm5
664 movdqa `16*($i+2)-128`($bp),%xmm2
665 pand `16*($i+0)+112`(%r10),%xmm4
666 movdqa `16*($i+3)-128`($bp),%xmm3
667 pand `16*($i+1)+112`(%r10),%xmm5
668 por %xmm4,%xmm0
669 pand `16*($i+2)+112`(%r10),%xmm2
670 por %xmm5,%xmm1
671 pand `16*($i+3)+112`(%r10),%xmm3
672 por %xmm2,%xmm0
673 por %xmm3,%xmm1
674___
675}
676$code.=<<___;
677 por %xmm1,%xmm0
678 pshufd \$0x4e,%xmm0,%xmm1
679 por %xmm1,%xmm0
680 lea $STRIDE($bp),$bp
681 movq %xmm0,$m0 # m0=bp[0]
682
683 mov %r13,16+8(%rsp) # save end of b[num]
684 mov $rp, 56+8(%rsp) # save $rp
685
686 mov ($n0),$n0 # pull n0[0] value
687 mov ($ap),%rax
688 lea ($ap,$num),$ap # end of a[num]
689 neg $num
690
691 mov $n0,$m1
692 mulq $m0 # ap[0]*bp[0]
693 mov %rax,$A[0]
694 mov ($np),%rax
695
696 imulq $A[0],$m1 # "tp[0]"*n0
697 lea 64+8(%rsp),$tp
698 mov %rdx,$A[1]
699
700 mulq $m1 # np[0]*m1
701 add %rax,$A[0] # discarded
702 mov 8($ap,$num),%rax
703 adc \$0,%rdx
704 mov %rdx,$N[1]
705
706 mulq $m0
707 add %rax,$A[1]
708 mov 8*1($np),%rax
709 adc \$0,%rdx
710 mov %rdx,$A[0]
711
712 mulq $m1
713 add %rax,$N[1]
714 mov 16($ap,$num),%rax
715 adc \$0,%rdx
716 add $A[1],$N[1]
717 lea 4*8($num),$j # j=4
718 lea 8*4($np),$np
719 adc \$0,%rdx
720 mov $N[1],($tp)
721 mov %rdx,$N[0]
722 jmp .L1st4x
723
724.align 32
725.L1st4x:
726 mulq $m0 # ap[j]*bp[0]
727 add %rax,$A[0]
728 mov -8*2($np),%rax
729 lea 32($tp),$tp
730 adc \$0,%rdx
731 mov %rdx,$A[1]
732
733 mulq $m1 # np[j]*m1
734 add %rax,$N[0]
735 mov -8($ap,$j),%rax
736 adc \$0,%rdx
737 add $A[0],$N[0] # np[j]*m1+ap[j]*bp[0]
738 adc \$0,%rdx
739 mov $N[0],-24($tp) # tp[j-1]
740 mov %rdx,$N[1]
741
742 mulq $m0 # ap[j]*bp[0]
743 add %rax,$A[1]
744 mov -8*1($np),%rax
745 adc \$0,%rdx
746 mov %rdx,$A[0]
747
748 mulq $m1 # np[j]*m1
749 add %rax,$N[1]
750 mov ($ap,$j),%rax
751 adc \$0,%rdx
752 add $A[1],$N[1] # np[j]*m1+ap[j]*bp[0]
753 adc \$0,%rdx
754 mov $N[1],-16($tp) # tp[j-1]
755 mov %rdx,$N[0]
756
757 mulq $m0 # ap[j]*bp[0]
758 add %rax,$A[0]
759 mov 8*0($np),%rax
760 adc \$0,%rdx
761 mov %rdx,$A[1]
762
763 mulq $m1 # np[j]*m1
764 add %rax,$N[0]
765 mov 8($ap,$j),%rax
766 adc \$0,%rdx
767 add $A[0],$N[0] # np[j]*m1+ap[j]*bp[0]
768 adc \$0,%rdx
769 mov $N[0],-8($tp) # tp[j-1]
770 mov %rdx,$N[1]
771
772 mulq $m0 # ap[j]*bp[0]
773 add %rax,$A[1]
774 mov 8*1($np),%rax
775 adc \$0,%rdx
776 mov %rdx,$A[0]
777
778 mulq $m1 # np[j]*m1
779 add %rax,$N[1]
780 mov 16($ap,$j),%rax
781 adc \$0,%rdx
782 add $A[1],$N[1] # np[j]*m1+ap[j]*bp[0]
783 lea 8*4($np),$np
784 adc \$0,%rdx
785 mov $N[1],($tp) # tp[j-1]
786 mov %rdx,$N[0]
787
788 add \$32,$j # j+=4
789 jnz .L1st4x
790
791 mulq $m0 # ap[j]*bp[0]
792 add %rax,$A[0]
793 mov -8*2($np),%rax
794 lea 32($tp),$tp
795 adc \$0,%rdx
796 mov %rdx,$A[1]
797
798 mulq $m1 # np[j]*m1
799 add %rax,$N[0]
800 mov -8($ap),%rax
801 adc \$0,%rdx
802 add $A[0],$N[0] # np[j]*m1+ap[j]*bp[0]
803 adc \$0,%rdx
804 mov $N[0],-24($tp) # tp[j-1]
805 mov %rdx,$N[1]
806
807 mulq $m0 # ap[j]*bp[0]
808 add %rax,$A[1]
809 mov -8*1($np),%rax
810 adc \$0,%rdx
811 mov %rdx,$A[0]
812
813 mulq $m1 # np[j]*m1
814 add %rax,$N[1]
815 mov ($ap,$num),%rax # ap[0]
816 adc \$0,%rdx
817 add $A[1],$N[1] # np[j]*m1+ap[j]*bp[0]
818 adc \$0,%rdx
819 mov $N[1],-16($tp) # tp[j-1]
820 mov %rdx,$N[0]
821
822 lea ($np,$num),$np # rewind $np
823
824 xor $N[1],$N[1]
825 add $A[0],$N[0]
826 adc \$0,$N[1]
827 mov $N[0],-8($tp)
828
829 jmp .Louter4x
830
831.align 32
832.Louter4x:
833 lea 16+128($tp),%rdx # where 256-byte mask is (+size optimization)
834 pxor %xmm4,%xmm4
835 pxor %xmm5,%xmm5
836___
837for($i=0;$i<$STRIDE/16;$i+=4) {
838$code.=<<___;
839 movdqa `16*($i+0)-128`($bp),%xmm0
840 movdqa `16*($i+1)-128`($bp),%xmm1
841 movdqa `16*($i+2)-128`($bp),%xmm2
842 movdqa `16*($i+3)-128`($bp),%xmm3
843 pand `16*($i+0)-128`(%rdx),%xmm0
844 pand `16*($i+1)-128`(%rdx),%xmm1
845 por %xmm0,%xmm4
846 pand `16*($i+2)-128`(%rdx),%xmm2
847 por %xmm1,%xmm5
848 pand `16*($i+3)-128`(%rdx),%xmm3
849 por %xmm2,%xmm4
850 por %xmm3,%xmm5
851___
852}
853$code.=<<___;
854 por %xmm5,%xmm4
855 pshufd \$0x4e,%xmm4,%xmm0
856 por %xmm4,%xmm0
857 lea $STRIDE($bp),$bp
858 movq %xmm0,$m0 # m0=bp[i]
859
860 mov ($tp,$num),$A[0]
861 mov $n0,$m1
862 mulq $m0 # ap[0]*bp[i]
863 add %rax,$A[0] # ap[0]*bp[i]+tp[0]
864 mov ($np),%rax
865 adc \$0,%rdx
866
867 imulq $A[0],$m1 # tp[0]*n0
868 mov %rdx,$A[1]
869 mov $N[1],($tp) # store upmost overflow bit
870
871 lea ($tp,$num),$tp # rewind $tp
872
873 mulq $m1 # np[0]*m1
874 add %rax,$A[0] # "$N[0]", discarded
875 mov 8($ap,$num),%rax
876 adc \$0,%rdx
877 mov %rdx,$N[1]
878
879 mulq $m0 # ap[j]*bp[i]
880 add %rax,$A[1]
881 mov 8*1($np),%rax
882 adc \$0,%rdx
883 add 8($tp),$A[1] # +tp[1]
884 adc \$0,%rdx
885 mov %rdx,$A[0]
886
887 mulq $m1 # np[j]*m1
888 add %rax,$N[1]
889 mov 16($ap,$num),%rax
890 adc \$0,%rdx
891 add $A[1],$N[1] # np[j]*m1+ap[j]*bp[i]+tp[j]
892 lea 4*8($num),$j # j=4
893 lea 8*4($np),$np
894 adc \$0,%rdx
895 mov %rdx,$N[0]
896 jmp .Linner4x
897
898.align 32
899.Linner4x:
900 mulq $m0 # ap[j]*bp[i]
901 add %rax,$A[0]
902 mov -8*2($np),%rax
903 adc \$0,%rdx
904 add 16($tp),$A[0] # ap[j]*bp[i]+tp[j]
905 lea 32($tp),$tp
906 adc \$0,%rdx
907 mov %rdx,$A[1]
908
909 mulq $m1 # np[j]*m1
910 add %rax,$N[0]
911 mov -8($ap,$j),%rax
912 adc \$0,%rdx
913 add $A[0],$N[0]
914 adc \$0,%rdx
915 mov $N[1],-32($tp) # tp[j-1]
916 mov %rdx,$N[1]
917
918 mulq $m0 # ap[j]*bp[i]
919 add %rax,$A[1]
920 mov -8*1($np),%rax
921 adc \$0,%rdx
922 add -8($tp),$A[1]
923 adc \$0,%rdx
924 mov %rdx,$A[0]
925
926 mulq $m1 # np[j]*m1
927 add %rax,$N[1]
928 mov ($ap,$j),%rax
929 adc \$0,%rdx
930 add $A[1],$N[1]
931 adc \$0,%rdx
932 mov $N[0],-24($tp) # tp[j-1]
933 mov %rdx,$N[0]
934
935 mulq $m0 # ap[j]*bp[i]
936 add %rax,$A[0]
937 mov 8*0($np),%rax
938 adc \$0,%rdx
939 add ($tp),$A[0] # ap[j]*bp[i]+tp[j]
940 adc \$0,%rdx
941 mov %rdx,$A[1]
942
943 mulq $m1 # np[j]*m1
944 add %rax,$N[0]
945 mov 8($ap,$j),%rax
946 adc \$0,%rdx
947 add $A[0],$N[0]
948 adc \$0,%rdx
949 mov $N[1],-16($tp) # tp[j-1]
950 mov %rdx,$N[1]
951
952 mulq $m0 # ap[j]*bp[i]
953 add %rax,$A[1]
954 mov 8*1($np),%rax
955 adc \$0,%rdx
956 add 8($tp),$A[1]
957 adc \$0,%rdx
958 mov %rdx,$A[0]
959
960 mulq $m1 # np[j]*m1
961 add %rax,$N[1]
962 mov 16($ap,$j),%rax
963 adc \$0,%rdx
964 add $A[1],$N[1]
965 lea 8*4($np),$np
966 adc \$0,%rdx
967 mov $N[0],-8($tp) # tp[j-1]
968 mov %rdx,$N[0]
969
970 add \$32,$j # j+=4
971 jnz .Linner4x
972
973 mulq $m0 # ap[j]*bp[i]
974 add %rax,$A[0]
975 mov -8*2($np),%rax
976 adc \$0,%rdx
977 add 16($tp),$A[0] # ap[j]*bp[i]+tp[j]
978 lea 32($tp),$tp
979 adc \$0,%rdx
980 mov %rdx,$A[1]
981
982 mulq $m1 # np[j]*m1
983 add %rax,$N[0]
984 mov -8($ap),%rax
985 adc \$0,%rdx
986 add $A[0],$N[0]
987 adc \$0,%rdx
988 mov $N[1],-32($tp) # tp[j-1]
989 mov %rdx,$N[1]
990
991 mulq $m0 # ap[j]*bp[i]
992 add %rax,$A[1]
993 mov $m1,%rax
994 mov -8*1($np),$m1
995 adc \$0,%rdx
996 add -8($tp),$A[1]
997 adc \$0,%rdx
998 mov %rdx,$A[0]
999
1000 mulq $m1 # np[j]*m1
1001 add %rax,$N[1]
1002 mov ($ap,$num),%rax # ap[0]
1003 adc \$0,%rdx
1004 add $A[1],$N[1]
1005 adc \$0,%rdx
1006 mov $N[0],-24($tp) # tp[j-1]
1007 mov %rdx,$N[0]
1008
1009 mov $N[1],-16($tp) # tp[j-1]
1010 lea ($np,$num),$np # rewind $np
1011
1012 xor $N[1],$N[1]
1013 add $A[0],$N[0]
1014 adc \$0,$N[1]
1015 add ($tp),$N[0] # pull upmost overflow bit
1016 adc \$0,$N[1] # upmost overflow bit
1017 mov $N[0],-8($tp)
1018
1019 cmp 16+8(%rsp),$bp
1020 jb .Louter4x
1021___
1022if (1) {
1023$code.=<<___;
1024 xor %rax,%rax
1025 sub $N[0],$m1 # compare top-most words
1026 adc $j,$j # $j is zero
1027 or $j,$N[1]
1028 sub $N[1],%rax # %rax=-$N[1]
1029 lea ($tp,$num),%rbx # tptr in .sqr4x_sub
1030 mov ($np),%r12
1031 lea ($np),%rbp # nptr in .sqr4x_sub
1032 mov %r9,%rcx
1033 sar \$3+2,%rcx
1034 mov 56+8(%rsp),%rdi # rptr in .sqr4x_sub
1035 dec %r12 # so that after 'not' we get -n[0]
1036 xor %r10,%r10
1037 mov 8*1(%rbp),%r13
1038 mov 8*2(%rbp),%r14
1039 mov 8*3(%rbp),%r15
1040 jmp .Lsqr4x_sub_entry
1041___
1042} else {
1043my @ri=("%rax",$bp,$m0,$m1);
1044my $rp="%rdx";
1045$code.=<<___
1046 xor \$1,$N[1]
1047 lea ($tp,$num),$tp # rewind $tp
1048 sar \$5,$num # cf=0
1049 lea ($np,$N[1],8),$np
1050 mov 56+8(%rsp),$rp # restore $rp
1051 jmp .Lsub4x
1052
1053.align 32
1054.Lsub4x:
1055 .byte 0x66
1056 mov 8*0($tp),@ri[0]
1057 mov 8*1($tp),@ri[1]
1058 .byte 0x66
1059 sbb 16*0($np),@ri[0]
1060 mov 8*2($tp),@ri[2]
1061 sbb 16*1($np),@ri[1]
1062 mov 3*8($tp),@ri[3]
1063 lea 4*8($tp),$tp
1064 sbb 16*2($np),@ri[2]
1065 mov @ri[0],8*0($rp)
1066 sbb 16*3($np),@ri[3]
1067 lea 16*4($np),$np
1068 mov @ri[1],8*1($rp)
1069 mov @ri[2],8*2($rp)
1070 mov @ri[3],8*3($rp)
1071 lea 8*4($rp),$rp
1072
1073 inc $num
1074 jnz .Lsub4x
1075
1076 ret
1077___
1078}
1079$code.=<<___;
1080.cfi_endproc
1081.size mul4x_internal,.-mul4x_internal
1082___
1083}}}
1084
1085{{{
1086######################################################################
1087# void bn_power5(
1088my $rptr="%rdi"; # BN_ULONG *rptr,
1089my $aptr="%rsi"; # const BN_ULONG *aptr,
1090my $bptr="%rdx"; # const void *table,
1091my $nptr="%rcx"; # const BN_ULONG *nptr,
1092my $n0 ="%r8"; # const BN_ULONG *n0);
1093my $num ="%r9"; # int num, has to be divisible by 8
1094 # int pwr
1095
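# [Editorial sketch.] As the call sequence in the body shows, bn_power5 amounts
# to five back-to-back Montgomery squarings followed by one Montgomery
# multiplication by the power gathered from the table. In plain Perl, with
# mont_sqr/mont_mul passed in as illustrative code references (nothing in this
# file defines or calls them):
sub power5_sketch {
	my ($a, $table, $pwr, $mont_sqr, $mont_mul) = @_;
	$a = $mont_sqr->($a) for 1 .. 5;		# a = a^(2^5), Montgomery domain
	return $mont_mul->($a, $table->[$pwr]);		# multiply by the gathered power
}
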
1096my ($i,$j,$tptr)=("%rbp","%rcx",$rptr);
1097my @A0=("%r10","%r11");
1098my @A1=("%r12","%r13");
1099my ($a0,$a1,$ai)=("%r14","%r15","%rbx");
1100
1101$code.=<<___;
1102.globl bn_power5
1103.type bn_power5,\@function,6
1104.align 32
1105bn_power5:
1106.cfi_startproc
1107 mov %rsp,%rax
1108.cfi_def_cfa_register %rax
1109___
1110$code.=<<___ if ($addx);
1111 mov OPENSSL_ia32cap_P+8(%rip),%r11d
1112 and \$0x80108,%r11d
1113 cmp \$0x80108,%r11d # check for AD*X+BMI2+BMI1
1114 je .Lpowerx5_enter
1115___
1116$code.=<<___;
1117 push %rbx
1118.cfi_push %rbx
1119 push %rbp
1120.cfi_push %rbp
1121 push %r12
1122.cfi_push %r12
1123 push %r13
1124.cfi_push %r13
1125 push %r14
1126.cfi_push %r14
1127 push %r15
1128.cfi_push %r15
1129.Lpower5_prologue:
1130
1131 shl \$3,${num}d # convert $num to bytes
1132 lea ($num,$num,2),%r10d # 3*$num
1133 neg $num
1134 mov ($n0),$n0 # *n0
1135
1136 ##############################################################
1137 # Ensure that stack frame doesn't alias with $rptr+3*$num
1138 # modulo 4096, which covers ret[num], am[num] and n[num]
1139 # (see bn_exp.c). This is done to allow memory disambiguation
1140 # logic to do its magic. [Extra 256 bytes is for power mask
1141 # calculated from 7th argument, the index.]
1142 #
1143 lea -320(%rsp,$num,2),%r11
1144 mov %rsp,%rbp
1145 sub $rptr,%r11
1146 and \$4095,%r11
1147 cmp %r11,%r10
1148 jb .Lpwr_sp_alt
1149 sub %r11,%rbp # align with $aptr
1150 lea -320(%rbp,$num,2),%rbp # future alloca(frame+2*num*8+256)
1151 jmp .Lpwr_sp_done
1152
1153.align 32
1154.Lpwr_sp_alt:
1155 lea 4096-320(,$num,2),%r10
1156 lea -320(%rbp,$num,2),%rbp # future alloca(frame+2*num*8+256)
1157 sub %r10,%r11
1158 mov \$0,%r10
1159 cmovc %r10,%r11
1160 sub %r11,%rbp
1161.Lpwr_sp_done:
1162 and \$-64,%rbp
1163 mov %rsp,%r11
1164 sub %rbp,%r11
1165 and \$-4096,%r11
1166 lea (%rbp,%r11),%rsp
1167 mov (%rsp),%r10
1168 cmp %rbp,%rsp
1169 ja .Lpwr_page_walk
1170 jmp .Lpwr_page_walk_done
1171
1172.Lpwr_page_walk:
1173 lea -4096(%rsp),%rsp
1174 mov (%rsp),%r10
1175 cmp %rbp,%rsp
1176 ja .Lpwr_page_walk
1177.Lpwr_page_walk_done:
1178
1179 mov $num,%r10
1180 neg $num
1181
1182 ##############################################################
1183 # Stack layout
1184 #
1185 # +0 saved $num, used in reduction section
1186 # +8 &t[2*$num], used in reduction section
1187 # +32 saved *n0
1188 # +40 saved %rsp
1189 # +48 t[2*$num]
1190 #
1191 mov $n0, 32(%rsp)
1192 mov %rax, 40(%rsp) # save original %rsp
1193.cfi_cfa_expression %rsp+40,deref,+8
1194.Lpower5_body:
1195 movq $rptr,%xmm1 # save $rptr, used in sqr8x
1196 movq $nptr,%xmm2 # save $nptr
1197 movq %r10, %xmm3 # -$num, used in sqr8x
1198 movq $bptr,%xmm4
1199
1200 call __bn_sqr8x_internal
1201 call __bn_post4x_internal
1202 call __bn_sqr8x_internal
1203 call __bn_post4x_internal
1204 call __bn_sqr8x_internal
1205 call __bn_post4x_internal
1206 call __bn_sqr8x_internal
1207 call __bn_post4x_internal
1208 call __bn_sqr8x_internal
1209 call __bn_post4x_internal
1210
1211 movq %xmm2,$nptr
1212 movq %xmm4,$bptr
1213 mov $aptr,$rptr
1214 mov 40(%rsp),%rax
1215 lea 32(%rsp),$n0
1216
1217 call mul4x_internal
1218
1219 mov 40(%rsp),%rsi # restore %rsp
1220.cfi_def_cfa %rsi,8
1221 mov \$1,%rax
1222 mov -48(%rsi),%r15
1223.cfi_restore %r15
1224 mov -40(%rsi),%r14
1225.cfi_restore %r14
1226 mov -32(%rsi),%r13
1227.cfi_restore %r13
1228 mov -24(%rsi),%r12
1229.cfi_restore %r12
1230 mov -16(%rsi),%rbp
1231.cfi_restore %rbp
1232 mov -8(%rsi),%rbx
1233.cfi_restore %rbx
1234 lea (%rsi),%rsp
1235.cfi_def_cfa_register %rsp
1236.Lpower5_epilogue:
1237 ret
1238.cfi_endproc
1239.size bn_power5,.-bn_power5
1240
1241.globl bn_sqr8x_internal
1242.hidden bn_sqr8x_internal
1243.type bn_sqr8x_internal,\@abi-omnipotent
1244.align 32
1245bn_sqr8x_internal:
1246__bn_sqr8x_internal:
1247.cfi_startproc
1248 ##############################################################
1249 # Squaring part:
1250 #
1251 # a) multiply-n-add everything but a[i]*a[i];
1252 # b) shift result of a) by 1 to the left and accumulate
1253 # a[i]*a[i] products;
1254 #
1255 ##############################################################
1256 # a[1]a[0]
1257 # a[2]a[0]
1258 # a[3]a[0]
1259 # a[2]a[1]
1260 # a[4]a[0]
1261 # a[3]a[1]
1262 # a[5]a[0]
1263 # a[4]a[1]
1264 # a[3]a[2]
1265 # a[6]a[0]
1266 # a[5]a[1]
1267 # a[4]a[2]
1268 # a[7]a[0]
1269 # a[6]a[1]
1270 # a[5]a[2]
1271 # a[4]a[3]
1272 # a[7]a[1]
1273 # a[6]a[2]
1274 # a[5]a[3]
1275 # a[7]a[2]
1276 # a[6]a[3]
1277 # a[5]a[4]
1278 # a[7]a[3]
1279 # a[6]a[4]
1280 # a[7]a[4]
1281 # a[6]a[5]
1282 # a[7]a[5]
1283 # a[7]a[6]
1284 # a[1]a[0]
1285 # a[2]a[0]
1286 # a[3]a[0]
1287 # a[4]a[0]
1288 # a[5]a[0]
1289 # a[6]a[0]
1290 # a[7]a[0]
1291 # a[2]a[1]
1292 # a[3]a[1]
1293 # a[4]a[1]
1294 # a[5]a[1]
1295 # a[6]a[1]
1296 # a[7]a[1]
1297 # a[3]a[2]
1298 # a[4]a[2]
1299 # a[5]a[2]
1300 # a[6]a[2]
1301 # a[7]a[2]
1302 # a[4]a[3]
1303 # a[5]a[3]
1304 # a[6]a[3]
1305 # a[7]a[3]
1306 # a[5]a[4]
1307 # a[6]a[4]
1308 # a[7]a[4]
1309 # a[6]a[5]
1310 # a[7]a[5]
1311 # a[7]a[6]
1312 # a[0]a[0]
1313 # a[1]a[1]
1314 # a[2]a[2]
1315 # a[3]a[3]
1316 # a[4]a[4]
1317 # a[5]a[5]
1318 # a[6]a[6]
1319 # a[7]a[7]
1320
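	#
	# [Editorial sketch, assembler comment only.] Steps a) and b) above in
	# C-like pseudocode over num-limb a and 2*num-limb t (carries omitted):
	#
	#     for (i = 0; i < num; i++)                 // a) off-diagonal products
	#         for (j = i+1; j < num; j++)
	#             add a[i]*a[j] into t at position i+j;
	#     for (i = 2*num-1; i > 0; i--)             // b) double the whole thing
	#         t[i] = (t[i] << 1) | (t[i-1] >> 63);
	#     t[0] <<= 1;
	#     for (i = 0; i < num; i++)                 //    and add the diagonal
	#         add a[i]*a[i] into t at position 2*i;
	#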
1321 lea 32(%r10),$i # $i=-($num-32)
1322 lea ($aptr,$num),$aptr # end of a[] buffer, ($aptr,$i)=&ap[2]
1323
1324 mov $num,$j # $j=$num
1325
1326 # comments apply to $num==8 case
1327 mov -32($aptr,$i),$a0 # a[0]
1328 lea 48+8(%rsp,$num,2),$tptr # end of tp[] buffer, &tp[2*$num]
1329 mov -24($aptr,$i),%rax # a[1]
1330 lea -32($tptr,$i),$tptr # end of tp[] window, &tp[2*$num-"$i"]
1331 mov -16($aptr,$i),$ai # a[2]
1332 mov %rax,$a1
1333
1334 mul $a0 # a[1]*a[0]
1335 mov %rax,$A0[0] # a[1]*a[0]
1336 mov $ai,%rax # a[2]
1337 mov %rdx,$A0[1]
1338 mov $A0[0],-24($tptr,$i) # t[1]
1339
1340 mul $a0 # a[2]*a[0]
1341 add %rax,$A0[1]
1342 mov $ai,%rax
1343 adc \$0,%rdx
1344 mov $A0[1],-16($tptr,$i) # t[2]
1345 mov %rdx,$A0[0]
1346
1347
1348 mov -8($aptr,$i),$ai # a[3]
1349 mul $a1 # a[2]*a[1]
1350 mov %rax,$A1[0] # a[2]*a[1]+t[3]
1351 mov $ai,%rax
1352 mov %rdx,$A1[1]
1353
1354 lea ($i),$j
1355 mul $a0 # a[3]*a[0]
1356 add %rax,$A0[0] # a[3]*a[0]+a[2]*a[1]+t[3]
1357 mov $ai,%rax
1358 mov %rdx,$A0[1]
1359 adc \$0,$A0[1]
1360 add $A1[0],$A0[0]
1361 adc \$0,$A0[1]
1362 mov $A0[0],-8($tptr,$j) # t[3]
1363 jmp .Lsqr4x_1st
1364
1365.align 32
1366.Lsqr4x_1st:
1367 mov ($aptr,$j),$ai # a[4]
1368 mul $a1 # a[3]*a[1]
1369 add %rax,$A1[1] # a[3]*a[1]+t[4]
1370 mov $ai,%rax
1371 mov %rdx,$A1[0]
1372 adc \$0,$A1[0]
1373
1374 mul $a0 # a[4]*a[0]
1375 add %rax,$A0[1] # a[4]*a[0]+a[3]*a[1]+t[4]
1376 mov $ai,%rax # a[3]
1377 mov 8($aptr,$j),$ai # a[5]
1378 mov %rdx,$A0[0]
1379 adc \$0,$A0[0]
1380 add $A1[1],$A0[1]
1381 adc \$0,$A0[0]
1382
1383
1384 mul $a1 # a[4]*a[3]
1385 add %rax,$A1[0] # a[4]*a[3]+t[5]
1386 mov $ai,%rax
1387 mov $A0[1],($tptr,$j) # t[4]
1388 mov %rdx,$A1[1]
1389 adc \$0,$A1[1]
1390
1391 mul $a0 # a[5]*a[2]
1392 add %rax,$A0[0] # a[5]*a[2]+a[4]*a[3]+t[5]
1393 mov $ai,%rax
1394 mov 16($aptr,$j),$ai # a[6]
1395 mov %rdx,$A0[1]
1396 adc \$0,$A0[1]
1397 add $A1[0],$A0[0]
1398 adc \$0,$A0[1]
1399
1400 mul $a1 # a[5]*a[3]
1401 add %rax,$A1[1] # a[5]*a[3]+t[6]
1402 mov $ai,%rax
1403 mov $A0[0],8($tptr,$j) # t[5]
1404 mov %rdx,$A1[0]
1405 adc \$0,$A1[0]
1406
1407 mul $a0 # a[6]*a[2]
1408 add %rax,$A0[1] # a[6]*a[2]+a[5]*a[3]+t[6]
1409 mov $ai,%rax # a[3]
1410 mov 24($aptr,$j),$ai # a[7]
1411 mov %rdx,$A0[0]
1412 adc \$0,$A0[0]
1413 add $A1[1],$A0[1]
1414 adc \$0,$A0[0]
1415
1416
1417 mul $a1 # a[6]*a[5]
1418 add %rax,$A1[0] # a[6]*a[5]+t[7]
1419 mov $ai,%rax
1420 mov $A0[1],16($tptr,$j) # t[6]
1421 mov %rdx,$A1[1]
1422 adc \$0,$A1[1]
1423 lea 32($j),$j
1424
1425 mul $a0 # a[7]*a[4]
1426 add %rax,$A0[0] # a[7]*a[4]+a[6]*a[5]+t[6]
1427 mov $ai,%rax
1428 mov %rdx,$A0[1]
1429 adc \$0,$A0[1]
1430 add $A1[0],$A0[0]
1431 adc \$0,$A0[1]
1432 mov $A0[0],-8($tptr,$j) # t[7]
1433
1434 cmp \$0,$j
1435 jne .Lsqr4x_1st
1436
1437 mul $a1 # a[7]*a[5]
1438 add %rax,$A1[1]
1439 lea 16($i),$i
1440 adc \$0,%rdx
1441 add $A0[1],$A1[1]
1442 adc \$0,%rdx
1443
1444 mov $A1[1],($tptr) # t[8]
1445 mov %rdx,$A1[0]
1446 mov %rdx,8($tptr) # t[9]
1447 jmp .Lsqr4x_outer
1448
1449.align 32
1450.Lsqr4x_outer: # comments apply to $num==6 case
1451 mov -32($aptr,$i),$a0 # a[0]
1452 lea 48+8(%rsp,$num,2),$tptr # end of tp[] buffer, &tp[2*$num]
1453 mov -24($aptr,$i),%rax # a[1]
1454 lea -32($tptr,$i),$tptr # end of tp[] window, &tp[2*$num-"$i"]
1455 mov -16($aptr,$i),$ai # a[2]
1456 mov %rax,$a1
1457
1458 mul $a0 # a[1]*a[0]
1459 mov -24($tptr,$i),$A0[0] # t[1]
1460 add %rax,$A0[0] # a[1]*a[0]+t[1]
1461 mov $ai,%rax # a[2]
1462 adc \$0,%rdx
1463 mov $A0[0],-24($tptr,$i) # t[1]
1464 mov %rdx,$A0[1]
1465
1466 mul $a0 # a[2]*a[0]
1467 add %rax,$A0[1]
1468 mov $ai,%rax
1469 adc \$0,%rdx
1470 add -16($tptr,$i),$A0[1] # a[2]*a[0]+t[2]
1471 mov %rdx,$A0[0]
1472 adc \$0,$A0[0]
1473 mov $A0[1],-16($tptr,$i) # t[2]
1474
1475 xor $A1[0],$A1[0]
1476
1477 mov -8($aptr,$i),$ai # a[3]
1478 mul $a1 # a[2]*a[1]
1479 add %rax,$A1[0] # a[2]*a[1]+t[3]
1480 mov $ai,%rax
1481 adc \$0,%rdx
1482 add -8($tptr,$i),$A1[0]
1483 mov %rdx,$A1[1]
1484 adc \$0,$A1[1]
1485
1486 mul $a0 # a[3]*a[0]
1487 add %rax,$A0[0] # a[3]*a[0]+a[2]*a[1]+t[3]
1488 mov $ai,%rax
1489 adc \$0,%rdx
1490 add $A1[0],$A0[0]
1491 mov %rdx,$A0[1]
1492 adc \$0,$A0[1]
1493 mov $A0[0],-8($tptr,$i) # t[3]
1494
1495 lea ($i),$j
1496 jmp .Lsqr4x_inner
1497
1498.align 32
1499.Lsqr4x_inner:
1500 mov ($aptr,$j),$ai # a[4]
1501 mul $a1 # a[3]*a[1]
1502 add %rax,$A1[1] # a[3]*a[1]+t[4]
1503 mov $ai,%rax
1504 mov %rdx,$A1[0]
1505 adc \$0,$A1[0]
1506 add ($tptr,$j),$A1[1]
1507 adc \$0,$A1[0]
1508
1509 .byte 0x67
1510 mul $a0 # a[4]*a[0]
1511 add %rax,$A0[1] # a[4]*a[0]+a[3]*a[1]+t[4]
1512 mov $ai,%rax # a[3]
1513 mov 8($aptr,$j),$ai # a[5]
1514 mov %rdx,$A0[0]
1515 adc \$0,$A0[0]
1516 add $A1[1],$A0[1]
1517 adc \$0,$A0[0]
1518
1519 mul $a1 # a[4]*a[3]
1520 add %rax,$A1[0] # a[4]*a[3]+t[5]
1521 mov $A0[1],($tptr,$j) # t[4]
1522 mov $ai,%rax
1523 mov %rdx,$A1[1]
1524 adc \$0,$A1[1]
1525 add 8($tptr,$j),$A1[0]
1526 lea 16($j),$j # j++
1527 adc \$0,$A1[1]
1528
1529 mul $a0 # a[5]*a[2]
1530 add %rax,$A0[0] # a[5]*a[2]+a[4]*a[3]+t[5]
1531 mov $ai,%rax
1532 adc \$0,%rdx
1533 add $A1[0],$A0[0]
1534 mov %rdx,$A0[1]
1535 adc \$0,$A0[1]
1536 mov $A0[0],-8($tptr,$j) # t[5], "preloaded t[1]" below
1537
1538 cmp \$0,$j
1539 jne .Lsqr4x_inner
1540
1541 .byte 0x67
1542 mul $a1 # a[5]*a[3]
1543 add %rax,$A1[1]
1544 adc \$0,%rdx
1545 add $A0[1],$A1[1]
1546 adc \$0,%rdx
1547
1548 mov $A1[1],($tptr) # t[6], "preloaded t[2]" below
1549 mov %rdx,$A1[0]
1550 mov %rdx,8($tptr) # t[7], "preloaded t[3]" below
1551
1552 add \$16,$i
1553 jnz .Lsqr4x_outer
1554
1555 # comments apply to $num==4 case
1556 mov -32($aptr),$a0 # a[0]
1557 lea 48+8(%rsp,$num,2),$tptr # end of tp[] buffer, &tp[2*$num]
1558 mov -24($aptr),%rax # a[1]
1559 lea -32($tptr,$i),$tptr # end of tp[] window, &tp[2*$num-"$i"]
1560 mov -16($aptr),$ai # a[2]
1561 mov %rax,$a1
1562
1563 mul $a0 # a[1]*a[0]
1564 add %rax,$A0[0] # a[1]*a[0]+t[1], preloaded t[1]
1565 mov $ai,%rax # a[2]
1566 mov %rdx,$A0[1]
1567 adc \$0,$A0[1]
1568
1569 mul $a0 # a[2]*a[0]
1570 add %rax,$A0[1]
1571 mov $ai,%rax
1572 mov $A0[0],-24($tptr) # t[1]
1573 mov %rdx,$A0[0]
1574 adc \$0,$A0[0]
1575 add $A1[1],$A0[1] # a[2]*a[0]+t[2], preloaded t[2]
1576 mov -8($aptr),$ai # a[3]
1577 adc \$0,$A0[0]
1578
1579 mul $a1 # a[2]*a[1]
1580 add %rax,$A1[0] # a[2]*a[1]+t[3], preloaded t[3]
1581 mov $ai,%rax
1582 mov $A0[1],-16($tptr) # t[2]
1583 mov %rdx,$A1[1]
1584 adc \$0,$A1[1]
1585
1586 mul $a0 # a[3]*a[0]
1587 add %rax,$A0[0] # a[3]*a[0]+a[2]*a[1]+t[3]
1588 mov $ai,%rax
1589 mov %rdx,$A0[1]
1590 adc \$0,$A0[1]
1591 add $A1[0],$A0[0]
1592 adc \$0,$A0[1]
1593 mov $A0[0],-8($tptr) # t[3]
1594
1595 mul $a1 # a[3]*a[1]
1596 add %rax,$A1[1]
1597 mov -16($aptr),%rax # a[2]
1598 adc \$0,%rdx
1599 add $A0[1],$A1[1]
1600 adc \$0,%rdx
1601
1602 mov $A1[1],($tptr) # t[4]
1603 mov %rdx,$A1[0]
1604 mov %rdx,8($tptr) # t[5]
1605
1606 mul $ai # a[2]*a[3]
1607___
1608{
1609my ($shift,$carry)=($a0,$a1);
1610my @S=(@A1,$ai,$n0);
1611$code.=<<___;
1612 add \$16,$i
1613 xor $shift,$shift
1614 sub $num,$i # $i=16-$num
1615 xor $carry,$carry
1616
1617 add $A1[0],%rax # t[5]
1618 adc \$0,%rdx
1619 mov %rax,8($tptr) # t[5]
1620 mov %rdx,16($tptr) # t[6]
1621 mov $carry,24($tptr) # t[7]
1622
1623 mov -16($aptr,$i),%rax # a[0]
1624 lea 48+8(%rsp),$tptr
1625 xor $A0[0],$A0[0] # t[0]
1626 mov 8($tptr),$A0[1] # t[1]
1627
1628 lea ($shift,$A0[0],2),$S[0] # t[2*i]<<1 | shift
1629 shr \$63,$A0[0]
1630 lea ($j,$A0[1],2),$S[1] # t[2*i+1]<<1 |
1631 shr \$63,$A0[1]
1632 or $A0[0],$S[1] # | t[2*i]>>63
1633 mov 16($tptr),$A0[0] # t[2*i+2] # prefetch
1634 mov $A0[1],$shift # shift=t[2*i+1]>>63
1635 mul %rax # a[i]*a[i]
1636 neg $carry # mov $carry,cf
1637 mov 24($tptr),$A0[1] # t[2*i+2+1] # prefetch
1638 adc %rax,$S[0]
1639 mov -8($aptr,$i),%rax # a[i+1] # prefetch
1640 mov $S[0],($tptr)
1641 adc %rdx,$S[1]
1642
1643 lea ($shift,$A0[0],2),$S[2] # t[2*i]<<1 | shift
1644 mov $S[1],8($tptr)
1645 sbb $carry,$carry # mov cf,$carry
1646 shr \$63,$A0[0]
1647 lea ($j,$A0[1],2),$S[3] # t[2*i+1]<<1 |
1648 shr \$63,$A0[1]
1649 or $A0[0],$S[3] # | t[2*i]>>63
1650 mov 32($tptr),$A0[0] # t[2*i+2] # prefetch
1651 mov $A0[1],$shift # shift=t[2*i+1]>>63
1652 mul %rax # a[i]*a[i]
1653 neg $carry # mov $carry,cf
1654 mov 40($tptr),$A0[1] # t[2*i+2+1] # prefetch
1655 adc %rax,$S[2]
1656 mov 0($aptr,$i),%rax # a[i+1] # prefetch
1657 mov $S[2],16($tptr)
1658 adc %rdx,$S[3]
1659 lea 16($i),$i
1660 mov $S[3],24($tptr)
1661 sbb $carry,$carry # mov cf,$carry
1662 lea 64($tptr),$tptr
1663 jmp .Lsqr4x_shift_n_add
1664
1665.align 32
1666.Lsqr4x_shift_n_add:
1667 lea ($shift,$A0[0],2),$S[0] # t[2*i]<<1 | shift
1668 shr \$63,$A0[0]
1669 lea ($j,$A0[1],2),$S[1] # t[2*i+1]<<1 |
1670 shr \$63,$A0[1]
1671 or $A0[0],$S[1] # | t[2*i]>>63
1672 mov -16($tptr),$A0[0] # t[2*i+2] # prefetch
1673 mov $A0[1],$shift # shift=t[2*i+1]>>63
1674 mul %rax # a[i]*a[i]
1675 neg $carry # mov $carry,cf
1676 mov -8($tptr),$A0[1] # t[2*i+2+1] # prefetch
1677 adc %rax,$S[0]
1678 mov -8($aptr,$i),%rax # a[i+1] # prefetch
1679 mov $S[0],-32($tptr)
1680 adc %rdx,$S[1]
1681
1682 lea ($shift,$A0[0],2),$S[2] # t[2*i]<<1 | shift
1683 mov $S[1],-24($tptr)
1684 sbb $carry,$carry # mov cf,$carry
1685 shr \$63,$A0[0]
1686 lea ($j,$A0[1],2),$S[3] # t[2*i+1]<<1 |
1687 shr \$63,$A0[1]
1688 or $A0[0],$S[3] # | t[2*i]>>63
1689 mov 0($tptr),$A0[0] # t[2*i+2] # prefetch
1690 mov $A0[1],$shift # shift=t[2*i+1]>>63
1691 mul %rax # a[i]*a[i]
1692 neg $carry # mov $carry,cf
1693 mov 8($tptr),$A0[1] # t[2*i+2+1] # prefetch
1694 adc %rax,$S[2]
1695 mov 0($aptr,$i),%rax # a[i+1] # prefetch
1696 mov $S[2],-16($tptr)
1697 adc %rdx,$S[3]
1698
1699 lea ($shift,$A0[0],2),$S[0] # t[2*i]<<1 | shift
1700 mov $S[3],-8($tptr)
1701 sbb $carry,$carry # mov cf,$carry
1702 shr \$63,$A0[0]
1703 lea ($j,$A0[1],2),$S[1] # t[2*i+1]<<1 |
1704 shr \$63,$A0[1]
1705 or $A0[0],$S[1] # | t[2*i]>>63
1706 mov 16($tptr),$A0[0] # t[2*i+2] # prefetch
1707 mov $A0[1],$shift # shift=t[2*i+1]>>63
1708 mul %rax # a[i]*a[i]
1709 neg $carry # mov $carry,cf
1710 mov 24($tptr),$A0[1] # t[2*i+2+1] # prefetch
1711 adc %rax,$S[0]
1712 mov 8($aptr,$i),%rax # a[i+1] # prefetch
1713 mov $S[0],0($tptr)
1714 adc %rdx,$S[1]
1715
1716 lea ($shift,$A0[0],2),$S[2] # t[2*i]<<1 | shift
1717 mov $S[1],8($tptr)
1718 sbb $carry,$carry # mov cf,$carry
1719 shr \$63,$A0[0]
1720 lea ($j,$A0[1],2),$S[3] # t[2*i+1]<<1 |
1721 shr \$63,$A0[1]
1722 or $A0[0],$S[3] # | t[2*i]>>63
1723 mov 32($tptr),$A0[0] # t[2*i+2] # prefetch
1724 mov $A0[1],$shift # shift=t[2*i+1]>>63
1725 mul %rax # a[i]*a[i]
1726 neg $carry # mov $carry,cf
1727 mov 40($tptr),$A0[1] # t[2*i+2+1] # prefetch
1728 adc %rax,$S[2]
1729 mov 16($aptr,$i),%rax # a[i+1] # prefetch
1730 mov $S[2],16($tptr)
1731 adc %rdx,$S[3]
1732 mov $S[3],24($tptr)
1733 sbb $carry,$carry # mov cf,$carry
1734 lea 64($tptr),$tptr
1735 add \$32,$i
1736 jnz .Lsqr4x_shift_n_add
1737
1738 lea ($shift,$A0[0],2),$S[0] # t[2*i]<<1 | shift
1739 .byte 0x67
1740 shr \$63,$A0[0]
1741 lea ($j,$A0[1],2),$S[1] # t[2*i+1]<<1 |
1742 shr \$63,$A0[1]
1743 or $A0[0],$S[1] # | t[2*i]>>63
1744 mov -16($tptr),$A0[0] # t[2*i+2] # prefetch
1745 mov $A0[1],$shift # shift=t[2*i+1]>>63
1746 mul %rax # a[i]*a[i]
1747 neg $carry # mov $carry,cf
1748 mov -8($tptr),$A0[1] # t[2*i+2+1] # prefetch
1749 adc %rax,$S[0]
1750 mov -8($aptr),%rax # a[i+1] # prefetch
1751 mov $S[0],-32($tptr)
1752 adc %rdx,$S[1]
1753
1754 lea ($shift,$A0[0],2),$S[2] # t[2*i]<<1|shift
1755 mov $S[1],-24($tptr)
1756 sbb $carry,$carry # mov cf,$carry
1757 shr \$63,$A0[0]
1758 lea ($j,$A0[1],2),$S[3] # t[2*i+1]<<1 |
1759 shr \$63,$A0[1]
1760 or $A0[0],$S[3] # | t[2*i]>>63
1761 mul %rax # a[i]*a[i]
1762 neg $carry # mov $carry,cf
1763 adc %rax,$S[2]
1764 adc %rdx,$S[3]
1765 mov $S[2],-16($tptr)
1766 mov $S[3],-8($tptr)
1767___
1768}
1769
1770######################################################################
1771# Montgomery reduction part, "word-by-word" algorithm.
1772#
1773# This new path is inspired by multiple submissions from Intel, by
1774# Shay Gueron, Vlad Krasnov, Erdinc Ozturk, James Guilford,
1775# Vinodh Gopal...
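#
# [Editorial sketch.] One word of the reduction, in plain terms: take the lowest
# live limb of t, form m = t[0]*n0 mod 2^64 (n0 = -n^-1 mod 2^64), add m*n so
# the low limb cancels, and move the window up one limb. A small Math::BigInt
# model with illustrative names; arguments are BigInt objects, and nothing in
# this file calls it:
use Math::BigInt;
sub mont_reduce_sketch {
	my ($t, $n, $n0, $num) = @_;			# assumes t < n * 2^(64*$num)
	my $w = Math::BigInt->new(2)->bpow(64);		# limb base, 2^64
	for (1 .. $num) {
		my $m = ($t % $w) * $n0 % $w;		# m = t[0]*n0 mod 2^64
		$t = ($t + $m*$n) / $w;			# low limb is now zero; drop it
	}
	$t -= $n if $t >= $n;				# final conditional subtraction
	return $t;					# t * 2^(-64*$num) mod n
}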
1776{
1777my ($nptr,$tptr,$carry,$m0)=("%rbp","%rdi","%rsi","%rbx");
1778
1779$code.=<<___;
1780 movq %xmm2,$nptr
1781__bn_sqr8x_reduction:
1782 xor %rax,%rax
1783 lea ($nptr,$num),%rcx # end of n[]
1784 lea 48+8(%rsp,$num,2),%rdx # end of t[] buffer
1785 mov %rcx,0+8(%rsp)
1786 lea 48+8(%rsp,$num),$tptr # end of initial t[] window
1787 mov %rdx,8+8(%rsp)
1788 neg $num
1789 jmp .L8x_reduction_loop
1790
1791.align 32
1792.L8x_reduction_loop:
1793 lea ($tptr,$num),$tptr # start of current t[] window
1794 .byte 0x66
1795 mov 8*0($tptr),$m0
1796 mov 8*1($tptr),%r9
1797 mov 8*2($tptr),%r10
1798 mov 8*3($tptr),%r11
1799 mov 8*4($tptr),%r12
1800 mov 8*5($tptr),%r13
1801 mov 8*6($tptr),%r14
1802 mov 8*7($tptr),%r15
1803 mov %rax,(%rdx) # store top-most carry bit
1804 lea 8*8($tptr),$tptr
1805
1806 .byte 0x67
1807 mov $m0,%r8
1808 imulq 32+8(%rsp),$m0 # n0*a[0]
1809 mov 8*0($nptr),%rax # n[0]
1810 mov \$8,%ecx
1811 jmp .L8x_reduce
1812
1813.align 32
1814.L8x_reduce:
1815 mulq $m0
1816 mov 8*1($nptr),%rax # n[1]
1817 neg %r8
1818 mov %rdx,%r8
1819 adc \$0,%r8
1820
1821 mulq $m0
1822 add %rax,%r9
1823 mov 8*2($nptr),%rax
1824 adc \$0,%rdx
1825 add %r9,%r8
1826 mov $m0,48-8+8(%rsp,%rcx,8) # put aside n0*a[i]
1827 mov %rdx,%r9
1828 adc \$0,%r9
1829
1830 mulq $m0
1831 add %rax,%r10
1832 mov 8*3($nptr),%rax
1833 adc \$0,%rdx
1834 add %r10,%r9
1835 mov 32+8(%rsp),$carry # pull n0, borrow $carry
1836 mov %rdx,%r10
1837 adc \$0,%r10
1838
1839 mulq $m0
1840 add %rax,%r11
1841 mov 8*4($nptr),%rax
1842 adc \$0,%rdx
1843 imulq %r8,$carry # modulo-scheduled
1844 add %r11,%r10
1845 mov %rdx,%r11
1846 adc \$0,%r11
1847
1848 mulq $m0
1849 add %rax,%r12
1850 mov 8*5($nptr),%rax
1851 adc \$0,%rdx
1852 add %r12,%r11
1853 mov %rdx,%r12
1854 adc \$0,%r12
1855
1856 mulq $m0
1857 add %rax,%r13
1858 mov 8*6($nptr),%rax
1859 adc \$0,%rdx
1860 add %r13,%r12
1861 mov %rdx,%r13
1862 adc \$0,%r13
1863
1864 mulq $m0
1865 add %rax,%r14
1866 mov 8*7($nptr),%rax
1867 adc \$0,%rdx
1868 add %r14,%r13
1869 mov %rdx,%r14
1870 adc \$0,%r14
1871
1872 mulq $m0
1873 mov $carry,$m0 # n0*a[i]
1874 add %rax,%r15
1875 mov 8*0($nptr),%rax # n[0]
1876 adc \$0,%rdx
1877 add %r15,%r14
1878 mov %rdx,%r15
1879 adc \$0,%r15
1880
1881 dec %ecx
1882 jnz .L8x_reduce
1883
1884 lea 8*8($nptr),$nptr
1885 xor %rax,%rax
1886 mov 8+8(%rsp),%rdx # pull end of t[]
1887 cmp 0+8(%rsp),$nptr # end of n[]?
1888 jae .L8x_no_tail
1889
1890 .byte 0x66
1891 add 8*0($tptr),%r8
1892 adc 8*1($tptr),%r9
1893 adc 8*2($tptr),%r10
1894 adc 8*3($tptr),%r11
1895 adc 8*4($tptr),%r12
1896 adc 8*5($tptr),%r13
1897 adc 8*6($tptr),%r14
1898 adc 8*7($tptr),%r15
1899 sbb $carry,$carry # top carry
1900
1901 mov 48+56+8(%rsp),$m0 # pull n0*a[0]
1902 mov \$8,%ecx
1903 mov 8*0($nptr),%rax
1904 jmp .L8x_tail
1905
1906.align 32
1907.L8x_tail:
1908 mulq $m0
1909 add %rax,%r8
1910 mov 8*1($nptr),%rax
1911 mov %r8,($tptr) # save result
1912 mov %rdx,%r8
1913 adc \$0,%r8
1914
1915 mulq $m0
1916 add %rax,%r9
1917 mov 8*2($nptr),%rax
1918 adc \$0,%rdx
1919 add %r9,%r8
1920 lea 8($tptr),$tptr # $tptr++
1921 mov %rdx,%r9
1922 adc \$0,%r9
1923
1924 mulq $m0
1925 add %rax,%r10
1926 mov 8*3($nptr),%rax
1927 adc \$0,%rdx
1928 add %r10,%r9
1929 mov %rdx,%r10
1930 adc \$0,%r10
1931
1932 mulq $m0
1933 add %rax,%r11
1934 mov 8*4($nptr),%rax
1935 adc \$0,%rdx
1936 add %r11,%r10
1937 mov %rdx,%r11
1938 adc \$0,%r11
1939
1940 mulq $m0
1941 add %rax,%r12
1942 mov 8*5($nptr),%rax
1943 adc \$0,%rdx
1944 add %r12,%r11
1945 mov %rdx,%r12
1946 adc \$0,%r12
1947
1948 mulq $m0
1949 add %rax,%r13
1950 mov 8*6($nptr),%rax
1951 adc \$0,%rdx
1952 add %r13,%r12
1953 mov %rdx,%r13
1954 adc \$0,%r13
1955
1956 mulq $m0
1957 add %rax,%r14
1958 mov 8*7($nptr),%rax
1959 adc \$0,%rdx
1960 add %r14,%r13
1961 mov %rdx,%r14
1962 adc \$0,%r14
1963
1964 mulq $m0
1965 mov 48-16+8(%rsp,%rcx,8),$m0# pull n0*a[i]
1966 add %rax,%r15
1967 adc \$0,%rdx
1968 add %r15,%r14
1969 mov 8*0($nptr),%rax # pull n[0]
1970 mov %rdx,%r15
1971 adc \$0,%r15
1972
1973 dec %ecx
1974 jnz .L8x_tail
1975
1976 lea 8*8($nptr),$nptr
1977 mov 8+8(%rsp),%rdx # pull end of t[]
1978 cmp 0+8(%rsp),$nptr # end of n[]?
1979 jae .L8x_tail_done # break out of loop
1980
1981 mov 48+56+8(%rsp),$m0 # pull n0*a[0]
1982 neg $carry
1983 mov 8*0($nptr),%rax # pull n[0]
1984 adc 8*0($tptr),%r8
1985 adc 8*1($tptr),%r9
1986 adc 8*2($tptr),%r10
1987 adc 8*3($tptr),%r11
1988 adc 8*4($tptr),%r12
1989 adc 8*5($tptr),%r13
1990 adc 8*6($tptr),%r14
1991 adc 8*7($tptr),%r15
1992 sbb $carry,$carry # top carry
1993
1994 mov \$8,%ecx
1995 jmp .L8x_tail
1996
1997.align 32
1998.L8x_tail_done:
1999 xor %rax,%rax
2000 add (%rdx),%r8 # can this overflow?
2001 adc \$0,%r9
2002 adc \$0,%r10
2003 adc \$0,%r11
2004 adc \$0,%r12
2005 adc \$0,%r13
2006 adc \$0,%r14
2007 adc \$0,%r15
2008 adc \$0,%rax
2009
2010 neg $carry
2011.L8x_no_tail:
2012 adc 8*0($tptr),%r8
2013 adc 8*1($tptr),%r9
2014 adc 8*2($tptr),%r10
2015 adc 8*3($tptr),%r11
2016 adc 8*4($tptr),%r12
2017 adc 8*5($tptr),%r13
2018 adc 8*6($tptr),%r14
2019 adc 8*7($tptr),%r15
2020 adc \$0,%rax # top-most carry
2021 mov -8($nptr),%rcx # np[num-1]
2022 xor $carry,$carry
2023
2024 movq %xmm2,$nptr # restore $nptr
2025
2026 mov %r8,8*0($tptr) # store top 512 bits
2027 mov %r9,8*1($tptr)
2028 movq %xmm3,$num # $num is %r9, can't be moved upwards
2029 mov %r10,8*2($tptr)
2030 mov %r11,8*3($tptr)
2031 mov %r12,8*4($tptr)
2032 mov %r13,8*5($tptr)
2033 mov %r14,8*6($tptr)
2034 mov %r15,8*7($tptr)
2035 lea 8*8($tptr),$tptr
2036
2037 cmp %rdx,$tptr # end of t[]?
2038 jb .L8x_reduction_loop
2039 ret
2040.cfi_endproc
2041.size bn_sqr8x_internal,.-bn_sqr8x_internal
2042___
2043}
2044
2045##############################################################
2046# Post-condition, 4x unrolled
2047#
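# [Editorial sketch.] The tail below conditionally subtracts the modulus using a
# mask (cf. the "neg %rax" / "and %rax" chain), so the same instructions execute
# whether or not the subtraction is needed. A limb-wise Perl model; the $bits
# parameter is illustrative, kept small so native integer arithmetic stays exact:
sub cond_sub_sketch {
	my ($t, $n, $need_sub, $bits) = @_;	# $t,$n: little-endian limb array refs
	$bits //= 16;
	my $mask = $need_sub ? (1 << $bits) - 1 : 0;	# all-ones or zero, data-independent
	my @r;
	my $borrow = 0;
	for my $i (0 .. $#{$t}) {
		my $s = ($n->[$i] & $mask) + $borrow;		# n[i] or 0, plus running borrow
		push @r, ($t->[$i] - $s) & ((1 << $bits) - 1);	# per-limb subtract, like SBB
		$borrow = $t->[$i] < $s ? 1 : 0;
	}
	return \@r;	# copy of t when need_sub is 0, t minus n otherwise
}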
2048{
2049my ($tptr,$nptr)=("%rbx","%rbp");
2050$code.=<<___;
2051.type __bn_post4x_internal,\@abi-omnipotent
2052.align 32
2053__bn_post4x_internal:
2054.cfi_startproc
2055 mov 8*0($nptr),%r12
2056 lea (%rdi,$num),$tptr # %rdi was $tptr above
2057 mov $num,%rcx
2058 movq %xmm1,$rptr # restore $rptr
2059 neg %rax
2060 movq %xmm1,$aptr # prepare for back-to-back call
2061 sar \$3+2,%rcx
2062 dec %r12 # so that after 'not' we get -n[0]
2063 xor %r10,%r10
2064 mov 8*1($nptr),%r13
2065 mov 8*2($nptr),%r14
2066 mov 8*3($nptr),%r15
2067 jmp .Lsqr4x_sub_entry
2068
2069.align 16
2070.Lsqr4x_sub:
2071 mov 8*0($nptr),%r12
2072 mov 8*1($nptr),%r13
2073 mov 8*2($nptr),%r14
2074 mov 8*3($nptr),%r15
2075.Lsqr4x_sub_entry:
2076 lea 8*4($nptr),$nptr
2077 not %r12
2078 not %r13
2079 not %r14
2080 not %r15
2081 and %rax,%r12
2082 and %rax,%r13
2083 and %rax,%r14
2084 and %rax,%r15
2085
2086 neg %r10 # mov %r10,%cf
2087 adc 8*0($tptr),%r12
2088 adc 8*1($tptr),%r13
2089 adc 8*2($tptr),%r14
2090 adc 8*3($tptr),%r15
2091 mov %r12,8*0($rptr)
2092 lea 8*4($tptr),$tptr
2093 mov %r13,8*1($rptr)
2094 sbb %r10,%r10 # mov %cf,%r10
2095 mov %r14,8*2($rptr)
2096 mov %r15,8*3($rptr)
2097 lea 8*4($rptr),$rptr
2098
2099 inc %rcx # pass %cf
2100 jnz .Lsqr4x_sub
2101
2102 mov $num,%r10 # prepare for back-to-back call
2103 neg $num # restore $num
2104 ret
2105.cfi_endproc
2106.size __bn_post4x_internal,.-__bn_post4x_internal
2107___
2108}
2109}}}
2110
2111
2112if ($addx) {{{
2113my $bp="%rdx"; # restore original value
2114
2115$code.=<<___;
2116.type bn_mulx4x_mont_gather5,\@function,6
2117.align 32
2118bn_mulx4x_mont_gather5:
2119.cfi_startproc
2120 mov %rsp,%rax
2121.cfi_def_cfa_register %rax
2122.Lmulx4x_enter:
2123 push %rbx
2124.cfi_push %rbx
2125 push %rbp
2126.cfi_push %rbp
2127 push %r12
2128.cfi_push %r12
2129 push %r13
2130.cfi_push %r13
2131 push %r14
2132.cfi_push %r14
2133 push %r15
2134.cfi_push %r15
2135.Lmulx4x_prologue:
2136
2137 shl \$3,${num}d # convert $num to bytes
2138 lea ($num,$num,2),%r10 # 3*$num in bytes
2139 neg $num # -$num
2140 mov ($n0),$n0 # *n0
2141
2142 ##############################################################
2143 # Ensure that stack frame doesn't alias with $rptr+3*$num
2144 # modulo 4096, which covers ret[num], am[num] and n[num]
2145 # (see bn_exp.c). This is done to allow memory disambiguation
2146 # logic to do its magic. [Extra [num] is allocated in order
2147 # to align with bn_power5's frame, which is cleansed after
2148 # completing exponentiation. Extra 256 bytes is for power mask
2149 # calculated from 7th argument, the index.]
2150 #
2151 lea -320(%rsp,$num,2),%r11
2152 mov %rsp,%rbp
2153 sub $rp,%r11
2154 and \$4095,%r11
2155 cmp %r11,%r10
2156 jb .Lmulx4xsp_alt
2157 sub %r11,%rbp # align with $aptr
2158 lea -320(%rbp,$num,2),%rbp # future alloca(frame+2*$num*8+256)
2159 jmp .Lmulx4xsp_done
2160
2161.Lmulx4xsp_alt:
2162 lea 4096-320(,$num,2),%r10
2163 lea -320(%rbp,$num,2),%rbp # future alloca(frame+2*$num*8+256)
2164 sub %r10,%r11
2165 mov \$0,%r10
2166 cmovc %r10,%r11
2167 sub %r11,%rbp
2168.Lmulx4xsp_done:
2169 and \$-64,%rbp # ensure alignment
2170 mov %rsp,%r11
2171 sub %rbp,%r11
2172 and \$-4096,%r11
2173 lea (%rbp,%r11),%rsp
2174 mov (%rsp),%r10
2175 cmp %rbp,%rsp
2176 ja .Lmulx4x_page_walk
2177 jmp .Lmulx4x_page_walk_done
2178
2179.Lmulx4x_page_walk:
2180 lea -4096(%rsp),%rsp
2181 mov (%rsp),%r10
2182 cmp %rbp,%rsp
2183 ja .Lmulx4x_page_walk
2184.Lmulx4x_page_walk_done:
2185
2186 ##############################################################
2187 # Stack layout
2188 # +0 -num
2189 # +8 off-loaded &b[i]
2190 # +16 end of b[num]
2191 # +24 inner counter
2192 # +32 saved n0
2193 # +40 saved %rsp
2194 # +48
2195 # +56 saved rp
2196 # +64 tmp[num+1]
2197 #
2198 mov $n0, 32(%rsp) # save *n0
2199 mov %rax,40(%rsp) # save original %rsp
2200.cfi_cfa_expression %rsp+40,deref,+8
2201.Lmulx4x_body:
2202 call mulx4x_internal
2203
2204 mov 40(%rsp),%rsi # restore %rsp
2205.cfi_def_cfa %rsi,8
2206 mov \$1,%rax
2207
2208 mov -48(%rsi),%r15
2209.cfi_restore %r15
2210 mov -40(%rsi),%r14
2211.cfi_restore %r14
2212 mov -32(%rsi),%r13
2213.cfi_restore %r13
2214 mov -24(%rsi),%r12
2215.cfi_restore %r12
2216 mov -16(%rsi),%rbp
2217.cfi_restore %rbp
2218 mov -8(%rsi),%rbx
2219.cfi_restore %rbx
2220 lea (%rsi),%rsp
2221.cfi_def_cfa_register %rsp
2222.Lmulx4x_epilogue:
2223 ret
2224.cfi_endproc
2225.size bn_mulx4x_mont_gather5,.-bn_mulx4x_mont_gather5
2226
2227.type mulx4x_internal,\@abi-omnipotent
2228.align 32
2229mulx4x_internal:
2230.cfi_startproc
2231 mov $num,8(%rsp) # save -$num (it was in bytes)
2232 mov $num,%r10
2233 neg $num # restore $num
2234 shl \$5,$num
2235 neg %r10 # restore $num
2236 lea 128($bp,$num),%r13 # end of powers table (+size optimization)
2237 shr \$5+5,$num
2238 movd `($win64?56:8)`(%rax),%xmm5 # load 7th argument
2239 sub \$1,$num
2240 lea .Linc(%rip),%rax
2241 mov %r13,16+8(%rsp) # end of b[num]
2242 mov $num,24+8(%rsp) # inner counter
2243 mov $rp, 56+8(%rsp) # save $rp
2244___
2245my ($aptr, $bptr, $nptr, $tptr, $mi, $bi, $zero, $num)=
2246 ("%rsi","%rdi","%rcx","%rbx","%r8","%r9","%rbp","%rax");
2247my $rptr=$bptr;
2248my $STRIDE=2**5*8; # 5 is "window size"
2249my $N=$STRIDE/4; # should match cache line size
2250$code.=<<___;
2251 movdqa 0(%rax),%xmm0 # 00000001000000010000000000000000
2252 movdqa 16(%rax),%xmm1 # 00000002000000020000000200000002
2253 lea 88-112(%rsp,%r10),%r10 # place the mask after tp[num+1] (+ICache optimization)
2254 lea 128($bp),$bptr # size optimization
2255
2256 pshufd \$0,%xmm5,%xmm5 # broadcast index
2257 movdqa %xmm1,%xmm4
2258 .byte 0x67
2259 movdqa %xmm1,%xmm2
2260___
2261########################################################################
2262# calculate mask by comparing 0..31 to index and save result to stack
2263#
2264$code.=<<___;
2265 .byte 0x67
2266 paddd %xmm0,%xmm1
2267 pcmpeqd %xmm5,%xmm0 # compare to 1,0
2268 movdqa %xmm4,%xmm3
2269___
2270for($i=0;$i<$STRIDE/16-4;$i+=4) {
2271$code.=<<___;
2272 paddd %xmm1,%xmm2
2273 pcmpeqd %xmm5,%xmm1 # compare to 3,2
2274 movdqa %xmm0,`16*($i+0)+112`(%r10)
2275 movdqa %xmm4,%xmm0
2276
2277 paddd %xmm2,%xmm3
2278 pcmpeqd %xmm5,%xmm2 # compare to 5,4
2279 movdqa %xmm1,`16*($i+1)+112`(%r10)
2280 movdqa %xmm4,%xmm1
2281
2282 paddd %xmm3,%xmm0
2283 pcmpeqd %xmm5,%xmm3 # compare to 7,6
2284 movdqa %xmm2,`16*($i+2)+112`(%r10)
2285 movdqa %xmm4,%xmm2
2286
2287 paddd %xmm0,%xmm1
2288 pcmpeqd %xmm5,%xmm0
2289 movdqa %xmm3,`16*($i+3)+112`(%r10)
2290 movdqa %xmm4,%xmm3
2291___
2292}
2293$code.=<<___; # last iteration can be optimized
2294 .byte 0x67
2295 paddd %xmm1,%xmm2
2296 pcmpeqd %xmm5,%xmm1
2297 movdqa %xmm0,`16*($i+0)+112`(%r10)
2298
2299 paddd %xmm2,%xmm3
2300 pcmpeqd %xmm5,%xmm2
2301 movdqa %xmm1,`16*($i+1)+112`(%r10)
2302
2303 pcmpeqd %xmm5,%xmm3
2304 movdqa %xmm2,`16*($i+2)+112`(%r10)
2305
2306 pand `16*($i+0)-128`($bptr),%xmm0 # while it's still in register
2307 pand `16*($i+1)-128`($bptr),%xmm1
2308 pand `16*($i+2)-128`($bptr),%xmm2
2309 movdqa %xmm3,`16*($i+3)+112`(%r10)
2310 pand `16*($i+3)-128`($bptr),%xmm3
2311 por %xmm2,%xmm0
2312 por %xmm3,%xmm1
2313___
2314for($i=0;$i<$STRIDE/16-4;$i+=4) {
2315$code.=<<___;
2316 movdqa `16*($i+0)-128`($bptr),%xmm4
2317 movdqa `16*($i+1)-128`($bptr),%xmm5
2318 movdqa `16*($i+2)-128`($bptr),%xmm2
2319 pand `16*($i+0)+112`(%r10),%xmm4
2320 movdqa `16*($i+3)-128`($bptr),%xmm3
2321 pand `16*($i+1)+112`(%r10),%xmm5
2322 por %xmm4,%xmm0
2323 pand `16*($i+2)+112`(%r10),%xmm2
2324 por %xmm5,%xmm1
2325 pand `16*($i+3)+112`(%r10),%xmm3
2326 por %xmm2,%xmm0
2327 por %xmm3,%xmm1
2328___
2329}
2330$code.=<<___;
2331 pxor %xmm1,%xmm0
2332 pshufd \$0x4e,%xmm0,%xmm1
2333 por %xmm1,%xmm0
2334 lea $STRIDE($bptr),$bptr
2335 movq %xmm0,%rdx # bp[0]
2336 lea 64+8*4+8(%rsp),$tptr
2337
2338 mov %rdx,$bi
2339 mulx 0*8($aptr),$mi,%rax # a[0]*b[0]
2340 mulx 1*8($aptr),%r11,%r12 # a[1]*b[0]
2341 add %rax,%r11
2342 mulx 2*8($aptr),%rax,%r13 # ...
2343 adc %rax,%r12
2344 adc \$0,%r13
2345 mulx 3*8($aptr),%rax,%r14
2346
2347 mov $mi,%r15
2348 imulq 32+8(%rsp),$mi # "t[0]"*n0
2349 xor $zero,$zero # cf=0, of=0
2350 mov $mi,%rdx
2351
2352 mov $bptr,8+8(%rsp) # off-load &b[i]
2353
2354 lea 4*8($aptr),$aptr
2355 adcx %rax,%r13
2356 adcx $zero,%r14 # cf=0
2357
2358 mulx 0*8($nptr),%rax,%r10
2359 adcx %rax,%r15 # discarded
2360 adox %r11,%r10
2361 mulx 1*8($nptr),%rax,%r11
2362 adcx %rax,%r10
2363 adox %r12,%r11
2364 mulx 2*8($nptr),%rax,%r12
2365 mov 24+8(%rsp),$bptr # counter value
2366 mov %r10,-8*4($tptr)
2367 adcx %rax,%r11
2368 adox %r13,%r12
2369 mulx 3*8($nptr),%rax,%r15
2370 mov $bi,%rdx
2371 mov %r11,-8*3($tptr)
2372 adcx %rax,%r12
2373 adox $zero,%r15 # of=0
2374 lea 4*8($nptr),$nptr
2375 mov %r12,-8*2($tptr)
2376 jmp .Lmulx4x_1st
2377
2378.align 32
2379.Lmulx4x_1st:
2380 adcx $zero,%r15 # cf=0, modulo-scheduled
2381 mulx 0*8($aptr),%r10,%rax # a[4]*b[0]
2382 adcx %r14,%r10
2383 mulx 1*8($aptr),%r11,%r14 # a[5]*b[0]
2384 adcx %rax,%r11
2385 mulx 2*8($aptr),%r12,%rax # ...
2386 adcx %r14,%r12
2387 mulx 3*8($aptr),%r13,%r14
2388 .byte 0x67,0x67
2389 mov $mi,%rdx
2390 adcx %rax,%r13
2391 adcx $zero,%r14 # cf=0
2392 lea 4*8($aptr),$aptr
2393 lea 4*8($tptr),$tptr
2394
2395 adox %r15,%r10
2396 mulx 0*8($nptr),%rax,%r15
2397 adcx %rax,%r10
2398 adox %r15,%r11
2399 mulx 1*8($nptr),%rax,%r15
2400 adcx %rax,%r11
2401 adox %r15,%r12
2402 mulx 2*8($nptr),%rax,%r15
2403 mov %r10,-5*8($tptr)
2404 adcx %rax,%r12
2405 mov %r11,-4*8($tptr)
2406 adox %r15,%r13
2407 mulx 3*8($nptr),%rax,%r15
2408 mov $bi,%rdx
2409 mov %r12,-3*8($tptr)
2410 adcx %rax,%r13
2411 adox $zero,%r15
2412 lea 4*8($nptr),$nptr
2413 mov %r13,-2*8($tptr)
2414
2415 dec $bptr # of=0, pass cf
2416 jnz .Lmulx4x_1st
2417
2418 mov 8(%rsp),$num # load -num
2419 adc $zero,%r15 # modulo-scheduled
2420 lea ($aptr,$num),$aptr # rewind $aptr
2421 add %r15,%r14
2422 mov 8+8(%rsp),$bptr # re-load &b[i]
2423 adc $zero,$zero # top-most carry
2424 mov %r14,-1*8($tptr)
2425 jmp .Lmulx4x_outer
2426
2427.align 32
2428.Lmulx4x_outer:
2429 lea 16-256($tptr),%r10 # where 256-byte mask is (+density control)
2430 pxor %xmm4,%xmm4
2431 .byte 0x67,0x67
2432 pxor %xmm5,%xmm5
2433___
2434for($i=0;$i<$STRIDE/16;$i+=4) {
2435$code.=<<___;
2436 movdqa `16*($i+0)-128`($bptr),%xmm0
2437 movdqa `16*($i+1)-128`($bptr),%xmm1
2438 movdqa `16*($i+2)-128`($bptr),%xmm2
2439 pand `16*($i+0)+256`(%r10),%xmm0
2440 movdqa `16*($i+3)-128`($bptr),%xmm3
2441 pand `16*($i+1)+256`(%r10),%xmm1
2442 por %xmm0,%xmm4
2443 pand `16*($i+2)+256`(%r10),%xmm2
2444 por %xmm1,%xmm5
2445 pand `16*($i+3)+256`(%r10),%xmm3
2446 por %xmm2,%xmm4
2447 por %xmm3,%xmm5
2448___
2449}
2450$code.=<<___;
2451 por %xmm5,%xmm4
2452 pshufd \$0x4e,%xmm4,%xmm0
2453 por %xmm4,%xmm0
2454 lea $STRIDE($bptr),$bptr
2455 movq %xmm0,%rdx # m0=bp[i]
2456
2457 mov $zero,($tptr) # save top-most carry
2458 lea 4*8($tptr,$num),$tptr # rewind $tptr
2459 mulx 0*8($aptr),$mi,%r11 # a[0]*b[i]
2460 xor $zero,$zero # cf=0, of=0
2461 mov %rdx,$bi
2462 mulx 1*8($aptr),%r14,%r12 # a[1]*b[i]
2463 adox -4*8($tptr),$mi # +t[0]
2464 adcx %r14,%r11
2465 mulx 2*8($aptr),%r15,%r13 # ...
2466 adox -3*8($tptr),%r11
2467 adcx %r15,%r12
2468 mulx 3*8($aptr),%rdx,%r14
2469 adox -2*8($tptr),%r12
2470 adcx %rdx,%r13
2471 lea ($nptr,$num),$nptr # rewind $nptr
2472 lea 4*8($aptr),$aptr
2473 adox -1*8($tptr),%r13
2474 adcx $zero,%r14
2475 adox $zero,%r14
2476
2477 mov $mi,%r15
2478 imulq 32+8(%rsp),$mi # "t[0]"*n0
2479
2480 mov $mi,%rdx
2481 xor $zero,$zero # cf=0, of=0
2482 mov $bptr,8+8(%rsp) # off-load &b[i]
2483
2484 mulx 0*8($nptr),%rax,%r10
2485 adcx %rax,%r15 # discarded
2486 adox %r11,%r10
2487 mulx 1*8($nptr),%rax,%r11
2488 adcx %rax,%r10
2489 adox %r12,%r11
2490 mulx 2*8($nptr),%rax,%r12
2491 adcx %rax,%r11
2492 adox %r13,%r12
2493 mulx 3*8($nptr),%rax,%r15
2494 mov $bi,%rdx
2495 mov 24+8(%rsp),$bptr # counter value
2496 mov %r10,-8*4($tptr)
2497 adcx %rax,%r12
2498 mov %r11,-8*3($tptr)
2499 adox $zero,%r15 # of=0
2500 mov %r12,-8*2($tptr)
2501 lea 4*8($nptr),$nptr
2502 jmp .Lmulx4x_inner
2503
2504.align 32
2505.Lmulx4x_inner:
2506 mulx 0*8($aptr),%r10,%rax # a[4]*b[i]
2507 adcx $zero,%r15 # cf=0, modulo-scheduled
2508 adox %r14,%r10
2509 mulx 1*8($aptr),%r11,%r14 # a[5]*b[i]
2510 adcx 0*8($tptr),%r10
2511 adox %rax,%r11
2512 mulx 2*8($aptr),%r12,%rax # ...
2513 adcx 1*8($tptr),%r11
2514 adox %r14,%r12
2515 mulx 3*8($aptr),%r13,%r14
2516 mov $mi,%rdx
2517 adcx 2*8($tptr),%r12
2518 adox %rax,%r13
2519 adcx 3*8($tptr),%r13
2520 adox $zero,%r14 # of=0
2521 lea 4*8($aptr),$aptr
2522 lea 4*8($tptr),$tptr
2523 adcx $zero,%r14 # cf=0
2524
2525 adox %r15,%r10
2526 mulx 0*8($nptr),%rax,%r15
2527 adcx %rax,%r10
2528 adox %r15,%r11
2529 mulx 1*8($nptr),%rax,%r15
2530 adcx %rax,%r11
2531 adox %r15,%r12
2532 mulx 2*8($nptr),%rax,%r15
2533 mov %r10,-5*8($tptr)
2534 adcx %rax,%r12
2535 adox %r15,%r13
2536 mov %r11,-4*8($tptr)
2537 mulx 3*8($nptr),%rax,%r15
2538 mov $bi,%rdx
2539 lea 4*8($nptr),$nptr
2540 mov %r12,-3*8($tptr)
2541 adcx %rax,%r13
2542 adox $zero,%r15
2543 mov %r13,-2*8($tptr)
2544
2545 dec $bptr # of=0, pass cf
2546 jnz .Lmulx4x_inner
2547
2548 mov 0+8(%rsp),$num # load -num
2549 adc $zero,%r15 # modulo-scheduled
2550 sub 0*8($tptr),$bptr # pull top-most carry to %cf
2551 mov 8+8(%rsp),$bptr # re-load &b[i]
2552 mov 16+8(%rsp),%r10
2553 adc %r15,%r14
2554 lea ($aptr,$num),$aptr # rewind $aptr
2555 adc $zero,$zero # top-most carry
2556 mov %r14,-1*8($tptr)
2557
2558 cmp %r10,$bptr
2559 jb .Lmulx4x_outer
2560
2561 mov -8($nptr),%r10
2562 mov $zero,%r8
2563 mov ($nptr,$num),%r12
2564 lea ($nptr,$num),%rbp # rewind $nptr
2565 mov $num,%rcx
2566 lea ($tptr,$num),%rdi # rewind $tptr
2567 xor %eax,%eax
2568 xor %r15,%r15
2569 sub %r14,%r10 # compare top-most words
2570 adc %r15,%r15
2571 or %r15,%r8
2572 sar \$3+2,%rcx
2573 sub %r8,%rax # %rax=-%r8
2574 mov 56+8(%rsp),%rdx # restore rp
2575 dec %r12 # so that after 'not' we get -n[0]
2576 mov 8*1(%rbp),%r13
2577 xor %r8,%r8
2578 mov 8*2(%rbp),%r14
2579 mov 8*3(%rbp),%r15
2580 jmp .Lsqrx4x_sub_entry # common post-condition
2581.cfi_endproc
2582.size mulx4x_internal,.-mulx4x_internal
2583___
2584}
2585{
2586######################################################################
2587# void bn_powerx5(
2588my $rptr="%rdi"; # BN_ULONG *rptr,
2589my $aptr="%rsi"; # const BN_ULONG *aptr,
2590my $bptr="%rdx"; # const void *table,
2591my $nptr="%rcx"; # const BN_ULONG *nptr,
2592my $n0 ="%r8"; # const BN_ULONG *n0,
2593my $num ="%r9"; # int num, has to be divisible by 8
2594 # int pwr);
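# Editorial reference sketch, not used in code generation: bn_powerx5 performs
# one 5-bit fixed-window step of BN_mod_exp_mont_consttime - five Montgomery
# squarings followed by one Montgomery multiplication with the table entry
# gathered by the 7th argument. ref_power5_step is an illustrative name and
# models the step with ordinary modular arithmetic rather than Montgomery form.
sub ref_power5_step {
	my ($acc, $tbl_entry, $n) = @_;		# Math::BigInt values; $tbl_entry = table[pwr]
	$acc = ($acc * $acc) % $n for 1 .. 5;	# acc = acc^32 mod n
	return ($acc * $tbl_entry) % $n;	# acc = acc^32 * table[pwr] mod n
}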
2595
2596my ($i,$j,$tptr)=("%rbp","%rcx",$rptr);
2597my @A0=("%r10","%r11");
2598my @A1=("%r12","%r13");
2599my ($a0,$a1,$ai)=("%r14","%r15","%rbx");
2600
2601$code.=<<___;
2602.type bn_powerx5,\@function,6
2603.align 32
2604bn_powerx5:
2605.cfi_startproc
2606 mov %rsp,%rax
2607.cfi_def_cfa_register %rax
2608.Lpowerx5_enter:
2609 push %rbx
2610.cfi_push %rbx
2611 push %rbp
2612.cfi_push %rbp
2613 push %r12
2614.cfi_push %r12
2615 push %r13
2616.cfi_push %r13
2617 push %r14
2618.cfi_push %r14
2619 push %r15
2620.cfi_push %r15
2621.Lpowerx5_prologue:
2622
2623 shl \$3,${num}d # convert $num to bytes
2624 lea ($num,$num,2),%r10 # 3*$num in bytes
2625 neg $num
2626 mov ($n0),$n0 # *n0
2627
2628 ##############################################################
2629 # Ensure that stack frame doesn't alias with $rptr+3*$num
2630 # modulo 4096, which covers ret[num], am[num] and n[num]
2631 # (see bn_exp.c). This is done to allow the memory disambiguation
2632 # logic to do its magic. [The extra 256 bytes are for the power mask
2633 # calculated from the 7th argument, the index.]
2634 #
2635 lea -320(%rsp,$num,2),%r11
2636 mov %rsp,%rbp
2637 sub $rptr,%r11
2638 and \$4095,%r11
2639 cmp %r11,%r10
2640 jb .Lpwrx_sp_alt
2641 sub %r11,%rbp # align with $aptr
2642 lea -320(%rbp,$num,2),%rbp # future alloca(frame+2*$num*8+256)
2643 jmp .Lpwrx_sp_done
2644
2645.align 32
2646.Lpwrx_sp_alt:
2647 lea 4096-320(,$num,2),%r10
2648 lea -320(%rbp,$num,2),%rbp # alloca(frame+2*$num*8+256)
2649 sub %r10,%r11
2650 mov \$0,%r10
2651 cmovc %r10,%r11
2652 sub %r11,%rbp
2653.Lpwrx_sp_done:
2654 and \$-64,%rbp
2655 mov %rsp,%r11
2656 sub %rbp,%r11
2657 and \$-4096,%r11
2658 lea (%rbp,%r11),%rsp
2659 mov (%rsp),%r10
2660 cmp %rbp,%rsp
2661 ja .Lpwrx_page_walk
2662 jmp .Lpwrx_page_walk_done
2663
2664.Lpwrx_page_walk:
2665 lea -4096(%rsp),%rsp
2666 mov (%rsp),%r10
2667 cmp %rbp,%rsp
2668 ja .Lpwrx_page_walk
2669.Lpwrx_page_walk_done:
2670
2671 mov $num,%r10
2672 neg $num
2673
2674 ##############################################################
2675 # Stack layout
2676 #
2677 # +0 saved $num, used in reduction section
2678 # +8 &t[2*$num], used in reduction section
2679 # +16 intermediate carry bit
2680 # +24 top-most carry bit, used in reduction section
2681 # +32 saved *n0
2682 # +40 saved %rsp
2683 # +48 t[2*$num]
2684 #
2685 pxor %xmm0,%xmm0
2686 movq $rptr,%xmm1 # save $rptr
2687 movq $nptr,%xmm2 # save $nptr
2688 movq %r10, %xmm3 # -$num
2689 movq $bptr,%xmm4
2690 mov $n0, 32(%rsp)
2691 mov %rax, 40(%rsp) # save original %rsp
2692.cfi_cfa_expression %rsp+40,deref,+8
2693.Lpowerx5_body:
2694
2695 call __bn_sqrx8x_internal
2696 call __bn_postx4x_internal
2697 call __bn_sqrx8x_internal
2698 call __bn_postx4x_internal
2699 call __bn_sqrx8x_internal
2700 call __bn_postx4x_internal
2701 call __bn_sqrx8x_internal
2702 call __bn_postx4x_internal
2703 call __bn_sqrx8x_internal
2704 call __bn_postx4x_internal
2705
2706 mov %r10,$num # -num
2707 mov $aptr,$rptr
2708 movq %xmm2,$nptr
2709 movq %xmm4,$bptr
2710 mov 40(%rsp),%rax
2711
2712 call mulx4x_internal
2713
2714 mov 40(%rsp),%rsi # restore %rsp
2715.cfi_def_cfa %rsi,8
2716 mov \$1,%rax
2717
2718 mov -48(%rsi),%r15
2719.cfi_restore %r15
2720 mov -40(%rsi),%r14
2721.cfi_restore %r14
2722 mov -32(%rsi),%r13
2723.cfi_restore %r13
2724 mov -24(%rsi),%r12
2725.cfi_restore %r12
2726 mov -16(%rsi),%rbp
2727.cfi_restore %rbp
2728 mov -8(%rsi),%rbx
2729.cfi_restore %rbx
2730 lea (%rsi),%rsp
2731.cfi_def_cfa_register %rsp
2732.Lpowerx5_epilogue:
2733 ret
2734.cfi_endproc
2735.size bn_powerx5,.-bn_powerx5
2736
2737.globl bn_sqrx8x_internal
2738.hidden bn_sqrx8x_internal
2739.type bn_sqrx8x_internal,\@abi-omnipotent
2740.align 32
2741bn_sqrx8x_internal:
2742__bn_sqrx8x_internal:
2743.cfi_startproc
2744 ##################################################################
2745 # Squaring part:
2746 #
2747 # a) multiply-n-add everything but a[i]*a[i];
2748 # b) shift result of a) by 1 to the left and accumulate
2749 # a[i]*a[i] products;
2750 #
2751 ##################################################################
2752 # a[7]a[7]a[6]a[6]a[5]a[5]a[4]a[4]a[3]a[3]a[2]a[2]a[1]a[1]a[0]a[0]
2753 # a[1]a[0]
2754 # a[2]a[0]
2755 # a[3]a[0]
2756 # a[2]a[1]
2757 # a[3]a[1]
2758 # a[3]a[2]
2759 #
2760 # a[4]a[0]
2761 # a[5]a[0]
2762 # a[6]a[0]
2763 # a[7]a[0]
2764 # a[4]a[1]
2765 # a[5]a[1]
2766 # a[6]a[1]
2767 # a[7]a[1]
2768 # a[4]a[2]
2769 # a[5]a[2]
2770 # a[6]a[2]
2771 # a[7]a[2]
2772 # a[4]a[3]
2773 # a[5]a[3]
2774 # a[6]a[3]
2775 # a[7]a[3]
2776 #
2777 # a[5]a[4]
2778 # a[6]a[4]
2779 # a[7]a[4]
2780 # a[6]a[5]
2781 # a[7]a[5]
2782 # a[7]a[6]
2783 # a[7]a[7]a[6]a[6]a[5]a[5]a[4]a[4]a[3]a[3]a[2]a[2]a[1]a[1]a[0]a[0]
2784___
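# Editorial reference sketch, not used in code generation: a functional model
# of the two passes described above - accumulate each cross product a[i]*a[j]
# (i<j) once, double everything, then add the a[i]*a[i] diagonal.
# ref_square_words is an illustrative name; it keeps limbs small and leaves
# carry propagation out, which the real code folds into the same passes.
sub ref_square_words {
	my @a = @_;				# little-endian limbs, small enough not to overflow
	my @t = (0) x (2 * @a);
	for my $i (0 .. $#a) {
		for my $j ($i + 1 .. $#a) {	# step a): everything but a[i]*a[i]
			$t[$i + $j] += $a[$i] * $a[$j];
		}
	}
	$t[$_] *= 2 for 0 .. $#t;		# step b): shift left by one bit...
	$t[2 * $_] += $a[$_] * $a[$_] for 0 .. $#a;	# ...and accumulate the diagonal
	return @t;				# 2*limbs result, carries not normalized
}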
2785{
2786my ($zero,$carry)=("%rbp","%rcx");
2787my $aaptr=$zero;
2788$code.=<<___;
2789 lea 48+8(%rsp),$tptr
2790 lea ($aptr,$num),$aaptr
2791 mov $num,0+8(%rsp) # save $num
2792 mov $aaptr,8+8(%rsp) # save end of $aptr
2793 jmp .Lsqr8x_zero_start
2794
2795.align 32
2796.byte 0x66,0x66,0x66,0x2e,0x0f,0x1f,0x84,0x00,0x00,0x00,0x00,0x00
2797.Lsqrx8x_zero:
2798 .byte 0x3e
2799 movdqa %xmm0,0*8($tptr)
2800 movdqa %xmm0,2*8($tptr)
2801 movdqa %xmm0,4*8($tptr)
2802 movdqa %xmm0,6*8($tptr)
2803.Lsqr8x_zero_start: # aligned at 32
2804 movdqa %xmm0,8*8($tptr)
2805 movdqa %xmm0,10*8($tptr)
2806 movdqa %xmm0,12*8($tptr)
2807 movdqa %xmm0,14*8($tptr)
2808 lea 16*8($tptr),$tptr
2809 sub \$64,$num
2810 jnz .Lsqrx8x_zero
2811
2812 mov 0*8($aptr),%rdx # a[0], modulo-scheduled
2813 #xor %r9,%r9 # t[1], ex-$num, zero already
2814 xor %r10,%r10
2815 xor %r11,%r11
2816 xor %r12,%r12
2817 xor %r13,%r13
2818 xor %r14,%r14
2819 xor %r15,%r15
2820 lea 48+8(%rsp),$tptr
2821 xor $zero,$zero # cf=0, of=0
2822 jmp .Lsqrx8x_outer_loop
2823
2824.align 32
2825.Lsqrx8x_outer_loop:
2826 mulx 1*8($aptr),%r8,%rax # a[1]*a[0]
2827 adcx %r9,%r8 # a[1]*a[0]+=t[1]
2828 adox %rax,%r10
2829 mulx 2*8($aptr),%r9,%rax # a[2]*a[0]
2830 adcx %r10,%r9
2831 adox %rax,%r11
2832 .byte 0xc4,0xe2,0xab,0xf6,0x86,0x18,0x00,0x00,0x00 # mulx 3*8($aptr),%r10,%rax # ...
2833 adcx %r11,%r10
2834 adox %rax,%r12
2835 .byte 0xc4,0xe2,0xa3,0xf6,0x86,0x20,0x00,0x00,0x00 # mulx 4*8($aptr),%r11,%rax
2836 adcx %r12,%r11
2837 adox %rax,%r13
2838 mulx 5*8($aptr),%r12,%rax
2839 adcx %r13,%r12
2840 adox %rax,%r14
2841 mulx 6*8($aptr),%r13,%rax
2842 adcx %r14,%r13
2843 adox %r15,%rax
2844 mulx 7*8($aptr),%r14,%r15
2845 mov 1*8($aptr),%rdx # a[1]
2846 adcx %rax,%r14
2847 adox $zero,%r15
2848 adc 8*8($tptr),%r15
2849 mov %r8,1*8($tptr) # t[1]
2850 mov %r9,2*8($tptr) # t[2]
2851 sbb $carry,$carry # mov %cf,$carry
2852 xor $zero,$zero # cf=0, of=0
2853
2854
2855 mulx 2*8($aptr),%r8,%rbx # a[2]*a[1]
2856 mulx 3*8($aptr),%r9,%rax # a[3]*a[1]
2857 adcx %r10,%r8
2858 adox %rbx,%r9
2859 mulx 4*8($aptr),%r10,%rbx # ...
2860 adcx %r11,%r9
2861 adox %rax,%r10
2862 .byte 0xc4,0xe2,0xa3,0xf6,0x86,0x28,0x00,0x00,0x00 # mulx 5*8($aptr),%r11,%rax
2863 adcx %r12,%r10
2864 adox %rbx,%r11
2865 .byte 0xc4,0xe2,0x9b,0xf6,0x9e,0x30,0x00,0x00,0x00 # mulx 6*8($aptr),%r12,%rbx
2866 adcx %r13,%r11
2867 adox %r14,%r12
2868 .byte 0xc4,0x62,0x93,0xf6,0xb6,0x38,0x00,0x00,0x00 # mulx 7*8($aptr),%r13,%r14
2869 mov 2*8($aptr),%rdx # a[2]
2870 adcx %rax,%r12
2871 adox %rbx,%r13
2872 adcx %r15,%r13
2873 adox $zero,%r14 # of=0
2874 adcx $zero,%r14 # cf=0
2875
2876 mov %r8,3*8($tptr) # t[3]
2877 mov %r9,4*8($tptr) # t[4]
2878
2879 mulx 3*8($aptr),%r8,%rbx # a[3]*a[2]
2880 mulx 4*8($aptr),%r9,%rax # a[4]*a[2]
2881 adcx %r10,%r8
2882 adox %rbx,%r9
2883 mulx 5*8($aptr),%r10,%rbx # ...
2884 adcx %r11,%r9
2885 adox %rax,%r10
2886 .byte 0xc4,0xe2,0xa3,0xf6,0x86,0x30,0x00,0x00,0x00 # mulx 6*8($aptr),%r11,%rax
2887 adcx %r12,%r10
2888 adox %r13,%r11
2889 .byte 0xc4,0x62,0x9b,0xf6,0xae,0x38,0x00,0x00,0x00 # mulx 7*8($aptr),%r12,%r13
2890 .byte 0x3e
2891 mov 3*8($aptr),%rdx # a[3]
2892 adcx %rbx,%r11
2893 adox %rax,%r12
2894 adcx %r14,%r12
2895 mov %r8,5*8($tptr) # t[5]
2896 mov %r9,6*8($tptr) # t[6]
2897 mulx 4*8($aptr),%r8,%rax # a[4]*a[3]
2898 adox $zero,%r13 # of=0
2899 adcx $zero,%r13 # cf=0
2900
2901 mulx 5*8($aptr),%r9,%rbx # a[5]*a[3]
2902 adcx %r10,%r8
2903 adox %rax,%r9
2904 mulx 6*8($aptr),%r10,%rax # ...
2905 adcx %r11,%r9
2906 adox %r12,%r10
2907 mulx 7*8($aptr),%r11,%r12
2908 mov 4*8($aptr),%rdx # a[4]
2909 mov 5*8($aptr),%r14 # a[5]
2910 adcx %rbx,%r10
2911 adox %rax,%r11
2912 mov 6*8($aptr),%r15 # a[6]
2913 adcx %r13,%r11
2914 adox $zero,%r12 # of=0
2915 adcx $zero,%r12 # cf=0
2916
2917 mov %r8,7*8($tptr) # t[7]
2918 mov %r9,8*8($tptr) # t[8]
2919
2920 mulx %r14,%r9,%rax # a[5]*a[4]
2921 mov 7*8($aptr),%r8 # a[7]
2922 adcx %r10,%r9
2923 mulx %r15,%r10,%rbx # a[6]*a[4]
2924 adox %rax,%r10
2925 adcx %r11,%r10
2926 mulx %r8,%r11,%rax # a[7]*a[4]
2927 mov %r14,%rdx # a[5]
2928 adox %rbx,%r11
2929 adcx %r12,%r11
2930 #adox $zero,%rax # of=0
2931 adcx $zero,%rax # cf=0
2932
2933 mulx %r15,%r14,%rbx # a[6]*a[5]
2934 mulx %r8,%r12,%r13 # a[7]*a[5]
2935 mov %r15,%rdx # a[6]
2936 lea 8*8($aptr),$aptr
2937 adcx %r14,%r11
2938 adox %rbx,%r12
2939 adcx %rax,%r12
2940 adox $zero,%r13
2941
2942 .byte 0x67,0x67
2943 mulx %r8,%r8,%r14 # a[7]*a[6]
2944 adcx %r8,%r13
2945 adcx $zero,%r14
2946
2947 cmp 8+8(%rsp),$aptr
2948 je .Lsqrx8x_outer_break
2949
2950 neg $carry # mov $carry,%cf
2951 mov \$-8,%rcx
2952 mov $zero,%r15
2953 mov 8*8($tptr),%r8
2954 adcx 9*8($tptr),%r9 # +=t[9]
2955 adcx 10*8($tptr),%r10 # ...
2956 adcx 11*8($tptr),%r11
2957 adc 12*8($tptr),%r12
2958 adc 13*8($tptr),%r13
2959 adc 14*8($tptr),%r14
2960 adc 15*8($tptr),%r15
2961 lea ($aptr),$aaptr
2962 lea 2*64($tptr),$tptr
2963 sbb %rax,%rax # mov %cf,$carry
2964
2965 mov -64($aptr),%rdx # a[0]
2966 mov %rax,16+8(%rsp) # offload $carry
2967 mov $tptr,24+8(%rsp)
2968
2969 #lea 8*8($tptr),$tptr # see 2*8*8($tptr) above
2970 xor %eax,%eax # cf=0, of=0
2971 jmp .Lsqrx8x_loop
2972
2973.align 32
2974.Lsqrx8x_loop:
2975 mov %r8,%rbx
2976 mulx 0*8($aaptr),%rax,%r8 # a[8]*a[i]
2977 adcx %rax,%rbx # +=t[8]
2978 adox %r9,%r8
2979
2980 mulx 1*8($aaptr),%rax,%r9 # ...
2981 adcx %rax,%r8
2982 adox %r10,%r9
2983
2984 mulx 2*8($aaptr),%rax,%r10
2985 adcx %rax,%r9
2986 adox %r11,%r10
2987
2988 mulx 3*8($aaptr),%rax,%r11
2989 adcx %rax,%r10
2990 adox %r12,%r11
2991
2992 .byte 0xc4,0x62,0xfb,0xf6,0xa5,0x20,0x00,0x00,0x00 # mulx 4*8($aaptr),%rax,%r12
2993 adcx %rax,%r11
2994 adox %r13,%r12
2995
2996 mulx 5*8($aaptr),%rax,%r13
2997 adcx %rax,%r12
2998 adox %r14,%r13
2999
3000 mulx 6*8($aaptr),%rax,%r14
3001 mov %rbx,($tptr,%rcx,8) # store t[8+i]
3002 mov \$0,%ebx
3003 adcx %rax,%r13
3004 adox %r15,%r14
3005
3006 .byte 0xc4,0x62,0xfb,0xf6,0xbd,0x38,0x00,0x00,0x00 # mulx 7*8($aaptr),%rax,%r15
3007 mov 8($aptr,%rcx,8),%rdx # a[i]
3008 adcx %rax,%r14
3009 adox %rbx,%r15 # %rbx is 0, of=0
3010 adcx %rbx,%r15 # cf=0
3011
3012 .byte 0x67
3013 inc %rcx # of=0
3014 jnz .Lsqrx8x_loop
3015
3016 lea 8*8($aaptr),$aaptr
3017 mov \$-8,%rcx
3018 cmp 8+8(%rsp),$aaptr # done?
3019 je .Lsqrx8x_break
3020
3021 sub 16+8(%rsp),%rbx # mov 16(%rsp),%cf
3022 .byte 0x66
3023 mov -64($aptr),%rdx
3024 adcx 0*8($tptr),%r8
3025 adcx 1*8($tptr),%r9
3026 adc 2*8($tptr),%r10
3027 adc 3*8($tptr),%r11
3028 adc 4*8($tptr),%r12
3029 adc 5*8($tptr),%r13
3030 adc 6*8($tptr),%r14
3031 adc 7*8($tptr),%r15
3032 lea 8*8($tptr),$tptr
3033 .byte 0x67
3034 sbb %rax,%rax # mov %cf,%rax
3035 xor %ebx,%ebx # cf=0, of=0
3036 mov %rax,16+8(%rsp) # offload carry
3037 jmp .Lsqrx8x_loop
3038
3039.align 32
3040.Lsqrx8x_break:
3041 xor $zero,$zero
3042 sub 16+8(%rsp),%rbx # mov 16(%rsp),%cf
3043 adcx $zero,%r8
3044 mov 24+8(%rsp),$carry # initial $tptr, borrow $carry
3045 adcx $zero,%r9
3046 mov 0*8($aptr),%rdx # a[8], modulo-scheduled
3047 adc \$0,%r10
3048 mov %r8,0*8($tptr)
3049 adc \$0,%r11
3050 adc \$0,%r12
3051 adc \$0,%r13
3052 adc \$0,%r14
3053 adc \$0,%r15
3054 cmp $carry,$tptr # cf=0, of=0
3055 je .Lsqrx8x_outer_loop
3056
3057 mov %r9,1*8($tptr)
3058 mov 1*8($carry),%r9
3059 mov %r10,2*8($tptr)
3060 mov 2*8($carry),%r10
3061 mov %r11,3*8($tptr)
3062 mov 3*8($carry),%r11
3063 mov %r12,4*8($tptr)
3064 mov 4*8($carry),%r12
3065 mov %r13,5*8($tptr)
3066 mov 5*8($carry),%r13
3067 mov %r14,6*8($tptr)
3068 mov 6*8($carry),%r14
3069 mov %r15,7*8($tptr)
3070 mov 7*8($carry),%r15
3071 mov $carry,$tptr
3072 jmp .Lsqrx8x_outer_loop
3073
3074.align 32
3075.Lsqrx8x_outer_break:
3076 mov %r9,9*8($tptr) # t[9]
3077 movq %xmm3,%rcx # -$num
3078 mov %r10,10*8($tptr) # ...
3079 mov %r11,11*8($tptr)
3080 mov %r12,12*8($tptr)
3081 mov %r13,13*8($tptr)
3082 mov %r14,14*8($tptr)
3083___
3084}
3085{
3086my $i="%rcx";
3087$code.=<<___;
3088 lea 48+8(%rsp),$tptr
3089 mov ($aptr,$i),%rdx # a[0]
3090
3091 mov 8($tptr),$A0[1] # t[1]
3092 xor $A0[0],$A0[0] # t[0], of=0, cf=0
3093 mov 0+8(%rsp),$num # restore $num
3094 adox $A0[1],$A0[1]
3095 mov 16($tptr),$A1[0] # t[2] # prefetch
3096 mov 24($tptr),$A1[1] # t[3] # prefetch
3097 #jmp .Lsqrx4x_shift_n_add # happens to be aligned
3098
3099.align 32
3100.Lsqrx4x_shift_n_add:
3101 mulx %rdx,%rax,%rbx
3102 adox $A1[0],$A1[0]
3103 adcx $A0[0],%rax
3104 .byte 0x48,0x8b,0x94,0x0e,0x08,0x00,0x00,0x00 # mov 8($aptr,$i),%rdx # a[i+1] # prefetch
3105 .byte 0x4c,0x8b,0x97,0x20,0x00,0x00,0x00 # mov 32($tptr),$A0[0] # t[2*i+4] # prefetch
3106 adox $A1[1],$A1[1]
3107 adcx $A0[1],%rbx
3108 mov 40($tptr),$A0[1] # t[2*i+4+1] # prefetch
3109 mov %rax,0($tptr)
3110 mov %rbx,8($tptr)
3111
3112 mulx %rdx,%rax,%rbx
3113 adox $A0[0],$A0[0]
3114 adcx $A1[0],%rax
3115 mov 16($aptr,$i),%rdx # a[i+2] # prefetch
3116 mov 48($tptr),$A1[0] # t[2*i+6] # prefetch
3117 adox $A0[1],$A0[1]
3118 adcx $A1[1],%rbx
3119 mov 56($tptr),$A1[1] # t[2*i+6+1] # prefetch
3120 mov %rax,16($tptr)
3121 mov %rbx,24($tptr)
3122
3123 mulx %rdx,%rax,%rbx
3124 adox $A1[0],$A1[0]
3125 adcx $A0[0],%rax
3126 mov 24($aptr,$i),%rdx # a[i+3] # prefetch
3127 lea 32($i),$i
3128 mov 64($tptr),$A0[0] # t[2*i+8] # prefetch
3129 adox $A1[1],$A1[1]
3130 adcx $A0[1],%rbx
3131 mov 72($tptr),$A0[1] # t[2*i+8+1] # prefetch
3132 mov %rax,32($tptr)
3133 mov %rbx,40($tptr)
3134
3135 mulx %rdx,%rax,%rbx
3136 adox $A0[0],$A0[0]
3137 adcx $A1[0],%rax
3138 jrcxz .Lsqrx4x_shift_n_add_break
3139 .byte 0x48,0x8b,0x94,0x0e,0x00,0x00,0x00,0x00 # mov 0($aptr,$i),%rdx # a[i+4] # prefetch
3140 adox $A0[1],$A0[1]
3141 adcx $A1[1],%rbx
3142 mov 80($tptr),$A1[0] # t[2*i+10] # prefetch
3143 mov 88($tptr),$A1[1] # t[2*i+10+1] # prefetch
3144 mov %rax,48($tptr)
3145 mov %rbx,56($tptr)
3146 lea 64($tptr),$tptr
3147 nop
3148 jmp .Lsqrx4x_shift_n_add
3149
3150.align 32
3151.Lsqrx4x_shift_n_add_break:
3152 adcx $A1[1],%rbx
3153 mov %rax,48($tptr)
3154 mov %rbx,56($tptr)
3155 lea 64($tptr),$tptr # end of t[] buffer
3156___
3157}
3158
3159######################################################################
3160# Montgomery reduction part, "word-by-word" algorithm.
3161#
3162# This new path is inspired by multiple submissions from Intel, by
3163# Shay Gueron, Vlad Krasnov, Erdinc Ozturk, James Guilford,
3164# Vinodh Gopal...
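# Editorial reference sketch, not used in code generation: the reduction below
# takes the 2*num-limb value sitting in t[] and performs num word-by-word
# Montgomery steps, each cancelling the lowest limb with m = t[0]*n0 mod 2^w.
# ref_mont_reduce is an illustrative name; n0 is -n^-1 mod 2^w as elsewhere.
use Math::BigInt;
sub ref_mont_reduce {
	my ($t, $n, $n0, $limbs, $w) = @_;	# $t,$n: Math::BigInt, 0 <= $t < $n * 2^($w*$limbs)
	my $base = Math::BigInt->new(1)->blsft($w);
	for (1 .. $limbs) {
		my $m = ($t % $base) * $n0 % $base;	# m = t[0]*n0 mod 2^w
		$t = ($t + $m * $n) / $base;	# low limb becomes zero and is dropped
	}
	$t -= $n if $t >= $n;			# done by __bn_postx4x_internal below
	return $t;				# t * R^-1 mod n
}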
3165{
3166my ($nptr,$carry,$m0)=("%rbp","%rsi","%rdx");
3167
3168$code.=<<___;
3169 movq %xmm2,$nptr
3170__bn_sqrx8x_reduction:
3171 xor %eax,%eax # initial top-most carry bit
3172 mov 32+8(%rsp),%rbx # n0
3173 mov 48+8(%rsp),%rdx # "%r8", 8*0($tptr)
3174 lea -8*8($nptr,$num),%rcx # end of n[]
3175 #lea 48+8(%rsp,$num,2),$tptr # end of t[] buffer
3176 mov %rcx, 0+8(%rsp) # save end of n[]
3177 mov $tptr,8+8(%rsp) # save end of t[]
3178
3179 lea 48+8(%rsp),$tptr # initial t[] window
3180 jmp .Lsqrx8x_reduction_loop
3181
3182.align 32
3183.Lsqrx8x_reduction_loop:
3184 mov 8*1($tptr),%r9
3185 mov 8*2($tptr),%r10
3186 mov 8*3($tptr),%r11
3187 mov 8*4($tptr),%r12
3188 mov %rdx,%r8
3189 imulq %rbx,%rdx # n0*a[i]
3190 mov 8*5($tptr),%r13
3191 mov 8*6($tptr),%r14
3192 mov 8*7($tptr),%r15
3193 mov %rax,24+8(%rsp) # store top-most carry bit
3194
3195 lea 8*8($tptr),$tptr
3196 xor $carry,$carry # cf=0,of=0
3197 mov \$-8,%rcx
3198 jmp .Lsqrx8x_reduce
3199
3200.align 32
3201.Lsqrx8x_reduce:
3202 mov %r8, %rbx
3203 mulx 8*0($nptr),%rax,%r8 # n[0]
3204 adcx %rbx,%rax # discarded
3205 adox %r9,%r8
3206
3207 mulx 8*1($nptr),%rbx,%r9 # n[1]
3208 adcx %rbx,%r8
3209 adox %r10,%r9
3210
3211 mulx 8*2($nptr),%rbx,%r10
3212 adcx %rbx,%r9
3213 adox %r11,%r10
3214
3215 mulx 8*3($nptr),%rbx,%r11
3216 adcx %rbx,%r10
3217 adox %r12,%r11
3218
3219 .byte 0xc4,0x62,0xe3,0xf6,0xa5,0x20,0x00,0x00,0x00 # mulx 8*4($nptr),%rbx,%r12
3220 mov %rdx,%rax
3221 mov %r8,%rdx
3222 adcx %rbx,%r11
3223 adox %r13,%r12
3224
3225 mulx 32+8(%rsp),%rbx,%rdx # %rdx discarded
3226 mov %rax,%rdx
3227 mov %rax,64+48+8(%rsp,%rcx,8) # put aside n0*a[i]
3228
3229 mulx 8*5($nptr),%rax,%r13
3230 adcx %rax,%r12
3231 adox %r14,%r13
3232
3233 mulx 8*6($nptr),%rax,%r14
3234 adcx %rax,%r13
3235 adox %r15,%r14
3236
3237 mulx 8*7($nptr),%rax,%r15
3238 mov %rbx,%rdx
3239 adcx %rax,%r14
3240 adox $carry,%r15 # $carry is 0
3241 adcx $carry,%r15 # cf=0
3242
3243 .byte 0x67,0x67,0x67
3244 inc %rcx # of=0
3245 jnz .Lsqrx8x_reduce
3246
3247 mov $carry,%rax # xor %rax,%rax
3248 cmp 0+8(%rsp),$nptr # end of n[]?
3249 jae .Lsqrx8x_no_tail
3250
3251 mov 48+8(%rsp),%rdx # pull n0*a[0]
3252 add 8*0($tptr),%r8
3253 lea 8*8($nptr),$nptr
3254 mov \$-8,%rcx
3255 adcx 8*1($tptr),%r9
3256 adcx 8*2($tptr),%r10
3257 adc 8*3($tptr),%r11
3258 adc 8*4($tptr),%r12
3259 adc 8*5($tptr),%r13
3260 adc 8*6($tptr),%r14
3261 adc 8*7($tptr),%r15
3262 lea 8*8($tptr),$tptr
3263 sbb %rax,%rax # top carry
3264
3265 xor $carry,$carry # of=0, cf=0
3266 mov %rax,16+8(%rsp)
3267 jmp .Lsqrx8x_tail
3268
3269.align 32
3270.Lsqrx8x_tail:
3271 mov %r8,%rbx
3272 mulx 8*0($nptr),%rax,%r8
3273 adcx %rax,%rbx
3274 adox %r9,%r8
3275
3276 mulx 8*1($nptr),%rax,%r9
3277 adcx %rax,%r8
3278 adox %r10,%r9
3279
3280 mulx 8*2($nptr),%rax,%r10
3281 adcx %rax,%r9
3282 adox %r11,%r10
3283
3284 mulx 8*3($nptr),%rax,%r11
3285 adcx %rax,%r10
3286 adox %r12,%r11
3287
3288 .byte 0xc4,0x62,0xfb,0xf6,0xa5,0x20,0x00,0x00,0x00 # mulx 8*4($nptr),%rax,%r12
3289 adcx %rax,%r11
3290 adox %r13,%r12
3291
3292 mulx 8*5($nptr),%rax,%r13
3293 adcx %rax,%r12
3294 adox %r14,%r13
3295
3296 mulx 8*6($nptr),%rax,%r14
3297 adcx %rax,%r13
3298 adox %r15,%r14
3299
3300 mulx 8*7($nptr),%rax,%r15
3301 mov 72+48+8(%rsp,%rcx,8),%rdx # pull n0*a[i]
3302 adcx %rax,%r14
3303 adox $carry,%r15
3304 mov %rbx,($tptr,%rcx,8) # save result
3305 mov %r8,%rbx
3306 adcx $carry,%r15 # cf=0
3307
3308 inc %rcx # of=0
3309 jnz .Lsqrx8x_tail
3310
3311 cmp 0+8(%rsp),$nptr # end of n[]?
3312 jae .Lsqrx8x_tail_done # break out of loop
3313
3314 sub 16+8(%rsp),$carry # mov 16(%rsp),%cf
3315 mov 48+8(%rsp),%rdx # pull n0*a[0]
3316 lea 8*8($nptr),$nptr
3317 adc 8*0($tptr),%r8
3318 adc 8*1($tptr),%r9
3319 adc 8*2($tptr),%r10
3320 adc 8*3($tptr),%r11
3321 adc 8*4($tptr),%r12
3322 adc 8*5($tptr),%r13
3323 adc 8*6($tptr),%r14
3324 adc 8*7($tptr),%r15
3325 lea 8*8($tptr),$tptr
3326 sbb %rax,%rax
3327 sub \$8,%rcx # mov \$-8,%rcx
3328
3329 xor $carry,$carry # of=0, cf=0
3330 mov %rax,16+8(%rsp)
3331 jmp .Lsqrx8x_tail
3332
3333.align 32
3334.Lsqrx8x_tail_done:
3335 xor %rax,%rax
3336 add 24+8(%rsp),%r8 # can this overflow?
3337 adc \$0,%r9
3338 adc \$0,%r10
3339 adc \$0,%r11
3340 adc \$0,%r12
3341 adc \$0,%r13
3342 adc \$0,%r14
3343 adc \$0,%r15
3344 adc \$0,%rax
3345
3346 sub 16+8(%rsp),$carry # mov 16(%rsp),%cf
3347.Lsqrx8x_no_tail: # %cf is 0 if jumped here
3348 adc 8*0($tptr),%r8
3349 movq %xmm3,%rcx
3350 adc 8*1($tptr),%r9
3351 mov 8*7($nptr),$carry
3352 movq %xmm2,$nptr # restore $nptr
3353 adc 8*2($tptr),%r10
3354 adc 8*3($tptr),%r11
3355 adc 8*4($tptr),%r12
3356 adc 8*5($tptr),%r13
3357 adc 8*6($tptr),%r14
3358 adc 8*7($tptr),%r15
3359 adc \$0,%rax # top-most carry
3360
3361 mov 32+8(%rsp),%rbx # n0
3362 mov 8*8($tptr,%rcx),%rdx # modulo-scheduled "%r8"
3363
3364 mov %r8,8*0($tptr) # store top 512 bits
3365 lea 8*8($tptr),%r8 # borrow %r8
3366 mov %r9,8*1($tptr)
3367 mov %r10,8*2($tptr)
3368 mov %r11,8*3($tptr)
3369 mov %r12,8*4($tptr)
3370 mov %r13,8*5($tptr)
3371 mov %r14,8*6($tptr)
3372 mov %r15,8*7($tptr)
3373
3374 lea 8*8($tptr,%rcx),$tptr # start of current t[] window
3375 cmp 8+8(%rsp),%r8 # end of t[]?
3376 jb .Lsqrx8x_reduction_loop
3377 ret
3378.cfi_endproc
3379.size bn_sqrx8x_internal,.-bn_sqrx8x_internal
3380___
3381}
3382
3383##############################################################
3384# Post-condition, 4x unrolled
3385#
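# Editorial reference sketch, not used in code generation: the post-condition
# subtracts the modulus from t[] under an all-ones/all-zeros mask derived from
# the top-most carry, so whether the subtraction takes effect never shows up as
# a branch. ref_masked_sub is an illustrative name; the model uses 32-bit limbs
# so plain Perl arithmetic stays exact (the code itself works on 64-bit limbs).
sub ref_masked_sub {
	my ($t, $n, $mask) = @_;		# array refs of limbs; $mask = 0xffffffff or 0
	my @r;
	my $borrow = 0;
	for my $i (0 .. $#$t) {
		my $d = $t->[$i] - ($n->[$i] & $mask) - $borrow;	# subtract masked modulus limb
		$borrow = $d < 0 ? 1 : 0;
		push @r, $d & 0xffffffff;
	}
	return @r;				# t - n when $mask is all ones, t unchanged otherwise
}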
3386{
3387my ($rptr,$nptr)=("%rdx","%rbp");
3388$code.=<<___;
3389.align 32
3390__bn_postx4x_internal:
3391.cfi_startproc
3392 mov 8*0($nptr),%r12
3393 mov %rcx,%r10 # -$num
3394 mov %rcx,%r9 # -$num
3395 neg %rax
3396 sar \$3+2,%rcx
3397 #lea 48+8(%rsp,%r9),$tptr
3398 movq %xmm1,$rptr # restore $rptr
3399 movq %xmm1,$aptr # prepare for back-to-back call
3400 dec %r12 # so that after 'not' we get -n[0]
3401 mov 8*1($nptr),%r13
3402 xor %r8,%r8
3403 mov 8*2($nptr),%r14
3404 mov 8*3($nptr),%r15
3405 jmp .Lsqrx4x_sub_entry
3406
3407.align 16
3408.Lsqrx4x_sub:
3409 mov 8*0($nptr),%r12
3410 mov 8*1($nptr),%r13
3411 mov 8*2($nptr),%r14
3412 mov 8*3($nptr),%r15
3413.Lsqrx4x_sub_entry:
3414 andn %rax,%r12,%r12
3415 lea 8*4($nptr),$nptr
3416 andn %rax,%r13,%r13
3417 andn %rax,%r14,%r14
3418 andn %rax,%r15,%r15
3419
3420 neg %r8 # mov %r8,%cf
3421 adc 8*0($tptr),%r12
3422 adc 8*1($tptr),%r13
3423 adc 8*2($tptr),%r14
3424 adc 8*3($tptr),%r15
3425 mov %r12,8*0($rptr)
3426 lea 8*4($tptr),$tptr
3427 mov %r13,8*1($rptr)
3428 sbb %r8,%r8 # mov %cf,%r8
3429 mov %r14,8*2($rptr)
3430 mov %r15,8*3($rptr)
3431 lea 8*4($rptr),$rptr
3432
3433 inc %rcx
3434 jnz .Lsqrx4x_sub
3435
3436 neg %r9 # restore $num
3437
3438 ret
3439.cfi_endproc
3440.size __bn_postx4x_internal,.-__bn_postx4x_internal
3441___
3442}
3443}}}
3444{
3445my ($inp,$num,$tbl,$idx)=$win64?("%rcx","%edx","%r8", "%r9d") : # Win64 order
3446 ("%rdi","%esi","%rdx","%ecx"); # Unix order
3447my $out=$inp;
3448my $STRIDE=2**5*8;
3449my $N=$STRIDE/4;
3450
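# Editorial reference sketch, not used in code generation: the helpers emitted
# below serve BN_mod_exp_mont_consttime. bn_scatter5 stores limb i of power idx
# at tbl[i*32+idx]; bn_gather5 reads a power back while touching all 32 slots
# of every limb (the same masked-selection idea used earlier in this file); and
# bn_get_bits5 pulls a 5-bit window of the exponent at an arbitrary bit offset.
# ref_scatter5 and ref_get_bits5 are illustrative names for functional models
# of the first and last of these.
sub ref_scatter5 {
	my ($tbl, $inp, $idx) = @_;		# $tbl: flat array ref of 32*num limbs
	$tbl->[32 * $_ + $idx] = $inp->[$_] for 0 .. $#$inp;
}
sub ref_get_bits5 {
	my ($exp_bytes, $bitpos) = @_;		# $exp_bytes: little-endian byte array ref
	my $byte = $bitpos >> 3;
	my $word = ($exp_bytes->[$byte]      // 0)
	         | (($exp_bytes->[$byte + 1] // 0) << 8);	# a window can straddle a byte
	return ($word >> ($bitpos & 7)) & 31;	# 5 bits starting at $bitpos
}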
3451$code.=<<___;
3452.globl bn_get_bits5
3453.type bn_get_bits5,\@abi-omnipotent
3454.align 16
3455bn_get_bits5:
3456.cfi_startproc
3457 lea 0($inp),%r10
3458 lea 1($inp),%r11
3459 mov $num,%ecx
3460 shr \$4,$num
3461 and \$15,%ecx
3462 lea -8(%ecx),%eax
3463 cmp \$11,%ecx
3464 cmova %r11,%r10
3465 cmova %eax,%ecx
3466 movzw (%r10,$num,2),%eax
3467 shrl %cl,%eax
3468 and \$31,%eax
3469 ret
3470.cfi_endproc
3471.size bn_get_bits5,.-bn_get_bits5
3472
3473.globl bn_scatter5
3474.type bn_scatter5,\@abi-omnipotent
3475.align 16
3476bn_scatter5:
3477.cfi_startproc
3478 cmp \$0, $num
3479 jz .Lscatter_epilogue
3480 lea ($tbl,$idx,8),$tbl
3481.Lscatter:
3482 mov ($inp),%rax
3483 lea 8($inp),$inp
3484 mov %rax,($tbl)
3485 lea 32*8($tbl),$tbl
3486 sub \$1,$num
3487 jnz .Lscatter
3488.Lscatter_epilogue:
3489 ret
3490.cfi_endproc
3491.size bn_scatter5,.-bn_scatter5
3492
3493.globl bn_gather5
3494.type bn_gather5,\@abi-omnipotent
3495.align 32
3496bn_gather5:
3497.LSEH_begin_bn_gather5: # Win64 thing, but harmless in other cases
3498.cfi_startproc
3499 # I can't trust assembler to use specific encoding:-(
3500 .byte 0x4c,0x8d,0x14,0x24 #lea (%rsp),%r10
3501 .byte 0x48,0x81,0xec,0x08,0x01,0x00,0x00 #sub $0x108,%rsp
3502 lea .Linc(%rip),%rax
3503 and \$-16,%rsp # shouldn't be formally required
3504
3505 movd $idx,%xmm5
3506 movdqa 0(%rax),%xmm0 # 00000001000000010000000000000000
3507 movdqa 16(%rax),%xmm1 # 00000002000000020000000200000002
3508 lea 128($tbl),%r11 # size optimization
3509 lea 128(%rsp),%rax # size optimization
3510
3511 pshufd \$0,%xmm5,%xmm5 # broadcast $idx
3512 movdqa %xmm1,%xmm4
3513 movdqa %xmm1,%xmm2
3514___
3515########################################################################
3516# calculate mask by comparing 0..31 to $idx and save result to stack
3517#
3518for($i=0;$i<$STRIDE/16;$i+=4) {
3519$code.=<<___;
3520 paddd %xmm0,%xmm1
3521 pcmpeqd %xmm5,%xmm0 # compare to 1,0
3522___
3523$code.=<<___ if ($i);
3524 movdqa %xmm3,`16*($i-1)-128`(%rax)
3525___
3526$code.=<<___;
3527 movdqa %xmm4,%xmm3
3528
3529 paddd %xmm1,%xmm2
3530 pcmpeqd %xmm5,%xmm1 # compare to 3,2
3531 movdqa %xmm0,`16*($i+0)-128`(%rax)
3532 movdqa %xmm4,%xmm0
3533
3534 paddd %xmm2,%xmm3
3535 pcmpeqd %xmm5,%xmm2 # compare to 5,4
3536 movdqa %xmm1,`16*($i+1)-128`(%rax)
3537 movdqa %xmm4,%xmm1
3538
3539 paddd %xmm3,%xmm0
3540 pcmpeqd %xmm5,%xmm3 # compare to 7,6
3541 movdqa %xmm2,`16*($i+2)-128`(%rax)
3542 movdqa %xmm4,%xmm2
3543___
3544}
3545$code.=<<___;
3546 movdqa %xmm3,`16*($i-1)-128`(%rax)
3547 jmp .Lgather
3548
3549.align 32
3550.Lgather:
3551 pxor %xmm4,%xmm4
3552 pxor %xmm5,%xmm5
3553___
3554for($i=0;$i<$STRIDE/16;$i+=4) {
3555$code.=<<___;
3556 movdqa `16*($i+0)-128`(%r11),%xmm0
3557 movdqa `16*($i+1)-128`(%r11),%xmm1
3558 movdqa `16*($i+2)-128`(%r11),%xmm2
3559 pand `16*($i+0)-128`(%rax),%xmm0
3560 movdqa `16*($i+3)-128`(%r11),%xmm3
3561 pand `16*($i+1)-128`(%rax),%xmm1
3562 por %xmm0,%xmm4
3563 pand `16*($i+2)-128`(%rax),%xmm2
3564 por %xmm1,%xmm5
3565 pand `16*($i+3)-128`(%rax),%xmm3
3566 por %xmm2,%xmm4
3567 por %xmm3,%xmm5
3568___
3569}
3570$code.=<<___;
3571 por %xmm5,%xmm4
3572 lea $STRIDE(%r11),%r11
3573 pshufd \$0x4e,%xmm4,%xmm0
3574 por %xmm4,%xmm0
3575 movq %xmm0,($out) # m0=bp[0]
3576 lea 8($out),$out
3577 sub \$1,$num
3578 jnz .Lgather
3579
3580 lea (%r10),%rsp
3581 ret
3582.LSEH_end_bn_gather5:
3583.cfi_endproc
3584.size bn_gather5,.-bn_gather5
3585___
3586}
3587$code.=<<___;
3588.align 64
3589.Linc:
3590 .long 0,0, 1,1
3591 .long 2,2, 2,2
3592.asciz "Montgomery Multiplication with scatter/gather for x86_64, CRYPTOGAMS by <appro\@openssl.org>"
3593___
3594
3595# EXCEPTION_DISPOSITION handler (EXCEPTION_RECORD *rec,ULONG64 frame,
3596# CONTEXT *context,DISPATCHER_CONTEXT *disp)
3597if ($win64) {
3598$rec="%rcx";
3599$frame="%rdx";
3600$context="%r8";
3601$disp="%r9";
3602
3603$code.=<<___;
3604.extern __imp_RtlVirtualUnwind
3605.type mul_handler,\@abi-omnipotent
3606.align 16
3607mul_handler:
3608 push %rsi
3609 push %rdi
3610 push %rbx
3611 push %rbp
3612 push %r12
3613 push %r13
3614 push %r14
3615 push %r15
3616 pushfq
3617 sub \$64,%rsp
3618
3619 mov 120($context),%rax # pull context->Rax
3620 mov 248($context),%rbx # pull context->Rip
3621
3622 mov 8($disp),%rsi # disp->ImageBase
3623 mov 56($disp),%r11 # disp->HandlerData
3624
3625 mov 0(%r11),%r10d # HandlerData[0]
3626 lea (%rsi,%r10),%r10 # end of prologue label
3627 cmp %r10,%rbx # context->Rip<end of prologue label
3628 jb .Lcommon_seh_tail
3629
3630 mov 4(%r11),%r10d # HandlerData[1]
3631 lea (%rsi,%r10),%r10 # beginning of body label
3632 cmp %r10,%rbx # context->Rip<body label
3633 jb .Lcommon_pop_regs
3634
3635 mov 152($context),%rax # pull context->Rsp
3636
3637 mov 8(%r11),%r10d # HandlerData[2]
3638 lea (%rsi,%r10),%r10 # epilogue label
3639 cmp %r10,%rbx # context->Rip>=epilogue label
3640 jae .Lcommon_seh_tail
3641
3642 lea .Lmul_epilogue(%rip),%r10
3643 cmp %r10,%rbx
3644 ja .Lbody_40
3645
3646 mov 192($context),%r10 # pull $num
3647 mov 8(%rax,%r10,8),%rax # pull saved stack pointer
3648
3649 jmp .Lcommon_pop_regs
3650
3651.Lbody_40:
3652 mov 40(%rax),%rax # pull saved stack pointer
3653.Lcommon_pop_regs:
3654 mov -8(%rax),%rbx
3655 mov -16(%rax),%rbp
3656 mov -24(%rax),%r12
3657 mov -32(%rax),%r13
3658 mov -40(%rax),%r14
3659 mov -48(%rax),%r15
3660 mov %rbx,144($context) # restore context->Rbx
3661 mov %rbp,160($context) # restore context->Rbp
3662 mov %r12,216($context) # restore context->R12
3663 mov %r13,224($context) # restore context->R13
3664 mov %r14,232($context) # restore context->R14
3665 mov %r15,240($context) # restore context->R15
3666
3667.Lcommon_seh_tail:
3668 mov 8(%rax),%rdi
3669 mov 16(%rax),%rsi
3670 mov %rax,152($context) # restore context->Rsp
3671 mov %rsi,168($context) # restore context->Rsi
3672 mov %rdi,176($context) # restore context->Rdi
3673
3674 mov 40($disp),%rdi # disp->ContextRecord
3675 mov $context,%rsi # context
3676 mov \$154,%ecx # sizeof(CONTEXT)
3677 .long 0xa548f3fc # cld; rep movsq
3678
3679 mov $disp,%rsi
3680 xor %rcx,%rcx # arg1, UNW_FLAG_NHANDLER
3681 mov 8(%rsi),%rdx # arg2, disp->ImageBase
3682 mov 0(%rsi),%r8 # arg3, disp->ControlPc
3683 mov 16(%rsi),%r9 # arg4, disp->FunctionEntry
3684 mov 40(%rsi),%r10 # disp->ContextRecord
3685 lea 56(%rsi),%r11 # &disp->HandlerData
3686 lea 24(%rsi),%r12 # &disp->EstablisherFrame
3687 mov %r10,32(%rsp) # arg5
3688 mov %r11,40(%rsp) # arg6
3689 mov %r12,48(%rsp) # arg7
3690 mov %rcx,56(%rsp) # arg8, (NULL)
3691 call *__imp_RtlVirtualUnwind(%rip)
3692
3693 mov \$1,%eax # ExceptionContinueSearch
3694 add \$64,%rsp
3695 popfq
3696 pop %r15
3697 pop %r14
3698 pop %r13
3699 pop %r12
3700 pop %rbp
3701 pop %rbx
3702 pop %rdi
3703 pop %rsi
3704 ret
3705.size mul_handler,.-mul_handler
3706
3707.section .pdata
3708.align 4
3709 .rva .LSEH_begin_bn_mul_mont_gather5
3710 .rva .LSEH_end_bn_mul_mont_gather5
3711 .rva .LSEH_info_bn_mul_mont_gather5
3712
3713 .rva .LSEH_begin_bn_mul4x_mont_gather5
3714 .rva .LSEH_end_bn_mul4x_mont_gather5
3715 .rva .LSEH_info_bn_mul4x_mont_gather5
3716
3717 .rva .LSEH_begin_bn_power5
3718 .rva .LSEH_end_bn_power5
3719 .rva .LSEH_info_bn_power5
3720___
3721$code.=<<___ if ($addx);
3722 .rva .LSEH_begin_bn_mulx4x_mont_gather5
3723 .rva .LSEH_end_bn_mulx4x_mont_gather5
3724 .rva .LSEH_info_bn_mulx4x_mont_gather5
3725
3726 .rva .LSEH_begin_bn_powerx5
3727 .rva .LSEH_end_bn_powerx5
3728 .rva .LSEH_info_bn_powerx5
3729___
3730$code.=<<___;
3731 .rva .LSEH_begin_bn_gather5
3732 .rva .LSEH_end_bn_gather5
3733 .rva .LSEH_info_bn_gather5
3734
3735.section .xdata
3736.align 8
3737.LSEH_info_bn_mul_mont_gather5:
3738 .byte 9,0,0,0
3739 .rva mul_handler
3740 .rva .Lmul_body,.Lmul_body,.Lmul_epilogue # HandlerData[]
3741.align 8
3742.LSEH_info_bn_mul4x_mont_gather5:
3743 .byte 9,0,0,0
3744 .rva mul_handler
3745 .rva .Lmul4x_prologue,.Lmul4x_body,.Lmul4x_epilogue # HandlerData[]
3746.align 8
3747.LSEH_info_bn_power5:
3748 .byte 9,0,0,0
3749 .rva mul_handler
3750 .rva .Lpower5_prologue,.Lpower5_body,.Lpower5_epilogue # HandlerData[]
3751___
3752$code.=<<___ if ($addx);
3753.align 8
3754.LSEH_info_bn_mulx4x_mont_gather5:
3755 .byte 9,0,0,0
3756 .rva mul_handler
3757 .rva .Lmulx4x_prologue,.Lmulx4x_body,.Lmulx4x_epilogue # HandlerData[]
3758.align 8
3759.LSEH_info_bn_powerx5:
3760 .byte 9,0,0,0
3761 .rva mul_handler
3762 .rva .Lpowerx5_prologue,.Lpowerx5_body,.Lpowerx5_epilogue # HandlerData[]
3763___
3764$code.=<<___;
3765.align 8
3766.LSEH_info_bn_gather5:
3767 .byte 0x01,0x0b,0x03,0x0a
3768 .byte 0x0b,0x01,0x21,0x00 # sub rsp,0x108
3769 .byte 0x04,0xa3,0x00,0x00 # lea r10,(rsp)
3770.align 8
3771___
3772}
3773
3774$code =~ s/\`([^\`]*)\`/eval($1)/gem;
3775
3776print $code;
3777close STDOUT or die "error closing STDOUT: $!";