VirtualBox

source: vbox/trunk/src/libs/openssl-3.1.3/crypto/bn/asm/armv4-mont.pl@102334

Last change on this file since 102334 was 101211, checked in by vboxsync, 17 months ago

openssl-3.1.3: Applied and adjusted our OpenSSL changes to 3.1.2. bugref:10527

File size: 19.5 KB
 
#! /usr/bin/env perl
# Copyright 2007-2023 The OpenSSL Project Authors. All Rights Reserved.
#
# Licensed under the Apache License 2.0 (the "License"). You may not use
# this file except in compliance with the License. You can obtain a copy
# in the file LICENSE in the source distribution or at
# https://www.openssl.org/source/license.html


# ====================================================================
# Written by Andy Polyakov <appro@openssl.org> for the OpenSSL
# project. The module is, however, dual licensed under OpenSSL and
# CRYPTOGAMS licenses depending on where you obtain it. For further
# details see http://www.openssl.org/~appro/cryptogams/.
# ====================================================================

# January 2007.

# Montgomery multiplication for ARMv4.
#
# Performance improvement naturally varies among CPU implementations
# and compilers. The code was observed to provide +65-35% improvement
# [depending on key length, less for longer keys] on ARM920T, and
# +115-80% on Intel IXP425. This is compared to pre-bn_mul_mont code
# base and compiler-generated code with in-lined umull and even umlal
# instructions. The latter means that this code didn't really have an
# "advantage" of utilizing some "secret" instruction.
#
# The code is interoperable with Thumb ISA and is rather compact, less
# than 1/2KB. A Windows CE port would be trivial, as it's exclusively
# about decorations; ABI and instruction syntax are identical.

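# For reference, bn_mul_mont(rp,ap,bp,np,&n0,num) computes
# rp[] = ap[]*bp[]*R^-1 mod np[], R = 2^(32*num), with n0 = -np[0]^-1
# mod 2^32. A whole-number restatement of that operation in plain Perl
# (an illustrative sketch only, not used by this module; assumes an odd
# modulus and core Math::BigInt; mont_mul_ref is a made-up name):
#
#	use Math::BigInt;
#	sub mont_mul_ref {
#	    my ($a,$b,$n,$num) = @_;	# Math::BigInt values, word count
#	    my $R  = Math::BigInt->bone->blsft(32*$num);
#	    my $ni = $n->copy->bmodinv($R);		# n^-1 mod R
#	    my $t  = $a->copy->bmul($b);		# double-width product
#	    # m = -t*n^-1 mod R makes t+m*n divisible by R
#	    my $m  = $t->copy->bmod($R)->bmul($R-$ni)->bmod($R);
#	    $t->badd($m->bmul($n))->brsft(32*$num);	# (t+m*n)/R
#	    $t->bsub($n) if $t->bcmp($n) >= 0;		# final subtraction
#	    return $t;					# == a*b*R^-1 mod n
#	}
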
# November 2013
#
# Add NEON code path, which handles lengths divisible by 8. RSA/DSA
# performance improvement on Cortex-A8 is ~45-100% depending on key
# length, more for longer keys. On Cortex-A15 the span is ~10-105%.
# On Snapdragon S4 improvement was measured to vary from ~70% to an
# incredible ~380%, yes, 4.8x faster, for RSA4096 sign. But this is
# rather because the original integer-only code seems to perform
# suboptimally on S4. The situation on Cortex-A9 is unfortunately
# different. It's being looked into, but the trouble is that
# performance for vectors longer than 256 bits is actually a couple
# of percent worse than for integer-only code. The code is chosen
# for execution on all NEON-capable processors, because gain on
# others outweighs the marginal loss on Cortex-A9.

# September 2015
#
# Align Cortex-A9 performance with November 2013 improvements, i.e.
# NEON code is now ~20-105% faster than the integer-only one on this
# processor. But this optimization further improved performance even
# on other processors: the NEON code path is ~45-180% faster than the
# original integer-only code on Cortex-A8, ~10-210% on Cortex-A15,
# ~70-450% on Snapdragon S4.

# $output is the last argument if it looks like a file (it has an extension)
# $flavour is the first argument if it doesn't look like a file
my $output = $#ARGV >= 0 && $ARGV[$#ARGV] =~ m|\.\w+$| ? pop : undef;
my $flavour = $#ARGV >= 0 && $ARGV[0] !~ m|\.| ? shift : undef;

if ($flavour && $flavour ne "void") {
    $0 =~ m/(.*[\/\\])[^\/\\]+$/; $dir=$1;
    ( $xlate="${dir}arm-xlate.pl" and -f $xlate ) or
    ( $xlate="${dir}../../perlasm/arm-xlate.pl" and -f $xlate) or
    die "can't locate arm-xlate.pl";

    open STDOUT,"| \"$^X\" $xlate $flavour \"$output\""
        or die "can't call $xlate: $!";
} else {
    $output and open STDOUT,">$output";
}

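# Typical invocations (a usage sketch; flavours are those understood by
# arm-xlate.pl, the output path is arbitrary):
#
#	perl armv4-mont.pl linux32 armv4-mont.S
#	perl armv4-mont.pl void			# raw perlasm to stdout
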
$num="r0";	# starts as num argument, but holds &tp[num-1]
$ap="r1";
$bp="r2"; $bi="r2"; $rp="r2";
$np="r3";
$tp="r4";
$aj="r5";
$nj="r6";
$tj="r7";
$n0="r8";
###########	# r9 is reserved by ELF as platform specific, e.g. TLS pointer
$alo="r10";	# sl, gcc uses it to keep @GOT
$ahi="r11";	# fp
$nlo="r12";	# ip
###########	# r13 is stack pointer
$nhi="r14";	# lr
###########	# r15 is program counter

#### argument block layout relative to &tp[num-1], a.k.a. $num
$_rp="$num,#12*4";
# ap permanently resides in r1
$_bp="$num,#13*4";
# np permanently resides in r3
$_n0="$num,#14*4";
$_num="$num,#15*4";	$_bpend=$_num;
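
# Resulting frame, offsets in bytes from $num = &tp[num-1] (a sketch
# derived from the prologue below, for orientation):
#
#	sp		tp[0]..tp[num]			(num+1 scratch words)
#	$num+#8..#44	saved r4-r12,lr
#	$num+#48	rp				($_rp)
#	$num+#52	bp, then current &bp[i]		($_bp)
#	$num+#56	&n0, then n0 value		($_n0)
#	$num+#60	num, then &bp[num]		($_num/$_bpend)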

$code=<<___;
#include "arm_arch.h"

#if defined(__thumb2__)
.syntax	unified
.thumb
#else
.code	32
#endif

.text

#if __ARM_MAX_ARCH__>=7
.align	5
.LOPENSSL_armcap:
# ifdef	_WIN32
.word	OPENSSL_armcap_P
# else
.word	OPENSSL_armcap_P-.Lbn_mul_mont
# endif
#endif

.global	bn_mul_mont
.type	bn_mul_mont,%function

.align	5
bn_mul_mont:
.Lbn_mul_mont:
	ldr	ip,[sp,#4]		@ load num
	stmdb	sp!,{r0,r2}		@ sp points at argument block
#if __ARM_MAX_ARCH__>=7
	tst	ip,#7
	bne	.Lialu
	ldr	r0,.LOPENSSL_armcap
# if !defined(_WIN32)
	adr	r2,.Lbn_mul_mont
	ldr	r0,[r0,r2]
# endif
# if defined(__APPLE__) || defined(_WIN32)
	ldr	r0,[r0]
# endif
	tst	r0,#ARMV7_NEON		@ NEON available?
	ldmia	sp, {r0,r2}
	beq	.Lialu
	add	sp,sp,#8
	b	bn_mul8x_mont_neon
.align	4
.Lialu:
#endif
	cmp	ip,#2
	mov	$num,ip			@ load num
#ifdef	__thumb2__
	ittt	lt
#endif
	movlt	r0,#0
	addlt	sp,sp,#2*4
	blt	.Labrt

	stmdb	sp!,{r4-r12,lr}		@ save 10 registers

	mov	$num,$num,lsl#2		@ rescale $num for byte count
	sub	sp,sp,$num		@ alloca(4*num)
	sub	sp,sp,#4		@ +extra dword
	sub	$num,$num,#4		@ "num=num-1"
	add	$tp,$bp,$num		@ &bp[num-1]

	add	$num,sp,$num		@ $num to point at &tp[num-1]
	ldr	$n0,[$_n0]		@ &n0
	ldr	$bi,[$bp]		@ bp[0]
	ldr	$aj,[$ap],#4		@ ap[0],ap++
	ldr	$nj,[$np],#4		@ np[0],np++
	ldr	$n0,[$n0]		@ *n0
	str	$tp,[$_bpend]		@ save &bp[num]

	umull	$alo,$ahi,$aj,$bi	@ ap[0]*bp[0]
	str	$n0,[$_n0]		@ save n0 value
	mul	$n0,$alo,$n0		@ "tp[0]"*n0
	mov	$nlo,#0
	umlal	$alo,$nlo,$nj,$n0	@ np[0]*n0+"t[0]"
	mov	$tp,sp

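	@ First pass (i=0): accumulate ap[j]*bp[0] and np[j]*m word by
	@ word with umlal, where m = tp[0]*n0 mod 2^32 is chosen so the
	@ running sum comes out divisible by 2^32; alo/ahi and nlo/nhi
	@ hold the two 64-bit partial products.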
.L1st:
	ldr	$aj,[$ap],#4		@ ap[j],ap++
	mov	$alo,$ahi
	ldr	$nj,[$np],#4		@ np[j],np++
	mov	$ahi,#0
	umlal	$alo,$ahi,$aj,$bi	@ ap[j]*bp[0]
	mov	$nhi,#0
	umlal	$nlo,$nhi,$nj,$n0	@ np[j]*n0
	adds	$nlo,$nlo,$alo
	str	$nlo,[$tp],#4		@ tp[j-1]=,tp++
	adc	$nlo,$nhi,#0
	cmp	$tp,$num
	bne	.L1st

	adds	$nlo,$nlo,$ahi
	ldr	$tp,[$_bp]		@ restore bp
	mov	$nhi,#0
	ldr	$n0,[$_n0]		@ restore n0
	adc	$nhi,$nhi,#0
	str	$nlo,[$num]		@ tp[num-1]=
	mov	$tj,sp
	str	$nhi,[$num,#4]		@ tp[num]=


.Louter:
	sub	$tj,$num,$tj		@ "original" $num-1 value
	sub	$ap,$ap,$tj		@ "rewind" ap to &ap[1]
	ldr	$bi,[$tp,#4]!		@ *(++bp)
	sub	$np,$np,$tj		@ "rewind" np to &np[1]
	ldr	$aj,[$ap,#-4]		@ ap[0]
	ldr	$alo,[sp]		@ tp[0]
	ldr	$nj,[$np,#-4]		@ np[0]
	ldr	$tj,[sp,#4]		@ tp[1]

	mov	$ahi,#0
	umlal	$alo,$ahi,$aj,$bi	@ ap[0]*bp[i]+tp[0]
	str	$tp,[$_bp]		@ save bp
	mul	$n0,$alo,$n0
	mov	$nlo,#0
	umlal	$alo,$nlo,$nj,$n0	@ np[0]*n0+"tp[0]"
	mov	$tp,sp

.Linner:
	ldr	$aj,[$ap],#4		@ ap[j],ap++
	adds	$alo,$ahi,$tj		@ +=tp[j]
	ldr	$nj,[$np],#4		@ np[j],np++
	mov	$ahi,#0
	umlal	$alo,$ahi,$aj,$bi	@ ap[j]*bp[i]
	mov	$nhi,#0
	umlal	$nlo,$nhi,$nj,$n0	@ np[j]*n0
	adc	$ahi,$ahi,#0
	ldr	$tj,[$tp,#8]		@ tp[j+1]
	adds	$nlo,$nlo,$alo
	str	$nlo,[$tp],#4		@ tp[j-1]=,tp++
	adc	$nlo,$nhi,#0
	cmp	$tp,$num
	bne	.Linner

	adds	$nlo,$nlo,$ahi
	mov	$nhi,#0
	ldr	$tp,[$_bp]		@ restore bp
	adc	$nhi,$nhi,#0
	ldr	$n0,[$_n0]		@ restore n0
	adds	$nlo,$nlo,$tj
	ldr	$tj,[$_bpend]		@ restore &bp[num]
	adc	$nhi,$nhi,#0
	str	$nlo,[$num]		@ tp[num-1]=
	str	$nhi,[$num,#4]		@ tp[num]=

	cmp	$tp,$tj
#ifdef	__thumb2__
	itt	ne
#endif
	movne	$tj,sp
	bne	.Louter


	ldr	$rp,[$_rp]		@ pull rp
	mov	$aj,sp
	add	$num,$num,#4		@ $num to point at &tp[num]
	sub	$aj,$num,$aj		@ "original" num value
	mov	$tp,sp			@ "rewind" $tp
	mov	$ap,$tp			@ "borrow" $ap
	sub	$np,$np,$aj		@ "rewind" $np to &np[0]

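	@ Final reduction: tp[] may still be >= np[]. Subtract np[] with
	@ borrow into rp[], then let the last borrow select, word by
	@ word, whether tp[] or the difference is kept, zapping tp[] on
	@ the way out.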
	subs	$tj,$tj,$tj		@ "clear" carry flag
.Lsub:	ldr	$tj,[$tp],#4
	ldr	$nj,[$np],#4
	sbcs	$tj,$tj,$nj		@ tp[j]-np[j]
	str	$tj,[$rp],#4		@ rp[j]=
	teq	$tp,$num		@ preserve carry
	bne	.Lsub
	sbcs	$nhi,$nhi,#0		@ upmost carry
	mov	$tp,sp			@ "rewind" $tp
	sub	$rp,$rp,$aj		@ "rewind" $rp

.Lcopy:	ldr	$tj,[$tp]		@ conditional copy
	ldr	$aj,[$rp]
	str	sp,[$tp],#4		@ zap tp
#ifdef	__thumb2__
	it	cc
#endif
	movcc	$aj,$tj
	str	$aj,[$rp],#4
	teq	$tp,$num		@ preserve carry
	bne	.Lcopy

	mov	sp,$num
	add	sp,sp,#4		@ skip over tp[num+1]
	ldmia	sp!,{r4-r12,lr}		@ restore registers
	add	sp,sp,#2*4		@ skip over {r0,r2}
	mov	r0,#1
.Labrt:
#if __ARM_ARCH__>=5
	ret				@ bx lr
#else
	tst	lr,#1
	moveq	pc,lr			@ be binary compatible with V4, yet
	bx	lr			@ interoperable with Thumb ISA:-)
#endif
.size	bn_mul_mont,.-bn_mul_mont
___
{
my ($A0,$A1,$A2,$A3)=map("d$_",(0..3));
my ($N0,$N1,$N2,$N3)=map("d$_",(4..7));
my ($Z,$Temp)=("q4","q5");
my @ACC=map("q$_",(6..13));
my ($Bi,$Ni,$M0)=map("d$_",(28..31));
my $zero="$Z#lo";
my $temp="$Temp#lo";

my ($rptr,$aptr,$bptr,$nptr,$n0,$num)=map("r$_",(0..5));
my ($tinptr,$toutptr,$inner,$outer,$bnptr)=map("r$_",(6..11));

$code.=<<___;
#if __ARM_MAX_ARCH__>=7
.arch	armv7-a
.fpu	neon

.type	bn_mul8x_mont_neon,%function
.align	5
bn_mul8x_mont_neon:
	mov	ip,sp
	stmdb	sp!,{r4-r11}
	vstmdb	sp!,{d8-d15}		@ ABI specification says so
	ldmia	ip,{r4-r5}		@ load rest of parameter block
	mov	ip,sp

	cmp	$num,#8
	bhi	.LNEON_8n

	@ special case for $num==8, everything is in register bank...

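	@ Throughout the NEON path the b[] words and the Montgomery
	@ factors are split into 16-bit halves (vzip.16 against a zero
	@ vector), so each vmlal.u32 partial product fits a 64-bit lane
	@ with headroom; carries are pushed out 16 bits at a time with
	@ vshr.u64/vadd.u64.
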
	vld1.32		{${Bi}[0]}, [$bptr,:32]!
	veor		$zero,$zero,$zero
	sub		$toutptr,sp,$num,lsl#4
	vld1.32		{$A0-$A3},  [$aptr]!		@ can't specify :32 :-(
	and		$toutptr,$toutptr,#-64
	vld1.32		{${M0}[0]}, [$n0,:32]
	mov		sp,$toutptr			@ alloca
	vzip.16		$Bi,$zero

	vmull.u32	@ACC[0],$Bi,${A0}[0]
	vmull.u32	@ACC[1],$Bi,${A0}[1]
	vmull.u32	@ACC[2],$Bi,${A1}[0]
	vshl.i64	$Ni,@ACC[0]#hi,#16
	vmull.u32	@ACC[3],$Bi,${A1}[1]

	vadd.u64	$Ni,$Ni,@ACC[0]#lo
	veor		$zero,$zero,$zero
	vmul.u32	$Ni,$Ni,$M0

	vmull.u32	@ACC[4],$Bi,${A2}[0]
	vld1.32		{$N0-$N3}, [$nptr]!
	vmull.u32	@ACC[5],$Bi,${A2}[1]
	vmull.u32	@ACC[6],$Bi,${A3}[0]
	vzip.16		$Ni,$zero
	vmull.u32	@ACC[7],$Bi,${A3}[1]

	vmlal.u32	@ACC[0],$Ni,${N0}[0]
	sub		$outer,$num,#1
	vmlal.u32	@ACC[1],$Ni,${N0}[1]
	vmlal.u32	@ACC[2],$Ni,${N1}[0]
	vmlal.u32	@ACC[3],$Ni,${N1}[1]

	vmlal.u32	@ACC[4],$Ni,${N2}[0]
	vmov		$Temp,@ACC[0]
	vmlal.u32	@ACC[5],$Ni,${N2}[1]
	vmov		@ACC[0],@ACC[1]
	vmlal.u32	@ACC[6],$Ni,${N3}[0]
	vmov		@ACC[1],@ACC[2]
	vmlal.u32	@ACC[7],$Ni,${N3}[1]
	vmov		@ACC[2],@ACC[3]
	vmov		@ACC[3],@ACC[4]
	vshr.u64	$temp,$temp,#16
	vmov		@ACC[4],@ACC[5]
	vmov		@ACC[5],@ACC[6]
	vadd.u64	$temp,$temp,$Temp#hi
	vmov		@ACC[6],@ACC[7]
	veor		@ACC[7],@ACC[7]
	vshr.u64	$temp,$temp,#16

	b	.LNEON_outer8

.align	4
.LNEON_outer8:
	vld1.32		{${Bi}[0]}, [$bptr,:32]!
	veor		$zero,$zero,$zero
	vzip.16		$Bi,$zero
	vadd.u64	@ACC[0]#lo,@ACC[0]#lo,$temp

	vmlal.u32	@ACC[0],$Bi,${A0}[0]
	vmlal.u32	@ACC[1],$Bi,${A0}[1]
	vmlal.u32	@ACC[2],$Bi,${A1}[0]
	vshl.i64	$Ni,@ACC[0]#hi,#16
	vmlal.u32	@ACC[3],$Bi,${A1}[1]

	vadd.u64	$Ni,$Ni,@ACC[0]#lo
	veor		$zero,$zero,$zero
	subs		$outer,$outer,#1
	vmul.u32	$Ni,$Ni,$M0

	vmlal.u32	@ACC[4],$Bi,${A2}[0]
	vmlal.u32	@ACC[5],$Bi,${A2}[1]
	vmlal.u32	@ACC[6],$Bi,${A3}[0]
	vzip.16		$Ni,$zero
	vmlal.u32	@ACC[7],$Bi,${A3}[1]

	vmlal.u32	@ACC[0],$Ni,${N0}[0]
	vmlal.u32	@ACC[1],$Ni,${N0}[1]
	vmlal.u32	@ACC[2],$Ni,${N1}[0]
	vmlal.u32	@ACC[3],$Ni,${N1}[1]

	vmlal.u32	@ACC[4],$Ni,${N2}[0]
	vmov		$Temp,@ACC[0]
	vmlal.u32	@ACC[5],$Ni,${N2}[1]
	vmov		@ACC[0],@ACC[1]
	vmlal.u32	@ACC[6],$Ni,${N3}[0]
	vmov		@ACC[1],@ACC[2]
	vmlal.u32	@ACC[7],$Ni,${N3}[1]
	vmov		@ACC[2],@ACC[3]
	vmov		@ACC[3],@ACC[4]
	vshr.u64	$temp,$temp,#16
	vmov		@ACC[4],@ACC[5]
	vmov		@ACC[5],@ACC[6]
	vadd.u64	$temp,$temp,$Temp#hi
	vmov		@ACC[6],@ACC[7]
	veor		@ACC[7],@ACC[7]
	vshr.u64	$temp,$temp,#16

	bne	.LNEON_outer8

	vadd.u64	@ACC[0]#lo,@ACC[0]#lo,$temp
	mov		$toutptr,sp
	vshr.u64	$temp,@ACC[0]#lo,#16
	mov		$inner,$num
	vadd.u64	@ACC[0]#hi,@ACC[0]#hi,$temp
	add		$tinptr,sp,#96
	vshr.u64	$temp,@ACC[0]#hi,#16
	vzip.16		@ACC[0]#lo,@ACC[0]#hi

	b	.LNEON_tail_entry

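	@ General case: operands are processed eight 32-bit words per
	@ outer iteration, with the running result parked on the stack
	@ in the split 16-bit form between iterations.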
.align	4
.LNEON_8n:
	veor		@ACC[0],@ACC[0],@ACC[0]
	sub		$toutptr,sp,#128
	veor		@ACC[1],@ACC[1],@ACC[1]
	sub		$toutptr,$toutptr,$num,lsl#4
	veor		@ACC[2],@ACC[2],@ACC[2]
	and		$toutptr,$toutptr,#-64
	veor		@ACC[3],@ACC[3],@ACC[3]
	mov		sp,$toutptr			@ alloca
	veor		@ACC[4],@ACC[4],@ACC[4]
	add		$toutptr,$toutptr,#256
	veor		@ACC[5],@ACC[5],@ACC[5]
	sub		$inner,$num,#8
	veor		@ACC[6],@ACC[6],@ACC[6]
	veor		@ACC[7],@ACC[7],@ACC[7]

.LNEON_8n_init:
	vst1.64		{@ACC[0]-@ACC[1]},[$toutptr,:256]!
	subs		$inner,$inner,#8
	vst1.64		{@ACC[2]-@ACC[3]},[$toutptr,:256]!
	vst1.64		{@ACC[4]-@ACC[5]},[$toutptr,:256]!
	vst1.64		{@ACC[6]-@ACC[7]},[$toutptr,:256]!
	bne		.LNEON_8n_init

	add		$tinptr,sp,#256
	vld1.32		{$A0-$A3},[$aptr]!
	add		$bnptr,sp,#8
	vld1.32		{${M0}[0]},[$n0,:32]
	mov		$outer,$num
	b		.LNEON_8n_outer

.align	4
.LNEON_8n_outer:
	vld1.32		{${Bi}[0]},[$bptr,:32]!	@ *b++
	veor		$zero,$zero,$zero
	vzip.16		$Bi,$zero
	add		$toutptr,sp,#128
	vld1.32		{$N0-$N3},[$nptr]!

	vmlal.u32	@ACC[0],$Bi,${A0}[0]
	vmlal.u32	@ACC[1],$Bi,${A0}[1]
	veor		$zero,$zero,$zero
	vmlal.u32	@ACC[2],$Bi,${A1}[0]
	vshl.i64	$Ni,@ACC[0]#hi,#16
	vmlal.u32	@ACC[3],$Bi,${A1}[1]
	vadd.u64	$Ni,$Ni,@ACC[0]#lo
	vmlal.u32	@ACC[4],$Bi,${A2}[0]
	vmul.u32	$Ni,$Ni,$M0
	vmlal.u32	@ACC[5],$Bi,${A2}[1]
	vst1.32		{$Bi},[sp,:64]		@ put aside smashed b[8*i+0]
	vmlal.u32	@ACC[6],$Bi,${A3}[0]
	vzip.16		$Ni,$zero
	vmlal.u32	@ACC[7],$Bi,${A3}[1]
___
for ($i=0; $i<7;) {
$code.=<<___;
	vld1.32		{${Bi}[0]},[$bptr,:32]!	@ *b++
	vmlal.u32	@ACC[0],$Ni,${N0}[0]
	veor		$temp,$temp,$temp
	vmlal.u32	@ACC[1],$Ni,${N0}[1]
	vzip.16		$Bi,$temp
	vmlal.u32	@ACC[2],$Ni,${N1}[0]
	vshr.u64	@ACC[0]#lo,@ACC[0]#lo,#16
	vmlal.u32	@ACC[3],$Ni,${N1}[1]
	vmlal.u32	@ACC[4],$Ni,${N2}[0]
	vadd.u64	@ACC[0]#lo,@ACC[0]#lo,@ACC[0]#hi
	vmlal.u32	@ACC[5],$Ni,${N2}[1]
	vshr.u64	@ACC[0]#lo,@ACC[0]#lo,#16
	vmlal.u32	@ACC[6],$Ni,${N3}[0]
	vmlal.u32	@ACC[7],$Ni,${N3}[1]
	vadd.u64	@ACC[1]#lo,@ACC[1]#lo,@ACC[0]#lo
	vst1.32		{$Ni},[$bnptr,:64]!	@ put aside smashed m[8*i+$i]
___
	push(@ACC,shift(@ACC));	$i++;
$code.=<<___;
	vmlal.u32	@ACC[0],$Bi,${A0}[0]
	vld1.64		{@ACC[7]},[$tinptr,:128]!
	vmlal.u32	@ACC[1],$Bi,${A0}[1]
	veor		$zero,$zero,$zero
	vmlal.u32	@ACC[2],$Bi,${A1}[0]
	vshl.i64	$Ni,@ACC[0]#hi,#16
	vmlal.u32	@ACC[3],$Bi,${A1}[1]
	vadd.u64	$Ni,$Ni,@ACC[0]#lo
	vmlal.u32	@ACC[4],$Bi,${A2}[0]
	vmul.u32	$Ni,$Ni,$M0
	vmlal.u32	@ACC[5],$Bi,${A2}[1]
	vst1.32		{$Bi},[$bnptr,:64]!	@ put aside smashed b[8*i+$i]
	vmlal.u32	@ACC[6],$Bi,${A3}[0]
	vzip.16		$Ni,$zero
	vmlal.u32	@ACC[7],$Bi,${A3}[1]
___
}
$code.=<<___;
	vld1.32		{$Bi},[sp,:64]		@ pull smashed b[8*i+0]
	vmlal.u32	@ACC[0],$Ni,${N0}[0]
	vld1.32		{$A0-$A3},[$aptr]!
	vmlal.u32	@ACC[1],$Ni,${N0}[1]
	vmlal.u32	@ACC[2],$Ni,${N1}[0]
	vshr.u64	@ACC[0]#lo,@ACC[0]#lo,#16
	vmlal.u32	@ACC[3],$Ni,${N1}[1]
	vmlal.u32	@ACC[4],$Ni,${N2}[0]
	vadd.u64	@ACC[0]#lo,@ACC[0]#lo,@ACC[0]#hi
	vmlal.u32	@ACC[5],$Ni,${N2}[1]
	vshr.u64	@ACC[0]#lo,@ACC[0]#lo,#16
	vmlal.u32	@ACC[6],$Ni,${N3}[0]
	vmlal.u32	@ACC[7],$Ni,${N3}[1]
	vadd.u64	@ACC[1]#lo,@ACC[1]#lo,@ACC[0]#lo
	vst1.32		{$Ni},[$bnptr,:64]	@ put aside smashed m[8*i+$i]
	add		$bnptr,sp,#8		@ rewind
___
	push(@ACC,shift(@ACC));
$code.=<<___;
	sub		$inner,$num,#8
	b		.LNEON_8n_inner

.align	4
.LNEON_8n_inner:
	subs		$inner,$inner,#8
	vmlal.u32	@ACC[0],$Bi,${A0}[0]
	vld1.64		{@ACC[7]},[$tinptr,:128]
	vmlal.u32	@ACC[1],$Bi,${A0}[1]
	vld1.32		{$Ni},[$bnptr,:64]!	@ pull smashed m[8*i+0]
	vmlal.u32	@ACC[2],$Bi,${A1}[0]
	vld1.32		{$N0-$N3},[$nptr]!
	vmlal.u32	@ACC[3],$Bi,${A1}[1]
	it		ne
	addne		$tinptr,$tinptr,#16	@ don't advance in last iteration
	vmlal.u32	@ACC[4],$Bi,${A2}[0]
	vmlal.u32	@ACC[5],$Bi,${A2}[1]
	vmlal.u32	@ACC[6],$Bi,${A3}[0]
	vmlal.u32	@ACC[7],$Bi,${A3}[1]
___
for ($i=1; $i<8; $i++) {
$code.=<<___;
	vld1.32		{$Bi},[$bnptr,:64]!	@ pull smashed b[8*i+$i]
	vmlal.u32	@ACC[0],$Ni,${N0}[0]
	vmlal.u32	@ACC[1],$Ni,${N0}[1]
	vmlal.u32	@ACC[2],$Ni,${N1}[0]
	vmlal.u32	@ACC[3],$Ni,${N1}[1]
	vmlal.u32	@ACC[4],$Ni,${N2}[0]
	vmlal.u32	@ACC[5],$Ni,${N2}[1]
	vmlal.u32	@ACC[6],$Ni,${N3}[0]
	vmlal.u32	@ACC[7],$Ni,${N3}[1]
	vst1.64		{@ACC[0]},[$toutptr,:128]!
___
	push(@ACC,shift(@ACC));
$code.=<<___;
	vmlal.u32	@ACC[0],$Bi,${A0}[0]
	vld1.64		{@ACC[7]},[$tinptr,:128]
	vmlal.u32	@ACC[1],$Bi,${A0}[1]
	vld1.32		{$Ni},[$bnptr,:64]!	@ pull smashed m[8*i+$i]
	vmlal.u32	@ACC[2],$Bi,${A1}[0]
	it		ne
	addne		$tinptr,$tinptr,#16	@ don't advance in last iteration
	vmlal.u32	@ACC[3],$Bi,${A1}[1]
	vmlal.u32	@ACC[4],$Bi,${A2}[0]
	vmlal.u32	@ACC[5],$Bi,${A2}[1]
	vmlal.u32	@ACC[6],$Bi,${A3}[0]
	vmlal.u32	@ACC[7],$Bi,${A3}[1]
___
}
$code.=<<___;
	it		eq
	subeq		$aptr,$aptr,$num,lsl#2	@ rewind
	vmlal.u32	@ACC[0],$Ni,${N0}[0]
	vld1.32		{$Bi},[sp,:64]		@ pull smashed b[8*i+0]
	vmlal.u32	@ACC[1],$Ni,${N0}[1]
	vld1.32		{$A0-$A3},[$aptr]!
	vmlal.u32	@ACC[2],$Ni,${N1}[0]
	add		$bnptr,sp,#8		@ rewind
	vmlal.u32	@ACC[3],$Ni,${N1}[1]
	vmlal.u32	@ACC[4],$Ni,${N2}[0]
	vmlal.u32	@ACC[5],$Ni,${N2}[1]
	vmlal.u32	@ACC[6],$Ni,${N3}[0]
	vst1.64		{@ACC[0]},[$toutptr,:128]!
	vmlal.u32	@ACC[7],$Ni,${N3}[1]

	bne	.LNEON_8n_inner
___
	push(@ACC,shift(@ACC));
$code.=<<___;
	add		$tinptr,sp,#128
	vst1.64		{@ACC[0]-@ACC[1]},[$toutptr,:256]!
	veor		q2,q2,q2		@ $N0-$N1
	vst1.64		{@ACC[2]-@ACC[3]},[$toutptr,:256]!
	veor		q3,q3,q3		@ $N2-$N3
	vst1.64		{@ACC[4]-@ACC[5]},[$toutptr,:256]!
	vst1.64		{@ACC[6]},[$toutptr,:128]

	subs		$outer,$outer,#8
	vld1.64		{@ACC[0]-@ACC[1]},[$tinptr,:256]!
	vld1.64		{@ACC[2]-@ACC[3]},[$tinptr,:256]!
	vld1.64		{@ACC[4]-@ACC[5]},[$tinptr,:256]!
	vld1.64		{@ACC[6]-@ACC[7]},[$tinptr,:256]!

	itt		ne
	subne		$nptr,$nptr,$num,lsl#2	@ rewind
	bne		.LNEON_8n_outer

	add		$toutptr,sp,#128
	vst1.64		{q2-q3}, [sp,:256]!	@ start wiping stack frame
	vshr.u64	$temp,@ACC[0]#lo,#16
	vst1.64		{q2-q3},[sp,:256]!
	vadd.u64	@ACC[0]#hi,@ACC[0]#hi,$temp
	vst1.64		{q2-q3}, [sp,:256]!
	vshr.u64	$temp,@ACC[0]#hi,#16
	vst1.64		{q2-q3}, [sp,:256]!
	vzip.16		@ACC[0]#lo,@ACC[0]#hi

	mov		$inner,$num
	b		.LNEON_tail_entry

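	@ Tail: fold the 16-bit carries across the redundant result and
	@ re-pack pairs of halves into 32-bit words (vzip.16) as they
	@ are stored out.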
.align	4
.LNEON_tail:
	vadd.u64	@ACC[0]#lo,@ACC[0]#lo,$temp
	vshr.u64	$temp,@ACC[0]#lo,#16
	vld1.64		{@ACC[2]-@ACC[3]}, [$tinptr, :256]!
	vadd.u64	@ACC[0]#hi,@ACC[0]#hi,$temp
	vld1.64		{@ACC[4]-@ACC[5]}, [$tinptr, :256]!
	vshr.u64	$temp,@ACC[0]#hi,#16
	vld1.64		{@ACC[6]-@ACC[7]}, [$tinptr, :256]!
	vzip.16		@ACC[0]#lo,@ACC[0]#hi

.LNEON_tail_entry:
___
for ($i=1; $i<8; $i++) {
$code.=<<___;
	vadd.u64	@ACC[1]#lo,@ACC[1]#lo,$temp
	vst1.32		{@ACC[0]#lo[0]}, [$toutptr, :32]!
	vshr.u64	$temp,@ACC[1]#lo,#16
	vadd.u64	@ACC[1]#hi,@ACC[1]#hi,$temp
	vshr.u64	$temp,@ACC[1]#hi,#16
	vzip.16		@ACC[1]#lo,@ACC[1]#hi
___
	push(@ACC,shift(@ACC));
}
	push(@ACC,shift(@ACC));
$code.=<<___;
	vld1.64		{@ACC[0]-@ACC[1]}, [$tinptr, :256]!
	subs		$inner,$inner,#8
	vst1.32		{@ACC[7]#lo[0]}, [$toutptr, :32]!
	bne	.LNEON_tail

	vst1.32	{${temp}[0]}, [$toutptr, :32]		@ top-most bit
	sub	$nptr,$nptr,$num,lsl#2			@ rewind $nptr
	subs	$aptr,sp,#0				@ clear carry flag
	add	$bptr,sp,$num,lsl#2

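	@ As in the integer path: subtract the modulus four words per
	@ iteration; the final borrow then drives the conditional
	@ copy-and-wipe loop below.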
.LNEON_sub:
	ldmia	$aptr!, {r4-r7}
	ldmia	$nptr!, {r8-r11}
	sbcs	r8, r4,r8
	sbcs	r9, r5,r9
	sbcs	r10,r6,r10
	sbcs	r11,r7,r11
	teq	$aptr,$bptr				@ preserves carry
	stmia	$rptr!, {r8-r11}
	bne	.LNEON_sub

	ldr	r10, [$aptr]				@ load top-most bit
	mov	r11,sp
	veor	q0,q0,q0
	sub	r11,$bptr,r11				@ this is num*4
	veor	q1,q1,q1
	mov	$aptr,sp
	sub	$rptr,$rptr,r11				@ rewind $rptr
	mov	$nptr,$bptr				@ second 3/4th of frame
	sbcs	r10,r10,#0				@ result is carry flag

.LNEON_copy_n_zap:
	ldmia	$aptr!, {r4-r7}
	ldmia	$rptr,  {r8-r11}
	it	cc
	movcc	r8, r4
	vst1.64	{q0-q1}, [$nptr,:256]!			@ wipe
	itt	cc
	movcc	r9, r5
	movcc	r10,r6
	vst1.64	{q0-q1}, [$nptr,:256]!			@ wipe
	it	cc
	movcc	r11,r7
	ldmia	$aptr,  {r4-r7}
	stmia	$rptr!, {r8-r11}
	sub	$aptr,$aptr,#16
	ldmia	$rptr, {r8-r11}
	it	cc
	movcc	r8, r4
	vst1.64	{q0-q1}, [$aptr,:256]!			@ wipe
	itt	cc
	movcc	r9, r5
	movcc	r10,r6
	vst1.64	{q0-q1}, [$nptr,:256]!			@ wipe
	it	cc
	movcc	r11,r7
	teq	$aptr,$bptr				@ preserves carry
	stmia	$rptr!, {r8-r11}
	bne	.LNEON_copy_n_zap

	mov	sp,ip
	vldmia	sp!,{d8-d15}
	ldmia	sp!,{r4-r11}
	ret						@ bx lr
.size	bn_mul8x_mont_neon,.-bn_mul8x_mont_neon
#endif
___
}
$code.=<<___;
.asciz	"Montgomery multiplication for ARMv4/NEON, CRYPTOGAMS by <appro\@openssl.org>"
.align	2
#if __ARM_MAX_ARCH__>=7
.extern	OPENSSL_armcap_P
#endif
___

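# Post-process $code for the target assembler: evaluate `...` escapes,
# rewrite the qN#lo/qN#hi convenience notation to the underlying
# d-registers (q<n>#lo -> d<2n>, q<n>#hi -> d<2n+1>), turn "ret" into
# "bx lr", and encode "bx lr" itself as .word 0xe12fff1e so the output
# still assembles with -march=armv4.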
foreach (split("\n",$code)) {
	s/\`([^\`]*)\`/eval $1/ge;

	s/\bq([0-9]+)#(lo|hi)/sprintf "d%d",2*$1+($2 eq "hi")/ge	or
	s/\bret\b/bx	lr/g						or
	s/\bbx\s+lr\b/.word\t0xe12fff1e/g;	# make it possible to compile with -march=armv4

	print $_,"\n";
}

close STDOUT or die "error closing STDOUT: $!";