#! /usr/bin/env perl
# Copyright 2004-2016 The OpenSSL Project Authors. All Rights Reserved.
#
# Licensed under the OpenSSL license (the "License"). You may not use
# this file except in compliance with the License. You can obtain a copy
# in the file LICENSE in the source distribution or at
# https://www.openssl.org/source/license.html

# Implemented as a Perl wrapper as we want to support several different
# architectures with a single file. We pick up the target based on the
# file name we are asked to generate.
#
# It should be noted though that this perl code is nothing like
# /crypto/perlasm/x86*. In this case perl is used pretty much
# as a pre-processor to cover for platform differences in name decoration,
# linker tables, 32-/64-bit instruction sets...
#
# As you might know there are several PowerPC ABIs in use. Most notably
# Linux and AIX use different 32-bit ABIs. The good news is that these
# ABIs are similar enough to implement leaf(!) functions, which would be
# ABI neutral. And that's what you find here: ABI neutral leaf functions.
# In case you wonder what that is...
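#
# The wrapper is driven purely by its command line. A minimal invocation
# sketch (the flavour and output names below are illustrative; the exact
# strings come from the build system):
#
#	perl ppc.pl linux32 bn-ppc.s	# 32-bit mnemonics
#	perl ppc.pl aix64   bn-ppc64.s	# 64-bit mnemonics
#
# The first argument selects the instruction set below; it and the output
# file name are forwarded to ppc-xlate.pl, which covers platform-specific
# name decoration and syntax.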
#
#	AIX performance
#
#	MEASUREMENTS WITH cc ON A 200 MHz PowerPC 604e.
#
#	The following is the performance of 32-bit compiler
#	generated code:
#
#	OpenSSL 0.9.6c 21 dec 2001
#	built on: Tue Jun 11 11:06:51 EDT 2002
#	options:bn(64,32) ...
#compiler: cc -DTHREADS -DAIX -DB_ENDIAN -DBN_LLONG -O3
#                  sign    verify    sign/s verify/s
#rsa  512 bits   0.0098s   0.0009s    102.0   1170.6
#rsa 1024 bits   0.0507s   0.0026s     19.7    387.5
#rsa 2048 bits   0.3036s   0.0085s      3.3    117.1
#rsa 4096 bits   2.0040s   0.0299s      0.5     33.4
#dsa  512 bits   0.0087s   0.0106s    114.3     94.5
#dsa 1024 bits   0.0256s   0.0313s     39.0     32.0
#
#	Same benchmark with this assembler code:
#
#rsa  512 bits   0.0056s   0.0005s    178.6   2049.2
#rsa 1024 bits   0.0283s   0.0015s     35.3    674.1
#rsa 2048 bits   0.1744s   0.0050s      5.7    201.2
#rsa 4096 bits   1.1644s   0.0179s      0.9     55.7
#dsa  512 bits   0.0052s   0.0062s    191.6    162.0
#dsa 1024 bits   0.0149s   0.0180s     67.0     55.5
#
#	Performance increases by almost 75%
#
#	Here are performance numbers for 64-bit compiler
#	generated code:
#
#	OpenSSL 0.9.6g [engine] 9 Aug 2002
#	built on: Fri Apr 18 16:59:20 EDT 2003
#	options:bn(64,64) ...
#	compiler: cc -DTHREADS -D_REENTRANT -q64 -DB_ENDIAN -O3
#                  sign    verify    sign/s verify/s
#rsa  512 bits   0.0028s   0.0003s    357.1   3844.4
#rsa 1024 bits   0.0148s   0.0008s     67.5   1239.7
#rsa 2048 bits   0.0963s   0.0028s     10.4    353.0
#rsa 4096 bits   0.6538s   0.0102s      1.5     98.1
#dsa  512 bits   0.0026s   0.0032s    382.5    313.7
#dsa 1024 bits   0.0081s   0.0099s    122.8    100.6
#
#	Same benchmark with this assembler code:
#
#rsa  512 bits   0.0020s   0.0002s    510.4   6273.7
#rsa 1024 bits   0.0088s   0.0005s    114.1   2128.3
#rsa 2048 bits   0.0540s   0.0016s     18.5    622.5
#rsa 4096 bits   0.3700s   0.0058s      2.7    171.0
#dsa  512 bits   0.0016s   0.0020s    610.7    507.1
#dsa 1024 bits   0.0047s   0.0058s    212.5    173.2
#
#	Again, performance increases by about 75%
#
#	Mac OS X, Apple G5 1.8GHz (Note this is 32-bit code)
#	OpenSSL 0.9.7c 30 Sep 2003
#
#	Original code.
#
#rsa  512 bits   0.0011s   0.0001s    906.1  11012.5
#rsa 1024 bits   0.0060s   0.0003s    166.6   3363.1
#rsa 2048 bits   0.0370s   0.0010s     27.1    982.4
#rsa 4096 bits   0.2426s   0.0036s      4.1    280.4
#dsa  512 bits   0.0010s   0.0012s   1038.1    841.5
#dsa 1024 bits   0.0030s   0.0037s    329.6    269.7
#dsa 2048 bits   0.0101s   0.0127s     98.9     78.6
#
#	Same benchmark with this assembler code:
#
#rsa  512 bits   0.0007s   0.0001s   1416.2  16645.9
#rsa 1024 bits   0.0036s   0.0002s    274.4   5380.6
#rsa 2048 bits   0.0222s   0.0006s     45.1   1589.5
#rsa 4096 bits   0.1469s   0.0022s      6.8    449.6
#dsa  512 bits   0.0006s   0.0007s   1664.2   1376.2
#dsa 1024 bits   0.0018s   0.0023s    545.0    442.2
#dsa 2048 bits   0.0061s   0.0075s    163.5    132.8
#
#	Performance increase of ~60%
#
#	Based on submission from Suresh N. Chari of IBM

$flavour = shift;

if ($flavour =~ /32/) {
	$BITS=	32;
	$BNSZ=	$BITS/8;
	$ISA=	"\"ppc\"";

	$LD=	"lwz";		# load
	$LDU=	"lwzu";		# load and update
	$ST=	"stw";		# store
	$STU=	"stwu";		# store and update
	$UMULL=	"mullw";	# unsigned multiply low
	$UMULH=	"mulhwu";	# unsigned multiply high
	$UDIV=	"divwu";	# unsigned divide
	$UCMPI=	"cmplwi";	# unsigned compare with immediate
	$UCMP=	"cmplw";	# unsigned compare
	$CNTLZ=	"cntlzw";	# count leading zeros
	$SHL=	"slw";		# shift left
	$SHR=	"srw";		# unsigned shift right
	$SHRI=	"srwi";		# unsigned shift right by immediate
	$SHLI=	"slwi";		# shift left by immediate
	$CLRU=	"clrlwi";	# clear upper bits
	$INSR=	"insrwi";	# insert right
	$ROTL=	"rotlwi";	# rotate left by immediate
	$TR=	"tw";		# conditional trap
} elsif ($flavour =~ /64/) {
	$BITS=	64;
	$BNSZ=	$BITS/8;
	$ISA=	"\"ppc64\"";

	# same as above, but 64-bit mnemonics...
	$LD=	"ld";		# load
	$LDU=	"ldu";		# load and update
	$ST=	"std";		# store
	$STU=	"stdu";		# store and update
	$UMULL=	"mulld";	# unsigned multiply low
	$UMULH=	"mulhdu";	# unsigned multiply high
	$UDIV=	"divdu";	# unsigned divide
	$UCMPI=	"cmpldi";	# unsigned compare with immediate
	$UCMP=	"cmpld";	# unsigned compare
	$CNTLZ=	"cntlzd";	# count leading zeros
	$SHL=	"sld";		# shift left
	$SHR=	"srd";		# unsigned shift right
	$SHRI=	"srdi";		# unsigned shift right by immediate
	$SHLI=	"sldi";		# shift left by immediate
	$CLRU=	"clrldi";	# clear upper bits
	$INSR=	"insrdi";	# insert right
	$ROTL=	"rotldi";	# rotate left by immediate
	$TR=	"td";		# conditional trap
} else { die "nonsense $flavour"; }

$0 =~ m/(.*[\/\\])[^\/\\]+$/; $dir=$1;
( $xlate="${dir}ppc-xlate.pl" and -f $xlate ) or
( $xlate="${dir}../../perlasm/ppc-xlate.pl" and -f $xlate) or
die "can't locate ppc-xlate.pl";

open STDOUT,"| $^X $xlate $flavour ".shift or die "can't call $xlate: $!";

$data=<<EOF;
.machine	"any"
.text

.globl	.bn_sub_words
.globl	.bn_add_words
.globl	.bn_div_words
.globl	.bn_mul_words
.globl	.bn_mul_add_words

#
#	NOTE:	The following label name should be changed to
#		"bn_sub_words" i.e. remove the first dot
#		for the gcc compiler. This should be automatically
#		done in the build
#
.align	4
.bn_sub_words:
#
#	Handcoded version of bn_sub_words
#
#BN_ULONG bn_sub_words(BN_ULONG *r, BN_ULONG *a, BN_ULONG *b, int n)
#
#	r3 = r
#	r4 = a
#	r5 = b
#	r6 = n
#
	xor	r0,r0,r0	#set r0 = 0
#
#	check for r6 = 0 AND set carry bit.
#
	subfc.	r7,r0,r6	# If r6 is 0 then result is 0.
				# if r6 > 0 then result !=0
				# In either case carry bit is set.
	beq	Lppcasm_sub_adios
	addi	r4,r4,-$BNSZ
	addi	r3,r3,-$BNSZ
	addi	r5,r5,-$BNSZ
	mtctr	r6
Lppcasm_sub_mainloop:
	$LDU	r7,$BNSZ(r4)
	$LDU	r8,$BNSZ(r5)
	subfe	r6,r8,r7	# r6 = r7+carry bit + onescomplement(r8)
				# if carry = 1 this is r7-r8. Else it
				# is r7-r8 -1 as we need.
	$STU	r6,$BNSZ(r3)
	bdnz	Lppcasm_sub_mainloop
Lppcasm_sub_adios:
	subfze	r3,r0		# if carry bit is set then r3 = 0 else -1
	andi.	r3,r3,1		# keep only last bit.
	blr
	.long	0
	.byte	0,12,0x14,0,0,0,4,0
	.long	0
.size	.bn_sub_words,.-.bn_sub_words
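
#
#	For reference, a C sketch of the contract .bn_sub_words implements
#	(not part of the original file; BN_ULONG is the word type from the
#	bn headers, the loop body is illustrative). The borrow below is
#	what the subfc./subfe/subfze chain above tracks in the carry bit:
#
#	BN_ULONG bn_sub_words(BN_ULONG *r, BN_ULONG *a, BN_ULONG *b, int n)
#	{
#		BN_ULONG borrow = 0;
#		while (n-- > 0) {
#			BN_ULONG t = *a - *b - borrow;
#			borrow = (*a < *b) || (*a == *b && borrow);
#			*r++ = t; a++; b++;
#		}
#		return borrow;
#	}
#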
#
#	NOTE:	The following label name should be changed to
#		"bn_add_words" i.e. remove the first dot
#		for the gcc compiler. This should be automatically
#		done in the build
#
.align	4
.bn_add_words:
#
#	Handcoded version of bn_add_words
#
#BN_ULONG bn_add_words(BN_ULONG *r, BN_ULONG *a, BN_ULONG *b, int n)
#
#	r3 = r
#	r4 = a
#	r5 = b
#	r6 = n
#
#	Note:	No loop unrolling done since this is not a performance
#		critical loop.

	xor	r0,r0,r0
#
#	check for r6 = 0. Is this needed?
#
	addic.	r6,r6,0		#test r6 and clear carry bit.
	beq	Lppcasm_add_adios
	addi	r4,r4,-$BNSZ
	addi	r3,r3,-$BNSZ
	addi	r5,r5,-$BNSZ
	mtctr	r6
Lppcasm_add_mainloop:
	$LDU	r7,$BNSZ(r4)
	$LDU	r8,$BNSZ(r5)
	adde	r8,r7,r8
	$STU	r8,$BNSZ(r3)
	bdnz	Lppcasm_add_mainloop
Lppcasm_add_adios:
	addze	r3,r0		#return carry bit.
	blr
	.long	0
	.byte	0,12,0x14,0,0,0,4,0
	.long	0
.size	.bn_add_words,.-.bn_add_words

#
#	NOTE:	The following label name should be changed to
#		"bn_div_words" i.e. remove the first dot
#		for the gcc compiler. This should be automatically
#		done in the build
#
.align	4
.bn_div_words:
#
#	This is a cleaned up version of code generated by
#	the AIX compiler. The only optimization is to use
#	the PPC instruction to count leading zeros instead
#	of a call to num_bits_word. Since this was compiled
#	only at level -O2 we can possibly squeeze it more?
#
#	r3 = h
#	r4 = l
#	r5 = d

	$UCMPI	0,r5,0			# compare r5 and 0
	bne	Lppcasm_div1		# proceed if d!=0
	li	r3,-1			# d=0 return -1
	blr
Lppcasm_div1:
	xor	r0,r0,r0		#r0=0
	li	r8,$BITS
	$CNTLZ.	r7,r5			#r7 = num leading 0s in d.
	beq	Lppcasm_div2		#proceed if no leading zeros
	subf	r8,r7,r8		#r8 = BN_num_bits_word(d)
	$SHR.	r9,r3,r8		#are there any bits above r8'th?
	$TR	16,r9,r0		#if there're, signal to dump core...
Lppcasm_div2:
	$UCMP	0,r3,r5			#h>=d?
	blt	Lppcasm_div3		#goto Lppcasm_div3 if not
	subf	r3,r5,r3		#h-=d ;
Lppcasm_div3:				#r7 = BN_BITS2-i. so r7=i
	cmpi	0,0,r7,0		# is (i == 0)?
	beq	Lppcasm_div4
	$SHL	r3,r3,r7		# h = (h<<i)
	$SHR	r8,r4,r8		# r8 = (l >> BN_BITS2-i)
	$SHL	r5,r5,r7		# d<<=i
	or	r3,r3,r8		# h = (h<<i)|(l>>(BN_BITS2-i))
	$SHL	r4,r4,r7		# l<<=i
Lppcasm_div4:
	$SHRI	r9,r5,`$BITS/2`		# r9 = dh
					# dl will be computed when needed
					# as it saves registers.
	li	r6,2			#r6=2
	mtctr	r6			#counter will be in count.
Lppcasm_divouterloop:
	$SHRI	r8,r3,`$BITS/2`		#r8 = (h>>BN_BITS4)
	$SHRI	r11,r4,`$BITS/2`	#r11= (l&BN_MASK2h)>>BN_BITS4
					# compute here for innerloop.
	$UCMP	0,r8,r9			# is (h>>BN_BITS4)==dh
	bne	Lppcasm_div5		# goto Lppcasm_div5 if not

	li	r8,-1
	$CLRU	r8,r8,`$BITS/2`		#q = BN_MASK2l
	b	Lppcasm_div6
Lppcasm_div5:
	$UDIV	r8,r3,r9		#q = h/dh
Lppcasm_div6:
	$UMULL	r12,r9,r8		#th = q*dh
	$CLRU	r10,r5,`$BITS/2`	#r10=dl
	$UMULL	r6,r8,r10		#tl = q*dl

Lppcasm_divinnerloop:
	subf	r10,r12,r3		#t = h-th
	$SHRI	r7,r10,`$BITS/2`	#r7= (t &BN_MASK2H), sort of...
	addic.	r7,r7,0			#test if r7 == 0. used below.
					# now want to compute
					# r7 = (t<<BN_BITS4)|(l>>BN_BITS4)
					# the following 2 instructions do that
	$SHLI	r7,r10,`$BITS/2`	# r7 = (t<<BN_BITS4)
	$INSR	r7,r4,`$BITS/2`,`$BITS/2`	# r7=(t<<BN_BITS4)|(l>>BN_BITS4)
	$UCMP	cr1,r6,r7		# compare (tl <= r7)
	bne	Lppcasm_divinnerexit
	ble	cr1,Lppcasm_divinnerexit
	addi	r8,r8,-1		#q--
	subf	r12,r9,r12		#th -=dh
	$CLRU	r10,r5,`$BITS/2`	#r10=dl. t is no longer needed in loop.
	subf	r6,r10,r6		#tl -=dl
	b	Lppcasm_divinnerloop
Lppcasm_divinnerexit:
	$SHRI	r10,r6,`$BITS/2`	#t=(tl>>BN_BITS4)
	$SHLI	r11,r6,`$BITS/2`	#tl=(tl<<BN_BITS4)&BN_MASK2h;
	$UCMP	cr1,r4,r11		#compare l and tl
	add	r12,r12,r10		#thh = th + t;
	bge	cr1,Lppcasm_div7	#if (l>=tl) goto Lppcasm_div7
	addi	r12,r12,1		# thh++
Lppcasm_div7:
	subf	r11,r11,r4		#r11=l-tl
	$UCMP	cr1,r3,r12		#compare h and th
	bge	cr1,Lppcasm_div8	#if (h>=th) goto Lppcasm_div8
	addi	r8,r8,-1		# q--
	add	r3,r5,r3		# h+=d
Lppcasm_div8:
	subf	r12,r12,r3		#r12 = h-th
	$SHLI	r4,r11,`$BITS/2`	#l=(l&BN_MASK2l)<<BN_BITS4
					# want to compute
					# r3 = ((h<<BN_BITS4)|(l>>BN_BITS4))&BN_MASK2
					# the following 2 instructions will do this.
	$INSR	r11,r12,`$BITS/2`,`$BITS/2`	# r11 is the value we want rotated $BITS/2.
	$ROTL	r3,r11,`$BITS/2`	# rotate by $BITS/2 and store in r3
	bdz	Lppcasm_div9		#if (count==0) break ;
	$SHLI	r0,r8,`$BITS/2`		#ret = q<<BN_BITS4
	b	Lppcasm_divouterloop
Lppcasm_div9:
	or	r3,r8,r0
	blr
	.long	0
	.byte	0,12,0x14,0,0,0,3,0
	.long	0
.size	.bn_div_words,.-.bn_div_words

#
#	NOTE:	The following label name should be changed to
#		"bn_mul_words" i.e. remove the first dot
#		for the gcc compiler. This should be automatically
#		done in the build
#
.align	4
.bn_mul_words:
#
# BN_ULONG bn_mul_words(BN_ULONG *rp, BN_ULONG *ap, int num, BN_ULONG w)
#
# r3 = rp
# r4 = ap
# r5 = num
# r6 = w

	xor	r0,r0,r0
	xor	r12,r12,r12		# used for carry
	rlwinm.	r7,r5,30,2,31		# num >> 2
	beq	Lppcasm_mw_REM
	mtctr	r7
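#
#	The unrolled loop below keeps the running carry in the CA bit:
#	the first word uses addc, later words use adde so the high half
#	of the previous multiply is folded in together with the carry,
#	and only the fourth spin materializes the carry into r12 with
#	addze. As a C sketch, every spin computes the following (not
#	from the original source; dword stands for an illustrative type
#	twice as wide as BN_ULONG):
#
#		dword t = (dword)w * ap[i] + c1;
#		rp[i]   = low_half(t);
#		c1      = high_half(t);
#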
Lppcasm_mw_LOOP:
	#mul(rp[0],ap[0],w,c1);
	$LD	r8,`0*$BNSZ`(r4)
	$UMULL	r9,r6,r8
	$UMULH	r10,r6,r8
	addc	r9,r9,r12
	#addze	r10,r10		#carry is NOT ignored.
				#will be taken care of
				#in second spin below
				#using adde.
	$ST	r9,`0*$BNSZ`(r3)
	#mul(rp[1],ap[1],w,c1);
	$LD	r8,`1*$BNSZ`(r4)
	$UMULL	r11,r6,r8
	$UMULH	r12,r6,r8
	adde	r11,r11,r10
	#addze	r12,r12
	$ST	r11,`1*$BNSZ`(r3)
	#mul(rp[2],ap[2],w,c1);
	$LD	r8,`2*$BNSZ`(r4)
	$UMULL	r9,r6,r8
	$UMULH	r10,r6,r8
	adde	r9,r9,r12
	#addze	r10,r10
	$ST	r9,`2*$BNSZ`(r3)
	#mul(rp[3],ap[3],w,c1);
	$LD	r8,`3*$BNSZ`(r4)
	$UMULL	r11,r6,r8
	$UMULH	r12,r6,r8
	adde	r11,r11,r10
	addze	r12,r12		#this spin we collect carry into r12
	$ST	r11,`3*$BNSZ`(r3)

	addi	r3,r3,`4*$BNSZ`
	addi	r4,r4,`4*$BNSZ`
	bdnz	Lppcasm_mw_LOOP

Lppcasm_mw_REM:
	andi.	r5,r5,0x3
	beq	Lppcasm_mw_OVER
	#mul(rp[0],ap[0],w,c1);
	$LD	r8,`0*$BNSZ`(r4)
	$UMULL	r9,r6,r8
	$UMULH	r10,r6,r8
	addc	r9,r9,r12
	addze	r10,r10
	$ST	r9,`0*$BNSZ`(r3)
	addi	r12,r10,0
	addi	r5,r5,-1
	cmpli	0,0,r5,0
	beq	Lppcasm_mw_OVER
	#mul(rp[1],ap[1],w,c1);
	$LD	r8,`1*$BNSZ`(r4)
	$UMULL	r9,r6,r8
	$UMULH	r10,r6,r8
	addc	r9,r9,r12
	addze	r10,r10
	$ST	r9,`1*$BNSZ`(r3)
	addi	r12,r10,0
	addi	r5,r5,-1
	cmpli	0,0,r5,0
	beq	Lppcasm_mw_OVER
	#mul(rp[2],ap[2],w,c1);
	$LD	r8,`2*$BNSZ`(r4)
	$UMULL	r9,r6,r8
	$UMULH	r10,r6,r8
	addc	r9,r9,r12
	addze	r10,r10
	$ST	r9,`2*$BNSZ`(r3)
	addi	r12,r10,0

Lppcasm_mw_OVER:
	addi	r3,r12,0
	blr
	.long	0
	.byte	0,12,0x14,0,0,0,4,0
	.long	0
.size	.bn_mul_words,.-.bn_mul_words

#
#	NOTE:	The following label name should be changed to
#		"bn_mul_add_words" i.e. remove the first dot
#		for the gcc compiler. This should be automatically
#		done in the build
#
.align	4
.bn_mul_add_words:
#
# BN_ULONG bn_mul_add_words(BN_ULONG *rp, BN_ULONG *ap, int num, BN_ULONG w)
#
# r3 = rp
# r4 = ap
# r5 = num
# r6 = w
#
# empirical evidence suggests that unrolled version performs best!!
#
	xor	r0,r0,r0		#r0 = 0
	xor	r12,r12,r12		#r12 = 0. used for carry
	rlwinm.	r7,r5,30,2,31		# num >> 2
	beq	Lppcasm_maw_leftover	# if (num < 4) go to Lppcasm_maw_leftover
	mtctr	r7
Lppcasm_maw_mainloop:
	#mul_add(rp[0],ap[0],w,c1);
	$LD	r8,`0*$BNSZ`(r4)
	$LD	r11,`0*$BNSZ`(r3)
	$UMULL	r9,r6,r8
	$UMULH	r10,r6,r8
	addc	r9,r9,r12		#r12 is carry.
	addze	r10,r10
	addc	r9,r9,r11
	#addze	r10,r10
				#the above instruction addze
				#is NOT needed. Carry will NOT
				#be ignored. It's not affected
				#by multiply and will be collected
				#in the next spin
	$ST	r9,`0*$BNSZ`(r3)
	#mul_add(rp[1],ap[1],w,c1);
	$LD	r8,`1*$BNSZ`(r4)
	$LD	r9,`1*$BNSZ`(r3)
	$UMULL	r11,r6,r8
	$UMULH	r12,r6,r8
	adde	r11,r11,r10		#r10 is carry.
	addze	r12,r12
	addc	r11,r11,r9
	#addze	r12,r12
	$ST	r11,`1*$BNSZ`(r3)
	#mul_add(rp[2],ap[2],w,c1);
	$LD	r8,`2*$BNSZ`(r4)
	$UMULL	r9,r6,r8
	$LD	r11,`2*$BNSZ`(r3)
	$UMULH	r10,r6,r8
	adde	r9,r9,r12
	addze	r10,r10
	addc	r9,r9,r11
	#addze	r10,r10
	$ST	r9,`2*$BNSZ`(r3)
	#mul_add(rp[3],ap[3],w,c1);
	$LD	r8,`3*$BNSZ`(r4)
	$UMULL	r11,r6,r8
	$LD	r9,`3*$BNSZ`(r3)
	$UMULH	r12,r6,r8
	adde	r11,r11,r10
	addze	r12,r12
	addc	r11,r11,r9
	addze	r12,r12
	$ST	r11,`3*$BNSZ`(r3)
	addi	r3,r3,`4*$BNSZ`
	addi	r4,r4,`4*$BNSZ`
	bdnz	Lppcasm_maw_mainloop
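#
#	bn_mul_add_words needs two carry chains per word: w*ap[i] plus
#	the incoming carry can overflow into the high half, and adding
#	the existing rp[i] word can carry once more, hence the paired
#	addc/adde/addze sequences above. A C sketch of one word (not
#	from the original source; dword as in the sketch further up):
#
#		dword t = (dword)w * ap[i] + rp[i] + c1;
#		rp[i]   = low_half(t);
#		c1      = high_half(t);
#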
Lppcasm_maw_leftover:
	andi.	r5,r5,0x3
	beq	Lppcasm_maw_adios
	addi	r3,r3,-$BNSZ
	addi	r4,r4,-$BNSZ
	#mul_add(rp[0],ap[0],w,c1);
	mtctr	r5
	$LDU	r8,$BNSZ(r4)
	$UMULL	r9,r6,r8
	$UMULH	r10,r6,r8
	$LDU	r11,$BNSZ(r3)
	addc	r9,r9,r11
	addze	r10,r10
	addc	r9,r9,r12
	addze	r12,r10
	$ST	r9,0(r3)

	bdz	Lppcasm_maw_adios
	#mul_add(rp[1],ap[1],w,c1);
	$LDU	r8,$BNSZ(r4)
	$UMULL	r9,r6,r8
	$UMULH	r10,r6,r8
	$LDU	r11,$BNSZ(r3)
	addc	r9,r9,r11
	addze	r10,r10
	addc	r9,r9,r12
	addze	r12,r10
	$ST	r9,0(r3)

	bdz	Lppcasm_maw_adios
	#mul_add(rp[2],ap[2],w,c1);
	$LDU	r8,$BNSZ(r4)
	$UMULL	r9,r6,r8
	$UMULH	r10,r6,r8
	$LDU	r11,$BNSZ(r3)
	addc	r9,r9,r11
	addze	r10,r10
	addc	r9,r9,r12
	addze	r12,r10
	$ST	r9,0(r3)

Lppcasm_maw_adios:
	addi	r3,r12,0
	blr
	.long	0
	.byte	0,12,0x14,0,0,0,4,0
	.long	0
.size	.bn_mul_add_words,.-.bn_mul_add_words
	.align	4
EOF
$data =~ s/\`([^\`]*)\`/eval $1/gem;
print $data;
close STDOUT;
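
# A worked example of the backtick pass above (a sketch, assuming the
# 64-bit flavour): during heredoc interpolation the template line
#
#	$LD	r8,`1*$BNSZ`(r4)
#
# becomes "ld	r8,`1*8`(r4)", and the s///gem substitution then evals
# every backtick-quoted span, yielding "ld	r8,8(r4)" before the
# stream is piped through ppc-xlate.pl for flavour-specific syntax.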