/* SPDX-License-Identifier: GPL-2.0 */
/*---------------------------------------------------------------------------+
 |  round_Xsig.S                                                             |
 |                                                                           |
 | Copyright (C) 1992,1993,1994,1995                                         |
 |                       W. Metzenthen, 22 Parker St, Ormond, Vic 3163,      |
 |                       Australia.  E-mail billm@jacobi.maths.monash.edu.au |
 |                                                                           |
 | Normalize and round a 12 byte quantity.                                   |
 | Call from C as:                                                           |
 |   int round_Xsig(Xsig *n)                                                 |
 |                                                                           |
 | Normalize a 12 byte quantity.                                             |
 | Call from C as:                                                           |
 |   int norm_Xsig(Xsig *n)                                                  |
 |                                                                           |
 | Each function returns the size of the shift (nr of bits).                 |
 |                                                                           |
 +---------------------------------------------------------------------------*/
	.file	"round_Xsig.S"

#include "fpu_emu.h"


.text
SYM_FUNC_START(round_Xsig)
	pushl	%ebp
	movl	%esp,%ebp
	pushl	%ebx		/* Reserve some space */
	pushl	%ebx
	pushl	%esi

	movl	PARAM1,%esi

	movl	8(%esi),%edx
	movl	4(%esi),%ebx
	movl	(%esi),%eax

	movl	$0,-4(%ebp)

	orl	%edx,%edx	/* ms bits */
	js	L_round		/* Already normalized */
	jnz	L_shift_1	/* Shift left 1 - 31 bits */

	movl	%ebx,%edx
	movl	%eax,%ebx
	xorl	%eax,%eax
	movl	$-32,-4(%ebp)

/* We need to shift left by 1 - 31 bits */
L_shift_1:
	bsrl	%edx,%ecx	/* get the required shift in %ecx */
	subl	$31,%ecx
	negl	%ecx
	subl	%ecx,-4(%ebp)
	shld	%cl,%ebx,%edx
	shld	%cl,%eax,%ebx
	shl	%cl,%eax

L_round:
	testl	$0x80000000,%eax
	jz	L_exit

	addl	$1,%ebx
	adcl	$0,%edx
	jnz	L_exit

	movl	$0x80000000,%edx
	incl	-4(%ebp)

L_exit:
	movl	%edx,8(%esi)
	movl	%ebx,4(%esi)
	movl	%eax,(%esi)

	movl	-4(%ebp),%eax

	popl	%esi
	popl	%ebx
	leave
	RET
SYM_FUNC_END(round_Xsig)



SYM_FUNC_START(norm_Xsig)
	pushl	%ebp
	movl	%esp,%ebp
	pushl	%ebx		/* Reserve some space */
	pushl	%ebx
	pushl	%esi

	movl	PARAM1,%esi
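
	/* Fetch the operand: most significant word -> %edx,
	   middle word -> %ebx, least significant word -> %eax */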
	movl	8(%esi),%edx
	movl	4(%esi),%ebx
	movl	(%esi),%eax

	movl	$0,-4(%ebp)

	orl	%edx,%edx	/* ms bits */
	js	L_n_exit	/* Already normalized */
	jnz	L_n_shift_1	/* Shift left 1 - 31 bits */

	movl	%ebx,%edx
	movl	%eax,%ebx
	xorl	%eax,%eax
	movl	$-32,-4(%ebp)

	orl	%edx,%edx	/* ms bits */
	js	L_n_exit	/* Normalized now */
	jnz	L_n_shift_1	/* Shift left 1 - 31 bits */

	movl	%ebx,%edx
	movl	%eax,%ebx
	xorl	%eax,%eax
	addl	$-32,-4(%ebp)
	jmp	L_n_exit	/* Might not be normalized,
				   but shift no more. */

/* We need to shift left by 1 - 31 bits */
L_n_shift_1:
	bsrl	%edx,%ecx	/* get the required shift in %ecx */
	subl	$31,%ecx
	negl	%ecx
	subl	%ecx,-4(%ebp)
	shld	%cl,%ebx,%edx
	shld	%cl,%eax,%ebx
	shl	%cl,%eax

L_n_exit:
	movl	%edx,8(%esi)
	movl	%ebx,4(%esi)
	movl	%eax,(%esi)

	movl	-4(%ebp),%eax

	popl	%esi
	popl	%ebx
	leave
	RET
SYM_FUNC_END(norm_Xsig)
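
/* Usage sketch (illustrative only, not part of the emulator): one way the
   routines above might be driven from C, assuming an Xsig that holds the
   three 32-bit words least-significant first, matching the loads and stores
   above; the variable names are hypothetical.

	Xsig accum;
	int shift;

	shift = norm_Xsig(&accum);	// left-justify; returns the shift size in bits
	shift = round_Xsig(&accum);	// normalize, then round to the upper 64 bits
*/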