/* SPDX-License-Identifier: GPL-2.0 */
/*---------------------------------------------------------------------------+
 |  poly.h                                                                   |
 |                                                                           |
 |  Header file for the FPU-emu poly*.c source files.                        |
 |                                                                           |
 |  Copyright (C) 1994,1999                                                  |
 |                    W. Metzenthen, 22 Parker St, Ormond, Vic 3163,         |
 |                    Australia.  E-mail   billm@melbpc.org.au               |
 |                                                                           |
 |  Declarations and definitions for functions operating on Xsig (12-byte    |
 |  extended-significand) quantities.                                        |
 |                                                                           |
 +---------------------------------------------------------------------------*/

#ifndef _POLY_H
#define _POLY_H

/* This 12-byte structure is used to improve the accuracy of computation
   of transcendental functions.
   Intended to be used to get results better than 8-byte computation
   allows. 9-byte would probably be sufficient.
   */
typedef struct {
	unsigned long lsw;
	unsigned long midw;
	unsigned long msw;
} Xsig;

asmlinkage void mul64(unsigned long long const *a, unsigned long long const *b,
		      unsigned long long *result);
asmlinkage void polynomial_Xsig(Xsig *, const unsigned long long *x,
				const unsigned long long terms[], const int n);

asmlinkage void mul32_Xsig(Xsig *, const unsigned long mult);
asmlinkage void mul64_Xsig(Xsig *, const unsigned long long *mult);
asmlinkage void mul_Xsig_Xsig(Xsig *dest, const Xsig *mult);

asmlinkage void shr_Xsig(Xsig *, const int n);
asmlinkage int round_Xsig(Xsig *);
asmlinkage int norm_Xsig(Xsig *);
asmlinkage void div_Xsig(Xsig *x1, const Xsig *x2, const Xsig *dest);

/* Macro to extract the most significant 32 bits from a long long */
#define LL_MSW(x) (((unsigned long *)&x)[1])

/* Macro to initialize an Xsig struct */
#define MK_XSIG(a,b,c) { c, b, a }

/* Macro to access the 8 ms bytes of an Xsig as a long long */
#define XSIG_LL(x) (*(unsigned long long *)&x.midw)
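
/* A hypothetical usage sketch (not part of the original interface) showing
   how the macros above fit together: MK_XSIG() lists the 32-bit words most
   significant first, while LL_MSW() and XSIG_LL() both rely on the
   little-endian word layout of the i386. */
static inline unsigned long Xsig_macro_example(void)
{
	Xsig x = MK_XSIG(0x80000000, 0, 0);	/* msw, midw, lsw */
	unsigned long long top64 = XSIG_LL(x);	/* 8 ms bytes: 0x8000000000000000 */

	return LL_MSW(top64);			/* ms 32 bits: 0x80000000 */
}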

/*
   Need to run gcc with optimizations on to get these to
   actually be in-line.
   */

/* Multiply two fixed-point 32 bit numbers, producing a 32 bit result.
   The answer is the ms word of the product. */
/* Some versions of gcc make it difficult to stop eax from being clobbered.
   Merely specifying that it is used doesn't work...
   */
static inline unsigned long mul_32_32(const unsigned long arg1,
				      const unsigned long arg2)
{
	int retval;
	asm volatile ("mull %2; movl %%edx,%%eax"
		      :"=a" (retval)
		      :"0"(arg1), "g"(arg2)
		      :"dx");
	return retval;
}
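
/* An illustrative, portable-C sketch (for documentation only, not used by
   the emulator) of what mul_32_32() above computes: the most significant
   32 bits of the 64-bit product of two 32-bit fixed-point values. */
static inline unsigned long mul_32_32_ref(const unsigned long arg1,
					  const unsigned long arg2)
{
	return (unsigned long)(((unsigned long long)arg1 * arg2) >> 32);
}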

/* Add the 12 byte Xsig x2 to Xsig dest, with no checks for overflow. */
static inline void add_Xsig_Xsig(Xsig *dest, const Xsig *x2)
{
	asm volatile ("movl %1,%%edi; movl %2,%%esi;\n"
		      "movl (%%esi),%%eax; addl %%eax,(%%edi);\n"
		      "movl 4(%%esi),%%eax; adcl %%eax,4(%%edi);\n"
		      "movl 8(%%esi),%%eax; adcl %%eax,8(%%edi);\n"
		      :"=g" (*dest)
		      :"g"(dest), "g"(x2)
		      :"ax", "si", "di");
}
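
/* An illustrative, portable-C sketch (for documentation only, not used by
   the emulator) of the 96-bit addition performed by add_Xsig_Xsig() above;
   as with the asm version, any carry out of the top word is simply lost. */
static inline void add_Xsig_Xsig_ref(Xsig *dest, const Xsig *x2)
{
	unsigned long long sum;

	sum = (unsigned long long)dest->lsw + x2->lsw;
	dest->lsw = (unsigned long)sum;
	sum = (unsigned long long)dest->midw + x2->midw + (sum >> 32);
	dest->midw = (unsigned long)sum;
	dest->msw += x2->msw + (unsigned long)(sum >> 32);
}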

/* Add the 12 byte Xsig x2 to Xsig dest, adjust exp if overflow occurs. */
/* Note: the constraints in the asm statement didn't always work properly
   with gcc 2.5.8.  Changing from using edi to using ecx got around the
   problem, but keep fingers crossed! */
static inline void add_two_Xsig(Xsig *dest, const Xsig *x2, long int *exp)
{
	asm volatile ("movl %2,%%ecx; movl %3,%%esi;\n"
		      "movl (%%esi),%%eax; addl %%eax,(%%ecx);\n"
		      "movl 4(%%esi),%%eax; adcl %%eax,4(%%ecx);\n"
		      "movl 8(%%esi),%%eax; adcl %%eax,8(%%ecx);\n"
		      "jnc 0f;\n"
		      "rcrl 8(%%ecx); rcrl 4(%%ecx); rcrl (%%ecx)\n"
		      "movl %4,%%ecx; incl (%%ecx)\n"
		      "movl $1,%%eax; jmp 1f;\n"
		      "0: xorl %%eax,%%eax;\n"
		      "1:\n"
		      :"=g" (*exp), "=g"(*dest)
		      :"g"(dest), "g"(x2), "g"(exp)
		      :"cx", "si", "ax");
}
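
/* An illustrative, portable-C sketch (for documentation only, not used by
   the emulator) of what add_two_Xsig() above does: a 96-bit add and, if the
   sum overflows, the 97-bit result is shifted right one place (the carry
   becoming the new top bit) and the exponent is incremented to compensate. */
static inline void add_two_Xsig_ref(Xsig *dest, const Xsig *x2, long int *exp)
{
	unsigned long long sum;

	sum = (unsigned long long)dest->lsw + x2->lsw;
	dest->lsw = (unsigned long)sum;
	sum = (unsigned long long)dest->midw + x2->midw + (sum >> 32);
	dest->midw = (unsigned long)sum;
	sum = (unsigned long long)dest->msw + x2->msw + (sum >> 32);
	dest->msw = (unsigned long)sum;

	if (sum >> 32) {	/* carry out of the most significant word */
		dest->lsw = (dest->lsw >> 1) | (dest->midw << 31);
		dest->midw = (dest->midw >> 1) | (dest->msw << 31);
		dest->msw = (dest->msw >> 1) | 0x80000000;
		(*exp)++;
	}
}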

/* Negate (subtract from 1.0) the 12 byte Xsig */
/* This is faster in a loop on my 386 than using the "neg" instruction. */
static inline void negate_Xsig(Xsig *x)
{
	asm volatile ("movl %1,%%esi;\n"
		      "xorl %%ecx,%%ecx;\n"
		      "movl %%ecx,%%eax; subl (%%esi),%%eax; movl %%eax,(%%esi);\n"
		      "movl %%ecx,%%eax; sbbl 4(%%esi),%%eax; movl %%eax,4(%%esi);\n"
		      "movl %%ecx,%%eax; sbbl 8(%%esi),%%eax; movl %%eax,8(%%esi);\n"
		      :"=g" (*x)
		      :"g"(x)
		      :"si", "ax", "cx");
}
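
/* An illustrative, portable-C sketch (for documentation only, not used by
   the emulator) of what negate_Xsig() above computes: the two's-complement
   negation of the 96-bit significand, i.e. 0 - x, with the borrow
   propagating towards the most significant word. */
static inline void negate_Xsig_ref(Xsig *x)
{
	unsigned long long low = ((unsigned long long)x->midw << 32) | x->lsw;
	unsigned long borrow = (low != 0);

	low = 0ULL - low;		/* negate the low 64 bits */
	x->lsw = (unsigned long)low;
	x->midw = (unsigned long)(low >> 32);
	x->msw = 0UL - x->msw - borrow;	/* propagate the borrow upwards */
}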

#endif /* _POLY_H */