/optee_os/lib/libutils/isoc/arch/arm/softfloat/source/
f128_rem.c, matches in f128_rem():
     70: uiA64 = uA.ui.v64;
     74: sigA.v64 = fracF128UI64( uiA64 );
     77: uiB64 = uB.ui.v64;
     81: sigB.v64 = fracF128UI64( uiB64 );
     87: (sigA.v64 | sigA.v0) || ((expB == 0x7FFF) && (sigB.v64 | sigB.v0))
     94: if ( sigB.v64 | sigB.v0 ) goto propagateNaN;
    100: if ( ! (sigB.v64 | sigB.v0) ) goto invalid;
    101: normExpSig = softfloat_normSubnormalF128Sig( sigB.v64, sigB.v0 );
    106: if ( ! (sigA.v64 | sigA.v0) ) return a;
    107: normExpSig = softfloat_normSubnormalF128Sig( sigA.v64, sigA.v0 );
    [all …]
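The v64/v0 pair that recurs in every match above is SoftFloat's two-word representation of a 128-bit value: v64 holds the high 64 bits, v0 the low 64. A minimal sketch of the layout and the accessor macros these snippets call, reconstructed from the visible usage (field order and macro bodies are assumptions, not copied from this tree):

    #include <stdbool.h>
    #include <stdint.h>

    struct uint128 { uint64_t v0, v64; };  /* low word first; assumed layout */

    /* float128 packs sign/exponent/fraction into the high word (v64):
     *   bit 63       sign
     *   bits 62..48  15-bit biased exponent (0x7FFF = Inf/NaN)
     *   bits 47..0   top 48 of the 112 fraction bits (the rest sit in v0) */
    #define signF128UI64( a64 ) ((bool) ((uint64_t) (a64)>>63))
    #define expF128UI64( a64 )  ((int_fast32_t) ((a64)>>48) & 0x7FFF)
    #define fracF128UI64( a64 ) ((a64) & UINT64_C( 0x0000FFFFFFFFFFFF ))
    #define packToF128UI64( sign, exp, sig64 ) \
        (((uint_fast64_t) (sign)<<63) + ((uint_fast64_t) (exp)<<48) + (sig64))

This also explains the ubiquitous `sigA.v64 | sigA.v0` test: a float128 fraction is nonzero iff either word is nonzero, which is how the checks above tell a NaN (nonzero fraction) from an infinity (zero fraction) when the exponent field is 0x7FFF.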
|
s_mulAddF128.c, matches in softfloat_mulAddF128():
     83: sigA.v64 = fracF128UI64( uiA64 );
     87: sigB.v64 = fracF128UI64( uiB64 );
     91: sigC.v64 = fracF128UI64( uiC64 );
     98: (sigA.v64 | sigA.v0) || ((expB == 0x7FFF) && (sigB.v64 | sigB.v0))
    102: magBits = expB | sigB.v64 | sigB.v0;
    106: if ( sigB.v64 | sigB.v0 ) goto propagateNaN_ABC;
    107: magBits = expA | sigA.v64 | sigA.v0;
    111: if ( sigC.v64 | sigC.v0 ) {
    112: uiZ.v64 = 0;
    116: uiZ.v64 = uiC64;
    [all …]
|
f128_div.c, matches in f128_div():
     74: uiA64 = uA.ui.v64;
     78: sigA.v64 = fracF128UI64( uiA64 );
     81: uiB64 = uB.ui.v64;
     85: sigB.v64 = fracF128UI64( uiB64 );
     91: if ( sigA.v64 | sigA.v0 ) goto propagateNaN;
     93: if ( sigB.v64 | sigB.v0 ) goto propagateNaN;
     99: if ( sigB.v64 | sigB.v0 ) goto propagateNaN;
    105: if ( ! (sigB.v64 | sigB.v0) ) {
    106: if ( ! (expA | sigA.v64 | sigA.v0) ) goto invalid;
    110: normExpSig = softfloat_normSubnormalF128Sig( sigB.v64, sigB.v0 );
    [all …]
|
s_subMagsF128.c, matches in softfloat_subMagsF128():
     63: sigA.v64 = fracF128UI64( uiA64 );
     66: sigB.v64 = fracF128UI64( uiB64 );
     68: sigA = softfloat_shortShiftLeft128( sigA.v64, sigA.v0, 4 );
     69: sigB = softfloat_shortShiftLeft128( sigB.v64, sigB.v0, 4 );
     74: if ( sigA.v64 | sigA.v0 | sigB.v64 | sigB.v0 ) goto propagateNaN;
     76: uiZ.v64 = defaultNaNF128UI64;
     82: if ( sigB.v64 < sigA.v64 ) goto aBigger;
     83: if ( sigA.v64 < sigB.v64 ) goto bBigger;
     86: uiZ.v64 =
     93: if ( sigB.v64 | sigB.v0 ) goto propagateNaN;
    [all …]
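Lines 68 and 69 shift both significands left by 4 to open up guard bits before the magnitude subtraction. A plausible sketch of softfloat_shortShiftLeft128, reconstructed from its call sites here (an assumption, not this tree's code; valid for shift distances strictly between 0 and 64):

    #include <stdint.h>

    struct uint128 { uint64_t v0, v64; };

    /* Shift a 128-bit value left by dist (0 < dist < 64): the bits that
     * leave the low word enter the bottom of the high word. */
    static struct uint128
    softfloat_shortShiftLeft128( uint64_t a64, uint64_t a0, unsigned dist )
    {
        struct uint128 z;
        z.v64 = a64<<dist | a0>>(64 - dist);
        z.v0  = a0<<dist;
        return z;
    }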
|
f128_sqrt.c, matches in f128_sqrt():
     67: uiA64 = uA.ui.v64;
     71: sigA.v64 = fracF128UI64( uiA64 );
     76: if ( sigA.v64 | sigA.v0 ) {
     86: if ( ! (expA | sigA.v64 | sigA.v0) ) return a;
     92: if ( ! (sigA.v64 | sigA.v0) ) return a;
     93: normExpSig = softfloat_normSubnormalF128Sig( sigA.v64, sigA.v0 );
    104: sigA.v64 |= UINT64_C( 0x0001000000000000 );
    105: sig32A = sigA.v64>>17;
    110: rem = softfloat_shortShiftLeft128( sigA.v64, sigA.v0, 12 );
    112: rem = softfloat_shortShiftLeft128( sigA.v64, sigA.v0, 13 );
    [all …]
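Line 104 makes the implicit leading 1 explicit: a normal float128 significand is 1.f with a 112-bit fraction, so the integer bit lands at bit 112 of the full significand, which is bit 48 of v64, hence the constant 0x0001000000000000. Line 105 then keeps the top 32 bits of that 49-bit high word as a first approximation (49 - 17 = 32), seeding the 32-bit estimate that the rest of the function refines.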
|
f128_roundToInt.c, matches in f128_roundToInt():
     59: uiA64 = uA.ui.v64;
     78: uiZ.v64 = uiA64;
     84: ++uiZ.v64;
     89: uiZ.v64 &= ~1;
     93: uiZ = softfloat_add128( uiZ.v64, uiZ.v0, 0, lastBitMask>>1 );
    100: signF128UI64( uiZ.v64 ) ^ (roundingMode == softfloat_round_max)
    102: uiZ = softfloat_add128( uiZ.v64, uiZ.v0, 0, roundBitsMask );
    114: uiZ.v64 = uiA64 & packToF128UI64( 1, 0, 0 );
    120: if ( exp == 0x3FFE ) uiZ.v64 |= packToF128UI64( 0, 0x3FFF, 0 );
    123: if ( uiZ.v64 ) uiZ.v64 = packToF128UI64( 1, 0x3FFF, 0 );
    [all …]
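The lastBitMask/roundBitsMask pattern visible in lines 84 through 102 is ordinary mask-based integer rounding applied to the 128-bit payload: add half a step, clear the low bits, and on an exact tie force the kept bit even. A self-contained sketch of the same idea on a single 64-bit word (illustrative only, not this file's code; assumes lastBitMask is a power of two):

    #include <stdint.h>

    /* Round v to a multiple of lastBitMask, nearest, ties to even. */
    static uint64_t round_near_even( uint64_t v, uint64_t lastBitMask )
    {
        uint64_t roundBitsMask = lastBitMask - 1;
        v += lastBitMask>>1;                             /* add half a step  */
        if ( ! (v & roundBitsMask) ) v &= ~lastBitMask;  /* tie: force even  */
        return v & ~roundBitsMask;                       /* drop round bits  */
    }

For example, round_near_even(5, 2) yields 4: 5 is exactly halfway between 4 and 6, and the tie resolves toward the even quotient, matching the `++uiZ.v64` / `uiZ.v64 &= ~1` pair above.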
|
s_addMagsF128.c, matches in softfloat_addMagsF128():
     65: sigA.v64 = fracF128UI64( uiA64 );
     68: sigB.v64 = fracF128UI64( uiB64 );
     73: if ( sigA.v64 | sigA.v0 | sigB.v64 | sigB.v0 ) goto propagateNaN;
     74: uiZ.v64 = uiA64;
     78: sigZ = softfloat_add128( sigA.v64, sigA.v0, sigB.v64, sigB.v0 );
     80: uiZ.v64 = packToF128UI64( signZ, 0, sigZ.v64 );
     85: sigZ.v64 |= UINT64_C( 0x0002000000000000 );
     91: if ( sigB.v64 | sigB.v0 ) goto propagateNaN;
     92: uiZ.v64 = packToF128UI64( signZ, 0x7FFF, 0 );
     98: sigA.v64 |= UINT64_C( 0x0001000000000000 );
    [all …]
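Line 78 relies on softfloat_add128, a 128-bit add whose carry is detected through unsigned wraparound. A sketch consistent with the calls above (an assumption, not copied from this tree):

    #include <stdint.h>

    struct uint128 { uint64_t v0, v64; };

    /* 128-bit add: sum the low words first; a wrapped result
     * (z.v0 < a0) contributes a carry into the high words. */
    static struct uint128
    softfloat_add128( uint64_t a64, uint64_t a0, uint64_t b64, uint64_t b0 )
    {
        struct uint128 z;
        z.v0  = a0 + b0;
        z.v64 = a64 + b64 + (z.v0 < a0);
        return z;
    }

The two constants also line up with the layout: line 98 restores a single implicit bit at bit 48 of v64, while line 85 sets bit 49, one place higher, because adding two 1.f significands of equal exponent yields a sum of the form 1x.f.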
|
f128_mul.c, matches in f128_mul():
     71: uiA64 = uA.ui.v64;
     75: sigA.v64 = fracF128UI64( uiA64 );
     78: uiB64 = uB.ui.v64;
     82: sigB.v64 = fracF128UI64( uiB64 );
     89: (sigA.v64 | sigA.v0) || ((expB == 0x7FFF) && (sigB.v64 | sigB.v0))
     93: magBits = expB | sigB.v64 | sigB.v0;
     97: if ( sigB.v64 | sigB.v0 ) goto propagateNaN;
     98: magBits = expA | sigA.v64 | sigA.v0;
    104: if ( ! (sigA.v64 | sigA.v0) ) goto zero;
    105: normExpSig = softfloat_normSubnormalF128Sig( sigA.v64, sigA.v0 );
    [all …]
|
extF80_rem.c, matches in extF80_rem():
    140: rem.v64, rem.v0, shiftedSigB.v64, shiftedSigB.v0 );
    147: q64 = (uint_fast64_t) (uint32_t) (rem.v64>>2) * recip32;
    150: rem = softfloat_shortShiftLeft128( rem.v64, rem.v0, 29 );
    152: rem = softfloat_sub128( rem.v64, rem.v0, term.v64, term.v0 );
    153: if ( rem.v64 & UINT64_C( 0x8000000000000000 ) ) {
    156: rem.v64, rem.v0, shiftedSigB.v64, shiftedSigB.v0 );
    164: rem = softfloat_shortShiftLeft128( rem.v64, rem.v0, expDiff + 30 );
    166: rem = softfloat_sub128( rem.v64, rem.v0, term.v64, term.v0 );
    167: if ( rem.v64 & UINT64_C( 0x8000000000000000 ) ) {
    170: rem.v64, rem.v0, shiftedSigB.v64, shiftedSigB.v0 );
    [all …]
|
extF80_div.c, matches in extF80_div():
    137: q64 = (uint_fast64_t) (uint32_t) (rem.v64>>2) * recip32;
    141: rem = softfloat_shortShiftLeft128( rem.v64, rem.v0, 29 );
    143: rem = softfloat_sub128( rem.v64, rem.v0, term.v64, term.v0 );
    144: if ( rem.v64 & UINT64_C( 0x8000000000000000 ) ) {
    146: rem = softfloat_add128( rem.v64, rem.v0, sigB>>32, sigB<<32 );
    153: rem = softfloat_shortShiftLeft128( rem.v64, rem.v0, 29 );
    155: rem = softfloat_sub128( rem.v64, rem.v0, term.v64, term.v0 );
    157: if ( rem.v64 & UINT64_C( 0x8000000000000000 ) ) {
    159: rem = softfloat_add128( rem.v64, rem.v0, term.v64, term.v0 );
    160: } else if ( softfloat_le128( term.v64, term.v0, rem.v64, rem.v0 ) ) {
    [all …]
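Lines 144 through 159 show the recurring estimate-and-fix-up step: a quotient digit computed from a truncated 32-bit reciprocal can overshoot by one, which shows up as a negative remainder (sign bit set in two's complement), and is repaired by stepping the digit back and adding the divisor in again. A standalone toy demo of that pattern on 64-bit integers (not SoftFloat code; the overshoot is forced deliberately):

    #include <stdint.h>
    #include <stdio.h>

    int main( void )
    {
        uint64_t a = 1000003, b = 97;
        uint64_t q = a / b + 1;       /* deliberately overshoot by one      */
        uint64_t rem = a - q * b;     /* wraps "negative" in uint64_t       */
        if ( rem & UINT64_C( 0x8000000000000000 ) ) {  /* sign-bit test     */
            --q;                      /* step the digit back                */
            rem += b;                 /* restore the divisor                */
        }
        printf( "q=%llu rem=%llu\n",
                (unsigned long long) q, (unsigned long long) rem );
        return 0;
    }

This prints q=10309 rem=30, the true quotient and remainder, mirroring the `--q; rem = softfloat_add128(...)` fixups above.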
|
extF80_sqrt.c, matches in extF80_sqrt():
     78: uiZ64 = uiZ.v64;
    116: rem.v64 -= (uint_fast64_t) sig32Z * sig32Z;
    119: q = ((uint_fast64_t) (uint32_t) (rem.v64>>2) * recipSqrt32)>>32;
    123: rem = softfloat_shortShiftLeft128( rem.v64, rem.v0, 29 );
    124: rem = softfloat_sub128( rem.v64, rem.v0, term.v64, term.v0 );
    127: q = (((uint_fast64_t) (uint32_t) (rem.v64>>2) * recipSqrt32)>>32) + 2;
    138: term = softfloat_add128( term.v64, term.v0, 0, x64 );
    139: rem = softfloat_shortShiftLeft128( rem.v64, rem.v0, 28 );
    140: rem = softfloat_sub128( rem.v64, rem.v0, term.v64, term.v0 );
    141: if ( rem.v64 & UINT64_C( 0x8000000000000000 ) ) {
    [all …]
|
s_mulAddF64.c, matches in softfloat_mulAddF64():
    123: if ( sig128Z.v64 < UINT64_C( 0x2000000000000000 ) ) {
    127: sig128Z.v64, sig128Z.v0, sig128Z.v64, sig128Z.v0 );
    132: sigZ = sig128Z.v64<<1 | (sig128Z.v0 != 0);
    146: sig128Z.v64 = softfloat_shiftRightJam64( sig128Z.v64, -expDiff );
    149: softfloat_shortShiftRightJam128( sig128Z.v64, sig128Z.v0, 1 );
    160: sigZ = (sigC + sig128Z.v64) | (sig128Z.v0 != 0);
    164: sig128Z.v64, sig128Z.v0, sig128C.v64, sig128C.v0 );
    165: sigZ = sig128Z.v64 | (sig128Z.v0 != 0);
    176: sig128Z = softfloat_sub128( sigC, 0, sig128Z.v64, sig128Z.v0 );
    178: sig128Z.v64 = sig128Z.v64 - sigC;
    [all …]
|
s_mul128To256M.c, matches in softfloat_mul128To256M():
     56: z64 = p64.v0 + p0.v64;
     57: z128 = p64.v64 + (z64 < p64.v0);
     60: z192 = p128.v64 + (z128 < p128.v0);
     64: p64.v64 += (z64 < p64.v0);
     65: z128 += p64.v64;
     67: zPtr[indexWord( 4, 3 )] = z192 + (z128 < p64.v64);
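The comparisons on lines 56 through 67 are carry detection via unsigned wraparound: after z = a + b in uint64_t arithmetic, z < a holds exactly when the addition overflowed 64 bits. For instance, with a = 2^64 - 1 and b = 1 the sum wraps to 0, and 0 < a records the carry that must propagate into the next word of the 256-bit product being assembled from 64-bit partial products.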
|
f64_to_f128.c, matches in f64_to_f128():
     68: uiZ.v64 = packToF128UI64( sign, 0x7FFF, 0 );
     75: uiZ.v64 = packToF128UI64( sign, 0, 0 );
     84: uiZ.v64 = packToF128UI64( sign, exp + 0x3C00, sig128.v64 );
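The `exp + 0x3C00` on line 84 is the bias change from float64 to float128: 0x3FFF - 0x3FF = 0x3C00 (16383 - 1023 = 15360). Lines 68 and 75 handle the special exponents (Inf/NaN and zero) separately, since those encodings do not rebias.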
|
ui64_to_f128.c, matches in ui64_to_f128():
     56: zSig.v64 = a<<(shiftCount - 64);
     61: uiZ64 = packToF128UI64( 0, 0x406E - shiftCount, zSig.v64 );
     64: uZ.ui.v64 = uiZ64;
|
f128_to_extF80.c, matches in f128_to_extF80():
     61: uiA64 = uA.ui.v64;
     71: uiZ64 = uiZ.v64;
     87: sig64 = normExpSig.sig.v64;
     93: return softfloat_roundPackToExtF80( sign, exp, sig128.v64, sig128.v0, 80 );
|
f128_mulAdd.c, matches in f128_mulAdd():
     53: uiA64 = uA.ui.v64;
     56: uiB64 = uB.ui.v64;
     59: uiC64 = uC.ui.v64;
|
s_mul64To128.c, matches in softfloat_mul64To128():
     57: z.v64 = (uint_fast64_t) a32 * b32;
     58: z.v64 += (uint_fast64_t) (mid < mid1)<<32 | mid>>32;
     61: z.v64 += (z.v0 < mid);
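Lines 57, 58, and 61 are the tail of a classic 64x64-to-128-bit multiply built from four 32x32 partial products. A plausible full reconstruction consistent with the visible lines (an assumption, not this tree's code; the mid/mid1 names come from the snippet):

    #include <stdint.h>

    struct uint128 { uint64_t v0, v64; };

    static struct uint128 softfloat_mul64To128( uint64_t a, uint64_t b )
    {
        uint32_t a32 = a>>32, a0 = (uint32_t) a;
        uint32_t b32 = b>>32, b0 = (uint32_t) b;
        struct uint128 z;
        uint64_t mid1, mid;

        z.v0  = (uint64_t) a0 * b0;           /* low x low                  */
        mid1  = (uint64_t) a32 * b0;          /* cross products; their sum  */
        mid   = mid1 + (uint64_t) a0 * b32;   /* may carry (line 58's test) */
        z.v64 = (uint64_t) a32 * b32;         /* high x high (line 57)      */
        z.v64 += (uint64_t) (mid < mid1)<<32 | mid>>32;
        mid <<= 32;
        z.v0 += mid;
        z.v64 += (z.v0 < mid);                /* low-word carry (line 61)   */
        return z;
    }

The cross-product sum contributes at bit 32 of the result, so its overflow (mid < mid1) lands at bit 96, i.e. bit 32 of the high word.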
|
i64_to_f128.c, matches in i64_to_f128():
     60: zSig.v64 = absA<<(shiftCount - 64);
     65: uiZ64 = packToF128UI64( sign, 0x406E - shiftCount, zSig.v64 );
     68: uZ.ui.v64 = uiZ64;
|
extF80_mul.c, matches in extF80_mul():
    120: if ( sig128Z.v64 < UINT64_C( 0x8000000000000000 ) ) {
    124: sig128Z.v64, sig128Z.v0, sig128Z.v64, sig128Z.v0 );
    128: signZ, expZ, sig128Z.v64, sig128Z.v0, extF80_roundingPrecision );
    133: uiZ64 = uiZ.v64;
|
s_normRoundPackToF128.c, matches in softfloat_normRoundPackToF128():
     63: sig64 = sig128.v64;
     67: uZ.ui.v64 = packToF128UI64( sign, sig64 | sig0 ? exp : 0, sig64 );
     76: sig64 = sig128Extra.v.v64;
|
f32_to_f128.c, matches in f32_to_f128():
     67: uiZ.v64 = packToF128UI64( sign, 0x7FFF, 0 );
     74: uiZ.v64 = packToF128UI64( sign, 0, 0 );
     82: uiZ.v64 = packToF128UI64( sign, exp + 0x3F80, (uint_fast64_t) sig<<25 );
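Same rebias pattern for float32: 0x3FFF - 0x7F = 0x3F80 (16383 - 127 = 16256), and the 23 fraction bits are shifted left by 25 to align with the 48 fraction bits held in the high word (48 - 23 = 25); the remaining low fraction bits of the float128 result are zero.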
|
/optee_os/lib/libutils/isoc/arch/arm/softfloat/source/8086-SSE/
s_propagateNaNF128UI.c, matches in softfloat_propagateNaNF128UI():
     72: uiZ.v64 = uiA64;
     75: uiZ.v64 = uiB64;
     78: uiZ.v64 |= UINT64_C( 0x0000800000000000 );
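Line 78 quiets a signaling NaN: in this encoding convention the most significant fraction bit (bit 47 of the high word, i.e. 0x0000800000000000) is the is-quiet flag, so OR-ing it into the selected operand yields the quiet NaN that is returned. A small illustration (the helper name is hypothetical, not this tree's API):

    #include <stdint.h>

    #define F128_UI64_QUIET_BIT UINT64_C( 0x0000800000000000 )  /* frac MSB */

    /* Turn the high word of a float128 NaN into its quiet form. */
    static uint64_t quietF128UI64( uint64_t uiNaN64 )
    {
        return uiNaN64 | F128_UI64_QUIET_BIT;
    }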
|
s_commonNaNToExtF80UI.c, matches in softfloat_commonNaNToExtF80UI():
     52: uiZ.v64 = (uint_fast16_t) aPtr->sign<<15 | 0x7FFF;
     53: uiZ.v0 = UINT64_C( 0xC000000000000000 ) | aPtr->v64>>1;
|
/optee_os/lib/libutils/isoc/arch/arm/softfloat/source/8086/
s_commonNaNToExtF80UI.c, matches in softfloat_commonNaNToExtF80UI():
     52: uiZ.v64 = (uint_fast16_t) aPtr->sign<<15 | 0x7FFF;
     53: uiZ.v0 = UINT64_C( 0xC000000000000000 ) | aPtr->v64>>1;
|