/optee_os/core/arch/arm/crypto/
aes_modes_armv8a_ce_a64.S
    152: encrypt_block2x v0, v1, w3, x2, x6, w7
    157: decrypt_block2x v0, v1, w3, x2, x6, w7
    164: encrypt_block4x v0, v1, v2, v3, w3, x2, x6, w7
    169: decrypt_block4x v0, v1, v2, v3, w3, x2, x6, w7
    198: encrypt_block2x v0, v1, w3, x2, x6, w7
    202: decrypt_block2x v0, v1, w3, x2, x6, w7
    206: encrypt_block4x v0, v1, v2, v3, w3, x2, x6, w7
    210: decrypt_block4x v0, v1, v2, v3, w3, x2, x6, w7
    221: movi v0.16b, #0
    222: aese v0.16b, v1.16b
    [all …]

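Note: the encrypt_block2x/4x and decrypt_block2x/4x macros above chain the ARMv8-A Crypto Extension AES instructions while keeping two or four blocks in flight. For reference, one middle round of AES encryption is the AESE (AddRoundKey + SubBytes + ShiftRows) followed by AESMC (MixColumns) pairing; the C sketch below shows that pairing with the ACLE NEON intrinsics and is only an illustration, not the .S file's code.

```c
#include <arm_neon.h>

/*
 * One AES middle round on a 128-bit block, built from the Crypto Extension
 * intrinsics.  Compile for a target with the AES extension, e.g.
 * -march=armv8-a+crypto.
 */
static inline uint8x16_t aes_middle_round(uint8x16_t state, uint8x16_t round_key)
{
    return vaesmcq_u8(vaeseq_u8(state, round_key));
}
```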
sm3_armv8a_ce_a64.S
     88: 0: ld1 {v0.16b-v3.16b}, [x1], #64
     94: rev32 v0.16b, v0.16b
    101: qround a, v0, v1, v2, v3, v4
    102: qround a, v1, v2, v3, v4, v0
    103: qround a, v2, v3, v4, v0, v1
    104: qround a, v3, v4, v0, v1, v2
    108: qround b, v4, v0, v1, v2, v3
    109: qround b, v0, v1, v2, v3, v4
    110: qround b, v1, v2, v3, v4, v0
    111: qround b, v2, v3, v4, v0, v1
    [all …]

sha3_armv8a_ce_a64.S
     48: ld1 {v0.1d-v3.1d}, [x0]
     63: eor v0.8b, v0.8b, v25.8b
    131: eor3 v25.16b, v0.16b, v5.16b, v10.16b
    145: eor v0.16b, v0.16b, v30.16b
    197: bcax v3.16b, v27.16b, v0.16b, v28.16b
    198: bcax v4.16b, v28.16b, v1.16b, v0.16b
    199: bcax v0.16b, v0.16b, v2.16b, v1.16b
    203: eor v0.16b, v0.16b, v31.16b
    209: st1 {v0.1d-v3.1d}, [x0], #32

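Note: the eor3/bcax hits above are the theta and chi steps of the Keccak-f[1600] permutation, processed two 64-bit lanes at a time with the SHA-3 extension. Assuming the usual reading of these instructions (EOR3 is a three-way XOR; BCAX n, m, a computes n ^ (m & ~a)), the per-lane operations reduce to the plain C below; this is a sketch of what the instructions compute, not the .S file's code.

```c
#include <stdint.h>

/* Three-way XOR, as used when accumulating the theta column parities. */
static inline uint64_t keccak_eor3(uint64_t a, uint64_t b, uint64_t c)
{
    return a ^ b ^ c;
}

/* "Bit clear and XOR", the chi step: a[x] ^= ~a[x + 1] & a[x + 2]. */
static inline uint64_t keccak_bcax(uint64_t n, uint64_t m, uint64_t a)
{
    return n ^ (m & ~a);
}
```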
/optee_os/lib/libutils/isoc/arch/arm/softfloat/source/
s_subMagsF128.c  (hits in softfloat_subMagsF128())
     64: sigA.v0 = uiA0;
     67: sigB.v0 = uiB0;
     68: sigA = softfloat_shortShiftLeft128( sigA.v64, sigA.v0, 4 );
     69: sigB = softfloat_shortShiftLeft128( sigB.v64, sigB.v0, 4 );
     74: if ( sigA.v64 | sigA.v0 | sigB.v64 | sigB.v0 ) goto propagateNaN;
     77: uiZ.v0 = defaultNaNF128UI0;
     84: if ( sigB.v0 < sigA.v0 ) goto aBigger;
     85: if ( sigA.v0 < sigB.v0 ) goto bBigger;
     89: uiZ.v0 = 0;
     93: if ( sigB.v64 | sigB.v0 ) goto propagateNaN;
    [all …]

s_mulAddF128.c  (hits in softfloat_mulAddF128())
     84: sigA.v0 = uiA0;
     88: sigB.v0 = uiB0;
     92: sigC.v0 = uiC0;
     98: (sigA.v64 | sigA.v0) || ((expB == 0x7FFF) && (sigB.v64 | sigB.v0))
    102: magBits = expB | sigB.v64 | sigB.v0;
    106: if ( sigB.v64 | sigB.v0 ) goto propagateNaN_ABC;
    107: magBits = expA | sigA.v64 | sigA.v0;
    111: if ( sigC.v64 | sigC.v0 ) {
    113: uiZ.v0 = 0;
    117: uiZ.v0 = uiC0;
    [all …]

f128_rem.c  (hits in f128_rem())
     71: uiA0 = uA.ui.v0;
     75: sigA.v0 = uiA0;
     78: uiB0 = uB.ui.v0;
     82: sigB.v0 = uiB0;
     87: (sigA.v64 | sigA.v0) || ((expB == 0x7FFF) && (sigB.v64 | sigB.v0))
     94: if ( sigB.v64 | sigB.v0 ) goto propagateNaN;
    100: if ( ! (sigB.v64 | sigB.v0) ) goto invalid;
    101: normExpSig = softfloat_normSubnormalF128Sig( sigB.v64, sigB.v0 );
    106: if ( ! (sigA.v64 | sigA.v0) ) return a;
    107: normExpSig = softfloat_normSubnormalF128Sig( sigA.v64, sigA.v0 );
    [all …]

s_addMagsF128.c  (hits in softfloat_addMagsF128())
     66: sigA.v0 = uiA0;
     69: sigB.v0 = uiB0;
     73: if ( sigA.v64 | sigA.v0 | sigB.v64 | sigB.v0 ) goto propagateNaN;
     75: uiZ.v0 = uiA0;
     78: sigZ = softfloat_add128( sigA.v64, sigA.v0, sigB.v64, sigB.v0 );
     81: uiZ.v0 = sigZ.v0;
     91: if ( sigB.v64 | sigB.v0 ) goto propagateNaN;
     93: uiZ.v0 = 0;
    105: softfloat_shiftRightJam128Extra( sigA.v64, sigA.v0, 0, -expDiff );
    110: if ( sigA.v64 | sigA.v0 ) goto propagateNaN;
    [all …]

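Note: the magnitude add/subtract paths above do their 113-bit significand arithmetic through 128-bit helpers such as softfloat_add128() and softfloat_sub128(). A minimal sketch of the carry/borrow idea behind those helpers, using the v0 (low word) / v64 (high word) naming from primitiveTypes.h, is given below; it is an illustration, not the library's source.

```c
#include <stdint.h>

struct uint128 { uint64_t v0, v64; };    /* .v0 = low 64 bits, .v64 = high 64 bits */

/* 128-bit add: a carry out of the low word feeds the high word. */
static struct uint128 add128(uint64_t a64, uint64_t a0, uint64_t b64, uint64_t b0)
{
    struct uint128 z;

    z.v0 = a0 + b0;
    z.v64 = a64 + b64 + (z.v0 < a0);    /* z.v0 < a0 detects the carry */
    return z;
}

/* 128-bit subtract: borrow from the high word when the low words underflow. */
static struct uint128 sub128(uint64_t a64, uint64_t a0, uint64_t b64, uint64_t b0)
{
    struct uint128 z;

    z.v0 = a0 - b0;
    z.v64 = a64 - b64 - (a0 < b0);
    return z;
}
```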
f128_div.c  (hits in f128_div())
     75: uiA0 = uA.ui.v0;
     79: sigA.v0 = uiA0;
     82: uiB0 = uB.ui.v0;
     86: sigB.v0 = uiB0;
     91: if ( sigA.v64 | sigA.v0 ) goto propagateNaN;
     93: if ( sigB.v64 | sigB.v0 ) goto propagateNaN;
     99: if ( sigB.v64 | sigB.v0 ) goto propagateNaN;
    105: if ( ! (sigB.v64 | sigB.v0) ) {
    106: if ( ! (expA | sigA.v64 | sigA.v0) ) goto invalid;
    110: normExpSig = softfloat_normSubnormalF128Sig( sigB.v64, sigB.v0 );
    [all …]

f128_sqrt.c  (hits in f128_sqrt())
     68: uiA0 = uA.ui.v0;
     72: sigA.v0 = uiA0;
     76: if ( sigA.v64 | sigA.v0 ) {
     86: if ( ! (expA | sigA.v64 | sigA.v0) ) return a;
     92: if ( ! (sigA.v64 | sigA.v0) ) return a;
     93: normExpSig = softfloat_normSubnormalF128Sig( sigA.v64, sigA.v0 );
    110: rem = softfloat_shortShiftLeft128( sigA.v64, sigA.v0, 12 );
    112: rem = softfloat_shortShiftLeft128( sigA.v64, sigA.v0, 13 );
    123: rem = softfloat_shortShiftLeft128( rem.v64, rem.v0, 29 );
    125: rem = softfloat_sub128( rem.v64, rem.v0, term.v64, term.v0 );
    [all …]

f128_mul.c  (hits in f128_mul())
     72: uiA0 = uA.ui.v0;
     76: sigA.v0 = uiA0;
     79: uiB0 = uB.ui.v0;
     83: sigB.v0 = uiB0;
     89: (sigA.v64 | sigA.v0) || ((expB == 0x7FFF) && (sigB.v64 | sigB.v0))
     93: magBits = expB | sigB.v64 | sigB.v0;
     97: if ( sigB.v64 | sigB.v0 ) goto propagateNaN;
     98: magBits = expA | sigA.v64 | sigA.v0;
    104: if ( ! (sigA.v64 | sigA.v0) ) goto zero;
    105: normExpSig = softfloat_normSubnormalF128Sig( sigA.v64, sigA.v0 );
    [all …]

extF80_rem.c  (hits in extF80_rem())
    140: rem.v64, rem.v0, shiftedSigB.v64, shiftedSigB.v0 );
    150: rem = softfloat_shortShiftLeft128( rem.v64, rem.v0, 29 );
    152: rem = softfloat_sub128( rem.v64, rem.v0, term.v64, term.v0 );
    156: rem.v64, rem.v0, shiftedSigB.v64, shiftedSigB.v0 );
    164: rem = softfloat_shortShiftLeft128( rem.v64, rem.v0, expDiff + 30 );
    166: rem = softfloat_sub128( rem.v64, rem.v0, term.v64, term.v0 );
    170: rem.v64, rem.v0, shiftedSigB.v64, shiftedSigB.v0 );
    181: rem.v64, rem.v0, shiftedSigB.v64, shiftedSigB.v0 );
    184: meanRem = softfloat_add128( rem.v64, rem.v0, altRem.v64, altRem.v0 );
    187: || (! (meanRem.v64 | meanRem.v0) && (q & 1))
    [all …]

s_mul128To256M.c  (hits in softfloat_mul128To256M())
     54: zPtr[indexWord( 4, 0 )] = p0.v0;
     56: z64 = p64.v0 + p0.v64;
     57: z128 = p64.v64 + (z64 < p64.v0);
     59: z128 += p128.v0;
     60: z192 = p128.v64 + (z128 < p128.v0);
     62: z64 += p64.v0;
     64: p64.v64 += (z64 < p64.v0);

f128_roundToInt.c  (hits in f128_roundToInt())
     60: uiA0 = uA.ui.v0;
     79: uiZ.v0 = uiA0;
     83: if ( UINT64_C( 0x8000000000000000 ) <= uiZ.v0 ) {
     87: && (uiZ.v0 == UINT64_C( 0x8000000000000000 ))
     93: uiZ = softfloat_add128( uiZ.v64, uiZ.v0, 0, lastBitMask>>1 );
     94: if ( roundNearEven && ! (uiZ.v0 & roundBitsMask) ) {
     95: uiZ.v0 &= ~lastBitMask;
    102: uiZ = softfloat_add128( uiZ.v64, uiZ.v0, 0, roundBitsMask );
    105: uiZ.v0 &= ~roundBitsMask;
    115: uiZ.v0 = 0;
    [all …]

extF80_div.c  (hits in extF80_div())
    141: rem = softfloat_shortShiftLeft128( rem.v64, rem.v0, 29 );
    143: rem = softfloat_sub128( rem.v64, rem.v0, term.v64, term.v0 );
    146: rem = softfloat_add128( rem.v64, rem.v0, sigB>>32, sigB<<32 );
    153: rem = softfloat_shortShiftLeft128( rem.v64, rem.v0, 29 );
    155: rem = softfloat_sub128( rem.v64, rem.v0, term.v64, term.v0 );
    159: rem = softfloat_add128( rem.v64, rem.v0, term.v64, term.v0 );
    160: } else if ( softfloat_le128( term.v64, term.v0, rem.v64, rem.v0 ) ) {
    162: rem = softfloat_sub128( rem.v64, rem.v0, term.v64, term.v0 );
    164: if ( rem.v64 | rem.v0 ) q |= 1;
    178: uiZ0 = uiZ.v0;

s_shiftRightJam128Extra.c  (hits in softfloat_shiftRightJam128Extra())
     54: z.v.v0 = a64<<(negCount & 63) | a0>>count;
     59: z.v.v0 = a64;
     64: z.v.v0 = a64>>(count & 63);
     67: z.v.v0 = 0;

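Note: the "jam" in shiftRightJam means that any bits shifted out on the right are OR-ed back into the least significant bit as a sticky bit, so later rounding can still tell that the discarded part was non-zero. A 64-bit sketch of that idea follows (the 128-bit "Extra" variant listed above additionally returns the shifted-out bits in a separate word); it is an illustration, not the library's source.

```c
#include <stdint.h>

/* Shift a right by dist, folding any shifted-out bits into bit 0 (sticky). */
static uint64_t shift_right_jam64(uint64_t a, unsigned int dist)
{
    if (dist == 0)
        return a;
    if (dist < 64)
        return (a >> dist) | ((a << (64 - dist)) != 0);
    return a != 0;    /* everything shifted out: collapse to one sticky bit */
}
```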
f64_to_f128.c  (hits in f64_to_f128())
     69: uiZ.v0 = 0;
     76: uiZ.v0 = 0;
     85: uiZ.v0 = sig128.v0;

extF80_sqrt.c  (hits in extF80_sqrt())
     79: uiZ0 = uiZ.v0;
    123: rem = softfloat_shortShiftLeft128( rem.v64, rem.v0, 29 );
    124: rem = softfloat_sub128( rem.v64, rem.v0, term.v64, term.v0 );
    138: term = softfloat_add128( term.v64, term.v0, 0, x64 );
    139: rem = softfloat_shortShiftLeft128( rem.v64, rem.v0, 28 );
    140: rem = softfloat_sub128( rem.v64, rem.v0, term.v64, term.v0 );
    145: if ( rem.v64 | rem.v0 ) sigZExtra |= 1;

f128_to_extF80.c  (hits in f128_to_extF80())
     62: uiA0 = uA.ui.v0;
     72: uiZ0 = uiZ.v0;
     88: sig0 = normExpSig.sig.v0;
     93: return softfloat_roundPackToExtF80( sign, exp, sig128.v64, sig128.v0, 80 );

s_mulAddF64.c  (hits in softfloat_mulAddF64())
    127: sig128Z.v64, sig128Z.v0, sig128Z.v64, sig128Z.v0 );
    132: sigZ = sig128Z.v64<<1 | (sig128Z.v0 != 0);
    149: softfloat_shortShiftRightJam128( sig128Z.v64, sig128Z.v0, 1 );
    160: sigZ = (sigC + sig128Z.v64) | (sig128Z.v0 != 0);
    164: sig128Z.v64, sig128Z.v0, sig128C.v64, sig128C.v0 );
    165: sigZ = sig128Z.v64 | (sig128Z.v0 != 0);
    176: sig128Z = softfloat_sub128( sigC, 0, sig128Z.v64, sig128Z.v0 );
    179: if ( ! (sig128Z.v64 | sig128Z.v0) ) goto completeCancellation;
    182: sig128Z = softfloat_sub128( 0, 0, sig128Z.v64, sig128Z.v0 );
    187: sig128Z.v64, sig128Z.v0, sig128C.v64, sig128C.v0 );
    [all …]

ui64_to_f128.c  (hits in ui64_to_f128())
     57: zSig.v0 = 0;
     62: uiZ0 = zSig.v0;
     65: uZ.ui.v0 = uiZ0;

f128_mulAdd.c  (hits in f128_mulAdd())
     54: uiA0 = uA.ui.v0;
     57: uiB0 = uB.ui.v0;
     60: uiC0 = uC.ui.v0;

s_mul64To128.c  (hits in softfloat_mul64To128())
     54: z.v0 = (uint_fast64_t) a0 * b0;
     60: z.v0 += mid;
     61: z.v64 += (z.v0 < mid);

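Note: the three hits above are the tail of a 64 x 64 -> 128-bit multiply built from 32-bit partial products, with the carry handling visible in the listing (z.v0 += mid; z.v64 += (z.v0 < mid)). A self-contained sketch of the whole routine, following the v0/v64 field convention, is shown below; it illustrates the technique rather than reproducing the file's exact code.

```c
#include <stdint.h>

struct uint128 { uint64_t v0, v64; };    /* .v0 = low 64 bits, .v64 = high 64 bits */

static struct uint128 mul64to128(uint64_t a, uint64_t b)
{
    uint64_t a0 = (uint32_t)a, a32 = a >> 32;
    uint64_t b0 = (uint32_t)b, b32 = b >> 32;
    uint64_t mid1 = a32 * b0;
    uint64_t mid = mid1 + a0 * b32;      /* may wrap; the carry is recovered below */
    struct uint128 z;

    z.v0 = a0 * b0;
    z.v64 = a32 * b32 + ((uint64_t)(mid < mid1) << 32) + (mid >> 32);
    mid <<= 32;
    z.v0 += mid;                         /* add the low half of the middle terms ... */
    z.v64 += (z.v0 < mid);               /* ... and propagate its carry */
    return z;
}
```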
/optee_os/lib/libutils/isoc/arch/arm/
arm32_aeabi_ldivmod.c
     59: unsigned long long v0;                          (member)
     67: unsigned long long numerator = asm_ulqr->v0;    (in __ul_divmod())
     73: asm_ulqr->v0 = qr.q;                            (in __ul_divmod())
     78: long long v0;                                   (member)
     86: long long numerator = asm_lqr->v0;              (in __l_divmod())
    102: asm_lqr->v0 = qr.q;                             (in __l_divmod())

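Note: these helpers back the ARM EABI 64-bit division runtime calls. The pattern the hits suggest is that the assembly veneer for __aeabi_uldivmod/__aeabi_ldivmod spills its register arguments to a small structure and calls a C helper, which reads the numerator out of .v0 and writes the quotient back into the same slot. The sketch below shows that calling pattern for the unsigned case; the .v1 member and the shift-and-subtract division routine are assumptions made for this illustration (the real helper uses its own software division so it cannot recurse back into __aeabi_uldivmod).

```c
/* Register block as laid down by the assembly veneer (layout assumed here). */
struct asm_ulqr {
    unsigned long long v0;    /* in: numerator,   out: quotient  */
    unsigned long long v1;    /* in: denominator, out: remainder */
};

/* Stand-in software division: plain restoring (shift-and-subtract) divide. */
static void software_udivmod64(unsigned long long num, unsigned long long den,
                               unsigned long long *q, unsigned long long *r)
{
    unsigned long long quot = 0, rem = 0;
    int i;

    for (i = 63; i >= 0; i--) {
        rem = (rem << 1) | ((num >> i) & 1);
        if (rem >= den) {
            rem -= den;
            quot |= 1ULL << i;
        }
    }
    *q = quot;
    *r = rem;
}

void __ul_divmod(struct asm_ulqr *asm_ulqr)
{
    unsigned long long numerator = asm_ulqr->v0;
    unsigned long long denominator = asm_ulqr->v1;
    unsigned long long q, r;

    software_udivmod64(numerator, denominator, &q, &r);
    asm_ulqr->v0 = q;    /* quotient goes back where the numerator came in */
    asm_ulqr->v1 = r;
}
```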
/optee_os/lib/libutils/isoc/arch/arm/softfloat/source/include/
primitiveTypes.h
     46: struct uint128 { uint64_t v0, v64; };                     (member)
     50: struct uint128 { uint64_t v64, v0; };                     (member)
     71: #define INIT_UINTM4( v3, v2, v1, v0 ) { v0, v1, v2, v3 }  (argument)
     82: #define INIT_UINTM4( v3, v2, v1, v0 ) { v3, v2, v1, v0 }  (argument)

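Note: the two struct uint128 definitions above differ only in member order, so that .v0 is always the numerically low 64 bits while the struct still matches the in-memory image of a 128-bit value on either byte order; INIT_UINTM4 likewise lists its four 32-bit words in memory order. The small demo below illustrates the convention; the LITTLEENDIAN switch here is a stand-in for the real configuration macro.

```c
#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define LITTLEENDIAN 1    /* stand-in: pick the layout matching the target */

#if LITTLEENDIAN
struct uint128 { uint64_t v0, v64; };    /* low word at the lower address */
#else
struct uint128 { uint64_t v64, v0; };    /* low word at the higher address */
#endif

int main(void)
{
    /* The 128-bit value 0x00000000000000010123456789abcdef. */
    struct uint128 x = { .v0 = 0x0123456789abcdefULL, .v64 = 0x1ULL };
    unsigned char image[16];

    memcpy(image, &x, sizeof image);
    /* On a little-endian build the least significant byte, 0xef, comes first. */
    printf("byte 0 of the 128-bit image: 0x%02x\n", image[0]);
    return 0;
}
```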
/optee_os/core/lib/libtomcrypt/src/ciphers/
tea.c  (hits in tea_ecb_decrypt())
     90: ulong32 v0, v1, sum = SUM;    (local)
     98: LOAD32H(v0, &ct[0]);
    102: v1 -= ((v0 << 4) + skey->tea.k[2]) ^ (v0 + sum) ^ ((v0 >> 5) + skey->tea.k[3]);
    103: v0 -= ((v1 << 4) + skey->tea.k[0]) ^ (v1 + sum) ^ ((v1 >> 5) + skey->tea.k[1]);
    107: STORE32H(v0, &pt[0]);

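Note: the hits above are the core of TEA block decryption: each of the 32 rounds undoes the two Feistel half-updates in reverse order while sum counts down from DELTA * 32 (the SUM constant in the listing). A self-contained sketch of the standard round structure follows; it leaves out libtomcrypt's key-schedule structure and the LOAD32H/STORE32H byte-order helpers, so it illustrates the algorithm rather than the file's exact code.

```c
#include <stdint.h>

#define TEA_DELTA  0x9E3779B9u
#define TEA_ROUNDS 32

/* Decrypt one 64-bit block (two 32-bit words) in place with a 128-bit key. */
static void tea_decrypt_block(uint32_t v[2], const uint32_t k[4])
{
    uint32_t v0 = v[0], v1 = v[1];
    uint32_t sum = TEA_DELTA * TEA_ROUNDS;    /* 0xC6EF3720 */
    int i;

    for (i = 0; i < TEA_ROUNDS; i++) {
        v1 -= ((v0 << 4) + k[2]) ^ (v0 + sum) ^ ((v0 >> 5) + k[3]);
        v0 -= ((v1 << 4) + k[0]) ^ (v1 + sum) ^ ((v1 >> 5) + k[1]);
        sum -= TEA_DELTA;
    }
    v[0] = v0;
    v[1] = v1;
}
```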