/optee_os-3.20.0/lib/libutils/isoc/arch/arm/softfloat/source/
s_mulAddF128.c (matches in softfloat_mulAddF128()):
     84  sigA.v0 = uiA0;
     88  sigB.v0 = uiB0;
     92  sigC.v0 = uiC0;
     98  (sigA.v64 | sigA.v0) || ((expB == 0x7FFF) && (sigB.v64 | sigB.v0))
    113  uiZ.v0 = 0;
    117  uiZ.v0 = uiC0;
    141  softfloat_mul128To256M( sigA.v64, sigA.v0, sigB.v64, sigB.v0, sig256Z );
    206  sigZ = softfloat_add128( sigC.v64, sigC.v0, sigZ.v64, sigZ.v0 );
    325  uiZ.v0 = 0;
    341  uiZ.v0 = uiC0;
    [all …]
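The recurring `sigA.v64 | sigA.v0` pattern (line 98 above) is how SoftFloat tests a binary128 significand for nonzero when it is split across two 64-bit words; combined with an all-ones exponent field it distinguishes NaN from infinity. A minimal self-contained sketch of that test (the struct and function names here are illustrative, not SoftFloat's):

    #include <stdbool.h>
    #include <stdint.h>

    struct ui128 { uint64_t v64, v0; };   /* high word, low word */

    /* A binary128 with exponent field 0x7FFF is NaN exactly when its
     * 112-bit significand, spread over both words, is nonzero;
     * otherwise it is an infinity. */
    static bool f128_is_nan(uint_fast16_t expField, struct ui128 sig)
    {
        return expField == 0x7FFF && (sig.v64 | sig.v0) != 0;
    }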
f128_rem.c (matches in f128_rem()):
     71  uiA0 = uA.ui.v0;
     75  sigA.v0 = uiA0;
     78  uiB0 = uB.ui.v0;
     82  sigB.v0 = uiB0;
     87  (sigA.v64 | sigA.v0) || ((expB == 0x7FFF) && (sigB.v64 | sigB.v0))
    121  sigB = softfloat_add128( sigB.v64, sigB.v0, sigB.v64, sigB.v0 );
    124  q = softfloat_le128( sigB.v64, sigB.v0, rem.v64, rem.v0 );
    138  rem = softfloat_sub128( rem.v64, rem.v0, term.v64, term.v0 );
    150  rem = softfloat_sub128( rem.v64, rem.v0, term.v64, term.v0 );
    161  rem = softfloat_sub128( rem.v64, rem.v0, sigB.v64, sigB.v0 );
    [all …]
f128_div.c (matches in f128_div()):
     75  uiA0 = uA.ui.v0;
     79  sigA.v0 = uiA0;
     82  uiB0 = uB.ui.v0;
     86  sigB.v0 = uiB0;
    126  if ( softfloat_lt128( sigA.v64, sigA.v0, sigB.v64, sigB.v0 ) ) {
    128  rem = softfloat_add128( sigA.v64, sigA.v0, sigA.v64, sigA.v0 );
    139  rem = softfloat_sub128( rem.v64, rem.v0, term.v64, term.v0 );
    142  rem = softfloat_add128( rem.v64, rem.v0, sigB.v64, sigB.v0 );
    151  rem = softfloat_sub128( rem.v64, rem.v0, term.v64, term.v0 );
    154  rem = softfloat_add128( rem.v64, rem.v0, sigB.v64, sigB.v0 );
    [all …]
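The subtract/add-back pairs at lines 139-142 and 151-154 are the correction step of a restoring division: after subtracting q times the divisor, a set sign bit in the remainder's top word means the trial quotient digit overshot, so q is stepped down and the divisor added back. The same step on plain 64-bit words (an illustrative sketch, not the SoftFloat routine):

    #include <stdint.h>

    /* Repair an overshooting trial quotient digit: while the remainder
     * is negative (top bit set, reading it as two's complement), step q
     * down and add the divisor back in. */
    static uint32_t restore(uint64_t *rem, uint64_t divisor, uint32_t q)
    {
        while (*rem & UINT64_C(0x8000000000000000)) {
            --q;
            *rem += divisor;
        }
        return q;
    }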
s_subMagsF128.c (matches in softfloat_subMagsF128()):
     64  sigA.v0 = uiA0;
     67  sigB.v0 = uiB0;
     74  if ( sigA.v64 | sigA.v0 | sigB.v64 | sigB.v0 ) goto propagateNaN;
     77  uiZ.v0 = defaultNaNF128UI0;
     84  if ( sigB.v0 < sigA.v0 ) goto aBigger;
     85  if ( sigA.v0 < sigB.v0 ) goto bBigger;
     89  uiZ.v0 = 0;
     95  uiZ.v0 = 0;
    110  sigZ = softfloat_sub128( sigB.v64, sigB.v0, sigA.v64, sigA.v0 );
    116  uiZ.v0 = uiA0;
    [all …]
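Lines 84-85 finish a 128-bit magnitude comparison whose high words already compared equal: only then do the low words decide. Expressed as one helper in the style of softfloat_lt128 (a sketch; the real primitive lives elsewhere in this tree):

    #include <stdbool.h>
    #include <stdint.h>

    /* (a64:a0) < (b64:b0) for 128-bit unsigned values held as word
     * pairs: the high words dominate, the low words break ties. */
    static bool lt128(uint64_t a64, uint64_t a0, uint64_t b64, uint64_t b0)
    {
        return a64 < b64 || (a64 == b64 && a0 < b0);
    }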
f128_sqrt.c (matches in f128_sqrt()):
     68  uiA0 = uA.ui.v0;
     72  sigA.v0 = uiA0;
     76  if ( sigA.v64 | sigA.v0 ) {
     92  if ( ! (sigA.v64 | sigA.v0) ) return a;
    125  rem = softfloat_sub128( rem.v64, rem.v0, term.v64, term.v0 );
    138  rem = softfloat_sub128( y.v64, y.v0, term.v64, term.v0 );
    151  term.v64, term.v0
    159  y.v0 |= sigZExtra>>58;
    165  term = softfloat_sub128( term.v64, term.v0, rem.v64, rem.v0 );
    173  if ( term.v64 | term.v0 | y.v0 ) {
    [all …]
f128_mul.c (matches in f128_mul()):
     72  uiA0 = uA.ui.v0;
     76  sigA.v0 = uiA0;
     79  uiB0 = uB.ui.v0;
     83  sigB.v0 = uiB0;
     89  (sigA.v64 | sigA.v0) || ((expB == 0x7FFF) && (sigB.v64 | sigB.v0))
     93  magBits = expB | sigB.v64 | sigB.v0;
     98  magBits = expA | sigA.v64 | sigA.v0;
    120  softfloat_mul128To256M( sigA.v64, sigA.v0, sigB.v64, sigB.v0, sig256Z );
    125  sigA.v64, sigA.v0
    148  uiZ.v0 = defaultNaNF128UI0;
    [all …]
s_addMagsF128.c (matches in softfloat_addMagsF128()):
     66  sigA.v0 = uiA0;
     69  sigB.v0 = uiB0;
     73  if ( sigA.v64 | sigA.v0 | sigB.v64 | sigB.v0 ) goto propagateNaN;
     75  uiZ.v0 = uiA0;
     78  sigZ = softfloat_add128( sigA.v64, sigA.v0, sigB.v64, sigB.v0 );
     81  uiZ.v0 = sigZ.v0;
     93  uiZ.v0 = 0;
    112  uiZ.v0 = uiA0;
    132  sigA.v0,
    134  sigB.v0
    [all …]
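softfloat_add128 (line 78 above) adds two 128-bit values held as word pairs. A self-contained sketch of how such a primitive works, with the carry out of the low words recovered from unsigned wraparound (names are illustrative):

    #include <stdint.h>

    struct ui128 { uint64_t v64, v0; };

    static struct ui128 add128(uint64_t a64, uint64_t a0,
                               uint64_t b64, uint64_t b0)
    {
        struct ui128 z;

        z.v0  = a0 + b0;
        z.v64 = a64 + b64 + (z.v0 < a0);   /* low add wrapped => carry 1 */
        return z;
    }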
extF80_rem.c (matches in extF80_rem()):
    140  rem.v64, rem.v0, shiftedSigB.v64, shiftedSigB.v0 );
    152  rem = softfloat_sub128( rem.v64, rem.v0, term.v64, term.v0 );
    156  rem.v64, rem.v0, shiftedSigB.v64, shiftedSigB.v0 );
    166  rem = softfloat_sub128( rem.v64, rem.v0, term.v64, term.v0 );
    170  rem.v64, rem.v0, shiftedSigB.v64, shiftedSigB.v0 );
    181  rem.v64, rem.v0, shiftedSigB.v64, shiftedSigB.v0 );
    184  meanRem = softfloat_add128( rem.v64, rem.v0, altRem.v64, altRem.v0 );
    187  || (! (meanRem.v64 | meanRem.v0) && (q & 1))
    194  rem = softfloat_sub128( 0, 0, rem.v64, rem.v0 );
    198  signRem, expB + 32, rem.v64, rem.v0, 80 );
    [all …]
f128_roundToInt.c (matches in f128_roundToInt()):
     60  uiA0 = uA.ui.v0;
     79  uiZ.v0 = uiA0;
     83  if ( UINT64_C( 0x8000000000000000 ) <= uiZ.v0 ) {
     87  && (uiZ.v0 == UINT64_C( 0x8000000000000000 ))
     94  if ( roundNearEven && ! (uiZ.v0 & roundBitsMask) ) {
     95  uiZ.v0 &= ~lastBitMask;
    102  uiZ = softfloat_add128( uiZ.v64, uiZ.v0, 0, roundBitsMask );
    105  uiZ.v0 &= ~roundBitsMask;
    115  uiZ.v0 = 0;
    134  uiZ.v0 = 0;
    [all …]
s_mul128To256M.c (matches in softfloat_mul128To256M()):
     54  zPtr[indexWord( 4, 0 )] = p0.v0;
     56  z64 = p64.v0 + p0.v64;
     57  z128 = p64.v64 + (z64 < p64.v0);
     59  z128 += p128.v0;
     60  z192 = p128.v64 + (z128 < p128.v0);
     62  z64 += p64.v0;
     64  p64.v64 += (z64 < p64.v0);
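Every `(x < y)` term in this file is the standard C idiom for detecting a carry: unsigned addition wraps modulo 2^64, so after `z = x + y` the result is smaller than either addend exactly when the add overflowed. A two-line demonstration:

    #include <assert.h>
    #include <stdint.h>

    int main(void)
    {
        uint64_t x = UINT64_C(0xFFFFFFFFFFFFFFFF);
        uint64_t z = x + 2;        /* wraps around to 1 */

        assert(z == 1);
        assert((z < x) == 1);      /* the compare recovers the carry */
        return 0;
    }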
extF80_div.c (matches in extF80_div()):
    141  rem = softfloat_shortShiftLeft128( rem.v64, rem.v0, 29 );
    143  rem = softfloat_sub128( rem.v64, rem.v0, term.v64, term.v0 );
    146  rem = softfloat_add128( rem.v64, rem.v0, sigB>>32, sigB<<32 );
    153  rem = softfloat_shortShiftLeft128( rem.v64, rem.v0, 29 );
    155  rem = softfloat_sub128( rem.v64, rem.v0, term.v64, term.v0 );
    159  rem = softfloat_add128( rem.v64, rem.v0, term.v64, term.v0 );
    160  } else if ( softfloat_le128( term.v64, term.v0, rem.v64, rem.v0 ) ) {
    162  rem = softfloat_sub128( rem.v64, rem.v0, term.v64, term.v0 );
    164  if ( rem.v64 | rem.v0 ) q |= 1;
    178  uiZ0 = uiZ.v0;
extF80_sqrt.c (matches in extF80_sqrt()):
     79  uiZ0 = uiZ.v0;
    123  rem = softfloat_shortShiftLeft128( rem.v64, rem.v0, 29 );
    124  rem = softfloat_sub128( rem.v64, rem.v0, term.v64, term.v0 );
    138  term = softfloat_add128( term.v64, term.v0, 0, x64 );
    139  rem = softfloat_shortShiftLeft128( rem.v64, rem.v0, 28 );
    140  rem = softfloat_sub128( rem.v64, rem.v0, term.v64, term.v0 );
    145  if ( rem.v64 | rem.v0 ) sigZExtra |= 1;
s_shiftRightJam128Extra.c (matches in softfloat_shiftRightJam128Extra()):
     54  z.v.v0 = a64<<(negCount & 63) | a0>>count;
     59  z.v.v0 = a64;
     64  z.v.v0 = a64>>(count & 63);
     67  z.v.v0 = 0;
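The "Jam" in the name means the shift is sticky: bits shifted off the low end are not discarded but OR-ed back into the lowest retained position (or an extra word), so later rounding code can still tell the result was inexact. The 64-bit version of the idea (a sketch mirroring SoftFloat's softfloat_shiftRightJam64):

    #include <stdint.h>

    /* Right-shift that never silently discards set bits: if anything
     * nonzero falls off the end, the result's low bit is forced to 1. */
    static uint64_t shiftRightJam64(uint64_t a, unsigned dist)
    {
        if (dist == 0)
            return a;
        if (dist < 64)
            return a >> dist | ((a << (64 - dist)) != 0);
        return a != 0;    /* all bits shifted out: keep only stickiness */
    }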
f64_to_f128.c (matches in f64_to_f128()):
     69  uiZ.v0 = 0;
     76  uiZ.v0 = 0;
     85  uiZ.v0 = sig128.v0;
ui64_to_f128.c (matches in ui64_to_f128()):
     57  zSig.v0 = 0;
     62  uiZ0 = zSig.v0;
     65  uZ.ui.v0 = uiZ0;
s_mulAddF64.c (matches in softfloat_mulAddF64()):
    127  sig128Z.v64, sig128Z.v0, sig128Z.v64, sig128Z.v0 );
    132  sigZ = sig128Z.v64<<1 | (sig128Z.v0 != 0);
    160  sigZ = (sigC + sig128Z.v64) | (sig128Z.v0 != 0);
    164  sig128Z.v64, sig128Z.v0, sig128C.v64, sig128C.v0 );
    165  sigZ = sig128Z.v64 | (sig128Z.v0 != 0);
    179  if ( ! (sig128Z.v64 | sig128Z.v0) ) goto completeCancellation;
    187  sig128Z.v64, sig128Z.v0, sig128C.v64, sig128C.v0 );
    193  sig128Z.v64 = sig128Z.v0;
    194  sig128Z.v0 = 0;
    203  sig128Z.v64, sig128Z.v0, shiftCount );
    [all …]
i64_to_f128.c (matches in i64_to_f128()):
     61  zSig.v0 = 0;
     66  uiZ0 = zSig.v0;
     69  uZ.ui.v0 = uiZ0;
s_mul64To128.c (matches in softfloat_mul64To128()):
     54  z.v0 = (uint_fast64_t) a0 * b0;
     60  z.v0 += mid;
     61  z.v64 += (z.v0 < mid);
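These three matches are the skeleton of a 64x64->128 schoolbook multiply built from 32-bit halves: a*b = (aH*bH)<<64 + (aH*bL + aL*bH)<<32 + aL*bL, with wraparound compares recovering the carries. A self-contained version of the same technique (not a verbatim copy of the SoftFloat source):

    #include <stdint.h>

    struct ui128 { uint64_t v64, v0; };

    static struct ui128 mul64To128(uint64_t a, uint64_t b)
    {
        uint64_t aL = a & 0xFFFFFFFF, aH = a >> 32;
        uint64_t bL = b & 0xFFFFFFFF, bH = b >> 32;
        uint64_t lo   = aL * bL;
        uint64_t mid1 = aH * bL;
        uint64_t mid  = mid1 + aL * bH;      /* may wrap: carry below */
        struct ui128 z;

        z.v64  = aH * bH;
        z.v64 += ((uint64_t)(mid < mid1) << 32) | mid >> 32;
        z.v0   = lo + (mid << 32);
        z.v64 += z.v0 < lo;                  /* carry from the low word */
        return z;
    }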
f128_mulAdd.c (matches in f128_mulAdd()):
     54  uiA0 = uA.ui.v0;
     57  uiB0 = uB.ui.v0;
     60  uiC0 = uC.ui.v0;
f128_to_extF80.c (matches in f128_to_extF80()):
     62  uiA0 = uA.ui.v0;
     72  uiZ0 = uiZ.v0;
     88  sig0 = normExpSig.sig.v0;
     93  return softfloat_roundPackToExtF80( sign, exp, sig128.v64, sig128.v0, 80 );
/optee_os-3.20.0/core/arch/arm/crypto/
aes_modes_armv8a_ce_a64.S:
    375  eor v0.16b, v0.16b, v7.16b
    519  eor v0.16b, v0.16b, v4.16b
    522  eor v0.16b, v0.16b, v4.16b
    534  eor v0.16b, v0.16b, v4.16b
    542  eor v0.16b, v0.16b, v4.16b
    558  eor v0.16b, v0.16b, v4.16b
    600  eor v0.16b, v0.16b, v4.16b
    603  eor v0.16b, v0.16b, v4.16b
    615  eor v0.16b, v0.16b, v4.16b
    623  eor v0.16b, v0.16b, v4.16b
    [all …]
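In this assembly, v0 is not the struct member above but the AArch64 SIMD register holding the AES state; each `eor v0.16b, v0.16b, v4.16b` is a full 128-bit XOR, i.e. a round-key or chaining/whitening step of the block mode. The same instructions are reachable from C through the ARMv8 Crypto Extension intrinsics; a sketch of one inner encryption round (assumes a compiler targeting armv8-a with the crypto extension enabled; the whitening operand is illustrative):

    #include <arm_neon.h>

    /* AESE = AddRoundKey + SubBytes + ShiftRows; AESMC = MixColumns.
     * The final veorq_u8 is the 128-bit XOR the "eor" lines perform. */
    static uint8x16_t aes_inner_round(uint8x16_t state, uint8x16_t rk,
                                      uint8x16_t whitening)
    {
        state = vaeseq_u8(state, rk);
        state = vaesmcq_u8(state);
        return veorq_u8(state, whitening);
    }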
sm3_armv8a_ce_a64.S:
     94  rev32 v0.16b, v0.16b
    101  qround a, v0, v1, v2, v3, v4
    102  qround a, v1, v2, v3, v4, v0
    103  qround a, v2, v3, v4, v0, v1
    104  qround a, v3, v4, v0, v1, v2
    108  qround b, v4, v0, v1, v2, v3
    109  qround b, v0, v1, v2, v3, v4
    110  qround b, v1, v2, v3, v4, v0
    111  qround b, v2, v3, v4, v0, v1
    118  qround b, v4, v0 [all …]
/optee_os-3.20.0/lib/libmbedtls/mbedtls/library/
xtea.c (matches in mbedtls_xtea_crypt_ecb()):
     74  uint32_t *k, v0, v1, i;   (local)
     78  v0 = MBEDTLS_GET_UINT32_BE( input, 0 );
     87  v0 += (((v1 << 4) ^ (v1 >> 5)) + v1) ^ (sum + k[sum & 3]);
     89  v1 += (((v0 << 4) ^ (v0 >> 5)) + v0) ^ (sum + k[(sum>>11) & 3]);
     98  v1 -= (((v0 << 4) ^ (v0 >> 5)) + v0) ^ (sum + k[(sum>>11) & 3]);
    100  v0 -= (((v1 << 4) ^ (v1 >> 5)) + v1) ^ (sum + k[sum & 3]);
    104  MBEDTLS_PUT_UINT32_BE( v0, output, 0 );
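Here v0 and v1 are simply the two big-endian 32-bit halves of the 64-bit XTEA block; lines 87 and 89 are the encryption round, and lines 98 and 100 the same round undone for decryption. For reference, a self-contained version of the standard XTEA encryption loop (32 rounds, delta 0x9E3779B9; byte-order handling left out):

    #include <stdint.h>

    /* Encrypt one 64-bit block {v[0], v[1]} in place with a 128-bit key,
     * each round updating one half from the other as in the matches. */
    static void xtea_encrypt_block(uint32_t v[2], const uint32_t k[4])
    {
        uint32_t v0 = v[0], v1 = v[1], sum = 0;

        for (int i = 0; i < 32; i++) {
            v0 += (((v1 << 4) ^ (v1 >> 5)) + v1) ^ (sum + k[sum & 3]);
            sum += 0x9E3779B9;
            v1 += (((v0 << 4) ^ (v0 >> 5)) + v0) ^ (sum + k[(sum >> 11) & 3]);
        }
        v[0] = v0;
        v[1] = v1;
    }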
/optee_os-3.20.0/lib/libutils/isoc/arch/arm/softfloat/source/include/
primitiveTypes.h:
     46  struct uint128 { uint64_t v0, v64; };   (member)
     50  struct uint128 { uint64_t v64, v0; };   (member)
     71  #define INIT_UINTM4( v3, v2, v1, v0 ) { v0, v1, v2, v3 }   (argument)
     82  #define INIT_UINTM4( v3, v2, v1, v0 ) { v3, v2, v1, v0 }   (argument)
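The two struct definitions differ only in member order: v0 is always the low 64 bits and v64 the high 64 bits, but on little-endian targets v0 is laid out first and on big-endian targets last, so the struct's bytes match a native 128-bit integer either way (INIT_UINTM4 plays the same reordering trick for 4-word initializers). A quick check of that layout property on a little-endian build, using GCC/Clang's unsigned __int128 (illustrative only):

    #include <assert.h>
    #include <stdint.h>
    #include <string.h>

    /* Little-endian variant from the header: low word first. */
    struct uint128 { uint64_t v0, v64; };

    int main(void)
    {
        unsigned __int128 n =
            ((unsigned __int128)0x1111 << 64) | 0x2222;
        struct uint128 s;

        memcpy(&s, &n, sizeof s);  /* reinterpret the native value */
        assert(s.v0 == 0x2222);    /* low 64 bits land in v0 */
        assert(s.v64 == 0x1111);   /* high 64 bits land in v64 */
        return 0;
    }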
/optee_os-3.20.0/lib/libutils/isoc/arch/arm/
arm32_aeabi_ldivmod.c:
     59  unsigned long long v0;   (member)
     67  unsigned long long numerator = asm_ulqr->v0;   in __ul_divmod()
     73  asm_ulqr->v0 = qr.q;   in __ul_divmod()
     78  long long v0;   (member)
     86  long long numerator = asm_lqr->v0;   in __l_divmod()
    102  asm_lqr->v0 = qr.q;   in __l_divmod()
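In both helpers, v0 is the first 64-bit slot of a scratch block shared with the assembly shim: it carries the numerator in and the quotient out (the EABI entry points __aeabi_uldivmod/__aeabi_ldivmod return the quotient in r0-r1 and the remainder in r2-r3). A sketch of that in-place in/out convention (field and function names are illustrative, and a zero denominator is assumed away):

    #include <stdint.h>

    /* Scratch block the asm wrapper fills with {numerator, denominator}
     * and reads back as {quotient, remainder}. */
    struct asm_ulqr {
        uint64_t v0;   /* in: numerator,   out: quotient  */
        uint64_t v1;   /* in: denominator, out: remainder */
    };

    static void ul_divmod(struct asm_ulqr *qr)
    {
        uint64_t n = qr->v0, d = qr->v1;   /* assumes d != 0 */

        qr->v0 = n / d;   /* quotient overwrites the numerator slot */
        qr->v1 = n % d;
    }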