
Searched refs: W (Results 1 – 25 of 405), sorted by relevance


/linux/lib/crypto/
sha256.c
61 W[I] = s1(W[I-2]) + W[I-7] + s0(W[I-15]) + W[I-16]; in BLEND_OP()
91 BLEND_OP(i + 0, W); in sha256_transform()
92 BLEND_OP(i + 1, W); in sha256_transform()
93 BLEND_OP(i + 2, W); in sha256_transform()
94 BLEND_OP(i + 3, W); in sha256_transform()
95 BLEND_OP(i + 4, W); in sha256_transform()
96 BLEND_OP(i + 5, W); in sha256_transform()
97 BLEND_OP(i + 6, W); in sha256_transform()
98 BLEND_OP(i + 7, W); in sha256_transform()
124 u32 W[64]; in sha256_transform_blocks() local
[all …]
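
For context on the BLEND_OP hit above: it is the SHA-256 message-schedule expansion, where each of words 16..63 is derived from four earlier words through the FIPS 180-4 small sigma functions. A minimal standalone sketch of that step (helper names here are illustrative, not the kernel's):

    #include <stdint.h>

    static inline uint32_t ror32(uint32_t x, unsigned n)
    {
            return (x >> n) | (x << (32 - n));
    }

    /* FIPS 180-4 small sigma functions for SHA-256 */
    static inline uint32_t s0(uint32_t x) { return ror32(x, 7) ^ ror32(x, 18) ^ (x >> 3); }
    static inline uint32_t s1(uint32_t x) { return ror32(x, 17) ^ ror32(x, 19) ^ (x >> 10); }

    /* Expand the 16 loaded message words into the full 64-word schedule;
     * BLEND_OP in the hit performs one iteration of this loop. */
    static void sha256_expand(uint32_t W[64])
    {
            for (int i = 16; i < 64; i++)
                    W[i] = s1(W[i - 2]) + W[i - 7] + s0(W[i - 15]) + W[i - 16];
    }
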
sha1.c
40 #define setW(x, val) (*(volatile __u32 *)&W(x) = (val))
42 #define setW(x, val) do { W(x) = (val); __asm__("":::"memory"); } while (0)
44 #define setW(x, val) (W(x) = (val))
48 #define W(x) (array[(x)&15]) macro
55 #define SHA_MIX(t) rol32(W(t+13) ^ W(t+8) ^ W(t+2) ^ W(t), 1)
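The sha1.c hits show the 16-word circular buffer used for the SHA-1 message schedule: W(x) indexes array[x & 15], and SHA_MIX computes the standard W[t] = rol32(W[t-3] ^ W[t-8] ^ W[t-14] ^ W[t-16], 1), since t-3, t-8, t-14 and t-16 reduce to t+13, t+8, t+2 and t modulo 16; the three setW variants differ only in how they keep the compiler from re-materialising the buffer. A sketch of the same idea (illustrative names, not the kernel macros):

    #include <stdint.h>

    static inline uint32_t rol32(uint32_t x, unsigned n)
    {
            return (x << n) | (x >> (32 - n));
    }

    /* 16-entry circular window over the SHA-1 schedule; W[t] overwrites W[t-16]. */
    static inline uint32_t sha1_mix(uint32_t w[16], unsigned t)
    {
            uint32_t v = rol32(w[(t + 13) & 15] ^ w[(t + 8) & 15] ^
                               w[(t + 2) & 15] ^ w[t & 15], 1);
            w[t & 15] = v;  /* same slot that held W[t-16] */
            return v;
    }
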
/linux/arch/powerpc/crypto/
sha1-powerpc-asm.S
58 xor r5,W((t)+4-3),W((t)+4-8); \
60 xor W((t)+4),W((t)+4-16),W((t)+4-14); \
62 xor W((t)+4),W((t)+4),r5; \
64 rotlwi W((t)+4),W((t)+4),1
84 xor W((t)+4),W((t)+4-16),W((t)+4-14); \
86 xor W((t)+4),W((t)+4),r5; \
88 rotlwi W((t)+4),W((t)+4),1
97 xor r5,W((t)+4-3),W((t)+4-8); \
99 xor W((t)+4),W((t)+4-16),W((t)+4-14); \
103 xor W((t)+4),W((t)+4),r5; \
[all …]
/linux/Documentation/translations/zh_CN/arch/loongarch/
introduction.rst
205 ADD.W SUB.W ADDI.W ADD.D SUB.D ADDI.D
208 MUL.W MULH.W MULH.WU DIV.W DIV.WU MOD.W MOD.WU
215 SLL.W SRL.W SRA.W ROTR.W SLLI.W SRLI.W SRAI.W ROTRI.W
220 EXT.W.B EXT.W.H CLO.W CLO.D CLZ.W CLZ.D CTO.W CTO.D CTZ.W CTZ.D
221 BYTEPICK.W BYTEPICK.D BSTRINS.W BSTRINS.D BSTRPICK.W BSTRPICK.D
222 REVB.2H REVB.4H REVB.2W REVB.D REVH.2W REVH.D BITREV.4B BITREV.8B BITREV.W BITREV.D
231 LD.B LD.BU LD.H LD.HU LD.W LD.WU LD.D ST.B ST.H ST.W ST.D
233 LDPTR.W LDPTR.D STPTR.W STPTR.D
238 LL.W SC.W LL.D SC.D
239 AMSWAP.W AMSWAP.D AMADD.W AMADD.D AMAND.W AMAND.D AMOR.W AMOR.D AMXOR.W AMXOR.D
[all …]
/linux/Documentation/translations/zh_TW/arch/loongarch/
introduction.rst
205 ADD.W SUB.W ADDI.W ADD.D SUB.D ADDI.D
208 MUL.W MULH.W MULH.WU DIV.W DIV.WU MOD.W MOD.WU
215 SLL.W SRL.W SRA.W ROTR.W SLLI.W SRLI.W SRAI.W ROTRI.W
220 EXT.W.B EXT.W.H CLO.W CLO.D CLZ.W CLZ.D CTO.W CTO.D CTZ.W CTZ.D
221 BYTEPICK.W BYTEPICK.D BSTRINS.W BSTRINS.D BSTRPICK.W BSTRPICK.D
222 REVB.2H REVB.4H REVB.2W REVB.D REVH.2W REVH.D BITREV.4B BITREV.8B BITREV.W BITREV.D
231 LD.B LD.BU LD.H LD.HU LD.W LD.WU LD.D ST.B ST.H ST.W ST.D
233 LDPTR.W LDPTR.D STPTR.W STPTR.D
238 LL.W SC.W LL.D SC.D
239 AMSWAP.W AMSWAP.D AMADD.W AMADD.D AMAND.W AMAND.D AMOR.W AMOR.D AMXOR.W AMXOR.D
[all …]
/linux/arch/x86/crypto/
sha1_ssse3_asm.S
387 psrld $31, W
392 pslld $2, W
412 pxor W_minus_28, W # W is W_minus_32 before xor
419 psrld $30, W
503 vpxor W_minus_08, W, W
506 vpxor W_TMP1, W, W
510 vpsrld $31, W, W
526 vpxor W_minus_28, W, W # W is W_minus_32 before xor
529 vpxor W_TMP1, W, W
532 vpsrld $30, W, W
[all …]
sha512-ssse3-asm.S
163 # Eg. XMM2=W[t-2] really means XMM2={W[t-2]|W[t-1]}
201 psrlq $(7-1), %xmm3 # XMM3 = ((W[t-15]>>1)^W[t-15])>>6
204 pxor %xmm2, %xmm0 # XMM0 = (((W[t-2]>>42)^W[t-2])>>13)^W[t-2]
207 pxor %xmm5, %xmm3 # XMM3 = (((W[t-15]>>1)^W[t-15])>>6)^W[t-15]
210 psrlq $6, %xmm0 # XMM0 = ((((W[t-2]>>42)^W[t-2])>>13)^W[t-2])>>6
213 psrlq $1, %xmm3 # XMM3 = (((W[t-15]>>1)^W[t-15])>>6)^W[t-15]>>1
229 pxor %xmm2, %xmm1 # XMM1 = (W[t-2] << 42)^W[t-2]
232 pxor %xmm5, %xmm4 # XMM4 = (W[t-15]<<7)^W[t-15]
249 paddq %xmm3, %xmm0 # XMM0 = s1(W[t-2]) + s0(W[t-15])
252 paddq W_t(idx), %xmm0 # XMM0 = s1(W[t-2]) + s0(W[t-15]) + W[t-16]
[all …]
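
The sha512-ssse3 comments document a shift-combining trick: because shifts distribute over XOR, the right-shift portion of the SHA-512 s1 function, (x ror 19) ^ (x ror 61) ^ (x >> 6), can be produced as ((((x>>42)^x)>>13)^x)>>6, with a matching left-shift chain supplying the bits a rotate would wrap around. A small self-check sketch of that identity (plain C, illustrative only):

    #include <assert.h>
    #include <stdint.h>

    static inline uint64_t ror64(uint64_t x, unsigned n)
    {
            return (x >> n) | (x << (64 - n));
    }

    static void check(uint64_t x)
    {
            /* Right-shift chain from the comments: x>>61 ^ x>>19 ^ x>>6 */
            uint64_t r = ((((x >> 42) ^ x) >> 13) ^ x) >> 6;
            /* Left-shift chain: x<<45 ^ x<<3 (the wrapped-around rotate bits) */
            uint64_t l = ((x << 42) ^ x) << 3;
            assert((r ^ l) == (ror64(x, 19) ^ ror64(x, 61) ^ (x >> 6)));
    }

    int main(void)
    {
            check(0x0123456789abcdefULL);
            check(0xfedcba9876543210ULL);
            return 0;
    }
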
sha512-avx-asm.S
78 # W[t] + K[t] | W[t+1] + K[t+1]
168 # Eg. XMM4=W[t-2] really means XMM4={W[t-2]|W[t-1]}
185 vpxor %xmm1, %xmm0, %xmm0 # XMM0 = W[t-2]>>61 ^ W[t-2]>>19
195 vpxor %xmm7, %xmm6, %xmm6 # XMM6 = W[t-15]>>1 ^ W[t-15]>>8
204 vpxor %xmm3, %xmm2, %xmm2 # XMM2 = W[t-2]>>6 ^ W[t-2]<<3
209 vpxor %xmm9, %xmm8, %xmm8 # XMM8 = W[t-15]>>7 ^ W[t-15]<<63
214 vpxor %xmm8, %xmm6, %xmm6 # XMM6 = W[t-15]>>1 ^ W[t-15]>>8 ^
215 # W[t-15]>>7 ^ W[t-15]<<63
240 vpaddq %xmm6, %xmm0, %xmm0 # XMM0 = s1(W[t-2]) + W[t-16] + s0(W[t-15])
242 vpaddq %xmm1, %xmm0, %xmm0 # XMM0 = W[t] = s1(W[t-2]) + W[t-7] +
[all …]
sha512-avx2-asm.S
169 vpaddq Y_0, YTMP0, YTMP0 # YTMP0 = W[-7] + W[-16]
226 vpxor YTMP4, YTMP3, YTMP3 # YTMP3 = W[-15] ror 1 ^ W[-15] >> 7
231 vpaddq YTMP1, YTMP0, YTMP0 # YTMP0 = W[-16] + W[-7] + s0
233 vperm2f128 $0x0, YTMP0, YTMP0, Y_0 # Y_0 = W[-16] + W[-7] + s0 {BABA}
289 vpxor YTMP3, YTMP4, YTMP4 # YTMP4 = W[-2] ror 19 ^ W[-2] >> 6 {BABA}
294 # (W[-2] ror 61) ^ (W[-2] >> 6) {BABA}
297 vpaddq YTMP4, Y_0, Y_0 # Y_0 = {W[1], W[0], W[1], W[0]}
345 vpxor YTMP3, YTMP4, YTMP4 # YTMP4 = W[-2] ror 19 ^ W[-2] >> 6 {DC--}
350 # (W[-2] ror 61) ^ (W[-2] >> 6) {DC--}
354 vpaddq YTMP4, YTMP0, YTMP2 # YTMP2 = {W[3], W[2], --, --}
[all …]
sha256-ssse3-asm.S
150 ## compute W[-16] + W[-7] 4 at a time
155 palignr $4, X2, XTMP0 # XTMP0 = W[-7]
163 paddd X0, XTMP0 # XTMP0 = W[-7] + W[-16]
168 palignr $4, X0, XTMP1 # XTMP1 = W[-15]
172 movdqa XTMP1, XTMP2 # XTMP2 = W[-15]
176 movdqa XTMP1, XTMP3 # XTMP3 = W[-15]
192 movdqa XTMP3, XTMP2 # XTMP2 = W[-15]
216 pxor XTMP2, XTMP1 # XTMP1 = W[-15] ror 7 ^ W[-15] ror 18
228 paddd XTMP1, XTMP0 # XTMP0 = W[-16] + W[-7] + s0
265 paddd XTMP4, XTMP0 # XTMP0 = {..., ..., W[1], W[0]}
[all …]
/linux/arch/x86/kernel/
uprobes.c
90 W(0x00, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1) | /* 00 */
91 W(0x10, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 0) , /* 10 */
92 W(0x20, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1) | /* 20 */
93 W(0x30, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1) , /* 30 */
94 W(0x40, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1) | /* 40 */
95 W(0x50, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1) , /* 50 */
96 W(0x60, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0) | /* 60 */
97 W(0x70, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1) , /* 70 */
98 W(0x80, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1) | /* 80 */
99 W(0x90, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1) , /* 90 */
[all …]
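
The uprobes.c table is a different W entirely: each W(row, ...) row packs sixteen one-bit "is this opcode safe to probe" flags, and pairs of rows are combined into one 32-bit word (hence the alternating | and , between rows). A rough sketch of that packing scheme, written as plain functions rather than the kernel's compile-time macro (names and layout here are an illustration, not the kernel code):

    #include <stdint.h>

    /* Pack 16 single-bit flags for opcodes row+0x0 .. row+0xf into the half
     * of a 32-bit word selected by the row, mirroring how W() rows pair up. */
    static uint32_t pack_row(unsigned row, const uint8_t bits[16])
    {
            uint32_t v = 0;

            for (unsigned i = 0; i < 16; i++)
                    v |= (uint32_t)(bits[i] & 1) << i;
            return v << (row % 32);  /* row 0x00 -> low half, row 0x10 -> high half */
    }

    /* Query: is opcode 'op' flagged, given 256 bits packed into 8 words? */
    static int opcode_allowed(const uint32_t table[8], uint8_t op)
    {
            return (table[op / 32] >> (op % 32)) & 1;
    }
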
/linux/tools/bpf/bpftool/bash-completion/
bpftool
22 COMPREPLY+=( $( compgen -W "$w" -- "$cur" ) )
44 COMPREPLY+=( $( compgen -W "$*" -- "$cur" ) )
49 COMPREPLY+=( $( compgen -W "$( bpftool -jp map 2>&1 | \
57 COMPREPLY+=( $( compgen -W "$( bpftool -jp map 2>&1 | \
64 COMPREPLY+=( $( compgen -W "$( bpftool -jp map 2>&1 | \
72 COMPREPLY+=( $( compgen -W "$( bpftool -jp map 2>&1 | \
79 COMPREPLY+=( $( compgen -W "$( bpftool -jp prog 2>&1 | \
85 COMPREPLY+=( $( compgen -W "$( bpftool -jp prog 2>&1 | \
97 COMPREPLY+=( $( compgen -W "$( bpftool -jp btf 2>&1 | \
116 COMPREPLY+=( $( compgen -W "$maps" -- "$cur" ) )
[all …]
/linux/tools/memory-model/Documentation/
herd-representation.txt
4 # W, a Store event
13 # W*, a Store event included in RMW
32 | WRITE_ONCE | W[once] |
36 | smp_store_release | W[release] |
38 | smp_store_mb | W[once] ->po F[mb] |
53 | rcu_assign_pointer | W[release] |
56 | srcu_read_unlock | W[srcu-unlock] |
63 | atomic_add | R*[noreturn] ->rmw W*[once] |
70 | | ->rmw W*[once] ->po F[mb] |
76 | atomic_add_return_relaxed | R*[once] ->rmw W*[once] |
[all …]
cheatsheet.txt
3 C Self R W RMW Self R W DR DW RMW SV
11 Successful *_release() C Y Y Y W Y
13 smp_wmb() Y W Y Y W
28 W: Write, for example, WRITE_ONCE(), or write portion of RMW
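
In the memory-model documents W is an event class, not a variable: WRITE_ONCE() is a plain W[once] store and smp_store_release() a W[release]. A minimal message-passing sketch using those primitives (kernel-style C; assumes the usual kernel barrier/rwonce headers are available):

    /* Writer: publish data, then the flag, with release semantics. */
    static int data;
    static int flag;

    void producer(void)
    {
            WRITE_ONCE(data, 42);           /* W[once]    */
            smp_store_release(&flag, 1);    /* W[release] */
    }

    /* Reader: the acquire load of the flag orders the later read of data. */
    int consumer(void)
    {
            if (smp_load_acquire(&flag))    /* R[acquire] */
                    return READ_ONCE(data); /* R[once]    */
            return -1;
    }
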
/linux/arch/arm/crypto/
sha1-armv7-neon.S
218 veor.32 W, W, W_m08; \
222 veor W, W, tmp0; \
225 vshl.u32 tmp0, W, #1; \
229 vshr.u32 W, W, #31; \
254 veor W, W_m28; \
260 veor W, W_m16; \
263 veor W, tmp0; \
269 vshl.u32 tmp1, W, #2; \
272 vshr.u32 tmp0, W, #30; \
275 vorr W, tmp0, tmp1; \
[all …]
/linux/crypto/
sha512_generic.c
88 static inline void LOAD_OP(int I, u64 *W, const u8 *input) in LOAD_OP() argument
90 W[I] = get_unaligned_be64((__u64 *)input + I); in LOAD_OP()
93 static inline void BLEND_OP(int I, u64 *W) in BLEND_OP() argument
95 W[I & 15] += s1(W[(I-2) & 15]) + W[(I-7) & 15] + s0(W[(I-15) & 15]); in BLEND_OP()
104 u64 W[16]; in sha512_transform() local
118 LOAD_OP(i + j, W, input); in sha512_transform()
121 BLEND_OP(i + j, W); in sha512_transform()
126 t1 = h + e1(e) + Ch(e,f,g) + sha512_K[i ] + W[(i & 15)]; in sha512_transform()
128 t1 = g + e1(d) + Ch(d,e,f) + sha512_K[i+1] + W[(i & 15) + 1]; in sha512_transform()
130 t1 = f + e1(c) + Ch(c,d,e) + sha512_K[i+2] + W[(i & 15) + 2]; in sha512_transform()
[all …]
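
sha512_generic.c uses the same schedule idea as sha256.c above, but over a rolling 16-entry window: W[I-16] already sits in slot I & 15, so BLEND_OP can accumulate into it with +=. A sketch of one expansion step using the FIPS 180-4 SHA-512 small sigmas (illustrative helpers, not the kernel's):

    #include <stdint.h>

    static inline uint64_t ror64(uint64_t x, unsigned n)
    {
            return (x >> n) | (x << (64 - n));
    }

    static inline uint64_t s0(uint64_t x) { return ror64(x, 1) ^ ror64(x, 8) ^ (x >> 7); }
    static inline uint64_t s1(uint64_t x) { return ror64(x, 19) ^ ror64(x, 61) ^ (x >> 6); }

    /* One schedule step over a 16-word circular buffer: the slot holding
     * W[i-16] is updated in place to become W[i] (i >= 16). */
    static void blend(uint64_t W[16], int i)
    {
            W[i & 15] += s1(W[(i - 2) & 15]) + W[(i - 7) & 15] + s0(W[(i - 15) & 15]);
    }
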
sm3.c
64 #define W1(i) (W[i & 0x0f])
65 #define W2(i) (W[i & 0x0f] = \
66 P1(W[i & 0x0f] \
67 ^ W[(i-9) & 0x0f] \
69 ^ rol32(W[(i-13) & 0x0f], 7) \
70 ^ W[(i-6) & 0x0f])
173 sm3_transform(sctx, data, W); in sm3_block()
181 u32 W[16]; in sm3_update() local
206 memzero_explicit(W, sizeof(W)); in sm3_update()
221 u32 W[16]; in sm3_final() local
[all …]
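
The sm3.c W1/W2 macros implement the SM3 message expansion over a 16-word window. For reference, the expansion in the SM3 standard is W[j] = P1(W[j-16] ^ W[j-9] ^ rol32(W[j-3], 15)) ^ rol32(W[j-13], 7) ^ W[j-6] with P1(x) = x ^ rol32(x, 15) ^ rol32(x, 23); the mod-16 indexing in the hit folds W[j-16] into the slot being written. A hedged sketch (illustrative names, not the kernel macros):

    #include <stdint.h>

    static inline uint32_t rol32(uint32_t x, unsigned n)
    {
            return (x << n) | (x >> (32 - n));
    }

    static inline uint32_t p1(uint32_t x)
    {
            return x ^ rol32(x, 15) ^ rol32(x, 23);
    }

    /* One SM3 expansion step over a 16-word circular buffer (j >= 16):
     * the slot holding W[j-16] is overwritten with W[j]. */
    static uint32_t sm3_expand(uint32_t W[16], unsigned j)
    {
            uint32_t w = p1(W[j & 15] ^ W[(j - 9) & 15] ^ rol32(W[(j - 3) & 15], 15))
                         ^ rol32(W[(j - 13) & 15], 7) ^ W[(j - 6) & 15];
            W[j & 15] = w;
            return w;
    }
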
/linux/Documentation/arch/loongarch/
introduction.rst
238 ADD.W SUB.W ADDI.W ADD.D SUB.D ADDI.D
241 MUL.W MULH.W MULH.WU DIV.W DIV.WU MOD.W MOD.WU
248 SLL.W SRL.W SRA.W ROTR.W SLLI.W SRLI.W SRAI.W ROTRI.W
253 EXT.W.B EXT.W.H CLO.W CLO.D CLZ.W CLZ.D CTO.W CTO.D CTZ.W CTZ.D
254 BYTEPICK.W BYTEPICK.D BSTRINS.W BSTRINS.D BSTRPICK.W BSTRPICK.D
255 REVB.2H REVB.4H REVB.2W REVB.D REVH.2W REVH.D BITREV.4B BITREV.8B BITREV.W BITREV.D
264 LD.B LD.BU LD.H LD.HU LD.W LD.WU LD.D ST.B ST.H ST.W ST.D
266 LDPTR.W LDPTR.D STPTR.W STPTR.D
271 LL.W SC.W LL.D SC.D
272 AMSWAP.W AMSWAP.D AMADD.W AMADD.D AMAND.W AMAND.D AMOR.W AMOR.D AMXOR.W AMXOR.D
[all …]
/linux/tools/memory-model/
linux-kernel.def
35 cmpxchg(X,V,W) __cmpxchg{mb}(X,V,W)
36 cmpxchg_relaxed(X,V,W) __cmpxchg{once}(X,V,W)
37 cmpxchg_acquire(X,V,W) __cmpxchg{acquire}(X,V,W)
38 cmpxchg_release(X,V,W) __cmpxchg{release}(X,V,W)
111 atomic_cmpxchg(X,V,W) __cmpxchg{mb}(X,V,W)
112 atomic_cmpxchg_relaxed(X,V,W) __cmpxchg{once}(X,V,W)
113 atomic_cmpxchg_acquire(X,V,W) __cmpxchg{acquire}(X,V,W)
114 atomic_cmpxchg_release(X,V,W) __cmpxchg{release}(X,V,W)
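
linux-kernel.def maps each cmpxchg() flavour onto a __cmpxchg event carrying the matching ordering tag: full barrier (mb), relaxed (once), acquire, or release. A toy usage sketch of the kernel API being modelled (hypothetical lock, assuming the usual atomic helpers; not code from the tree):

    /* A toy spinlock built on cmpxchg(): 0 = unlocked, 1 = locked.
     * cmpxchg(ptr, old, new) returns the value previously at *ptr, so the
     * CAS succeeded iff that return value equals 'old'. */
    static int toy_lock;

    static void toy_acquire(void)
    {
            while (cmpxchg_acquire(&toy_lock, 0, 1) != 0)
                    cpu_relax();                    /* spin until we win the CAS */
    }

    static void toy_release(void)
    {
            smp_store_release(&toy_lock, 0);        /* pairs with the acquire above */
    }
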
/linux/arch/arm/lib/
memmove.S
82 6: W(nop)
83 W(ldr) r3, [r1, #-4]!
84 W(ldr) r4, [r1, #-4]!
85 W(ldr) r5, [r1, #-4]!
86 W(ldr) r6, [r1, #-4]!
87 W(ldr) r8, [r1, #-4]!
88 W(ldr) r9, [r1, #-4]!
89 W(ldr) lr, [r1, #-4]!
93 W(nop)
94 W(str) r3, [r0, #-4]!
[all …]
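
The memmove.S hits are the backward-copy path: when the destination overlaps the tail of the source, both pointers are pre-decremented (ldr/str with #-4! writeback) so the high words move before they are overwritten. The same decision in portable C looks roughly like this (a sketch, not the kernel's implementation):

    #include <stddef.h>

    static void *toy_memmove(void *dst, const void *src, size_t n)
    {
            unsigned char *d = dst;
            const unsigned char *s = src;

            if (d <= s || d >= s + n) {
                    while (n--)             /* no harmful overlap: copy forward */
                            *d++ = *s++;
            } else {
                    d += n;                 /* dst overlaps the tail of src:    */
                    s += n;                 /* copy backward, high bytes first  */
                    while (n--)
                            *--d = *--s;
            }
            return dst;
    }
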
/linux/arch/m68k/fpsp040/
slogn.S
436 |--LET V=U*U, W=V*V, CALCULATE
438 |--U + U*V*( [B1 + W*(B3 + W*B5)] + [V*(B2 + W*B4)] )
443 fmulx %fp1,%fp1 | ...FP1 IS W
448 fmulx %fp1,%fp3 | ...W*B5
449 fmulx %fp1,%fp2 | ...W*B4
451 faddd LOGB3,%fp3 | ...B3+W*B5
452 faddd LOGB2,%fp2 | ...B2+W*B4
454 fmulx %fp3,%fp1 | ...W*(B3+W*B5), FP3 RELEASED
458 faddd LOGB1,%fp1 | ...B1+W*(B3+W*B5)
461 faddx %fp2,%fp1 | ...B1+W*(B3+W*B5) + V*(B2+W*B4), FP2 RELEASED
[all …]
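
The slogn.S comments describe how the log(1+u) polynomial is evaluated: with V = U*U and W = V*V, the odd series U + B1*U^3 + B2*U^5 + B3*U^7 + B4*U^9 + B5*U^11 regroups as U + U*V*([B1 + W*(B3 + W*B5)] + V*(B2 + W*B4)), so the two coefficient chains can be computed independently. A C restatement of that grouping (the B coefficients below are placeholders, not the values in the FPSP table):

    /* Evaluate p(u) = u + B1*u^3 + B2*u^5 + B3*u^7 + B4*u^9 + B5*u^11
     * using the grouping from the slogn.S comments: v = u*u, w = v*v,
     * with the two partial sums built separately. */
    static double log_poly(double u)
    {
            const double B1 = 0.0, B2 = 0.0, B3 = 0.0, B4 = 0.0, B5 = 0.0; /* placeholders */
            double v = u * u;
            double w = v * v;
            double odd  = B1 + w * (B3 + w * B5);   /* B1 + B3*w + B5*w^2 */
            double even = v * (B2 + w * B4);        /* (B2 + B4*w) * v    */

            return u + u * v * (odd + even);
    }
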
/linux/arch/arm/boot/compressed/
head.S
213 W(b) 1f
1014 W(b) __armv4_mmu_cache_on
1089 W(b) __fa526_cache_on
1091 W(b) __fa526_cache_flush
1420 W(b) . @ reset
1421 W(b) . @ undef
1425 W(b) . @ svc
1427 W(b) . @ pabort
1428 W(b) . @ dabort
1430 W(b) . @ irq
[all …]
/linux/arch/arm/kernel/
entry-armv.S
906 3: W(b) . + 4
1075 W(b) vector_rst
1076 W(b) vector_und
1079 W(ldr) pc, .
1080 W(b) vector_pabt
1083 W(b) vector_irq
1084 W(b) vector_fiq
1089 W(b) vector_rst
1093 W(ldr) pc, .
1102 W(b) vector_rst
[all …]
/linux/arch/mips/n64/
init.c
51 #define W 320 macro
82 .width = W, in n64_platform_init()
84 .stride = W * 2, in n64_platform_init()
122 orig = kzalloc(W * H * 2 + 63, GFP_DMA | GFP_KERNEL); in n64_platform_init()
141 res[0].end = phys + W * H * 2 - 1; in n64_platform_init()
149 #undef W
/linux/drivers/atm/
Kconfig
86 when going from 8W to 16W bursts.
96 bool "Enable 8W TX bursts (recommended)"
103 bool "Enable 4W TX bursts (optional)"
107 this if you have disabled 8W bursts. Enabling 4W if 8W is also set
111 bool "Enable 2W TX bursts (optional)"
115 this if you have disabled 4W and 8W bursts. Enabling 2W if 4W or 8W
126 bool "Enable 8W RX bursts (discouraged)"
134 bool "Enable 4W RX bursts (recommended)"
138 default setting. Enabling 4W if 8W is also set may or may not
142 bool "Enable 2W RX bursts (optional)"
[all …]
