#if defined(CRYPTOPP_DISABLE_GCM_ASM)
# undef CRYPTOPP_X86_ASM_AVAILABLE
# undef CRYPTOPP_X32_ASM_AVAILABLE
# undef CRYPTOPP_X64_ASM_AVAILABLE
# undef CRYPTOPP_SSE2_ASM_AVAILABLE
#endif

#if (CRYPTOPP_SSE2_INTRIN_AVAILABLE)
# include <emmintrin.h>
# include <xmmintrin.h>
#endif

#if (CRYPTOPP_CLMUL_AVAILABLE)
# include <tmmintrin.h>
# include <wmmintrin.h>
#endif
#if (CRYPTOPP_ARM_NEON_HEADER)
# include <arm_neon.h>
#endif

#if defined(CRYPTOPP_ARM_PMULL_AVAILABLE)
# include "arm_simd.h"
#endif

#if defined(CRYPTOPP_ALTIVEC_AVAILABLE)
# include "ppc_simd.h"
#endif

#ifdef CRYPTOPP_GNU_STYLE_INLINE_ASSEMBLY
# include <signal.h>
# include <setjmp.h>
#endif

#ifndef EXCEPTION_EXECUTE_HANDLER
# define EXCEPTION_EXECUTE_HANDLER 1
#endif
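// This file provides the SIMD implementations of GCM: runtime CPU feature
// probes plus the GHASH multiply/reduce kernels for x86 CLMUL, ARMv8 PMULL
// and POWER8 VMULL. It is kept as a separate translation unit because some
// build configurations need extra architecture flags to enable these
// intrinsics.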
// Squash MS LNK4221 and libtool warnings
extern const char GCM_SIMD_FNAME[] = __FILE__;

NAMESPACE_BEGIN(CryptoPP)
#ifdef CRYPTOPP_GNU_STYLE_INLINE_ASSEMBLY
extern "C" {
    typedef void (*SigHandler)(int);

    static jmp_buf s_jmpSIGILL;
    static void SigIllHandler(int)
    {
        longjmp(s_jmpSIGILL, 1);
    }
}
#endif  // CRYPTOPP_GNU_STYLE_INLINE_ASSEMBLY
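// Runtime probes for PMULL/VMULL follow. Each probe executes the target
// instruction on known inputs: MS-style builds catch a faulting instruction
// with SEH (__try/__except), while GNU-style builds install a SIGILL handler
// and longjmp back out if the instruction is unavailable. 'volatile' keeps
// the compiler from caching values across the longjmp.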
#if (CRYPTOPP_BOOL_ARM32 || CRYPTOPP_BOOL_ARMV8)
bool CPU_ProbePMULL()
{
#if defined(CRYPTOPP_NO_CPU_FEATURE_PROBES)
    return false;
#elif (CRYPTOPP_ARM_PMULL_AVAILABLE)
# if defined(CRYPTOPP_MS_STYLE_INLINE_ASSEMBLY)
    volatile bool result = true;
    __try
    {
        const uint64_t wa1[]={0,0x9090909090909090}, wb1[]={0,0xb0b0b0b0b0b0b0b0};
        const uint64x2_t a1=vld1q_u64(wa1), b1=vld1q_u64(wb1);

        const uint8_t wa2[]={0x80,0x80,0x80,0x80,0x80,0x80,0x80,0x80,
                             0xa0,0xa0,0xa0,0xa0,0xa0,0xa0,0xa0,0xa0},
                      wb2[]={0xc0,0xc0,0xc0,0xc0,0xc0,0xc0,0xc0,0xc0,
                             0xe0,0xe0,0xe0,0xe0,0xe0,0xe0,0xe0,0xe0};
        const uint8x16_t a2=vld1q_u8(wa2), b2=vld1q_u8(wb2);

        const uint64x2_t r1 = PMULL_00(a1, b1);
        const uint64x2_t r2 = PMULL_11(vreinterpretq_u64_u8(a2),
                                       vreinterpretq_u64_u8(b2));

        result = !!(vgetq_lane_u64(r1,0) == 0x5300530053005300 &&
                    vgetq_lane_u64(r1,1) == 0x5300530053005300 &&
                    vgetq_lane_u64(r2,0) == 0x6c006c006c006c00 &&
                    vgetq_lane_u64(r2,1) == 0x6c006c006c006c00);
    }
    __except (EXCEPTION_EXECUTE_HANDLER)
    {
        return false;
    }
    return result;
# else
    // longjmp and clobber warnings. Volatile is required.
    volatile bool result = true;

    volatile SigHandler oldHandler = signal(SIGILL, SigIllHandler);
    if (oldHandler == SIG_ERR)
        return false;

    volatile sigset_t oldMask;
    if (sigprocmask(0, NULLPTR, (sigset_t*)&oldMask))
    {
        signal(SIGILL, oldHandler);
        return false;
    }

    if (setjmp(s_jmpSIGILL))
        result = false;
    else
    {
        const uint64_t wa1[]={0,0x9090909090909090}, wb1[]={0,0xb0b0b0b0b0b0b0b0};
        const uint64x2_t a1=vld1q_u64(wa1), b1=vld1q_u64(wb1);

        const uint8_t wa2[]={0x80,0x80,0x80,0x80,0x80,0x80,0x80,0x80,
                             0xa0,0xa0,0xa0,0xa0,0xa0,0xa0,0xa0,0xa0},
                      wb2[]={0xc0,0xc0,0xc0,0xc0,0xc0,0xc0,0xc0,0xc0,
                             0xe0,0xe0,0xe0,0xe0,0xe0,0xe0,0xe0,0xe0};
        const uint8x16_t a2=vld1q_u8(wa2), b2=vld1q_u8(wb2);

        const uint64x2_t r1 = PMULL_00(a1, b1);
        const uint64x2_t r2 = PMULL_11(vreinterpretq_u64_u8(a2),
                                       vreinterpretq_u64_u8(b2));

        result = !!(vgetq_lane_u64(r1,0) == 0x5300530053005300 &&
                    vgetq_lane_u64(r1,1) == 0x5300530053005300 &&
                    vgetq_lane_u64(r2,0) == 0x6c006c006c006c00 &&
                    vgetq_lane_u64(r2,1) == 0x6c006c006c006c00);
    }

    sigprocmask(SIG_SETMASK, (sigset_t*)&oldMask, NULLPTR);
    signal(SIGILL, oldHandler);
    return result;
# endif
#else
    return false;
#endif  // CRYPTOPP_ARM_PMULL_AVAILABLE
}
#endif  // CRYPTOPP_BOOL_ARM32 || CRYPTOPP_BOOL_ARMV8
#if (CRYPTOPP_BOOL_PPC32 || CRYPTOPP_BOOL_PPC64)
bool CPU_ProbePMULL()
{
#if defined(CRYPTOPP_NO_CPU_FEATURE_PROBES)
    return false;
#elif (CRYPTOPP_POWER8_VMULL_AVAILABLE)
    // longjmp and clobber warnings. Volatile is required.
    volatile bool result = true;

    volatile SigHandler oldHandler = signal(SIGILL, SigIllHandler);
    if (oldHandler == SIG_ERR)
        return false;

    volatile sigset_t oldMask;
    if (sigprocmask(0, NULLPTR, (sigset_t*)&oldMask))
    {
        signal(SIGILL, oldHandler);
        return false;
    }

    if (setjmp(s_jmpSIGILL))
        result = false;
    else
    {
        const uint64_t wa1[]={0,W64LIT(0x9090909090909090)},
                       wb1[]={0,W64LIT(0xb0b0b0b0b0b0b0b0)};
        const uint64x2_p a1=(uint64x2_p)VecLoad(wa1), b1=(uint64x2_p)VecLoad(wb1);

        const uint8_t wa2[]={0x80,0x80,0x80,0x80,0x80,0x80,0x80,0x80,
                             0xa0,0xa0,0xa0,0xa0,0xa0,0xa0,0xa0,0xa0},
                      wb2[]={0xc0,0xc0,0xc0,0xc0,0xc0,0xc0,0xc0,0xc0,
                             0xe0,0xe0,0xe0,0xe0,0xe0,0xe0,0xe0,0xe0};
        const uint8x16_p a2=(uint8x16_p)VecLoad(wa2), b2=(uint8x16_p)VecLoad(wb2);

        const uint64x2_p r1 = VecIntelMultiply00(a1, b1);
        const uint64x2_p r2 = VecIntelMultiply11((uint64x2_p)a2, (uint64x2_p)b2);

        const uint64_t wc1[]={W64LIT(0x5300530053005300), W64LIT(0x5300530053005300)},
                       wc2[]={W64LIT(0x6c006c006c006c00), W64LIT(0x6c006c006c006c00)};
        const uint64x2_p e1=(uint64x2_p)VecLoad(wc1), e2=(uint64x2_p)VecLoad(wc2);

        result = !!(VecEqual(r1, e1) && VecEqual(r2, e2));
    }

    sigprocmask(SIG_SETMASK, (sigset_t*)&oldMask, NULLPTR);
    signal(SIGILL, oldHandler);
    return result;
#else
    return false;
#endif  // CRYPTOPP_POWER8_VMULL_AVAILABLE
}
#endif  // CRYPTOPP_BOOL_PPC32 || CRYPTOPP_BOOL_PPC64
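// Both CPU_ProbePMULL probes use the same known-answer test. The per-byte
// carry-less products are 0x90*0xb0 = 0x5300 and 0xa0*0xe0 = 0x6c00 in
// GF(2)[x]. Because (x^0 + x^8 + ... + x^56)^2 = x^0 + x^16 + ... + x^112
// over GF(2), multiplying byte-replicated operands tiles the 16-bit product
// into every 16-bit slot, giving 0x5300530053005300 and 0x6c006c006c006c00.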
#if CRYPTOPP_ARM_NEON_AVAILABLE
void GCM_Xor16_NEON(byte *a, const byte *b, const byte *c)
{
    vst1q_u8(a, veorq_u8(vld1q_u8(b), vld1q_u8(c)));
}
#endif  // CRYPTOPP_ARM_NEON_AVAILABLE
#if CRYPTOPP_ARM_PMULL_AVAILABLE

// Swap the high and low 64-bit words of a vector
inline uint64x2_t SwapWords(const uint64x2_t& data)
{
    return (uint64x2_t)vcombine_u64(
        vget_high_u64(data), vget_low_u64(data));
}
uint64x2_t GCM_Reduce_PMULL(uint64x2_t c0, uint64x2_t c1, uint64x2_t c2,
                            const uint64x2_t &r)
{
    c1 = veorq_u64(c1, VEXT_U8<8>(vdupq_n_u64(0), c0));
    c1 = veorq_u64(c1, PMULL_01(c0, r));
    c0 = VEXT_U8<8>(c0, vdupq_n_u64(0));
    c0 = vshlq_n_u64(veorq_u64(c0, c1), 1);
    c0 = PMULL_00(c0, r);
    c2 = veorq_u64(c2, c0);
    c2 = veorq_u64(c2, VEXT_U8<8>(c1, vdupq_n_u64(0)));
    c1 = vshrq_n_u64(vcombine_u64(vget_low_u64(c1), vget_low_u64(c2)), 63);
    c2 = vshlq_n_u64(c2, 1);

    return veorq_u64(c2, c1);
}
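// GCM_Reduce_PMULL above folds a 256-bit carry-less product (c0*x^128 +
// c1*x^64 + c2 in GHASH's bit-reflected representation) back to 128 bits
// modulo x^128 + x^7 + x^2 + x + 1. The lanes of r hold the fold constants
// 0xe100000000000000 and 0xc200000000000000 (the latter is the former
// shifted left one bit), which encode that modulus for the two folds.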
uint64x2_t GCM_Multiply_PMULL(const uint64x2_t &x, const uint64x2_t &h,
                              const uint64x2_t &r)
{
    const uint64x2_t c0 = PMULL_00(x, h);
    const uint64x2_t c1 = veorq_u64(PMULL_10(x, h), PMULL_01(x, h));
    const uint64x2_t c2 = PMULL_11(x, h);

    return GCM_Reduce_PMULL(c0, c1, c2, r);
}
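// GCM_Multiply_PMULL above is a schoolbook 128x128 -> 256 bit carry-less
// multiply built from four 64x64 PMULL products: c0 = lo*lo, c2 = hi*hi,
// and c1 collects the two cross products before the result is reduced.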
void GCM_SetKeyWithoutResync_PMULL(const byte *hashKey, byte *mulTable,
                                   unsigned int tableSize)
{
    const uint64x2_t r = {0xe100000000000000ull, 0xc200000000000000ull};
    const uint64x2_t t = vreinterpretq_u64_u8(vrev64q_u8(vld1q_u8(hashKey)));
    const uint64x2_t h0 = vextq_u64(t, t, 1);

    uint64x2_t h = h0;
    unsigned int i;
    for (i=0; i<tableSize-32; i+=32)
    {
        const uint64x2_t h1 = GCM_Multiply_PMULL(h, h0, r);
        vst1_u64(UINT64_CAST(mulTable+i), vget_low_u64(h));
        vst1q_u64(UINT64_CAST(mulTable+i+16), h1);
        vst1q_u64(UINT64_CAST(mulTable+i+8), h);
        vst1_u64(UINT64_CAST(mulTable+i+8), vget_low_u64(h1));
        h = GCM_Multiply_PMULL(h1, h0, r);
    }

    const uint64x2_t h1 = GCM_Multiply_PMULL(h, h0, r);
    vst1_u64(UINT64_CAST(mulTable+i), vget_low_u64(h));
    vst1q_u64(UINT64_CAST(mulTable+i+16), h1);
    vst1q_u64(UINT64_CAST(mulTable+i+8), h);
    vst1_u64(UINT64_CAST(mulTable+i+8), vget_low_u64(h1));
}
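// The four overlapping stores leave each 32-byte table entry laid out as
// [h.lo | h1.lo | h.hi | h1.hi] for two consecutive powers of H, so the
// authenticate loop's h0 load picks up both low halves and its h1 load
// both high halves.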
size_t GCM_AuthenticateBlocks_PMULL(const byte *data, size_t len,
                                    const byte *mtable, byte *hbuffer)
{
    const uint64x2_t r = {0xe100000000000000ull, 0xc200000000000000ull};
    uint64x2_t x = vreinterpretq_u64_u8(vld1q_u8(hbuffer));

    while (len >= 16)
    {
        size_t i=0, s = UnsignedMin(len/16U, 8U);
        uint64x2_t d1, d2 = vreinterpretq_u64_u8(vrev64q_u8(vld1q_u8(data+(s-1)*16U)));
        uint64x2_t c0 = vdupq_n_u64(0);
        uint64x2_t c1 = vdupq_n_u64(0);
        uint64x2_t c2 = vdupq_n_u64(0);

        while (true)
        {
            const uint64x2_t h0 = vld1q_u64(CONST_UINT64_CAST(mtable+(i+0)*16));
            const uint64x2_t h1 = vld1q_u64(CONST_UINT64_CAST(mtable+(i+1)*16));
            const uint64x2_t h2 = veorq_u64(h0, h1);

            if (++i == s)
            {
                const uint64x2_t t1 = vreinterpretq_u64_u8(vrev64q_u8(vld1q_u8(data)));
                d1 = veorq_u64(vextq_u64(t1, t1, 1), x);
                c0 = veorq_u64(c0, PMULL_00(d1, h0));
                c2 = veorq_u64(c2, PMULL_10(d1, h1));
                d1 = veorq_u64(d1, SwapWords(d1));
                c1 = veorq_u64(c1, PMULL_00(d1, h2));
                break;
            }

            d1 = vreinterpretq_u64_u8(vrev64q_u8(vld1q_u8(data+(s-i)*16-8)));
            c0 = veorq_u64(c0, PMULL_10(d2, h0));
            c2 = veorq_u64(c2, PMULL_10(d1, h1));
            d2 = veorq_u64(d2, d1);
            c1 = veorq_u64(c1, PMULL_10(d2, h2));

            if (++i == s)
            {
                const uint64x2_t t2 = vreinterpretq_u64_u8(vrev64q_u8(vld1q_u8(data)));
                d1 = veorq_u64(vextq_u64(t2, t2, 1), x);
                c0 = veorq_u64(c0, PMULL_01(d1, h0));
                c2 = veorq_u64(c2, PMULL_11(d1, h1));
                d1 = veorq_u64(d1, SwapWords(d1));
                c1 = veorq_u64(c1, PMULL_01(d1, h2));
                break;
            }

            const uint64x2_t t3 = vreinterpretq_u64_u8(vrev64q_u8(vld1q_u8(data+(s-i)*16-8)));
            d2 = vextq_u64(t3, t3, 1);
            c0 = veorq_u64(c0, PMULL_01(d1, h0));
            c2 = veorq_u64(c2, PMULL_01(d2, h1));
            d1 = veorq_u64(d1, d2);
            c1 = veorq_u64(c1, PMULL_01(d1, h2));
        }
        data += s*16;
        len -= s*16;

        c1 = veorq_u64(veorq_u64(c1, c0), c2);
        x = GCM_Reduce_PMULL(c0, c1, c2, r);
    }

    vst1q_u64(UINT64_CAST(hbuffer), x);
    return len;
}
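// The loop above consumes up to eight blocks per pass against the
// precomputed powers of H, deferring reduction. c1 accumulates products of
// half-sums (d.lo^d.hi)*(h.lo^h.hi), so the final c1 ^= c0 ^ c2 is the
// Karatsuba fix-up that recovers the middle 64-bit columns.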
void GCM_ReverseHashBufferIfNeeded_PMULL(byte *hashBuffer)
{
    if (GetNativeByteOrder() != BIG_ENDIAN_ORDER)
    {
        const uint8x16_t x = vrev64q_u8(vld1q_u8(hashBuffer));
        vst1q_u8(hashBuffer, vextq_u8(x, x, 8));
    }
}
#endif  // CRYPTOPP_ARM_PMULL_AVAILABLE
#if CRYPTOPP_SSE2_INTRIN_AVAILABLE || CRYPTOPP_SSE2_ASM_AVAILABLE
// Kept out-of-line to work around a SunCC 5.10-5.11 compiler crash
void GCM_Xor16_SSE2(byte *a, const byte *b, const byte *c)
{
# if CRYPTOPP_SSE2_ASM_AVAILABLE && defined(__GNUC__)
    asm ("movdqa %1, %%xmm0; pxor %2, %%xmm0; movdqa %%xmm0, %0;"
         : "=m" (a[0]) : "m"(b[0]), "m"(c[0]));
# else  // CRYPTOPP_SSE2_INTRIN_AVAILABLE
    _mm_store_si128(M128_CAST(a), _mm_xor_si128(
        _mm_load_si128(CONST_M128_CAST(b)),
        _mm_load_si128(CONST_M128_CAST(c))));
# endif
}
#endif  // CRYPTOPP_SSE2_ASM_AVAILABLE
#if CRYPTOPP_CLMUL_AVAILABLE

#if 0
// preserved for testing
void gcm_gf_mult(const unsigned char *a, const unsigned char *b, unsigned char *c)
{
    word64 Z0=0, Z1=0, V0, V1;

    typedef BlockGetAndPut<word64, BigEndian> Block;
    Block::Get(a)(V0)(V1);

    for (int i=0; i<16; i++)
    {
        for (int j=0x80; j!=0; j>>=1)
        {
            int x = b[i] & j;
            Z0 ^= x ? V0 : 0;
            Z1 ^= x ? V1 : 0;
            x = (int)V1 & 1;
            V1 = (V1>>1) | (V0<<63);
            V0 = (V0>>1) ^ (x ? W64LIT(0xe1) << 56 : 0);
        }
    }
    Block::Put(NULLPTR, c)(Z0)(Z1);
}

__m128i _mm_clmulepi64_si128(const __m128i &a, const __m128i &b, int i)
{
    word64 A[1] = {ByteReverse(((word64*)&a)[i&1])};
    word64 B[1] = {ByteReverse(((word64*)&b)[i>>4])};

    PolynomialMod2 pa((byte *)A, 8);
    PolynomialMod2 pb((byte *)B, 8);
    PolynomialMod2 c = pa*pb;

    __m128i output;
    for (int i=0; i<16; i++)
        ((byte *)&output)[i] = c.GetByte(i);
    return output;
}
#endif  // Testing
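// The two functions above are slow reference implementations kept for
// testing: gcm_gf_mult is the bitwise GHASH multiply from the GCM spec
// (shift V right one bit at a time with 0xe1 feedback), and the soft
// _mm_clmulepi64_si128 models Intel's carry-less multiply with
// PolynomialMod2 so the CLMUL code paths can be checked on any machine.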
inline __m128i SwapWords(const __m128i& val)
{
    return _mm_shuffle_epi32(val, _MM_SHUFFLE(1, 0, 3, 2));
}
inline __m128i GCM_Reduce_CLMUL(__m128i c0, __m128i c1, __m128i c2,
                                const __m128i& r)
{
    c1 = _mm_xor_si128(c1, _mm_slli_si128(c0, 8));
    c1 = _mm_xor_si128(c1, _mm_clmulepi64_si128(c0, r, 0x10));
    c0 = _mm_xor_si128(c1, _mm_srli_si128(c0, 8));
    c0 = _mm_slli_epi64(c0, 1);
    c0 = _mm_clmulepi64_si128(c0, r, 0);
    c2 = _mm_xor_si128(c2, c0);
    c2 = _mm_xor_si128(c2, _mm_srli_si128(c1, 8));
    c1 = _mm_unpacklo_epi64(c1, c2);
    c1 = _mm_srli_epi64(c1, 63);
    c2 = _mm_slli_epi64(c2, 1);

    return _mm_xor_si128(c2, c1);
}
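// Same folding as GCM_Reduce_PMULL: immediate 0x10 multiplies against the
// 0xc2... half of r and immediate 0 against the 0xe1... half, collapsing
// c0*x^128 + c1*x^64 + c2 modulo the GHASH polynomial.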
__m128i GCM_Multiply_CLMUL(const __m128i &x, const __m128i &h,
                           const __m128i &r)
{
    const __m128i c0 = _mm_clmulepi64_si128(x, h, 0);
    const __m128i c1 = _mm_xor_si128(_mm_clmulepi64_si128(x, h, 1),
                                     _mm_clmulepi64_si128(x, h, 0x10));
    const __m128i c2 = _mm_clmulepi64_si128(x, h, 0x11);

    return GCM_Reduce_CLMUL(c0, c1, c2, r);
}
void GCM_SetKeyWithoutResync_CLMUL(const byte *hashKey, byte *mulTable,
                                   unsigned int tableSize)
{
    const __m128i r = _mm_set_epi32(0xc2000000, 0x00000000, 0xe1000000, 0x00000000);
    const __m128i m = _mm_set_epi32(0x00010203, 0x04050607, 0x08090a0b, 0x0c0d0e0f);
    __m128i h0 = _mm_shuffle_epi8(_mm_load_si128(CONST_M128_CAST(hashKey)), m), h = h0;

    unsigned int i;
    for (i=0; i<tableSize-32; i+=32)
    {
        const __m128i h1 = GCM_Multiply_CLMUL(h, h0, r);
        _mm_storel_epi64(M128_CAST(mulTable+i), h);
        _mm_storeu_si128(M128_CAST(mulTable+i+16), h1);
        _mm_storeu_si128(M128_CAST(mulTable+i+8), h);
        _mm_storel_epi64(M128_CAST(mulTable+i+8), h1);
        h = GCM_Multiply_CLMUL(h1, h0, r);
    }

    const __m128i h1 = GCM_Multiply_CLMUL(h, h0, r);
    _mm_storel_epi64(M128_CAST(mulTable+i), h);
    _mm_storeu_si128(M128_CAST(mulTable+i+16), h1);
    _mm_storeu_si128(M128_CAST(mulTable+i+8), h);
    _mm_storel_epi64(M128_CAST(mulTable+i+8), h1);
}
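// The overlapping stores produce the same interleaved table layout as the
// PMULL version: each 32-byte entry ends up as [h.lo | h1.lo | h.hi | h1.hi].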
size_t GCM_AuthenticateBlocks_CLMUL(const byte *data, size_t len,
                                    const byte *mtable, byte *hbuffer)
{
    const __m128i r = _mm_set_epi32(0xc2000000, 0x00000000, 0xe1000000, 0x00000000);
    const __m128i m1 = _mm_set_epi32(0x00010203, 0x04050607, 0x08090a0b, 0x0c0d0e0f);
    const __m128i m2 = _mm_set_epi32(0x08090a0b, 0x0c0d0e0f, 0x00010203, 0x04050607);
    __m128i x = _mm_load_si128(M128_CAST(hbuffer));

    while (len >= 16)
    {
        size_t i=0, s = UnsignedMin(len/16, 8U);
        __m128i d1 = _mm_loadu_si128(CONST_M128_CAST(data+(s-1)*16));
        __m128i d2 = _mm_shuffle_epi8(d1, m2);
        __m128i c0 = _mm_setzero_si128();
        __m128i c1 = _mm_setzero_si128();
        __m128i c2 = _mm_setzero_si128();

        while (true)
        {
            const __m128i h0 = _mm_load_si128(CONST_M128_CAST(mtable+(i+0)*16));
            const __m128i h1 = _mm_load_si128(CONST_M128_CAST(mtable+(i+1)*16));
            const __m128i h2 = _mm_xor_si128(h0, h1);

            if (++i == s)
            {
                d1 = _mm_shuffle_epi8(_mm_loadu_si128(CONST_M128_CAST(data)), m1);
                d1 = _mm_xor_si128(d1, x);
                c0 = _mm_xor_si128(c0, _mm_clmulepi64_si128(d1, h0, 0));
                c2 = _mm_xor_si128(c2, _mm_clmulepi64_si128(d1, h1, 1));
                d1 = _mm_xor_si128(d1, SwapWords(d1));
                c1 = _mm_xor_si128(c1, _mm_clmulepi64_si128(d1, h2, 0));
                break;
            }

            d1 = _mm_shuffle_epi8(_mm_loadu_si128(CONST_M128_CAST(data+(s-i)*16-8)), m2);
            c0 = _mm_xor_si128(c0, _mm_clmulepi64_si128(d2, h0, 1));
            c2 = _mm_xor_si128(c2, _mm_clmulepi64_si128(d1, h1, 1));
            d2 = _mm_xor_si128(d2, d1);
            c1 = _mm_xor_si128(c1, _mm_clmulepi64_si128(d2, h2, 1));

            if (++i == s)
            {
                d1 = _mm_shuffle_epi8(_mm_loadu_si128(CONST_M128_CAST(data)), m1);
                d1 = _mm_xor_si128(d1, x);
                c0 = _mm_xor_si128(c0, _mm_clmulepi64_si128(d1, h0, 0x10));
                c2 = _mm_xor_si128(c2, _mm_clmulepi64_si128(d1, h1, 0x11));
                d1 = _mm_xor_si128(d1, SwapWords(d1));
                c1 = _mm_xor_si128(c1, _mm_clmulepi64_si128(d1, h2, 0x10));
                break;
            }

            d2 = _mm_shuffle_epi8(_mm_loadu_si128(CONST_M128_CAST(data+(s-i)*16-8)), m1);
            c0 = _mm_xor_si128(c0, _mm_clmulepi64_si128(d1, h0, 0x10));
            c2 = _mm_xor_si128(c2, _mm_clmulepi64_si128(d2, h1, 0x10));
            d1 = _mm_xor_si128(d1, d2);
            c1 = _mm_xor_si128(c1, _mm_clmulepi64_si128(d1, h2, 0x10));
        }
        data += s*16;
        len -= s*16;

        c1 = _mm_xor_si128(_mm_xor_si128(c1, c0), c2);
        x = GCM_Reduce_CLMUL(c0, c1, c2, r);
    }

    _mm_store_si128(M128_CAST(hbuffer), x);
    return len;
}
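// m1 reverses all 16 bytes of a block and m2 reverses each 8-byte half;
// they put the data into the bit-reflected order the CLMUL products expect.
// Batching, the deferred reduction and the Karatsuba fix-up mirror the
// PMULL loop above.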
void GCM_ReverseHashBufferIfNeeded_CLMUL(byte *hashBuffer)
{
    // SSSE3 instruction, but only used with CLMUL
    const __m128i mask = _mm_set_epi32(0x00010203, 0x04050607, 0x08090a0b, 0x0c0d0e0f);
    _mm_storeu_si128(M128_CAST(hashBuffer), _mm_shuffle_epi8(
        _mm_loadu_si128(CONST_M128_CAST(hashBuffer)), mask));
}
#endif  // CRYPTOPP_CLMUL_AVAILABLE
#if CRYPTOPP_POWER8_AVAILABLE
void GCM_Xor16_POWER8(byte *a, const byte *b, const byte *c)
{
    VecStore(VecXor(VecLoad(b), VecLoad(c)), a);
}
#endif  // CRYPTOPP_POWER8_AVAILABLE
#if CRYPTOPP_POWER8_VMULL_AVAILABLE

uint64x2_p GCM_Reduce_VMULL(uint64x2_p c0, uint64x2_p c1, uint64x2_p c2, uint64x2_p r)
{
    const uint64x2_p m1 = {1,1}, m63 = {63,63};

    c1 = VecXor(c1, VecShiftRightOctet<8>(c0));
    c1 = VecXor(c1, VecIntelMultiply10(c0, r));
    c0 = VecXor(c1, VecShiftLeftOctet<8>(c0));
    c0 = VecIntelMultiply00(vec_sl(c0, m1), r);
    c2 = VecXor(c2, c0);
    c2 = VecXor(c2, VecShiftLeftOctet<8>(c1));
    c1 = vec_sr(vec_mergeh(c1, c2), m63);
    c2 = vec_sl(c2, m1);

    return VecXor(c2, c1);
}

uint64x2_p GCM_Multiply_VMULL(uint64x2_p x, uint64x2_p h, uint64x2_p r)
{
    const uint64x2_p c0 = VecIntelMultiply00(x, h);
    const uint64x2_p c1 = VecXor(VecIntelMultiply01(x, h), VecIntelMultiply10(x, h));
    const uint64x2_p c2 = VecIntelMultiply11(x, h);

    return GCM_Reduce_VMULL(c0, c1, c2, r);
}
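// The VecIntelMultiplyXY helpers model _mm_clmulepi64_si128 with immediate
// (X<<4)|Y on top of POWER8's vpmsumd, so GCM_Reduce_VMULL and
// GCM_Multiply_VMULL are close ports of the CLMUL routines, with octet
// shifts standing in for the byte-wise 128-bit shifts.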
inline uint64x2_p LoadHashKey(const byte *hashKey)
{
#if (CRYPTOPP_BIG_ENDIAN)
    const uint64x2_p key = (uint64x2_p)VecLoad(hashKey);
    const uint8x16_p mask = {8,9,10,11, 12,13,14,15, 0,1,2,3, 4,5,6,7};
    return VecPermute(key, key, mask);
#else
    const uint64x2_p key = (uint64x2_p)VecLoad(hashKey);
    const uint8x16_p mask = {15,14,13,12, 11,10,9,8, 7,6,5,4, 3,2,1,0};
    return VecPermute(key, key, mask);
#endif
}
void GCM_SetKeyWithoutResync_VMULL(const byte *hashKey, byte *mulTable,
                                   unsigned int tableSize)
{
    const uint64x2_p r = {0xe100000000000000ull, 0xc200000000000000ull};
    uint64x2_p h = LoadHashKey(hashKey), h0 = h;

    unsigned int i;
    uint64_t temp[2];

    for (i=0; i<tableSize-32; i+=32)
    {
        const uint64x2_p h1 = GCM_Multiply_VMULL(h, h0, r);
        VecStore(h, (byte*)temp);
        std::memcpy(mulTable+i, temp+0, 8);
        VecStore(h1, mulTable+i+16);
        VecStore(h, mulTable+i+8);
        VecStore(h1, (byte*)temp);
        std::memcpy(mulTable+i+8, temp+0, 8);
        h = GCM_Multiply_VMULL(h1, h0, r);
    }

    const uint64x2_p h1 = GCM_Multiply_VMULL(h, h0, r);
    VecStore(h, (byte*)temp);
    std::memcpy(mulTable+i, temp+0, 8);
    VecStore(h1, mulTable+i+16);
    VecStore(h, mulTable+i+8);
    VecStore(h1, (byte*)temp);
    std::memcpy(mulTable+i+8, temp+0, 8);
}
template <class T>
inline T SwapWords(const T& data)
{
    return (T)VecRotateLeftOctet<8>(data);
}

inline uint64x2_p LoadBuffer1(const byte *dataBuffer)
{
#if (CRYPTOPP_BIG_ENDIAN)
    return (uint64x2_p)VecLoad(dataBuffer);
#else
    const uint64x2_p data = (uint64x2_p)VecLoad(dataBuffer);
    const uint8x16_p mask = {7,6,5,4, 3,2,1,0, 15,14,13,12, 11,10,9,8};
    return VecPermute(data, data, mask);
#endif
}

inline uint64x2_p LoadBuffer2(const byte *dataBuffer)
{
#if (CRYPTOPP_BIG_ENDIAN)
    return (uint64x2_p)SwapWords(VecLoadBE(dataBuffer));
#else
    return (uint64x2_p)VecLoadBE(dataBuffer);
#endif
}
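// LoadBuffer1 and LoadBuffer2 absorb the endian differences so the
// authenticate loop below is endian-agnostic; they appear to deliver the
// half-block-reversed and whole-block-reversed orderings that the m2 and m1
// shuffles provide in the CLMUL path.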
size_t GCM_AuthenticateBlocks_VMULL(const byte *data, size_t len,
                                    const byte *mtable, byte *hbuffer)
{
    const uint64x2_p r = {0xe100000000000000ull, 0xc200000000000000ull};
    uint64x2_p x = (uint64x2_p)VecLoad(hbuffer);

    while (len >= 16)
    {
        size_t i=0, s = UnsignedMin(len/16, 8U);
        uint64x2_p d1, d2 = LoadBuffer1(data+(s-1)*16);
        uint64x2_p c0 = {0,0}, c1 = {0,0}, c2 = {0,0};

        while (true)
        {
            const uint64x2_p h0 = (uint64x2_p)VecLoad(mtable+(i+0)*16);
            const uint64x2_p h1 = (uint64x2_p)VecLoad(mtable+(i+1)*16);
            const uint64x2_p h2 = VecXor(h0, h1);

            if (++i == s)
            {
                d1 = LoadBuffer2(data);
                d1 = VecXor(d1, x);
                c0 = VecXor(c0, VecIntelMultiply00(d1, h0));
                c2 = VecXor(c2, VecIntelMultiply01(d1, h1));
                d1 = VecXor(d1, SwapWords(d1));
                c1 = VecXor(c1, VecIntelMultiply00(d1, h2));
                break;
            }

            d1 = LoadBuffer1(data+(s-i)*16-8);
            c0 = VecXor(c0, VecIntelMultiply01(d2, h0));
            c2 = VecXor(c2, VecIntelMultiply01(d1, h1));
            d2 = VecXor(d2, d1);
            c1 = VecXor(c1, VecIntelMultiply01(d2, h2));

            if (++i == s)
            {
                d1 = LoadBuffer2(data);
                d1 = VecXor(d1, x);
                c0 = VecXor(c0, VecIntelMultiply10(d1, h0));
                c2 = VecXor(c2, VecIntelMultiply11(d1, h1));
                d1 = VecXor(d1, SwapWords(d1));
                c1 = VecXor(c1, VecIntelMultiply10(d1, h2));
                break;
            }

            d2 = LoadBuffer2(data+(s-i)*16-8);
            c0 = VecXor(c0, VecIntelMultiply10(d1, h0));
            c2 = VecXor(c2, VecIntelMultiply10(d2, h1));
            d1 = VecXor(d1, d2);
            c1 = VecXor(c1, VecIntelMultiply10(d1, h2));
        }
        data += s*16;
        len -= s*16;

        c1 = VecXor(VecXor(c1, c0), c2);
        x = GCM_Reduce_VMULL(c0, c1, c2, r);
    }

    VecStore(x, hbuffer);
    return len;
}
void GCM_ReverseHashBufferIfNeeded_VMULL(byte *hashBuffer)
{
    const uint64x2_p mask = {0x08090a0b0c0d0e0full, 0x0001020304050607ull};
    VecStore(VecPermute(VecLoad(hashBuffer), (uint8x16_p)mask), hashBuffer);
}
#endif  // CRYPTOPP_POWER8_VMULL_AVAILABLE

NAMESPACE_END