Crypto++  8.6
Free C++ class library of cryptographic schemes
gcm_simd.cpp
1 // gcm_simd.cpp - written and placed in the public domain by
2 // Jeffrey Walton, Uri Blumenthal and Marcel Raad.
3 // Original x86 CLMUL by Wei Dai. ARM and POWER8
4 // PMULL and VMULL by JW, UB and MR.
5 //
6 // This source file uses intrinsics to gain access to x86 CLMUL,
7 // ARMv8a PMULL and POWER8 VMULL instructions. A separate source file
8 // is needed because additional CXXFLAGS are required to enable the
9 // appropriate instruction sets in some build configurations.
10 
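Because the definitions below need those extra ISA flags (with GCC and Clang typically something along the lines of -mssse3 -mpclmul for the CLMUL path or -march=armv8-a+crypto for PMULL; the exact flags depend on the compiler and build system), they live in their own translation unit and the rest of the library calls them only after a run-time feature check. A minimal caller-side sketch of that pattern, assuming the cpu.h feature queries and a hypothetical portable fallback:

    // Hypothetical dispatch sketch, not code from gcm.cpp: call the SIMD
    // routine defined later in this file only when the CPU supports it.
    #include "cpu.h"        // CryptoPP::HasSSE2() and related feature queries

    namespace CryptoPP {
        void GCM_Xor16_SSE2(byte *a, const byte *b, const byte *c);  // defined below
    }

    static void Xor16(CryptoPP::byte *a, const CryptoPP::byte *b, const CryptoPP::byte *c)
    {
        if (CryptoPP::HasSSE2())
            CryptoPP::GCM_Xor16_SSE2(a, b, c);   // SSE2 path from this file
        else
            for (size_t i = 0; i < 16; ++i)      // portable fallback (assumption)
                a[i] = b[i] ^ c[i];
    }

The GCM implementation proper makes an equivalent run-time selection; the helper above only illustrates why these definitions can sit behind extra compiler flags without forcing those flags on the whole library.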
11 #include "pch.h"
12 #include "config.h"
13 #include "misc.h"
14 
15 #if defined(CRYPTOPP_DISABLE_GCM_ASM)
16 # undef CRYPTOPP_X86_ASM_AVAILABLE
17 # undef CRYPTOPP_X32_ASM_AVAILABLE
18 # undef CRYPTOPP_X64_ASM_AVAILABLE
19 # undef CRYPTOPP_SSE2_ASM_AVAILABLE
20 #endif
21 
22 #if (CRYPTOPP_SSE2_INTRIN_AVAILABLE)
23 # include <emmintrin.h>
24 # include <xmmintrin.h>
25 #endif
26 
27 #if (CRYPTOPP_CLMUL_AVAILABLE)
28 # include <tmmintrin.h>
29 # include <wmmintrin.h>
30 #endif
31 
32 #if (CRYPTOPP_ARM_NEON_HEADER)
33 # include <stdint.h>
34 # include <arm_neon.h>
35 #endif
36 
37 #if defined(CRYPTOPP_ARM_PMULL_AVAILABLE)
38 # include "arm_simd.h"
39 #endif
40 
41 #if defined(CRYPTOPP_ALTIVEC_AVAILABLE)
42 # include "ppc_simd.h"
43 #endif
44 
45 #ifdef CRYPTOPP_GNU_STYLE_INLINE_ASSEMBLY
46 # include <signal.h>
47 # include <setjmp.h>
48 #endif
49 
50 #ifndef EXCEPTION_EXECUTE_HANDLER
51 # define EXCEPTION_EXECUTE_HANDLER 1
52 #endif
53 
54 // Squash MS LNK4221 and libtool warnings
55 extern const char GCM_SIMD_FNAME[] = __FILE__;
56 
57 NAMESPACE_BEGIN(CryptoPP)
58 
59 // ************************* Feature Probes ************************* //
60 
61 #ifdef CRYPTOPP_GNU_STYLE_INLINE_ASSEMBLY
62 extern "C" {
63  typedef void (*SigHandler)(int);
64 
65  static jmp_buf s_jmpSIGILL;
66  static void SigIllHandler(int)
67  {
68  longjmp(s_jmpSIGILL, 1);
69  }
70 }
71 #endif // Not CRYPTOPP_MS_STYLE_INLINE_ASSEMBLY
72 
73 #if (CRYPTOPP_BOOL_ARM32 || CRYPTOPP_BOOL_ARMV8)
74 bool CPU_ProbePMULL()
75 {
76 #if defined(CRYPTOPP_NO_CPU_FEATURE_PROBES)
77  return false;
78 #elif (CRYPTOPP_ARM_PMULL_AVAILABLE)
79 # if defined(CRYPTOPP_MS_STYLE_INLINE_ASSEMBLY)
80  volatile bool result = true;
81  __try
82  {
83  // Linaro is missing a lot of pmull gear. Also see http://github.com/weidai11/cryptopp/issues/233.
84  const uint64_t wa1[]={0,0x9090909090909090}, wb1[]={0,0xb0b0b0b0b0b0b0b0};
85  const uint64x2_t a1=vld1q_u64(wa1), b1=vld1q_u64(wb1);
86 
87  const uint8_t wa2[]={0x80,0x80,0x80,0x80,0x80,0x80,0x80,0x80,
88  0xa0,0xa0,0xa0,0xa0,0xa0,0xa0,0xa0,0xa0},
89  wb2[]={0xc0,0xc0,0xc0,0xc0,0xc0,0xc0,0xc0,0xc0,
90  0xe0,0xe0,0xe0,0xe0,0xe0,0xe0,0xe0,0xe0};
91  const uint8x16_t a2=vld1q_u8(wa2), b2=vld1q_u8(wb2);
92 
93  const uint64x2_t r1 = PMULL_00(a1, b1);
94  const uint64x2_t r2 = PMULL_11(vreinterpretq_u64_u8(a2),
95  vreinterpretq_u64_u8(b2));
96 
97  result = !!(vgetq_lane_u64(r1,0) == 0x5300530053005300 &&
98  vgetq_lane_u64(r1,1) == 0x5300530053005300 &&
99  vgetq_lane_u64(r2,0) == 0x6c006c006c006c00 &&
100  vgetq_lane_u64(r2,1) == 0x6c006c006c006c00);
101  }
102  __except (EXCEPTION_EXECUTE_HANDLER)
103  {
104  return false;
105  }
106  return result;
107 # else
108 
109  // longjmp and clobber warnings. Volatile is required.
110  volatile bool result = true;
111 
112  volatile SigHandler oldHandler = signal(SIGILL, SigIllHandler);
113  if (oldHandler == SIG_ERR)
114  return false;
115 
116  volatile sigset_t oldMask;
117  if (sigprocmask(0, NULLPTR, (sigset_t*)&oldMask))
118  {
119  signal(SIGILL, oldHandler);
120  return false;
121  }
122 
123  if (setjmp(s_jmpSIGILL))
124  result = false;
125  else
126  {
127  // Linaro is missing a lot of pmull gear. Also see http://github.com/weidai11/cryptopp/issues/233.
128  const uint64_t wa1[]={0,0x9090909090909090}, wb1[]={0,0xb0b0b0b0b0b0b0b0};
129  const uint64x2_t a1=vld1q_u64(wa1), b1=vld1q_u64(wb1);
130 
131  const uint8_t wa2[]={0x80,0x80,0x80,0x80,0x80,0x80,0x80,0x80,
132  0xa0,0xa0,0xa0,0xa0,0xa0,0xa0,0xa0,0xa0},
133  wb2[]={0xc0,0xc0,0xc0,0xc0,0xc0,0xc0,0xc0,0xc0,
134  0xe0,0xe0,0xe0,0xe0,0xe0,0xe0,0xe0,0xe0};
135  const uint8x16_t a2=vld1q_u8(wa2), b2=vld1q_u8(wb2);
136 
137  const uint64x2_t r1 = PMULL_00(a1, b1);
138  const uint64x2_t r2 = PMULL_11(vreinterpretq_u64_u8(a2),
139  vreinterpretq_u64_u8(b2));
140 
141  result = !!(vgetq_lane_u64(r1,0) == 0x5300530053005300 &&
142  vgetq_lane_u64(r1,1) == 0x5300530053005300 &&
143  vgetq_lane_u64(r2,0) == 0x6c006c006c006c00 &&
144  vgetq_lane_u64(r2,1) == 0x6c006c006c006c00);
145  }
146 
147  sigprocmask(SIG_SETMASK, (sigset_t*)&oldMask, NULLPTR);
148  signal(SIGILL, oldHandler);
149  return result;
150 # endif
151 #else
152  return false;
153 #endif // CRYPTOPP_ARM_PMULL_AVAILABLE
154 }
155 #endif // ARM32 or ARM64
156 
157 #if (CRYPTOPP_BOOL_PPC32 || CRYPTOPP_BOOL_PPC64)
158 bool CPU_ProbePMULL()
159 {
160 #if defined(CRYPTOPP_NO_CPU_FEATURE_PROBES)
161  return false;
162 #elif (CRYPTOPP_POWER8_VMULL_AVAILABLE)
163  // longjmp and clobber warnings. Volatile is required.
164  volatile bool result = true;
165 
166  volatile SigHandler oldHandler = signal(SIGILL, SigIllHandler);
167  if (oldHandler == SIG_ERR)
168  return false;
169 
170  volatile sigset_t oldMask;
171  if (sigprocmask(0, NULLPTR, (sigset_t*)&oldMask))
172  {
173  signal(SIGILL, oldHandler);
174  return false;
175  }
176 
177  if (setjmp(s_jmpSIGILL))
178  result = false;
179  else
180  {
181  const uint64_t wa1[]={0,W64LIT(0x9090909090909090)},
182  wb1[]={0,W64LIT(0xb0b0b0b0b0b0b0b0)};
183  const uint64x2_p a1=VecLoad(wa1), b1=VecLoad(wb1);
184 
185  const uint8_t wa2[]={0x80,0x80,0x80,0x80,0x80,0x80,0x80,0x80,
186  0xa0,0xa0,0xa0,0xa0,0xa0,0xa0,0xa0,0xa0},
187  wb2[]={0xc0,0xc0,0xc0,0xc0,0xc0,0xc0,0xc0,0xc0,
188  0xe0,0xe0,0xe0,0xe0,0xe0,0xe0,0xe0,0xe0};
189  const uint32x4_p a2=VecLoad(wa2), b2=VecLoad(wb2);
190 
191  const uint64x2_p r1 = VecIntelMultiply11(a1, b1);
192  const uint64x2_p r2 = VecIntelMultiply11((uint64x2_p)a2, (uint64x2_p)b2);
193 
194  const uint64_t wc1[]={W64LIT(0x5300530053005300), W64LIT(0x5300530053005300)},
195  wc2[]={W64LIT(0x6c006c006c006c00), W64LIT(0x6c006c006c006c00)};
196  const uint64x2_p c1=VecLoad(wc1), c2=VecLoad(wc2);
197 
198  result = !!(VecEqual(r1, c1) && VecEqual(r2, c2));
199  }
200 
201  sigprocmask(SIG_SETMASK, (sigset_t*)&oldMask, NULLPTR);
202  signal(SIGILL, oldHandler);
203  return result;
204 #else
205  return false;
206 #endif // CRYPTOPP_POWER8_VMULL_AVAILABLE
207 }
208 #endif // PPC32 or PPC64
209 
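The probes above execute a candidate instruction inside a __try/__except or SIGILL/longjmp guard so that an unsupported CPU faults harmlessly instead of crashing the process. They are intended to run once and be cached rather than invoked on every call; a small sketch of that caching pattern, with the wrapper name chosen here purely for illustration:

    // Illustrative cache around the probe (hypothetical helper, not cpu.cpp):
    // the C++11 function-local static runs the probe exactly once.
    #include "cpu.h"   // declares CPU_ProbePMULL() on ARM and PowerPC builds

    static bool HasCarrylessMultiply()
    {
        static const bool s_available = CryptoPP::CPU_ProbePMULL();
        return s_available;
    }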
210 // *************************** ARM NEON *************************** //
211 
212 #if CRYPTOPP_ARM_NEON_AVAILABLE
213 void GCM_Xor16_NEON(byte *a, const byte *b, const byte *c)
214 {
215  vst1q_u8(a, veorq_u8(vld1q_u8(b), vld1q_u8(c)));
216 }
217 #endif // CRYPTOPP_ARM_NEON_AVAILABLE
218 
219 #if CRYPTOPP_ARM_PMULL_AVAILABLE
220 
221 // Swaps high and low 64-bit words
222 inline uint64x2_t SwapWords(const uint64x2_t& data)
223 {
224  return (uint64x2_t)vcombine_u64(
225  vget_high_u64(data), vget_low_u64(data));
226 }
227 
228 uint64x2_t GCM_Reduce_PMULL(uint64x2_t c0, uint64x2_t c1, uint64x2_t c2, const uint64x2_t &r)
229 {
230  c1 = veorq_u64(c1, VEXT_U8<8>(vdupq_n_u64(0), c0));
231  c1 = veorq_u64(c1, PMULL_01(c0, r));
232  c0 = VEXT_U8<8>(c0, vdupq_n_u64(0));
233  c0 = vshlq_n_u64(veorq_u64(c0, c1), 1);
234  c0 = PMULL_00(c0, r);
235  c2 = veorq_u64(c2, c0);
236  c2 = veorq_u64(c2, VEXT_U8<8>(c1, vdupq_n_u64(0)));
237  c1 = vshrq_n_u64(vcombine_u64(vget_low_u64(c1), vget_low_u64(c2)), 63);
238  c2 = vshlq_n_u64(c2, 1);
239 
240  return veorq_u64(c2, c1);
241 }
242 
243 uint64x2_t GCM_Multiply_PMULL(const uint64x2_t &x, const uint64x2_t &h, const uint64x2_t &r)
244 {
245  const uint64x2_t c0 = PMULL_00(x, h);
246  const uint64x2_t c1 = veorq_u64(PMULL_10(x, h), PMULL_01(x, h));
247  const uint64x2_t c2 = PMULL_11(x, h);
248 
249  return GCM_Reduce_PMULL(c0, c1, c2, r);
250 }
251 
252 void GCM_SetKeyWithoutResync_PMULL(const byte *hashKey, byte *mulTable, unsigned int tableSize)
253 {
254  const uint64x2_t r = {0xe100000000000000ull, 0xc200000000000000ull};
255  const uint64x2_t t = vreinterpretq_u64_u8(vrev64q_u8(vld1q_u8(hashKey)));
256  const uint64x2_t h0 = vextq_u64(t, t, 1);
257 
258  uint64x2_t h = h0;
259  unsigned int i;
260  for (i=0; i<tableSize-32; i+=32)
261  {
262  const uint64x2_t h1 = GCM_Multiply_PMULL(h, h0, r);
263  vst1_u64(UINT64_CAST(mulTable+i), vget_low_u64(h));
264  vst1q_u64(UINT64_CAST(mulTable+i+16), h1);
265  vst1q_u64(UINT64_CAST(mulTable+i+8), h);
266  vst1_u64(UINT64_CAST(mulTable+i+8), vget_low_u64(h1));
267  h = GCM_Multiply_PMULL(h1, h0, r);
268  }
269 
270  const uint64x2_t h1 = GCM_Multiply_PMULL(h, h0, r);
271  vst1_u64(UINT64_CAST(mulTable+i), vget_low_u64(h));
272  vst1q_u64(UINT64_CAST(mulTable+i+16), h1);
273  vst1q_u64(UINT64_CAST(mulTable+i+8), h);
274  vst1_u64(UINT64_CAST(mulTable+i+8), vget_low_u64(h1));
275 }
276 
277 size_t GCM_AuthenticateBlocks_PMULL(const byte *data, size_t len, const byte *mtable, byte *hbuffer)
278 {
279  const uint64x2_t r = {0xe100000000000000ull, 0xc200000000000000ull};
280  uint64x2_t x = vreinterpretq_u64_u8(vld1q_u8(hbuffer));
281 
282  while (len >= 16)
283  {
284  size_t i=0, s = UnsignedMin(len/16U, 8U);
285  uint64x2_t d1, d2 = vreinterpretq_u64_u8(vrev64q_u8(vld1q_u8(data+(s-1)*16U)));
286  uint64x2_t c0 = vdupq_n_u64(0);
287  uint64x2_t c1 = vdupq_n_u64(0);
288  uint64x2_t c2 = vdupq_n_u64(0);
289 
290  while (true)
291  {
292  const uint64x2_t h0 = vld1q_u64(CONST_UINT64_CAST(mtable+(i+0)*16));
293  const uint64x2_t h1 = vld1q_u64(CONST_UINT64_CAST(mtable+(i+1)*16));
294  const uint64x2_t h2 = veorq_u64(h0, h1);
295 
296  if (++i == s)
297  {
298  const uint64x2_t t1 = vreinterpretq_u64_u8(vrev64q_u8(vld1q_u8(data)));
299  d1 = veorq_u64(vextq_u64(t1, t1, 1), x);
300  c0 = veorq_u64(c0, PMULL_00(d1, h0));
301  c2 = veorq_u64(c2, PMULL_10(d1, h1));
302  d1 = veorq_u64(d1, SwapWords(d1));
303  c1 = veorq_u64(c1, PMULL_00(d1, h2));
304 
305  break;
306  }
307 
308  d1 = vreinterpretq_u64_u8(vrev64q_u8(vld1q_u8(data+(s-i)*16-8)));
309  c0 = veorq_u64(c0, PMULL_10(d2, h0));
310  c2 = veorq_u64(c2, PMULL_10(d1, h1));
311  d2 = veorq_u64(d2, d1);
312  c1 = veorq_u64(c1, PMULL_10(d2, h2));
313 
314  if (++i == s)
315  {
316  const uint64x2_t t2 = vreinterpretq_u64_u8(vrev64q_u8(vld1q_u8(data)));
317  d1 = veorq_u64(vextq_u64(t2, t2, 1), x);
318  c0 = veorq_u64(c0, PMULL_01(d1, h0));
319  c2 = veorq_u64(c2, PMULL_11(d1, h1));
320  d1 = veorq_u64(d1, SwapWords(d1));
321  c1 = veorq_u64(c1, PMULL_01(d1, h2));
322 
323  break;
324  }
325 
326  const uint64x2_t t3 = vreinterpretq_u64_u8(vrev64q_u8(vld1q_u8(data+(s-i)*16-8)));
327  d2 = vextq_u64(t3, t3, 1);
328  c0 = veorq_u64(c0, PMULL_01(d1, h0));
329  c2 = veorq_u64(c2, PMULL_01(d2, h1));
330  d1 = veorq_u64(d1, d2);
331  c1 = veorq_u64(c1, PMULL_01(d1, h2));
332  }
333  data += s*16;
334  len -= s*16;
335 
336  c1 = veorq_u64(veorq_u64(c1, c0), c2);
337  x = GCM_Reduce_PMULL(c0, c1, c2, r);
338  }
339 
340  vst1q_u64(UINT64_CAST(hbuffer), x);
341  return len;
342 }
343 
344 void GCM_ReverseHashBufferIfNeeded_PMULL(byte *hashBuffer)
345 {
346  if (GetNativeByteOrder() != BIG_ENDIAN_ORDER)
347  {
348  const uint8x16_t x = vrev64q_u8(vld1q_u8(hashBuffer));
349  vst1q_u8(hashBuffer, vextq_u8(x, x, 8));
350  }
351 }
352 #endif // CRYPTOPP_ARM_PMULL_AVAILABLE
353 
354 // ***************************** SSE ***************************** //
355 
356 #if CRYPTOPP_SSE2_INTRIN_AVAILABLE || CRYPTOPP_SSE2_ASM_AVAILABLE
357 // SunCC 5.10-5.11 compiler crash. Move GCM_Xor16_SSE2 out-of-line, and place in
358 // a source file with a SSE architecture switch. Also see GH #226 and GH #284.
359 void GCM_Xor16_SSE2(byte *a, const byte *b, const byte *c)
360 {
361 # if CRYPTOPP_SSE2_ASM_AVAILABLE && defined(__GNUC__)
362  asm ("movdqa %1, %%xmm0; pxor %2, %%xmm0; movdqa %%xmm0, %0;"
363  : "=m" (a[0]) : "m"(b[0]), "m"(c[0]));
364 # else // CRYPTOPP_SSE2_INTRIN_AVAILABLE
365  _mm_store_si128(M128_CAST(a), _mm_xor_si128(
366  _mm_load_si128(CONST_M128_CAST(b)),
367  _mm_load_si128(CONST_M128_CAST(c))));
368 # endif
369 }
370 #endif // CRYPTOPP_SSE2_INTRIN_AVAILABLE || CRYPTOPP_SSE2_ASM_AVAILABLE
371 
372 #if CRYPTOPP_CLMUL_AVAILABLE
373 
374 #if 0
375 // preserved for testing
376 void gcm_gf_mult(const unsigned char *a, const unsigned char *b, unsigned char *c)
377 {
378  word64 Z0=0, Z1=0, V0, V1;
379 
380  typedef BlockGetAndPut<word64, BigEndian> Block;
381  Block::Get(a)(V0)(V1);
382 
383  for (int i=0; i<16; i++)
384  {
385  for (int j=0x80; j!=0; j>>=1)
386  {
387  int x = b[i] & j;
388  Z0 ^= x ? V0 : 0;
389  Z1 ^= x ? V1 : 0;
390  x = (int)V1 & 1;
391  V1 = (V1>>1) | (V0<<63);
392  V0 = (V0>>1) ^ (x ? W64LIT(0xe1) << 56 : 0);
393  }
394  }
395  Block::Put(NULLPTR, c)(Z0)(Z1);
396 }
397 
398 __m128i _mm_clmulepi64_si128(const __m128i &a, const __m128i &b, int i)
399 {
400  word64 A[1] = {ByteReverse(((word64*)&a)[i&1])};
401  word64 B[1] = {ByteReverse(((word64*)&b)[i>>4])};
402 
403  PolynomialMod2 pa((byte *)A, 8);
404  PolynomialMod2 pb((byte *)B, 8);
405  PolynomialMod2 c = pa*pb;
406 
407  __m128i output;
408  for (int i=0; i<16; i++)
409  ((byte *)&output)[i] = c.GetByte(i);
410  return output;
411 }
412 #endif // Testing
413 
414 // Swaps high and low 64-bit words
415 inline __m128i SwapWords(const __m128i& val)
416 {
417  return _mm_shuffle_epi32(val, _MM_SHUFFLE(1, 0, 3, 2));
418 }
419 
420 // SunCC 5.11-5.15 compiler crash. Make the function inline
421 // and parameters non-const. Also see GH #188 and GH #224.
422 inline __m128i GCM_Reduce_CLMUL(__m128i c0, __m128i c1, __m128i c2, const __m128i& r)
423 {
424  /*
425  The polynomial to be reduced is c0 * x^128 + c1 * x^64 + c2. c0t below refers to the most
426  significant half of c0 as a polynomial, which, due to GCM's bit reflection, are in the
427  rightmost bit positions, and the lowest byte addresses.
428 
429  c1 ^= c0t * 0xc200000000000000
430  c2t ^= c0t
431  t = shift (c1t ^ c0b) left 1 bit
432  c2 ^= t * 0xe100000000000000
433  c2t ^= c1b
434  shift c2 left 1 bit and xor in lowest bit of c1t
435  */
436  c1 = _mm_xor_si128(c1, _mm_slli_si128(c0, 8));
437  c1 = _mm_xor_si128(c1, _mm_clmulepi64_si128(c0, r, 0x10));
438  c0 = _mm_xor_si128(c1, _mm_srli_si128(c0, 8));
439  c0 = _mm_slli_epi64(c0, 1);
440  c0 = _mm_clmulepi64_si128(c0, r, 0);
441  c2 = _mm_xor_si128(c2, c0);
442  c2 = _mm_xor_si128(c2, _mm_srli_si128(c1, 8));
443  c1 = _mm_unpacklo_epi64(c1, c2);
444  c1 = _mm_srli_epi64(c1, 63);
445  c2 = _mm_slli_epi64(c2, 1);
446  return _mm_xor_si128(c2, c1);
447 }
448 
449 // SunCC 5.13-5.14 compiler crash. Don't make the function inline.
450 // This is in contrast to GCM_Reduce_CLMUL, which must be inline.
451 __m128i GCM_Multiply_CLMUL(const __m128i &x, const __m128i &h, const __m128i &r)
452 {
453  const __m128i c0 = _mm_clmulepi64_si128(x,h,0);
454  const __m128i c1 = _mm_xor_si128(_mm_clmulepi64_si128(x,h,1), _mm_clmulepi64_si128(x,h,0x10));
455  const __m128i c2 = _mm_clmulepi64_si128(x,h,0x11);
456 
457  return GCM_Reduce_CLMUL(c0, c1, c2, r);
458 }
459 
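The comment inside GCM_Reduce_CLMUL describes folding c0 * x^128 + c1 * x^64 + c2 back to 128 bits; the c0/c1/c2 terms themselves come from a schoolbook split of the 128-bit operands into 64-bit halves, with GHASH's bit-reflected representation swapping which products count as "high" and "low". A standalone reference sketch of that decomposition (test-style code written for illustration, not taken from the library):

    // Bitwise 64x64 -> 128 carry-less (GF(2)[x]) multiply, reference only.
    #include <cstdint>

    static void clmul64(uint64_t a, uint64_t b, uint64_t &lo, uint64_t &hi)
    {
        lo = hi = 0;
        for (int i = 0; i < 64; ++i)
            if ((b >> i) & 1) {
                lo ^= a << i;
                if (i) hi ^= a >> (64 - i);
            }
    }

    // 256-bit product of x = x1:x0 and h = h1:h0 as words w[0] (low) .. w[3] (high),
    // assembled from the same terms GCM_Multiply_CLMUL computes with
    // _mm_clmulepi64_si128: c0 = x0*h0, c1 = x1*h0 ^ x0*h1, c2 = x1*h1.
    static void clmul128(const uint64_t x[2], const uint64_t h[2], uint64_t w[4])
    {
        uint64_t c0l, c0h, c1l, c1h, c2l, c2h, tl, th;
        clmul64(x[0], h[0], c0l, c0h);
        clmul64(x[1], h[1], c2l, c2h);
        clmul64(x[1], h[0], c1l, c1h);
        clmul64(x[0], h[1], tl, th);
        c1l ^= tl; c1h ^= th;

        w[0] = c0l;
        w[1] = c0h ^ c1l;   // the middle term enters at a 64-bit offset
        w[2] = c2l ^ c1h;
        w[3] = c2h;
    }

GCM_Reduce_CLMUL (and its PMULL and VMULL twins above and below) then folds this 256-bit value back to a 128-bit field element modulo the GCM polynomial.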
460 void GCM_SetKeyWithoutResync_CLMUL(const byte *hashKey, byte *mulTable, unsigned int tableSize)
461 {
462  const __m128i r = _mm_set_epi32(0xc2000000, 0x00000000, 0xe1000000, 0x00000000);
463  const __m128i m = _mm_set_epi32(0x00010203, 0x04050607, 0x08090a0b, 0x0c0d0e0f);
464  __m128i h0 = _mm_shuffle_epi8(_mm_load_si128(CONST_M128_CAST(hashKey)), m), h = h0;
465 
466  unsigned int i;
467  for (i=0; i<tableSize-32; i+=32)
468  {
469  const __m128i h1 = GCM_Multiply_CLMUL(h, h0, r);
470  _mm_storel_epi64(M128_CAST(mulTable+i), h);
471  _mm_storeu_si128(M128_CAST(mulTable+i+16), h1);
472  _mm_storeu_si128(M128_CAST(mulTable+i+8), h);
473  _mm_storel_epi64(M128_CAST(mulTable+i+8), h1);
474  h = GCM_Multiply_CLMUL(h1, h0, r);
475  }
476 
477  const __m128i h1 = GCM_Multiply_CLMUL(h, h0, r);
478  _mm_storel_epi64(M128_CAST(mulTable+i), h);
479  _mm_storeu_si128(M128_CAST(mulTable+i+16), h1);
480  _mm_storeu_si128(M128_CAST(mulTable+i+8), h);
481  _mm_storel_epi64(M128_CAST(mulTable+i+8), h1);
482 }
483 
484 size_t GCM_AuthenticateBlocks_CLMUL(const byte *data, size_t len, const byte *mtable, byte *hbuffer)
485 {
486  const __m128i r = _mm_set_epi32(0xc2000000, 0x00000000, 0xe1000000, 0x00000000);
487  const __m128i m1 = _mm_set_epi32(0x00010203, 0x04050607, 0x08090a0b, 0x0c0d0e0f);
488  const __m128i m2 = _mm_set_epi32(0x08090a0b, 0x0c0d0e0f, 0x00010203, 0x04050607);
489  __m128i x = _mm_load_si128(M128_CAST(hbuffer));
490 
491  while (len >= 16)
492  {
493  size_t i=0, s = UnsignedMin(len/16, 8U);
494  __m128i d1 = _mm_loadu_si128(CONST_M128_CAST(data+(s-1)*16));
495  __m128i d2 = _mm_shuffle_epi8(d1, m2);
496  __m128i c0 = _mm_setzero_si128();
497  __m128i c1 = _mm_setzero_si128();
498  __m128i c2 = _mm_setzero_si128();
499 
500  while (true)
501  {
502  const __m128i h0 = _mm_load_si128(CONST_M128_CAST(mtable+(i+0)*16));
503  const __m128i h1 = _mm_load_si128(CONST_M128_CAST(mtable+(i+1)*16));
504  const __m128i h2 = _mm_xor_si128(h0, h1);
505 
506  if (++i == s)
507  {
508  d1 = _mm_shuffle_epi8(_mm_loadu_si128(CONST_M128_CAST(data)), m1);
509  d1 = _mm_xor_si128(d1, x);
510  c0 = _mm_xor_si128(c0, _mm_clmulepi64_si128(d1, h0, 0));
511  c2 = _mm_xor_si128(c2, _mm_clmulepi64_si128(d1, h1, 1));
512  d1 = _mm_xor_si128(d1, SwapWords(d1));
513  c1 = _mm_xor_si128(c1, _mm_clmulepi64_si128(d1, h2, 0));
514  break;
515  }
516 
517  d1 = _mm_shuffle_epi8(_mm_loadu_si128(CONST_M128_CAST(data+(s-i)*16-8)), m2);
518  c0 = _mm_xor_si128(c0, _mm_clmulepi64_si128(d2, h0, 1));
519  c2 = _mm_xor_si128(c2, _mm_clmulepi64_si128(d1, h1, 1));
520  d2 = _mm_xor_si128(d2, d1);
521  c1 = _mm_xor_si128(c1, _mm_clmulepi64_si128(d2, h2, 1));
522 
523  if (++i == s)
524  {
525  d1 = _mm_shuffle_epi8(_mm_loadu_si128(CONST_M128_CAST(data)), m1);
526  d1 = _mm_xor_si128(d1, x);
527  c0 = _mm_xor_si128(c0, _mm_clmulepi64_si128(d1, h0, 0x10));
528  c2 = _mm_xor_si128(c2, _mm_clmulepi64_si128(d1, h1, 0x11));
529  d1 = _mm_xor_si128(d1, SwapWords(d1));
530  c1 = _mm_xor_si128(c1, _mm_clmulepi64_si128(d1, h2, 0x10));
531  break;
532  }
533 
534  d2 = _mm_shuffle_epi8(_mm_loadu_si128(CONST_M128_CAST(data+(s-i)*16-8)), m1);
535  c0 = _mm_xor_si128(c0, _mm_clmulepi64_si128(d1, h0, 0x10));
536  c2 = _mm_xor_si128(c2, _mm_clmulepi64_si128(d2, h1, 0x10));
537  d1 = _mm_xor_si128(d1, d2);
538  c1 = _mm_xor_si128(c1, _mm_clmulepi64_si128(d1, h2, 0x10));
539  }
540  data += s*16;
541  len -= s*16;
542 
543  c1 = _mm_xor_si128(_mm_xor_si128(c1, c0), c2);
544  x = GCM_Reduce_CLMUL(c0, c1, c2, r);
545  }
546 
547  _mm_store_si128(M128_CAST(hbuffer), x);
548  return len;
549 }
550 
551 void GCM_ReverseHashBufferIfNeeded_CLMUL(byte *hashBuffer)
552 {
553  // SSSE3 instruction, but only used with CLMUL
554  const __m128i mask = _mm_set_epi32(0x00010203, 0x04050607, 0x08090a0b, 0x0c0d0e0f);
555  _mm_storeu_si128(M128_CAST(hashBuffer), _mm_shuffle_epi8(
556  _mm_loadu_si128(CONST_M128_CAST(hashBuffer)), mask));
557 }
558 #endif // CRYPTOPP_CLMUL_AVAILABLE
559 
560 // ***************************** POWER8 ***************************** //
561 
562 #if CRYPTOPP_POWER8_AVAILABLE
563 void GCM_Xor16_POWER8(byte *a, const byte *b, const byte *c)
564 {
565  VecStore(VecXor(VecLoad(b), VecLoad(c)), a);
566 }
567 #endif // CRYPTOPP_POWER8_AVAILABLE
568 
569 #if CRYPTOPP_POWER8_VMULL_AVAILABLE
570 
571 uint64x2_p GCM_Reduce_VMULL(uint64x2_p c0, uint64x2_p c1, uint64x2_p c2, uint64x2_p r)
572 {
573  const uint64x2_p m1 = {1,1}, m63 = {63,63};
574 
575  c1 = VecXor(c1, VecShiftRightOctet<8>(c0));
576  c1 = VecXor(c1, VecIntelMultiply10(c0, r));
577  c0 = VecXor(c1, VecShiftLeftOctet<8>(c0));
578  c0 = VecIntelMultiply00(vec_sl(c0, m1), r);
579  c2 = VecXor(c2, c0);
580  c2 = VecXor(c2, VecShiftLeftOctet<8>(c1));
581  c1 = vec_sr(vec_mergeh(c1, c2), m63);
582  c2 = vec_sl(c2, m1);
583 
584  return VecXor(c2, c1);
585 }
586 
587 inline uint64x2_p GCM_Multiply_VMULL(uint64x2_p x, uint64x2_p h, uint64x2_p r)
588 {
589  const uint64x2_p c0 = VecIntelMultiply00(x, h);
590  const uint64x2_p c1 = VecXor(VecIntelMultiply01(x, h), VecIntelMultiply10(x, h));
591  const uint64x2_p c2 = VecIntelMultiply11(x, h);
592 
593  return GCM_Reduce_VMULL(c0, c1, c2, r);
594 }
595 
596 inline uint64x2_p LoadHashKey(const byte *hashKey)
597 {
598 #if (CRYPTOPP_BIG_ENDIAN)
599  const uint64x2_p key = (uint64x2_p)VecLoad(hashKey);
600  const uint8x16_p mask = {8,9,10,11, 12,13,14,15, 0,1,2,3, 4,5,6,7};
601  return VecPermute(key, key, mask);
602 #else
603  const uint64x2_p key = (uint64x2_p)VecLoad(hashKey);
604  const uint8x16_p mask = {15,14,13,12, 11,10,9,8, 7,6,5,4, 3,2,1,0};
605  return VecPermute(key, key, mask);
606 #endif
607 }
608 
609 void GCM_SetKeyWithoutResync_VMULL(const byte *hashKey, byte *mulTable, unsigned int tableSize)
610 {
611  const uint64x2_p r = {0xe100000000000000ull, 0xc200000000000000ull};
612  uint64x2_p h = LoadHashKey(hashKey), h0 = h;
613 
614  unsigned int i;
615  uint64_t temp[2];
616 
617  for (i=0; i<tableSize-32; i+=32)
618  {
619  const uint64x2_p h1 = GCM_Multiply_VMULL(h, h0, r);
620  VecStore(h, (byte*)temp);
621  std::memcpy(mulTable+i, temp+0, 8);
622  VecStore(h1, mulTable+i+16);
623  VecStore(h, mulTable+i+8);
624  VecStore(h1, (byte*)temp);
625  std::memcpy(mulTable+i+8, temp+0, 8);
626  h = GCM_Multiply_VMULL(h1, h0, r);
627  }
628 
629  const uint64x2_p h1 = GCM_Multiply_VMULL(h, h0, r);
630  VecStore(h, (byte*)temp);
631  std::memcpy(mulTable+i, temp+0, 8);
632  VecStore(h1, mulTable+i+16);
633  VecStore(h, mulTable+i+8);
634  VecStore(h1, (byte*)temp);
635  std::memcpy(mulTable+i+8, temp+0, 8);
636 }
637 
638 // Swaps high and low 64-bit words
639 template <class T>
640 inline T SwapWords(const T& data)
641 {
642  return (T)VecRotateLeftOctet<8>(data);
643 }
644 
645 inline uint64x2_p LoadBuffer1(const byte *dataBuffer)
646 {
647 #if (CRYPTOPP_BIG_ENDIAN)
648  return (uint64x2_p)VecLoad(dataBuffer);
649 #else
650  const uint64x2_p data = (uint64x2_p)VecLoad(dataBuffer);
651  const uint8x16_p mask = {7,6,5,4, 3,2,1,0, 15,14,13,12, 11,10,9,8};
652  return VecPermute(data, data, mask);
653 #endif
654 }
655 
656 inline uint64x2_p LoadBuffer2(const byte *dataBuffer)
657 {
658 #if (CRYPTOPP_BIG_ENDIAN)
659  return (uint64x2_p)SwapWords(VecLoadBE(dataBuffer));
660 #else
661  return (uint64x2_p)VecLoadBE(dataBuffer);
662 #endif
663 }
664 
665 size_t GCM_AuthenticateBlocks_VMULL(const byte *data, size_t len, const byte *mtable, byte *hbuffer)
666 {
667  const uint64x2_p r = {0xe100000000000000ull, 0xc200000000000000ull};
668  uint64x2_p x = (uint64x2_p)VecLoad(hbuffer);
669 
670  while (len >= 16)
671  {
672  size_t i=0, s = UnsignedMin(len/16, 8U);
673  uint64x2_p d1, d2 = LoadBuffer1(data+(s-1)*16);
674  uint64x2_p c0 = {0}, c1 = {0}, c2 = {0};
675 
676  while (true)
677  {
678  const uint64x2_p h0 = (uint64x2_p)VecLoad(mtable+(i+0)*16);
679  const uint64x2_p h1 = (uint64x2_p)VecLoad(mtable+(i+1)*16);
680  const uint64x2_p h2 = (uint64x2_p)VecXor(h0, h1);
681 
682  if (++i == s)
683  {
684  d1 = LoadBuffer2(data);
685  d1 = VecXor(d1, x);
686  c0 = VecXor(c0, VecIntelMultiply00(d1, h0));
687  c2 = VecXor(c2, VecIntelMultiply01(d1, h1));
688  d1 = VecXor(d1, SwapWords(d1));
689  c1 = VecXor(c1, VecIntelMultiply00(d1, h2));
690  break;
691  }
692 
693  d1 = LoadBuffer1(data+(s-i)*16-8);
694  c0 = VecXor(c0, VecIntelMultiply01(d2, h0));
695  c2 = VecXor(c2, VecIntelMultiply01(d1, h1));
696  d2 = VecXor(d2, d1);
697  c1 = VecXor(c1, VecIntelMultiply01(d2, h2));
698 
699  if (++i == s)
700  {
701  d1 = LoadBuffer2(data);
702  d1 = VecXor(d1, x);
703  c0 = VecXor(c0, VecIntelMultiply10(d1, h0));
704  c2 = VecXor(c2, VecIntelMultiply11(d1, h1));
705  d1 = VecXor(d1, SwapWords(d1));
706  c1 = VecXor(c1, VecIntelMultiply10(d1, h2));
707  break;
708  }
709 
710  d2 = LoadBuffer2(data+(s-i)*16-8);
711  c0 = VecXor(c0, VecIntelMultiply10(d1, h0));
712  c2 = VecXor(c2, VecIntelMultiply10(d2, h1));
713  d1 = VecXor(d1, d2);
714  c1 = VecXor(c1, VecIntelMultiply10(d1, h2));
715  }
716  data += s*16;
717  len -= s*16;
718 
719  c1 = VecXor(VecXor(c1, c0), c2);
720  x = GCM_Reduce_VMULL(c0, c1, c2, r);
721  }
722 
723  VecStore(x, hbuffer);
724  return len;
725 }
726 
727 void GCM_ReverseHashBufferIfNeeded_VMULL(byte *hashBuffer)
728 {
729  const uint64x2_p mask = {0x08090a0b0c0d0e0full, 0x0001020304050607ull};
730  VecStore(VecPermute(VecLoad(hashBuffer), mask), hashBuffer);
731 }
732 #endif // CRYPTOPP_POWER8_VMULL_AVAILABLE
733 
734 NAMESPACE_END