#if (defined(__aarch32__) || defined(__aarch64__)) && defined(CRYPTOPP_SLOW_ARMV8_SHIFT)
# undef CRYPTOPP_ARM_NEON_AVAILABLE
#endif

#if defined(__xlC__) && (__xlC__ < 0x0d01)
# define CRYPTOPP_DISABLE_ALTIVEC 1
# undef CRYPTOPP_POWER8_AVAILABLE
# undef CRYPTOPP_ALTIVEC_AVAILABLE
#endif

#if defined(__XOP__)
# include <ammintrin.h>
# if defined(__GNUC__)
#  include <x86intrin.h>
# endif
#endif

#if (CRYPTOPP_SSE41_AVAILABLE)
# include <emmintrin.h>
# include <tmmintrin.h>
# include <smmintrin.h>
#endif

#if (CRYPTOPP_ARM_NEON_HEADER)
# include <arm_neon.h>
#endif

#if (CRYPTOPP_ARM_ACLE_HEADER)
# include <stdint.h>
# include <arm_acle.h>
#endif

#if (CRYPTOPP_POWER8_AVAILABLE)
# include "ppc_simd.h"
#endif

#if defined(CRYPTOPP_GCC_DIAGNOSTIC_AVAILABLE)
# pragma GCC diagnostic ignored "-Wdeprecated"
#endif

NAMESPACE_BEGIN(CryptoPP)

extern const char BLAKE2B_SIMD_FNAME[] = __FILE__;
extern const word32 BLAKE2S_IV[8];
extern const word64 BLAKE2B_IV[8];
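// Below are three implementations of the BLAKE2b compression function: one
// for SSE4.1, one for ARM NEON, and one for POWER8. Each keeps the sixteen
// 64-bit working words v[0..15] in eight 128-bit registers (row1l/h through
// row4l/h, the low and high halves of each row) and runs twelve rounds of the
// G function over the 128-byte message block. In the Crypto++ sources these
// bodies correspond to BLAKE2_Compress64_SSE4, BLAKE2_Compress64_NEON and
// BLAKE2_Compress64_POWER8.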
#if CRYPTOPP_SSE41_AVAILABLE

#define LOADU(p) _mm_loadu_si128((const __m128i *)(const void*)(p))
#define STOREU(p,r) _mm_storeu_si128((__m128i *)(void*)(p), r)
#define TOF(reg) _mm_castsi128_ps((reg))
#define TOI(reg) _mm_castps_si128((reg))

// BLAKE2B_LOAD_MSG_<r>_<i> gathers the message-word pairs consumed by the
// i-th pair of G applications in round r, following the BLAKE2b sigma
// permutation schedule.

#define BLAKE2B_LOAD_MSG_0_1(b0, b1) \
    do { b0 = _mm_unpacklo_epi64(m0, m1); b1 = _mm_unpacklo_epi64(m2, m3); } while(0)

#define BLAKE2B_LOAD_MSG_0_2(b0, b1) \
    do { b0 = _mm_unpackhi_epi64(m0, m1); b1 = _mm_unpackhi_epi64(m2, m3); } while(0)

#define BLAKE2B_LOAD_MSG_0_3(b0, b1) \
    do { b0 = _mm_unpacklo_epi64(m4, m5); b1 = _mm_unpacklo_epi64(m6, m7); } while(0)

#define BLAKE2B_LOAD_MSG_0_4(b0, b1) \
    do { b0 = _mm_unpackhi_epi64(m4, m5); b1 = _mm_unpackhi_epi64(m6, m7); } while(0)

#define BLAKE2B_LOAD_MSG_1_1(b0, b1) \
    do { b0 = _mm_unpacklo_epi64(m7, m2); b1 = _mm_unpackhi_epi64(m4, m6); } while(0)

#define BLAKE2B_LOAD_MSG_1_2(b0, b1) \
    do { b0 = _mm_unpacklo_epi64(m5, m4); b1 = _mm_alignr_epi8(m3, m7, 8); } while(0)

#define BLAKE2B_LOAD_MSG_1_3(b0, b1) \
    do { b0 = _mm_shuffle_epi32(m0, _MM_SHUFFLE(1,0,3,2)); b1 = _mm_unpackhi_epi64(m5, m2); } while(0)

#define BLAKE2B_LOAD_MSG_1_4(b0, b1) \
    do { b0 = _mm_unpacklo_epi64(m6, m1); b1 = _mm_unpackhi_epi64(m3, m1); } while(0)

#define BLAKE2B_LOAD_MSG_2_1(b0, b1) \
    do { b0 = _mm_alignr_epi8(m6, m5, 8); b1 = _mm_unpackhi_epi64(m2, m7); } while(0)

#define BLAKE2B_LOAD_MSG_2_2(b0, b1) \
    do { b0 = _mm_unpacklo_epi64(m4, m0); b1 = _mm_blend_epi16(m1, m6, 0xF0); } while(0)

#define BLAKE2B_LOAD_MSG_2_3(b0, b1) \
    do { b0 = _mm_blend_epi16(m5, m1, 0xF0); b1 = _mm_unpackhi_epi64(m3, m4); } while(0)

#define BLAKE2B_LOAD_MSG_2_4(b0, b1) \
    do { b0 = _mm_unpacklo_epi64(m7, m3); b1 = _mm_alignr_epi8(m2, m0, 8); } while(0)

#define BLAKE2B_LOAD_MSG_3_1(b0, b1) \
    do { b0 = _mm_unpackhi_epi64(m3, m1); b1 = _mm_unpackhi_epi64(m6, m5); } while(0)

#define BLAKE2B_LOAD_MSG_3_2(b0, b1) \
    do { b0 = _mm_unpackhi_epi64(m4, m0); b1 = _mm_unpacklo_epi64(m6, m7); } while(0)

#define BLAKE2B_LOAD_MSG_3_3(b0, b1) \
    do { b0 = _mm_blend_epi16(m1, m2, 0xF0); b1 = _mm_blend_epi16(m2, m7, 0xF0); } while(0)

#define BLAKE2B_LOAD_MSG_3_4(b0, b1) \
    do { b0 = _mm_unpacklo_epi64(m3, m5); b1 = _mm_unpacklo_epi64(m0, m4); } while(0)

#define BLAKE2B_LOAD_MSG_4_1(b0, b1) \
    do { b0 = _mm_unpackhi_epi64(m4, m2); b1 = _mm_unpacklo_epi64(m1, m5); } while(0)

#define BLAKE2B_LOAD_MSG_4_2(b0, b1) \
    do { b0 = _mm_blend_epi16(m0, m3, 0xF0); b1 = _mm_blend_epi16(m2, m7, 0xF0); } while(0)

#define BLAKE2B_LOAD_MSG_4_3(b0, b1) \
    do { b0 = _mm_blend_epi16(m7, m5, 0xF0); b1 = _mm_blend_epi16(m3, m1, 0xF0); } while(0)

#define BLAKE2B_LOAD_MSG_4_4(b0, b1) \
    do { b0 = _mm_alignr_epi8(m6, m0, 8); b1 = _mm_blend_epi16(m4, m6, 0xF0); } while(0)

#define BLAKE2B_LOAD_MSG_5_1(b0, b1) \
    do { b0 = _mm_unpacklo_epi64(m1, m3); b1 = _mm_unpacklo_epi64(m0, m4); } while(0)

#define BLAKE2B_LOAD_MSG_5_2(b0, b1) \
    do { b0 = _mm_unpacklo_epi64(m6, m5); b1 = _mm_unpackhi_epi64(m5, m1); } while(0)

#define BLAKE2B_LOAD_MSG_5_3(b0, b1) \
    do { b0 = _mm_blend_epi16(m2, m3, 0xF0); b1 = _mm_unpackhi_epi64(m7, m0); } while(0)

#define BLAKE2B_LOAD_MSG_5_4(b0, b1) \
    do { b0 = _mm_unpackhi_epi64(m6, m2); b1 = _mm_blend_epi16(m7, m4, 0xF0); } while(0)

#define BLAKE2B_LOAD_MSG_6_1(b0, b1) \
    do { b0 = _mm_blend_epi16(m6, m0, 0xF0); b1 = _mm_unpacklo_epi64(m7, m2); } while(0)

#define BLAKE2B_LOAD_MSG_6_2(b0, b1) \
    do { b0 = _mm_unpackhi_epi64(m2, m7); b1 = _mm_alignr_epi8(m5, m6, 8); } while(0)

#define BLAKE2B_LOAD_MSG_6_3(b0, b1) \
    do { b0 = _mm_unpacklo_epi64(m0, m3); b1 = _mm_shuffle_epi32(m4, _MM_SHUFFLE(1,0,3,2)); } while(0)

#define BLAKE2B_LOAD_MSG_6_4(b0, b1) \
    do { b0 = _mm_unpackhi_epi64(m3, m1); b1 = _mm_blend_epi16(m1, m5, 0xF0); } while(0)

#define BLAKE2B_LOAD_MSG_7_1(b0, b1) \
    do { b0 = _mm_unpackhi_epi64(m6, m3); b1 = _mm_blend_epi16(m6, m1, 0xF0); } while(0)

#define BLAKE2B_LOAD_MSG_7_2(b0, b1) \
    do { b0 = _mm_alignr_epi8(m7, m5, 8); b1 = _mm_unpackhi_epi64(m0, m4); } while(0)

#define BLAKE2B_LOAD_MSG_7_3(b0, b1) \
    do { b0 = _mm_unpackhi_epi64(m2, m7); b1 = _mm_unpacklo_epi64(m4, m1); } while(0)

#define BLAKE2B_LOAD_MSG_7_4(b0, b1) \
    do { b0 = _mm_unpacklo_epi64(m0, m2); b1 = _mm_unpacklo_epi64(m3, m5); } while(0)

#define BLAKE2B_LOAD_MSG_8_1(b0, b1) \
    do { b0 = _mm_unpacklo_epi64(m3, m7); b1 = _mm_alignr_epi8(m0, m5, 8); } while(0)

#define BLAKE2B_LOAD_MSG_8_2(b0, b1) \
    do { b0 = _mm_unpackhi_epi64(m7, m4); b1 = _mm_alignr_epi8(m4, m1, 8); } while(0)

#define BLAKE2B_LOAD_MSG_8_3(b0, b1) \
    do { b0 = m6; b1 = _mm_alignr_epi8(m5, m0, 8); } while(0)

#define BLAKE2B_LOAD_MSG_8_4(b0, b1) \
    do { b0 = _mm_blend_epi16(m1, m3, 0xF0); b1 = m2; } while(0)

#define BLAKE2B_LOAD_MSG_9_1(b0, b1) \
    do { b0 = _mm_unpacklo_epi64(m5, m4); b1 = _mm_unpackhi_epi64(m3, m0); } while(0)

#define BLAKE2B_LOAD_MSG_9_2(b0, b1) \
    do { b0 = _mm_unpacklo_epi64(m1, m2); b1 = _mm_blend_epi16(m3, m2, 0xF0); } while(0)

#define BLAKE2B_LOAD_MSG_9_3(b0, b1) \
    do { b0 = _mm_unpackhi_epi64(m7, m4); b1 = _mm_unpackhi_epi64(m1, m6); } while(0)

#define BLAKE2B_LOAD_MSG_9_4(b0, b1) \
    do { b0 = _mm_alignr_epi8(m7, m5, 8); b1 = _mm_unpacklo_epi64(m6, m0); } while(0)

#define BLAKE2B_LOAD_MSG_10_1(b0, b1) \
    do { b0 = _mm_unpacklo_epi64(m0, m1); b1 = _mm_unpacklo_epi64(m2, m3); } while(0)

#define BLAKE2B_LOAD_MSG_10_2(b0, b1) \
    do { b0 = _mm_unpackhi_epi64(m0, m1); b1 = _mm_unpackhi_epi64(m2, m3); } while(0)

#define BLAKE2B_LOAD_MSG_10_3(b0, b1) \
    do { b0 = _mm_unpacklo_epi64(m4, m5); b1 = _mm_unpacklo_epi64(m6, m7); } while(0)

#define BLAKE2B_LOAD_MSG_10_4(b0, b1) \
    do { b0 = _mm_unpackhi_epi64(m4, m5); b1 = _mm_unpackhi_epi64(m6, m7); } while(0)

#define BLAKE2B_LOAD_MSG_11_1(b0, b1) \
    do { b0 = _mm_unpacklo_epi64(m7, m2); b1 = _mm_unpackhi_epi64(m4, m6); } while(0)

#define BLAKE2B_LOAD_MSG_11_2(b0, b1) \
    do { b0 = _mm_unpacklo_epi64(m5, m4); b1 = _mm_alignr_epi8(m3, m7, 8); } while(0)

#define BLAKE2B_LOAD_MSG_11_3(b0, b1) \
    do { b0 = _mm_shuffle_epi32(m0, _MM_SHUFFLE(1,0,3,2)); b1 = _mm_unpackhi_epi64(m5, m2); } while(0)

#define BLAKE2B_LOAD_MSG_11_4(b0, b1) \
    do { b0 = _mm_unpacklo_epi64(m6, m1); b1 = _mm_unpackhi_epi64(m3, m1); } while(0)

// 64-bit right rotation. With XOP the hardware rotate is used; otherwise the
// fixed distances 32, 24 and 16 are done with shuffles, 63 with add plus
// shift, and anything else with a shift/xor pair.
#if defined(__XOP__)
# define MM_ROTI_EPI64(r, c) \
    _mm_roti_epi64(r, c)
#else
# define MM_ROTI_EPI64(x, c) \
    (-(c) == 32) ? _mm_shuffle_epi32((x), _MM_SHUFFLE(2,3,0,1)) \
    : (-(c) == 24) ? _mm_shuffle_epi8((x), r24) \
    : (-(c) == 16) ? _mm_shuffle_epi8((x), r16) \
    : (-(c) == 63) ? _mm_xor_si128(_mm_srli_epi64((x), -(c)), _mm_add_epi64((x), (x))) \
    : _mm_xor_si128(_mm_srli_epi64((x), -(c)), _mm_slli_epi64((x), 64-(-(c))))
#endif

#define BLAKE2B_G1(row1l,row2l,row3l,row4l,row1h,row2h,row3h,row4h,b0,b1) \
    row1l = _mm_add_epi64(_mm_add_epi64(row1l, b0), row2l); \
    row1h = _mm_add_epi64(_mm_add_epi64(row1h, b1), row2h); \
    row4l = _mm_xor_si128(row4l, row1l); \
    row4h = _mm_xor_si128(row4h, row1h); \
    row4l = MM_ROTI_EPI64(row4l, -32); \
    row4h = MM_ROTI_EPI64(row4h, -32); \
    row3l = _mm_add_epi64(row3l, row4l); \
    row3h = _mm_add_epi64(row3h, row4h); \
    row2l = _mm_xor_si128(row2l, row3l); \
    row2h = _mm_xor_si128(row2h, row3h); \
    row2l = MM_ROTI_EPI64(row2l, -24); \
    row2h = MM_ROTI_EPI64(row2h, -24);

#define BLAKE2B_G2(row1l,row2l,row3l,row4l,row1h,row2h,row3h,row4h,b0,b1) \
    row1l = _mm_add_epi64(_mm_add_epi64(row1l, b0), row2l); \
    row1h = _mm_add_epi64(_mm_add_epi64(row1h, b1), row2h); \
    row4l = _mm_xor_si128(row4l, row1l); \
    row4h = _mm_xor_si128(row4h, row1h); \
    row4l = MM_ROTI_EPI64(row4l, -16); \
    row4h = MM_ROTI_EPI64(row4h, -16); \
    row3l = _mm_add_epi64(row3l, row4l); \
    row3h = _mm_add_epi64(row3h, row4h); \
    row2l = _mm_xor_si128(row2l, row3l); \
    row2h = _mm_xor_si128(row2h, row3h); \
    row2l = MM_ROTI_EPI64(row2l, -63); \
    row2h = MM_ROTI_EPI64(row2h, -63);

#define BLAKE2B_DIAGONALIZE(row1l,row2l,row3l,row4l,row1h,row2h,row3h,row4h) \
    t0 = row4l; \
    t1 = row2l; \
    row4l = row3l; \
    row3l = row3h; \
    row3h = row4l; \
    row4l = _mm_unpackhi_epi64(row4h, _mm_unpacklo_epi64(t0, t0)); \
    row4h = _mm_unpackhi_epi64(t0, _mm_unpacklo_epi64(row4h, row4h)); \
    row2l = _mm_unpackhi_epi64(row2l, _mm_unpacklo_epi64(row2h, row2h)); \
    row2h = _mm_unpackhi_epi64(row2h, _mm_unpacklo_epi64(t1, t1))

#define BLAKE2B_UNDIAGONALIZE(row1l,row2l,row3l,row4l,row1h,row2h,row3h,row4h) \
    t0 = row3l; \
    row3l = row3h; \
    row3h = t0; \
    t0 = row2l; \
    t1 = row4l; \
    row2l = _mm_unpackhi_epi64(row2h, _mm_unpacklo_epi64(row2l, row2l)); \
    row2h = _mm_unpackhi_epi64(t0, _mm_unpacklo_epi64(row2h, row2h)); \
    row4l = _mm_unpackhi_epi64(row4l, _mm_unpacklo_epi64(row4h, row4h)); \
    row4h = _mm_unpackhi_epi64(row4h, _mm_unpacklo_epi64(t1, t1))

#define BLAKE2B_ROUND(r) \
    BLAKE2B_LOAD_MSG_ ##r ##_1(b0, b1); \
    BLAKE2B_G1(row1l,row2l,row3l,row4l,row1h,row2h,row3h,row4h,b0,b1); \
    BLAKE2B_LOAD_MSG_ ##r ##_2(b0, b1); \
    BLAKE2B_G2(row1l,row2l,row3l,row4l,row1h,row2h,row3h,row4h,b0,b1); \
    BLAKE2B_DIAGONALIZE(row1l,row2l,row3l,row4l,row1h,row2h,row3h,row4h); \
    BLAKE2B_LOAD_MSG_ ##r ##_3(b0, b1); \
    BLAKE2B_G1(row1l,row2l,row3l,row4l,row1h,row2h,row3h,row4h,b0,b1); \
    BLAKE2B_LOAD_MSG_ ##r ##_4(b0, b1); \
    BLAKE2B_G2(row1l,row2l,row3l,row4l,row1h,row2h,row3h,row4h,b0,b1); \
    BLAKE2B_UNDIAGONALIZE(row1l,row2l,row3l,row4l,row1h,row2h,row3h,row4h);

    __m128i row1l, row1h;
    __m128i row2l, row2h;
    __m128i row3l, row3h;
    __m128i row4l, row4h;
    __m128i b0, b1;   // message-word operands consumed by BLAKE2B_G1/G2
    __m128i t0, t1;   // scratch registers used by (UN)DIAGONALIZE
    const __m128i r16 = _mm_setr_epi8(2, 3, 4, 5, 6, 7, 0, 1, 10, 11, 12, 13, 14, 15, 8, 9);
    const __m128i r24 = _mm_setr_epi8(3, 4, 5, 6, 7, 0, 1, 2, 11, 12, 13, 14, 15, 8, 9, 10);
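    // r16 and r24 are _mm_shuffle_epi8 masks: rotating each 64-bit lane right
    // by 16 or 24 bits is a fixed byte permutation, which MM_ROTI_EPI64 uses
    // in place of a shift/xor sequence.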
    const __m128i m0 = LOADU(input + 00);
    const __m128i m1 = LOADU(input + 16);
    const __m128i m2 = LOADU(input + 32);
    const __m128i m3 = LOADU(input + 48);
    const __m128i m4 = LOADU(input + 64);
    const __m128i m5 = LOADU(input + 80);
    const __m128i m6 = LOADU(input + 96);
    const __m128i m7 = LOADU(input + 112);
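    // Rows 1-2 start as the chaining value h[0..7]; rows 3-4 start as the
    // BLAKE2b IV with the block counter t[0..1] and finalization flags f[0..1]
    // XORed into the last four words.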
    row1l = LOADU(state.h()+0);
    row1h = LOADU(state.h()+2);
    row2l = LOADU(state.h()+4);
    row2h = LOADU(state.h()+6);
    row3l = LOADU(BLAKE2B_IV+0);
    row3h = LOADU(BLAKE2B_IV+2);
    row4l = _mm_xor_si128(LOADU(BLAKE2B_IV+4), LOADU(state.t()+0));
    row4h = _mm_xor_si128(LOADU(BLAKE2B_IV+6), LOADU(state.f()+0));
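    // Twelve rounds of the compression function. BLAKE2B_ROUND(r) expands the
    // message loads for round r and the four G1/G2 pairs with the diagonal
    // rearrangement between the column and diagonal steps.
    BLAKE2B_ROUND(0);
    BLAKE2B_ROUND(1);
    BLAKE2B_ROUND(2);
    BLAKE2B_ROUND(3);
    BLAKE2B_ROUND(4);
    BLAKE2B_ROUND(5);
    BLAKE2B_ROUND(6);
    BLAKE2B_ROUND(7);
    BLAKE2B_ROUND(8);
    BLAKE2B_ROUND(9);
    BLAKE2B_ROUND(10);
    BLAKE2B_ROUND(11);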
    // Feed-forward into the chaining value: h[i] ^= v[i] ^ v[i+8].
    row1l = _mm_xor_si128(row3l, row1l);
    row1h = _mm_xor_si128(row3h, row1h);
    STOREU(state.h()+0, _mm_xor_si128(LOADU(state.h()+0), row1l));
    STOREU(state.h()+2, _mm_xor_si128(LOADU(state.h()+2), row1h));
    row2l = _mm_xor_si128(row4l, row2l);
    row2h = _mm_xor_si128(row4h, row2h);
    STOREU(state.h()+4, _mm_xor_si128(LOADU(state.h()+4), row2l));
    STOREU(state.h()+6, _mm_xor_si128(LOADU(state.h()+6), row2h));
#endif  // CRYPTOPP_SSE41_AVAILABLE

#if CRYPTOPP_ARM_NEON_AVAILABLE

#define BLAKE2B_LOAD_MSG_0_1(b0, b1) \
    do { b0 = vcombine_u64(vget_low_u64(m0), vget_low_u64(m1)); b1 = vcombine_u64(vget_low_u64(m2), vget_low_u64(m3)); } while(0)

#define BLAKE2B_LOAD_MSG_0_2(b0, b1) \
    do { b0 = vcombine_u64(vget_high_u64(m0), vget_high_u64(m1)); b1 = vcombine_u64(vget_high_u64(m2), vget_high_u64(m3)); } while(0)

#define BLAKE2B_LOAD_MSG_0_3(b0, b1) \
    do { b0 = vcombine_u64(vget_low_u64(m4), vget_low_u64(m5)); b1 = vcombine_u64(vget_low_u64(m6), vget_low_u64(m7)); } while(0)

#define BLAKE2B_LOAD_MSG_0_4(b0, b1) \
    do { b0 = vcombine_u64(vget_high_u64(m4), vget_high_u64(m5)); b1 = vcombine_u64(vget_high_u64(m6), vget_high_u64(m7)); } while(0)

#define BLAKE2B_LOAD_MSG_1_1(b0, b1) \
    do { b0 = vcombine_u64(vget_low_u64(m7), vget_low_u64(m2)); b1 = vcombine_u64(vget_high_u64(m4), vget_high_u64(m6)); } while(0)

#define BLAKE2B_LOAD_MSG_1_2(b0, b1) \
    do { b0 = vcombine_u64(vget_low_u64(m5), vget_low_u64(m4)); b1 = vextq_u64(m7, m3, 1); } while(0)

#define BLAKE2B_LOAD_MSG_1_3(b0, b1) \
    do { b0 = vextq_u64(m0, m0, 1); b1 = vcombine_u64(vget_high_u64(m5), vget_high_u64(m2)); } while(0)

#define BLAKE2B_LOAD_MSG_1_4(b0, b1) \
    do { b0 = vcombine_u64(vget_low_u64(m6), vget_low_u64(m1)); b1 = vcombine_u64(vget_high_u64(m3), vget_high_u64(m1)); } while(0)

#define BLAKE2B_LOAD_MSG_2_1(b0, b1) \
    do { b0 = vextq_u64(m5, m6, 1); b1 = vcombine_u64(vget_high_u64(m2), vget_high_u64(m7)); } while(0)

#define BLAKE2B_LOAD_MSG_2_2(b0, b1) \
    do { b0 = vcombine_u64(vget_low_u64(m4), vget_low_u64(m0)); b1 = vcombine_u64(vget_low_u64(m1), vget_high_u64(m6)); } while(0)

#define BLAKE2B_LOAD_MSG_2_3(b0, b1) \
    do { b0 = vcombine_u64(vget_low_u64(m5), vget_high_u64(m1)); b1 = vcombine_u64(vget_high_u64(m3), vget_high_u64(m4)); } while(0)

#define BLAKE2B_LOAD_MSG_2_4(b0, b1) \
    do { b0 = vcombine_u64(vget_low_u64(m7), vget_low_u64(m3)); b1 = vextq_u64(m0, m2, 1); } while(0)

#define BLAKE2B_LOAD_MSG_3_1(b0, b1) \
    do { b0 = vcombine_u64(vget_high_u64(m3), vget_high_u64(m1)); b1 = vcombine_u64(vget_high_u64(m6), vget_high_u64(m5)); } while(0)

#define BLAKE2B_LOAD_MSG_3_2(b0, b1) \
    do { b0 = vcombine_u64(vget_high_u64(m4), vget_high_u64(m0)); b1 = vcombine_u64(vget_low_u64(m6), vget_low_u64(m7)); } while(0)

#define BLAKE2B_LOAD_MSG_3_3(b0, b1) \
    do { b0 = vcombine_u64(vget_low_u64(m1), vget_high_u64(m2)); b1 = vcombine_u64(vget_low_u64(m2), vget_high_u64(m7)); } while(0)

#define BLAKE2B_LOAD_MSG_3_4(b0, b1) \
    do { b0 = vcombine_u64(vget_low_u64(m3), vget_low_u64(m5)); b1 = vcombine_u64(vget_low_u64(m0), vget_low_u64(m4)); } while(0)

#define BLAKE2B_LOAD_MSG_4_1(b0, b1) \
    do { b0 = vcombine_u64(vget_high_u64(m4), vget_high_u64(m2)); b1 = vcombine_u64(vget_low_u64(m1), vget_low_u64(m5)); } while(0)

#define BLAKE2B_LOAD_MSG_4_2(b0, b1) \
    do { b0 = vcombine_u64(vget_low_u64(m0), vget_high_u64(m3)); b1 = vcombine_u64(vget_low_u64(m2), vget_high_u64(m7)); } while(0)

#define BLAKE2B_LOAD_MSG_4_3(b0, b1) \
    do { b0 = vcombine_u64(vget_low_u64(m7), vget_high_u64(m5)); b1 = vcombine_u64(vget_low_u64(m3), vget_high_u64(m1)); } while(0)

#define BLAKE2B_LOAD_MSG_4_4(b0, b1) \
    do { b0 = vextq_u64(m0, m6, 1); b1 = vcombine_u64(vget_low_u64(m4), vget_high_u64(m6)); } while(0)

#define BLAKE2B_LOAD_MSG_5_1(b0, b1) \
    do { b0 = vcombine_u64(vget_low_u64(m1), vget_low_u64(m3)); b1 = vcombine_u64(vget_low_u64(m0), vget_low_u64(m4)); } while(0)

#define BLAKE2B_LOAD_MSG_5_2(b0, b1) \
    do { b0 = vcombine_u64(vget_low_u64(m6), vget_low_u64(m5)); b1 = vcombine_u64(vget_high_u64(m5), vget_high_u64(m1)); } while(0)

#define BLAKE2B_LOAD_MSG_5_3(b0, b1) \
    do { b0 = vcombine_u64(vget_low_u64(m2), vget_high_u64(m3)); b1 = vcombine_u64(vget_high_u64(m7), vget_high_u64(m0)); } while(0)

#define BLAKE2B_LOAD_MSG_5_4(b0, b1) \
    do { b0 = vcombine_u64(vget_high_u64(m6), vget_high_u64(m2)); b1 = vcombine_u64(vget_low_u64(m7), vget_high_u64(m4)); } while(0)

#define BLAKE2B_LOAD_MSG_6_1(b0, b1) \
    do { b0 = vcombine_u64(vget_low_u64(m6), vget_high_u64(m0)); b1 = vcombine_u64(vget_low_u64(m7), vget_low_u64(m2)); } while(0)

#define BLAKE2B_LOAD_MSG_6_2(b0, b1) \
    do { b0 = vcombine_u64(vget_high_u64(m2), vget_high_u64(m7)); b1 = vextq_u64(m6, m5, 1); } while(0)

#define BLAKE2B_LOAD_MSG_6_3(b0, b1) \
    do { b0 = vcombine_u64(vget_low_u64(m0), vget_low_u64(m3)); b1 = vextq_u64(m4, m4, 1); } while(0)

#define BLAKE2B_LOAD_MSG_6_4(b0, b1) \
    do { b0 = vcombine_u64(vget_high_u64(m3), vget_high_u64(m1)); b1 = vcombine_u64(vget_low_u64(m1), vget_high_u64(m5)); } while(0)

#define BLAKE2B_LOAD_MSG_7_1(b0, b1) \
    do { b0 = vcombine_u64(vget_high_u64(m6), vget_high_u64(m3)); b1 = vcombine_u64(vget_low_u64(m6), vget_high_u64(m1)); } while(0)

#define BLAKE2B_LOAD_MSG_7_2(b0, b1) \
    do { b0 = vextq_u64(m5, m7, 1); b1 = vcombine_u64(vget_high_u64(m0), vget_high_u64(m4)); } while(0)

#define BLAKE2B_LOAD_MSG_7_3(b0, b1) \
    do { b0 = vcombine_u64(vget_high_u64(m2), vget_high_u64(m7)); b1 = vcombine_u64(vget_low_u64(m4), vget_low_u64(m1)); } while(0)

#define BLAKE2B_LOAD_MSG_7_4(b0, b1) \
    do { b0 = vcombine_u64(vget_low_u64(m0), vget_low_u64(m2)); b1 = vcombine_u64(vget_low_u64(m3), vget_low_u64(m5)); } while(0)

#define BLAKE2B_LOAD_MSG_8_1(b0, b1) \
    do { b0 = vcombine_u64(vget_low_u64(m3), vget_low_u64(m7)); b1 = vextq_u64(m5, m0, 1); } while(0)

#define BLAKE2B_LOAD_MSG_8_2(b0, b1) \
    do { b0 = vcombine_u64(vget_high_u64(m7), vget_high_u64(m4)); b1 = vextq_u64(m1, m4, 1); } while(0)

#define BLAKE2B_LOAD_MSG_8_3(b0, b1) \
    do { b0 = m6; b1 = vextq_u64(m0, m5, 1); } while(0)

#define BLAKE2B_LOAD_MSG_8_4(b0, b1) \
    do { b0 = vcombine_u64(vget_low_u64(m1), vget_high_u64(m3)); b1 = m2; } while(0)

#define BLAKE2B_LOAD_MSG_9_1(b0, b1) \
    do { b0 = vcombine_u64(vget_low_u64(m5), vget_low_u64(m4)); b1 = vcombine_u64(vget_high_u64(m3), vget_high_u64(m0)); } while(0)

#define BLAKE2B_LOAD_MSG_9_2(b0, b1) \
    do { b0 = vcombine_u64(vget_low_u64(m1), vget_low_u64(m2)); b1 = vcombine_u64(vget_low_u64(m3), vget_high_u64(m2)); } while(0)

#define BLAKE2B_LOAD_MSG_9_3(b0, b1) \
    do { b0 = vcombine_u64(vget_high_u64(m7), vget_high_u64(m4)); b1 = vcombine_u64(vget_high_u64(m1), vget_high_u64(m6)); } while(0)

#define BLAKE2B_LOAD_MSG_9_4(b0, b1) \
    do { b0 = vextq_u64(m5, m7, 1); b1 = vcombine_u64(vget_low_u64(m6), vget_low_u64(m0)); } while(0)

#define BLAKE2B_LOAD_MSG_10_1(b0, b1) \
    do { b0 = vcombine_u64(vget_low_u64(m0), vget_low_u64(m1)); b1 = vcombine_u64(vget_low_u64(m2), vget_low_u64(m3)); } while(0)

#define BLAKE2B_LOAD_MSG_10_2(b0, b1) \
    do { b0 = vcombine_u64(vget_high_u64(m0), vget_high_u64(m1)); b1 = vcombine_u64(vget_high_u64(m2), vget_high_u64(m3)); } while(0)

#define BLAKE2B_LOAD_MSG_10_3(b0, b1) \
    do { b0 = vcombine_u64(vget_low_u64(m4), vget_low_u64(m5)); b1 = vcombine_u64(vget_low_u64(m6), vget_low_u64(m7)); } while(0)

#define BLAKE2B_LOAD_MSG_10_4(b0, b1) \
    do { b0 = vcombine_u64(vget_high_u64(m4), vget_high_u64(m5)); b1 = vcombine_u64(vget_high_u64(m6), vget_high_u64(m7)); } while(0)

#define BLAKE2B_LOAD_MSG_11_1(b0, b1) \
    do { b0 = vcombine_u64(vget_low_u64(m7), vget_low_u64(m2)); b1 = vcombine_u64(vget_high_u64(m4), vget_high_u64(m6)); } while(0)

#define BLAKE2B_LOAD_MSG_11_2(b0, b1) \
    do { b0 = vcombine_u64(vget_low_u64(m5), vget_low_u64(m4)); b1 = vextq_u64(m7, m3, 1); } while(0)

#define BLAKE2B_LOAD_MSG_11_3(b0, b1) \
    do { b0 = vextq_u64(m0, m0, 1); b1 = vcombine_u64(vget_high_u64(m5), vget_high_u64(m2)); } while(0)

#define BLAKE2B_LOAD_MSG_11_4(b0, b1) \
    do { b0 = vcombine_u64(vget_low_u64(m6), vget_low_u64(m1)); b1 = vcombine_u64(vget_high_u64(m3), vget_high_u64(m1)); } while(0)

// 64-bit right rotations: 32 via a 32-bit element reverse, 24 and 16 via byte
// extracts, and 63 via add (shift-left-by-one) plus shift-right.
#define vrorq_n_u64_32(x) vreinterpretq_u64_u32(vrev64q_u32(vreinterpretq_u32_u64((x))))

#define vrorq_n_u64_24(x) vcombine_u64( \
    vreinterpret_u64_u8(vext_u8(vreinterpret_u8_u64(vget_low_u64(x)), vreinterpret_u8_u64(vget_low_u64(x)), 3)), \
    vreinterpret_u64_u8(vext_u8(vreinterpret_u8_u64(vget_high_u64(x)), vreinterpret_u8_u64(vget_high_u64(x)), 3)))

#define vrorq_n_u64_16(x) vcombine_u64( \
    vreinterpret_u64_u8(vext_u8(vreinterpret_u8_u64(vget_low_u64(x)), vreinterpret_u8_u64(vget_low_u64(x)), 2)), \
    vreinterpret_u64_u8(vext_u8(vreinterpret_u8_u64(vget_high_u64(x)), vreinterpret_u8_u64(vget_high_u64(x)), 2)))

#define vrorq_n_u64_63(x) veorq_u64(vaddq_u64(x, x), vshrq_n_u64(x, 63))

#define BLAKE2B_G1(row1l,row2l,row3l,row4l,row1h,row2h,row3h,row4h,b0,b1) \
    do { \
    row1l = vaddq_u64(vaddq_u64(row1l, b0), row2l); \
    row1h = vaddq_u64(vaddq_u64(row1h, b1), row2h); \
    row4l = veorq_u64(row4l, row1l); row4h = veorq_u64(row4h, row1h); \
    row4l = vrorq_n_u64_32(row4l); row4h = vrorq_n_u64_32(row4h); \
    row3l = vaddq_u64(row3l, row4l); row3h = vaddq_u64(row3h, row4h); \
    row2l = veorq_u64(row2l, row3l); row2h = veorq_u64(row2h, row3h); \
    row2l = vrorq_n_u64_24(row2l); row2h = vrorq_n_u64_24(row2h); \
    } while(0)

#define BLAKE2B_G2(row1l,row2l,row3l,row4l,row1h,row2h,row3h,row4h,b0,b1) \
    do { \
    row1l = vaddq_u64(vaddq_u64(row1l, b0), row2l); \
    row1h = vaddq_u64(vaddq_u64(row1h, b1), row2h); \
    row4l = veorq_u64(row4l, row1l); row4h = veorq_u64(row4h, row1h); \
    row4l = vrorq_n_u64_16(row4l); row4h = vrorq_n_u64_16(row4h); \
    row3l = vaddq_u64(row3l, row4l); row3h = vaddq_u64(row3h, row4h); \
    row2l = veorq_u64(row2l, row3l); row2h = veorq_u64(row2h, row3h); \
    row2l = vrorq_n_u64_63(row2l); row2h = vrorq_n_u64_63(row2h); \
    } while(0)

#define BLAKE2B_DIAGONALIZE(row1l,row2l,row3l,row4l,row1h,row2h,row3h,row4h) \
    do { \
    uint64x2_t t0 = vextq_u64(row2l, row2h, 1); \
    uint64x2_t t1 = vextq_u64(row2h, row2l, 1); \
    row2l = t0; row2h = t1; t0 = row3l; row3l = row3h; row3h = t0; \
    t0 = vextq_u64(row4h, row4l, 1); t1 = vextq_u64(row4l, row4h, 1); \
    row4l = t0; row4h = t1; \
    } while(0)

#define BLAKE2B_UNDIAGONALIZE(row1l,row2l,row3l,row4l,row1h,row2h,row3h,row4h) \
    do { \
    uint64x2_t t0 = vextq_u64(row2h, row2l, 1); \
    uint64x2_t t1 = vextq_u64(row2l, row2h, 1); \
    row2l = t0; row2h = t1; t0 = row3l; row3l = row3h; row3h = t0; \
    t0 = vextq_u64(row4l, row4h, 1); t1 = vextq_u64(row4h, row4l, 1); \
    row4l = t0; row4h = t1; \
    } while(0)

#define BLAKE2B_ROUND(r) \
    do { \
    uint64x2_t b0, b1; \
    BLAKE2B_LOAD_MSG_ ##r ##_1(b0, b1); \
    BLAKE2B_G1(row1l,row2l,row3l,row4l,row1h,row2h,row3h,row4h,b0,b1); \
    BLAKE2B_LOAD_MSG_ ##r ##_2(b0, b1); \
    BLAKE2B_G2(row1l,row2l,row3l,row4l,row1h,row2h,row3h,row4h,b0,b1); \
    BLAKE2B_DIAGONALIZE(row1l,row2l,row3l,row4l,row1h,row2h,row3h,row4h); \
    BLAKE2B_LOAD_MSG_ ##r ##_3(b0, b1); \
    BLAKE2B_G1(row1l,row2l,row3l,row4l,row1h,row2h,row3h,row4h,b0,b1); \
    BLAKE2B_LOAD_MSG_ ##r ##_4(b0, b1); \
    BLAKE2B_G2(row1l,row2l,row3l,row4l,row1h,row2h,row3h,row4h,b0,b1); \
    BLAKE2B_UNDIAGONALIZE(row1l,row2l,row3l,row4l,row1h,row2h,row3h,row4h); \
    } while(0)

    const uint64x2_t m0 = vreinterpretq_u64_u8(vld1q_u8(input + 00));
    const uint64x2_t m1 = vreinterpretq_u64_u8(vld1q_u8(input + 16));
    const uint64x2_t m2 = vreinterpretq_u64_u8(vld1q_u8(input + 32));
    const uint64x2_t m3 = vreinterpretq_u64_u8(vld1q_u8(input + 48));
    const uint64x2_t m4 = vreinterpretq_u64_u8(vld1q_u8(input + 64));
    const uint64x2_t m5 = vreinterpretq_u64_u8(vld1q_u8(input + 80));
    const uint64x2_t m6 = vreinterpretq_u64_u8(vld1q_u8(input + 96));
    const uint64x2_t m7 = vreinterpretq_u64_u8(vld1q_u8(input + 112));
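    // The working state v[0..15] is held in eight NEON registers, one for the
    // low half and one for the high half of each of the four rows.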
    uint64x2_t row1l, row1h, row2l, row2h;
    uint64x2_t row3l, row3h, row4l, row4h;
    const uint64x2_t h0 = row1l = vld1q_u64(state.h()+0);
    const uint64x2_t h1 = row1h = vld1q_u64(state.h()+2);
    const uint64x2_t h2 = row2l = vld1q_u64(state.h()+4);
    const uint64x2_t h3 = row2h = vld1q_u64(state.h()+6);

    row3l = vld1q_u64(BLAKE2B_IV+0);
    row3h = vld1q_u64(BLAKE2B_IV+2);
    row4l = veorq_u64(vld1q_u64(BLAKE2B_IV+4), vld1q_u64(state.t()+0));
    row4h = veorq_u64(vld1q_u64(BLAKE2B_IV+6), vld1q_u64(state.f()+0));
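    // Twelve rounds, as in the SSE4.1 path.
    BLAKE2B_ROUND(0);
    BLAKE2B_ROUND(1);
    BLAKE2B_ROUND(2);
    BLAKE2B_ROUND(3);
    BLAKE2B_ROUND(4);
    BLAKE2B_ROUND(5);
    BLAKE2B_ROUND(6);
    BLAKE2B_ROUND(7);
    BLAKE2B_ROUND(8);
    BLAKE2B_ROUND(9);
    BLAKE2B_ROUND(10);
    BLAKE2B_ROUND(11);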
    // Feed-forward into the chaining value: h[i] ^= v[i] ^ v[i+8].
    vst1q_u64(state.h()+0, veorq_u64(h0, veorq_u64(row1l, row3l)));
    vst1q_u64(state.h()+2, veorq_u64(h1, veorq_u64(row1h, row3h)));
    vst1q_u64(state.h()+4, veorq_u64(h2, veorq_u64(row2l, row4l)));
    vst1q_u64(state.h()+6, veorq_u64(h3, veorq_u64(row2h, row4h)));
#endif  // CRYPTOPP_ARM_NEON_AVAILABLE

#if (CRYPTOPP_POWER8_AVAILABLE)

// Endian-aware 64-bit vector load/store helpers (VecLoad64, VecLoad64LE,
// VecStore64, VecStore64LE). The LE variants take a byte-permute mask and
// swap to little-endian word order on big-endian targets; on little-endian
// targets the mask is unused.

#if defined(CRYPTOPP_BIG_ENDIAN)
    CRYPTOPP_UNUSED(le_mask);

inline void VecStore64(void* p, const uint64x2_p x)

#if defined(CRYPTOPP_BIG_ENDIAN)
    CRYPTOPP_UNUSED(le_mask);
#if defined(CRYPTOPP_BIG_ENDIAN)
#define vec_shl_8(a,b) (uint64x2_p)vec_sld((uint8x16_p)a,(uint8x16_p)b,8)
#else
#define vec_shl_8(a,b) (uint64x2_p)vec_sld((uint8x16_p)b,(uint8x16_p)a,8)
#endif

#define vec_merge_hi(a, b) vec_mergeh(a,b)
#define vec_merge_hi_lo(a, b) vec_mergeh(a,(uint64x2_p)vec_sld((uint8x16_p)b,(uint8x16_p)b,8))
#define vec_merge_lo(a, b) vec_mergel(a,b)

#define BLAKE2B_LOAD_MSG_0_1(b0, b1) \
    do { b0 = vec_merge_hi(m0, m1); b1 = vec_merge_hi(m2, m3); } while(0)

#define BLAKE2B_LOAD_MSG_0_2(b0, b1) \
    do { b0 = vec_merge_lo(m0, m1); b1 = vec_merge_lo(m2, m3); } while(0)

#define BLAKE2B_LOAD_MSG_0_3(b0, b1) \
    do { b0 = vec_merge_hi(m4, m5); b1 = vec_merge_hi(m6, m7); } while(0)

#define BLAKE2B_LOAD_MSG_0_4(b0, b1) \
    do { b0 = vec_merge_lo(m4, m5); b1 = vec_merge_lo(m6, m7); } while(0)

#define BLAKE2B_LOAD_MSG_1_1(b0, b1) \
    do { b0 = vec_merge_hi(m7, m2); b1 = vec_merge_lo(m4, m6); } while(0)

#define BLAKE2B_LOAD_MSG_1_2(b0, b1) \
    do { b0 = vec_merge_hi(m5, m4); b1 = vec_shl_8(m7, m3); } while(0)

#define BLAKE2B_LOAD_MSG_1_3(b0, b1) \
    do { b0 = vec_shl_8(m0, m0); b1 = vec_merge_lo(m5, m2); } while(0)

#define BLAKE2B_LOAD_MSG_1_4(b0, b1) \
    do { b0 = vec_merge_hi(m6, m1); b1 = vec_merge_lo(m3, m1); } while(0)

#define BLAKE2B_LOAD_MSG_2_1(b0, b1) \
    do { b0 = vec_shl_8(m5, m6); b1 = vec_merge_lo(m2, m7); } while(0)

#define BLAKE2B_LOAD_MSG_2_2(b0, b1) \
    do { b0 = vec_merge_hi(m4, m0); b1 = vec_merge_hi_lo(m1, m6); } while(0)

#define BLAKE2B_LOAD_MSG_2_3(b0, b1) \
    do { b0 = vec_merge_hi_lo(m5, m1); b1 = vec_merge_lo(m3, m4); } while(0)

#define BLAKE2B_LOAD_MSG_2_4(b0, b1) \
    do { b0 = vec_merge_hi(m7, m3); b1 = vec_shl_8(m0, m2); } while(0)

#define BLAKE2B_LOAD_MSG_3_1(b0, b1) \
    do { b0 = vec_merge_lo(m3, m1); b1 = vec_merge_lo(m6, m5); } while(0)

#define BLAKE2B_LOAD_MSG_3_2(b0, b1) \
    do { b0 = vec_merge_lo(m4, m0); b1 = vec_merge_hi(m6, m7); } while(0)

#define BLAKE2B_LOAD_MSG_3_3(b0, b1) \
    do { b0 = vec_merge_hi_lo(m1, m2); b1 = vec_merge_hi_lo(m2, m7); } while(0)

#define BLAKE2B_LOAD_MSG_3_4(b0, b1) \
    do { b0 = vec_merge_hi(m3, m5); b1 = vec_merge_hi(m0, m4); } while(0)

#define BLAKE2B_LOAD_MSG_4_1(b0, b1) \
    do { b0 = vec_merge_lo(m4, m2); b1 = vec_merge_hi(m1, m5); } while(0)

#define BLAKE2B_LOAD_MSG_4_2(b0, b1) \
    do { b0 = vec_merge_hi_lo(m0, m3); b1 = vec_merge_hi_lo(m2, m7); } while(0)

#define BLAKE2B_LOAD_MSG_4_3(b0, b1) \
    do { b0 = vec_merge_hi_lo(m7, m5); b1 = vec_merge_hi_lo(m3, m1); } while(0)

#define BLAKE2B_LOAD_MSG_4_4(b0, b1) \
    do { b0 = vec_shl_8(m0, m6); b1 = vec_merge_hi_lo(m4, m6); } while(0)

#define BLAKE2B_LOAD_MSG_5_1(b0, b1) \
    do { b0 = vec_merge_hi(m1, m3); b1 = vec_merge_hi(m0, m4); } while(0)

#define BLAKE2B_LOAD_MSG_5_2(b0, b1) \
    do { b0 = vec_merge_hi(m6, m5); b1 = vec_merge_lo(m5, m1); } while(0)

#define BLAKE2B_LOAD_MSG_5_3(b0, b1) \
    do { b0 = vec_merge_hi_lo(m2, m3); b1 = vec_merge_lo(m7, m0); } while(0)

#define BLAKE2B_LOAD_MSG_5_4(b0, b1) \
    do { b0 = vec_merge_lo(m6, m2); b1 = vec_merge_hi_lo(m7, m4); } while(0)

#define BLAKE2B_LOAD_MSG_6_1(b0, b1) \
    do { b0 = vec_merge_hi_lo(m6, m0); b1 = vec_merge_hi(m7, m2); } while(0)

#define BLAKE2B_LOAD_MSG_6_2(b0, b1) \
    do { b0 = vec_merge_lo(m2, m7); b1 = vec_shl_8(m6, m5); } while(0)

#define BLAKE2B_LOAD_MSG_6_3(b0, b1) \
    do { b0 = vec_merge_hi(m0, m3); b1 = vec_shl_8(m4, m4); } while(0)

#define BLAKE2B_LOAD_MSG_6_4(b0, b1) \
    do { b0 = vec_merge_lo(m3, m1); b1 = vec_merge_hi_lo(m1, m5); } while(0)

#define BLAKE2B_LOAD_MSG_7_1(b0, b1) \
    do { b0 = vec_merge_lo(m6, m3); b1 = vec_merge_hi_lo(m6, m1); } while(0)

#define BLAKE2B_LOAD_MSG_7_2(b0, b1) \
    do { b0 = vec_shl_8(m5, m7); b1 = vec_merge_lo(m0, m4); } while(0)

#define BLAKE2B_LOAD_MSG_7_3(b0, b1) \
    do { b0 = vec_merge_lo(m2, m7); b1 = vec_merge_hi(m4, m1); } while(0)

#define BLAKE2B_LOAD_MSG_7_4(b0, b1) \
    do { b0 = vec_merge_hi(m0, m2); b1 = vec_merge_hi(m3, m5); } while(0)

#define BLAKE2B_LOAD_MSG_8_1(b0, b1) \
    do { b0 = vec_merge_hi(m3, m7); b1 = vec_shl_8(m5, m0); } while(0)

#define BLAKE2B_LOAD_MSG_8_2(b0, b1) \
    do { b0 = vec_merge_lo(m7, m4); b1 = vec_shl_8(m1, m4); } while(0)

#define BLAKE2B_LOAD_MSG_8_3(b0, b1) \
    do { b0 = m6; b1 = vec_shl_8(m0, m5); } while(0)

#define BLAKE2B_LOAD_MSG_8_4(b0, b1) \
    do { b0 = vec_merge_hi_lo(m1, m3); b1 = m2; } while(0)

#define BLAKE2B_LOAD_MSG_9_1(b0, b1) \
    do { b0 = vec_merge_hi(m5, m4); b1 = vec_merge_lo(m3, m0); } while(0)

#define BLAKE2B_LOAD_MSG_9_2(b0, b1) \
    do { b0 = vec_merge_hi(m1, m2); b1 = vec_merge_hi_lo(m3, m2); } while(0)

#define BLAKE2B_LOAD_MSG_9_3(b0, b1) \
    do { b0 = vec_merge_lo(m7, m4); b1 = vec_merge_lo(m1, m6); } while(0)

#define BLAKE2B_LOAD_MSG_9_4(b0, b1) \
    do { b0 = vec_shl_8(m5, m7); b1 = vec_merge_hi(m6, m0); } while(0)

#define BLAKE2B_LOAD_MSG_10_1(b0, b1) \
    do { b0 = vec_merge_hi(m0, m1); b1 = vec_merge_hi(m2, m3); } while(0)

#define BLAKE2B_LOAD_MSG_10_2(b0, b1) \
    do { b0 = vec_merge_lo(m0, m1); b1 = vec_merge_lo(m2, m3); } while(0)

#define BLAKE2B_LOAD_MSG_10_3(b0, b1) \
    do { b0 = vec_merge_hi(m4, m5); b1 = vec_merge_hi(m6, m7); } while(0)

#define BLAKE2B_LOAD_MSG_10_4(b0, b1) \
    do { b0 = vec_merge_lo(m4, m5); b1 = vec_merge_lo(m6, m7); } while(0)

#define BLAKE2B_LOAD_MSG_11_1(b0, b1) \
    do { b0 = vec_merge_hi(m7, m2); b1 = vec_merge_lo(m4, m6); } while(0)

#define BLAKE2B_LOAD_MSG_11_2(b0, b1) \
    do { b0 = vec_merge_hi(m5, m4); b1 = vec_shl_8(m7, m3); } while(0)

#define BLAKE2B_LOAD_MSG_11_3(b0, b1) \
    do { b0 = vec_shl_8(m0, m0); b1 = vec_merge_lo(m5, m2); } while(0)

#define BLAKE2B_LOAD_MSG_11_4(b0, b1) \
    do { b0 = vec_merge_hi(m6, m1); b1 = vec_merge_lo(m3, m1); } while(0)

    const uint64x2_p ROR16_MASK = { 64-16, 64-16 };
    const uint64x2_p ROR24_MASK = { 64-24, 64-24 };
    const uint64x2_p ROR32_MASK = { 64-32, 64-32 };
    const uint64x2_p ROR63_MASK = { 64-63, 64-63 };
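    // vec_rl rotates each 64-bit lane left, so a right rotation by r is done
    // as a left rotation by 64-r with the masks above.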
#define vec_ror_32(x) vec_rl(x, ROR32_MASK)
#define vec_ror_24(x) vec_rl(x, ROR24_MASK)
#define vec_ror_16(x) vec_rl(x, ROR16_MASK)
#define vec_ror_63(x) vec_rl(x, ROR63_MASK)

#define BLAKE2B_G1(row1l,row2l,row3l,row4l,row1h,row2h,row3h,row4h,b0,b1) \
    do { \
    row1l = VecAdd(VecAdd(row1l, b0), row2l); \
    row1h = VecAdd(VecAdd(row1h, b1), row2h); \
    row4l = VecXor(row4l, row1l); row4h = VecXor(row4h, row1h); \
    row4l = vec_ror_32(row4l); row4h = vec_ror_32(row4h); \
    row3l = VecAdd(row3l, row4l); row3h = VecAdd(row3h, row4h); \
    row2l = VecXor(row2l, row3l); row2h = VecXor(row2h, row3h); \
    row2l = vec_ror_24(row2l); row2h = vec_ror_24(row2h); \
    } while(0)

#define BLAKE2B_G2(row1l,row2l,row3l,row4l,row1h,row2h,row3h,row4h,b0,b1) \
    do { \
    row1l = VecAdd(VecAdd(row1l, b0), row2l); \
    row1h = VecAdd(VecAdd(row1h, b1), row2h); \
    row4l = VecXor(row4l, row1l); row4h = VecXor(row4h, row1h); \
    row4l = vec_ror_16(row4l); row4h = vec_ror_16(row4h); \
    row3l = VecAdd(row3l, row4l); row3h = VecAdd(row3h, row4h); \
    row2l = VecXor(row2l, row3l); row2h = VecXor(row2h, row3h); \
    row2l = vec_ror_63(row2l); row2h = vec_ror_63(row2h); \
    } while(0)

#define BLAKE2B_DIAGONALIZE(row1l,row2l,row3l,row4l,row1h,row2h,row3h,row4h) \
    do { \
    uint64x2_p t0 = vec_shl_8(row2l, row2h); \
    uint64x2_p t1 = vec_shl_8(row2h, row2l); \
    row2l = t0; row2h = t1; t0 = row3l; row3l = row3h; row3h = t0; \
    t0 = vec_shl_8(row4h, row4l); t1 = vec_shl_8(row4l, row4h); \
    row4l = t0; row4h = t1; \
    } while(0)

#define BLAKE2B_UNDIAGONALIZE(row1l,row2l,row3l,row4l,row1h,row2h,row3h,row4h) \
    do { \
    uint64x2_p t0 = vec_shl_8(row2h, row2l); \
    uint64x2_p t1 = vec_shl_8(row2l, row2h); \
    row2l = t0; row2h = t1; t0 = row3l; row3l = row3h; row3h = t0; \
    t0 = vec_shl_8(row4l, row4h); t1 = vec_shl_8(row4h, row4l); \
    row4l = t0; row4h = t1; \
    } while(0)

#define BLAKE2B_ROUND(r) \
    do { \
    uint64x2_p b0, b1; \
    BLAKE2B_LOAD_MSG_ ##r ##_1(b0, b1); \
    BLAKE2B_G1(row1l,row2l,row3l,row4l,row1h,row2h,row3h,row4h,b0,b1); \
    BLAKE2B_LOAD_MSG_ ##r ##_2(b0, b1); \
    BLAKE2B_G2(row1l,row2l,row3l,row4l,row1h,row2h,row3h,row4h,b0,b1); \
    BLAKE2B_DIAGONALIZE(row1l,row2l,row3l,row4l,row1h,row2h,row3h,row4h); \
    BLAKE2B_LOAD_MSG_ ##r ##_3(b0, b1); \
    BLAKE2B_G1(row1l,row2l,row3l,row4l,row1h,row2h,row3h,row4h,b0,b1); \
    BLAKE2B_LOAD_MSG_ ##r ##_4(b0, b1); \
    BLAKE2B_G2(row1l,row2l,row3l,row4l,row1h,row2h,row3h,row4h,b0,b1); \
    BLAKE2B_UNDIAGONALIZE(row1l,row2l,row3l,row4l,row1h,row2h,row3h,row4h); \
    } while(0)

    const uint8x16_p le_mask = {7,6,5,4, 3,2,1,0, 15,14,13,12, 11,10,9,8};
#if defined(_ARCH_PWR9)
    // POWER9 path: m0..m7 are the sixteen 64-bit message words of the
    // 128-byte block; on big-endian targets they are swapped to little-endian
    // word order with le_mask.
# if defined(CRYPTOPP_BIG_ENDIAN)
    m0 = vec_perm(m0, m0, le_mask);
    m1 = vec_perm(m1, m1, le_mask);
    m2 = vec_perm(m2, m2, le_mask);
    m3 = vec_perm(m3, m3, le_mask);
    m4 = vec_perm(m4, m4, le_mask);
    m5 = vec_perm(m5, m5, le_mask);
    m6 = vec_perm(m6, m6, le_mask);
    m7 = vec_perm(m7, m7, le_mask);
    const uintptr_t addr = (uintptr_t)input;
    // Pre-POWER9 path: addr selects between the aligned and misaligned load
    // sequences; the big-endian byte swap below follows the aligned loads.
# if defined(CRYPTOPP_BIG_ENDIAN)
    m0 = vec_perm(m0, m0, le_mask);
    m1 = vec_perm(m1, m1, le_mask);
    m2 = vec_perm(m2, m2, le_mask);
    m3 = vec_perm(m3, m3, le_mask);
    m4 = vec_perm(m4, m4, le_mask);
    m5 = vec_perm(m5, m5, le_mask);
    m6 = vec_perm(m6, m6, le_mask);
    m7 = vec_perm(m7, m7, le_mask);
# if defined(CRYPTOPP_BIG_ENDIAN)
    perm = vec_perm(perm, perm, le_mask);
# endif
    // Misaligned path: each message vector is assembled from two adjacent
    // 16-byte loads with the permute vector perm; ex is the extra vector read
    // just past m7.
    m0 = vec_perm(m0, m1, perm);
    m1 = vec_perm(m1, m2, perm);
    m2 = vec_perm(m2, m3, perm);
    m3 = vec_perm(m3, m4, perm);
    m4 = vec_perm(m4, m5, perm);
    m5 = vec_perm(m5, m6, perm);
    m6 = vec_perm(m6, m7, perm);
    m7 = vec_perm(m7, ex, perm);
    uint64x2_p row1l, row1h, row2l, row2h;
    uint64x2_p row3l, row3h, row4l, row4h;

    const uint64x2_p h0 = row1l = VecLoad64LE(state.h()+0, le_mask);
    const uint64x2_p h1 = row1h = VecLoad64LE(state.h()+2, le_mask);
    const uint64x2_p h2 = row2l = VecLoad64LE(state.h()+4, le_mask);
    const uint64x2_p h3 = row2h = VecLoad64LE(state.h()+6, le_mask);
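    // Rows 3-4 are the BLAKE2b IV with the counter and finalization flags
    // folded in, as in the SSE4.1 and NEON paths.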
    row3l = VecLoad64(BLAKE2B_IV+0);
    row3h = VecLoad64(BLAKE2B_IV+2);
    row4l = VecXor(VecLoad64(BLAKE2B_IV+4), VecLoad64(state.t()+0));
    row4h = VecXor(VecLoad64(BLAKE2B_IV+6), VecLoad64(state.f()+0));
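    // Twelve rounds, as in the SSE4.1 and NEON paths.
    BLAKE2B_ROUND(0);
    BLAKE2B_ROUND(1);
    BLAKE2B_ROUND(2);
    BLAKE2B_ROUND(3);
    BLAKE2B_ROUND(4);
    BLAKE2B_ROUND(5);
    BLAKE2B_ROUND(6);
    BLAKE2B_ROUND(7);
    BLAKE2B_ROUND(8);
    BLAKE2B_ROUND(9);
    BLAKE2B_ROUND(10);
    BLAKE2B_ROUND(11);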
    // Feed-forward into the chaining value: h[i] ^= v[i] ^ v[i+8].
    VecStore64LE(state.h()+0, VecXor(h0, VecXor(row1l, row3l)), le_mask);
    VecStore64LE(state.h()+2, VecXor(h1, VecXor(row1h, row3h)), le_mask);
    VecStore64LE(state.h()+4, VecXor(h2, VecXor(row2l, row4l)), le_mask);
    VecStore64LE(state.h()+6, VecXor(h3, VecXor(row2h, row4h)), le_mask);
#endif  // CRYPTOPP_POWER8_AVAILABLE

NAMESPACE_END