
// adv_simd.h - written and placed in the public domain by Jeffrey Walton

/// \file adv_simd.h
/// \brief Template for AdvancedProcessBlocks and SIMD processing

// The SIMD based implementations for ciphers that use SSE, NEON and Power7
// have a common pattern. Namely, they have a specialized implementation of
// AdvancedProcessBlocks which processes multiple blocks using hardware
// acceleration. After several implementations we noticed a lot of copy and
// paste occurring. adv_simd.h provides a template to avoid the copy and paste.
//
// There are 7 templates provided in this file. The number following the
// function name, 128, is the block size in bits. The name following the
// block size is the arrangement and acceleration. For example 4x1_SSE means
// Intel SSE using two encrypt (or decrypt) functions: one that operates on
// 4 SIMD words, and one that operates on 1 SIMD word.
//
// * AdvancedProcessBlocks128_4x1_SSE
// * AdvancedProcessBlocks128_6x2_SSE
// * AdvancedProcessBlocks128_4x1_NEON
// * AdvancedProcessBlocks128_6x1_NEON
// * AdvancedProcessBlocks128_6x2_NEON
// * AdvancedProcessBlocks128_4x1_ALTIVEC
// * AdvancedProcessBlocks128_6x1_ALTIVEC
//
// If an arrangement ends in 2, like 6x2, then the template will handle the
// single block case by padding with 0's and using the two SIMD word
// function. This happens at most once when processing multiple blocks.
// The extra processing of a zero block is trivial and worth the tradeoff.
//
// The MAYBE_CONST macro present on x86 is a SunCC workaround. Some versions
// of SunCC lose/drop the const-ness in the F1 and F4 functions. It eventually
// results in a failed link due to the const/non-const mismatch.
//
// In July 2020 the library stopped using the 64-bit block versions of
// AdvancedProcessBlocks. Testing showed unreliable results and failed
// self tests on occasion. Also see Issue 945 and
// https://github.com/weidai11/cryptopp/commit/dd7598e638bb.
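//
// As a usage sketch (the kernel names below are hypothetical and shown only
// for illustration; the real SIMD kernels live in the per-cipher *_simd.cpp
// files), a cipher forwards its AdvancedProcessBlocks call to one of these
// templates:
//
//   // Assumed kernel signatures; not part of this header.
//   void MyEnc_One(__m128i &block, const word32 *subKeys, unsigned int rounds);
//   void MyEnc_Four(__m128i &block0, __m128i &block1, __m128i &block2,
//       __m128i &block3, const word32 *subKeys, unsigned int rounds);
//
//   size_t MyCipher_Enc_AdvancedProcessBlocks_SSE(const word32 *subKeys,
//       size_t rounds, const byte *inBlocks, const byte *xorBlocks,
//       byte *outBlocks, size_t length, word32 flags)
//   {
//       return AdvancedProcessBlocks128_4x1_SSE(MyEnc_One, MyEnc_Four,
//           subKeys, rounds, inBlocks, xorBlocks, outBlocks, length, flags);
//   }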
#ifndef CRYPTOPP_ADVANCED_SIMD_TEMPLATES
#define CRYPTOPP_ADVANCED_SIMD_TEMPLATES

#include "config.h"
#include "misc.h"
#include "stdcpp.h"

#if (CRYPTOPP_ARM_NEON_HEADER)
# include <arm_neon.h>
#endif

#if (CRYPTOPP_ARM_ACLE_HEADER)
# include <stdint.h>
# include <arm_acle.h>
#endif

#if (CRYPTOPP_SSE2_INTRIN_AVAILABLE)
# include <emmintrin.h>
# include <xmmintrin.h>
#endif

// SunCC needs CRYPTOPP_SSSE3_AVAILABLE, too
#if (CRYPTOPP_SSSE3_AVAILABLE)
# include <emmintrin.h>
# include <pmmintrin.h>
# include <xmmintrin.h>
#endif

#if defined(__ALTIVEC__)
# include "ppc_simd.h"
#endif
// ************************ All block ciphers *********************** //

ANONYMOUS_NAMESPACE_BEGIN

using CryptoPP::BlockTransformation;

CRYPTOPP_CONSTANT(BT_XorInput = BlockTransformation::BT_XorInput);
CRYPTOPP_CONSTANT(BT_AllowParallel = BlockTransformation::BT_AllowParallel);
CRYPTOPP_CONSTANT(BT_InBlockIsCounter = BlockTransformation::BT_InBlockIsCounter);
CRYPTOPP_CONSTANT(BT_ReverseDirection = BlockTransformation::BT_ReverseDirection);
CRYPTOPP_CONSTANT(BT_DontIncrementInOutPointers = BlockTransformation::BT_DontIncrementInOutPointers);

ANONYMOUS_NAMESPACE_END
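// The constants above are file-local copies of the BlockTransformation
// flags so the templates below can use the names without qualification.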
// *************************** ARM NEON ************************** //

#if (CRYPTOPP_ARM_NEON_AVAILABLE) || (CRYPTOPP_ARM_ASIMD_AVAILABLE) || \
    defined(CRYPTOPP_DOXYGEN_PROCESSING)

NAMESPACE_BEGIN(CryptoPP)

/// \brief AdvancedProcessBlocks for 1 and 6 blocks
/// \tparam F1 function to process 1 128-bit block
/// \tparam F6 function to process 6 128-bit blocks
/// \tparam W word type of the subkey table
/// \details AdvancedProcessBlocks128_6x1_NEON processes 6 and 1 NEON SIMD words
///  at a time.
/// \details The subkey type is usually word32 or word64. F1 and F6 must use the
///  same word type.
template <typename F1, typename F6, typename W>
inline size_t AdvancedProcessBlocks128_6x1_NEON(F1 func1, F6 func6,
    const W *subKeys, size_t rounds, const byte *inBlocks,
    const byte *xorBlocks, byte *outBlocks, size_t length, word32 flags)
{
    CRYPTOPP_ASSERT(subKeys);
    CRYPTOPP_ASSERT(inBlocks);
    CRYPTOPP_ASSERT(outBlocks);
    CRYPTOPP_ASSERT(length >= 16);
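    // Increment of 1 in big-endian compatible with the ctr byte array (same
    // idea as the comment in the SSE templates below). On little-endian ARM
    // the 1<<24 in the last 32-bit lane lands on byte 15 of the block, so
    // the 64-bit adds below bump only the last counter byte; CTR_ModePolicy
    // handles the wrap.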
    const unsigned int w_one[] = {0, 0<<24, 0, 1<<24};
    const uint32x4_t s_one = vld1q_u32(w_one);

    const size_t blockSize = 16;
    // const size_t neonBlockSize = 16;

    size_t inIncrement = (flags & (EnumToInt(BT_InBlockIsCounter)|EnumToInt(BT_DontIncrementInOutPointers))) ? 0 : blockSize;
    size_t xorIncrement = (xorBlocks != NULLPTR) ? blockSize : 0;
    size_t outIncrement = (flags & EnumToInt(BT_DontIncrementInOutPointers)) ? 0 : blockSize;

    // Clang and Coverity are generating findings using xorBlocks as a flag.
    const bool xorInput = (xorBlocks != NULLPTR) && (flags & EnumToInt(BT_XorInput));
    const bool xorOutput = (xorBlocks != NULLPTR) && !(flags & EnumToInt(BT_XorInput));
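    // BT_ReverseDirection processes the data from the last block to the
    // first (used, for example, for in-place CBC decryption), so start at
    // the tail and make every stride negative.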
    if (flags & BT_ReverseDirection)
    {
        inBlocks = PtrAdd(inBlocks, length - blockSize);
        xorBlocks = PtrAdd(xorBlocks, length - blockSize);
        outBlocks = PtrAdd(outBlocks, length - blockSize);
        inIncrement = 0-inIncrement;
        xorIncrement = 0-xorIncrement;
        outIncrement = 0-outIncrement;
    }

    if (flags & BT_AllowParallel)
    {
        while (length >= 6*blockSize)
        {
            uint64x2_t block0, block1, block2, block3, block4, block5;

            if (flags & BT_InBlockIsCounter)
            {
                const uint64x2_t one = vreinterpretq_u64_u32(s_one);
                block0 = vreinterpretq_u64_u8(vld1q_u8(inBlocks));
                block1 = vaddq_u64(block0, one);
                block2 = vaddq_u64(block1, one);
                block3 = vaddq_u64(block2, one);
                block4 = vaddq_u64(block3, one);
                block5 = vaddq_u64(block4, one);
                vst1q_u8(const_cast<byte*>(inBlocks),
                    vreinterpretq_u8_u64(vaddq_u64(block5, one)));
            }
            else
            {
                block0 = vreinterpretq_u64_u8(vld1q_u8(inBlocks));
                inBlocks = PtrAdd(inBlocks, inIncrement);
                block1 = vreinterpretq_u64_u8(vld1q_u8(inBlocks));
                inBlocks = PtrAdd(inBlocks, inIncrement);
                block2 = vreinterpretq_u64_u8(vld1q_u8(inBlocks));
                inBlocks = PtrAdd(inBlocks, inIncrement);
                block3 = vreinterpretq_u64_u8(vld1q_u8(inBlocks));
                inBlocks = PtrAdd(inBlocks, inIncrement);
                block4 = vreinterpretq_u64_u8(vld1q_u8(inBlocks));
                inBlocks = PtrAdd(inBlocks, inIncrement);
                block5 = vreinterpretq_u64_u8(vld1q_u8(inBlocks));
                inBlocks = PtrAdd(inBlocks, inIncrement);
            }

            if (xorInput)
            {
                block0 = veorq_u64(block0, vreinterpretq_u64_u8(vld1q_u8(xorBlocks)));
                xorBlocks = PtrAdd(xorBlocks, xorIncrement);
                block1 = veorq_u64(block1, vreinterpretq_u64_u8(vld1q_u8(xorBlocks)));
                xorBlocks = PtrAdd(xorBlocks, xorIncrement);
                block2 = veorq_u64(block2, vreinterpretq_u64_u8(vld1q_u8(xorBlocks)));
                xorBlocks = PtrAdd(xorBlocks, xorIncrement);
                block3 = veorq_u64(block3, vreinterpretq_u64_u8(vld1q_u8(xorBlocks)));
                xorBlocks = PtrAdd(xorBlocks, xorIncrement);
                block4 = veorq_u64(block4, vreinterpretq_u64_u8(vld1q_u8(xorBlocks)));
                xorBlocks = PtrAdd(xorBlocks, xorIncrement);
                block5 = veorq_u64(block5, vreinterpretq_u64_u8(vld1q_u8(xorBlocks)));
                xorBlocks = PtrAdd(xorBlocks, xorIncrement);
            }

            func6(block0, block1, block2, block3, block4, block5, subKeys, static_cast<unsigned int>(rounds));

            if (xorOutput)
            {
                block0 = veorq_u64(block0, vreinterpretq_u64_u8(vld1q_u8(xorBlocks)));
                xorBlocks = PtrAdd(xorBlocks, xorIncrement);
                block1 = veorq_u64(block1, vreinterpretq_u64_u8(vld1q_u8(xorBlocks)));
                xorBlocks = PtrAdd(xorBlocks, xorIncrement);
                block2 = veorq_u64(block2, vreinterpretq_u64_u8(vld1q_u8(xorBlocks)));
                xorBlocks = PtrAdd(xorBlocks, xorIncrement);
                block3 = veorq_u64(block3, vreinterpretq_u64_u8(vld1q_u8(xorBlocks)));
                xorBlocks = PtrAdd(xorBlocks, xorIncrement);
                block4 = veorq_u64(block4, vreinterpretq_u64_u8(vld1q_u8(xorBlocks)));
                xorBlocks = PtrAdd(xorBlocks, xorIncrement);
                block5 = veorq_u64(block5, vreinterpretq_u64_u8(vld1q_u8(xorBlocks)));
                xorBlocks = PtrAdd(xorBlocks, xorIncrement);
            }

            vst1q_u8(outBlocks, vreinterpretq_u8_u64(block0));
            outBlocks = PtrAdd(outBlocks, outIncrement);
            vst1q_u8(outBlocks, vreinterpretq_u8_u64(block1));
            outBlocks = PtrAdd(outBlocks, outIncrement);
            vst1q_u8(outBlocks, vreinterpretq_u8_u64(block2));
            outBlocks = PtrAdd(outBlocks, outIncrement);
            vst1q_u8(outBlocks, vreinterpretq_u8_u64(block3));
            outBlocks = PtrAdd(outBlocks, outIncrement);
            vst1q_u8(outBlocks, vreinterpretq_u8_u64(block4));
            outBlocks = PtrAdd(outBlocks, outIncrement);
            vst1q_u8(outBlocks, vreinterpretq_u8_u64(block5));
            outBlocks = PtrAdd(outBlocks, outIncrement);

            length -= 6*blockSize;
        }
    }

    while (length >= blockSize)
    {
        uint64x2_t block;
        block = vreinterpretq_u64_u8(vld1q_u8(inBlocks));

        if (xorInput)
            block = veorq_u64(block, vreinterpretq_u64_u8(vld1q_u8(xorBlocks)));
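        // For the single-block tail only the last counter byte is bumped;
        // CTR_ModePolicy detects the wrap and propagates the carry (see the
        // POWER8 notes near the end of this file).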
        if (flags & BT_InBlockIsCounter)
            const_cast<byte *>(inBlocks)[15]++;

        func1(block, subKeys, static_cast<unsigned int>(rounds));

        if (xorOutput)
            block = veorq_u64(block, vreinterpretq_u64_u8(vld1q_u8(xorBlocks)));

        vst1q_u8(outBlocks, vreinterpretq_u8_u64(block));

        inBlocks = PtrAdd(inBlocks, inIncrement);
        outBlocks = PtrAdd(outBlocks, outIncrement);
        xorBlocks = PtrAdd(xorBlocks, xorIncrement);
        length -= blockSize;
    }

    return length;
}
/// \brief AdvancedProcessBlocks for 1 and 4 blocks
/// \tparam F1 function to process 1 128-bit block
/// \tparam F4 function to process 4 128-bit blocks
/// \tparam W word type of the subkey table
/// \details AdvancedProcessBlocks128_4x1_NEON processes 4 and 1 NEON SIMD words
///  at a time.
/// \details The subkey type is usually word32 or word64. V is the vector type and it is
///  usually uint32x4_t or uint64x2_t. F1, F4, and W must use the same word and
///  vector type.
template <typename F1, typename F4, typename W>
inline size_t AdvancedProcessBlocks128_4x1_NEON(F1 func1, F4 func4,
    const W *subKeys, size_t rounds, const byte *inBlocks,
    const byte *xorBlocks, byte *outBlocks, size_t length, word32 flags)
{
    CRYPTOPP_ASSERT(subKeys);
    CRYPTOPP_ASSERT(inBlocks);
    CRYPTOPP_ASSERT(outBlocks);
    CRYPTOPP_ASSERT(length >= 16);

    const unsigned int w_one[] = {0, 0<<24, 0, 1<<24};
    const uint32x4_t s_one = vld1q_u32(w_one);

    const size_t blockSize = 16;
    // const size_t neonBlockSize = 16;

    size_t inIncrement = (flags & (EnumToInt(BT_InBlockIsCounter)|EnumToInt(BT_DontIncrementInOutPointers))) ? 0 : blockSize;
    size_t xorIncrement = (xorBlocks != NULLPTR) ? blockSize : 0;
    size_t outIncrement = (flags & EnumToInt(BT_DontIncrementInOutPointers)) ? 0 : blockSize;

    // Clang and Coverity are generating findings using xorBlocks as a flag.
    const bool xorInput = (xorBlocks != NULLPTR) && (flags & EnumToInt(BT_XorInput));
    const bool xorOutput = (xorBlocks != NULLPTR) && !(flags & EnumToInt(BT_XorInput));

    if (flags & BT_ReverseDirection)
    {
        inBlocks = PtrAdd(inBlocks, length - blockSize);
        xorBlocks = PtrAdd(xorBlocks, length - blockSize);
        outBlocks = PtrAdd(outBlocks, length - blockSize);
        inIncrement = 0-inIncrement;
        xorIncrement = 0-xorIncrement;
        outIncrement = 0-outIncrement;
    }

    if (flags & BT_AllowParallel)
    {
        while (length >= 4*blockSize)
        {
            uint32x4_t block0, block1, block2, block3;

            if (flags & BT_InBlockIsCounter)
            {
                const uint32x4_t one = s_one;
                block0 = vreinterpretq_u32_u8(vld1q_u8(inBlocks));
                block1 = vreinterpretq_u32_u64(vaddq_u64(vreinterpretq_u64_u32(block0), vreinterpretq_u64_u32(one)));
                block2 = vreinterpretq_u32_u64(vaddq_u64(vreinterpretq_u64_u32(block1), vreinterpretq_u64_u32(one)));
                block3 = vreinterpretq_u32_u64(vaddq_u64(vreinterpretq_u64_u32(block2), vreinterpretq_u64_u32(one)));
                vst1q_u8(const_cast<byte*>(inBlocks), vreinterpretq_u8_u64(vaddq_u64(
                    vreinterpretq_u64_u32(block3), vreinterpretq_u64_u32(one))));
            }
            else
            {
                block0 = vreinterpretq_u32_u8(vld1q_u8(inBlocks));
                inBlocks = PtrAdd(inBlocks, inIncrement);
                block1 = vreinterpretq_u32_u8(vld1q_u8(inBlocks));
                inBlocks = PtrAdd(inBlocks, inIncrement);
                block2 = vreinterpretq_u32_u8(vld1q_u8(inBlocks));
                inBlocks = PtrAdd(inBlocks, inIncrement);
                block3 = vreinterpretq_u32_u8(vld1q_u8(inBlocks));
                inBlocks = PtrAdd(inBlocks, inIncrement);
            }

            if (xorInput)
            {
                block0 = veorq_u32(block0, vreinterpretq_u32_u8(vld1q_u8(xorBlocks)));
                xorBlocks = PtrAdd(xorBlocks, xorIncrement);
                block1 = veorq_u32(block1, vreinterpretq_u32_u8(vld1q_u8(xorBlocks)));
                xorBlocks = PtrAdd(xorBlocks, xorIncrement);
                block2 = veorq_u32(block2, vreinterpretq_u32_u8(vld1q_u8(xorBlocks)));
                xorBlocks = PtrAdd(xorBlocks, xorIncrement);
                block3 = veorq_u32(block3, vreinterpretq_u32_u8(vld1q_u8(xorBlocks)));
                xorBlocks = PtrAdd(xorBlocks, xorIncrement);
            }

            func4(block0, block1, block2, block3, subKeys, static_cast<unsigned int>(rounds));

            if (xorOutput)
            {
                block0 = veorq_u32(block0, vreinterpretq_u32_u8(vld1q_u8(xorBlocks)));
                xorBlocks = PtrAdd(xorBlocks, xorIncrement);
                block1 = veorq_u32(block1, vreinterpretq_u32_u8(vld1q_u8(xorBlocks)));
                xorBlocks = PtrAdd(xorBlocks, xorIncrement);
                block2 = veorq_u32(block2, vreinterpretq_u32_u8(vld1q_u8(xorBlocks)));
                xorBlocks = PtrAdd(xorBlocks, xorIncrement);
                block3 = veorq_u32(block3, vreinterpretq_u32_u8(vld1q_u8(xorBlocks)));
                xorBlocks = PtrAdd(xorBlocks, xorIncrement);
            }

            vst1q_u8(outBlocks, vreinterpretq_u8_u32(block0));
            outBlocks = PtrAdd(outBlocks, outIncrement);
            vst1q_u8(outBlocks, vreinterpretq_u8_u32(block1));
            outBlocks = PtrAdd(outBlocks, outIncrement);
            vst1q_u8(outBlocks, vreinterpretq_u8_u32(block2));
            outBlocks = PtrAdd(outBlocks, outIncrement);
            vst1q_u8(outBlocks, vreinterpretq_u8_u32(block3));
            outBlocks = PtrAdd(outBlocks, outIncrement);

            length -= 4*blockSize;
        }
    }

    while (length >= blockSize)
    {
        uint32x4_t block = vreinterpretq_u32_u8(vld1q_u8(inBlocks));

        if (xorInput)
            block = veorq_u32(block, vreinterpretq_u32_u8(vld1q_u8(xorBlocks)));

        if (flags & BT_InBlockIsCounter)
            const_cast<byte *>(inBlocks)[15]++;

        func1(block, subKeys, static_cast<unsigned int>(rounds));

        if (xorOutput)
            block = veorq_u32(block, vreinterpretq_u32_u8(vld1q_u8(xorBlocks)));

        vst1q_u8(outBlocks, vreinterpretq_u8_u32(block));

        inBlocks = PtrAdd(inBlocks, inIncrement);
        outBlocks = PtrAdd(outBlocks, outIncrement);
        xorBlocks = PtrAdd(xorBlocks, xorIncrement);
        length -= blockSize;
    }

    return length;
}
/// \brief AdvancedProcessBlocks for 2 and 6 blocks
/// \tparam F2 function to process 2 128-bit blocks
/// \tparam F6 function to process 6 128-bit blocks
/// \tparam W word type of the subkey table
/// \details AdvancedProcessBlocks128_6x2_NEON processes 6 and 2 NEON SIMD words
///  at a time. For a single block the template uses F2 with a zero block.
/// \details The subkey type is usually word32 or word64. F2 and F6 must use the
///  same word type.
template <typename F2, typename F6, typename W>
inline size_t AdvancedProcessBlocks128_6x2_NEON(F2 func2, F6 func6,
    const W *subKeys, size_t rounds, const byte *inBlocks,
    const byte *xorBlocks, byte *outBlocks, size_t length, word32 flags)
{
    CRYPTOPP_ASSERT(subKeys);
    CRYPTOPP_ASSERT(inBlocks);
    CRYPTOPP_ASSERT(outBlocks);
    CRYPTOPP_ASSERT(length >= 16);

    const unsigned int w_one[] = {0, 0<<24, 0, 1<<24};
    const uint32x4_t s_one = vld1q_u32(w_one);

    const size_t blockSize = 16;
    // const size_t neonBlockSize = 16;

    size_t inIncrement = (flags & (EnumToInt(BT_InBlockIsCounter)|EnumToInt(BT_DontIncrementInOutPointers))) ? 0 : blockSize;
    size_t xorIncrement = (xorBlocks != NULLPTR) ? blockSize : 0;
    size_t outIncrement = (flags & EnumToInt(BT_DontIncrementInOutPointers)) ? 0 : blockSize;

    // Clang and Coverity are generating findings using xorBlocks as a flag.
    const bool xorInput = (xorBlocks != NULLPTR) && (flags & EnumToInt(BT_XorInput));
    const bool xorOutput = (xorBlocks != NULLPTR) && !(flags & EnumToInt(BT_XorInput));

    if (flags & BT_ReverseDirection)
    {
        inBlocks = PtrAdd(inBlocks, length - blockSize);
        xorBlocks = PtrAdd(xorBlocks, length - blockSize);
        outBlocks = PtrAdd(outBlocks, length - blockSize);
        inIncrement = 0-inIncrement;
        xorIncrement = 0-xorIncrement;
        outIncrement = 0-outIncrement;
    }

    if (flags & BT_AllowParallel)
    {
        while (length >= 6*blockSize)
        {
            uint64x2_t block0, block1, block2, block3, block4, block5;

            if (flags & BT_InBlockIsCounter)
            {
                const uint64x2_t one = vreinterpretq_u64_u32(s_one);
                block0 = vreinterpretq_u64_u8(vld1q_u8(inBlocks));
                block1 = vaddq_u64(block0, one);
                block2 = vaddq_u64(block1, one);
                block3 = vaddq_u64(block2, one);
                block4 = vaddq_u64(block3, one);
                block5 = vaddq_u64(block4, one);
                vst1q_u8(const_cast<byte*>(inBlocks),
                    vreinterpretq_u8_u64(vaddq_u64(block5, one)));
            }
            else
            {
                block0 = vreinterpretq_u64_u8(vld1q_u8(inBlocks));
                inBlocks = PtrAdd(inBlocks, inIncrement);
                block1 = vreinterpretq_u64_u8(vld1q_u8(inBlocks));
                inBlocks = PtrAdd(inBlocks, inIncrement);
                block2 = vreinterpretq_u64_u8(vld1q_u8(inBlocks));
                inBlocks = PtrAdd(inBlocks, inIncrement);
                block3 = vreinterpretq_u64_u8(vld1q_u8(inBlocks));
                inBlocks = PtrAdd(inBlocks, inIncrement);
                block4 = vreinterpretq_u64_u8(vld1q_u8(inBlocks));
                inBlocks = PtrAdd(inBlocks, inIncrement);
                block5 = vreinterpretq_u64_u8(vld1q_u8(inBlocks));
                inBlocks = PtrAdd(inBlocks, inIncrement);
            }

            if (xorInput)
            {
                block0 = veorq_u64(block0, vreinterpretq_u64_u8(vld1q_u8(xorBlocks)));
                xorBlocks = PtrAdd(xorBlocks, xorIncrement);
                block1 = veorq_u64(block1, vreinterpretq_u64_u8(vld1q_u8(xorBlocks)));
                xorBlocks = PtrAdd(xorBlocks, xorIncrement);
                block2 = veorq_u64(block2, vreinterpretq_u64_u8(vld1q_u8(xorBlocks)));
                xorBlocks = PtrAdd(xorBlocks, xorIncrement);
                block3 = veorq_u64(block3, vreinterpretq_u64_u8(vld1q_u8(xorBlocks)));
                xorBlocks = PtrAdd(xorBlocks, xorIncrement);
                block4 = veorq_u64(block4, vreinterpretq_u64_u8(vld1q_u8(xorBlocks)));
                xorBlocks = PtrAdd(xorBlocks, xorIncrement);
                block5 = veorq_u64(block5, vreinterpretq_u64_u8(vld1q_u8(xorBlocks)));
                xorBlocks = PtrAdd(xorBlocks, xorIncrement);
            }

            func6(block0, block1, block2, block3, block4, block5, subKeys, static_cast<unsigned int>(rounds));

            if (xorOutput)
            {
                block0 = veorq_u64(block0, vreinterpretq_u64_u8(vld1q_u8(xorBlocks)));
                xorBlocks = PtrAdd(xorBlocks, xorIncrement);
                block1 = veorq_u64(block1, vreinterpretq_u64_u8(vld1q_u8(xorBlocks)));
                xorBlocks = PtrAdd(xorBlocks, xorIncrement);
                block2 = veorq_u64(block2, vreinterpretq_u64_u8(vld1q_u8(xorBlocks)));
                xorBlocks = PtrAdd(xorBlocks, xorIncrement);
                block3 = veorq_u64(block3, vreinterpretq_u64_u8(vld1q_u8(xorBlocks)));
                xorBlocks = PtrAdd(xorBlocks, xorIncrement);
                block4 = veorq_u64(block4, vreinterpretq_u64_u8(vld1q_u8(xorBlocks)));
                xorBlocks = PtrAdd(xorBlocks, xorIncrement);
                block5 = veorq_u64(block5, vreinterpretq_u64_u8(vld1q_u8(xorBlocks)));
                xorBlocks = PtrAdd(xorBlocks, xorIncrement);
            }

            vst1q_u8(outBlocks, vreinterpretq_u8_u64(block0));
            outBlocks = PtrAdd(outBlocks, outIncrement);
            vst1q_u8(outBlocks, vreinterpretq_u8_u64(block1));
            outBlocks = PtrAdd(outBlocks, outIncrement);
            vst1q_u8(outBlocks, vreinterpretq_u8_u64(block2));
            outBlocks = PtrAdd(outBlocks, outIncrement);
            vst1q_u8(outBlocks, vreinterpretq_u8_u64(block3));
            outBlocks = PtrAdd(outBlocks, outIncrement);
            vst1q_u8(outBlocks, vreinterpretq_u8_u64(block4));
            outBlocks = PtrAdd(outBlocks, outIncrement);
            vst1q_u8(outBlocks, vreinterpretq_u8_u64(block5));
            outBlocks = PtrAdd(outBlocks, outIncrement);

            length -= 6*blockSize;
        }

        while (length >= 2*blockSize)
        {
            uint64x2_t block0, block1;

            if (flags & BT_InBlockIsCounter)
            {
                const uint64x2_t one = vreinterpretq_u64_u32(s_one);
                block0 = vreinterpretq_u64_u8(vld1q_u8(inBlocks));
                block1 = vaddq_u64(block0, one);
                vst1q_u8(const_cast<byte*>(inBlocks),
                    vreinterpretq_u8_u64(vaddq_u64(block1, one)));
            }
            else
            {
                block0 = vreinterpretq_u64_u8(vld1q_u8(inBlocks));
                inBlocks = PtrAdd(inBlocks, inIncrement);
                block1 = vreinterpretq_u64_u8(vld1q_u8(inBlocks));
                inBlocks = PtrAdd(inBlocks, inIncrement);
            }

            if (xorInput)
            {
                block0 = veorq_u64(block0, vreinterpretq_u64_u8(vld1q_u8(xorBlocks)));
                xorBlocks = PtrAdd(xorBlocks, xorIncrement);
                block1 = veorq_u64(block1, vreinterpretq_u64_u8(vld1q_u8(xorBlocks)));
                xorBlocks = PtrAdd(xorBlocks, xorIncrement);
            }

            func2(block0, block1, subKeys, static_cast<unsigned int>(rounds));

            if (xorOutput)
            {
                block0 = veorq_u64(block0, vreinterpretq_u64_u8(vld1q_u8(xorBlocks)));
                xorBlocks = PtrAdd(xorBlocks, xorIncrement);
                block1 = veorq_u64(block1, vreinterpretq_u64_u8(vld1q_u8(xorBlocks)));
                xorBlocks = PtrAdd(xorBlocks, xorIncrement);
            }

            vst1q_u8(outBlocks, vreinterpretq_u8_u64(block0));
            outBlocks = PtrAdd(outBlocks, outIncrement);
            vst1q_u8(outBlocks, vreinterpretq_u8_u64(block1));
            outBlocks = PtrAdd(outBlocks, outIncrement);

            length -= 2*blockSize;
        }
    }

    while (length >= blockSize)
    {
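        // Single-block tail: pad with a zero block so the 2-block kernel
        // can be reused, as described in the header comments above.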
        uint64x2_t block, zero = {0,0};
        block = vreinterpretq_u64_u8(vld1q_u8(inBlocks));

        if (xorInput)
            block = veorq_u64(block, vreinterpretq_u64_u8(vld1q_u8(xorBlocks)));

        if (flags & BT_InBlockIsCounter)
            const_cast<byte *>(inBlocks)[15]++;

        func2(block, zero, subKeys, static_cast<unsigned int>(rounds));

        if (xorOutput)
            block = veorq_u64(block, vreinterpretq_u64_u8(vld1q_u8(xorBlocks)));

        vst1q_u8(outBlocks, vreinterpretq_u8_u64(block));

        inBlocks = PtrAdd(inBlocks, inIncrement);
        outBlocks = PtrAdd(outBlocks, outIncrement);
        xorBlocks = PtrAdd(xorBlocks, xorIncrement);
        length -= blockSize;
    }

    return length;
}

NAMESPACE_END  // CryptoPP

#endif  // CRYPTOPP_ARM_NEON_AVAILABLE
// *************************** Intel SSE ************************** //

#if defined(CRYPTOPP_SSSE3_AVAILABLE) || defined(CRYPTOPP_DOXYGEN_PROCESSING)

#if defined(CRYPTOPP_DOXYGEN_PROCESSING)
/// \brief SunCC workaround
/// \details SunCC loses the const on AES_Enc_Block and AES_Dec_Block
/// \sa <A HREF="http://github.com/weidai11/cryptopp/issues/224">Issue
///  224, SunCC and failed compile for rijndael.cpp</A>
# define MAYBE_CONST const
/// \brief SunCC workaround
/// \details SunCC loses the const on AES_Enc_Block and AES_Dec_Block
/// \sa <A HREF="http://github.com/weidai11/cryptopp/issues/224">Issue
///  224, SunCC and failed compile for rijndael.cpp</A>
# define MAYBE_UNCONST_CAST(T, x) (x)
#elif (__SUNPRO_CC >= 0x5130)
# define MAYBE_CONST
# define MAYBE_UNCONST_CAST(T, x) const_cast<MAYBE_CONST T>(x)
#else
# define MAYBE_CONST const
# define MAYBE_UNCONST_CAST(T, x) (x)
#endif
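// A sketch of the intent (illustrative only; the variable below is not part
// of this header): on SunCC the parameter type degrades to non-const, so a
// call site recovers a pointer of matching const-ness with the cast macro.
//
//   MAYBE_CONST word32* sk = MAYBE_UNCONST_CAST(word32*, subKeys);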
#if defined(CRYPTOPP_DOXYGEN_PROCESSING)
/// \brief Clang workaround
/// \details Clang issues spurious alignment warnings
/// \sa <A HREF="http://bugs.llvm.org/show_bug.cgi?id=20670">Issue
///  20670, _mm_loadu_si128 parameter has wrong type</A>
# define M128_CAST(x) ((__m128i *)(void *)(x))
/// \brief Clang workaround
/// \details Clang issues spurious alignment warnings
/// \sa <A HREF="http://bugs.llvm.org/show_bug.cgi?id=20670">Issue
///  20670, _mm_loadu_si128 parameter has wrong type</A>
# define CONST_M128_CAST(x) ((const __m128i *)(const void *)(x))
#else
# ifndef M128_CAST
#  define M128_CAST(x) ((__m128i *)(void *)(x))
# endif
# ifndef CONST_M128_CAST
#  define CONST_M128_CAST(x) ((const __m128i *)(const void *)(x))
# endif
#endif

NAMESPACE_BEGIN(CryptoPP)

/// \brief AdvancedProcessBlocks for 2 and 6 blocks
/// \tparam F2 function to process 2 128-bit blocks
/// \tparam F6 function to process 6 128-bit blocks
/// \tparam W word type of the subkey table
/// \details AdvancedProcessBlocks128_6x2_SSE processes 6 and 2 SSE SIMD words
///  at a time. For a single block the template uses F2 with a zero block.
/// \details The subkey type is usually word32 or word64. F2 and F6 must use the
///  same word type.
template <typename F2, typename F6, typename W>
inline size_t AdvancedProcessBlocks128_6x2_SSE(F2 func2, F6 func6,
    MAYBE_CONST W *subKeys, size_t rounds, const byte *inBlocks,
    const byte *xorBlocks, byte *outBlocks, size_t length, word32 flags)
{
    CRYPTOPP_ASSERT(subKeys);
    CRYPTOPP_ASSERT(inBlocks);
    CRYPTOPP_ASSERT(outBlocks);
    CRYPTOPP_ASSERT(length >= 16);

    const size_t blockSize = 16;
    // const size_t xmmBlockSize = 16;

    size_t inIncrement = (flags & (EnumToInt(BT_InBlockIsCounter)|EnumToInt(BT_DontIncrementInOutPointers))) ? 0 : blockSize;
    size_t xorIncrement = (xorBlocks != NULLPTR) ? blockSize : 0;
    size_t outIncrement = (flags & EnumToInt(BT_DontIncrementInOutPointers)) ? 0 : blockSize;

    // Clang and Coverity are generating findings using xorBlocks as a flag.
    const bool xorInput = (xorBlocks != NULLPTR) && (flags & EnumToInt(BT_XorInput));
    const bool xorOutput = (xorBlocks != NULLPTR) && !(flags & EnumToInt(BT_XorInput));

    if (flags & BT_ReverseDirection)
    {
        inBlocks = PtrAdd(inBlocks, length - blockSize);
        xorBlocks = PtrAdd(xorBlocks, length - blockSize);
        outBlocks = PtrAdd(outBlocks, length - blockSize);
        inIncrement = 0-inIncrement;
        xorIncrement = 0-xorIncrement;
        outIncrement = 0-outIncrement;
    }

    if (flags & BT_AllowParallel)
    {
        while (length >= 6*blockSize)
        {
            __m128i block0, block1, block2, block3, block4, block5;

            if (flags & BT_InBlockIsCounter)
            {
                // Increment of 1 in big-endian compatible with the ctr byte array.
                const __m128i s_one = _mm_set_epi32(1<<24, 0, 0, 0);
                block0 = _mm_loadu_si128(CONST_M128_CAST(inBlocks));
                block1 = _mm_add_epi32(block0, s_one);
                block2 = _mm_add_epi32(block1, s_one);
                block3 = _mm_add_epi32(block2, s_one);
                block4 = _mm_add_epi32(block3, s_one);
                block5 = _mm_add_epi32(block4, s_one);
                _mm_storeu_si128(M128_CAST(inBlocks), _mm_add_epi32(block5, s_one));
            }
            else
            {
                block0 = _mm_loadu_si128(CONST_M128_CAST(inBlocks));
                inBlocks = PtrAdd(inBlocks, inIncrement);
                block1 = _mm_loadu_si128(CONST_M128_CAST(inBlocks));
                inBlocks = PtrAdd(inBlocks, inIncrement);
                block2 = _mm_loadu_si128(CONST_M128_CAST(inBlocks));
                inBlocks = PtrAdd(inBlocks, inIncrement);
                block3 = _mm_loadu_si128(CONST_M128_CAST(inBlocks));
                inBlocks = PtrAdd(inBlocks, inIncrement);
                block4 = _mm_loadu_si128(CONST_M128_CAST(inBlocks));
                inBlocks = PtrAdd(inBlocks, inIncrement);
                block5 = _mm_loadu_si128(CONST_M128_CAST(inBlocks));
                inBlocks = PtrAdd(inBlocks, inIncrement);
            }

            if (xorInput)
            {
                block0 = _mm_xor_si128(block0, _mm_loadu_si128(CONST_M128_CAST(xorBlocks)));
                xorBlocks = PtrAdd(xorBlocks, xorIncrement);
                block1 = _mm_xor_si128(block1, _mm_loadu_si128(CONST_M128_CAST(xorBlocks)));
                xorBlocks = PtrAdd(xorBlocks, xorIncrement);
                block2 = _mm_xor_si128(block2, _mm_loadu_si128(CONST_M128_CAST(xorBlocks)));
                xorBlocks = PtrAdd(xorBlocks, xorIncrement);
                block3 = _mm_xor_si128(block3, _mm_loadu_si128(CONST_M128_CAST(xorBlocks)));
                xorBlocks = PtrAdd(xorBlocks, xorIncrement);
                block4 = _mm_xor_si128(block4, _mm_loadu_si128(CONST_M128_CAST(xorBlocks)));
                xorBlocks = PtrAdd(xorBlocks, xorIncrement);
                block5 = _mm_xor_si128(block5, _mm_loadu_si128(CONST_M128_CAST(xorBlocks)));
                xorBlocks = PtrAdd(xorBlocks, xorIncrement);
            }

            func6(block0, block1, block2, block3, block4, block5, subKeys, static_cast<unsigned int>(rounds));

            if (xorOutput)
            {
                block0 = _mm_xor_si128(block0, _mm_loadu_si128(CONST_M128_CAST(xorBlocks)));
                xorBlocks = PtrAdd(xorBlocks, xorIncrement);
                block1 = _mm_xor_si128(block1, _mm_loadu_si128(CONST_M128_CAST(xorBlocks)));
                xorBlocks = PtrAdd(xorBlocks, xorIncrement);
                block2 = _mm_xor_si128(block2, _mm_loadu_si128(CONST_M128_CAST(xorBlocks)));
                xorBlocks = PtrAdd(xorBlocks, xorIncrement);
                block3 = _mm_xor_si128(block3, _mm_loadu_si128(CONST_M128_CAST(xorBlocks)));
                xorBlocks = PtrAdd(xorBlocks, xorIncrement);
                block4 = _mm_xor_si128(block4, _mm_loadu_si128(CONST_M128_CAST(xorBlocks)));
                xorBlocks = PtrAdd(xorBlocks, xorIncrement);
                block5 = _mm_xor_si128(block5, _mm_loadu_si128(CONST_M128_CAST(xorBlocks)));
                xorBlocks = PtrAdd(xorBlocks, xorIncrement);
            }

            _mm_storeu_si128(M128_CAST(outBlocks), block0);
            outBlocks = PtrAdd(outBlocks, outIncrement);
            _mm_storeu_si128(M128_CAST(outBlocks), block1);
            outBlocks = PtrAdd(outBlocks, outIncrement);
            _mm_storeu_si128(M128_CAST(outBlocks), block2);
            outBlocks = PtrAdd(outBlocks, outIncrement);
            _mm_storeu_si128(M128_CAST(outBlocks), block3);
            outBlocks = PtrAdd(outBlocks, outIncrement);
            _mm_storeu_si128(M128_CAST(outBlocks), block4);
            outBlocks = PtrAdd(outBlocks, outIncrement);
            _mm_storeu_si128(M128_CAST(outBlocks), block5);
            outBlocks = PtrAdd(outBlocks, outIncrement);

            length -= 6*blockSize;
        }

        while (length >= 2*blockSize)
        {
            __m128i block0, block1;

            if (flags & BT_InBlockIsCounter)
            {
                // Increment of 1 in big-endian compatible with the ctr byte array.
                const __m128i s_one = _mm_set_epi32(1<<24, 0, 0, 0);
                block0 = _mm_loadu_si128(CONST_M128_CAST(inBlocks));
                block1 = _mm_add_epi32(block0, s_one);
                _mm_storeu_si128(M128_CAST(inBlocks), _mm_add_epi32(block1, s_one));
            }
            else
            {
                block0 = _mm_loadu_si128(CONST_M128_CAST(inBlocks));
                inBlocks = PtrAdd(inBlocks, inIncrement);
                block1 = _mm_loadu_si128(CONST_M128_CAST(inBlocks));
                inBlocks = PtrAdd(inBlocks, inIncrement);
            }

            if (xorInput)
            {
                block0 = _mm_xor_si128(block0, _mm_loadu_si128(CONST_M128_CAST(xorBlocks)));
                xorBlocks = PtrAdd(xorBlocks, xorIncrement);
                block1 = _mm_xor_si128(block1, _mm_loadu_si128(CONST_M128_CAST(xorBlocks)));
                xorBlocks = PtrAdd(xorBlocks, xorIncrement);
            }

            func2(block0, block1, subKeys, static_cast<unsigned int>(rounds));

            if (xorOutput)
            {
                block0 = _mm_xor_si128(block0, _mm_loadu_si128(CONST_M128_CAST(xorBlocks)));
                xorBlocks = PtrAdd(xorBlocks, xorIncrement);
                block1 = _mm_xor_si128(block1, _mm_loadu_si128(CONST_M128_CAST(xorBlocks)));
                xorBlocks = PtrAdd(xorBlocks, xorIncrement);
            }

            _mm_storeu_si128(M128_CAST(outBlocks), block0);
            outBlocks = PtrAdd(outBlocks, outIncrement);
            _mm_storeu_si128(M128_CAST(outBlocks), block1);
            outBlocks = PtrAdd(outBlocks, outIncrement);

            length -= 2*blockSize;
        }
    }

    while (length >= blockSize)
    {
        __m128i block, zero = _mm_setzero_si128();
        block = _mm_loadu_si128(CONST_M128_CAST(inBlocks));

        if (xorInput)
            block = _mm_xor_si128(block, _mm_loadu_si128(CONST_M128_CAST(xorBlocks)));

        if (flags & BT_InBlockIsCounter)
            const_cast<byte *>(inBlocks)[15]++;

        func2(block, zero, subKeys, static_cast<unsigned int>(rounds));

        if (xorOutput)
            block = _mm_xor_si128(block, _mm_loadu_si128(CONST_M128_CAST(xorBlocks)));

        _mm_storeu_si128(M128_CAST(outBlocks), block);

        inBlocks = PtrAdd(inBlocks, inIncrement);
        outBlocks = PtrAdd(outBlocks, outIncrement);
        xorBlocks = PtrAdd(xorBlocks, xorIncrement);
        length -= blockSize;
    }

    return length;
}
/// \brief AdvancedProcessBlocks for 1 and 4 blocks
/// \tparam F1 function to process 1 128-bit block
/// \tparam F4 function to process 4 128-bit blocks
/// \tparam W word type of the subkey table
/// \details AdvancedProcessBlocks128_4x1_SSE processes 4 and 1 SSE SIMD words
///  at a time.
/// \details The subkey type is usually word32 or word64. F1 and F4 must use the
///  same word type.
template <typename F1, typename F4, typename W>
inline size_t AdvancedProcessBlocks128_4x1_SSE(F1 func1, F4 func4,
    MAYBE_CONST W *subKeys, size_t rounds, const byte *inBlocks,
    const byte *xorBlocks, byte *outBlocks, size_t length, word32 flags)
{
    CRYPTOPP_ASSERT(subKeys);
    CRYPTOPP_ASSERT(inBlocks);
    CRYPTOPP_ASSERT(outBlocks);
    CRYPTOPP_ASSERT(length >= 16);

    const size_t blockSize = 16;
    // const size_t xmmBlockSize = 16;

    size_t inIncrement = (flags & (EnumToInt(BT_InBlockIsCounter)|EnumToInt(BT_DontIncrementInOutPointers))) ? 0 : blockSize;
    size_t xorIncrement = (xorBlocks != NULLPTR) ? blockSize : 0;
    size_t outIncrement = (flags & EnumToInt(BT_DontIncrementInOutPointers)) ? 0 : blockSize;

    // Clang and Coverity are generating findings using xorBlocks as a flag.
    const bool xorInput = (xorBlocks != NULLPTR) && (flags & EnumToInt(BT_XorInput));
    const bool xorOutput = (xorBlocks != NULLPTR) && !(flags & EnumToInt(BT_XorInput));

    if (flags & BT_ReverseDirection)
    {
        inBlocks = PtrAdd(inBlocks, length - blockSize);
        xorBlocks = PtrAdd(xorBlocks, length - blockSize);
        outBlocks = PtrAdd(outBlocks, length - blockSize);
        inIncrement = 0-inIncrement;
        xorIncrement = 0-xorIncrement;
        outIncrement = 0-outIncrement;
    }

    if (flags & BT_AllowParallel)
    {
        while (length >= 4*blockSize)
        {
            __m128i block0, block1, block2, block3;

            if (flags & BT_InBlockIsCounter)
            {
                // Increment of 1 in big-endian compatible with the ctr byte array.
                const __m128i s_one = _mm_set_epi32(1<<24, 0, 0, 0);
                block0 = _mm_loadu_si128(CONST_M128_CAST(inBlocks));
                block1 = _mm_add_epi32(block0, s_one);
                block2 = _mm_add_epi32(block1, s_one);
                block3 = _mm_add_epi32(block2, s_one);
                _mm_storeu_si128(M128_CAST(inBlocks), _mm_add_epi32(block3, s_one));
            }
            else
            {
                block0 = _mm_loadu_si128(CONST_M128_CAST(inBlocks));
                inBlocks = PtrAdd(inBlocks, inIncrement);
                block1 = _mm_loadu_si128(CONST_M128_CAST(inBlocks));
                inBlocks = PtrAdd(inBlocks, inIncrement);
                block2 = _mm_loadu_si128(CONST_M128_CAST(inBlocks));
                inBlocks = PtrAdd(inBlocks, inIncrement);
                block3 = _mm_loadu_si128(CONST_M128_CAST(inBlocks));
                inBlocks = PtrAdd(inBlocks, inIncrement);
            }

            if (xorInput)
            {
                block0 = _mm_xor_si128(block0, _mm_loadu_si128(CONST_M128_CAST(xorBlocks)));
                xorBlocks = PtrAdd(xorBlocks, xorIncrement);
                block1 = _mm_xor_si128(block1, _mm_loadu_si128(CONST_M128_CAST(xorBlocks)));
                xorBlocks = PtrAdd(xorBlocks, xorIncrement);
                block2 = _mm_xor_si128(block2, _mm_loadu_si128(CONST_M128_CAST(xorBlocks)));
                xorBlocks = PtrAdd(xorBlocks, xorIncrement);
                block3 = _mm_xor_si128(block3, _mm_loadu_si128(CONST_M128_CAST(xorBlocks)));
                xorBlocks = PtrAdd(xorBlocks, xorIncrement);
            }

            func4(block0, block1, block2, block3, subKeys, static_cast<unsigned int>(rounds));

            if (xorOutput)
            {
                block0 = _mm_xor_si128(block0, _mm_loadu_si128(CONST_M128_CAST(xorBlocks)));
                xorBlocks = PtrAdd(xorBlocks, xorIncrement);
                block1 = _mm_xor_si128(block1, _mm_loadu_si128(CONST_M128_CAST(xorBlocks)));
                xorBlocks = PtrAdd(xorBlocks, xorIncrement);
                block2 = _mm_xor_si128(block2, _mm_loadu_si128(CONST_M128_CAST(xorBlocks)));
                xorBlocks = PtrAdd(xorBlocks, xorIncrement);
                block3 = _mm_xor_si128(block3, _mm_loadu_si128(CONST_M128_CAST(xorBlocks)));
                xorBlocks = PtrAdd(xorBlocks, xorIncrement);
            }

            _mm_storeu_si128(M128_CAST(outBlocks), block0);
            outBlocks = PtrAdd(outBlocks, outIncrement);
            _mm_storeu_si128(M128_CAST(outBlocks), block1);
            outBlocks = PtrAdd(outBlocks, outIncrement);
            _mm_storeu_si128(M128_CAST(outBlocks), block2);
            outBlocks = PtrAdd(outBlocks, outIncrement);
            _mm_storeu_si128(M128_CAST(outBlocks), block3);
            outBlocks = PtrAdd(outBlocks, outIncrement);

            length -= 4*blockSize;
        }
    }

    while (length >= blockSize)
    {
        __m128i block = _mm_loadu_si128(CONST_M128_CAST(inBlocks));

        if (xorInput)
            block = _mm_xor_si128(block, _mm_loadu_si128(CONST_M128_CAST(xorBlocks)));

        if (flags & BT_InBlockIsCounter)
            const_cast<byte *>(inBlocks)[15]++;

        func1(block, subKeys, static_cast<unsigned int>(rounds));

        if (xorOutput)
            block = _mm_xor_si128(block, _mm_loadu_si128(CONST_M128_CAST(xorBlocks)));

        _mm_storeu_si128(M128_CAST(outBlocks), block);

        inBlocks = PtrAdd(inBlocks, inIncrement);
        outBlocks = PtrAdd(outBlocks, outIncrement);
        xorBlocks = PtrAdd(xorBlocks, xorIncrement);
        length -= blockSize;
    }

    return length;
}

NAMESPACE_END  // CryptoPP

#endif  // CRYPTOPP_SSSE3_AVAILABLE
// ************************** Altivec/Power 4 ************************** //

#if defined(__ALTIVEC__) || defined(CRYPTOPP_DOXYGEN_PROCESSING)

NAMESPACE_BEGIN(CryptoPP)

/// \brief AdvancedProcessBlocks for 1 and 4 blocks
/// \tparam F1 function to process 1 128-bit block
/// \tparam F4 function to process 4 128-bit blocks
/// \tparam W word type of the subkey table
/// \details AdvancedProcessBlocks128_4x1_ALTIVEC processes 4 and 1 Altivec SIMD words
///  at a time.
/// \details The subkey type is usually word32 or word64. F1 and F4 must use the
///  same word type.
template <typename F1, typename F4, typename W>
inline size_t AdvancedProcessBlocks128_4x1_ALTIVEC(F1 func1, F4 func4,
    const W *subKeys, size_t rounds, const byte *inBlocks,
    const byte *xorBlocks, byte *outBlocks, size_t length, word32 flags)
{
    CRYPTOPP_ASSERT(subKeys);
    CRYPTOPP_ASSERT(inBlocks);
    CRYPTOPP_ASSERT(outBlocks);
    CRYPTOPP_ASSERT(length >= 16);

#if (CRYPTOPP_LITTLE_ENDIAN)
    const uint32x4_p s_one = {1,0,0,0};
#else
    const uint32x4_p s_one = {0,0,0,1};
#endif
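    // Note on s_one: the initializers differ because vector literals follow
    // the platform's element order, while VecLoadBE always presents the
    // block in big-endian lane order. Both forms place the increment in the
    // lane that holds the counter's low-order 32-bit word.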
    const size_t blockSize = 16;
    // const size_t simdBlockSize = 16;

    size_t inIncrement = (flags & (EnumToInt(BT_InBlockIsCounter)|EnumToInt(BT_DontIncrementInOutPointers))) ? 0 : blockSize;
    size_t xorIncrement = (xorBlocks != NULLPTR) ? blockSize : 0;
    size_t outIncrement = (flags & EnumToInt(BT_DontIncrementInOutPointers)) ? 0 : blockSize;

    // Clang and Coverity are generating findings using xorBlocks as a flag.
    const bool xorInput = (xorBlocks != NULLPTR) && (flags & EnumToInt(BT_XorInput));
    const bool xorOutput = (xorBlocks != NULLPTR) && !(flags & EnumToInt(BT_XorInput));

    if (flags & BT_ReverseDirection)
    {
        inBlocks = PtrAdd(inBlocks, length - blockSize);
        xorBlocks = PtrAdd(xorBlocks, length - blockSize);
        outBlocks = PtrAdd(outBlocks, length - blockSize);
        inIncrement = 0-inIncrement;
        xorIncrement = 0-xorIncrement;
        outIncrement = 0-outIncrement;
    }

    if (flags & BT_AllowParallel)
    {
        while (length >= 4*blockSize)
        {
            uint32x4_p block0, block1, block2, block3;

            if (flags & BT_InBlockIsCounter)
            {
                block0 = VecLoadBE(inBlocks);
                block1 = VecAdd(block0, s_one);
                block2 = VecAdd(block1, s_one);
                block3 = VecAdd(block2, s_one);
                // Hack due to big-endian loads used by POWER8 (and maybe ARM-BE).
                // CTR_ModePolicy::OperateKeystream is wired such that after
                // returning from this function CTR_ModePolicy will detect wrap
                // on the last counter byte and increment the next to last byte.
                // The problem is, with a big-endian load, inBlocks[15] is really
                // located at index 15. The vector addition using a 32-bit element
                // generates a carry into inBlocks[14] and then CTR_ModePolicy
                // increments inBlocks[14] too.
                const_cast<byte*>(inBlocks)[15] += 4;
            }
            else
            {
                block0 = VecLoadBE(inBlocks);
                inBlocks = PtrAdd(inBlocks, inIncrement);
                block1 = VecLoadBE(inBlocks);
                inBlocks = PtrAdd(inBlocks, inIncrement);
                block2 = VecLoadBE(inBlocks);
                inBlocks = PtrAdd(inBlocks, inIncrement);
                block3 = VecLoadBE(inBlocks);
                inBlocks = PtrAdd(inBlocks, inIncrement);
            }

            if (xorInput)
            {
                block0 = VecXor(block0, VecLoadBE(xorBlocks));
                xorBlocks = PtrAdd(xorBlocks, xorIncrement);
                block1 = VecXor(block1, VecLoadBE(xorBlocks));
                xorBlocks = PtrAdd(xorBlocks, xorIncrement);
                block2 = VecXor(block2, VecLoadBE(xorBlocks));
                xorBlocks = PtrAdd(xorBlocks, xorIncrement);
                block3 = VecXor(block3, VecLoadBE(xorBlocks));
                xorBlocks = PtrAdd(xorBlocks, xorIncrement);
            }

            func4(block0, block1, block2, block3, subKeys, rounds);

            if (xorOutput)
            {
                block0 = VecXor(block0, VecLoadBE(xorBlocks));
                xorBlocks = PtrAdd(xorBlocks, xorIncrement);
                block1 = VecXor(block1, VecLoadBE(xorBlocks));
                xorBlocks = PtrAdd(xorBlocks, xorIncrement);
                block2 = VecXor(block2, VecLoadBE(xorBlocks));
                xorBlocks = PtrAdd(xorBlocks, xorIncrement);
                block3 = VecXor(block3, VecLoadBE(xorBlocks));
                xorBlocks = PtrAdd(xorBlocks, xorIncrement);
            }

            VecStoreBE(block0, outBlocks);
            outBlocks = PtrAdd(outBlocks, outIncrement);
            VecStoreBE(block1, outBlocks);
            outBlocks = PtrAdd(outBlocks, outIncrement);
            VecStoreBE(block2, outBlocks);
            outBlocks = PtrAdd(outBlocks, outIncrement);
            VecStoreBE(block3, outBlocks);
            outBlocks = PtrAdd(outBlocks, outIncrement);

            length -= 4*blockSize;
        }
    }

    while (length >= blockSize)
    {
        uint32x4_p block = VecLoadBE(inBlocks);

        if (xorInput)
            block = VecXor(block, VecLoadBE(xorBlocks));

        if (flags & BT_InBlockIsCounter)
            const_cast<byte *>(inBlocks)[15]++;

        func1(block, subKeys, rounds);

        if (xorOutput)
            block = VecXor(block, VecLoadBE(xorBlocks));

        VecStoreBE(block, outBlocks);

        inBlocks = PtrAdd(inBlocks, inIncrement);
        outBlocks = PtrAdd(outBlocks, outIncrement);
        xorBlocks = PtrAdd(xorBlocks, xorIncrement);
        length -= blockSize;
    }

    return length;
}
/// \brief AdvancedProcessBlocks for 1 and 6 blocks
/// \tparam F1 function to process 1 128-bit block
/// \tparam F6 function to process 6 128-bit blocks
/// \tparam W word type of the subkey table
/// \details AdvancedProcessBlocks128_6x1_ALTIVEC processes 6 and 1 Altivec SIMD words
///  at a time.
/// \details The subkey type is usually word32 or word64. F1 and F6 must use the
///  same word type.
template <typename F1, typename F6, typename W>
inline size_t AdvancedProcessBlocks128_6x1_ALTIVEC(F1 func1, F6 func6,
    const W *subKeys, size_t rounds, const byte *inBlocks,
    const byte *xorBlocks, byte *outBlocks, size_t length, word32 flags)
{
    CRYPTOPP_ASSERT(subKeys);
    CRYPTOPP_ASSERT(inBlocks);
    CRYPTOPP_ASSERT(outBlocks);
    CRYPTOPP_ASSERT(length >= 16);

#if (CRYPTOPP_LITTLE_ENDIAN)
    const uint32x4_p s_one = {1,0,0,0};
#else
    const uint32x4_p s_one = {0,0,0,1};
#endif

    const size_t blockSize = 16;
    // const size_t simdBlockSize = 16;

    size_t inIncrement = (flags & (EnumToInt(BT_InBlockIsCounter)|EnumToInt(BT_DontIncrementInOutPointers))) ? 0 : blockSize;
    size_t xorIncrement = (xorBlocks != NULLPTR) ? blockSize : 0;
    size_t outIncrement = (flags & EnumToInt(BT_DontIncrementInOutPointers)) ? 0 : blockSize;

    // Clang and Coverity are generating findings using xorBlocks as a flag.
    const bool xorInput = (xorBlocks != NULLPTR) && (flags & EnumToInt(BT_XorInput));
    const bool xorOutput = (xorBlocks != NULLPTR) && !(flags & EnumToInt(BT_XorInput));

    if (flags & BT_ReverseDirection)
    {
        inBlocks = PtrAdd(inBlocks, length - blockSize);
        xorBlocks = PtrAdd(xorBlocks, length - blockSize);
        outBlocks = PtrAdd(outBlocks, length - blockSize);
        inIncrement = 0-inIncrement;
        xorIncrement = 0-xorIncrement;
        outIncrement = 0-outIncrement;
    }

    if (flags & BT_AllowParallel)
    {
        while (length >= 6*blockSize)
        {
            uint32x4_p block0, block1, block2, block3, block4, block5;

            if (flags & BT_InBlockIsCounter)
            {
                block0 = VecLoadBE(inBlocks);
                block1 = VecAdd(block0, s_one);
                block2 = VecAdd(block1, s_one);
                block3 = VecAdd(block2, s_one);
                block4 = VecAdd(block3, s_one);
                block5 = VecAdd(block4, s_one);
                // Hack due to big-endian loads used by POWER8 (and maybe ARM-BE).
                // CTR_ModePolicy::OperateKeystream is wired such that after
                // returning from this function CTR_ModePolicy will detect wrap
                // on the last counter byte and increment the next to last byte.
                // The problem is, with a big-endian load, inBlocks[15] is really
                // located at index 15. The vector addition using a 32-bit element
                // generates a carry into inBlocks[14] and then CTR_ModePolicy
                // increments inBlocks[14] too.
                //
                // To find this bug we needed a test case with a ctr of 0xNN...FA.
                // The last octet is 0xFA and adding 6 creates the wrap to trigger
                // the issue. If the last octet was 0xFC then 4 would trigger it.
                // We dumb-lucked into the test with SPECK-128. The test case of
                // interest is the one with IV 348ECA9766C09F04 826520DE47A212FA.
                uint8x16_p temp = VecAdd((uint8x16_p)block5, (uint8x16_p)s_one);
                VecStoreBE(temp, const_cast<byte*>(inBlocks));
            }
            else
            {
                block0 = VecLoadBE(inBlocks);
                inBlocks = PtrAdd(inBlocks, inIncrement);
                block1 = VecLoadBE(inBlocks);
                inBlocks = PtrAdd(inBlocks, inIncrement);
                block2 = VecLoadBE(inBlocks);
                inBlocks = PtrAdd(inBlocks, inIncrement);
                block3 = VecLoadBE(inBlocks);
                inBlocks = PtrAdd(inBlocks, inIncrement);
                block4 = VecLoadBE(inBlocks);
                inBlocks = PtrAdd(inBlocks, inIncrement);
                block5 = VecLoadBE(inBlocks);
                inBlocks = PtrAdd(inBlocks, inIncrement);
            }

            if (xorInput)
            {
                block0 = VecXor(block0, VecLoadBE(xorBlocks));
                xorBlocks = PtrAdd(xorBlocks, xorIncrement);
                block1 = VecXor(block1, VecLoadBE(xorBlocks));
                xorBlocks = PtrAdd(xorBlocks, xorIncrement);
                block2 = VecXor(block2, VecLoadBE(xorBlocks));
                xorBlocks = PtrAdd(xorBlocks, xorIncrement);
                block3 = VecXor(block3, VecLoadBE(xorBlocks));
                xorBlocks = PtrAdd(xorBlocks, xorIncrement);
                block4 = VecXor(block4, VecLoadBE(xorBlocks));
                xorBlocks = PtrAdd(xorBlocks, xorIncrement);
                block5 = VecXor(block5, VecLoadBE(xorBlocks));
                xorBlocks = PtrAdd(xorBlocks, xorIncrement);
            }

            func6(block0, block1, block2, block3, block4, block5, subKeys, rounds);

            if (xorOutput)
            {
                block0 = VecXor(block0, VecLoadBE(xorBlocks));
                xorBlocks = PtrAdd(xorBlocks, xorIncrement);
                block1 = VecXor(block1, VecLoadBE(xorBlocks));
                xorBlocks = PtrAdd(xorBlocks, xorIncrement);
                block2 = VecXor(block2, VecLoadBE(xorBlocks));
                xorBlocks = PtrAdd(xorBlocks, xorIncrement);
                block3 = VecXor(block3, VecLoadBE(xorBlocks));
                xorBlocks = PtrAdd(xorBlocks, xorIncrement);
                block4 = VecXor(block4, VecLoadBE(xorBlocks));
                xorBlocks = PtrAdd(xorBlocks, xorIncrement);
                block5 = VecXor(block5, VecLoadBE(xorBlocks));
                xorBlocks = PtrAdd(xorBlocks, xorIncrement);
            }

            VecStoreBE(block0, outBlocks);
            outBlocks = PtrAdd(outBlocks, outIncrement);
            VecStoreBE(block1, outBlocks);
            outBlocks = PtrAdd(outBlocks, outIncrement);
            VecStoreBE(block2, outBlocks);
            outBlocks = PtrAdd(outBlocks, outIncrement);
            VecStoreBE(block3, outBlocks);
            outBlocks = PtrAdd(outBlocks, outIncrement);
            VecStoreBE(block4, outBlocks);
            outBlocks = PtrAdd(outBlocks, outIncrement);
            VecStoreBE(block5, outBlocks);
            outBlocks = PtrAdd(outBlocks, outIncrement);

            length -= 6*blockSize;
        }
    }

    while (length >= blockSize)
    {
        uint32x4_p block = VecLoadBE(inBlocks);

        if (xorInput)
            block = VecXor(block, VecLoadBE(xorBlocks));

        if (flags & BT_InBlockIsCounter)
            const_cast<byte *>(inBlocks)[15]++;

        func1(block, subKeys, rounds);

        if (xorOutput)
            block = VecXor(block, VecLoadBE(xorBlocks));

        VecStoreBE(block, outBlocks);

        inBlocks = PtrAdd(inBlocks, inIncrement);
        outBlocks = PtrAdd(outBlocks, outIncrement);
        xorBlocks = PtrAdd(xorBlocks, xorIncrement);
        length -= blockSize;
    }

    return length;
}

NAMESPACE_END  // CryptoPP

#endif  // __ALTIVEC__

#endif  // CRYPTOPP_ADVANCED_SIMD_TEMPLATES