intrin_wrapper.hpp

#ifndef _PVFMM_INTRIN_WRAPPER_HPP_
#define _PVFMM_INTRIN_WRAPPER_HPP_

#include <cstdint>  // uint8_t, used by the scalar fall-back implementations below

#ifdef __SSE__
#include <xmmintrin.h>
#endif
#ifdef __SSE2__
#include <emmintrin.h>
#endif
#ifdef __SSE3__
#include <pmmintrin.h>
#endif
#ifdef __AVX__
#include <immintrin.h>
#endif
#if defined(__MIC__)
#include <immintrin.h>
#endif
namespace pvfmm {

template <class T> inline T zero_intrin() { return (T)0; }
template <class T, class Real> inline T set_intrin(const Real& a) { return a; }
template <class T, class Real> inline T load_intrin(Real const* a) { return a[0]; }
template <class T, class Real> inline T bcast_intrin(Real const* a) { return a[0]; }
template <class T, class Real> inline void store_intrin(Real* a, const T& b) { a[0] = b; }
template <class T> inline T mul_intrin(const T& a, const T& b) { return a * b; }
template <class T> inline T add_intrin(const T& a, const T& b) { return a + b; }
template <class T> inline T sub_intrin(const T& a, const T& b) { return a - b; }
template <class T> inline T cmplt_intrin(const T& a, const T& b) {
  T r = 0;
  uint8_t* r_ = reinterpret_cast<uint8_t*>(&r);
  if (a < b)
    for (int i = 0; i < sizeof(T); i++) r_[i] = ~(uint8_t)0;
  return r;
}
template <class T> inline T and_intrin(const T& a, const T& b) {
  T r = 0;
  const uint8_t* a_ = reinterpret_cast<const uint8_t*>(&a);
  const uint8_t* b_ = reinterpret_cast<const uint8_t*>(&b);
  uint8_t* r_ = reinterpret_cast<uint8_t*>(&r);
  for (int i = 0; i < sizeof(T); i++) r_[i] = a_[i] & b_[i];
  return r;
}
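// [Added note, not in the original header] cmplt_intrin and and_intrin mirror the
// SSE/AVX comparison semantics in scalar code: a comparison yields an all-ones bit
// mask (not the value 1) when true and all zeros when false, so the mask can be
// combined bitwise with a value to select it branchlessly. For example, a
// hypothetical clamp-to-zero of non-positive entries could be written generically as
//   T mask = cmplt_intrin(zero_intrin<T>(), x);  // mask = (0 < x) ? all-ones : 0
//   T y    = and_intrin(mask, x);                // y = x where x > 0, else 0
// and works unchanged for T = float/double and for the __m128/__m256 types below.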
template <class T> inline T rsqrt_approx_intrin(const T& r2) {
  if (r2 != 0) return 1.0 / pvfmm::sqrt<T>(r2);
  return 0;
}
template <class T, class Real> inline void rsqrt_newton_intrin(T& rinv, const T& r2, const Real& nwtn_const) { rinv = rinv * (nwtn_const - r2 * rinv * rinv); }
template <class T> inline T rsqrt_single_intrin(const T& r2) {
  if (r2 != 0) return 1.0 / pvfmm::sqrt<T>(r2);
  return 0;
}
template <class T> inline T max_intrin(const T& a, const T& b) {
  if (a > b)
    return a;
  else
    return b;
}
template <class T> inline T min_intrin(const T& a, const T& b) {
  if (a > b)
    return b;
  else
    return a;
}
template <class T> inline T sin_intrin(const T& t) { return pvfmm::sin<T>(t); }
template <class T> inline T cos_intrin(const T& t) { return pvfmm::cos<T>(t); }
#ifdef __SSE3__
template <> inline __m128 zero_intrin() { return _mm_setzero_ps(); }
template <> inline __m128d zero_intrin() { return _mm_setzero_pd(); }
template <> inline __m128 set_intrin(const float& a) { return _mm_set_ps1(a); }
template <> inline __m128d set_intrin(const double& a) { return _mm_set_pd1(a); }
template <> inline __m128 load_intrin(float const* a) { return _mm_load_ps(a); }
template <> inline __m128d load_intrin(double const* a) { return _mm_load_pd(a); }
template <> inline __m128 bcast_intrin(float const* a) { return _mm_set_ps1(a[0]); }
template <> inline __m128d bcast_intrin(double const* a) { return _mm_load_pd1(a); }
template <> inline void store_intrin(float* a, const __m128& b) { return _mm_store_ps(a, b); }
template <> inline void store_intrin(double* a, const __m128d& b) { return _mm_store_pd(a, b); }
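// [Added note, not in the original header] The SSE and AVX specializations of
// load_intrin/store_intrin use the aligned intrinsics (_mm_load_ps/_mm_store_ps and
// their _mm256_ counterparts), so the pointers passed to them are presumably expected
// to be 16-byte (SSE) or 32-byte (AVX) aligned; unaligned buffers would fault at run time.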
template <> inline __m128 mul_intrin(const __m128& a, const __m128& b) { return _mm_mul_ps(a, b); }
template <> inline __m128d mul_intrin(const __m128d& a, const __m128d& b) { return _mm_mul_pd(a, b); }
template <> inline __m128 add_intrin(const __m128& a, const __m128& b) { return _mm_add_ps(a, b); }
template <> inline __m128d add_intrin(const __m128d& a, const __m128d& b) { return _mm_add_pd(a, b); }
template <> inline __m128 sub_intrin(const __m128& a, const __m128& b) { return _mm_sub_ps(a, b); }
template <> inline __m128d sub_intrin(const __m128d& a, const __m128d& b) { return _mm_sub_pd(a, b); }
template <> inline __m128 cmplt_intrin(const __m128& a, const __m128& b) { return _mm_cmplt_ps(a, b); }
template <> inline __m128d cmplt_intrin(const __m128d& a, const __m128d& b) { return _mm_cmplt_pd(a, b); }
template <> inline __m128 and_intrin(const __m128& a, const __m128& b) { return _mm_and_ps(a, b); }
template <> inline __m128d and_intrin(const __m128d& a, const __m128d& b) { return _mm_and_pd(a, b); }
template <> inline __m128 rsqrt_approx_intrin(const __m128& r2) {
#define VEC_INTRIN __m128
#define RSQRT_INTRIN(a) _mm_rsqrt_ps(a)
#define CMPEQ_INTRIN(a, b) _mm_cmpeq_ps(a, b)
#define ANDNOT_INTRIN(a, b) _mm_andnot_ps(a, b)
  // Approximate inverse square root which returns zero for r2=0
  return ANDNOT_INTRIN(CMPEQ_INTRIN(r2, zero_intrin<VEC_INTRIN>()), RSQRT_INTRIN(r2));
#undef VEC_INTRIN
#undef RSQRT_INTRIN
#undef CMPEQ_INTRIN
#undef ANDNOT_INTRIN
}
template <> inline __m128d rsqrt_approx_intrin(const __m128d& r2) {
#define PD2PS(a) _mm_cvtpd_ps(a)
#define PS2PD(a) _mm_cvtps_pd(a)
  return PS2PD(rsqrt_approx_intrin(PD2PS(r2)));
#undef PD2PS
#undef PS2PD
}
template <> inline void rsqrt_newton_intrin(__m128& rinv, const __m128& r2, const float& nwtn_const) {
#define VEC_INTRIN __m128
  // Newton iteration: rinv = 0.5 rinv_approx ( 3 - r2 rinv_approx^2 )
  // The factor 0.5 is not applied here; it must be compensated for later.
  rinv = mul_intrin(rinv, sub_intrin(set_intrin<VEC_INTRIN>(nwtn_const), mul_intrin(r2, mul_intrin(rinv, rinv))));
#undef VEC_INTRIN
}
template <> inline void rsqrt_newton_intrin(__m128d& rinv, const __m128d& r2, const double& nwtn_const) {
#define VEC_INTRIN __m128d
  // Newton iteration: rinv = 0.5 rinv_approx ( 3 - r2 rinv_approx^2 )
  // The factor 0.5 is not applied here; it must be compensated for later.
  rinv = mul_intrin(rinv, sub_intrin(set_intrin<VEC_INTRIN>(nwtn_const), mul_intrin(r2, mul_intrin(rinv, rinv))));
#undef VEC_INTRIN
}
template <> inline __m128 rsqrt_single_intrin(const __m128& r2) {
#define VEC_INTRIN __m128
  VEC_INTRIN rinv = rsqrt_approx_intrin(r2);
  rsqrt_newton_intrin(rinv, r2, (float)3.0);
  return rinv;
#undef VEC_INTRIN
}
template <> inline __m128d rsqrt_single_intrin(const __m128d& r2) {
#define PD2PS(a) _mm_cvtpd_ps(a)
#define PS2PD(a) _mm_cvtps_pd(a)
  return PS2PD(rsqrt_single_intrin(PD2PS(r2)));
#undef PD2PS
#undef PS2PD
}
template <> inline __m128 max_intrin(const __m128& a, const __m128& b) { return _mm_max_ps(a, b); }
template <> inline __m128d max_intrin(const __m128d& a, const __m128d& b) { return _mm_max_pd(a, b); }
template <> inline __m128 min_intrin(const __m128& a, const __m128& b) { return _mm_min_ps(a, b); }
template <> inline __m128d min_intrin(const __m128d& a, const __m128d& b) { return _mm_min_pd(a, b); }
#ifdef PVFMM_HAVE_INTEL_SVML
template <> inline __m128 sin_intrin(const __m128& t) { return _mm_sin_ps(t); }
template <> inline __m128 cos_intrin(const __m128& t) { return _mm_cos_ps(t); }
template <> inline __m128d sin_intrin(const __m128d& t) { return _mm_sin_pd(t); }
template <> inline __m128d cos_intrin(const __m128d& t) { return _mm_cos_pd(t); }
#else
template <> inline __m128 sin_intrin(const __m128& t_) {
  union {
    float e[4];
    __m128 d;
  } t;
  store_intrin(t.e, t_);
  return _mm_set_ps(pvfmm::sin<float>(t.e[3]), pvfmm::sin<float>(t.e[2]), pvfmm::sin<float>(t.e[1]), pvfmm::sin<float>(t.e[0]));
}
template <> inline __m128 cos_intrin(const __m128& t_) {
  union {
    float e[4];
    __m128 d;
  } t;
  store_intrin(t.e, t_);
  return _mm_set_ps(pvfmm::cos<float>(t.e[3]), pvfmm::cos<float>(t.e[2]), pvfmm::cos<float>(t.e[1]), pvfmm::cos<float>(t.e[0]));
}
template <> inline __m128d sin_intrin(const __m128d& t_) {
  union {
    double e[2];
    __m128d d;
  } t;
  store_intrin(t.e, t_);
  return _mm_set_pd(pvfmm::sin<double>(t.e[1]), pvfmm::sin<double>(t.e[0]));
}
template <> inline __m128d cos_intrin(const __m128d& t_) {
  union {
    double e[2];
    __m128d d;
  } t;
  store_intrin(t.e, t_);
  return _mm_set_pd(pvfmm::cos<double>(t.e[1]), pvfmm::cos<double>(t.e[0]));
}
#endif
#endif
#ifdef __AVX__
template <> inline __m256 zero_intrin() { return _mm256_setzero_ps(); }
template <> inline __m256d zero_intrin() { return _mm256_setzero_pd(); }
template <> inline __m256 set_intrin(const float& a) { return _mm256_set_ps(a, a, a, a, a, a, a, a); }
template <> inline __m256d set_intrin(const double& a) { return _mm256_set_pd(a, a, a, a); }
template <> inline __m256 load_intrin(float const* a) { return _mm256_load_ps(a); }
template <> inline __m256d load_intrin(double const* a) { return _mm256_load_pd(a); }
template <> inline __m256 bcast_intrin(float const* a) { return _mm256_broadcast_ss(a); }
template <> inline __m256d bcast_intrin(double const* a) { return _mm256_broadcast_sd(a); }
template <> inline void store_intrin(float* a, const __m256& b) { return _mm256_store_ps(a, b); }
template <> inline void store_intrin(double* a, const __m256d& b) { return _mm256_store_pd(a, b); }
template <> inline __m256 mul_intrin(const __m256& a, const __m256& b) { return _mm256_mul_ps(a, b); }
template <> inline __m256d mul_intrin(const __m256d& a, const __m256d& b) { return _mm256_mul_pd(a, b); }
template <> inline __m256 add_intrin(const __m256& a, const __m256& b) { return _mm256_add_ps(a, b); }
template <> inline __m256d add_intrin(const __m256d& a, const __m256d& b) { return _mm256_add_pd(a, b); }
template <> inline __m256 sub_intrin(const __m256& a, const __m256& b) { return _mm256_sub_ps(a, b); }
template <> inline __m256d sub_intrin(const __m256d& a, const __m256d& b) { return _mm256_sub_pd(a, b); }
template <> inline __m256 cmplt_intrin(const __m256& a, const __m256& b) { return _mm256_cmp_ps(a, b, _CMP_LT_OS); }
template <> inline __m256d cmplt_intrin(const __m256d& a, const __m256d& b) { return _mm256_cmp_pd(a, b, _CMP_LT_OS); }
template <> inline __m256 and_intrin(const __m256& a, const __m256& b) { return _mm256_and_ps(a, b); }
template <> inline __m256d and_intrin(const __m256d& a, const __m256d& b) { return _mm256_and_pd(a, b); }
template <> inline __m256 rsqrt_approx_intrin(const __m256& r2) {
#define VEC_INTRIN __m256
#define RSQRT_INTRIN(a) _mm256_rsqrt_ps(a)
#define CMPEQ_INTRIN(a, b) _mm256_cmp_ps(a, b, _CMP_EQ_OS)
#define ANDNOT_INTRIN(a, b) _mm256_andnot_ps(a, b)
  // Approximate inverse square root which returns zero for r2=0
  return ANDNOT_INTRIN(CMPEQ_INTRIN(r2, zero_intrin<VEC_INTRIN>()), RSQRT_INTRIN(r2));
#undef VEC_INTRIN
#undef RSQRT_INTRIN
#undef CMPEQ_INTRIN
#undef ANDNOT_INTRIN
}
template <> inline __m256d rsqrt_approx_intrin(const __m256d& r2) {
#define PD2PS(a) _mm256_cvtpd_ps(a)
#define PS2PD(a) _mm256_cvtps_pd(a)
  return PS2PD(rsqrt_approx_intrin(PD2PS(r2)));
#undef PD2PS
#undef PS2PD
}
template <> inline void rsqrt_newton_intrin(__m256& rinv, const __m256& r2, const float& nwtn_const) {
#define VEC_INTRIN __m256
  // Newton iteration: rinv = 0.5 rinv_approx ( 3 - r2 rinv_approx^2 )
  // The factor 0.5 is not applied here; it must be compensated for later.
  rinv = mul_intrin(rinv, sub_intrin(set_intrin<VEC_INTRIN>(nwtn_const), mul_intrin(r2, mul_intrin(rinv, rinv))));
#undef VEC_INTRIN
}
template <> inline void rsqrt_newton_intrin(__m256d& rinv, const __m256d& r2, const double& nwtn_const) {
#define VEC_INTRIN __m256d
  // Newton iteration: rinv = 0.5 rinv_approx ( 3 - r2 rinv_approx^2 )
  // The factor 0.5 is not applied here; it must be compensated for later.
  rinv = mul_intrin(rinv, sub_intrin(set_intrin<VEC_INTRIN>(nwtn_const), mul_intrin(r2, mul_intrin(rinv, rinv))));
#undef VEC_INTRIN
}
template <> inline __m256 rsqrt_single_intrin(const __m256& r2) {
#define VEC_INTRIN __m256
  VEC_INTRIN rinv = rsqrt_approx_intrin(r2);
  rsqrt_newton_intrin(rinv, r2, (float)3.0);
  return rinv;
#undef VEC_INTRIN
}
template <> inline __m256d rsqrt_single_intrin(const __m256d& r2) {
#define PD2PS(a) _mm256_cvtpd_ps(a)
#define PS2PD(a) _mm256_cvtps_pd(a)
  return PS2PD(rsqrt_single_intrin(PD2PS(r2)));
#undef PD2PS
#undef PS2PD
}
template <> inline __m256 max_intrin(const __m256& a, const __m256& b) { return _mm256_max_ps(a, b); }
template <> inline __m256d max_intrin(const __m256d& a, const __m256d& b) { return _mm256_max_pd(a, b); }
template <> inline __m256 min_intrin(const __m256& a, const __m256& b) { return _mm256_min_ps(a, b); }
template <> inline __m256d min_intrin(const __m256d& a, const __m256d& b) { return _mm256_min_pd(a, b); }
#ifdef PVFMM_HAVE_INTEL_SVML
template <> inline __m256 sin_intrin(const __m256& t) { return _mm256_sin_ps(t); }
template <> inline __m256 cos_intrin(const __m256& t) { return _mm256_cos_ps(t); }
template <> inline __m256d sin_intrin(const __m256d& t) { return _mm256_sin_pd(t); }
template <> inline __m256d cos_intrin(const __m256d& t) { return _mm256_cos_pd(t); }
#else
template <> inline __m256 sin_intrin(const __m256& t_) {
  union {
    float e[8];
    __m256 d;
  } t;
  store_intrin(t.e, t_);  // t.d = t_;
  return _mm256_set_ps(pvfmm::sin<float>(t.e[7]), pvfmm::sin<float>(t.e[6]), pvfmm::sin<float>(t.e[5]), pvfmm::sin<float>(t.e[4]), pvfmm::sin<float>(t.e[3]), pvfmm::sin<float>(t.e[2]), pvfmm::sin<float>(t.e[1]), pvfmm::sin<float>(t.e[0]));
}
template <> inline __m256 cos_intrin(const __m256& t_) {
  union {
    float e[8];
    __m256 d;
  } t;
  store_intrin(t.e, t_);  // t.d = t_;
  return _mm256_set_ps(pvfmm::cos<float>(t.e[7]), pvfmm::cos<float>(t.e[6]), pvfmm::cos<float>(t.e[5]), pvfmm::cos<float>(t.e[4]), pvfmm::cos<float>(t.e[3]), pvfmm::cos<float>(t.e[2]), pvfmm::cos<float>(t.e[1]), pvfmm::cos<float>(t.e[0]));
}
template <> inline __m256d sin_intrin(const __m256d& t_) {
  union {
    double e[4];
    __m256d d;
  } t;
  store_intrin(t.e, t_);  // t.d = t_;
  return _mm256_set_pd(pvfmm::sin<double>(t.e[3]), pvfmm::sin<double>(t.e[2]), pvfmm::sin<double>(t.e[1]), pvfmm::sin<double>(t.e[0]));
}
template <> inline __m256d cos_intrin(const __m256d& t_) {
  union {
    double e[4];
    __m256d d;
  } t;
  store_intrin(t.e, t_);  // t.d = t_;
  return _mm256_set_pd(pvfmm::cos<double>(t.e[3]), pvfmm::cos<double>(t.e[2]), pvfmm::cos<double>(t.e[1]), pvfmm::cos<double>(t.e[0]));
}
#endif
#endif
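// [Added note, not in the original header] The rsqrt_intrin{0,1,2,3} helpers below
// chain 0-3 Newton steps on top of the approximate reciprocal square root. Because
// rsqrt_newton_intrin omits the usual factor 0.5, a step that starts from scal*x
// (where x approximates 1/sqrt(r2)) and uses nwtn_const = 3*scal^2 returns
// 2*scal^3 * [0.5*x*(3 - r2*x^2)], i.e. an improved iterate whose scale factor
// grows as scal -> 2*scal^3. That is exactly the recurrence
//     scal = (NWTNk ? 2*scal*scal*scal : scal);
// used below. Consequently rsqrt_intrin0 returns ~1/sqrt(r2), rsqrt_intrin1 returns
// ~2/sqrt(r2), rsqrt_intrin2 returns ~16/sqrt(r2), and rsqrt_intrin3 returns
// ~8192/sqrt(r2); the caller is expected to divide the accumulated result by this
// constant once (in PVFMM the kernels presumably fold it into their prefactor).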
template <class VEC, class Real> inline VEC rsqrt_intrin0(VEC r2) {
#define NWTN0 0
#define NWTN1 0
#define NWTN2 0
#define NWTN3 0
  // Real scal=1; Real const_nwtn0=3*scal*scal;
  // scal=(NWTN0?2*scal*scal*scal:scal); Real const_nwtn1=3*scal*scal;
  // scal=(NWTN1?2*scal*scal*scal:scal); Real const_nwtn2=3*scal*scal;
  // scal=(NWTN2?2*scal*scal*scal:scal); Real const_nwtn3=3*scal*scal;
  VEC rinv;
#if NWTN0
  rinv = rsqrt_single_intrin(r2);
#else
  rinv = rsqrt_approx_intrin(r2);
#endif
#if NWTN1
  rsqrt_newton_intrin(rinv, r2, const_nwtn1);
#endif
#if NWTN2
  rsqrt_newton_intrin(rinv, r2, const_nwtn2);
#endif
#if NWTN3
  rsqrt_newton_intrin(rinv, r2, const_nwtn3);
#endif
  return rinv;
#undef NWTN0
#undef NWTN1
#undef NWTN2
#undef NWTN3
}
template <class VEC, class Real> inline VEC rsqrt_intrin1(VEC r2) {
#define NWTN0 0
#define NWTN1 1
#define NWTN2 0
#define NWTN3 0
  Real scal = 1;  // Real const_nwtn0=3*scal*scal;
  scal = (NWTN0 ? 2 * scal * scal * scal : scal);
  Real const_nwtn1 = 3 * scal * scal;
  // scal=(NWTN1?2*scal*scal*scal:scal); Real const_nwtn2=3*scal*scal;
  // scal=(NWTN2?2*scal*scal*scal:scal); Real const_nwtn3=3*scal*scal;
  VEC rinv;
#if NWTN0
  rinv = rsqrt_single_intrin(r2);
#else
  rinv = rsqrt_approx_intrin(r2);
#endif
#if NWTN1
  rsqrt_newton_intrin(rinv, r2, const_nwtn1);
#endif
#if NWTN2
  rsqrt_newton_intrin(rinv, r2, const_nwtn2);
#endif
#if NWTN3
  rsqrt_newton_intrin(rinv, r2, const_nwtn3);
#endif
  return rinv;
#undef NWTN0
#undef NWTN1
#undef NWTN2
#undef NWTN3
}
template <class VEC, class Real> inline VEC rsqrt_intrin2(VEC r2) {
#define NWTN0 0
#define NWTN1 1
#define NWTN2 1
#define NWTN3 0
  Real scal = 1;  // Real const_nwtn0=3*scal*scal;
  scal = (NWTN0 ? 2 * scal * scal * scal : scal);
  Real const_nwtn1 = 3 * scal * scal;
  scal = (NWTN1 ? 2 * scal * scal * scal : scal);
  Real const_nwtn2 = 3 * scal * scal;
  // scal=(NWTN2?2*scal*scal*scal:scal); Real const_nwtn3=3*scal*scal;
  VEC rinv;
#if NWTN0
  rinv = rsqrt_single_intrin(r2);
#else
  rinv = rsqrt_approx_intrin(r2);
#endif
#if NWTN1
  rsqrt_newton_intrin(rinv, r2, const_nwtn1);
#endif
#if NWTN2
  rsqrt_newton_intrin(rinv, r2, const_nwtn2);
#endif
#if NWTN3
  rsqrt_newton_intrin(rinv, r2, const_nwtn3);
#endif
  return rinv;
#undef NWTN0
#undef NWTN1
#undef NWTN2
#undef NWTN3
}
template <class VEC, class Real> inline VEC rsqrt_intrin3(VEC r2) {
#define NWTN0 0
#define NWTN1 1
#define NWTN2 1
#define NWTN3 1
  Real scal = 1;  // Real const_nwtn0=3*scal*scal;
  scal = (NWTN0 ? 2 * scal * scal * scal : scal);
  Real const_nwtn1 = 3 * scal * scal;
  scal = (NWTN1 ? 2 * scal * scal * scal : scal);
  Real const_nwtn2 = 3 * scal * scal;
  scal = (NWTN2 ? 2 * scal * scal * scal : scal);
  Real const_nwtn3 = 3 * scal * scal;
  VEC rinv;
#if NWTN0
  rinv = rsqrt_single_intrin(r2);
#else
  rinv = rsqrt_approx_intrin(r2);
#endif
#if NWTN1
  rsqrt_newton_intrin(rinv, r2, const_nwtn1);
#endif
#if NWTN2
  rsqrt_newton_intrin(rinv, r2, const_nwtn2);
#endif
#if NWTN3
  rsqrt_newton_intrin(rinv, r2, const_nwtn3);
#endif
  return rinv;
#undef NWTN0
#undef NWTN1
#undef NWTN2
#undef NWTN3
}
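// [Added usage sketch, not part of the original header] A minimal example of how
// these wrappers are meant to be used: the kernel body is written once in terms of
// load/mul/add/rsqrt/store and instantiated with (Vec, Real) = (__m256, float),
// (__m128d, double), or (double, double) as the build allows. The function name and
// buffers below are illustrative only; the pointers must satisfy the alignment
// required by load_intrin/store_intrin, and the result of rsqrt_intrin2 still
// carries the scale factor (~16) discussed above.
//
//   template <class Vec, class Real>
//   void inv_dist(const Real* x, const Real* y, const Real* z, Real* rinv, long n) {
//     const long k = sizeof(Vec) / sizeof(Real);  // SIMD width in elements
//     for (long i = 0; i < n; i += k) {
//       Vec xi = load_intrin<Vec>(&x[i]);
//       Vec yi = load_intrin<Vec>(&y[i]);
//       Vec zi = load_intrin<Vec>(&z[i]);
//       Vec r2 = add_intrin(add_intrin(mul_intrin(xi, xi), mul_intrin(yi, yi)), mul_intrin(zi, zi));
//       store_intrin(&rinv[i], rsqrt_intrin2<Vec, Real>(r2));  // ~16/sqrt(r2); rescale by 1/16
//     }
//   }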
}  // namespace pvfmm

#endif  //_PVFMM_INTRIN_WRAPPER_HPP_