  1. /**
  2. * \file intrin_wrapper.hpp
  3. * \author Dhairya Malhotra, dhairya.malhotra@gmail.com
  4. * \date 12-19-2014
  5. * \brief This file contains the templated wrappers for vector intrinsics.
  6. */
#include <cmath>

#ifdef __SSE__
#include <xmmintrin.h>
#endif
#ifdef __SSE2__
#include <emmintrin.h>
#endif
#ifdef __SSE3__
#include <pmmintrin.h>
#endif
#ifdef __AVX__
#include <immintrin.h>
#endif
#if defined(__MIC__)
#include <immintrin.h>
#endif
  22. #ifndef _PVFMM_INTRIN_WRAPPER_HPP_
  23. #define _PVFMM_INTRIN_WRAPPER_HPP_
  24. namespace pvfmm{
  25. template <class T>
  26. inline T zero_intrin(){
  27. return (T)0;
  28. }
  29. template <class T, class Real_t>
  30. inline T set_intrin(const Real_t& a){
  31. return a;
  32. }
  33. template <class T, class Real_t>
  34. inline T load_intrin(Real_t const* a){
  35. return a[0];
  36. }
  37. template <class T, class Real_t>
  38. inline T bcast_intrin(Real_t const* a){
  39. return a[0];
  40. }
  41. template <class T, class Real_t>
  42. inline void store_intrin(Real_t* a, const T& b){
  43. a[0]=b;
  44. }
  45. template <class T>
  46. inline T mul_intrin(const T& a, const T& b){
  47. return a*b;
  48. }
  49. template <class T>
  50. inline T add_intrin(const T& a, const T& b){
  51. return a+b;
  52. }
  53. template <class T>
  54. inline T sub_intrin(const T& a, const T& b){
  55. return a-b;
  56. }
  57. template <class T>
  58. inline T rinv_approx_intrin(const T& r2){
  59. if(r2!=0) return 1.0/sqrt(r2);
  60. return 0;
  61. }
  62. template <class T, class Real_t>
  63. inline void rinv_newton_intrin(T& rinv, const T& r2, const Real_t& nwtn_const){
  64. rinv=rinv*(nwtn_const-r2*rinv*rinv);
  65. }
  66. template <class T>
  67. inline T rinv_single_intrin(const T& r2){
  68. if(r2!=0) return 1.0/sqrt(r2);
  69. return 0;
  70. }
  71. #ifdef __SSE3__
// ---- SSE specializations: __m128 holds 4 floats, __m128d holds 2 doubles ----

// Vector of all zeros.
template <>
inline __m128 zero_intrin(){
return _mm_setzero_ps();
}
template <>
inline __m128d zero_intrin(){
return _mm_setzero_pd();
}
// Broadcast one scalar into every lane.
template <>
inline __m128 set_intrin(const float& a){
return _mm_set_ps1(a);
}
template <>
inline __m128d set_intrin(const double& a){
return _mm_set_pd1(a);
}
// Aligned full-vector load (a must be 16-byte aligned).
template <>
inline __m128 load_intrin(float const* a){
return _mm_load_ps(a);
}
template <>
inline __m128d load_intrin(double const* a){
return _mm_load_pd(a);
}
  96. template <>
  97. inline __m128 bcast_intrin(float const* a){
  98. return _mm_broadcast_ss((float*)a);
  99. }
// Broadcast-load a single double into both lanes.
template <>
inline __m128d bcast_intrin(double const* a){
return _mm_load_pd1(a);
}
// Aligned full-vector store (a must be 16-byte aligned).
template <>
inline void store_intrin(float* a, const __m128& b){
return _mm_store_ps(a,b);
}
template <>
inline void store_intrin(double* a, const __m128d& b){
return _mm_store_pd(a,b);
}
// Elementwise multiply.
template <>
inline __m128 mul_intrin(const __m128& a, const __m128& b){
return _mm_mul_ps(a,b);
}
template <>
inline __m128d mul_intrin(const __m128d& a, const __m128d& b){
return _mm_mul_pd(a,b);
}
// Elementwise add.
template <>
inline __m128 add_intrin(const __m128& a, const __m128& b){
return _mm_add_ps(a,b);
}
template <>
inline __m128d add_intrin(const __m128d& a, const __m128d& b){
return _mm_add_pd(a,b);
}
// Elementwise subtract.
template <>
inline __m128 sub_intrin(const __m128& a, const __m128& b){
return _mm_sub_ps(a,b);
}
template <>
inline __m128d sub_intrin(const __m128d& a, const __m128d& b){
return _mm_sub_pd(a,b);
}
// Approximate 4-lane reciprocal square root with zero-input masking.
template <>
inline __m128 rinv_approx_intrin(const __m128& r2){
#define VEC_INTRIN __m128
#define RSQRT_INTRIN(a) _mm_rsqrt_ps(a)
#define CMPEQ_INTRIN(a,b) _mm_cmpeq_ps(a,b)
#define ANDNOT_INTRIN(a,b) _mm_andnot_ps(a,b)
// Approx inverse square root which returns zero for r2=0
// (the compare produces an all-ones mask in r2==0 lanes; andnot clears
// those lanes so rsqrt's result for a zero input never propagates).
return ANDNOT_INTRIN(CMPEQ_INTRIN(r2,zero_intrin<VEC_INTRIN>()),RSQRT_INTRIN(r2));
#undef VEC_INTRIN
#undef RSQRT_INTRIN
#undef CMPEQ_INTRIN
#undef ANDNOT_INTRIN
}
// Double-precision version: narrow to float, use the float rsqrt path,
// then widen the (low-precision) seed back to double.
template <>
inline __m128d rinv_approx_intrin(const __m128d& r2){
#define PD2PS(a) _mm_cvtpd_ps(a)
#define PS2PD(a) _mm_cvtps_pd(a)
return PS2PD(rinv_approx_intrin(PD2PS(r2)));
#undef PD2PS
#undef PS2PD
}
// One Newton iteration refining the rsqrt approximation (float lanes).
template <>
inline void rinv_newton_intrin(__m128& rinv, const __m128& r2, const float& nwtn_const){
#define VEC_INTRIN __m128
// Newton iteration: rinv = 0.5 rinv_approx ( 3 - r2 rinv_approx^2 )
// We do not compute the product with 0.5 and this needs to be adjusted later
// (the caller folds the accumulated scale into nwtn_const).
rinv=mul_intrin(rinv,sub_intrin(set_intrin<VEC_INTRIN>(nwtn_const),mul_intrin(r2,mul_intrin(rinv,rinv))));
#undef VEC_INTRIN
}
// One Newton iteration refining the rsqrt approximation (double lanes).
template <>
inline void rinv_newton_intrin(__m128d& rinv, const __m128d& r2, const double& nwtn_const){
#define VEC_INTRIN __m128d
// Newton iteration: rinv = 0.5 rinv_approx ( 3 - r2 rinv_approx^2 )
// We do not compute the product with 0.5 and this needs to be adjusted later
rinv=mul_intrin(rinv,sub_intrin(set_intrin<VEC_INTRIN>(nwtn_const),mul_intrin(r2,mul_intrin(rinv,rinv))));
#undef VEC_INTRIN
}
// rsqrt approximation plus one (unnormalized) Newton step with constant 3.
template <>
inline __m128 rinv_single_intrin(const __m128& r2){
#define VEC_INTRIN __m128
VEC_INTRIN rinv=rinv_approx_intrin(r2);
rinv_newton_intrin(rinv,r2,(float)3.0);
return rinv;
#undef VEC_INTRIN
}
// Double version: computed entirely in single precision, then widened.
template <>
inline __m128d rinv_single_intrin(const __m128d& r2){
#define PD2PS(a) _mm_cvtpd_ps(a)
#define PS2PD(a) _mm_cvtps_pd(a)
return PS2PD(rinv_single_intrin(PD2PS(r2)));
#undef PD2PS
#undef PS2PD
}
  189. #endif
  190. #ifdef __AVX__
// ---- AVX specializations: __m256 holds 8 floats, __m256d holds 4 doubles ----

// Vector of all zeros.
template <>
inline __m256 zero_intrin(){
return _mm256_setzero_ps();
}
template <>
inline __m256d zero_intrin(){
return _mm256_setzero_pd();
}
  199. template <>
  200. inline __m256 set_intrin(const float& a){
  201. return _mm256_set_ps(a,a,a,a,a,a,a,a);
  202. }
  203. template <>
  204. inline __m256d set_intrin(const double& a){
  205. return _mm256_set_pd(a,a,a,a);
  206. }
// Aligned full-vector load (a must be 32-byte aligned).
template <>
inline __m256 load_intrin(float const* a){
return _mm256_load_ps(a);
}
template <>
inline __m256d load_intrin(double const* a){
return _mm256_load_pd(a);
}
// Broadcast-load one scalar into every lane.
template <>
inline __m256 bcast_intrin(float const* a){
return _mm256_broadcast_ss(a);
}
template <>
inline __m256d bcast_intrin(double const* a){
return _mm256_broadcast_sd(a);
}
// Aligned full-vector store (a must be 32-byte aligned).
template <>
inline void store_intrin(float* a, const __m256& b){
return _mm256_store_ps(a,b);
}
template <>
inline void store_intrin(double* a, const __m256d& b){
return _mm256_store_pd(a,b);
}
// Elementwise multiply.
template <>
inline __m256 mul_intrin(const __m256& a, const __m256& b){
return _mm256_mul_ps(a,b);
}
template <>
inline __m256d mul_intrin(const __m256d& a, const __m256d& b){
return _mm256_mul_pd(a,b);
}
// Elementwise add.
template <>
inline __m256 add_intrin(const __m256& a, const __m256& b){
return _mm256_add_ps(a,b);
}
template <>
inline __m256d add_intrin(const __m256d& a, const __m256d& b){
return _mm256_add_pd(a,b);
}
// Elementwise subtract.
template <>
inline __m256 sub_intrin(const __m256& a, const __m256& b){
return _mm256_sub_ps(a,b);
}
template <>
inline __m256d sub_intrin(const __m256d& a, const __m256d& b){
return _mm256_sub_pd(a,b);
}
  255. template <>
  256. inline __m256 rinv_approx_intrin(const __m256& r2){
  257. #define VEC_INTRIN __m256
  258. #define RSQRT_INTRIN(a) _mm256_rsqrt_ps(a)
  259. #define CMPEQ_INTRIN(a,b) _mm256_insertf128_ps(_mm256_castps128_ps256(_mm_cmpeq_ps(_mm256_extractf128_ps(a,0),_mm256_extractf128_ps(b,0))),\
  260. (_mm_cmpeq_ps(_mm256_extractf128_ps(a,1),_mm256_extractf128_ps(b,1))), 1)
  261. #define ANDNOT_INTRIN(a,b) _mm256_andnot_ps(a,b)
  262. // Approx inverse square root which returns zero for r2=0
  263. return ANDNOT_INTRIN(CMPEQ_INTRIN(r2,zero_intrin<VEC_INTRIN>()),RSQRT_INTRIN(r2));
  264. #undef VEC_INTRIN
  265. #undef RSQRT_INTRIN
  266. #undef CMPEQ_INTRIN
  267. #undef ANDNOT_INTRIN
  268. }
// Double-precision version: narrow to float, use the float rsqrt path,
// then widen the (low-precision) seed back to double.
template <>
inline __m256d rinv_approx_intrin(const __m256d& r2){
#define PD2PS(a) _mm256_cvtpd_ps(a)
#define PS2PD(a) _mm256_cvtps_pd(a)
return PS2PD(rinv_approx_intrin(PD2PS(r2)));
#undef PD2PS
#undef PS2PD
}
// One Newton iteration refining the rsqrt approximation (float lanes).
template <>
inline void rinv_newton_intrin(__m256& rinv, const __m256& r2, const float& nwtn_const){
#define VEC_INTRIN __m256
// Newton iteration: rinv = 0.5 rinv_approx ( 3 - r2 rinv_approx^2 )
// We do not compute the product with 0.5 and this needs to be adjusted later
// (the caller folds the accumulated scale into nwtn_const).
rinv=mul_intrin(rinv,sub_intrin(set_intrin<VEC_INTRIN>(nwtn_const),mul_intrin(r2,mul_intrin(rinv,rinv))));
#undef VEC_INTRIN
}
// One Newton iteration refining the rsqrt approximation (double lanes).
template <>
inline void rinv_newton_intrin(__m256d& rinv, const __m256d& r2, const double& nwtn_const){
#define VEC_INTRIN __m256d
// Newton iteration: rinv = 0.5 rinv_approx ( 3 - r2 rinv_approx^2 )
// We do not compute the product with 0.5 and this needs to be adjusted later
rinv=mul_intrin(rinv,sub_intrin(set_intrin<VEC_INTRIN>(nwtn_const),mul_intrin(r2,mul_intrin(rinv,rinv))));
#undef VEC_INTRIN
}
// rsqrt approximation plus one (unnormalized) Newton step with constant 3.
template <>
inline __m256 rinv_single_intrin(const __m256& r2){
#define VEC_INTRIN __m256
VEC_INTRIN rinv=rinv_approx_intrin(r2);
rinv_newton_intrin(rinv,r2,(float)3.0);
return rinv;
#undef VEC_INTRIN
}
// Double version: computed entirely in single precision, then widened.
template <>
inline __m256d rinv_single_intrin(const __m256d& r2){
#define PD2PS(a) _mm256_cvtpd_ps(a)
#define PS2PD(a) _mm256_cvtps_pd(a)
return PS2PD(rinv_single_intrin(PD2PS(r2)));
#undef PD2PS
#undef PS2PD
}
  309. #endif
  310. template <class VEC, class Real_t>
  311. inline VEC rinv_intrin0(VEC r2){
  312. #define NWTN0 0
  313. #define NWTN1 0
  314. #define NWTN2 0
  315. #define NWTN3 0
  316. //Real_t scal=1; Real_t const_nwtn0=3*scal*scal;
  317. //scal=(NWTN0?2*scal*scal*scal:scal); Real_t const_nwtn1=3*scal*scal;
  318. //scal=(NWTN1?2*scal*scal*scal:scal); Real_t const_nwtn2=3*scal*scal;
  319. //scal=(NWTN2?2*scal*scal*scal:scal); Real_t const_nwtn3=3*scal*scal;
  320. VEC rinv;
  321. #if NWTN0
  322. rinv=rinv_single_intrin(r2);
  323. #else
  324. rinv=rinv_approx_intrin(r2);
  325. #endif
  326. #if NWTN1
  327. rinv_newton_intrin(rinv,r2,const_nwtn1);
  328. #endif
  329. #if NWTN2
  330. rinv_newton_intrin(rinv,r2,const_nwtn2);
  331. #endif
  332. #if NWTN3
  333. rinv_newton_intrin(rinv,r2,const_nwtn3);
  334. #endif
  335. return rinv;
  336. #undef NWTN0
  337. #undef NWTN1
  338. #undef NWTN2
  339. #undef NWTN3
  340. }
  341. template <class VEC, class Real_t>
  342. inline VEC rinv_intrin1(VEC r2){
  343. #define NWTN0 0
  344. #define NWTN1 1
  345. #define NWTN2 0
  346. #define NWTN3 0
  347. Real_t scal=1; //Real_t const_nwtn0=3*scal*scal;
  348. scal=(NWTN0?2*scal*scal*scal:scal); Real_t const_nwtn1=3*scal*scal;
  349. //scal=(NWTN1?2*scal*scal*scal:scal); Real_t const_nwtn2=3*scal*scal;
  350. //scal=(NWTN2?2*scal*scal*scal:scal); Real_t const_nwtn3=3*scal*scal;
  351. VEC rinv;
  352. #if NWTN0
  353. rinv=rinv_single_intrin(r2);
  354. #else
  355. rinv=rinv_approx_intrin(r2);
  356. #endif
  357. #if NWTN1
  358. rinv_newton_intrin(rinv,r2,const_nwtn1);
  359. #endif
  360. #if NWTN2
  361. rinv_newton_intrin(rinv,r2,const_nwtn2);
  362. #endif
  363. #if NWTN3
  364. rinv_newton_intrin(rinv,r2,const_nwtn3);
  365. #endif
  366. return rinv;
  367. #undef NWTN0
  368. #undef NWTN1
  369. #undef NWTN2
  370. #undef NWTN3
  371. }
  372. template <class VEC, class Real_t>
  373. inline VEC rinv_intrin2(VEC r2){
  374. #define NWTN0 0
  375. #define NWTN1 1
  376. #define NWTN2 1
  377. #define NWTN3 0
  378. Real_t scal=1; //Real_t const_nwtn0=3*scal*scal;
  379. scal=(NWTN0?2*scal*scal*scal:scal); Real_t const_nwtn1=3*scal*scal;
  380. scal=(NWTN1?2*scal*scal*scal:scal); Real_t const_nwtn2=3*scal*scal;
  381. //scal=(NWTN2?2*scal*scal*scal:scal); Real_t const_nwtn3=3*scal*scal;
  382. VEC rinv;
  383. #if NWTN0
  384. rinv=rinv_single_intrin(r2);
  385. #else
  386. rinv=rinv_approx_intrin(r2);
  387. #endif
  388. #if NWTN1
  389. rinv_newton_intrin(rinv,r2,const_nwtn1);
  390. #endif
  391. #if NWTN2
  392. rinv_newton_intrin(rinv,r2,const_nwtn2);
  393. #endif
  394. #if NWTN3
  395. rinv_newton_intrin(rinv,r2,const_nwtn3);
  396. #endif
  397. return rinv;
  398. #undef NWTN0
  399. #undef NWTN1
  400. #undef NWTN2
  401. #undef NWTN3
  402. }
  403. template <class VEC, class Real_t>
  404. inline VEC rinv_intrin3(VEC r2){
  405. #define NWTN0 0
  406. #define NWTN1 1
  407. #define NWTN2 1
  408. #define NWTN3 1
  409. Real_t scal=1; //Real_t const_nwtn0=3*scal*scal;
  410. scal=(NWTN0?2*scal*scal*scal:scal); Real_t const_nwtn1=3*scal*scal;
  411. scal=(NWTN1?2*scal*scal*scal:scal); Real_t const_nwtn2=3*scal*scal;
  412. scal=(NWTN2?2*scal*scal*scal:scal); Real_t const_nwtn3=3*scal*scal;
  413. VEC rinv;
  414. #if NWTN0
  415. rinv=rinv_single_intrin(r2);
  416. #else
  417. rinv=rinv_approx_intrin(r2);
  418. #endif
  419. #if NWTN1
  420. rinv_newton_intrin(rinv,r2,const_nwtn1);
  421. #endif
  422. #if NWTN2
  423. rinv_newton_intrin(rinv,r2,const_nwtn2);
  424. #endif
  425. #if NWTN3
  426. rinv_newton_intrin(rinv,r2,const_nwtn3);
  427. #endif
  428. return rinv;
  429. #undef NWTN0
  430. #undef NWTN1
  431. #undef NWTN2
  432. #undef NWTN3
  433. }
  434. }
  435. #endif //_PVFMM_INTRIN_WRAPPER_HPP_