// precomp_mat.txx
/**
 * \file precomp_mat.txx
 * \author Dhairya Malhotra, dhairya.malhotra@gmail.com
 * \date 3-07-2011
 * \brief This file contains the implementation of the PrecompMat class.
 * Handles storage of precomputed translation matrices.
 */
#include <omp.h>
#include <stdint.h>
#ifdef PVFMM_HAVE_SYS_STAT_H
#include <sys/stat.h>
#endif

#include <cassert>
#include <cstdio>
#include <cstdlib>
#include <vector>

#include <mem_mgr.hpp>
#include <profile.hpp>
#include <vector.hpp>
  17. namespace pvfmm{
  18. #define PRECOMP_MIN_DEPTH 40
  19. template <class T>
  20. PrecompMat<T>::PrecompMat(bool homogen, int max_d): homogeneous(homogen), max_depth(max_d){
  21. if(!homogen) mat.resize((max_d+PRECOMP_MIN_DEPTH)*Type_Count); //For each U,V,W,X
  22. else mat.resize(Type_Count);
  23. for(size_t i=0;i<mat.size();i++)
  24. mat[i].resize(500);
  25. perm.resize(Type_Count);
  26. for(size_t i=0;i<Type_Count;i++){
  27. perm[i].resize(Perm_Count);
  28. }
  29. perm_r.resize((max_d+PRECOMP_MIN_DEPTH)*Type_Count);
  30. perm_c.resize((max_d+PRECOMP_MIN_DEPTH)*Type_Count);
  31. for(size_t i=0;i<perm_r.size();i++){
  32. perm_r[i].resize(500);
  33. perm_c[i].resize(500);
  34. }
  35. }
  36. template <class T>
  37. Matrix<T>& PrecompMat<T>::Mat(int l, Mat_Type type, size_t indx){
  38. int level=(homogeneous?0:l+PRECOMP_MIN_DEPTH);
  39. assert(level*Type_Count+type<mat.size());
  40. //#pragma omp critical (PrecompMAT)
  41. if(indx>=mat[level*Type_Count+type].size()){
  42. mat[level*Type_Count+type].resize(indx+1);
  43. assert(false); //TODO: this is not thread safe.
  44. }
  45. return mat[level*Type_Count+type][indx];
  46. }
  47. template <class T>
  48. Permutation<T>& PrecompMat<T>::Perm_R(int l, Mat_Type type, size_t indx){
  49. int level=l+PRECOMP_MIN_DEPTH;
  50. assert(level*Type_Count+type<perm_r.size());
  51. //#pragma omp critical (PrecompMAT)
  52. if(indx>=perm_r[level*Type_Count+type].size()){
  53. perm_r[level*Type_Count+type].resize(indx+1);
  54. assert(false); //TODO: this is not thread safe.
  55. }
  56. return perm_r[level*Type_Count+type][indx];
  57. }
  58. template <class T>
  59. Permutation<T>& PrecompMat<T>::Perm_C(int l, Mat_Type type, size_t indx){
  60. int level=l+PRECOMP_MIN_DEPTH;
  61. assert(level*Type_Count+type<perm_c.size());
  62. //#pragma omp critical (PrecompMAT)
  63. if(indx>=perm_c[level*Type_Count+type].size()){
  64. perm_c[level*Type_Count+type].resize(indx+1);
  65. assert(false); //TODO: this is not thread safe.
  66. }
  67. return perm_c[level*Type_Count+type][indx];
  68. }
  69. template <class T>
  70. Permutation<T>& PrecompMat<T>::Perm(Mat_Type type, size_t indx){
  71. assert(indx<Perm_Count);
  72. return perm[type][indx];
  73. }
  74. inline static uintptr_t align_ptr(uintptr_t ptr){
  75. static uintptr_t ALIGN_MINUS_ONE=MEM_ALIGN-1;
  76. static uintptr_t NOT_ALIGN_MINUS_ONE=~ALIGN_MINUS_ONE;
  77. return ((ptr+ALIGN_MINUS_ONE) & NOT_ALIGN_MINUS_ONE);
  78. }
  79. template <class T>
  80. size_t PrecompMat<T>::CompactData(int level, Mat_Type type, Matrix<char>& comp_data, size_t offset){
  81. struct HeaderData{
  82. size_t total_size;
  83. size_t level;
  84. size_t mat_cnt ;
  85. size_t max_depth;
  86. };
  87. if(comp_data.Dim(0)*comp_data.Dim(1)>offset){
  88. char* indx_ptr=comp_data[0]+offset;
  89. HeaderData& header=*(HeaderData*)indx_ptr; indx_ptr+=sizeof(HeaderData);
  90. if(level==header.level){ // Data already exists.
  91. offset+=header.total_size;
  92. return offset;
  93. }
  94. }
  95. std::vector<Matrix<T> >& mat_=mat[(homogeneous?0:level+PRECOMP_MIN_DEPTH)*Type_Count+type];
  96. size_t mat_cnt=mat_.size();
  97. size_t indx_size=0;
  98. size_t mem_size=0;
  99. int omp_p=omp_get_max_threads();
  100. size_t l0=(homogeneous?0:level);
  101. size_t l1=(homogeneous?max_depth:level+1);
  102. { // Determine memory size.
  103. indx_size+=sizeof(HeaderData); // HeaderData
  104. indx_size+=mat_cnt*(1+(2+2)*(l1-l0))*sizeof(size_t); //Mat, Perm_R, Perm_C.
  105. indx_size=align_ptr(indx_size);
  106. for(size_t j=0;j<mat_cnt;j++){
  107. Matrix <T>& M =Mat (level,type,j);
  108. if(M.Dim(0)>0 && M.Dim(1)>0){
  109. mem_size+=M.Dim(0)*M.Dim(1)*sizeof(T); mem_size=align_ptr(mem_size);
  110. }
  111. for(size_t l=l0;l<l1;l++){
  112. Permutation<T>& Pr=Perm_R(l,type,j);
  113. Permutation<T>& Pc=Perm_C(l,type,j);
  114. if(Pr.Dim()>0){
  115. mem_size+=Pr.Dim()*sizeof(PERM_INT_T); mem_size=align_ptr(mem_size);
  116. mem_size+=Pr.Dim()*sizeof(T); mem_size=align_ptr(mem_size);
  117. }
  118. if(Pc.Dim()>0){
  119. mem_size+=Pc.Dim()*sizeof(PERM_INT_T); mem_size=align_ptr(mem_size);
  120. mem_size+=Pc.Dim()*sizeof(T); mem_size=align_ptr(mem_size);
  121. }
  122. }
  123. }
  124. }
  125. if(comp_data.Dim(0)*comp_data.Dim(1)<offset+indx_size+mem_size){ // Resize if needed.
  126. Matrix<char> old_data;
  127. if(offset>0) old_data=comp_data;
  128. comp_data.Resize(1,offset+indx_size+mem_size);
  129. if(offset>0){
  130. #pragma omp parallel for
  131. for(int tid=0;tid<omp_p;tid++){ // Copy data.
  132. size_t a=(offset*(tid+0))/omp_p;
  133. size_t b=(offset*(tid+1))/omp_p;
  134. mem::memcopy(comp_data[0]+a, old_data[0]+a, b-a);
  135. }
  136. }
  137. }
  138. { // Create indx.
  139. char* indx_ptr=comp_data[0]+offset;
  140. HeaderData& header=*(HeaderData*)indx_ptr; indx_ptr+=sizeof(HeaderData);
  141. Matrix<size_t> offset_indx(mat_cnt,1+(2+2)*(l1-l0), (size_t*)indx_ptr, false);
  142. header.total_size=indx_size+mem_size;
  143. header. level=level ;
  144. header. mat_cnt = mat_cnt ;
  145. header. max_depth=l1-l0 ;
  146. size_t data_offset=offset+indx_size;
  147. for(size_t j=0;j<mat_cnt;j++){
  148. Matrix <T>& M =Mat (level,type,j);
  149. offset_indx[j][0]=data_offset; indx_ptr+=sizeof(size_t);
  150. data_offset+=M.Dim(0)*M.Dim(1)*sizeof(T); mem_size=align_ptr(mem_size);
  151. for(size_t l=l0;l<l1;l++){
  152. Permutation<T>& Pr=Perm_R(l,type,j);
  153. offset_indx[j][1+4*(l-l0)+0]=data_offset;
  154. data_offset+=Pr.Dim()*sizeof(PERM_INT_T); mem_size=align_ptr(mem_size);
  155. offset_indx[j][1+4*(l-l0)+1]=data_offset;
  156. data_offset+=Pr.Dim()*sizeof(T); mem_size=align_ptr(mem_size);
  157. Permutation<T>& Pc=Perm_C(l,type,j);
  158. offset_indx[j][1+4*(l-l0)+2]=data_offset;
  159. data_offset+=Pc.Dim()*sizeof(PERM_INT_T); mem_size=align_ptr(mem_size);
  160. offset_indx[j][1+4*(l-l0)+3]=data_offset;
  161. data_offset+=Pc.Dim()*sizeof(T); mem_size=align_ptr(mem_size);
  162. }
  163. }
  164. }
  165. #pragma omp parallel for
  166. for(int tid=0;tid<omp_p;tid++){ // Copy data.
  167. char* indx_ptr=comp_data[0]+offset;
  168. HeaderData& header=*(HeaderData*)indx_ptr;indx_ptr+=sizeof(HeaderData);
  169. Matrix<size_t> offset_indx(mat_cnt,1+(2+2)*(l1-l0), (size_t*)indx_ptr, false);
  170. for(size_t j=0;j<mat_cnt;j++){
  171. Matrix <T>& M =Mat (level,type,j);
  172. if(M.Dim(0)>0 && M.Dim(1)>0){
  173. size_t a=(M.Dim(0)*M.Dim(1)* tid )/omp_p;
  174. size_t b=(M.Dim(0)*M.Dim(1)*(tid+1))/omp_p;
  175. mem::memcopy(comp_data[0]+offset_indx[j][0]+a*sizeof(T), &M[0][a], (b-a)*sizeof(T));
  176. }
  177. for(size_t l=l0;l<l1;l++){
  178. Permutation<T>& Pr=Perm_R(l,type,j);
  179. Permutation<T>& Pc=Perm_C(l,type,j);
  180. if(Pr.Dim()>0){
  181. size_t a=(Pr.Dim()* tid )/omp_p;
  182. size_t b=(Pr.Dim()*(tid+1))/omp_p;
  183. mem::memcopy(comp_data[0]+offset_indx[j][1+4*(l-l0)+0]+a*sizeof(PERM_INT_T), &Pr.perm[a], (b-a)*sizeof(PERM_INT_T));
  184. mem::memcopy(comp_data[0]+offset_indx[j][1+4*(l-l0)+1]+a*sizeof( T), &Pr.scal[a], (b-a)*sizeof( T));
  185. }
  186. if(Pc.Dim()>0){
  187. size_t a=(Pc.Dim()* tid )/omp_p;
  188. size_t b=(Pc.Dim()*(tid+1))/omp_p;
  189. mem::memcopy(comp_data[0]+offset_indx[j][1+4*(l-l0)+2]+a*sizeof(PERM_INT_T), &Pc.perm[a], (b-a)*sizeof(PERM_INT_T));
  190. mem::memcopy(comp_data[0]+offset_indx[j][1+4*(l-l0)+3]+a*sizeof( T), &Pc.scal[a], (b-a)*sizeof( T));
  191. }
  192. }
  193. }
  194. }
  195. return offset+indx_size+mem_size;
  196. }
  197. template <class T>
  198. void PrecompMat<T>::Save2File(const char* fname, bool replace){
  199. FILE* f=fopen(fname,"r");
  200. if(f!=NULL) { //File already exists.
  201. fclose(f);
  202. if(!replace) return;
  203. }
  204. f=fopen(fname,"wb");
  205. if(f==NULL) return;
  206. int tmp;
  207. tmp=sizeof(T);
  208. fwrite(&tmp,sizeof(int),1,f);
  209. tmp=(homogeneous?1:0);
  210. fwrite(&tmp,sizeof(int),1,f);
  211. tmp=max_depth;
  212. fwrite(&tmp,sizeof(int),1,f);
  213. for(size_t i=0;i<mat.size();i++){
  214. int n=mat[i].size();
  215. fwrite(&n,sizeof(int),1,f);
  216. for(int j=0;j<n;j++){
  217. Matrix<T>& M=mat[i][j];
  218. int n1=M.Dim(0);
  219. fwrite(&n1,sizeof(int),1,f);
  220. int n2=M.Dim(1);
  221. fwrite(&n2,sizeof(int),1,f);
  222. if(n1*n2>0)
  223. fwrite(&M[0][0],sizeof(T),n1*n2,f);
  224. }
  225. }
  226. fclose(f);
  227. }
  228. #define MY_FREAD(a,b,c,d) { \
  229. size_t r_cnt=fread(a,b,c,d); \
  230. if(r_cnt!=c){ \
  231. fputs ("Reading error ",stderr); \
  232. exit (-1); \
  233. } }
  234. template <class T>
  235. void PrecompMat<T>::LoadFile(const char* fname, MPI_Comm comm){
  236. Profile::Tic("LoadMatrices",&comm,true,3);
  237. Profile::Tic("ReadFile",&comm,true,4);
  238. size_t f_size=0;
  239. char* f_data=NULL;
  240. int np, myrank;
  241. MPI_Comm_size(comm,&np);
  242. MPI_Comm_rank(comm,&myrank);
  243. if(myrank==0){
  244. FILE* f=fopen(fname,"rb");
  245. if(f==NULL){
  246. f_size=0;
  247. }else{
  248. struct stat fileStat;
  249. if(stat(fname,&fileStat) < 0) f_size=0;
  250. else f_size=fileStat.st_size;
  251. //fseek (f, 0, SEEK_END);
  252. //f_size=ftell (f);
  253. }
  254. if(f_size>0){
  255. f_data=mem::aligned_new<char>(f_size);
  256. fseek (f, 0, SEEK_SET);
  257. MY_FREAD(f_data,sizeof(char),f_size,f);
  258. fclose(f);
  259. }
  260. }
  261. Profile::Toc();
  262. Profile::Tic("Broadcast",&comm,true,4);
  263. MPI_Bcast( &f_size, sizeof(size_t), MPI_BYTE, 0, comm );
  264. if(f_size==0){
  265. Profile::Toc();
  266. Profile::Toc();
  267. return;
  268. }
  269. if(f_data==NULL) f_data=mem::aligned_new<char>(f_size);
  270. char* f_ptr=f_data;
  271. int max_send_size=1000000000;
  272. while(f_size>0){
  273. if(f_size>(size_t)max_send_size){
  274. MPI_Bcast( f_ptr, max_send_size, MPI_BYTE, 0, comm );
  275. f_size-=max_send_size;
  276. f_ptr+=max_send_size;
  277. }else{
  278. MPI_Bcast( f_ptr, f_size, MPI_BYTE, 0, comm );
  279. f_size=0;
  280. }
  281. }
  282. f_ptr=f_data;
  283. {
  284. int tmp;
  285. tmp=*(int*)f_ptr; f_ptr+=sizeof(int);
  286. assert(tmp==sizeof(T));
  287. tmp=*(int*)f_ptr; f_ptr+=sizeof(int);
  288. homogeneous=tmp;
  289. int max_depth_;
  290. max_depth_=*(int*)f_ptr; f_ptr+=sizeof(int);
  291. size_t mat_size=(size_t)Type_Count*(homogeneous?1:max_depth_+PRECOMP_MIN_DEPTH);
  292. if(mat.size()<mat_size){
  293. max_depth=max_depth_;
  294. mat.resize(mat_size);
  295. }
  296. for(size_t i=0;i<mat_size;i++){
  297. int n;
  298. n=*(int*)f_ptr; f_ptr+=sizeof(int);
  299. if(mat[i].size()<(size_t)n)
  300. mat[i].resize(n);
  301. for(int j=0;j<n;j++){
  302. Matrix<T>& M=mat[i][j];
  303. int n1;
  304. n1=*(int*)f_ptr; f_ptr+=sizeof(int);
  305. int n2;
  306. n2=*(int*)f_ptr; f_ptr+=sizeof(int);
  307. if(n1*n2>0){
  308. M.Resize(n1,n2);
  309. mem::memcopy(&M[0][0], f_ptr, sizeof(T)*n1*n2); f_ptr+=sizeof(T)*n1*n2;
  310. }
  311. }
  312. }
  313. // Resize perm_r, perm_c
  314. perm_r.resize((max_depth_+PRECOMP_MIN_DEPTH)*Type_Count);
  315. perm_c.resize((max_depth_+PRECOMP_MIN_DEPTH)*Type_Count);
  316. for(size_t i=0;i<perm_r.size();i++){
  317. perm_r[i].resize(500);
  318. perm_c[i].resize(500);
  319. }
  320. }
  321. mem::aligned_delete<char>(f_data);
  322. Profile::Toc();
  323. Profile::Toc();
  324. }
  325. #undef MY_FREAD
  326. #undef PRECOMP_MIN_DEPTH
  327. template <class T>
  328. std::vector<T>& PrecompMat<T>::RelativeTrgCoord(){
  329. return rel_trg_coord;
  330. }
  331. template <class T>
  332. bool PrecompMat<T>::Homogen(){
  333. return homogeneous;
  334. }
  335. }//end namespace