fmm_cheb.txx

/**
 * \file fmm_cheb.txx
 * \author Dhairya Malhotra, dhairya.malhotra@gmail.com
 * \date 3-07-2011
 * \brief This file contains the implementation of the FMM_Cheb class.
 */

#include <mpi.h>
#include <fftw3.h>
#include <parUtils.h>
#ifdef PVFMM_HAVE_SYS_STAT_H
#include <sys/stat.h>
#endif

namespace pvfmm{
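// Overview (editorial note): FMM_Cheb extends the point FMM (FMM_Pts) to volume
// potentials. Leaf sources are Chebyshev coefficient vectors (ChebData()), and
// leaf outputs are Chebyshev approximations of the potential (cheb_out), which
// PostProcessing() later samples at the actual target points.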
template <class FMMNode>
FMM_Cheb<FMMNode>::~FMM_Cheb() {
  if(this->mat!=NULL){
    int rank;
    MPI_Comm_rank(this->comm,&rank);
    if(!rank){
      FILE* f=fopen(this->mat_fname.c_str(),"r");
      if(f==NULL) { //File does not exist.
        { // Delete easy-to-compute matrices before saving to file.
          Mat_Type mat_types[]={W_Type, V_Type, V1_Type, U2U_Type, D2D_Type, D2T_Type};
          for(size_t t=0;t<sizeof(mat_types)/sizeof(Mat_Type);t++){
            Mat_Type type=mat_types[t];
            for(int l=-BC_LEVELS;l<MAX_DEPTH;l++)
            for(size_t indx=0;indx<this->interac_list.ListCount(type);indx++){
              Matrix<Real_t>& M=this->mat->Mat(l, type, indx);
              M.Resize(0,0);
            }
          }
        }
        this->mat->Save2File(this->mat_fname.c_str());
      }else fclose(f);
    }
  }
}
template <class FMMNode>
void FMM_Cheb<FMMNode>::Initialize(int mult_order, int cheb_deg_, const MPI_Comm& comm_, const Kernel<Real_t>* kernel_, const Kernel<Real_t>* aux_kernel_){
  Profile::Tic("InitFMM_Cheb",&comm_,true);
  {
    int rank;
    MPI_Comm_rank(comm_,&rank);
    int dim=3; //Only supporting 3D
    cheb_deg=cheb_deg_;

    if(this->mat_fname.size()==0){ // Construct the precomputed-data file name.
      std::stringstream st;
      st<<PVFMM_PRECOMP_DATA_PATH;
      if(!st.str().size()){ // Fall back to the PVFMM_DIR environment variable.
        char* pvfmm_dir=getenv("PVFMM_DIR");
        if(pvfmm_dir) st<<pvfmm_dir;
      }
      #ifndef STAT_MACROS_BROKEN
      if(st.str().size()){ // Check that the path is a directory.
        struct stat stat_buff;
        if(stat(st.str().c_str(), &stat_buff) || !S_ISDIR(stat_buff.st_mode)){
          std::cout<<"error: path not found: "<<st.str()<<'\n';
          exit(0);
        }
      }
      #endif
      if(st.str().size()) st<<'/';
      st<<"Precomp_"<<kernel_->ker_name.c_str()<<"_q"<<cheb_deg<<"_m"<<mult_order;
      if(sizeof(Real_t)==8) st<<"";
      else if(sizeof(Real_t)==4) st<<"_f";
      else st<<"_t"<<sizeof(Real_t);
      st<<".data";
      this->mat_fname=st.str();
    }

    if(!rank){
      FILE* f=fopen(this->mat_fname.c_str(),"r");
      if(f==NULL) { //File does not exist.
        std::cout<<"Could not find precomputed data file for "<<kernel_->ker_name.c_str()<<" kernel with q="<<cheb_deg<<" and m="<<mult_order<<".\n";
        std::cout<<"This data will be computed and stored for future use at:\n"<<this->mat_fname<<'\n';
        std::cout<<"This may take a while...\n";
      }else fclose(f);
    }

    //this->mat->LoadFile(this->mat_fname.c_str(), this->comm);
    FMM_Pts<FMMNode>::Initialize(mult_order, comm_, kernel_, aux_kernel_);
    this->mat->RelativeTrgCoord()=cheb_nodes<Real_t>(ChebDeg(),dim);

    Profile::Tic("PrecompD2T",&this->comm,false,4);
    this->PrecompAll(D2T_Type);
    Profile::Toc();

    //Volume solver.
    Profile::Tic("PrecompS2M",&this->comm,false,4);
    this->PrecompAll(S2U_Type);
    Profile::Toc();

    Profile::Tic("PrecompX",&this->comm,false,4);
    this->PrecompAll(X_Type);
    Profile::Toc();

    Profile::Tic("PrecompW",&this->comm,false,4);
    this->PrecompAll(W_Type);
    Profile::Toc();

    Profile::Tic("PrecompU0",&this->comm,false,4);
    this->PrecompAll(U0_Type);
    Profile::Toc();

    Profile::Tic("PrecompU1",&this->comm,false,4);
    this->PrecompAll(U1_Type);
    Profile::Toc();

    Profile::Tic("PrecompU2",&this->comm,false,4);
    this->PrecompAll(U2_Type);
    Profile::Toc();

    Profile::Tic("Save2File",&this->comm,false,4);
    if(!rank){
      FILE* f=fopen(this->mat_fname.c_str(),"r");
      if(f==NULL) { //File does not exist.
        { // Delete easy-to-compute matrices before saving to file.
          Mat_Type mat_types[]={W_Type, V_Type, V1_Type, U2U_Type, D2D_Type, D2T_Type};
          for(size_t t=0;t<sizeof(mat_types)/sizeof(Mat_Type);t++){
            Mat_Type type=mat_types[t];
            for(int l=-BC_LEVELS;l<MAX_DEPTH;l++)
            for(size_t indx=0;indx<this->interac_list.ListCount(type);indx++){
              Matrix<Real_t>& M=this->mat->Mat(l, type, indx);
              M.Resize(0,0);
            }
          }
        }
        this->mat->Save2File(this->mat_fname.c_str());
      }else fclose(f);
    }
    Profile::Toc();

    Profile::Tic("Recompute",&this->comm,false,4);
    { // Recompute the matrices deleted above.
      this->PrecompAll(W_Type);
      this->PrecompAll(V_Type);
      this->PrecompAll(V1_Type);
      this->PrecompAll(U2U_Type);
      this->PrecompAll(D2D_Type);
      this->PrecompAll(D2T_Type);
    }
    Profile::Toc();
  }
  Profile::Toc();
}
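// Usage (sketch, editorial note): the snippet below shows how Initialize() is
// typically driven. Only the Initialize(mult_order, cheb_deg, comm, kernel,
// aux_kernel) signature comes from this file; the node type name and the way
// the kernel object is obtained are assumptions and may differ in this project.
// Often the same kernel object is passed as both kernel and aux_kernel.
//
//   typedef pvfmm::Cheb_Node<double> Node_t;   // hypothetical node type
//   pvfmm::FMM_Cheb<Node_t> fmm;
//   const pvfmm::Kernel<double>* ker=/* e.g. a Laplace kernel object */;
//   int mult_order=10, cheb_deg=14;
//   fmm.Initialize(mult_order, cheb_deg, MPI_COMM_WORLD, ker, ker);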
template <class FMMNode>
Permutation<typename FMMNode::Real_t>& FMM_Cheb<FMMNode>::PrecompPerm(Mat_Type type, Perm_Type perm_indx){
  int dim=3; //Only supporting 3D

  //Check if the permutation already exists.
  Permutation<Real_t>& P_ = FMM_Pts<FMMNode>::PrecompPerm(type, perm_indx);
  if(P_.Dim()!=0) return P_;

  Matrix<size_t> swap_xy(10,9);
  Matrix<size_t> swap_xz(10,9);
  { // This is repeated from FMM_Pts::PrecompPerm, but I don't see any other way.
    for(int i=0;i<9;i++)
    for(int j=0;j<9;j++){
      swap_xy[i][j]=j;
      swap_xz[i][j]=j;
    }
    swap_xy[3][0]=1; swap_xy[3][1]=0; swap_xy[3][2]=2;
    swap_xz[3][0]=2; swap_xz[3][1]=1; swap_xz[3][2]=0;

    swap_xy[6][0]=1; swap_xy[6][1]=0; swap_xy[6][2]=2;
    swap_xy[6][3]=4; swap_xy[6][4]=3; swap_xy[6][5]=5;

    swap_xz[6][0]=2; swap_xz[6][1]=1; swap_xz[6][2]=0;
    swap_xz[6][3]=5; swap_xz[6][4]=4; swap_xz[6][5]=3;

    swap_xy[9][0]=4; swap_xy[9][1]=3; swap_xy[9][2]=5;
    swap_xy[9][3]=1; swap_xy[9][4]=0; swap_xy[9][5]=2;
    swap_xy[9][6]=7; swap_xy[9][7]=6; swap_xy[9][8]=8;

    swap_xz[9][0]=8; swap_xz[9][1]=7; swap_xz[9][2]=6;
    swap_xz[9][3]=5; swap_xz[9][4]=4; swap_xz[9][5]=3;
    swap_xz[9][6]=2; swap_xz[9][7]=1; swap_xz[9][8]=0;
  }

  //Compute the permutation.
  Permutation<Real_t> P;
  switch (type){
    case UC2UE_Type:
    case DC2DE_Type:
    case S2U_Type:
    case U2U_Type:
    case D2D_Type:
    case D2T_Type:
    {
      break;
    }
    case U0_Type:
    {
      int coeff_cnt=((cheb_deg+1)*(cheb_deg+2)*(cheb_deg+3))/6;
      int n3=(int)pow((Real_t)(cheb_deg+1),dim);
      int dof=(perm_indx<C_Perm?this->kernel.ker_dim[0]:this->kernel.ker_dim[1]);
      size_t p_indx=perm_indx % C_Perm;
      Permutation<Real_t> P0(n3*dof);
      if(dof%3==0 && this->kernel.ker_name.compare("biot_savart")==0) //biot_savart
        for(int j=0;j<dof;j++)
        for(int i=0;i<n3;i++)
          P0.scal[i+n3*j]*=(perm_indx<C_Perm?1:-1);
      if(p_indx==ReflecX || p_indx==ReflecY || p_indx==ReflecZ){
        for(int j=0;j<dof;j++)
        for(int i=0;i<n3;i++){
          int x[3]={i%(cheb_deg+1), (i/(cheb_deg+1))%(cheb_deg+1), i/(cheb_deg+1)/(cheb_deg+1)};
          P0.scal[i+n3*j]*=(x[p_indx]%2?-1.0:1.0);
          if(dof%3==0) //stokes_vel (and like kernels)
            P0.scal[i+n3*j]*=( j %3==p_indx?-1.0:1.0);
          if(dof%3==0 && (dof/3)%3==0)
            P0.scal[i+n3*j]*=((j/3)%3==p_indx?-1.0:1.0);
        }
      }else if(p_indx==SwapXY || p_indx==SwapXZ){
        int indx[3];
        if(p_indx==SwapXY) {indx[0]=1; indx[1]=0; indx[2]=2;}
        if(p_indx==SwapXZ) {indx[0]=2; indx[1]=1; indx[2]=0;}
        for(int j=0;j<dof;j++)
        for(int i=0;i<n3;i++){
          int x[3]={i%(cheb_deg+1), (i/(cheb_deg+1))%(cheb_deg+1), i/(cheb_deg+1)/(cheb_deg+1)};
          P0.perm[i+n3*j]=x[indx[0]]+(x[indx[1]]+x[indx[2]]*(cheb_deg+1))*(cheb_deg+1)
                         +n3*(p_indx==SwapXY?swap_xy[dof][j]:swap_xz[dof][j]);
        }
      }
      // Map the full (cheb_deg+1)^3 tensor grid to the compressed set of
      // Chebyshev coefficients with total degree <= cheb_deg.
      std::vector<size_t> coeff_map(n3*dof,0);
      {
        int indx=0;
        for(int j=0;j<dof;j++)
        for(int i=0;i<n3;i++){
          int x[3]={i%(cheb_deg+1), (i/(cheb_deg+1))%(cheb_deg+1), i/(cheb_deg+1)/(cheb_deg+1)};
          if(x[0]+x[1]+x[2]<=cheb_deg){
            coeff_map[i+n3*j]=indx;
            indx++;
          }
        }
      }
      P=Permutation<Real_t>(coeff_cnt*dof);
      {
        int indx=0;
        for(int j=0;j<dof;j++)
        for(int i=0;i<n3;i++){
          int x[3]={i%(cheb_deg+1), (i/(cheb_deg+1))%(cheb_deg+1), i/(cheb_deg+1)/(cheb_deg+1)};
          if(x[0]+x[1]+x[2]<=cheb_deg){
            P.perm[indx]=coeff_map[P0.perm[i+n3*j]];
            P.scal[indx]= P0.scal[i+n3*j] ;
            indx++;
          }
        }
      }
      break;
    }
    case U1_Type:
    case U2_Type:
    {
      P=PrecompPerm(U0_Type, perm_indx);
      break;
    }
    case V_Type:
    case V1_Type:
    {
      break;
    }
    case W_Type:
    {
      if(perm_indx>=C_Perm) P=PrecompPerm(U0_Type, perm_indx);
      else P=PrecompPerm(D2D_Type, perm_indx);
      break;
    }
    case X_Type:
    {
      if(perm_indx<C_Perm) P=PrecompPerm(U0_Type, perm_indx);
      else P=PrecompPerm(D2D_Type, perm_indx);
      break;
    }
    default:
      return FMM_Pts<FMMNode>::PrecompPerm(type, perm_indx);
  }

  //Save the permutation for future use.
  #pragma omp critical (PRECOMP_MATRIX_PTS)
  if(P_.Dim()==0){ P_=P;}
  return P_;
}
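// Note (editorial): throughout the precomputation below, a Chebyshev density of
// degree cheb_deg has n_src=(cheb_deg+1)(cheb_deg+2)(cheb_deg+3)/6 coefficients
// per kernel component (e.g. cheb_deg=14 gives 680), which sets the row count
// of the source-side matrices built in Precomp().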
template <class FMMNode>
Matrix<typename FMMNode::Real_t>& FMM_Cheb<FMMNode>::Precomp(int level, Mat_Type type, size_t mat_indx){
  if(this->Homogen()) level=0;

  //Check if the matrix already exists.
  Matrix<Real_t>& M_ = this->mat->Mat(level, type, mat_indx);
  if(M_.Dim(0)!=0 && M_.Dim(1)!=0) return M_;
  else{ //Compute matrix from symmetry class (if possible).
    size_t class_indx = this->interac_list.InteracClass(type, mat_indx);
    if(class_indx!=mat_indx){
      Matrix<Real_t>& M0 = this->Precomp(level, type, class_indx);
      Permutation<Real_t>& Pr = this->interac_list.Perm_R(level, type, mat_indx);
      Permutation<Real_t>& Pc = this->interac_list.Perm_C(level, type, mat_indx);
      if(Pr.Dim()>0 && Pc.Dim()>0 && M0.Dim(0)>0 && M0.Dim(1)>0) return M_;
    }
  }

  int myrank, np;
  MPI_Comm_rank(this->comm, &myrank);
  MPI_Comm_size(this->comm, &np);

  size_t progress=0, class_count=0;
  { // Determine precomputation progress.
    size_t mat_cnt=this->interac_list.ListCount((Mat_Type)type);
    for(size_t i=0; i<mat_cnt; i++){
      size_t indx=this->interac_list.InteracClass((Mat_Type)type,i);
      if(indx==i){
        class_count++;
        if(i<mat_indx) progress++;
      }
    }
  }

  //Compute the matrix.
  Matrix<Real_t> M;
  int n_src=((cheb_deg+1)*(cheb_deg+2)*(cheb_deg+3))/6;
  switch (type){
    case S2U_Type:
    {
      if(this->MultipoleOrder()==0) break;
      Real_t r=pow(0.5,level);
      Real_t c[3]={0,0,0};

      // Coord of upward check surface.
      std::vector<Real_t> uc_coord=u_check_surf(this->MultipoleOrder(),c,level);
      size_t n_uc=uc_coord.size()/3;

      // Evaluate potential at check surface.
      Matrix<Real_t> M_s2c(n_src*this->aux_kernel.ker_dim[0],n_uc*this->aux_kernel.ker_dim[1]); //source 2 check
      Matrix<Real_t> M_s2c_local(n_src*this->aux_kernel.ker_dim[0],n_uc*this->aux_kernel.ker_dim[1]);
      {
        M_s2c.SetZero();
        M_s2c_local.SetZero();
        size_t cnt_done=0;
        #pragma omp parallel for schedule(dynamic)
        for(size_t i=myrank;i<n_uc;i+=np){
          std::vector<Real_t> M_=cheb_integ(cheb_deg, &uc_coord[i*3], r, this->aux_kernel);
          #ifdef __VERBOSE__
          #pragma omp critical
          if(!myrank){
            cnt_done++;
            std::cout<<"\r Progress: "<<(100*progress*n_uc+100*cnt_done*np)/(class_count*n_uc)<<"% "<<std::flush;
          }
          #endif
          for(int k=0; k<this->aux_kernel.ker_dim[1]; k++)
          for(size_t j=0; j<(size_t)M_s2c.Dim(0); j++)
            M_s2c_local[j][i*this->aux_kernel.ker_dim[1]+k] = M_[j+k*M_s2c.Dim(0)];
        }
        if(!myrank) std::cout<<"\r \r"<<std::flush;
        MPI_Allreduce(M_s2c_local[0], M_s2c[0], M_s2c.Dim(0)*M_s2c.Dim(1), par::Mpi_datatype<Real_t>::value(), par::Mpi_datatype<Real_t>::sum(), this->comm);
      }
      Matrix<Real_t>& M_c2e = this->Precomp(level, UC2UE_Type, 0);
      M=M_s2c*M_c2e;
      break;
    }
    case D2T_Type:
    {
      if(this->MultipoleOrder()==0) break;
      Matrix<Real_t>& M_s2t=FMM_Pts<FMMNode>::Precomp(level, type, mat_indx);
      int n_trg=M_s2t.Dim(1)/this->kernel.ker_dim[1];

      // Compute Chebyshev approx from target potential.
      M.Resize(M_s2t.Dim(0), n_src*this->kernel.ker_dim[1]);
      #pragma omp parallel for schedule(dynamic)
      for(size_t j=0; j<(size_t)M_s2t.Dim(0); j++){
        Matrix<Real_t> M_trg(n_trg,this->kernel.ker_dim[1],M_s2t[j],false);
        M_trg=M_trg.Transpose();
        cheb_approx<Real_t,Real_t>(M_s2t[j],cheb_deg,this->kernel.ker_dim[1],M[j]);
      }
      #pragma omp critical (PRECOMP_MATRIX_PTS)
      {
        M_s2t.Resize(0,0);
      }
      break;
    }
    case U0_Type:
    {
      // Coord of target points.
      Real_t s=pow(0.5,level);
      int* coord=this->interac_list.RelativeCoord(type,mat_indx);
      Real_t coord_diff[3]={(coord[0]-1)*s*0.5,(coord[1]-1)*s*0.5,(coord[2]-1)*s*0.5};
      std::vector<Real_t>& rel_trg_coord = this->mat->RelativeTrgCoord();
      size_t n_trg = rel_trg_coord.size()/3;
      std::vector<Real_t> trg_coord(n_trg*3);
      for(size_t j=0;j<n_trg;j++){
        trg_coord[j*3+0]=rel_trg_coord[j*3+0]*s-coord_diff[0];
        trg_coord[j*3+1]=rel_trg_coord[j*3+1]*s-coord_diff[1];
        trg_coord[j*3+2]=rel_trg_coord[j*3+2]*s-coord_diff[2];
      }

      // Evaluate potential at target points.
      Matrix<Real_t> M_s2t(n_src*this->kernel.ker_dim[0], n_trg*this->kernel.ker_dim[1]);
      Matrix<Real_t> M_s2t_local(n_src*this->kernel.ker_dim[0], n_trg*this->kernel.ker_dim[1]);
      {
        M_s2t.SetZero();
        M_s2t_local.SetZero();
        size_t cnt_done=0;
        #pragma omp parallel for schedule(dynamic)
        for(size_t i=myrank;i<n_trg;i+=np){
          std::vector<Real_t> s2t=cheb_integ(cheb_deg, &trg_coord[i*3], (Real_t)(s*2.0), this->kernel);
          #ifdef __VERBOSE__
          #pragma omp critical
          if(!myrank){
            cnt_done++;
            std::cout<<"\r Progress: "<<(100*progress*n_trg+100*cnt_done*np)/(class_count*n_trg)<<"% "<<std::flush;
          }
          #endif
          for(int k=0; k<this->kernel.ker_dim[1]; k++)
          for(size_t j=0; j<(size_t)M_s2t.Dim(0); j++)
            M_s2t_local[j][i*this->kernel.ker_dim[1]+k] = s2t[j+k*M_s2t.Dim(0)];
        }
        if(!myrank) std::cout<<"\r \r"<<std::flush;
        MPI_Allreduce(M_s2t_local[0], M_s2t[0], M_s2t.Dim(0)*M_s2t.Dim(1), par::Mpi_datatype<Real_t>::value(), par::Mpi_datatype<Real_t>::sum(), this->comm);
      }

      // Compute Chebyshev approx from target potential.
      M.Resize(M_s2t.Dim(0), n_src*this->kernel.ker_dim[1]);
      #pragma omp parallel for schedule(dynamic)
      for(size_t j=0; j<(size_t)M_s2t.Dim(0); j++){
        Matrix<Real_t> M_trg(n_trg,this->kernel.ker_dim[1],M_s2t[j],false);
        M_trg=M_trg.Transpose();
        cheb_approx<Real_t,Real_t>(M_s2t[j],cheb_deg,this->kernel.ker_dim[1],M[j]);
      }
      break;
    }
    case U1_Type:
    {
      // Coord of target points.
      Real_t s=pow(0.5,level);
      int* coord=this->interac_list.RelativeCoord(type,mat_indx);
      Real_t coord_diff[3]={coord[0]*s,coord[1]*s,coord[2]*s};
      std::vector<Real_t>& rel_trg_coord = this->mat->RelativeTrgCoord();
      size_t n_trg = rel_trg_coord.size()/3;
      std::vector<Real_t> trg_coord(n_trg*3);
      for(size_t j=0;j<n_trg;j++){
        trg_coord[j*3+0]=rel_trg_coord[j*3+0]*s-coord_diff[0];
        trg_coord[j*3+1]=rel_trg_coord[j*3+1]*s-coord_diff[1];
        trg_coord[j*3+2]=rel_trg_coord[j*3+2]*s-coord_diff[2];
      }

      // Evaluate potential at target points.
      Matrix<Real_t> M_s2t(n_src*this->kernel.ker_dim[0], n_trg*this->kernel.ker_dim[1]);
      Matrix<Real_t> M_s2t_local(n_src*this->kernel.ker_dim[0], n_trg*this->kernel.ker_dim[1]);
      {
        M_s2t.SetZero();
        M_s2t_local.SetZero();
        size_t cnt_done=0;
        #pragma omp parallel for schedule(dynamic)
        for(size_t i=myrank;i<n_trg;i+=np){
          std::vector<Real_t> s2t=cheb_integ(cheb_deg, &trg_coord[i*3], s, this->kernel);
          #ifdef __VERBOSE__
          #pragma omp critical
          if(!myrank){
            cnt_done++;
            std::cout<<"\r Progress: "<<(100*progress*n_trg+100*cnt_done*np)/(class_count*n_trg)<<"% "<<std::flush;
          }
          #endif
          for(int k=0; k<this->kernel.ker_dim[1]; k++)
          for(size_t j=0; j<(size_t)M_s2t.Dim(0); j++)
            M_s2t_local[j][i*this->kernel.ker_dim[1]+k] = s2t[j+k*M_s2t.Dim(0)];
        }
        if(!myrank) std::cout<<"\r \r"<<std::flush;
        MPI_Allreduce(M_s2t_local[0], M_s2t[0], M_s2t.Dim(0)*M_s2t.Dim(1), par::Mpi_datatype<Real_t>::value(), par::Mpi_datatype<Real_t>::sum(), this->comm);
      }

      // Compute Chebyshev approx from target potential.
      M.Resize(M_s2t.Dim(0), n_src*this->kernel.ker_dim[1]);
      #pragma omp parallel for schedule(dynamic)
      for(size_t j=0; j<(size_t)M_s2t.Dim(0); j++){
        Matrix<Real_t> M_trg(n_trg,this->kernel.ker_dim[1],M_s2t[j],false);
        M_trg=M_trg.Transpose();
        cheb_approx<Real_t,Real_t>(M_s2t[j],cheb_deg,this->kernel.ker_dim[1],M[j]);
      }
      break;
    }
    case U2_Type:
    {
      // Coord of target points.
      Real_t s=pow(0.5,level);
      int* coord=this->interac_list.RelativeCoord(type,mat_indx);
      Real_t coord_diff[3]={(coord[0]+1)*s*0.25,(coord[1]+1)*s*0.25,(coord[2]+1)*s*0.25};
      std::vector<Real_t>& rel_trg_coord = this->mat->RelativeTrgCoord();
      size_t n_trg = rel_trg_coord.size()/3;
      std::vector<Real_t> trg_coord(n_trg*3);
      for(size_t j=0;j<n_trg;j++){
        trg_coord[j*3+0]=rel_trg_coord[j*3+0]*s-coord_diff[0];
        trg_coord[j*3+1]=rel_trg_coord[j*3+1]*s-coord_diff[1];
        trg_coord[j*3+2]=rel_trg_coord[j*3+2]*s-coord_diff[2];
      }

      // Evaluate potential at target points.
      Matrix<Real_t> M_s2t(n_src*this->kernel.ker_dim[0], n_trg*this->kernel.ker_dim[1]);
      Matrix<Real_t> M_s2t_local(n_src*this->kernel.ker_dim[0], n_trg*this->kernel.ker_dim[1]);
      {
        M_s2t.SetZero();
        M_s2t_local.SetZero();
        size_t cnt_done=0;
        #pragma omp parallel for schedule(dynamic)
        for(size_t i=myrank;i<n_trg;i+=np){
          std::vector<Real_t> s2t=cheb_integ(cheb_deg, &trg_coord[i*3], (Real_t)(s*0.5), this->kernel);
          #ifdef __VERBOSE__
          #pragma omp critical
          if(!myrank){
            cnt_done++;
            std::cout<<"\r Progress: "<<(100*progress*n_trg+100*cnt_done*np)/(class_count*n_trg)<<"% "<<std::flush;
          }
          #endif
          for(int k=0; k<this->kernel.ker_dim[1]; k++)
          for(size_t j=0; j<(size_t)M_s2t.Dim(0); j++)
            M_s2t_local[j][i*this->kernel.ker_dim[1]+k] = s2t[j+k*M_s2t.Dim(0)];
        }
        if(!myrank) std::cout<<"\r \r"<<std::flush;
        MPI_Allreduce(M_s2t_local[0], M_s2t[0], M_s2t.Dim(0)*M_s2t.Dim(1), par::Mpi_datatype<Real_t>::value(), par::Mpi_datatype<Real_t>::sum(), this->comm);
      }

      // Compute Chebyshev approx from target potential.
      M.Resize(M_s2t.Dim(0), n_src*this->kernel.ker_dim[1]);
      #pragma omp parallel for schedule(dynamic)
      for(size_t j=0; j<(size_t)M_s2t.Dim(0); j++){
        Matrix<Real_t> M_trg(n_trg,this->kernel.ker_dim[1],M_s2t[j],false);
        M_trg=M_trg.Transpose();
        cheb_approx<Real_t,Real_t>(M_s2t[j],cheb_deg,this->kernel.ker_dim[1],M[j]);
      }
      break;
    }
    case W_Type:
    {
      if(this->MultipoleOrder()==0) break;
      Matrix<Real_t>& M_s2t=FMM_Pts<FMMNode>::Precomp(level, type, mat_indx);
      int n_trg=M_s2t.Dim(1)/this->kernel.ker_dim[1];

      // Compute Chebyshev approx from target potential.
      M.Resize(M_s2t.Dim(0), n_src*this->kernel.ker_dim[1]);
      #pragma omp parallel for schedule(dynamic)
      for(size_t j=0; j<(size_t)M_s2t.Dim(0); j++){
        Matrix<Real_t> M_trg(n_trg,this->kernel.ker_dim[1],M_s2t[j],false);
        M_trg=M_trg.Transpose();
        cheb_approx<Real_t,Real_t>(M_s2t[j],cheb_deg,this->kernel.ker_dim[1],M[j]);
      }
      #pragma omp critical (PRECOMP_MATRIX_PTS)
      {
        M_s2t.Resize(0,0);
      }
      break;
    }
    case X_Type:
    {
      if(this->MultipoleOrder()==0) break;
      // Coord of target points.
      Real_t s=pow(0.5,level-1);
      int* coord=this->interac_list.RelativeCoord(type,mat_indx);
      Real_t c[3]={-(coord[0]-1)*s*0.25,-(coord[1]-1)*s*0.25,-(coord[2]-1)*s*0.25};
      std::vector<Real_t> trg_coord=d_check_surf(this->MultipoleOrder(),c,level);
      size_t n_trg=trg_coord.size()/3;

      // Evaluate potential at target points.
      Matrix<Real_t> M_xs2c(n_src*this->aux_kernel.ker_dim[0], n_trg*this->aux_kernel.ker_dim[1]);
      Matrix<Real_t> M_xs2c_local(n_src*this->aux_kernel.ker_dim[0], n_trg*this->aux_kernel.ker_dim[1]);
      {
        M_xs2c.SetZero();
        M_xs2c_local.SetZero();
        size_t cnt_done=0;
        #pragma omp parallel for schedule(dynamic)
        for(size_t i=myrank;i<n_trg;i+=np){
          std::vector<Real_t> M_=cheb_integ(cheb_deg, &trg_coord[i*3], s, this->aux_kernel);
          #ifdef __VERBOSE__
          #pragma omp critical
          if(!myrank){
            cnt_done++;
            std::cout<<"\r Progress: "<<(100*progress*n_trg+100*cnt_done*np)/(class_count*n_trg)<<"% "<<std::flush;
          }
          #endif
          for(int k=0; k<this->aux_kernel.ker_dim[1]; k++)
          for(size_t j=0; j<(size_t)M_xs2c.Dim(0); j++)
            M_xs2c_local[j][i*this->aux_kernel.ker_dim[1]+k] = M_[j+k*M_xs2c.Dim(0)];
        }
        if(!myrank) std::cout<<"\r \r"<<std::flush;
        MPI_Allreduce(M_xs2c_local[0], M_xs2c[0], M_xs2c.Dim(0)*M_xs2c.Dim(1), par::Mpi_datatype<Real_t>::value(), par::Mpi_datatype<Real_t>::sum(), this->comm);
      }
      Matrix<Real_t>& M_c2e = this->Precomp(level, DC2DE_Type, 0);
      M=M_xs2c*M_c2e;
      break;
    }
    default:
    {
      return FMM_Pts<FMMNode>::Precomp(level, type, mat_indx);
    }
  }

  //Save the matrix for future use.
  #pragma omp critical (PRECOMP_MATRIX_PTS)
  if(M_.Dim(0)==0 && M_.Dim(1)==0){ M_=M;}
  return M_;
}
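// Note on the S2U and X blocks above (editorial): the source-to-check matrix
// (M_s2c / M_xs2c) maps Chebyshev source coefficients to potentials on a check
// surface and is then composed with the corresponding check-to-equivalent
// operator (UC2UE_Type / DC2DE_Type), so the stored matrix maps coefficients
// directly to equivalent densities, i.e. M = M_s2c * M_c2e.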
template <class FMMNode>
void FMM_Cheb<FMMNode>::CollectNodeData(std::vector<FMMNode*>& node, std::vector<Matrix<Real_t> >& buff, std::vector<Vector<FMMNode_t*> >& n_list, std::vector<size_t> extra_size){
  if(      buff.size()<6)       buff.resize(6);
  if(    n_list.size()<6)     n_list.resize(6);
  if(extra_size.size()<6) extra_size.resize(6,0);
  size_t n_coeff=(cheb_deg+1)*(cheb_deg+2)*(cheb_deg+3)/6;
  if(node.size()==0) return;
  {// 4. cheb_in
    int indx=4;
    int dof=this->kernel.ker_dim[0];
    size_t vec_sz=dof*n_coeff;
    std::vector< FMMNode* > node_lst;
    for(size_t i=0;i<node.size();i++)
      if(node[i]->IsLeaf())
        node_lst.push_back(node[i]);
    n_list[indx]=node_lst;
    extra_size[indx]+=node_lst.size()*vec_sz;
    #pragma omp parallel for
    for(size_t i=0;i<node.size();i++){ // Move data before resizing buff[indx]
      Vector<Real_t>& cheb_in =node[i]->ChebData();
      Vector<Real_t> new_buff=cheb_in;
      cheb_in.Swap(new_buff);
    }
  }
  {// 5. cheb_out
    int indx=5;
    int dof=this->kernel.ker_dim[1];
    size_t vec_sz=dof*n_coeff;
    std::vector< FMMNode* > node_lst;
    for(size_t i=0;i<node.size();i++)
      if(node[i]->IsLeaf() && !node[i]->IsGhost())
        node_lst.push_back(node[i]);
    n_list[indx]=node_lst;
    extra_size[indx]+=node_lst.size()*vec_sz;
    #pragma omp parallel for
    for(size_t i=0;i<node.size();i++){ // Release data before resizing buff[indx]
      Vector<Real_t>& cheb_out=((FMMData*)node[i]->FMMData())->cheb_out;
      cheb_out.ReInit(0);
    }
  }
  FMM_Pts<FMMNode>::CollectNodeData(node, buff, n_list, extra_size);
  {// 4. cheb_in
    int indx=4;
    int dof=this->kernel.ker_dim[0];
    size_t vec_sz=dof*n_coeff;
    Vector< FMMNode* >& node_lst=n_list[indx];
    Real_t* buff_ptr=buff[indx][0]+buff[indx].Dim(0)*buff[indx].Dim(1)-extra_size[indx];
    #pragma omp parallel for
    for(size_t i=0;i<node_lst.Dim();i++){
      Vector<Real_t>& cheb_in =node_lst[i]->ChebData();
      mem::memcopy(buff_ptr+i*vec_sz, &cheb_in[0], cheb_in.Dim()*sizeof(Real_t));
      cheb_in.ReInit(vec_sz, buff_ptr+i*vec_sz, false);
      //if(node_lst[i]->IsGhost()) cheb_in.SetZero();
    }
    buff[indx].AllocDevice(true);
  }
  {// 5. cheb_out
    int indx=5;
    int dof=this->kernel.ker_dim[1];
    size_t vec_sz=dof*n_coeff;
    Vector< FMMNode* >& node_lst=n_list[indx];
    Real_t* buff_ptr=buff[indx][0]+buff[indx].Dim(0)*buff[indx].Dim(1)-extra_size[indx];
    #pragma omp parallel for
    for(size_t i=0;i<node_lst.Dim();i++){
      Vector<Real_t>& cheb_out=((FMMData*)node_lst[i]->FMMData())->cheb_out;
      cheb_out.ReInit(vec_sz, buff_ptr+i*vec_sz, false);
      cheb_out.SetZero();
    }
    buff[indx].AllocDevice(true);
  }
}
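// Memory layout note (editorial): CollectNodeData() above reserves one
// contiguous block of n_coeff*ker_dim coefficients per leaf at the end of
// buff[4] (inputs, ChebData) and buff[5] (outputs, cheb_out). The setup
// routines below then reference these buffers as input_data/output_data
// when building the interaction lists.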
template <class FMMNode>
void FMM_Cheb<FMMNode>::Source2UpSetup(SetupData<Real_t>& setup_data, std::vector<Matrix<Real_t> >& buff, std::vector<Vector<FMMNode_t*> >& n_list, int level, bool device){
  if(this->MultipoleOrder()==0) return;
  FMM_Pts<FMMNode>::Source2UpSetup(setup_data, buff, n_list, level, device);
  { // Set setup_data
    setup_data.level=level;
    setup_data.kernel=&this->aux_kernel;
    setup_data.interac_type.resize(1);
    setup_data.interac_type[0]=S2U_Type;

    setup_data. input_data=&buff[4];
    setup_data.output_data=&buff[0];
    Vector<FMMNode_t*>& nodes_in =n_list[4];
    Vector<FMMNode_t*>& nodes_out=n_list[0];

    setup_data.nodes_in .clear();
    setup_data.nodes_out.clear();
    for(size_t i=0;i<nodes_in .Dim();i++) if(nodes_in [i]->Depth()==level || level==-1) setup_data.nodes_in .push_back(nodes_in [i]);
    for(size_t i=0;i<nodes_out.Dim();i++) if(nodes_out[i]->Depth()==level || level==-1) setup_data.nodes_out.push_back(nodes_out[i]);
  }

  std::vector<void*>& nodes_in =setup_data.nodes_in ;
  std::vector<void*>& nodes_out=setup_data.nodes_out;
  std::vector<Vector<Real_t>*>&  input_vector=setup_data. input_vector;  input_vector.clear();
  std::vector<Vector<Real_t>*>& output_vector=setup_data.output_vector; output_vector.clear();
  for(size_t i=0;i<nodes_in .size();i++)  input_vector.push_back(&(((FMMNode*)nodes_in [i]))->ChebData());
  for(size_t i=0;i<nodes_out.size();i++) output_vector.push_back(&((FMMData*)((FMMNode*)nodes_out[i])->FMMData())->upward_equiv);

  this->SetupInterac(setup_data,device);
}

template <class FMMNode>
void FMM_Cheb<FMMNode>::Source2Up(SetupData<Real_t>& setup_data, bool device){
  //Add Source2Up contribution.
  this->EvalList(setup_data, device);
}

template <class FMMNode>
void FMM_Cheb<FMMNode>::X_ListSetup(SetupData<Real_t>& setup_data, std::vector<Matrix<Real_t> >& buff, std::vector<Vector<FMMNode_t*> >& n_list, int level, bool device){
  if(this->MultipoleOrder()==0) return;
  FMM_Pts<FMMNode>::X_ListSetup(setup_data, buff, n_list, level, device);
  { // Set setup_data
    setup_data.level=level;
    setup_data.kernel=&this->aux_kernel;
    setup_data.interac_type.resize(1);
    setup_data.interac_type[0]=X_Type;

    setup_data. input_data=&buff[4];
    setup_data.output_data=&buff[1];
    Vector<FMMNode_t*>& nodes_in =n_list[4];
    Vector<FMMNode_t*>& nodes_out=n_list[1];

    setup_data.nodes_in .clear();
    setup_data.nodes_out.clear();
    for(size_t i=0;i<nodes_in .Dim();i++) if(nodes_in [i]->Depth()==level-1 || level==-1) setup_data.nodes_in .push_back(nodes_in [i]);
    for(size_t i=0;i<nodes_out.Dim();i++) if(nodes_out[i]->Depth()==level   || level==-1) setup_data.nodes_out.push_back(nodes_out[i]);
  }

  std::vector<void*>& nodes_in =setup_data.nodes_in ;
  std::vector<void*>& nodes_out=setup_data.nodes_out;
  std::vector<Vector<Real_t>*>&  input_vector=setup_data. input_vector;  input_vector.clear();
  std::vector<Vector<Real_t>*>& output_vector=setup_data.output_vector; output_vector.clear();
  for(size_t i=0;i<nodes_in .size();i++)  input_vector.push_back(&(((FMMNode*)nodes_in [i]))->ChebData());
  for(size_t i=0;i<nodes_out.size();i++) output_vector.push_back(&((FMMData*)((FMMNode*)nodes_out[i])->FMMData())->dnward_equiv);

  this->SetupInterac(setup_data,device);
  { // Resize device buffer
    size_t n=setup_data.output_data->Dim(0)*setup_data.output_data->Dim(1)*sizeof(Real_t);
    if(this->dev_buffer.Dim()<n) this->dev_buffer.Resize(n);
  }
}

template <class FMMNode>
void FMM_Cheb<FMMNode>::X_List(SetupData<Real_t>& setup_data, bool device){
  //Add X_List contribution.
  FMM_Pts<FMMNode>::X_List(setup_data, device);
  this->EvalList(setup_data, device);
}

template <class FMMNode>
void FMM_Cheb<FMMNode>::W_ListSetup(SetupData<Real_t>& setup_data, std::vector<Matrix<Real_t> >& buff, std::vector<Vector<FMMNode_t*> >& n_list, int level, bool device){
  if(this->MultipoleOrder()==0) return;
  { // Set setup_data
    setup_data.level=level;
    setup_data.kernel=&this->kernel;
    setup_data.interac_type.resize(1);
    setup_data.interac_type[0]=W_Type;

    setup_data. input_data=&buff[0];
    setup_data.output_data=&buff[5];
    Vector<FMMNode_t*>& nodes_in =n_list[0];
    Vector<FMMNode_t*>& nodes_out=n_list[5];

    setup_data.nodes_in .clear();
    setup_data.nodes_out.clear();
    for(size_t i=0;i<nodes_in .Dim();i++) if(nodes_in [i]->Depth()==level+1 || level==-1) setup_data.nodes_in .push_back(nodes_in [i]);
    for(size_t i=0;i<nodes_out.Dim();i++) if(nodes_out[i]->Depth()==level   || level==-1) setup_data.nodes_out.push_back(nodes_out[i]);
  }

  std::vector<void*>& nodes_in =setup_data.nodes_in ;
  std::vector<void*>& nodes_out=setup_data.nodes_out;
  std::vector<Vector<Real_t>*>&  input_vector=setup_data. input_vector;  input_vector.clear();
  std::vector<Vector<Real_t>*>& output_vector=setup_data.output_vector; output_vector.clear();
  for(size_t i=0;i<nodes_in .size();i++)  input_vector.push_back(&((FMMData*)((FMMNode*)nodes_in [i])->FMMData())->upward_equiv);
  for(size_t i=0;i<nodes_out.size();i++) output_vector.push_back(&((FMMData*)((FMMNode*)nodes_out[i])->FMMData())->cheb_out);

  this->SetupInterac(setup_data,device);
  { // Resize device buffer
    size_t n=setup_data.output_data->Dim(0)*setup_data.output_data->Dim(1)*sizeof(Real_t);
    if(this->dev_buffer.Dim()<n) this->dev_buffer.Resize(n);
  }
}

template <class FMMNode>
void FMM_Cheb<FMMNode>::W_List(SetupData<Real_t>& setup_data, bool device){
  //Add W_List contribution.
  this->EvalList(setup_data, device);
}

template <class FMMNode>
void FMM_Cheb<FMMNode>::U_ListSetup(SetupData<Real_t>& setup_data, std::vector<Matrix<Real_t> >& buff, std::vector<Vector<FMMNode_t*> >& n_list, int level, bool device){
  FMM_Pts<FMMNode>::U_ListSetup(setup_data, buff, n_list, level, device);
  { // Set setup_data
    setup_data.level=level;
    setup_data.kernel=&this->kernel;
    setup_data.interac_type.resize(3);
    setup_data.interac_type[0]=U0_Type;
    setup_data.interac_type[1]=U1_Type;
    setup_data.interac_type[2]=U2_Type;

    setup_data. input_data=&buff[4];
    setup_data.output_data=&buff[5];
    Vector<FMMNode_t*>& nodes_in =n_list[4];
    Vector<FMMNode_t*>& nodes_out=n_list[5];

    setup_data.nodes_in .clear();
    setup_data.nodes_out.clear();
    for(size_t i=0;i<nodes_in .Dim();i++) if((level-1<=nodes_in [i]->Depth() && nodes_in [i]->Depth()<=level+1) || level==-1) setup_data.nodes_in .push_back(nodes_in [i]);
    for(size_t i=0;i<nodes_out.Dim();i++) if((nodes_out[i]->Depth()==level) || level==-1) setup_data.nodes_out.push_back(nodes_out[i]);
  }

  std::vector<void*>& nodes_in =setup_data.nodes_in ;
  std::vector<void*>& nodes_out=setup_data.nodes_out;
  std::vector<Vector<Real_t>*>&  input_vector=setup_data. input_vector;  input_vector.clear();
  std::vector<Vector<Real_t>*>& output_vector=setup_data.output_vector; output_vector.clear();
  for(size_t i=0;i<nodes_in .size();i++)  input_vector.push_back(&(((FMMNode*)nodes_in [i]))->ChebData());
  for(size_t i=0;i<nodes_out.size();i++) output_vector.push_back(&((FMMData*)((FMMNode*)nodes_out[i])->FMMData())->cheb_out);

  this->SetupInterac(setup_data,device);
  { // Resize device buffer
    size_t n=setup_data.output_data->Dim(0)*setup_data.output_data->Dim(1)*sizeof(Real_t);
    if(this->dev_buffer.Dim()<n) this->dev_buffer.Resize(n);
  }
}

template <class FMMNode>
void FMM_Cheb<FMMNode>::U_List(SetupData<Real_t>& setup_data, bool device){
  //Add U_List contribution.
  FMM_Pts<FMMNode>::U_List(setup_data, device);
  this->EvalList(setup_data, device);
}

template <class FMMNode>
void FMM_Cheb<FMMNode>::Down2TargetSetup(SetupData<Real_t>& setup_data, std::vector<Matrix<Real_t> >& buff, std::vector<Vector<FMMNode_t*> >& n_list, int level, bool device){
  if(this->MultipoleOrder()==0) return;
  { // Set setup_data
    setup_data.level=level;
    setup_data.kernel=&this->kernel;
    setup_data.interac_type.resize(1);
    setup_data.interac_type[0]=D2T_Type;

    setup_data. input_data=&buff[1];
    setup_data.output_data=&buff[5];
    Vector<FMMNode_t*>& nodes_in =n_list[1];
    Vector<FMMNode_t*>& nodes_out=n_list[5];

    setup_data.nodes_in .clear();
    setup_data.nodes_out.clear();
    for(size_t i=0;i<nodes_in .Dim();i++) if(nodes_in [i]->Depth()==level || level==-1) setup_data.nodes_in .push_back(nodes_in [i]);
    for(size_t i=0;i<nodes_out.Dim();i++) if(nodes_out[i]->Depth()==level || level==-1) setup_data.nodes_out.push_back(nodes_out[i]);
  }

  std::vector<void*>& nodes_in =setup_data.nodes_in ;
  std::vector<void*>& nodes_out=setup_data.nodes_out;
  std::vector<Vector<Real_t>*>&  input_vector=setup_data. input_vector;  input_vector.clear();
  std::vector<Vector<Real_t>*>& output_vector=setup_data.output_vector; output_vector.clear();
  for(size_t i=0;i<nodes_in .size();i++)  input_vector.push_back(&((FMMData*)((FMMNode*)nodes_in [i])->FMMData())->dnward_equiv);
  for(size_t i=0;i<nodes_out.size();i++) output_vector.push_back(&((FMMData*)((FMMNode*)nodes_out[i])->FMMData())->cheb_out);

  this->SetupInterac(setup_data,device);
}

template <class FMMNode>
void FMM_Cheb<FMMNode>::Down2Target(SetupData<Real_t>& setup_data, bool device){
  //Add Down2Target contribution.
  this->EvalList(setup_data, device);
}
template <class FMMNode>
void FMM_Cheb<FMMNode>::PostProcessing(std::vector<FMMNode_t*>& nodes){
  size_t n=nodes.size();
  #pragma omp parallel
  {
    int omp_p=omp_get_num_threads();
    int pid = omp_get_thread_num();
    size_t a=(pid*n)/omp_p;
    size_t b=((pid+1)*n)/omp_p;

    //For each node, evaluate the Chebyshev output at its target points.
    for(size_t i=a;i<b;i++){
      Vector<Real_t>& trg_coord=nodes[i]->trg_coord;
      Vector<Real_t>& trg_value=nodes[i]->trg_value;
      Vector<Real_t>& cheb_out =((FMMData*)nodes[i]->FMMData())->cheb_out;

      //Initialize target potential.
      size_t trg_cnt=trg_coord.Dim()/COORD_DIM;
      //trg_value.assign(trg_cnt*dof*this->kernel.ker_dim[1],0);

      //Sample local expansion at target points.
      if(trg_cnt>0 && cheb_out.Dim()>0){
        Real_t* c=nodes[i]->Coord();
        Real_t scale=pow(2.0,nodes[i]->Depth()+1);
        std::vector<Real_t> rel_coord(COORD_DIM*trg_cnt);
        for(size_t j=0;j<trg_cnt;j++) for(int k=0;k<COORD_DIM;k++)
          rel_coord[j*COORD_DIM+k]=(trg_coord[j*COORD_DIM+k]-c[k])*scale-1.0;
        cheb_eval(cheb_out, cheb_deg, rel_coord, trg_value);
      }
    }
  }
}
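// Note (editorial): the rel_coord mapping above rescales each target point into
// the reference cube [-1,1]^3 of its leaf (scale = 2/side_length, with
// side_length = 2^-depth), which is the domain on which cheb_eval() evaluates
// the stored Chebyshev expansion cheb_out.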
}//end namespace