/**
 * \file matrix.txx
 * \author Dhairya Malhotra, dhairya.malhotra@gmail.com
 * \date 2-11-2011
 * \brief This file contains the implementation of the class Matrix.
 */

#include <omp.h>
#include <cmath>
#include <cstdio>
#include <cstdlib>
#include <cstring>
#include <cassert>
#include <iostream>
#include <iomanip>

#include <device_wrapper.hpp>
#include <mat_utils.hpp>
#include <mem_mgr.hpp>
#include <profile.hpp>

namespace pvfmm{

template <class T>
std::ostream& operator<<(std::ostream& output, const Matrix<T>& M){
  std::ios::fmtflags f(output.flags());
  output<<std::fixed<<std::setprecision(4)<<std::setiosflags(std::ios::left);
  for(size_t i=0;i<M.Dim(0);i++){
    for(size_t j=0;j<M.Dim(1);j++){
      float val=((float)M(i,j));
      if(fabs(val)<1e-25) val=0;
      output<<std::setw(10)<<((double)val)<<' ';
    }
    output<<";\n";
  }
  output.flags(f);
  return output;
}

template <class T>
Matrix<T>::Matrix(){
  dim[0]=0;
  dim[1]=0;
  own_data=true;
  data_ptr=NULL;
  dev.dev_ptr=(uintptr_t)NULL;
}

template <class T>
Matrix<T>::Matrix(size_t dim1, size_t dim2, T* data_, bool own_data_){
  dim[0]=dim1;
  dim[1]=dim2;
  own_data=own_data_;
  if(own_data){
    if(dim[0]*dim[1]>0){
      data_ptr=mem::aligned_new<T>(dim[0]*dim[1]);
#if !defined(__MIC__) || !defined(__INTEL_OFFLOAD)
      Profile::Add_MEM(dim[0]*dim[1]*sizeof(T));
#endif
      if(data_!=NULL) mem::memcopy(data_ptr,data_,dim[0]*dim[1]*sizeof(T));
    }else data_ptr=NULL;
  }else
    data_ptr=data_;
  dev.dev_ptr=(uintptr_t)NULL;
}

template <class T>
Matrix<T>::Matrix(const Matrix<T>& M){
  dim[0]=M.dim[0];
  dim[1]=M.dim[1];
  own_data=true;
  if(dim[0]*dim[1]>0){
    data_ptr=mem::aligned_new<T>(dim[0]*dim[1]);
#if !defined(__MIC__) || !defined(__INTEL_OFFLOAD)
    Profile::Add_MEM(dim[0]*dim[1]*sizeof(T));
#endif
    mem::memcopy(data_ptr,M.data_ptr,dim[0]*dim[1]*sizeof(T));
  }else
    data_ptr=NULL;
  dev.dev_ptr=(uintptr_t)NULL;
}

template <class T>
Matrix<T>::~Matrix(){
  FreeDevice(false);
  if(own_data){
    if(data_ptr!=NULL){
      mem::aligned_delete(data_ptr);
#if !defined(__MIC__) || !defined(__INTEL_OFFLOAD)
      Profile::Add_MEM(-dim[0]*dim[1]*sizeof(T));
#endif
    }
  }
  data_ptr=NULL;
  dim[0]=0;
  dim[1]=0;
}

template <class T>
void Matrix<T>::Swap(Matrix<T>& M){
  size_t dim_[2]={dim[0],dim[1]};
  T* data_ptr_=data_ptr;
  bool own_data_=own_data;
  Device dev_=dev;
  Vector<char> dev_sig_=dev_sig;

  dim[0]=M.dim[0];
  dim[1]=M.dim[1];
  data_ptr=M.data_ptr;
  own_data=M.own_data;
  dev=M.dev;
  dev_sig=M.dev_sig;

  M.dim[0]=dim_[0];
  M.dim[1]=dim_[1];
  M.data_ptr=data_ptr_;
  M.own_data=own_data_;
  M.dev=dev_;
  M.dev_sig=dev_sig_;
}

template <class T>
void Matrix<T>::ReInit(size_t dim1, size_t dim2, T* data_, bool own_data_){
  Matrix<T> tmp(dim1,dim2,data_,own_data_);
  this->Swap(tmp);
}
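
// Example (illustrative sketch; the names A, B and buf are hypothetical):
// constructing a matrix that owns its memory, a matrix that only wraps an
// existing buffer (own_data_=false), and re-allocating with ReInit.
//
//   pvfmm::Matrix<double> A(3,4);             // allocates 3x4, uninitialized
//   A.SetZero();
//
//   double buf[12];
//   pvfmm::Matrix<double> B(3,4,buf,false);   // view over buf; not freed by B
//
//   A.ReInit(5,5,NULL,true);                  // discard old contents, allocate a fresh 5x5 block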

template <class T>
typename Matrix<T>::Device& Matrix<T>::AllocDevice(bool copy){
  size_t len=dim[0]*dim[1];
  if(dev.dev_ptr==(uintptr_t)NULL && len>0) // Allocate data on device.
    dev.dev_ptr=DeviceWrapper::alloc_device((char*)data_ptr, len*sizeof(T));
  if(dev.dev_ptr!=(uintptr_t)NULL && copy) // Copy data to device
    dev.lock_idx=DeviceWrapper::host2device((char*)data_ptr,(char*)data_ptr,dev.dev_ptr,len*sizeof(T));
  dev.dim[0]=dim[0];
  dev.dim[1]=dim[1];
  return dev;
}

template <class T>
void Matrix<T>::Device2Host(T* host_ptr){
  dev.lock_idx=DeviceWrapper::device2host((char*)data_ptr,dev.dev_ptr,(char*)(host_ptr==NULL?data_ptr:host_ptr),dim[0]*dim[1]*sizeof(T));
  //#if defined(PVFMM_HAVE_CUDA)
  //  cudaEventCreate(&lock);
  //  cudaEventRecord(lock, 0);
  //#endif
}

template <class T>
void Matrix<T>::Device2HostWait(){
  //#if defined(PVFMM_HAVE_CUDA)
  //  cudaEventSynchronize(lock);
  //  cudaEventDestroy(lock);
  //#endif
  DeviceWrapper::wait(dev.lock_idx);
  dev.lock_idx=-1;
}

template <class T>
void Matrix<T>::FreeDevice(bool copy){
  if(dev.dev_ptr==(uintptr_t)NULL) return;
  if(copy) DeviceWrapper::device2host((char*)data_ptr,dev.dev_ptr,(char*)data_ptr,dim[0]*dim[1]*sizeof(T));
  DeviceWrapper::free_device((char*)data_ptr, dev.dev_ptr);
  dev.dev_ptr=(uintptr_t)NULL;
  dev.dim[0]=0;
  dev.dim[1]=0;
}
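
// Example (illustrative sketch; the name A is hypothetical): a typical round
// trip through the device interface. Whether a real accelerator is involved
// depends on how DeviceWrapper was configured at build time.
//
//   pvfmm::Matrix<double> A(100,100);
//   A.SetZero();
//   pvfmm::Matrix<double>::Device& dev=A.AllocDevice(true); // allocate on device, copy host -> device
//   // ... run device computation against dev.dev_ptr ...
//   A.Device2Host();       // schedule the copy device -> host
//   A.Device2HostWait();   // block until that copy has completed
//   A.FreeDevice(false);   // release device memory without copying back again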

template <class T>
void Matrix<T>::Write(const char* fname){
  FILE* f1=fopen(fname,"wb+");
  if(f1==NULL){
    std::cout<<"Unable to open file for writing:"<<fname<<'\n';
    return;
  }
  size_t dim_[2]={dim[0],dim[1]};
  fwrite(dim_,sizeof(size_t),2,f1);
  fwrite(data_ptr,sizeof(T),dim[0]*dim[1],f1);
  fclose(f1);
}

template <class T>
size_t Matrix<T>::Dim(size_t i) const{
  return dim[i];
}

template <class T>
void Matrix<T>::Resize(size_t i, size_t j){
  if(dim[0]==i && dim[1]==j) return;
  FreeDevice(false);
  if(own_data){
    if(data_ptr!=NULL){
      mem::aligned_delete(data_ptr);
#if !defined(__MIC__) || !defined(__INTEL_OFFLOAD)
      Profile::Add_MEM(-dim[0]*dim[1]*sizeof(T));
#endif
    }
  }
  dim[0]=i;
  dim[1]=j;
  if(own_data){
    if(dim[0]*dim[1]>0){
      data_ptr=mem::aligned_new<T>(dim[0]*dim[1]);
#if !defined(__MIC__) || !defined(__INTEL_OFFLOAD)
      Profile::Add_MEM(dim[0]*dim[1]*sizeof(T));
#endif
    }else
      data_ptr=NULL;
  }
}

template <class T>
void Matrix<T>::SetZero(){
  if(dim[0]*dim[1]>0)
    memset(data_ptr,0,dim[0]*dim[1]*sizeof(T));
}

template <class T>
Matrix<T>& Matrix<T>::operator=(const Matrix<T>& M){
  if(this!=&M){
    FreeDevice(false);
    if(own_data && dim[0]*dim[1]!=M.dim[0]*M.dim[1]){
      if(data_ptr!=NULL){
        mem::aligned_delete(data_ptr); data_ptr=NULL;
#if !defined(__MIC__) || !defined(__INTEL_OFFLOAD)
        Profile::Add_MEM(-dim[0]*dim[1]*sizeof(T));
#endif
      }
      if(M.dim[0]*M.dim[1]>0){
        data_ptr=mem::aligned_new<T>(M.dim[0]*M.dim[1]);
#if !defined(__MIC__) || !defined(__INTEL_OFFLOAD)
        Profile::Add_MEM(M.dim[0]*M.dim[1]*sizeof(T));
#endif
      }
    }
    dim[0]=M.dim[0];
    dim[1]=M.dim[1];
    mem::memcopy(data_ptr,M.data_ptr,dim[0]*dim[1]*sizeof(T));
  }
  return *this;
}

template <class T>
Matrix<T>& Matrix<T>::operator+=(const Matrix<T>& M){
  assert(M.Dim(0)==Dim(0) && M.Dim(1)==Dim(1));
  Profile::Add_FLOP(dim[0]*dim[1]);
  for(size_t i=0;i<M.Dim(0)*M.Dim(1);i++)
    data_ptr[i]+=M.data_ptr[i];
  return *this;
}

template <class T>
Matrix<T>& Matrix<T>::operator-=(const Matrix<T>& M){
  assert(M.Dim(0)==Dim(0) && M.Dim(1)==Dim(1));
  Profile::Add_FLOP(dim[0]*dim[1]);
  for(size_t i=0;i<M.Dim(0)*M.Dim(1);i++)
    data_ptr[i]-=M.data_ptr[i];
  return *this;
}

template <class T>
Matrix<T> Matrix<T>::operator+(const Matrix<T>& M2){
  Matrix<T>& M1=*this;
  assert(M2.Dim(0)==M1.Dim(0) && M2.Dim(1)==M1.Dim(1));
  Profile::Add_FLOP(dim[0]*dim[1]);

  Matrix<T> M_r(M1.Dim(0),M1.Dim(1),NULL);
  for(size_t i=0;i<M1.Dim(0)*M1.Dim(1);i++)
    M_r[0][i]=M1[0][i]+M2[0][i];
  return M_r;
}

template <class T>
Matrix<T> Matrix<T>::operator-(const Matrix<T>& M2){
  Matrix<T>& M1=*this;
  assert(M2.Dim(0)==M1.Dim(0) && M2.Dim(1)==M1.Dim(1));
  Profile::Add_FLOP(dim[0]*dim[1]);

  Matrix<T> M_r(M1.Dim(0),M1.Dim(1),NULL);
  for(size_t i=0;i<M1.Dim(0)*M1.Dim(1);i++)
    M_r[0][i]=M1[0][i]-M2[0][i];
  return M_r;
}

template <class T>
inline T& Matrix<T>::operator()(size_t i,size_t j) const{
  assert(i<dim[0] && j<dim[1]);
  return data_ptr[i*dim[1]+j];
}

template <class T>
inline T* Matrix<T>::operator[](size_t i) const{
  assert(i<dim[0]);
  return &data_ptr[i*dim[1]];
}

template <class T>
Matrix<T> Matrix<T>::operator*(const Matrix<T>& M){
  assert(dim[1]==M.dim[0]);
  Profile::Add_FLOP(2*(((long long)dim[0])*dim[1])*M.dim[1]);

  Matrix<T> M_r(dim[0],M.dim[1],NULL);
  if(M.Dim(0)*M.Dim(1)==0 || this->Dim(0)*this->Dim(1)==0) return M_r;
  mat::gemm<T>('N','N',M.dim[1],dim[0],dim[1],
      1.0,M.data_ptr,M.dim[1],data_ptr,dim[1],0.0,M_r.data_ptr,M_r.dim[1]);
  return M_r;
}

template <class T>
void Matrix<T>::GEMM(Matrix<T>& M_r, const Matrix<T>& A, const Matrix<T>& B, T beta){
  if(A.Dim(0)*A.Dim(1)==0 || B.Dim(0)*B.Dim(1)==0) return;
  assert(A.dim[1]==B.dim[0]);
  assert(M_r.dim[0]==A.dim[0]);
  assert(M_r.dim[1]==B.dim[1]);
#if !defined(__MIC__) || !defined(__INTEL_OFFLOAD)
  Profile::Add_FLOP(2*(((long long)A.dim[0])*A.dim[1])*B.dim[1]);
#endif
  mat::gemm<T>('N','N',B.dim[1],A.dim[0],A.dim[1],
      1.0,B.data_ptr,B.dim[1],A.data_ptr,A.dim[1],beta,M_r.data_ptr,M_r.dim[1]);
}
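
// Example (illustrative sketch; A, B, C and D are hypothetical): dense products
// with operator* (which allocates the result) and GEMM (which writes into a
// pre-sized result, scaling its previous contents by beta).
//
//   pvfmm::Matrix<double> A(4,8), B(8,3);
//   A.SetZero(); B.SetZero();
//   pvfmm::Matrix<double> C=A*B;                  // C is 4x3
//   pvfmm::Matrix<double> D(4,3); D.SetZero();
//   pvfmm::Matrix<double>::GEMM(D,A,B,0.0);       // D = A*B + 0.0*D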

// cublasgemm wrapper
#if defined(PVFMM_HAVE_CUDA)
template <class T>
void Matrix<T>::CUBLASGEMM(Matrix<T>& M_r, const Matrix<T>& A, const Matrix<T>& B, T beta){
  if(A.Dim(0)*A.Dim(1)==0 || B.Dim(0)*B.Dim(1)==0) return;
  assert(A.dim[1]==B.dim[0]);
  assert(M_r.dim[0]==A.dim[0]);
  assert(M_r.dim[1]==B.dim[1]);
  Profile::Add_FLOP(2*(((long long)A.dim[0])*A.dim[1])*B.dim[1]);
  mat::cublasgemm('N', 'N', B.dim[1], A.dim[0], A.dim[1],
      1.0, B.data_ptr, B.dim[1], A.data_ptr, A.dim[1], beta, M_r.data_ptr, M_r.dim[1]);
}
#endif

#define myswap(t,a,b) {t c=a;a=b;b=c;}

template <class T>
void Matrix<T>::RowPerm(const Permutation<T>& P){
  Matrix<T>& M=*this;
  if(P.Dim()==0) return;
  assert(M.Dim(0)==P.Dim());
  size_t d0=M.Dim(0);
  size_t d1=M.Dim(1);

#pragma omp parallel for
  for(size_t i=0;i<d0;i++){
    T* M_=M[i];
    const T s=P.scal[i];
    for(size_t j=0;j<d1;j++) M_[j]*=s;
  }

  Permutation<T> P_=P;
  for(size_t i=0;i<d0;i++)
    while(P_.perm[i]!=i){
      size_t a=P_.perm[i];
      size_t b=i;
      T* M_a=M[a];
      T* M_b=M[b];
      myswap(size_t,P_.perm[a],P_.perm[b]);
      for(size_t j=0;j<d1;j++)
        myswap(T,M_a[j],M_b[j]);
    }
}

template <class T>
void Matrix<T>::ColPerm(const Permutation<T>& P){
  Matrix<T>& M=*this;
  if(P.Dim()==0) return;
  assert(M.Dim(1)==P.Dim());
  size_t d0=M.Dim(0);
  size_t d1=M.Dim(1);

  int omp_p=omp_get_max_threads();
  Matrix<T> M_buff(omp_p,d1);
  const PERM_INT_T* perm_=&(P.perm[0]);
  const T* scal_=&(P.scal[0]);
#pragma omp parallel for
  for(size_t i=0;i<d0;i++){
    int pid=omp_get_thread_num();
    T* buff=&M_buff[pid][0];
    T* M_=M[i];
    for(size_t j=0;j<d1;j++)
      buff[j]=M_[j];
    for(size_t j=0;j<d1;j++){
      M_[j]=buff[perm_[j]]*scal_[j];
    }
  }
}
#undef myswap
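
// Example (illustrative sketch; M, Pr and Pc are hypothetical): in-place row
// and column permutation. M.RowPerm(P) appears to match the out-of-place
// product P*M defined further below, and M.ColPerm(P) the product M*P, but
// without allocating a new matrix.
//
//   pvfmm::Matrix<double> M(5,7);
//   M.SetZero();
//   pvfmm::Permutation<double> Pr=pvfmm::Permutation<double>::RandPerm(5);
//   pvfmm::Permutation<double> Pc=pvfmm::Permutation<double>::RandPerm(7);
//   M.RowPerm(Pr);   // rows scaled by Pr.scal and reordered by Pr.perm
//   M.ColPerm(Pc);   // columns scaled by Pc.scal and reordered by Pc.perm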

#define B1 128
#define B2 32

template <class T>
Matrix<T> Matrix<T>::Transpose(){
  Matrix<T>& M=*this;
  size_t d0=M.dim[0];
  size_t d1=M.dim[1];
  Matrix<T> M_r(d1,d0,NULL);

  const size_t blk0=((d0+B1-1)/B1);
  const size_t blk1=((d1+B1-1)/B1);
  const size_t blks=blk0*blk1;
// #pragma omp parallel for
  for(size_t k=0;k<blks;k++){
    size_t i=(k%blk0)*B1;
    size_t j=(k/blk0)*B1;
    // for(size_t i=0;i<d0;i+=B1)
    // for(size_t j=0;j<d1;j+=B1){
    size_t d0_=i+B1; if(d0_>=d0) d0_=d0;
    size_t d1_=j+B1; if(d1_>=d1) d1_=d1;
    for(size_t ii=i;ii<d0_;ii+=B2)
    for(size_t jj=j;jj<d1_;jj+=B2){
      size_t d0__=ii+B2; if(d0__>=d0) d0__=d0;
      size_t d1__=jj+B2; if(d1__>=d1) d1__=d1;
      for(size_t iii=ii;iii<d0__;iii++)
      for(size_t jjj=jj;jjj<d1__;jjj++){
        M_r[jjj][iii]=M[iii][jjj];
      }
    }
  }
  // for(size_t i=0;i<d0;i++)
  // for(size_t j=0;j<d1;j++)
  //   M_r[j][i]=M[i][j];
  return M_r;
}

template <class T>
void Matrix<T>::Transpose(Matrix<T>& M_r, const Matrix<T>& M){
  size_t d0=M.dim[0];
  size_t d1=M.dim[1];
  M_r.Resize(d1, d0);

  const size_t blk0=((d0+B1-1)/B1);
  const size_t blk1=((d1+B1-1)/B1);
  const size_t blks=blk0*blk1;
#pragma omp parallel for
  for(size_t k=0;k<blks;k++){
    size_t i=(k%blk0)*B1;
    size_t j=(k/blk0)*B1;
    // for(size_t i=0;i<d0;i+=B1)
    // for(size_t j=0;j<d1;j+=B1){
    size_t d0_=i+B1; if(d0_>=d0) d0_=d0;
    size_t d1_=j+B1; if(d1_>=d1) d1_=d1;
    for(size_t ii=i;ii<d0_;ii+=B2)
    for(size_t jj=j;jj<d1_;jj+=B2){
      size_t d0__=ii+B2; if(d0__>=d0) d0__=d0;
      size_t d1__=jj+B2; if(d1__>=d1) d1__=d1;
      for(size_t iii=ii;iii<d0__;iii++)
      for(size_t jjj=jj;jjj<d1__;jjj++){
        M_r[jjj][iii]=M[iii][jjj];
      }
    }
  }
}
#undef B2
#undef B1
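
// Example (illustrative sketch; A, At and At2 are hypothetical): both forms of
// the cache-blocked transpose; the static overload reuses (and resizes) an
// existing result matrix.
//
//   pvfmm::Matrix<double> A(200,300);
//   A.SetZero();
//   pvfmm::Matrix<double> At=A.Transpose();       // At is 300x200
//   pvfmm::Matrix<double> At2;
//   pvfmm::Matrix<double>::Transpose(At2,A);      // At2 resized to 300x200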

template <class T>
void Matrix<T>::SVD(Matrix<T>& tU, Matrix<T>& tS, Matrix<T>& tVT){
  pvfmm::Matrix<T>& M=*this;
  pvfmm::Matrix<T> M_=M;
  int n=M.Dim(0);
  int m=M.Dim(1);
  int k = (m<n?m:n);
  tU.Resize(n,k); tU.SetZero();
  tS.Resize(k,k); tS.SetZero();
  tVT.Resize(k,m); tVT.SetZero();

  //SVD
  int INFO=0;
  char JOBU  = 'S';
  char JOBVT = 'S';
  int wssize = 3*(m<n?m:n)+(m>n?m:n);
  int wssize1 = 5*(m<n?m:n);
  wssize = (wssize>wssize1?wssize:wssize1);
  T* wsbuf = mem::aligned_new<T>(wssize);
  pvfmm::mat::svd(&JOBU, &JOBVT, &m, &n, &M[0][0], &m, &tS[0][0], &tVT[0][0], &m, &tU[0][0], &k, wsbuf, &wssize, &INFO);
  mem::aligned_delete<T>(wsbuf);
  if(INFO!=0) std::cout<<INFO<<'\n';
  assert(INFO==0);

  for(size_t i=1;i<k;i++){
    tS[i][i]=tS[0][i];
    tS[0][i]=0;
  }
  //std::cout<<tU*tS*tVT-M_<<'\n';
}
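
// Example (illustrative sketch; M, U, S and VT are hypothetical): thin SVD of
// an n x m matrix, giving U (n x k), S (k x k, diagonal) and VT (k x m) with
// k = min(n,m), so that the original M is approximately U*S*VT. The calling
// matrix is handed to the LAPACK routine and should be treated as overwritten.
//
//   pvfmm::Matrix<double> M(6,4);
//   M.SetZero();
//   pvfmm::Matrix<double> U,S,VT;
//   M.SVD(U,S,VT);
//   std::cout<<U*S*VT;   // reconstruction of the original M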

template <class T>
Matrix<T> Matrix<T>::pinv(T eps){
  if(eps<0){
    eps=1.0;
    while(eps+(T)1.0>1.0) eps*=0.5;
    eps=sqrt(eps);
  }
  Matrix<T> M_r(dim[1],dim[0]);
  mat::pinv(data_ptr,dim[0],dim[1],eps,M_r.data_ptr);
  this->Resize(0,0);
  return M_r;
}
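
// Example (illustrative sketch; A and Ainv are hypothetical): pseudo-inverse.
// Passing a negative eps selects a tolerance of sqrt(machine epsilon); note
// that the source matrix is resized to 0x0 and its contents discarded.
//
//   pvfmm::Matrix<double> A(10,4);
//   A.SetZero();
//   pvfmm::Matrix<double> Ainv=A.pinv(-1.0);   // Ainv is 4x10; A is now empty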

template <class T>
std::ostream& operator<<(std::ostream& output, const Permutation<T>& P){
  output<<std::setprecision(4)<<std::setiosflags(std::ios::left);
  size_t size=P.perm.Dim();
  for(size_t i=0;i<size;i++) output<<std::setw(10)<<P.perm[i]<<' ';
  output<<";\n";
  for(size_t i=0;i<size;i++) output<<std::setw(10)<<P.scal[i]<<' ';
  output<<";\n";
  return output;
}

template <class T>
Permutation<T>::Permutation(size_t size){
  perm.Resize(size);
  scal.Resize(size);
  for(size_t i=0;i<size;i++){
    perm[i]=i;
    scal[i]=1.0;
  }
}

template <class T>
Permutation<T> Permutation<T>::RandPerm(size_t size){
  Permutation<T> P(size);
  for(size_t i=0;i<size;i++){
    P.perm[i]=rand()%size;
    for(size_t j=0;j<i;j++)
      if(P.perm[i]==P.perm[j]){ i--; break; }
    P.scal[i]=((T)rand())/RAND_MAX;
  }
  return P;
}

template <class T>
Matrix<T> Permutation<T>::GetMatrix() const{
  size_t size=perm.Dim();
  Matrix<T> M_r(size,size,NULL);
  for(size_t i=0;i<size;i++)
    for(size_t j=0;j<size;j++)
      M_r[i][j]=(perm[j]==i?scal[j]:0.0);
  return M_r;
}

template <class T>
size_t Permutation<T>::Dim() const{
  return perm.Dim();
}

template <class T>
Permutation<T> Permutation<T>::Transpose(){
  size_t size=perm.Dim();
  Permutation<T> P_r(size);

  Vector<PERM_INT_T>& perm_r=P_r.perm;
  Vector<T>& scal_r=P_r.scal;
  for(size_t i=0;i<size;i++){
    perm_r[perm[i]]=i;
    scal_r[perm[i]]=scal[i];
  }
  return P_r;
}

template <class T>
Permutation<T> Permutation<T>::operator*(const Permutation<T>& P){
  size_t size=perm.Dim();
  assert(P.Dim()==size);

  Permutation<T> P_r(size);
  Vector<PERM_INT_T>& perm_r=P_r.perm;
  Vector<T>& scal_r=P_r.scal;
  for(size_t i=0;i<size;i++){
    perm_r[i]=perm[P.perm[i]];
    scal_r[i]=scal[P.perm[i]]*P.scal[i];
  }
  return P_r;
}

template <class T>
Matrix<T> Permutation<T>::operator*(const Matrix<T>& M){
  if(Dim()==0) return M;
  assert(M.Dim(0)==Dim());
  size_t d0=M.Dim(0);
  size_t d1=M.Dim(1);

  Matrix<T> M_r(d0,d1,NULL);
  for(size_t i=0;i<d0;i++){
    const T s=scal[i];
    const T* M_=M[i];
    T* M_r_=M_r[perm[i]];
    for(size_t j=0;j<d1;j++)
      M_r_[j]=M_[j]*s;
  }
  return M_r;
}
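
// Example (illustrative sketch; P, M, PM and check are hypothetical): applying
// a scaled permutation to a matrix. P*M appears to agree with the dense
// product P.GetMatrix()*M, and M*P (defined below) with M*P.GetMatrix().
//
//   pvfmm::Permutation<double> P=pvfmm::Permutation<double>::RandPerm(4);
//   pvfmm::Matrix<double> M(4,6);
//   M.SetZero();
//   pvfmm::Matrix<double> PM=P*M;                    // permute and scale rows
//   pvfmm::Matrix<double> check=P.GetMatrix()*M;     // same result, formed densely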

template <class T>
Matrix<T> operator*(const Matrix<T>& M, const Permutation<T>& P){
  if(P.Dim()==0) return M;
  assert(M.Dim(1)==P.Dim());
  size_t d0=M.Dim(0);
  size_t d1=M.Dim(1);

  Matrix<T> M_r(d0,d1,NULL);
  for(size_t i=0;i<d0;i++){
    const PERM_INT_T* perm_=&(P.perm[0]);
    const T* scal_=&(P.scal[0]);
    const T* M_=M[i];
    T* M_r_=M_r[i];
    for(size_t j=0;j<d1;j++)
      M_r_[j]=M_[perm_[j]]*scal_[j];
  }
  return M_r;
}

}//end namespace