
CUDA Features are added. Not tested yet.

Chenhan Yu committed 11 years ago
commit 34f82b563f
4 changed files with 241 additions and 0 deletions
  1. include/cuda_func.hpp (+69, -0)
  2. include/permute.hpp (+0, -0)
  3. src/cuda_func.cu (+100, -0)
  4. src/permute.cu (+72, -0)

+ 69 - 0
include/cuda_func.hpp

@@ -0,0 +1,69 @@
+#ifndef _CUDA_FUNC_HPP_
+#define _CUDA_FUNC_HPP_
+
+#include <pvfmm_common.hpp>
+#include <assert.h>
+#include <cstring>
+#include <device_wrapper.hpp>
+#include <matrix.hpp>
+#include <vector.hpp>
+
+namespace pvfmm {
+
+// external functions
+void in_perm_d (uintptr_t, uintptr_t, uintptr_t, uintptr_t, size_t, size_t, size_t, cudaStream_t*);
+void out_perm_d (uintptr_t, uintptr_t, uintptr_t, uintptr_t, uintptr_t, size_t, size_t, size_t, cudaStream_t*);
+
+template <class Real_t>
+class cuda_func {
+  public:
+    static void in_perm_h (uintptr_t precomp_data, uintptr_t input_perm,
+        uintptr_t input_data, uintptr_t buff_in, size_t interac_indx,
+        size_t M_dim0, size_t vec_cnt);
+    static void out_perm_h (uintptr_t scaling, uintptr_t precomp_data, uintptr_t output_perm,
+        uintptr_t output_data, uintptr_t buff_out, size_t interac_indx,
+        size_t M_dim0, size_t vec_cnt);
+};
+
+template <class Real_t>
+void cuda_func<Real_t>::in_perm_h (
+  uintptr_t precomp_data,
+  uintptr_t input_perm,
+  uintptr_t input_data,
+  uintptr_t buff_in,
+  size_t interac_indx,
+  size_t M_dim0,
+  size_t vec_cnt )
+{
+  cudaStream_t *stream;
+  //stream = DeviceWrapper::CUDA_Lock::acquire_stream(0);
+  stream = CUDA_Lock::acquire_stream(0);
+  /*
+  intptr_t precomp_data_d = precomp_data[0];
+  intptr_t input_perm_d = input_perm[0];
+  intptr_t input_data_d = input_data[0];
+  intptr_t buff_in_d = buff_in[0];
+  */
+  in_perm_d(precomp_data, input_perm, input_data, buff_in, interac_indx, M_dim0, vec_cnt, stream);
+}
+
+template <class Real_t>
+void cuda_func<Real_t>::out_perm_h (
+  uintptr_t scaling,
+  uintptr_t precomp_data,
+  uintptr_t output_perm,
+  uintptr_t output_data,
+  uintptr_t buff_out,
+  size_t interac_indx,
+  size_t M_dim1,
+  size_t vec_cnt )
+{
+  cudaStream_t *stream;
+  //stream = DeviceWrapper::CUDA_Lock::acquire_stream(0);
+  stream = CUDA_Lock::acquire_stream(0);
+  out_perm_d(scaling, precomp_data, output_perm, output_data, buff_out, interac_indx, M_dim1, vec_cnt, stream);
+}
+
+} // namespace pvfmm
+
+#endif //_CUDA_FUNC_HPP_
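
Editorial note on the pattern above: the host wrappers carry device memory around as plain uintptr_t values and launch on a stream obtained from CUDA_Lock::acquire_stream. The standalone sketch below is not part of the commit (scale_k, scale_d and the sizes are invented, and it creates its own stream rather than using CUDA_Lock); it only shows the same convention of passing a device address as an integer and re-casting it at the launch site.

#include <cstdint>
#include <cuda_runtime.h>

// Trivial kernel: scale each element of a device vector.
__global__ void scale_k(double *v, size_t n, double s) {
  size_t i = blockIdx.x*blockDim.x + threadIdx.x;
  if (i < n) v[i] *= s;
}

// Launcher in the style of in_perm_d/out_perm_d: the device address arrives
// as a uintptr_t and is cast back to a typed pointer here.
void scale_d(uintptr_t v_addr, size_t n, double s, cudaStream_t *stream) {
  double *v = reinterpret_cast<double*>(v_addr);
  int n_thread = 256;
  int n_block  = (int)((n + n_thread - 1)/n_thread);
  scale_k<<<n_block, n_thread, 0, *stream>>>(v, n, s);
}

int main() {
  size_t n = 1024;
  double *v_d;
  cudaMalloc((void**)&v_d, n*sizeof(double));
  cudaMemset(v_d, 0, n*sizeof(double));

  cudaStream_t stream;
  cudaStreamCreate(&stream);
  scale_d(reinterpret_cast<uintptr_t>(v_d), n, 2.0, &stream);  // address travels as an integer
  cudaStreamSynchronize(stream);

  cudaStreamDestroy(stream);
  cudaFree(v_d);
  return 0;
}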

+ 0 - 0
include/permute.hpp


+ 100 - 0
src/cuda_func.cu

@@ -0,0 +1,100 @@
+#include <stdint.h>
+
+#define DEFAULT_NUM_THREAD 256
+
+/* Case: double */
+__global__ void in_perm_k (
+  uintptr_t *precomp_data,
+  uintptr_t *input_perm,
+  uintptr_t *input_data,
+  uintptr_t *buff_in,
+  size_t interac_indx,
+  size_t M_dim0,
+  size_t vec_cnt )
+{
+  /* 1-dim thread id. */
+  int tid = blockIdx.x*blockDim.x + threadIdx.x;
+  /* PRAM model: one thread per vector; guard before indexing input_perm. */
+  if (tid >= vec_cnt) return;
+
+  /* Convert the stored offsets to device pointers. */
+  int *perm = (int*) (precomp_data[0] + input_perm[(interac_indx + tid)*4 + 0]);
+  double *scal = (double*) (precomp_data[0] + input_perm[(interac_indx + tid)*4 + 1]);
+  double *v_in = (double*) (input_data[0] + input_perm[(interac_indx + tid)*4 + 3]);
+  double *v_out = (double*) (buff_in + input_perm[(interac_indx + tid)*4 + 2]);
+
+  /* Gather, permute and scale one input vector. */
+  for (int j = 0; j < M_dim0; j++) v_out[j] = v_in[perm[j]]*scal[j];
+}
+
+__global__ void out_perm_k (
+  uintptr_t *scaling,
+  uintptr_t *precomp_data,
+  uintptr_t *output_perm,
+  uintptr_t *output_data,
+  uintptr_t *buff_out,
+  size_t interac_indx,
+  size_t M_dim1,
+  size_t vec_cnt )
+{
+  /* 1-dim thread Id. */
+  int tid = blockIdx.x*blockDim.x + threadIdx.x;
+
+  /* Specify the range [a, b) of interactions handled by this thread. */
+  int a = tid;
+  int b = tid + 1;
+
+  if (tid > 0 && a < vec_cnt) { // Find 'a' independently of other threads.
+    size_t out_ptr = output_perm[(interac_indx + a)*4 + 3];
+    while (a < vec_cnt && out_ptr == output_perm[(interac_indx + a)*4 + 3]) a++;
+  }
+  if (tid < vec_cnt - 1 && b < vec_cnt) { // Find 'b' independently of other threads.
+    size_t out_ptr = output_perm[(interac_indx + b)*4 + 3];
+    while (b < vec_cnt && out_ptr == output_perm[(interac_indx + b)*4 + 3]) b++;
+  }
+
+  if (tid < vec_cnt) {
+    /* PRAM Model: assuming as many threads as we need. */
+    for(int i = a; i < b; i++) { // Compute permutations.
+      double scaling_factor = scaling[interac_indx + i];
+      int *perm = (int*) (precomp_data[0] + output_perm[(interac_indx + i)*4 + 0]);
+      double *scal = (double*) (precomp_data[0] + output_perm[(interac_indx + i)*4 + 1]);
+      double *v_in = (double*) (buff_out + output_perm[(interac_indx + i)*4 + 3]);
+      double *v_out = (double*) (output_data[0] + output_perm[(interac_indx + i)*4 + 2]);
+      for (int j = 0; j < M_dim1; j++) v_out[j] += v_in[perm[j]]*scal[j]*scaling_factor;
+    }
+  }
+}
+
+void in_perm_d (
+  uintptr_t *precomp_data,
+  uintptr_t *input_perm,
+  uintptr_t *input_data,
+  uintptr_t *buff_in,
+  size_t interac_indx,
+  size_t M_dim0,
+  size_t vec_cnt,
+  cudaStream_t *stream )
+{
+  int n_thread, n_block;
+  n_thread = DEFAULT_NUM_THREAD;
+  n_block = (vec_cnt + n_thread - 1)/n_thread;  /* Ceiling division so every vector gets a thread. */
+  in_perm_k<<<n_block, n_thread, 0, *stream>>>(precomp_data, input_perm, input_data, buff_in, interac_indx, M_dim0, vec_cnt);
+}
+
+void out_perm_d (
+  uintptr_t *scaling,
+  uintptr_t *precomp_data,
+  uintptr_t *output_perm,
+  uintptr_t *output_data,
+  uintptr_t *buff_out,
+  size_t interac_indx,
+  size_t M_dim0,
+  size_t vec_cnt,
+  cudaStream_t *stream )
+{
+  int n_thread, n_block;
+  n_thread = DEFAULT_NUM_THREAD;
+  n_block = (vec_cnt + n_thread - 1)/n_thread;  /* Ceiling division so every vector gets a thread. */
+  out_perm_k<<<n_block, n_thread, 0, *stream>>>(scaling, precomp_data, output_perm, output_data, buff_out, interac_indx, M_dim0, vec_cnt);
+}
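
Editorial note, not part of the commit: the two kernels above boil down to a gather-permute-scale for the input side and a scatter-accumulate for the output side, with out_perm_k additionally letting one thread own the whole run of interactions that share an output pointer (presumably so no two threads accumulate into the same v_out). A small host-side reference of the per-vector updates, with illustrative names, could later be used to check the kernels:

#include <cstddef>

// Reference for in_perm_k: v_out[j] = v_in[perm[j]] * scal[j].
void in_perm_ref(const int *perm, const double *scal,
                 const double *v_in, double *v_out, size_t M_dim0) {
  for (size_t j = 0; j < M_dim0; j++)
    v_out[j] = v_in[perm[j]]*scal[j];
}

// Reference for one interaction in out_perm_k:
// v_out[j] += v_in[perm[j]] * scal[j] * scaling_factor.
void out_perm_ref(const int *perm, const double *scal, double scaling_factor,
                  const double *v_in, double *v_out, size_t M_dim1) {
  for (size_t j = 0; j < M_dim1; j++)
    v_out[j] += v_in[perm[j]]*scal[j]*scaling_factor;
}

Running the kernels and these references on the same small permutation tables and comparing the outputs is the obvious first test once the data layout is wired up.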

+ 72 - 0
src/permute.cu

@@ -0,0 +1,72 @@
+#include <vector.hpp>
+#include <device_wrapper.hpp>
+
+template <class Real_t>
+__global__ void in_perm_d (
+  Vector *precomp_data,
+  Vector *input_perm,
+  Vector *input_data,
+  Vector *buff_in,
+  size_t interac_indx,
+  size_t M_dim0 )
+{
+  int i = blockIdx.x*blockDim.x + threadIdx.x;
+  int *perm = (int*)(precomp_data[0] + input_perm[(interac_indx+i)*4 + 0]);
+  const Real_t* scal = (Real_t*)(precomp_data[0] + input_perm[(interac_indx + i)*4 + 1]);
+  const Real_t* v_in = (Real_t*)(input_data[0] + input_perm[(interac_indx + i)*4 + 3]);
+  Real_t* v_out = (Real_t*)(buff_in + input_perm[(interac_indx + i)*4 + 2]);
+
+  for (size_t j = 0; j < M_dim0; j++) v_out[j] = v_in[perm[j]]*scal[j];
+}
+
+template <class Real_t>
+__global__ void out_perm_d (
+  Vector *precomp_data,
+  Vector *input_perm,
+  Vector *input_data,
+  Vector *buff_in,
+  size_t interac_indx,
+  size_t M_dim0 )
+{
+
+
+}
+
+template <class Real_t>
+void in_perm_h (
+  Vector *precomp_data,
+  Vector *input_perm,
+  Vector *input_data,
+  Vector *buff_in,
+  size_t interac_indx,
+  size_t M_dim0,
+  size_t vec_cnt )
+{
+  cudaStream_t *stream;
+  int n_thread, n_block;
+  n_thread = DEFAULT_NUM_THREAD;
+  n_block = (vec_cnt + n_thread - 1)/n_thread;
+  stream = DeviceWrapper::CUDA_Lock::acquire_stream(0);
+  in_perm_d<Real_t><<<n_block, n_thread, 0, *stream>>>
+    (precomp_data, input_perm, input_data, buff_in, interac_indx, M_dim0);
+}
+
+template <class Real_t>
+void out_perm_h (
+  Vector *precomp_data,
+  Vector *input_perm,
+  Vector *input_data,
+  Vector *buff_in,
+  size_t interac_indx,
+  size_t M_dim0,
+  size_t vec_cnt )
+{
+  cudaStream_t *stream;
+  int n_thread, n_block;
+  n_thread = DEFAULT_NUM_THREAD;
+  n_block = (vec_cnt + n_thread - 1)/n_thread;
+  stream = DeviceWrapper::CUDA_Lock::acquire_stream(0);
+  out_perm_d<Real_t><<<n_block, n_thread, 0, *stream>>>
+    (precomp_data, input_perm, input_data, buff_in, interac_indx, M_dim0);
+}
+
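
Editorial sketch, not part of the commit: src/permute.cu is sketching templated __global__ kernels with templated host launchers in the same .cu file. A minimal, self-contained version of that pattern looks like the following (copy_scale_k and copy_scale_h are invented names); because the launcher template lives in the .cu, explicit instantiations are needed for each Real_t that other translation units will use.

#include <cstddef>
#include <cuda_runtime.h>

#define DEFAULT_NUM_THREAD 256

// Templated kernel: v_out[i] = s * v_in[i].
template <class Real_t>
__global__ void copy_scale_k(const Real_t *v_in, Real_t *v_out, Real_t s, size_t n) {
  size_t i = blockIdx.x*blockDim.x + threadIdx.x;
  if (i < n) v_out[i] = s*v_in[i];
}

// Templated host launcher, same shape as the in_perm_h/out_perm_h wrappers in this file.
template <class Real_t>
void copy_scale_h(const Real_t *v_in, Real_t *v_out, Real_t s, size_t n, cudaStream_t *stream) {
  int n_thread = DEFAULT_NUM_THREAD;
  int n_block  = (int)((n + n_thread - 1)/n_thread);
  copy_scale_k<Real_t><<<n_block, n_thread, 0, *stream>>>(v_in, v_out, s, n);
}

// Explicit instantiations so callers outside this .cu can link against the launcher.
template void copy_scale_h<float >(const float*,  float*,  float,  size_t, cudaStream_t*);
template void copy_scale_h<double>(const double*, double*, double, size_t, cudaStream_t*);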