Markopy
Utilizing Markov models for brute-force attacks
cudaDeviceController.h
Go to the documentation of this file.
1 /** @file cudaDeviceController.h
2  * @brief Simple static class for basic CUDA device controls.
3  * @authors Ata Hakçıl
4  *
5  * @copydoc Markov::API::CUDA::CUDADeviceController
6  */
7 
#pragma once
#include <cstring>
#include <iostream>
10 
11 /** @brief Namespace for objects requiring CUDA libraries.
12 */
13 namespace Markov::API::CUDA{
/** @brief Controller class for CUDA devices.
 *
 * Static helpers for device enumeration, error checking, and migrating
 * flattened 2D arrays to device VRAM. This implementation only supports
 * Nvidia devices.
 */
class CUDADeviceController{
public:
    /** @brief List CUDA devices in the system.
     *
     * This function will print details of every CUDA capable device in the system.
     * (Declaration only; implementation resides in the corresponding .cu file.)
     *
     * @b Example @b output:
     * @code{.txt}
     * Device Number: 0
     * Device name: GeForce RTX 2070
     * Memory Clock Rate (KHz): 7001000
     * Memory Bus Width (bits): 256
     * Peak Memory Bandwidth (GB/s): 448.064
     * Max Linear Threads: 1024
     * @endcode
     */
    __host__ static void ListCudaDevices();

protected:
    /** @brief Check results of the last operation on GPU.
     *
     * Check the status returned from cudaMalloc/cudaMemcpy to find failures.
     *
     * If a failure occurs, it is assumed beyond redemption, and exited
     * (when bExit is true).
     *
     * @param _status Cuda error status to check
     * @param msg Message to print in case of a failure
     * @param bExit If true, terminate the process on failure; if false, only report it.
     * @return 0 if successful, 1 if failure.
     * @b Example @b usage:
     * @code{.cpp}
     * char *da, a = "test";
     * cudastatus = cudaMalloc((char **)&da, 5*sizeof(char*));
     * CudaCheckNotifyErr(cudastatus, "Failed to allocate VRAM for *da.\n");
     * @endcode
     */
    __host__ static int CudaCheckNotifyErr(cudaError_t _status, const char* msg, bool bExit=true);

    /** @brief Malloc a 2D array in device space.
     *
     * This function will allocate enough space on VRAM for a flattened 2D array
     * (row*col contiguous elements of T).
     *
     * @param dst destination pointer, set to the device allocation
     * @param row row count of the 2d array
     * @param col column count of the 2d array
     * @return cudaError_t status of the cudaMalloc operation
     *
     * @b Example @b usage:
     * @code{.cpp}
     * cudaError_t cudastatus;
     * char* dst;
     * cudastatus = CudaMalloc2DToFlat<char>(&dst, 5, 15);
     * if(cudastatus!=cudaSuccess){
     *     CudaCheckNotifyErr(cudastatus, " CudaMalloc2DToFlat Failed.", false);
     * }
     * @endcode
     */
    template <typename T>
    __host__ static cudaError_t CudaMalloc2DToFlat(T** dst, int row, int col){
        cudaError_t cudastatus = cudaMalloc((T **)dst, row*col*sizeof(T));
        CudaCheckNotifyErr(cudastatus, "cudaMalloc Failed.", false);
        return cudastatus;
    }

    /** @brief Memcpy a 2D array to device space after flattening.
     *
     * Resulting device buffer will not be a true 2D array, but a row-major
     * flattened copy of the row pointers in src.
     *
     * @param dst destination pointer (device memory, at least row*col elements)
     * @param src source pointer (host array of row pointers, each holding col elements)
     * @param row row count of the 2d array
     * @param col column count of the 2d array
     * @return cudaError_t status of the cudaMemcpy operation
     *
     * @b Example @b usage:
     * @code{.cpp}
     * cudaError_t cudastatus;
     * char* dst;
     * cudastatus = CudaMalloc2DToFlat<char>(&dst, 5, 15);
     * CudaCheckNotifyErr(cudastatus, " CudaMalloc2DToFlat Failed.", false);
     * cudastatus = CudaMemcpy2DToFlat<char>(*dst,src,15,15);
     * CudaCheckNotifyErr(cudastatus, " CudaMemcpy2DToFlat Failed.", false);
     * @endcode
     */
    template <typename T>
    __host__ static cudaError_t CudaMemcpy2DToFlat(T* dst, T** src, int row, int col){
        T* tempbuf = new T[row*col];
        for(int i=0;i<row;i++){
            // Row i starts at flattened offset i*col; copy col ELEMENTS, i.e.
            // col*sizeof(T) bytes. (The previous code copied col bytes at
            // offset row*i — only correct when row==col and sizeof(T)==1.)
            memcpy(&(tempbuf[col*i]), src[i], col*sizeof(T));
        }
        cudaError_t cudastatus = cudaMemcpy(dst, tempbuf, row*col*sizeof(T), cudaMemcpyHostToDevice);
        delete[] tempbuf; // staging buffer is host-only scratch; free it to avoid a leak
        return cudastatus;
    }

    /** @brief Both malloc and memcpy a 2D array into device VRAM.
     *
     * Resulting buffer will not be a true 2D array.
     *
     * @param dst destination pointer, set to the device allocation
     * @param src source pointer (host array of row pointers)
     * @param row row count of the 2d array
     * @param col column count of the 2d array
     * @return cudaError_t status of the failing operation, or the final cudaMemcpy status
     *
     * @b Example @b usage:
     * @code{.cpp}
     * cudaError_t cudastatus;
     * char* dst;
     * cudastatus = CudaMigrate2DFlat<long int>(
     *     &dst, this->valueMatrix, this->matrixSize, this->matrixSize);
     * CudaCheckNotifyErr(cudastatus, " Cuda failed to initialize value matrix row.");
     * @endcode
     */
    template <typename T>
    __host__ static cudaError_t CudaMigrate2DFlat(T** dst, T** src, int row, int col){
        cudaError_t cudastatus = CudaMalloc2DToFlat<T>(dst, row, col);
        if(cudastatus!=cudaSuccess){
            CudaCheckNotifyErr(cudastatus, " CudaMalloc2DToFlat Failed.", false);
            return cudastatus; // allocation failed; nothing to copy into
        }
        cudastatus = CudaMemcpy2DToFlat<T>(*dst, src, row, col);
        CudaCheckNotifyErr(cudastatus, " CudaMemcpy2DToFlat Failed.", false);
        return cudastatus;
    }

private:
};
147 };