Skip to content

Commit

Permalink
[HIPIFY][tests][BLAS][fix] Move __nv_bfloat16-related APIs under `#if CUDA_VERSION >= 11000`
Browse files Browse the repository at this point in the history

[Synopsis]
+ cublas2hipblas.cu, cublas2hipblas_v2.cu, and cublas2rocblas.cu tests fail against CUDA < 11.0

[Reason]
+ The `__nv_bfloat16` type was first introduced in CUDA 11.0
  • Loading branch information
emankov committed Oct 26, 2023
1 parent 9aeb155 commit 21c414d
Show file tree
Hide file tree
Showing 3 changed files with 40 additions and 40 deletions.
22 changes: 11 additions & 11 deletions tests/unit_tests/synthetic/libraries/cublas2hipblas.cu
Original file line number Diff line number Diff line change
Expand Up @@ -246,17 +246,6 @@ int main() {
// CHECK: __half** hyarray = 0;
__half** hyarray = 0;

// CHECK: hip_bfloat16** bf16Aarray = 0;
__nv_bfloat16** bf16Aarray = 0;
// CHECK: const hip_bfloat16** const bf16Aarray_const = const_cast<const hip_bfloat16**>(bf16Aarray);
const __nv_bfloat16** const bf16Aarray_const = const_cast<const __nv_bfloat16**>(bf16Aarray);
// CHECK: hip_bfloat16** bf16xarray = 0;
__nv_bfloat16** bf16xarray = 0;
// CHECK: const hip_bfloat16** const bf16xarray_const = const_cast<const hip_bfloat16**>(bf16xarray_const);
const __nv_bfloat16** const bf16xarray_const = const_cast<const __nv_bfloat16**>(bf16xarray_const);
// CHECK: hip_bfloat16** bf16yarray = 0;
__nv_bfloat16** bf16yarray = 0;

double da = 0;
double dA = 0;
double db = 0;
Expand Down Expand Up @@ -1658,6 +1647,17 @@ int main() {
cublasDataType_t R_16BF = CUDA_R_16BF;
cublasDataType_t C_16BF = CUDA_C_16BF;

// CHECK: hip_bfloat16** bf16Aarray = 0;
__nv_bfloat16** bf16Aarray = 0;
// CHECK: const hip_bfloat16** const bf16Aarray_const = const_cast<const hip_bfloat16**>(bf16Aarray);
const __nv_bfloat16** const bf16Aarray_const = const_cast<const __nv_bfloat16**>(bf16Aarray);
// CHECK: hip_bfloat16** bf16xarray = 0;
__nv_bfloat16** bf16xarray = 0;
// CHECK: const hip_bfloat16** const bf16xarray_const = const_cast<const hip_bfloat16**>(bf16xarray_const);
const __nv_bfloat16** const bf16xarray_const = const_cast<const __nv_bfloat16**>(bf16xarray_const);
// CHECK: hip_bfloat16** bf16yarray = 0;
__nv_bfloat16** bf16yarray = 0;

// CHECK: hipblasComputeType_t blasComputeType;
cublasComputeType_t blasComputeType;

Expand Down
22 changes: 11 additions & 11 deletions tests/unit_tests/synthetic/libraries/cublas2hipblas_v2.cu
Original file line number Diff line number Diff line change
Expand Up @@ -257,17 +257,6 @@ int main() {
// CHECK: __half** hyarray = 0;
__half** hyarray = 0;

// CHECK: hip_bfloat16** bf16Aarray = 0;
__nv_bfloat16** bf16Aarray = 0;
// CHECK: const hip_bfloat16** const bf16Aarray_const = const_cast<const hip_bfloat16**>(bf16Aarray);
const __nv_bfloat16** const bf16Aarray_const = const_cast<const __nv_bfloat16**>(bf16Aarray);
// CHECK: hip_bfloat16** bf16xarray = 0;
__nv_bfloat16** bf16xarray = 0;
// CHECK: const hip_bfloat16** const bf16xarray_const = const_cast<const hip_bfloat16**>(bf16xarray_const);
const __nv_bfloat16** const bf16xarray_const = const_cast<const __nv_bfloat16**>(bf16xarray_const);
// CHECK: hip_bfloat16** bf16yarray = 0;
__nv_bfloat16** bf16yarray = 0;

double da = 0;
double dA = 0;
double db = 0;
Expand Down Expand Up @@ -1813,6 +1802,17 @@ int main() {
cublasDataType_t R_16BF = CUDA_R_16BF;
cublasDataType_t C_16BF = CUDA_C_16BF;

// CHECK: hip_bfloat16** bf16Aarray = 0;
__nv_bfloat16** bf16Aarray = 0;
// CHECK: const hip_bfloat16** const bf16Aarray_const = const_cast<const hip_bfloat16**>(bf16Aarray);
const __nv_bfloat16** const bf16Aarray_const = const_cast<const __nv_bfloat16**>(bf16Aarray);
// CHECK: hip_bfloat16** bf16xarray = 0;
__nv_bfloat16** bf16xarray = 0;
// CHECK: const hip_bfloat16** const bf16xarray_const = const_cast<const hip_bfloat16**>(bf16xarray_const);
const __nv_bfloat16** const bf16xarray_const = const_cast<const __nv_bfloat16**>(bf16xarray_const);
// CHECK: hip_bfloat16** bf16yarray = 0;
__nv_bfloat16** bf16yarray = 0;

// CHECK: hipblasComputeType_t blasComputeType;
cublasComputeType_t blasComputeType;

Expand Down
36 changes: 18 additions & 18 deletions tests/unit_tests/synthetic/libraries/cublas2rocblas.cu
Original file line number Diff line number Diff line change
Expand Up @@ -278,24 +278,6 @@ int main() {
// CHECK: rocblas_half** hyarray = 0;
__half** hyarray = 0;

// CHECK: rocblas_bfloat16* bf16A = 0;
__nv_bfloat16* bf16A = 0;
// CHECK: rocblas_bfloat16* bf16x = 0;
__nv_bfloat16* bf16x = 0;
// CHECK: rocblas_bfloat16* bf16y = 0;
__nv_bfloat16* bf16y = 0;

// CHECK: rocblas_bfloat16** bf16Aarray = 0;
__nv_bfloat16** bf16Aarray = 0;
// CHECK: const rocblas_bfloat16** const bf16Aarray_const = const_cast<const rocblas_bfloat16**>(bf16Aarray);
const __nv_bfloat16** const bf16Aarray_const = const_cast<const __nv_bfloat16**>(bf16Aarray);
// CHECK: rocblas_bfloat16** bf16xarray = 0;
__nv_bfloat16** bf16xarray = 0;
// CHECK: const rocblas_bfloat16** const bf16xarray_const = const_cast<const rocblas_bfloat16**>(bf16xarray_const);
const __nv_bfloat16** const bf16xarray_const = const_cast<const __nv_bfloat16**>(bf16xarray_const);
// CHECK: rocblas_bfloat16** bf16yarray = 0;
__nv_bfloat16** bf16yarray = 0;

double da = 0;
double dA = 0;
double db = 0;
Expand Down Expand Up @@ -1793,6 +1775,24 @@ int main() {
cublasDataType_t R_16BF = CUDA_R_16BF;
cublasDataType_t C_16BF = CUDA_C_16BF;

// CHECK: rocblas_bfloat16* bf16A = 0;
__nv_bfloat16* bf16A = 0;
// CHECK: rocblas_bfloat16* bf16x = 0;
__nv_bfloat16* bf16x = 0;
// CHECK: rocblas_bfloat16* bf16y = 0;
__nv_bfloat16* bf16y = 0;

// CHECK: rocblas_bfloat16** bf16Aarray = 0;
__nv_bfloat16** bf16Aarray = 0;
// CHECK: const rocblas_bfloat16** const bf16Aarray_const = const_cast<const rocblas_bfloat16**>(bf16Aarray);
const __nv_bfloat16** const bf16Aarray_const = const_cast<const __nv_bfloat16**>(bf16Aarray);
// CHECK: rocblas_bfloat16** bf16xarray = 0;
__nv_bfloat16** bf16xarray = 0;
// CHECK: const rocblas_bfloat16** const bf16xarray_const = const_cast<const rocblas_bfloat16**>(bf16xarray_const);
const __nv_bfloat16** const bf16xarray_const = const_cast<const __nv_bfloat16**>(bf16xarray_const);
// CHECK: rocblas_bfloat16** bf16yarray = 0;
__nv_bfloat16** bf16yarray = 0;

// CHECK: rocblas_computetype blasComputeType;
// CHECK-NEXT: rocblas_computetype BLAS_COMPUTE_32F = rocblas_compute_type_f32;
cublasComputeType_t blasComputeType;
Expand Down

0 comments on commit 21c414d

Please sign in to comment.