@@ -47,6 +47,7 @@ TEST_CASE("AdjointJacobianGPUMPI::adjointJacobianMPI Op=RX, Obs=[Z,Z]",
     using StateVectorT = StateVectorCudaMPI<double>;
 
     MPIManager mpi_manager(MPI_COMM_WORLD);
+    CHECK(mpi_manager.getSize() == 2);
 
     AdjointJacobianMPI<StateVectorT> adj;
     std::vector<double> param{-M_PI / 7, M_PI / 5, 2 * M_PI / 3};
@@ -66,6 +67,7 @@ TEST_CASE("AdjointJacobianGPUMPI::adjointJacobianMPI Op=RX, Obs=[Z,Z]",
 
     int nDevices = 0; // Number of GPU devices per node
     cudaGetDeviceCount(&nDevices);
+    CHECK(nDevices >= 2);
     int deviceId = mpi_manager.getRank() % nDevices;
     cudaSetDevice(deviceId);
     DevTag<int> dt_local(deviceId, 0);
@@ -104,6 +106,7 @@ TEST_CASE("AdjointJacobianGPUMPI::adjointJacobianMPI Op=[QubitStateVector, "
     using StateVectorT = StateVectorCudaMPI<double>;
 
     MPIManager mpi_manager(MPI_COMM_WORLD);
+    CHECK(mpi_manager.getSize() == 2);
 
     AdjointJacobianMPI<StateVectorT> adj;
     std::vector<double> param{-M_PI / 7, M_PI / 5, 2 * M_PI / 3};
@@ -124,6 +127,7 @@ TEST_CASE("AdjointJacobianGPUMPI::adjointJacobianMPI Op=[QubitStateVector, "
 
     int nDevices = 0; // Number of GPU devices per node
     cudaGetDeviceCount(&nDevices);
+    CHECK(nDevices >= 2);
     int deviceId = mpi_manager.getRank() % nDevices;
     cudaSetDevice(deviceId);
     DevTag<int> dt_local(deviceId, 0);
@@ -167,6 +171,7 @@ TEST_CASE(
     std::vector<double> jacobian_serial(num_obs * tp.size(), 0);
 
     MPIManager mpi_manager(MPI_COMM_WORLD);
+    CHECK(mpi_manager.getSize() == 2);
 
     size_t mpi_buffersize = 1;
 
@@ -177,6 +182,7 @@ TEST_CASE(
 
     int nDevices = 0; // Number of GPU devices per node
     cudaGetDeviceCount(&nDevices);
+    CHECK(nDevices >= 2);
     int deviceId = mpi_manager.getRank() % nDevices;
     cudaSetDevice(deviceId);
     DevTag<int> dt_local(deviceId, 0);
@@ -234,6 +240,7 @@ TEST_CASE(
     std::vector<double> jacobian_serial(num_obs * tp.size(), 0);
 
     MPIManager mpi_manager(MPI_COMM_WORLD);
+    CHECK(mpi_manager.getSize() == 2);
 
     size_t mpi_buffersize = 1;
 
@@ -244,6 +251,7 @@ TEST_CASE(
 
     int nDevices = 0; // Number of GPU devices per node
     cudaGetDeviceCount(&nDevices);
+    CHECK(nDevices >= 2);
     int deviceId = mpi_manager.getRank() % nDevices;
     cudaSetDevice(deviceId);
     DevTag<int> dt_local(deviceId, 0);
@@ -297,6 +305,7 @@ TEST_CASE("AdjointJacobianGPUMPI::adjointJacobian Op=[RX,RX,RX], Obs=[ZZZ]",
     std::vector<double> jacobian_serial(num_obs * tp.size(), 0);
 
     MPIManager mpi_manager(MPI_COMM_WORLD);
+    CHECK(mpi_manager.getSize() == 2);
 
     size_t mpi_buffersize = 1;
 
@@ -307,6 +316,7 @@ TEST_CASE("AdjointJacobianGPUMPI::adjointJacobian Op=[RX,RX,RX], Obs=[ZZZ]",
 
     int nDevices = 0; // Number of GPU devices per node
     cudaGetDeviceCount(&nDevices);
+    CHECK(nDevices >= 2);
     int deviceId = mpi_manager.getRank() % nDevices;
     cudaSetDevice(deviceId);
     DevTag<int> dt_local(deviceId, 0);
@@ -358,6 +368,7 @@ TEST_CASE("AdjointJacobianGPUMPI::adjointJacobian Op=Mixed, Obs=[XXX]",
     std::vector<double> jacobian_serial(num_obs * tp.size(), 0);
 
     MPIManager mpi_manager(MPI_COMM_WORLD);
+    CHECK(mpi_manager.getSize() == 2);
 
     size_t mpi_buffersize = 1;
 
@@ -368,6 +379,7 @@ TEST_CASE("AdjointJacobianGPUMPI::adjointJacobian Op=Mixed, Obs=[XXX]",
 
     int nDevices = 0; // Number of GPU devices per node
     cudaGetDeviceCount(&nDevices);
+    CHECK(nDevices >= 2);
     int deviceId = mpi_manager.getRank() % nDevices;
     cudaSetDevice(deviceId);
     DevTag<int> dt_local(deviceId, 0);
@@ -436,6 +448,7 @@ TEST_CASE("AdjointJacobianGPU::AdjointJacobianGPUMPI Op=[RX,RX,RX], "
     std::vector<double> jacobian_serial(num_obs * tp.size(), 0);
 
     MPIManager mpi_manager(MPI_COMM_WORLD);
+    CHECK(mpi_manager.getSize() == 2);
 
     size_t mpi_buffersize = 1;
 
@@ -446,6 +459,7 @@ TEST_CASE("AdjointJacobianGPU::AdjointJacobianGPUMPI Op=[RX,RX,RX], "
 
     int nDevices = 0; // Number of GPU devices per node
     cudaGetDeviceCount(&nDevices);
+    CHECK(nDevices >= 2);
     int deviceId = mpi_manager.getRank() % nDevices;
     cudaSetDevice(deviceId);
     DevTag<int> dt_local(deviceId, 0);
@@ -504,6 +518,7 @@ TEST_CASE("AdjointJacobianGPU::AdjointJacobianGPU Test HermitianObs",
     std::vector<double> jacobian2_serial(num_obs * tp.size(), 0);
 
     MPIManager mpi_manager(MPI_COMM_WORLD);
+    CHECK(mpi_manager.getSize() == 2);
 
     size_t mpi_buffersize = 1;
 
@@ -514,6 +529,7 @@ TEST_CASE("AdjointJacobianGPU::AdjointJacobianGPU Test HermitianObs",
 
     int nDevices = 0; // Number of GPU devices per node
     cudaGetDeviceCount(&nDevices);
+    CHECK(nDevices >= 2);
     int deviceId = mpi_manager.getRank() % nDevices;
     cudaSetDevice(deviceId);
     DevTag<int> dt_local(deviceId, 0);
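
For context, every hunk above adds the same environment precondition to a test body before the adjoint-Jacobian computation runs: exactly two MPI ranks, at least two visible GPUs per node, and a round-robin rank-to-device binding. Below is a minimal standalone sketch of that guard, assuming only MPI and the CUDA runtime; it replaces the tests' Catch2 CHECK macro with a plain assert, and the main() wrapper here is illustrative rather than part of the test suite.

// Standalone sketch of the precondition guard added in the hunks above.
// Assumes an MPI + CUDA environment; assert stands in for Catch2's CHECK.
#include <cassert>
#include <cuda_runtime.h>
#include <mpi.h>

int main(int argc, char **argv) {
    MPI_Init(&argc, &argv);

    int size = 0, rank = 0;
    MPI_Comm_size(MPI_COMM_WORLD, &size);
    MPI_Comm_rank(MPI_COMM_WORLD, &rank);
    assert(size == 2); // the tests expect exactly two MPI processes

    int nDevices = 0; // number of GPU devices visible on this node
    cudaGetDeviceCount(&nDevices);
    assert(nDevices >= 2); // one distinct GPU per local rank

    // Round-robin rank-to-device mapping, as in the tests.
    int deviceId = rank % nDevices;
    cudaSetDevice(deviceId);

    MPI_Finalize();
    return 0;
}

Run under, e.g., mpirun -np 2 on a node with two GPUs. Note that Catch2's CHECK records a failure and lets the test continue, whereas REQUIRE aborts the test immediately; using CHECK here means a mismatched environment is reported without cutting the suite short.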