From aa5ccd119bfece3d400b1340afc8ccdbfdaf7b45 Mon Sep 17 00:00:00 2001
From: Jakub Adamski
Date: Fri, 1 Sep 2023 14:07:25 +0100
Subject: [PATCH] Made MPI calls in exchangeStateVectors non-blocking

---
 QuEST/src/CPU/QuEST_cpu_distributed.c | 28 ++++++++++++++++++---------
 1 file changed, 19 insertions(+), 9 deletions(-)

diff --git a/QuEST/src/CPU/QuEST_cpu_distributed.c b/QuEST/src/CPU/QuEST_cpu_distributed.c
index 4d07ef2b..4c9ce0af 100644
--- a/QuEST/src/CPU/QuEST_cpu_distributed.c
+++ b/QuEST/src/CPU/QuEST_cpu_distributed.c
@@ -494,8 +494,12 @@ void densmatr_initPureState(Qureg targetQureg, Qureg copyQureg) {
 void exchangeStateVectors(Qureg qureg, int pairRank){
 
     // MPI send/receive vars
-    int TAG=100;
-    MPI_Status status;
+    int sendTag;
+    int recvTag;
+    int rank;
+    MPI_Request * requests;
+
+    MPI_Comm_rank(MPI_COMM_WORLD, &rank);
 
     // Multiple messages are required as MPI uses int rather than long long int for count
     // For openmpi, messages are further restricted to 2GB in size -- do this for all cases
@@ -506,20 +510,26 @@ void exchangeStateVectors(Qureg qureg, int pairRank){
 
     // safely assume MPI_MAX... = 2^n, so division always exact
     int numMessages = qureg.numAmpsPerChunk/maxMessageCount;
+    requests = (MPI_Request*) malloc(4 * numMessages * sizeof(MPI_Request));
     int i;
     long long int offset;
+
     // send my state vector to pairRank's qureg.pairStateVec
     // receive pairRank's state vector into qureg.pairStateVec
     for (i=0; i<numMessages; i++){
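The patch text above is cut off inside the second hunk, so the rewritten loop body is not visible. What follows is a minimal, self-contained sketch of the non-blocking exchange pattern implied by the commit message and the visible declarations: each chunk's real and imaginary slices are posted with MPI_Isend/MPI_Irecv (four requests per chunk, matching the `4 * numMessages` allocation) and completed by a single MPI_Waitall. Plain `double` arrays stand in for QuEST's `Qureg` fields, `MAX_MESSAGE_COUNT` stands in for `MPI_MAX_AMPS_IN_MSG`, and the tag scheme is an assumption; the actual commit also queries the process rank, presumably for its own tag construction, which this sketch does not reproduce.

// Hypothetical sketch of the non-blocking exchange, not the commit's exact code.
// Compile: mpicc exchange_sketch.c -o exchange_sketch
// Run with an even number of ranks, e.g.: mpirun -np 2 ./exchange_sketch
#include <mpi.h>
#include <stdio.h>
#include <stdlib.h>

// stand-in for QuEST's MPI_MAX_AMPS_IN_MSG chunk limit (assumed name and value)
#define MAX_MESSAGE_COUNT (1LL << 28)

// Exchange numAmps amplitudes (split into real/imag arrays) with pairRank,
// receiving the partner's amplitudes into pairReal/pairImag. All transfers
// are posted up front and completed by one MPI_Waitall, so every chunk of
// both components is in flight concurrently.
void exchangeStateVectors(double *real, double *imag,
                          double *pairReal, double *pairImag,
                          long long int numAmps, int pairRank) {

    // MPI counts are int, so large vectors are exchanged in chunks
    long long int maxMessageCount = MAX_MESSAGE_COUNT;
    if (numAmps < maxMessageCount)
        maxMessageCount = numAmps;
    int numMessages = (int) (numAmps / maxMessageCount); // exact if both are 2^n

    // 4 requests per chunk: send + recv for each of the real and imag parts
    MPI_Request *requests = (MPI_Request*) malloc(4 * numMessages * sizeof(MPI_Request));

    for (int i = 0; i < numMessages; i++) {
        long long int offset = i * maxMessageCount;

        // Assumed tag scheme: both partners derive the same tag from the
        // chunk index (real parts on even tags, imag parts on odd), so each
        // Isend matches exactly one Irecv on the other side. MPI matching
        // already distinguishes direction by source rank.
        int tag = 2 * i;

        MPI_Isend(&real[offset],     (int) maxMessageCount, MPI_DOUBLE, pairRank,
                  tag,     MPI_COMM_WORLD, &requests[4*i]);
        MPI_Irecv(&pairReal[offset], (int) maxMessageCount, MPI_DOUBLE, pairRank,
                  tag,     MPI_COMM_WORLD, &requests[4*i + 1]);
        MPI_Isend(&imag[offset],     (int) maxMessageCount, MPI_DOUBLE, pairRank,
                  tag + 1, MPI_COMM_WORLD, &requests[4*i + 2]);
        MPI_Irecv(&pairImag[offset], (int) maxMessageCount, MPI_DOUBLE, pairRank,
                  tag + 1, MPI_COMM_WORLD, &requests[4*i + 3]);
    }

    // block until every chunk has been both sent and received
    MPI_Waitall(4 * numMessages, requests, MPI_STATUSES_IGNORE);
    free(requests);
}

int main(int argc, char *argv[]) {
    MPI_Init(&argc, &argv);
    int rank;
    MPI_Comm_rank(MPI_COMM_WORLD, &rank);

    long long int n = 1LL << 20;
    double *re  = malloc(n * sizeof(double)), *im  = malloc(n * sizeof(double));
    double *pre = malloc(n * sizeof(double)), *pim = malloc(n * sizeof(double));
    for (long long int j = 0; j < n; j++) { re[j] = rank; im[j] = -rank; }

    int pairRank = rank ^ 1; // pair adjacent ranks; requires an even rank count
    exchangeStateVectors(re, im, pre, pim, n, pairRank);
    printf("rank %d got partner amps: re=%g im=%g\n", rank, pre[0], pim[0]);

    free(re); free(im); free(pre); free(pim);
    MPI_Finalize();
    return 0;
}

Relative to the old code's blocking MPI_Sendrecv pairs, which moved one real chunk and then one imaginary chunk per iteration, posting every transfer before waiting lets the MPI library overlap all of the sends and receives rather than serialising them two at a time.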