MPI_Isend_MPI_Irecv.c
/*######################################################################
Example 6 : MPI_Isend MPI_Irecv
Description:
Examples 5 and 6 demonstrate the difference between blocking
and non-blocking point-to-point communication.
Example 5: MPI_Send/MPI_Recv (blocking)
Example 6: MPI_Isend/MPI_Irecv (non-blocking)
             sendbuff   recvbuff         sendbuff   recvbuff
             ########   ########         ########   ########
             #      #   #      #         #      #   #      #
         0   #  AA  #   #      #         #  AA  #   #  EE  #
             #      #   #      #         #      #   #      #
             ########   ########         ########   ########
   T         #      #   #      #         #      #   #      #
         1   #  BB  #   #      #         #  BB  #   #  AA  #
   a         #      #   #      #         #      #   #      #
             ########   ########         ########   ########
   s         #      #   #      #         #      #   #      #
         2   #  CC  #   #      #         #  CC  #   #  BB  #
   k         #      #   #      #         #      #   #      #
             ########   ########         ########   ########
   s         #      #   #      #         #      #   #      #
         3   #  DD  #   #      #         #  DD  #   #  CC  #
             #      #   #      #         #      #   #      #
             ########   ########         ########   ########
             #      #   #      #         #      #   #      #
         4   #  EE  #   #      #         #  EE  #   #  DD  #
             #      #   #      #         #      #   #      #
             ########   ########         ########   ########
                  BEFORE                      AFTER
Each task transfers a vector of random numbers (sendbuff) to the
next task (taskid+1). The last task transfers its vector to
task 0. Consequently, each task receives a vector from the
preceding task and stores it in recvbuff.
This example shows that MPI_Isend and MPI_Irecv are much better
suited to this kind of exchange than their blocking counterparts.
MPI_Isend and MPI_Irecv are non-blocking, meaning that the
function call returns before the communication is completed.
Deadlock is then avoided, but other precautions must be taken
when using non-blocking calls. In particular, at some point you
will want to be sure that your data has actually arrived: you
place an MPI_Wait call for each send and/or receive that must be
completed before the program advances. Because the calls in this
example are non-blocking, all the exchanges between the tasks
can occur at the same time.
Before the communication, task 0 gathers the sums of the vectors
to be sent by each task and prints them out. Similarly, after
the communication, task 0 gathers the sums of the vectors
received by each task and prints them out along with the
communication times.
Example 5 shows how to use blocking communication (MPI_Send and
MPI_Recv) to accomplish the same work much less efficiently.
The size of the vectors (buffsize) is given as an argument to
the program at run time.
Author: Carol Gauthier
Centre de Calcul scientifique
Universite de Sherbrooke
Last revision: September 2005
######################################################################*/
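/* A minimal sketch of the general non-blocking pattern described
   in the header above (buffer, count, dest and tag are illustrative
   names, not part of this example):

       MPI_Request request;
       MPI_Isend(buffer, count, MPI_DOUBLE, dest, tag,
                 MPI_COMM_WORLD, &request);
       ...useful computation can overlap with the transfer here...
       MPI_Wait(&request, MPI_STATUS_IGNORE);

   Only after MPI_Wait returns is it safe to reuse the send buffer
   (or, for an MPI_Irecv, to read the receive buffer).              */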
#include <stdlib.h>
#include <stdio.h>
#include <time.h>
#include <math.h>
#include <mpi.h>
int main(int argc, char **argv)
{
int taskid, ntasks, i, itask, recvtaskid, buffsize;
MPI_Status status;
MPI_Request send_request, recv_request;
double *sendbuff, *recvbuff;
double sendbuffsum, recvbuffsum;
double sendbuffsums[1024], recvbuffsums[1024];
double inittime, totaltime, recvtime, recvtimes[1024];
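/* Note: the fixed-size arrays sendbuffsums, recvbuffsums and     */
/* recvtimes assume the program is run with at most 1024 tasks.   */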
/*===============================================================*/
/* MPI Initialisation. It's important to put this call at the   */
/* beginning of the program, after the variable declarations.   */
MPI_Init(&argc, &argv);
/*===============================================================*/
/* Get the number of MPI tasks and the taskid of this task. */
MPI_Comm_rank(MPI_COMM_WORLD, &taskid);
MPI_Comm_size(MPI_COMM_WORLD, &ntasks);
/*===============================================================*/
/* Get buffsize value from program arguments. */
if ( argc < 2 ){
  if ( taskid == 0 ) printf("Usage: %s buffsize\n", argv[0]);
  MPI_Finalize();
  return EXIT_FAILURE;
}
buffsize=atoi(argv[1]);
/*===============================================================*/
/* Printing out the description of the example. */
if ( taskid == 0 ){
printf("\n\n\n");
printf("##########################################################\n\n");
printf(" Example 6 \n\n");
printf(" Point-to-point Communication: MPI_Isend MPI_Irecv \n\n");
printf(" Vector size: %d\n", buffsize);
printf(" Number of tasks: %d\n\n", ntasks);
printf("##########################################################\n\n");
printf(" --> BEFORE COMMUNICATION <--\n\n");
}
/*=============================================================*/
/* Memory allocation. */
sendbuff=(double *)malloc(sizeof(double)*buffsize);
recvbuff=(double *)malloc(sizeof(double)*buffsize);
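/* (For brevity, the return values of malloc are not checked.)  */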
/*=============================================================*/
/* Vectors and/or matrices initialisation. */
srand((unsigned)time( NULL ) + taskid);
for(i=0;i<buffsize;i++){
sendbuff[i]=(double)rand()/RAND_MAX;
}
/*==============================================================*/
/* Print out before communication. */
sendbuffsum=0.0;
for(i=0;i<buffsize;i++){
sendbuffsum += sendbuff[i];
}
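/* Collect each task's local sum into sendbuffsums on task 0.   */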
MPI_Gather(&sendbuffsum, 1, MPI_DOUBLE,
sendbuffsums, 1, MPI_DOUBLE,
0, MPI_COMM_WORLD);
if(taskid==0){
for(itask=0;itask<ntasks;itask++){
recvtaskid=itask+1;
if(itask==(ntasks-1)) recvtaskid=0;
printf("Task %d : Sum of vector sent to %d = %e\n",
itask, recvtaskid, sendbuffsums[itask]);
}
}
/*===============================================================*/
/* Communication. */
inittime = MPI_Wtime();
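/* Each task sends to taskid+1 and receives from taskid-1, with */
/* wraparound at the ends of the ring; the three branches below */
/* could equivalently be collapsed using dest=(taskid+1)%ntasks */
/* and src=(taskid+ntasks-1)%ntasks.                            */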
if ( taskid == 0 ){
MPI_Isend(sendbuff, buffsize, MPI_DOUBLE,
taskid+1, 0, MPI_COMM_WORLD, &send_request);
MPI_Irecv(recvbuff, buffsize, MPI_DOUBLE,
ntasks-1, MPI_ANY_TAG, MPI_COMM_WORLD, &recv_request);
recvtime = MPI_Wtime();
}
else if( taskid == ntasks-1 ){
MPI_Isend(sendbuff, buffsize, MPI_DOUBLE,
0, 0, MPI_COMM_WORLD, &send_request);
MPI_Irecv(recvbuff, buffsize, MPI_DOUBLE,
taskid-1, MPI_ANY_TAG, MPI_COMM_WORLD, &recv_request);
recvtime = MPI_Wtime();
}
else{
MPI_Isend(sendbuff, buffsize, MPI_DOUBLE,
taskid+1, 0, MPI_COMM_WORLD, &send_request);
MPI_Irecv(recvbuff, buffsize, MPI_DOUBLE,
taskid-1, MPI_ANY_TAG, MPI_COMM_WORLD, &recv_request);
recvtime = MPI_Wtime();
}
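/* MPI_Isend and MPI_Irecv return immediately, so recvtime above*/
/* marks when the calls were posted, not when the data arrived; */
/* the buffers may only be reused or read once the MPI_Wait     */
/* calls below have completed.                                  */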
MPI_Wait(&send_request, &status);
MPI_Wait(&recv_request, &status);
totaltime = MPI_Wtime() - inittime;
/*===============================================================*/
/* Print out after communication. */
recvbuffsum=0.0;
for(i=0;i<buffsize;i++){
recvbuffsum += recvbuff[i];
}
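/* Collect the received sums and the per-task receive times on  */
/* task 0.                                                      */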
MPI_Gather(&recvbuffsum, 1, MPI_DOUBLE,
recvbuffsums, 1, MPI_DOUBLE,
0, MPI_COMM_WORLD);
MPI_Gather(&recvtime, 1, MPI_DOUBLE,
recvtimes, 1, MPI_DOUBLE,
0, MPI_COMM_WORLD);
if(taskid==0){
printf("##########################################################\n\n");
printf(" --> AFTER COMMUNICATION <-- \n\n");
for(itask=0;itask<ntasks;itask++){
printf("Task %d : Sum of received vector= %e : Time=%f seconds\n",
itask, recvbuffsums[itask], recvtimes[itask]);
}
printf("\n");
printf("##########################################################\n\n");
printf(" Communication time : %f seconds\n\n", totaltime);
printf("##########################################################\n\n");
}
/*===============================================================*/
/* Free the allocated memory. */
free(recvbuff);
free(sendbuff);
/*===============================================================*/
/* MPI finalisation. */
MPI_Finalize();
return EXIT_SUCCESS;
}