I am learning the MPI one-sided communication introduced in MPI-2 / MPI-3, and I came across an online course page about MPI_Accumulate:

However, MPI_Accumulate only allows a limited set of predefined operations (max, min, sum, product, and so on) and does not support user-defined operations. I would like to know how to implement the equivalent using MPI_Get, synchronization, a local op, and MPI_Put. Are there any tutorials or working code examples in C/C++?

Thanks.
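For reference, this is the kind of call I mean; a minimal sketch assuming a window win that exposes one int per rank on the host rank 0:

// Minimal sketch (assumption: 'win' exposes one int per rank on host rank 0)
int one = 1, myrank;
MPI_Comm_rank(MPI_COMM_WORLD, &myrank);
MPI_Win_lock(MPI_LOCK_SHARED, 0, 0, win);     // passive-target epoch on rank 0
MPI_Accumulate(&one, 1, MPI_INT,              // origin buffer: the value to add
               0, myrank, 1, MPI_INT,         // target: element myrank on rank 0
               MPI_SUM, win);                 // op must be a predefined MPI_Op
MPI_Win_unlock(0, win);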
To test this, I adapted a piece of code from this SO question, in which one-sided communication is used to build an integer counter that is kept in sync across MPI processes. The problem line, which uses MPI_Accumulate, is marked.

The code compiles as-is and returns after about 15 seconds. However, when I try to replace MPI_Accumulate with the equivalent sequence of elementary operations shown in the comment block after the problem line, the compiled program hangs indefinitely.

Can anyone explain what went wrong, and what the correct way to replace MPI_Accumulate is in this case?

P.S. I compile the code with

g++ -std=c++11 -I.. mpistest.cpp -lmpi

and run the binary with

mpiexec -n 4 a.exe

Code:
//adapted from https://stackoverflow.com/questions/4948788/
#include <mpi.h>
#include <stdlib.h>
#include <stdio.h>
#include <thread>
#include <chrono>

struct mpi_counter_t {
    MPI_Win win;
    int hostrank;  //id of the process that hosts the values exposed to all processes
    int rank;      //process id
    int size;      //number of processes
    int val;
    int *hostvals;
};

struct mpi_counter_t *create_counter(int hostrank) {
    struct mpi_counter_t *count;

    count = (struct mpi_counter_t *)malloc(sizeof(struct mpi_counter_t));
    count->hostrank = hostrank;
    MPI_Comm_rank(MPI_COMM_WORLD, &(count->rank));
    MPI_Comm_size(MPI_COMM_WORLD, &(count->size));

    if (count->rank == hostrank) {
        MPI_Alloc_mem(count->size * sizeof(int), MPI_INFO_NULL, &(count->hostvals));
        for (int i=0; i<count->size; i++) count->hostvals[i] = 0;
        MPI_Win_create(count->hostvals, count->size * sizeof(int), sizeof(int),
                       MPI_INFO_NULL, MPI_COMM_WORLD, &(count->win));
    }
    else {
        count->hostvals = NULL;
        MPI_Win_create(count->hostvals, 0, 1,
                       MPI_INFO_NULL, MPI_COMM_WORLD, &(count->win));
    }
    count->val = 0;

    return count;
}
int increment_counter(struct mpi_counter_t *count, int increment) {
    int *vals = (int *)malloc( count->size * sizeof(int) );
    int val;

    MPI_Win_lock(MPI_LOCK_EXCLUSIVE, count->hostrank, 0, count->win);

    for (int i=0; i<count->size; i++) {
        if (i == count->rank) {
            MPI_Accumulate(&increment, 1, MPI_INT, 0, i, 1, MPI_INT, MPI_SUM, count->win); //Problem line: increment hostvals[i] on host
            /* //Question: How to correctly replace the above MPI_Accumulate call with the following sequence? Currently, the following causes the program to hang.
            MPI_Get(&vals[i], 1, MPI_INT, 0, i, 1, MPI_INT, count->win);
            MPI_Win_fence(0,count->win);
            vals[i] += increment;
            MPI_Put(&vals[i], 1, MPI_INT, 0, i, 1, MPI_INT, count->win);
            MPI_Win_fence(0,count->win);
            //*/
        } else {
            MPI_Get(&vals[i], 1, MPI_INT, 0, i, 1, MPI_INT, count->win);
        }
    }

    MPI_Win_unlock(0, count->win);

    //do op part of MPI_Accumulate's work on count->rank
    count->val += increment;
    vals[count->rank] = count->val;

    //return the sum of vals
    val = 0;
    for (int i=0; i<count->size; i++)
        val += vals[i];

    free(vals);
    return val;
}
void delete_counter(struct mpi_counter_t **count) {
    if ((*count)->rank == (*count)->hostrank) {
        MPI_Free_mem((*count)->hostvals);
    }
    MPI_Win_free(&((*count)->win));
    free((*count));
    *count = NULL;

    return;
}

void print_counter(struct mpi_counter_t *count) {
    if (count->rank == count->hostrank) {
        for (int i=0; i<count->size; i++) {
            printf("%2d ", count->hostvals[i]);
        }
        puts("");
    }
}
int main(int argc, char **argv) {
    MPI_Init(&argc, &argv);

    const int WORKITEMS = 50;

    struct mpi_counter_t *c;
    int rank;
    int result = 0;

    c = create_counter(0);

    MPI_Comm_rank(MPI_COMM_WORLD, &rank);
    srand(rank);

    while (result < WORKITEMS) {
        result = increment_counter(c, 1);
        if (result <= WORKITEMS) {
            printf("%d working on item %d...\n", rank, result);
            std::this_thread::sleep_for(std::chrono::seconds(rand() % 2));
        } else {
            printf("%d done\n", rank);
        }
    }

    MPI_Barrier(MPI_COMM_WORLD);
    print_counter(c);
    delete_counter(&c);

    MPI_Finalize();
    return 0;
}
One more question: should I be using MPI_Win_fence here instead of locks?

-- EDIT --

Using lock/unlock in increment_counter as shown below, the program runs but behaves strangely: in the final printout, the master node does all the work. I am still confused.

int increment_counter(struct mpi_counter_t *count, int increment) {
    int *vals = (int *)malloc( count->size * sizeof(int) );
    int val;

    MPI_Win_lock(MPI_LOCK_EXCLUSIVE, count->hostrank, 0, count->win);

    for (int i=0; i<count->size; i++) {
        if (i == count->rank) {
            //MPI_Accumulate(&increment, 1, MPI_INT, 0, i, 1, MPI_INT, MPI_SUM, count->win); //Problem line: increment hostvals[i] on host
            ///* //Question: How to correctly replace the above MPI_Accumulate call with the following sequence? reports that 0 does all the work
            MPI_Get(&vals[i], 1, MPI_INT, 0, i, 1, MPI_INT, count->win);
            MPI_Win_unlock(0, count->win);
            vals[i] += increment;
            MPI_Put(&vals[i], 1, MPI_INT, 0, i, 1, MPI_INT, count->win);
            MPI_Win_lock(MPI_LOCK_EXCLUSIVE, count->hostrank, 0, count->win);
            //*/
        } else {
            MPI_Get(&vals[i], 1, MPI_INT, 0, i, 1, MPI_INT, count->win);
        }
    }

    MPI_Win_unlock(0, count->win);

    //do op part of MPI_Accumulate's work on count->rank
    count->val += increment;
    vals[count->rank] = count->val;

    //return the sum of vals
    val = 0;
    for (int i=0; i<count->size; i++)
        val += vals[i];

    free(vals);
    return val;
}
Best Answer
Implementing Accumulate with Gets and Puts is actually going to be quite painful, especially once you have to deal with derived datatypes and the like. But assuming you are accumulating a single integer and simply want to add a local value into a remote buffer, you can do the following (pseudocode only):
MPI_Win_lock(EXCLUSIVE); /* exclusive needed for accumulate atomicity constraints */
MPI_Get(&remote_data);
MPI_Win_flush(win); /* make sure GET has completed */
new = local_data + remote_data;
MPI_Put(&new);
MPI_Win_unlock();
Your code is incorrect because you drop the exclusive lock after the GET, which breaks atomicity when two processes try to sum into the data at the same time.
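Applied to the increment_counter from the question, the read-modify-write of the calling rank's own slot has to stay inside the one exclusive epoch. A sketch, assuming an MPI-3 library that provides MPI_Win_flush (the displacement and datatype arguments mirror the original code):

int increment_counter(struct mpi_counter_t *count, int increment) {
    int *vals = (int *)malloc(count->size * sizeof(int));
    int val;

    MPI_Win_lock(MPI_LOCK_EXCLUSIVE, count->hostrank, 0, count->win);
    for (int i = 0; i < count->size; i++) {
        if (i == count->rank) {
            /* read-modify-write of hostvals[i], all inside the same exclusive epoch */
            MPI_Get(&vals[i], 1, MPI_INT, count->hostrank, i, 1, MPI_INT, count->win);
            MPI_Win_flush(count->hostrank, count->win);   /* complete the GET before using vals[i] */
            vals[i] += increment;
            MPI_Put(&vals[i], 1, MPI_INT, count->hostrank, i, 1, MPI_INT, count->win);
        } else {
            MPI_Get(&vals[i], 1, MPI_INT, count->hostrank, i, 1, MPI_INT, count->win);
        }
    }
    MPI_Win_unlock(count->hostrank, count->win);          /* completes the PUT and the remaining GETs */

    /* vals[count->rank] already holds the incremented value, so no extra local bookkeeping is needed */
    val = 0;
    for (int i = 0; i < count->size; i++)
        val += vals[i];

    free(vals);
    return val;
}

The key difference from the version in the EDIT is that the lock is never released between the GET and the PUT, so no other process can update hostvals[i] in between.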