Setting up the build environment is straightforward on Linux but somewhat fiddly on Windows. Neither of my two 64-bit machines supports Compaq Visual Fortran; it would install in compatibility mode but compilation still failed, so I switched to a dual-core 32-bit machine running Windows XP. There I installed Compaq Visual Fortran first, then downloaded and installed the Microsoft .NET Framework (using dotnetfx35setup2.exe), and finally downloaded MPICH2 from www.mpich.org/downloads/ and installed it (using mpich2-1.4.1p1-win-ia32.msi). The main configuration step is telling the compiler where MPICH2's include and lib directories are.
Compiling on Linux: $ mpif90 -o matrix matrix_parallel.f
Running on Linux: $ mpirun -np 8 ./matrix
Running from the Windows command line: >mpiexec -np 8 matrix.exe
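If you prefer to compile from the Windows command line instead of the CVF IDE, a rough sketch is given below. It assumes MPICH2 is installed under C:\Program Files\MPICH2 and that CVF's df compiler is on the PATH; the exact switches and the name of the Fortran interface library (fmpich2.lib, or the variant matching your compiler's calling convention) depend on the CVF and MPICH2 versions:
>df matrix_parallel.f /include:"C:\Program Files\MPICH2\include" /link /libpath:"C:\Program Files\MPICH2\lib" fmpich2.lib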
Below is a Fortran program for matrix multiplication that mainly exercises a handful of basic MPI calls. It has been compiled and run under Red Hat Linux and Windows XP. I hope it is useful to others; comments and criticism are welcome.
      program main
      use mpi
      implicit none

      integer TM,TL,TN,ierr
      integer MASTER
      integer FROM_MASTER,FROM_WORKER
      integer status(MPI_STATUS_SIZE)
      parameter (TM=180,TL=150,TN=2000)
      parameter (MASTER=0,FROM_MASTER=1,FROM_WORKER=2)
      integer numtasks,taskid,numworkers,source,dest
      integer mtype,cols,numelemt
      integer AVGCOL,extra,offset,offset1,i,j,k
      double precision a(TM,TL),b(TL,TN),c(TM,TN)

      call MPI_INIT( ierr )
      call MPI_COMM_RANK( MPI_COMM_WORLD, taskid, ierr )
      call MPI_COMM_SIZE( MPI_COMM_WORLD, numtasks, ierr )
c     rank 0 is the master; it needs at least one worker (np >= 2)
      numworkers=numtasks-1
cccccccccccccccccccccccccccccc MASTER cccccccccccccccccccccccccccccc
      IF(taskid.EQ.MASTER) THEN
         WRITE(*,*)'Number of worker tasks= ',numworkers
c        fill the input matrices
         DO i=1,TM,1
            DO j=1,TL,1
               a(i,j)=i+j
            ENDDO
         ENDDO
         DO i=1,TL,1
            DO j=1,TN,1
               b(i,j)=(i-1)*(j-1)
            ENDDO
         ENDDO
c        divide the TN columns of b among the workers; the first
c        'extra' workers get one extra column
         AVGCOL=TN/numworkers
         write(*,*) 'AVGCOL',AVGCOL
         extra=mod(TN,numworkers)
         write(*,*) 'extra',extra
         offset=1
         mtype=FROM_MASTER
         do dest=1,numworkers,1
            IF(dest.LE.extra) THEN
               cols=AVGCOL+1
            ELSE
               cols=AVGCOL
            ENDIF
            WRITE(*,*) 'SEND',cols,'cols to task ',dest
            call MPI_SEND(offset,1,MPI_INTEGER,dest,mtype,
     &                    MPI_COMM_WORLD,ierr)
            call MPI_SEND(cols,1,MPI_INTEGER,dest,mtype,
     &                    MPI_COMM_WORLD,ierr)
c           every worker gets all of a ...
            numelemt=TM*TL
            call MPI_SEND(a(1,1),numelemt,MPI_DOUBLE_PRECISION,dest,
     &                    mtype,MPI_COMM_WORLD,ierr)
c           ... and its own block of cols columns of b
            numelemt=TL*cols
            call MPI_SEND(b(1,offset),numelemt,MPI_DOUBLE_PRECISION,
     &                    dest,mtype,MPI_COMM_WORLD,ierr)
            offset=offset+cols
         enddo
c        collect the result columns from the workers
         mtype=FROM_WORKER
         do i=1,numworkers
            source=i
            call MPI_RECV(offset,1,MPI_INTEGER,source,mtype,
     &                    MPI_COMM_WORLD,status,ierr)
            call MPI_RECV(cols,1,MPI_INTEGER,source,mtype,
     &                    MPI_COMM_WORLD,status,ierr)
            numelemt=TM*cols
            call MPI_RECV(c(1,offset),numelemt,MPI_DOUBLE_PRECISION,
     &                    source,mtype,MPI_COMM_WORLD,status,ierr)
         enddo
c        write(*,*)
c        write(*,100) ((c(i,j),j=1,TN),i=1,TM)
c        write(*,100) c(TM,TN)
 100     FORMAT(1X,4G15.3)
         write(*,*)
      ENDIF
cccccccccccccccccccccccccccccc WORKERS ccccccccccccccccccccccccccccc
      IF(taskid.GT.MASTER) THEN
         mtype=FROM_MASTER
         source=MASTER
c        receive the column offset, the column count, all of a and
c        the assigned block of b (stored in the first cols columns)
         call MPI_RECV(offset,1,MPI_INTEGER,source,mtype,
     &                 MPI_COMM_WORLD,status,ierr)
         offset1=offset
         call MPI_RECV(cols,1,MPI_INTEGER,source,mtype,
     &                 MPI_COMM_WORLD,status,ierr)
         numelemt=TM*TL
         call MPI_RECV(a(1,1),numelemt,MPI_DOUBLE_PRECISION,source,
     &                 mtype,MPI_COMM_WORLD,status,ierr)
         numelemt=TL*cols
         call MPI_RECV(b(1,1),numelemt,MPI_DOUBLE_PRECISION,source,
     &                 mtype,MPI_COMM_WORLD,status,ierr)
c        local multiply: c(1:TM,1:cols) = a * b(:,1:cols)
         do i=1,TM,1
            do j=1,cols
               c(i,j)=0.0
               do k=1,TL
                  c(i,j)=c(i,j)+a(i,k)*b(k,j)
               enddo
            enddo
         enddo
c        send the result block back, tagged with its column offset
         mtype=FROM_WORKER
         call MPI_SEND(offset1,1,MPI_INTEGER,MASTER,mtype,
     &                 MPI_COMM_WORLD,ierr)
         call MPI_SEND(cols,1,MPI_INTEGER,MASTER,mtype,
     &                 MPI_COMM_WORLD,ierr)
         numelemt=TM*cols
         call MPI_SEND(c(1,1),numelemt,MPI_DOUBLE_PRECISION,MASTER,
     &                 mtype,MPI_COMM_WORLD,ierr)
      endif

      call MPI_FINALIZE(ierr)
      end
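As a quick sanity check: with the initialization above, a(i,k)=i+k and b(k,j)=(k-1)*(j-1), so every result element has the closed form c(i,j) = sum over k=1..TL of (i+k)*(k-1)*(j-1). The short serial program below (a standalone sketch, independent of the MPI code) prints the expected value of the corner element c(TM,TN), which can be compared against the output of the commented-out write(*,100) c(TM,TN) line in the master section.
      program check_corner
c     serial reference value for c(TM,TN) of the MPI program above
      implicit none
      integer TM,TL,TN,k
      parameter (TM=180,TL=150,TN=2000)
      double precision expect
      expect=0.0d0
      do k=1,TL
c        a(TM,k)=TM+k and b(k,TN)=(k-1)*(TN-1)
         expect=expect+dble(TM+k)*dble(k-1)*dble(TN-1)
      enddo
      write(*,*) 'expected c(TM,TN) =',expect
      end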