output from the processors. After MPI is initialized in lines 10-12, each proces-
sor computes local values of a and b in lines 16 and 17. The call to mpi_reduce()
in line 23 sums all the loc_b to sum on processor 0 via the mpi_oper equal to
mpi_sum. The call to mpi_reduce() in line 26 computes the product of all
the loc_b to prod on processor 0 via the mpi_oper equal to mpi_prod. These
results are displayed by the print commands in lines 18-20 and 27-30.
MPI/Fortran 9x Code reducmpi.f
1. program reducmpi
2.! Illustrates mpi_reduce.
3. implicit none
4.    include 'mpif.h'
5. real:: a,b,h,loc_a,loc_b,total,sum,prod
6. real, dimension(0:31):: a_list
7. integer:: my_rank,p,n,source,dest,tag,ierr,loc_n
8. integer:: i,status(mpi_status_size)
9. data a,b,n,dest,tag/0.0,100.0,1024,0,50/
10. call mpi_init(ierr)
11. call mpi_comm_rank(mpi_comm_world,my_rank,ierr)
12. call mpi_comm_size(mpi_comm_world,p,ierr)
13.! Each processor has a unique loc_n, loc_a and loc_b.
14. h = (b-a)/n
15. loc_n = n/p
16. loc_a = a+my_rank*loc_n*h
17. loc_b = loc_a + loc_n*h
18. print*,’my_rank =’,my_rank, ’loc_a = ’,lo c_a
19. print*,’my_rank =’,my_rank, ’loc_b = ’,loc_b
20. print*,’my_rank =’,my_rank, ’loc_n = ’,loc_n
21.! mpi_reduce is used to compute the sum of all loc_b
22.! to sum on processor 0.
23.    call mpi_reduce(loc_b,sum,1,mpi_real,mpi_sum,0,&
                       mpi_comm_world,ierr)
24.! mpi_reduce is used to compute the product of all loc_b
25.! to prod on processor 0.
26.    call mpi_reduce(loc_b,prod,1,mpi_real,mpi_prod,0,&
                       mpi_comm_world,ierr)
27. if (my_rank.eq.0) then
28.       print*, 'sum = ',sum
29.       print*, 'product = ',prod
30. end if
31. call mpi_finalize(ierr)
32. end program reducmpi
my_rank = 0 loc_a = 0.0000000000E+00
my_rank = 0 loc_b = 25.00000000
my_rank = 0 loc_n = 256
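The count argument of mpi_reduce need not be 1; when an array is sent, the
reduction operation is applied componentwise. The following is a minimal
sketch, not part of the text's listing, of such an array reduction; the names
reduce_array, loc_vec and glob_vec are illustrative only.

      program reduce_array
!     Minimal sketch: mpi_reduce with count > 1 applies the
!     operation componentwise to an array.
      implicit none
      include 'mpif.h'
      integer:: my_rank,p,ierr,i
      real, dimension(4):: loc_vec,glob_vec
      call mpi_init(ierr)
      call mpi_comm_rank(mpi_comm_world,my_rank,ierr)
      call mpi_comm_size(mpi_comm_world,p,ierr)
!     Each processor fills its local array.
      do i = 1,4
         loc_vec(i) = real(my_rank+i)
      end do
!     Componentwise sums of loc_vec over all processors are
!     stored in glob_vec on processor 0.
      call mpi_reduce(loc_vec,glob_vec,4,mpi_real,mpi_sum,0,&
                      mpi_comm_world,ierr)
      if (my_rank.eq.0) then
         print*, 'componentwise sum = ',glob_vec
      end if
      call mpi_finalize(ierr)
      end program reduce_array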