-
Notifications
You must be signed in to change notification settings - Fork 758
DRAFT: MPI Fortran tutorial and example code #104
New issue
Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.
By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.
Already on GitHub? Sign in to your account
base: gh-pages
Are you sure you want to change the base?
Changes from 13 commits
748ef4b
9c7546a
c77ed49
4639eaf
9efdbf1
7c21282
f811885
7ccf5be
b5471a6
e1c03bf
b57e61e
e352143
e04c165
a467a28
f23539c
02d216c
File filter
Filter by extension
Conversations
Jump to
Diff view
Diff view
There are no files selected for viewing
| Original file line number | Diff line number | Diff line change |
|---|---|---|
| @@ -0,0 +1,98 @@ | ||
module subs
  ! Helper routines shared by the averaging examples.
  implicit none
contains

  subroutine create_rand_nums(rand_nums, num_elements)
    ! Fill rand_nums with uniform random values in [0, 1).
    !
    ! Uses the standard intrinsic random_number instead of the
    ! non-standard GNU rand() extension, so the code is portable
    ! across compilers. The whole-array call replaces the original
    ! element-by-element loop.
    integer, intent(in)  :: num_elements
    real,    intent(out) :: rand_nums(num_elements)

    call random_number(rand_nums)
  end subroutine create_rand_nums

  function compute_avg(array, num_elements) result(avg)
    ! Return the arithmetic mean of the first num_elements entries
    ! of array. Assumes num_elements >= 1 (callers guarantee this).
    integer, intent(in) :: num_elements
    real,    intent(in) :: array(num_elements)
    real :: avg

    avg = sum(array) / real(num_elements)
  end function compute_avg

end module subs
|
|
||
stephenpcook marked this conversation as resolved.
Outdated
Show resolved
Hide resolved
|
||
program main
  ! Compute the average of a large random array in parallel:
  ! the root scatters equal-sized chunks, every rank averages its
  ! chunk, and MPI_Allgather distributes the partial averages so
  ! every rank can compute (and print) the global average.
  use mpi_f08
  use iso_fortran_env, only: error_unit
  use subs

  implicit none

  ! Buffer length for the command-line argument (was a hard-coded 12).
  integer, parameter :: max_arg_len = 12

  integer :: num_args
  character(max_arg_len) :: arg
  integer :: num_elements_per_proc
  integer :: world_size, world_rank
  real :: sub_avg, avg
  real, allocatable :: rand_nums(:), sub_rand_nums(:), sub_avgs(:)

  num_args = command_argument_count()

  if (num_args .ne. 1) then
    write (error_unit, *) 'Usage: all_avg num_elements_per_proc'
    stop
  end if

  call get_command_argument(1, arg)
  read (arg, *) num_elements_per_proc

  ! Seed the random number generator to get different results each
  ! time. random_seed() is the standard replacement for the
  ! non-standard srand(time()) / rand() extensions, and needs no
  ! "throw away the first value" workaround.
  call random_seed()

  call MPI_Init()

  call MPI_Comm_size(MPI_COMM_WORLD, world_size)
  call MPI_Comm_rank(MPI_COMM_WORLD, world_rank)

  ! Create a random array of elements on the root process. Its total
  ! size will be the number of elements per process times the number
  ! of processes.
  if (world_rank .eq. 0) then
    allocate(rand_nums(num_elements_per_proc * world_size))
    call create_rand_nums(rand_nums, num_elements_per_proc * world_size)
  end if

  allocate(sub_rand_nums(num_elements_per_proc))

  ! MPI_REAL is the Fortran datatype matching default real.
  ! (The original used MPI_FLOAT, which is the C float datatype and
  ! is not valid for Fortran buffers.)
  call MPI_Scatter(rand_nums, num_elements_per_proc, MPI_REAL, sub_rand_nums, &
                   num_elements_per_proc, MPI_REAL, 0, MPI_COMM_WORLD)

  ! Compute the average of your subset.
  sub_avg = compute_avg(sub_rand_nums, num_elements_per_proc)

  ! Gather all partial averages down to all the processes.
  allocate(sub_avgs(world_size))
  call MPI_Allgather(sub_avg, 1, MPI_REAL, sub_avgs, 1, MPI_REAL, MPI_COMM_WORLD)

  ! Now that we have all of the partial averages, compute the
  ! total average of all numbers. Since we are assuming each process
  ! computed an average across an equal amount of elements, this
  ! computation will produce the correct answer.
  avg = compute_avg(sub_avgs, world_size)
  print '("Avg of all elements from proc ", I0, " is ", ES12.5)', world_rank, avg

  ! Clean up.
  if (world_rank .eq. 0) then
    deallocate(rand_nums)
  end if
  deallocate(sub_avgs)
  deallocate(sub_rand_nums)

  call MPI_Barrier(MPI_COMM_WORLD)
  call MPI_Finalize()

end program main
stephenpcook marked this conversation as resolved.
Show resolved
Hide resolved
|
||
| Original file line number | Diff line number | Diff line change |
|---|---|---|
| @@ -0,0 +1,105 @@ | ||
module subs
  ! Helper routines shared by the averaging examples.
  implicit none
contains

  subroutine create_rand_nums(rand_nums, num_elements)
    ! Fill rand_nums with uniform random values in [0, 1).
    !
    ! Uses the standard intrinsic random_number instead of the
    ! non-standard GNU rand() extension, so the code is portable
    ! across compilers. The whole-array call replaces the original
    ! element-by-element loop.
    integer, intent(in)  :: num_elements
    real,    intent(out) :: rand_nums(num_elements)

    call random_number(rand_nums)
  end subroutine create_rand_nums

  function compute_avg(array, num_elements) result(avg)
    ! Return the arithmetic mean of the first num_elements entries
    ! of array. Assumes num_elements >= 1 (callers guarantee this).
    integer, intent(in) :: num_elements
    real,    intent(in) :: array(num_elements)
    real :: avg

    avg = sum(array) / real(num_elements)
  end function compute_avg

end module subs
stephenpcook marked this conversation as resolved.
Outdated
Show resolved
Hide resolved
|
||
|
|
||
program main
  ! Compute the average of a large random array in parallel:
  ! the root scatters equal-sized chunks, every rank averages its
  ! chunk, and MPI_Gather collects the partial averages on the root,
  ! which then prints the global average and a serial cross-check.
  use mpi_f08
  use iso_fortran_env, only: error_unit
  use subs

  implicit none

  ! Buffer length for the command-line argument (was a hard-coded 12).
  integer, parameter :: max_arg_len = 12

  integer :: num_args
  character(max_arg_len) :: arg
  integer :: num_elements_per_proc
  integer :: world_size, world_rank
  real :: sub_avg, avg, original_data_avg
  real, allocatable :: rand_nums(:), sub_rand_nums(:), sub_avgs(:)

  num_args = command_argument_count()

  if (num_args .ne. 1) then
    write (error_unit, *) 'Usage: avg num_elements_per_proc'
    stop
  end if

  call get_command_argument(1, arg)
  read (arg, *) num_elements_per_proc

  ! Seed the random number generator to get different results each
  ! time. random_seed() is the standard replacement for the
  ! non-standard srand(time()) / rand() extensions, and needs no
  ! "throw away the first value" workaround.
  call random_seed()

  call MPI_Init()

  call MPI_Comm_size(MPI_COMM_WORLD, world_size)
  call MPI_Comm_rank(MPI_COMM_WORLD, world_rank)

  ! Create a random array of elements on the root process. Its total
  ! size will be the number of elements per process times the number
  ! of processes.
  if (world_rank .eq. 0) then
    allocate(rand_nums(num_elements_per_proc * world_size))
    call create_rand_nums(rand_nums, num_elements_per_proc * world_size)
  end if

  allocate(sub_rand_nums(num_elements_per_proc))

  ! MPI_REAL is the Fortran datatype matching default real.
  ! (The original used MPI_FLOAT, which is the C float datatype and
  ! is not valid for Fortran buffers.)
  call MPI_Scatter(rand_nums, num_elements_per_proc, MPI_REAL, sub_rand_nums, &
                   num_elements_per_proc, MPI_REAL, 0, MPI_COMM_WORLD)

  ! Compute the average of your subset.
  sub_avg = compute_avg(sub_rand_nums, num_elements_per_proc)

  ! Gather all partial averages down to the root process.
  if (world_rank .eq. 0) then
    allocate(sub_avgs(world_size))
  end if
  call MPI_Gather(sub_avg, 1, MPI_REAL, sub_avgs, 1, MPI_REAL, 0, MPI_COMM_WORLD)

  ! Now that we have all of the partial averages on the root, compute
  ! the total average of all numbers. Since we are assuming each
  ! process computed an average across an equal amount of elements,
  ! this computation will produce the correct answer.
  if (world_rank .eq. 0) then
    avg = compute_avg(sub_avgs, world_size)
    print '("Avg of all elements is ", ES12.5)', avg
    ! Compute the average across the original data for comparison.
    ! BUG FIX: the original printed avg here, so the cross-check
    ! value was never actually displayed.
    original_data_avg = compute_avg(rand_nums, num_elements_per_proc * world_size)
    print '("Avg computed across original data is ", ES12.5)', original_data_avg
  end if

  ! Clean up.
  if (world_rank .eq. 0) then
    deallocate(rand_nums)
    deallocate(sub_avgs)
  end if
  deallocate(sub_rand_nums)

  call MPI_Barrier(MPI_COMM_WORLD)
  call MPI_Finalize()

end program main
| Original file line number | Diff line number | Diff line change |
|---|---|---|
| @@ -0,0 +1,48 @@ | ||
program check_status
  ! Demonstrate MPI_Status: rank 0 sends a message of random length,
  ! rank 1 receives into a maximum-size buffer and queries the status
  ! object (MPI_Get_count, MPI_SOURCE, MPI_TAG) to learn how many
  ! elements actually arrived.
  use mpi_f08

  implicit none

  integer :: world_rank, world_size
  integer, parameter :: MAX_NUMBERS = 100
  integer :: numbers(MAX_NUMBERS)
  integer :: number_amount
  type(MPI_Status) :: recv_status

  real :: r

  call MPI_Init()
  call MPI_Comm_size(MPI_COMM_WORLD, world_size)
  call MPI_Comm_rank(MPI_COMM_WORLD, world_rank)

  ! The payload values are irrelevant to this example (only the count
  ! matters), but initialize the buffer so we never send undefined data.
  numbers = 0

  if (world_rank .eq. 0) then
    ! Pick a random amount of integers to send to process one.
    ! random_seed()/random_number() are the standard replacements for
    ! the non-standard srand(time())/rand() extensions, and need no
    ! "throw away the first value" workaround.
    call random_seed()
    call random_number(r)
    number_amount = int(r * real(MAX_NUMBERS))

    ! Send the amount of integers to process one. MPI_INTEGER is the
    ! Fortran datatype matching default integer; the original used
    ! MPI_INT, which is the C int datatype.
    call MPI_Send(numbers, number_amount, MPI_INTEGER, 1, 9, &
                  MPI_COMM_WORLD)
    print '("0 sent ", I0, " numbers to 1")', number_amount
  else if (world_rank .eq. 1) then
    ! Receive at most MAX_NUMBERS from process zero.
    call MPI_Recv(numbers, MAX_NUMBERS, MPI_INTEGER, 0, 9, &
                  MPI_COMM_WORLD, recv_status)
    ! After receiving the message, check the status to determine how
    ! many numbers were actually received.
    call MPI_Get_count(recv_status, MPI_INTEGER, number_amount)
    ! Print off the amount of numbers, and also print additional
    ! information in the status object.
    print '("1 received ", I0, " numbers from 0. Message source = ", I0, ", tag = ", I0)', &
      number_amount, recv_status%MPI_SOURCE, recv_status%MPI_TAG
  end if

  call MPI_Barrier(MPI_COMM_WORLD)

  call MPI_Finalize()

end program check_status
stephenpcook marked this conversation as resolved.
Show resolved
Hide resolved
|
||
| Original file line number | Diff line number | Diff line change |
|---|---|---|
| @@ -0,0 +1,51 @@ | ||
program main
  ! Demonstrate MPI groups: build a group containing only the
  ! prime-numbered world ranks, create a communicator from it with
  ! MPI_Comm_create_group, and report each process's rank/size in
  ! both communicators.
  !
  ! NOTE: the rank list goes up to 13, so this program must be run
  ! with at least 14 processes; MPI_Group_incl with out-of-range
  ! ranks is erroneous.
  use mpi_f08

  implicit none

  integer :: world_rank, world_size
  type(MPI_Group) :: world_group, prime_group
  type(MPI_Comm) :: prime_comm
  integer, parameter :: n = 7
  integer :: ranks(n)
  integer :: prime_rank, prime_size

  call MPI_Init()

  ! Get the rank and size in the original communicator.
  call MPI_Comm_rank(MPI_COMM_WORLD, world_rank)
  call MPI_Comm_size(MPI_COMM_WORLD, world_size)

  ! Get the group of processes in MPI_COMM_WORLD.
  call MPI_Comm_group(MPI_COMM_WORLD, world_group)

  ranks = [1, 2, 3, 5, 7, 11, 13]

  ! Construct a group containing all of the prime ranks in
  ! world_group. Use the named constant n rather than repeating the
  ! literal 7, so the count always matches the size of ranks.
  call MPI_Group_incl(world_group, n, ranks, prime_group)

  ! Create a new communicator based on the group.
  call MPI_Comm_create_group(MPI_COMM_WORLD, prime_group, 0, prime_comm)

  prime_rank = -1
  prime_size = -1
  ! If this rank isn't in the new communicator, it will be
  ! MPI_COMM_NULL. Using MPI_COMM_NULL for MPI_Comm_rank or
  ! MPI_Comm_size is erroneous.
  if (MPI_COMM_NULL .ne. prime_comm) then
    call MPI_Comm_rank(prime_comm, prime_rank)
    call MPI_Comm_size(prime_comm, prime_size)
  end if

  print '("WORLD RANK/SIZE: ", I0, "/", I0, " --- PRIME RANK/SIZE: ", I0, "/", I0)', &
    world_rank, world_size, prime_rank, prime_size

  call MPI_Group_free(world_group)
  call MPI_Group_free(prime_group)

  if (MPI_COMM_NULL .ne. prime_comm) then
    call MPI_Comm_free(prime_comm)
  end if

  call MPI_Finalize()

end program main
stephenpcook marked this conversation as resolved.
Show resolved
Hide resolved
|
||
| Original file line number | Diff line number | Diff line change |
|---|---|---|
| @@ -0,0 +1,32 @@ | ||
program main
  ! Demonstrate MPI_Comm_split: partition MPI_COMM_WORLD into "row"
  ! communicators of procs_per_row processes each, then report each
  ! process's rank/size in both communicators.
  use mpi_f08

  implicit none

  ! Number of processes per row (was a hard-coded 4 in the color
  ! computation below).
  integer, parameter :: procs_per_row = 4

  integer :: world_rank, world_size
  integer :: color
  type(MPI_Comm) :: row_comm
  integer :: row_rank, row_size

  call MPI_Init()

  ! Get the rank and size in the original communicator.
  call MPI_Comm_rank(MPI_COMM_WORLD, world_rank)
  call MPI_Comm_size(MPI_COMM_WORLD, world_size)

  ! Determine color based on row: ranks 0..procs_per_row-1 form row 0,
  ! the next procs_per_row ranks form row 1, and so on.
  color = world_rank / procs_per_row

  ! Split the communicator based on the color and use the original
  ! rank for ordering within each row.
  call MPI_Comm_split(MPI_COMM_WORLD, color, world_rank, row_comm)

  call MPI_Comm_rank(row_comm, row_rank)
  call MPI_Comm_size(row_comm, row_size)

  print '("WORLD RANK/SIZE: ", I0, "/", I0, " --- ROW RANK/SIZE: ", I0, "/", I0)', &
    world_rank, world_size, row_rank, row_size

  call MPI_Comm_free(row_comm)

  call MPI_Finalize()

end program main
Uh oh!
There was an error while loading. Please reload this page.