If a block is connected to another block in a periodic fashion, this subroutine takes care of that boundary condition by exchanging ghost-cell data with the periodic partner block over MPI.
Type | Intent | Optional | Attributes | Name | Description
---|---|---|---|---|---
real(kind=wp) | | | dimension(-2:dims%imx+2,-2:dims%jmx+2,-2:dims%kmx+2,1:dims%n_var) | qp | Store primitive variables at cell centers
type(controltype) | intent(in) | | | control | Control parameters
type(boundarytype) | intent(in) | | | bc | Boundary conditions and fixed values
type(extent) | intent(in) | | | dims | Extent of the domain: imx, jmx, kmx
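A minimal call-site sketch; the surrounding setup is an assumption for illustration, and the convention that `PbcId` is negative for non-periodic faces is inferred from the `>=0` checks in the body:

    ! Hypothetical usage: qp, control, bc, and dims are assumed to be
    ! already populated during solver setup.
    if (any(bc%PbcId >= 0)) then
       call apply_periodic_bc(qp, control, bc, dims)
    end if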
subroutine apply_periodic_bc(qp, control, bc, dims)
!<If a block is connected to another block in periodic
!<fashion, this subroutine will take care of that boundary condition.
implicit none
type(controltype), intent(in) :: control
!< Control parameters
type(extent), intent(in) :: dims
!< Extent of the domain: imx, jmx, kmx
type(boundarytype), intent(in) :: bc
!< Boundary conditions and fixed values
real(wp), dimension(-2:dims%imx+2,-2:dims%jmx+2,-2:dims%kmx+2,1:dims%n_var) :: qp
!< Store primitive variables at cell centers
integer :: i, j, k, n, l
integer :: status(MPI_STATUS_SIZE)
integer :: ierr
integer :: tag = 1
integer :: count = 0   ! initializers imply the SAVE attribute; harmless here since count is re-zeroed before each use
integer :: imx, jmx, kmx, n_var
DebugCall('apply_periodic_boundary_condition')
imx = dims%imx
jmx = dims%jmx
kmx = dims%kmx
n_var = control%n_var
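!Every face below follows the same three-step pattern: pack the
!outermost 'layers' interior cell layers into a send buffer, exchange
!buffers with the periodic partner block through MPI_SENDRECV, and
!unpack the received data into this block's ghost cells.
!--- IMIN ---!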
if(bc%PbcId(1)>=0)then
!collect data
count=0
do n=1,n_var
do l=1,layers
do k=1,kmx-1
do j=1,jmx-1
count=count+1
imin_send_buf(count) = qp(l,j,k,n)
end do
end do
end do
end do
call MPI_SENDRECV(imin_send_buf,ibuf_size, MPI_DOUBLE_PRECISION, bc%PbcId(1),tag,&
imin_recv_buf,ibuf_size, MPI_DOUBLE_PRECISION, bc%PbcId(1),tag,&
MPI_COMM_WORLD,status,ierr)
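! redistribute data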
count=0
do n=1,n_var
do l=1,layers
do k=1,kmx-1
do j=1,jmx-1
count=count+1
qp(1-l,j,k,n) = imin_recv_buf(count)
end do
end do
end do
end do
end if
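!--- IMAX ---!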
if(bc%PbcId(2)>=0)then
!collect data
count=0
do n=1,n_var
do l=1,layers
do k=1,kmx-1
do j=1,jmx-1
count=count+1
imax_send_buf(count) = qp(imx-l,j,k,n)
end do
end do
end do
end do
call MPI_SENDRECV(imax_send_buf,ibuf_size, MPI_DOUBLE_PRECISION, bc%PbcId(2),tag,&
imax_recv_buf,ibuf_size, MPI_DOUBLE_PRECISION, bc%PbcId(2),tag,&
MPI_COMM_WORLD,status,ierr)
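! redistribute data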
count=0
do n=1,n_var
do l=1,layers
do k=1,kmx-1
do j=1,jmx-1
count=count+1
qp(imx+l-1,j,k,n) = imax_recv_buf(count)
end do
end do
end do
end do
end if
!--- JMIN ---!
if(bc%PbcId(3)>=0)then
!collect data
count=0
do n=1,n_var
do l=1,layers
do k=1,kmx-1
do i=1,imx-1
count=count+1
jmin_send_buf(count) = qp(i,l,k,n)
end do
end do
end do
end do
call MPI_SENDRECV(jmin_send_buf,jbuf_size, MPI_DOUBLE_PRECISION, bc%PbcId(3),tag,&
jmin_recv_buf,jbuf_size, MPI_DOUBLE_PRECISION, bc%PbcId(3),tag,&
MPI_COMM_WORLD,status,ierr)
! redistribute data
count=0
do n=1,n_var
do l=1,layers
do k=1,kmx-1
do i=1,imx-1
count=count+1
qp(i,1-l,k,n) = jmin_recv_buf(count)
end do
end do
end do
end do
end if
!--- JMAX ---!
if(bc%PbcId(4)>=0)then
!collect data
count=0
do n=1,n_var
do l=1,layers
do k=1,kmx-1
do i=1,imx-1
count=count+1
jmax_send_buf(count) = qp(i,jmx-l,k,n)
end do
end do
end do
end do
call MPI_SENDRECV(jmax_send_buf,jbuf_size, MPI_DOUBLE_PRECISION, bc%PbcId(4),tag,&
jmax_recv_buf,jbuf_size, MPI_DOUBLE_PRECISION, bc%PbcId(4),tag,&
MPI_COMM_WORLD,status,ierr)
! redistribute data
count=0
do n=1,n_var
do l=1,layers
do k=1,kmx-1
do i=1,imx-1
count=count+1
qp(i,jmx+l-1,k,n) = jmax_recv_buf(count)
end do
end do
end do
end do
end if
!--- KMIN ---!
if(bc%PbcId(5)>=0)then
!collect data
count=0
do n=1,n_var
do l=1,layers
do j=1,jmx-1
do i=1,imx-1
count=count+1
kmin_send_buf(count) = qp(i,j,l,n)
end do
end do
end do
end do
call MPI_SENDRECV(kmin_send_buf,kbuf_size, MPI_DOUBLE_PRECISION, bc%PbcId(5),tag,&
kmin_recv_buf,kbuf_size, MPI_DOUBLE_PRECISION, bc%PbcId(5),tag,&
MPI_COMM_WORLD,status,ierr)
! redistribute data
count=0
do n=1,n_var
do l=1,layers
do j=1,jmx-1
do i=1,imx-1
count=count+1
qp(i,j,1-l,n) = kmin_recv_buf(count)
end do
end do
end do
end do
end if
!--- KMAX ---!
if(bc%PbcId(6)>=0)then
!collect data
count=0
do n=1,n_var
do l=1,layers
do j=1,jmx-1
do i=1,imx-1
count=count+1
kmax_send_buf(count) = qp(i,j,kmx-l,n)
end do
end do
end do
end do
call MPI_SENDRECV(kmax_send_buf,kbuf_size, MPI_DOUBLE_PRECISION, bc%PbcId(6),tag,&
kmax_recv_buf,kbuf_size, MPI_DOUBLE_PRECISION, bc%PbcId(6),tag,&
MPI_COMM_WORLD,status,ierr)
! redistribute data
count=0
do n=1,n_var
do l=1,layers
do j=1,jmx-1
do i=1,imx-1
count=count+1
qp(i,j,kmx+l-1,n) = kmax_recv_buf(count)
end do
end do
end do
end do
end if
end subroutine apply_periodic_bc
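Each face exchange uses MPI_SENDRECV, which pairs the send and the receive in a single call, so two blocks that are periodic partners of each other exchange data without the deadlock risk of separate blocking MPI_SEND/MPI_RECV calls.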