!
!##################################################################
!##################################################################
!######                                                      ######
!######                SUBROUTINE MPSENDRECV2DEW             ######
!######                                                      ######
!######                     Developed by                     ######
!######     Center for Analysis and Prediction of Storms     ######
!######                University of Oklahoma                ######
!######                                                      ######
!##################################################################
!##################################################################
!

SUBROUTINE mpsendrecv2dew(var,nx,ny,nz,ebc,wbc,stagdim,tem)

!
!-----------------------------------------------------------------------
!
!  PURPOSE:
!
!  Send & receive east/west boundary data between processors to
!  update the fake (ghost/halo) zones.
!
!-----------------------------------------------------------------------
!
!
!  AUTHOR: Yunheng Wang
!  11/19/2003
!
!  MODIFICATION HISTORY:
!
!-----------------------------------------------------------------------
!
!  INPUT :
!
!    nx       Number of grid points in the x-direction (east/west)
!    ny       Number of grid points in the y-direction (north/south)
!    nz       Number of grid points in the vertical
!
!    ebc      East boundary condition
!    wbc      West boundary condition
!
!    stagdim  Dimension of grid staggering:
!              =0, no staggering;
!              =1, staggered in the x-direction (e.g. u);
!              =2, staggered in the y-direction (e.g. v);
!              =3, staggered in the z-direction (e.g. w).
!
!  INPUT & OUTPUT
!
!    var      Variable for which boundaries need updating.
!
!  WORK array
!
!    tem      Work array (dimensioned at least ny x nz x 2).
!
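!  EXAMPLE:
!
!    A typical call might look like the following sketch (the names u
!    and tem1 are illustrative only, not required by this routine):
!
!      REAL :: u(nx,ny,nz), tem1(ny,nz,2)
!      CALL mpsendrecv2dew(u,nx,ny,nz,ebc,wbc,1,tem1)   ! u staggered in x
!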
!-----------------------------------------------------------------------
!
!  Variable declarations.
!
!-----------------------------------------------------------------------

  IMPLICIT NONE

  INTEGER, INTENT(IN) :: nx,ny,nz      ! Number of grid points in 
                                       ! x, y and z directions
  INTEGER, INTENT(IN) :: ebc,wbc
  INTEGER, INTENT(IN) :: stagdim       ! Dimension of grid staggering:
                               !  =0, no staggering;
                               !  =1, staggered in the x-direction (e.g. u);
                               !  =2, staggered in the y-direction (e.g. v);
                               !  =3, staggered in the z-direction (e.g. w).
  REAL, INTENT(INOUT) :: var(nx,ny,nz)
  REAL, INTENT(INOUT) :: tem(ny,nz,2)   ! Work array.


!-----------------------------------------------------------------------
!
!  Include files.
!
!-----------------------------------------------------------------------

  INCLUDE 'mpif.h'
  INCLUDE 'globcst.inc'
  INCLUDE 'mp.inc'

!-----------------------------------------------------------------------
!
!  Local variable declarations.
!
!-----------------------------------------------------------------------

  INTEGER :: mpi_status(MPI_STATUS_SIZE)
  INTEGER :: imstat
  INTEGER :: j, k
  INTEGER :: si,sj,sk

  INTEGER :: source, dest
  INTEGER :: mptag           ! Unique MPI id used for this BC update.
                             ! mptag + tag_w for west boundary
                             ! mptag + tag_e for east boundary

!@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@
!
!  Beginning of executable code...
!
!@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@

  si = 0
  sj = 0
  sk = 0
  IF (stagdim == 1) si = 1
  IF (stagdim == 2) sj = 1
  IF (stagdim == 3) sk = 1

  CALL inctag
  mptag = gentag

!-----------------------------------------------------------------------
!
!  Set the west boundary conditions
!
!-----------------------------------------------------------------------

  !
  ! send destination
  !
  IF(loc_x == nproc_x) THEN       ! last processor in a row
    IF(ebc == 2) THEN             ! periodic boundary
      dest = proc(1+nproc_x*(loc_y-1))
    ELSE
      dest = MPI_PROC_NULL
    END IF
  ELSE
    dest = proc(loc_x+1+nproc_x*(loc_y-1))
  END IF

  ! 
  ! receive from
  !
  IF(loc_x == 1) THEN             ! First processor in a row
    IF(wbc == 2) THEN
      source = proc(nproc_x+nproc_x*(loc_y-1))
    ELSE
      source = MPI_PROC_NULL
    END IF
  ELSE
    source = proc(loc_x-1+nproc_x*(loc_y-1))
  END IF

  !
  ! Pack send buffer, send east slice to update west boundary of 
  ! the east neighbor
  !
  DO k=1,nz           ! full slab sent; the tight bound would be nz-1+sk
    DO j=1,ny         ! tight bound would be ny-1+sj
      tem(j,k,1) = var(nx-2,j,k)
    END DO
  END DO

  CALL mpi_sendrecv(tem(:,:,1),ny*nz,MPI_REAL,dest,  mptag+tag_w,       &
                    tem(:,:,2),ny*nz,MPI_REAL,source,mptag+tag_w,       &
                    MPI_COMM_WORLD,mpi_status,imstat)

  !
  ! Unpack receive buffer, update West boundary data
  !
  IF ( loc_x /= 1 .OR. wbc == 2 ) THEN  ! i.e. .NOT. (loc_x == 1 .AND. wbc /= 2)
    DO k=1,nz         ! full slab received (tight bound nz-1+sk)
      DO j=1,ny       ! (tight bound ny-1+sj)
        var(1,j,k) = tem(j,k,2)
      END DO
    END DO
  END IF

!-----------------------------------------------------------------------
!
!  Set the east boundary conditions
!
!-----------------------------------------------------------------------

  !
  ! send destination
  !
  IF(loc_x == 1) THEN             ! First processor in a row
    IF(wbc == 2) THEN             ! periodic boundary
      dest = proc(nproc_x+nproc_x*(loc_y-1))
    ELSE
      dest = MPI_PROC_NULL
    END IF
  ELSE
    dest = proc(loc_x-1+nproc_x*(loc_y-1))
  END IF

  ! 
  ! receive from
  !
  IF(loc_x == nproc_x) THEN        ! Last processor in a row
    IF(ebc == 2) THEN
      source = proc(1+nproc_x*(loc_y-1))
    ELSE
      source = MPI_PROC_NULL
    END IF
  ELSE
    source = proc(loc_x+1+nproc_x*(loc_y-1))
  END IF

  !
  ! Pack send buffer, west slice for east boundary of 
  ! the west neighbor
  !
  DO k=1,nz             ! full slab sent (tight bound nz-1+sk)
    DO j=1,ny           ! (tight bound ny-1+sj)
      tem(j,k,1) = var(2+si,j,k)
    END DO
  END DO

  CALL mpi_sendrecv(tem(:,:,1),ny*nz,MPI_REAL,dest,  mptag+tag_e,       &
                    tem(:,:,2),ny*nz,MPI_REAL,source,mptag+tag_e,       &
                    MPI_COMM_WORLD,mpi_status,imstat)

  !
  ! Unpack receive buffer, update east boundary data
  !
  IF ( loc_x /= nproc_x .OR. ebc == 2 )  THEN
                              ! i.e. .NOT. (loc_x == nproc_x .AND. ebc /= 2)
    DO k=1,nz         ! full slab received (tight bound nz-1+sk)
      DO j=1,ny       ! (tight bound ny-1+sj)
        var(nx-1+si,j,k) = tem(j,k,2)
      END DO
    END DO
  END IF

  RETURN
END SUBROUTINE mpsendrecv2dew
!
!##################################################################
!##################################################################
!######                                                      ######
!######                SUBROUTINE MPSENDRECV2DNS             ######
!######                                                      ######
!######                     Developed by                     ######
!######     Center for Analysis and Prediction of Storms     ######
!######                University of Oklahoma                ######
!######                                                      ######
!##################################################################
!##################################################################
!

SUBROUTINE mpsendrecv2dns(var,nx,ny,nz,nbc,sbc,stagdim,tem)

!
!-----------------------------------------------------------------------
!
!  PURPOSE:
!
!  Send & receive north/south boundary data between processors to 
!  update the fake zones.
!
!-----------------------------------------------------------------------
!
!
!  AUTHOR: Yunheng Wang
!  11/19/2003
!
!  MODIFICATION HISTORY:
!
!-----------------------------------------------------------------------
!
!  INPUT :
!
!    nx       Number of grid points in the x-direction (east/west)
!    ny       Number of grid points in the y-direction (north/south)
!    nz       Number of grid points in the vertical
!
!    nbc      North boundary condition
!    sbc      South boundary condition
!
!    stagdim  Dimension of grid staggering:
!              =0, no staggering;
!              =1, staggered in the x-direction (e.g. u);
!              =2, staggered in the y-direction (e.g. v);
!              =3, staggered in the z-direction (e.g. w).
!
!  INPUT & OUTPUT:
!
!    var      Variable for which boundaries need updating.
!
!    tem      Work array (dimensioned at least nx x nz x 2).
!
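!  EXAMPLE:
!
!    A typical call might look like the following sketch (the names v
!    and tem1 are illustrative only):
!
!      REAL :: v(nx,ny,nz), tem1(nx,nz,2)
!      CALL mpsendrecv2dns(v,nx,ny,nz,nbc,sbc,2,tem1)   ! v staggered in y
!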
!-----------------------------------------------------------------------
!
!  Variable declarations.
!
!-----------------------------------------------------------------------

  IMPLICIT NONE

  INTEGER, INTENT(IN) :: nx,ny,nz      ! Number of grid points in 
                                       ! x, y and z directions
  INTEGER, INTENT(IN) :: nbc,sbc
  INTEGER, INTENT(IN) :: stagdim       ! Dimension of grid staggering:
                               !  =0, no staggering;
                               !  =1, staggered in the x-direction (e.g. u);
                               !  =2, staggered in the y-direction (e.g. v);
                               !  =3, staggered in the z-direction (e.g. w).
  REAL, INTENT(INOUT) :: var(nx,ny,nz)
  REAL, INTENT(INOUT) :: tem(nx,nz,2)   ! Work array.

!-----------------------------------------------------------------------
!
!  Include files.
!
!-----------------------------------------------------------------------

  INCLUDE 'mpif.h'
  INCLUDE 'globcst.inc'
  INCLUDE 'mp.inc'

!-----------------------------------------------------------------------
!
!  Local variable declarations.
!
!-----------------------------------------------------------------------

  INTEGER :: mpi_status(MPI_STATUS_SIZE)
  INTEGER :: imstat
  INTEGER :: i,k
  INTEGER :: si,sj,sk

  INTEGER :: source, dest
  INTEGER :: mptag           ! Unique MPI id used for this BC update.
                             ! mptag + tag_n for north boundary
                             ! mptag + tag_s for south boundary

!@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@
!
!  Beginning of executable code...
!
!@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@

  si = 0
  sj = 0
  sk = 0
  IF (stagdim == 1) si = 1
  IF (stagdim == 2) sj = 1
  IF (stagdim == 3) sk = 1

  CALL inctag
  mptag = gentag

!-----------------------------------------------------------------------
!
!  Set the north boundary conditions
!
!-----------------------------------------------------------------------

  !
  ! send destination
  !
  IF(loc_y == 1) THEN             ! the southernmost processor in a column
    IF(sbc == 2) THEN             ! periodic boundary
      dest = proc(loc_x+nproc_x*(nproc_y-1))
    ELSE
      dest = MPI_PROC_NULL
    END IF
  ELSE
    dest = proc(loc_x+nproc_x*(loc_y-2))
  END IF

  ! 
  ! receive from
  !
  IF(loc_y == nproc_y) THEN       ! The northernmost processor in a column
    IF(nbc == 2) THEN
      source = proc(loc_x)
    ELSE
      source = MPI_PROC_NULL
    END IF
  ELSE
    source = proc(loc_x+nproc_x*loc_y)
  END IF

  !
  ! Pack send buffer, send south slice to update north boundary of 
  ! the south neighbor
  !
  DO k=1,nz         ! full slab sent (tight bound nz-1+sk)
    DO i=1,nx       ! (tight bound nx-1+si)
      tem(i,k,1) = var(i,2+sj,k)
    END DO
  END DO

  CALL mpi_sendrecv(tem(:,:,1),nx*nz,MPI_REAL,dest,  mptag+tag_n,       &
                    tem(:,:,2),nx*nz,MPI_REAL,source,mptag+tag_n,       &
                    MPI_COMM_WORLD,mpi_status,imstat)

  !
  ! Unpack receive buffer, update north boundary data
  !
  IF ( loc_y /= nproc_y .OR. nbc == 2 )  THEN
                               ! i.e. .NOT. (loc_y == nproc_y .AND. nbc /= 2)
    DO k=1,nz      ! full slab received (tight bound nz-1+sk)
      DO i=1,nx    ! (tight bound nx-1+si)
        var(i,ny-1+sj,k) = tem(i,k,2)
      END DO
    END DO
  END IF

!-----------------------------------------------------------------------
!
!  Set the south boundary conditions
!
!-----------------------------------------------------------------------

  !
  ! send destination
  !
  IF(loc_y == nproc_y) THEN       ! The northernmost processor
    IF(nbc == 2) THEN             ! periodic boundary
      dest = proc(loc_x)
    ELSE
      dest = MPI_PROC_NULL
    END IF
  ELSE
    dest = proc(loc_x+nproc_x*loc_y)
  END IF

  ! 
  ! receive from
  !
  IF(loc_y == 1) THEN            ! The southernmost processor
    IF(sbc == 2) THEN
      source = proc(loc_x+nproc_x*(nproc_y-1))
    ELSE
      source = MPI_PROC_NULL
    END IF
  ELSE
    source = proc(loc_x+nproc_x*(loc_y-2))
  END IF

  !
  ! Pack send buffer, north slice for south boundary of 
  ! the north neighbor
  !
  DO k=1,nz     ! full slab sent (tight bound nz-1+sk)
    DO i=1,nx   ! (tight bound nx-1+si)
      tem(i,k,1) = var(i,ny-2,k)
    END DO
  END DO

  CALL mpi_sendrecv(tem(:,:,1),nx*nz,MPI_REAL,dest,  mptag+tag_s,       &
                    tem(:,:,2),nx*nz,MPI_REAL,source,mptag+tag_s,       &
                    MPI_COMM_WORLD,mpi_status,imstat)

  !
  ! Unpack receive buffer, update south boundary data
  !
  IF ( loc_y /= 1 .OR. sbc == 2 )  THEN
                                    ! i.e. .NOT. (loc_y == 1 .AND. sbc /= 2)
    DO k=1,nz     ! full slab received (tight bound nz-1+sk)
      DO i=1,nx   ! (tight bound nx-1+si)
        var(i,1,k) = tem(i,k,2)
      END DO
    END DO
  END IF

  RETURN
END SUBROUTINE mpsendrecv2dns
!
!##################################################################
!##################################################################
!######                                                      ######
!######                SUBROUTINE MPSENDRECV1DEW             ######
!######                                                      ######
!######                     Developed by                     ######
!######     Center for Analysis and Prediction of Storms     ######
!######                University of Oklahoma                ######
!######                                                      ######
!##################################################################
!##################################################################
!

SUBROUTINE mpsendrecv1dew(var,nx,ny,ebc,wbc,stagdim,tem)

!
!-----------------------------------------------------------------------
!
!  PURPOSE:
!
!  Send & receive east/west boundary 1D data between processors to 
!  update the fake zones.
!
!-----------------------------------------------------------------------
!
!
!  AUTHOR: Yunheng Wang
!  11/24/2003
!
!  MODIFICATION HISTORY:
!
!-----------------------------------------------------------------------
!
!  INPUT :
!
!    nx       Number of grid points in the x-direction (east/west)
!    ny       Number of grid points in the y-direction (north/south)
!
!    ebc      East boundary condition
!    wbc      West boundary condition
!
!    stagdim  Dimension of grid staggering:
!              =0, no staggering;
!              =1, staggered in the x-direction (e.g. u);
!              =2, staggered in the y-direction (e.g. v);
!
!  INPUT & OUTPUT
!
!    var      Variable for which boundaries need updating.
!
!  WORK array
!
!    tem      Work array (dimensioned at least ny x 2).
!
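!  EXAMPLE:
!
!    Hypothetical usage for an unstaggered 2D field (names illustrative):
!
!      REAL :: hterain(nx,ny), tem1(ny,2)
!      CALL mpsendrecv1dew(hterain,nx,ny,ebc,wbc,0,tem1)
!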
!-----------------------------------------------------------------------
!
!  Variable declarations.
!
!-----------------------------------------------------------------------

  IMPLICIT NONE

  INTEGER, INTENT(IN) :: nx,ny         ! Number of grid points in 
                                       ! x and y directions
  INTEGER, INTENT(IN) :: ebc,wbc
  INTEGER, INTENT(IN) :: stagdim       ! Dimension of grid staggering:
                               !  =0, no staggering;
                               !  =1, staggered in the x-direction (e.g. u);
                               !  =2, staggered in the y-direction (e.g. v);
  REAL, INTENT(INOUT) :: var(nx,ny)
  REAL, INTENT(INOUT) :: tem(ny,2)   ! Work array.


!-----------------------------------------------------------------------
!
!  Include files.
!
!-----------------------------------------------------------------------

  INCLUDE 'mpif.h'
  INCLUDE 'globcst.inc'
  INCLUDE 'mp.inc'

!-----------------------------------------------------------------------
!
!  Local variable declarations.
!
!-----------------------------------------------------------------------

  INTEGER :: mpi_status(MPI_STATUS_SIZE)
  INTEGER :: imstat
  INTEGER :: j
  INTEGER :: si,sj

  INTEGER :: source, dest
  INTEGER :: mptag           ! Unique MPI id used for this BC update.
                             ! mptag + tag_w for west boundary
                             ! mptag + tag_e for east boundary

!@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@
!
!  Beginning of executable code...
!
!@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@

  si = 0
  sj = 0
  IF (stagdim == 1) si = 1
  IF (stagdim == 2) sj = 1

  CALL inctag
  mptag = gentag

!-----------------------------------------------------------------------
!
!  Set the west boundary conditions
!
!-----------------------------------------------------------------------

  !
  ! send destination
  !
  IF(loc_x == nproc_x) THEN       ! last processor in a row
    IF(ebc == 2) THEN             ! periodic boundary
      dest = proc(1+nproc_x*(loc_y-1))
    ELSE
      dest = MPI_PROC_NULL
    END IF
  ELSE
    dest = proc(loc_x+1+nproc_x*(loc_y-1))
  END IF

  ! 
  ! receive from
  !
  IF(loc_x == 1) THEN             ! First processor in a row
    IF(wbc == 2) THEN
      source = proc(nproc_x+nproc_x*(loc_y-1))
    ELSE
      source = MPI_PROC_NULL
    END IF
  ELSE
    source = proc(loc_x-1+nproc_x*(loc_y-1))
  END IF

  !
  ! Pack send buffer, send east slice to update west boundary of 
  ! the east neighbor
  !
  DO j=1,ny-1+sj
    tem(j,1) = var(nx-2,j)
  END DO

  CALL mpi_sendrecv(tem(:,1),ny,MPI_REAL,dest,  mptag+tag_w,       &
                    tem(:,2),ny,MPI_REAL,source,mptag+tag_w,       &
                    MPI_COMM_WORLD,mpi_status,imstat)

  !
  ! Unpack receive buffer, update West boundary data
  !
  IF ( loc_x /= 1 .OR. wbc == 2 ) THEN  ! i.e. .NOT. (loc_x == 1 .AND. wbc /= 2)
    DO j=1,ny-1+sj
      var(1,j) = tem(j,2)
    END DO
  END IF

!-----------------------------------------------------------------------
!
!  Set the east boundary conditions
!
!-----------------------------------------------------------------------

  !
  ! send destination
  !
  IF(loc_x == 1) THEN             ! First processor in a row
    IF(wbc == 2) THEN             ! periodic boundary
      dest = proc(nproc_x+nproc_x*(loc_y-1))
    ELSE
      dest = MPI_PROC_NULL
    END IF
  ELSE
    dest = proc(loc_x-1+nproc_x*(loc_y-1))
  END IF

  ! 
  ! receive from
  !
  IF(loc_x == nproc_x) THEN        ! Last processor in a row
    IF(ebc == 2) THEN
      source = proc(1+nproc_x*(loc_y-1))
    ELSE
      source = MPI_PROC_NULL
    END IF
  ELSE
    source = proc(loc_x+1+nproc_x*(loc_y-1))
  END IF

  !
  ! Pack send buffer, west slice for east boundary of 
  ! the west neighbor
  !
  DO j=1,ny-1+sj
    tem(j,1) = var(2+si,j)
  END DO

  CALL mpi_sendrecv(tem(:,1),ny,MPI_REAL,dest,  mptag+tag_e,       &
                    tem(:,2),ny,MPI_REAL,source,mptag+tag_e,       &
                    MPI_COMM_WORLD,mpi_status,imstat)

  !
  ! Unpack receive buffer, update east boundary data
  !
  IF ( loc_x /= nproc_x .OR. ebc == 2 )  THEN
                              ! i.e. .NOT. (loc_x == nproc_x .AND. ebc /= 2)
    DO j=1,ny-1+sj
      var(nx-1+si,j) = tem(j,2)
    END DO
  END IF

  RETURN
END SUBROUTINE mpsendrecv1dew
!
!##################################################################
!##################################################################
!######                                                      ######
!######                SUBROUTINE MPSENDRECV1DNS             ######
!######                                                      ######
!######                     Developed by                     ######
!######     Center for Analysis and Prediction of Storms     ######
!######                University of Oklahoma                ######
!######                                                      ######
!##################################################################
!##################################################################
!

SUBROUTINE mpsendrecv1dns(var,nx,ny,nbc,sbc,stagdim,tem)

!
!-----------------------------------------------------------------------
!
!  PURPOSE:
!
!  Send & receive north/south boundary data between processors to 
!  update the fake zones.
!
!-----------------------------------------------------------------------
!
!
!  AUTHOR: Yunheng Wang
!  11/24/2003
!
!  MODIFICATION HISTORY:
!
!-----------------------------------------------------------------------
!
!  INPUT :
!
!    nx       Number of grid points in the x-direction (east/west)
!    ny       Number of grid points in the y-direction (north/south)
!
!    nbc      North boundary condition
!    sbc      South boundary condition
!
!    stagdim  Dimension of grid staggering:
!              =0, no staggering;
!              =1, staggered in the x-direction (e.g. u);
!              =2, staggered in the y-direction (e.g. v);
!              =3, staggered in the z-direction (e.g. w).
!
!  INPUT & OUTPUT:
!
!    var      Variable for which boundaries need updating.
!
!    tem      Work array (dimensioned at least nx x 2).
!
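!  EXAMPLE:
!
!    Hypothetical usage for an unstaggered 2D field (names illustrative):
!
!      REAL :: hterain(nx,ny), tem1(nx,2)
!      CALL mpsendrecv1dns(hterain,nx,ny,nbc,sbc,0,tem1)
!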
!-----------------------------------------------------------------------
!
!  Variable declarations.
!
!-----------------------------------------------------------------------

  IMPLICIT NONE

  INTEGER, INTENT(IN) :: nx,ny         ! Number of grid points in 
                                       ! x, and y directions
  INTEGER, INTENT(IN) :: nbc,sbc
  INTEGER, INTENT(IN) :: stagdim       ! Dimension of grid staggering:
                               !  =0, no staggering;
                               !  =1, staggered in the x-direction (e.g. u);
                               !  =2, staggered in the y-direction (e.g. v);
  REAL, INTENT(INOUT) :: var(nx,ny)
  REAL, INTENT(INOUT) :: tem(nx,2)   ! Work array.

!-----------------------------------------------------------------------
!
!  Include files.
!
!-----------------------------------------------------------------------

  INCLUDE 'mpif.h'
  INCLUDE 'globcst.inc'
  INCLUDE 'mp.inc'

!-----------------------------------------------------------------------
!
!  Local variable declarations.
!
!-----------------------------------------------------------------------

  INTEGER :: mpi_status(MPI_STATUS_SIZE)
  INTEGER :: imstat
  INTEGER :: i
  INTEGER :: si,sj

  INTEGER :: source, dest
  INTEGER :: mptag           ! Unique MPI id used for this BC update.
                             ! mptag + tag_n for north boundary
                             ! mptag + tag_s for south boundary

!@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@
!
!  Beginning of executable code...
!
!@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@

  si = 0
  sj = 0
  IF (stagdim == 1) si = 1
  IF (stagdim == 2) sj = 1

  CALL inctag
  mptag = gentag

!-----------------------------------------------------------------------
!
!  Set the north boundary conditions
!
!-----------------------------------------------------------------------

  !
  ! send destination
  !
  IF(loc_y == 1) THEN             ! the southernmost processor in a column
    IF(sbc == 2) THEN             ! periodic boundary
      dest = proc(loc_x+nproc_x*(nproc_y-1))
    ELSE
      dest = MPI_PROC_NULL
    END IF
  ELSE
    dest = proc(loc_x+nproc_x*(loc_y-2))
  END IF

  ! 
  ! receive from
  !
  IF(loc_y == nproc_y) THEN       ! The northernmost processor in a column
    IF(nbc == 2) THEN
      source = proc(loc_x)
    ELSE
      source = MPI_PROC_NULL
    END IF
  ELSE
    source = proc(loc_x+nproc_x*loc_y)
  END IF

  !
  ! Pack send buffer, send south slice to update north boundary of 
  ! the south neighbor
  !
  DO i=1,nx-1+si
    tem(i,1) = var(i,2+sj)
  END DO

  CALL mpi_sendrecv(tem(:,1),nx,MPI_REAL,dest,  mptag+tag_n,       &
                    tem(:,2),nx,MPI_REAL,source,mptag+tag_n,       &
                    MPI_COMM_WORLD,mpi_status,imstat)

  !
  ! Unpack receive buffer, update north boundary data
  !
  IF ( loc_y /= nproc_y .OR. nbc == 2 )  THEN
                               ! i.e. .NOT. (loc_y == nproc_y .AND. nbc /= 2)
    DO i=1,nx-1+si
      var(i,ny-1+sj) = tem(i,2)
    END DO
  END IF

!-----------------------------------------------------------------------
!
!  Set the south boundary conditions
!
!-----------------------------------------------------------------------

  !
  ! send destination
  !
  IF(loc_y == nproc_y) THEN       ! The northernmost processor
    IF(nbc == 2) THEN             ! periodic boundary
      dest = proc(loc_x)
    ELSE
      dest = MPI_PROC_NULL
    END IF
  ELSE
    dest = proc(loc_x+nproc_x*loc_y)
  END IF

  ! 
  ! receive from
  !
  IF(loc_y == 1) THEN            ! The southernmost processor
    IF(sbc == 2) THEN
      source = proc(loc_x+nproc_x*(nproc_y-1))
    ELSE
      source = MPI_PROC_NULL
    END IF
  ELSE
    source = proc(loc_x+nproc_x*(loc_y-2))
  END IF

  !
  ! Pack send buffer, north slice for south boundary of 
  ! the north neighbor
  !
  DO i=1,nx-1+si
    tem(i,1) = var(i,ny-2)
  END DO

  CALL mpi_sendrecv(tem(:,1),nx,MPI_REAL,dest,  mptag+tag_s,       &
                    tem(:,2),nx,MPI_REAL,source,mptag+tag_s,       &
                    MPI_COMM_WORLD,mpi_status,imstat)

  !
  ! Unpack receive buffer, update south boundary data
  !
  IF ( loc_y /= 1 .OR. sbc == 2 )  THEN
                                    ! i.e. .NOT. (loc_y == 1 .AND. sbc /= 2)
    DO i=1,nx-1+si
      var(i,1) = tem(i,2)
    END DO
  END IF

  RETURN
END SUBROUTINE mpsendrecv1dns
!
!##################################################################
!##################################################################
!######                                                      ######
!######                SUBROUTINE MPSENDRECV1DIEW            ######
!######                                                      ######
!######                     Developed by                     ######
!######     Center for Analysis and Prediction of Storms     ######
!######                University of Oklahoma                ######
!######                                                      ######
!##################################################################
!##################################################################
!

SUBROUTINE mpsendrecv1diew(var,nx,ny,ebc,wbc,stagdim,tem)

!
!-----------------------------------------------------------------------
!
!  PURPOSE:
!
!  Send & receive east/west boundary 1D data between processors to 
!  update the fake zones.
!
!-----------------------------------------------------------------------
!
!
!  AUTHOR: Yunheng Wang
!  11/24/2003
!
!  MODIFICATION HISTORY:
!
!-----------------------------------------------------------------------
!
!  INPUT :
!
!    nx       Number of grid points in the x-direction (east/west)
!    ny       Number of grid points in the y-direction (north/south)
!
!    ebc      East boundary condition
!    wbc      West boundary condition
!
!    stagdim  Dimension of grid staggering:
!              =0, no staggering;
!              =1, staggered in the x-direction (e.g. u);
!              =2, staggered in the y-direction (e.g. v);
!
!  INPUT & OUTPUT
!
!    var      Variable for which boundaries need updating.
!
!  WORK array
!
!    tem      Work array (dimensioned at least ny x 2).
!
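!  EXAMPLE:
!
!    Hypothetical usage for a 2D INTEGER field (names illustrative):
!
!      INTEGER :: soiltyp(nx,ny), item(ny,2)
!      CALL mpsendrecv1diew(soiltyp,nx,ny,ebc,wbc,0,item)
!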
!-----------------------------------------------------------------------
!
!  Variable declarations.
!
!-----------------------------------------------------------------------

  IMPLICIT NONE

  INTEGER, INTENT(IN) :: nx,ny         ! Number of grid points in 
                                       ! x and y directions
  INTEGER, INTENT(IN) :: ebc,wbc
  INTEGER, INTENT(IN) :: stagdim       ! Dimension of grid staggering:
                               !  =0, no staggering;
                               !  =1, staggered in the x-direction (e.g. u);
                               !  =2, staggered in the y-direction (e.g. v);
  INTEGER, INTENT(INOUT) :: var(nx,ny)
  INTEGER, INTENT(INOUT) :: tem(ny,2)   ! Work array.


!-----------------------------------------------------------------------
!
!  Include files.
!
!-----------------------------------------------------------------------

  INCLUDE 'mpif.h'
  INCLUDE 'globcst.inc'
  INCLUDE 'mp.inc'

!-----------------------------------------------------------------------
!
!  Local variable declarations.
!
!-----------------------------------------------------------------------

  INTEGER :: mpi_status(MPI_STATUS_SIZE)
  INTEGER :: imstat
  INTEGER :: j
  INTEGER :: si,sj

  INTEGER :: source, dest
  INTEGER :: mptag           ! Unique MPI id used for this BC update.
                             ! mptag + tag_w for west boundary
                             ! mptag + tag_e for east boundary

!@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@
!
!  Beginning of executable code...
!
!@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@

  si = 0
  sj = 0
  IF (stagdim == 1) si = 1
  IF (stagdim == 2) sj = 1

  CALL inctag
  mptag = gentag

!-----------------------------------------------------------------------
!
!  Set the west boundary conditions
!
!-----------------------------------------------------------------------

  !
  ! send destination
  !
  IF(loc_x == nproc_x) THEN       ! last processor in a row
    IF(ebc == 2) THEN             ! periodic boundary
      dest = proc(1+nproc_x*(loc_y-1))
    ELSE
      dest = MPI_PROC_NULL
    END IF
  ELSE
    dest = proc(loc_x+1+nproc_x*(loc_y-1))
  END IF

  ! 
  ! receive from
  !
  IF(loc_x == 1) THEN             ! First processor in a row
    IF(wbc == 2) THEN
      source = proc(nproc_x+nproc_x*(loc_y-1))
    ELSE
      source = MPI_PROC_NULL
    END IF
  ELSE
    source = proc(loc_x-1+nproc_x*(loc_y-1))
  END IF

  !
  ! Pack send buffer, send east slice to update west boundary of 
  ! the east neighbor
  !
  DO j=1,ny-1+sj
    tem(j,1) = var(nx-2,j)
  END DO

  CALL mpi_sendrecv(tem(:,1),ny,MPI_INTEGER,dest,  mptag+tag_w,       &
                    tem(:,2),ny,MPI_INTEGER,source,mptag+tag_w,       &
                    MPI_COMM_WORLD,mpi_status,imstat)

  !
  ! Unpack receive buffer, update West boundary data
  !
  IF ( loc_x /= 1 .OR. wbc == 2 ) THEN  ! i.e. .NOT. (loc_x == 1 .AND. wbc /= 2)
    DO j=1,ny-1+sj
      var(1,j) = tem(j,2)
    END DO
  END IF

!-----------------------------------------------------------------------
!
!  Set the east boundary conditions
!
!-----------------------------------------------------------------------

  !
  ! send destination
  !
  IF(loc_x == 1) THEN             ! First processor in a row
    IF(wbc == 2) THEN             ! periodic boundary
      dest = proc(nproc_x+nproc_x*(loc_y-1))
    ELSE
      dest = MPI_PROC_NULL
    END IF
  ELSE
    dest = proc(loc_x-1+nproc_x*(loc_y-1))
  END IF

  ! 
  ! receive from
  !
  IF(loc_x == nproc_x) THEN        ! Last processor in a row
    IF(ebc == 2) THEN
      source = proc(1+nproc_x*(loc_y-1))
    ELSE
      source = MPI_PROC_NULL
    END IF
  ELSE
    source = proc(loc_x+1+nproc_x*(loc_y-1))
  END IF

  !
  ! Pack send buffer, west slice for east boundary of 
  ! the west neighbor
  !
  DO j=1,ny-1+sj
    tem(j,1) = var(2+si,j)
  END DO

  CALL mpi_sendrecv(tem(:,1),ny,MPI_INTEGER,dest,  mptag+tag_e,       &
                    tem(:,2),ny,MPI_INTEGER,source,mptag+tag_e,       &
                    MPI_COMM_WORLD,mpi_status,imstat)

  !
  ! Unpack receive buffer, update east boundary data
  !
  IF ( loc_x /= nproc_x .OR. ebc == 2 )  THEN
                              ! i.e. .NOT. (loc_x == nproc_x .AND. ebc /= 2)
    DO j=1,ny-1+sj
      var(nx-1+si,j) = tem(j,2)
    END DO
  END IF

  RETURN
END SUBROUTINE mpsendrecv1diew
!
!##################################################################
!##################################################################
!######                                                      ######
!######                SUBROUTINE MPSENDRECV1DINS            ######
!######                                                      ######
!######                     Developed by                     ######
!######     Center for Analysis and Prediction of Storms     ######
!######                University of Oklahoma                ######
!######                                                      ######
!##################################################################
!##################################################################
!

SUBROUTINE mpsendrecv1dins(var,nx,ny,nbc,sbc,stagdim,tem)

!
!-----------------------------------------------------------------------
!
!  PURPOSE:
!
!  Send & receive north/south boundary data between processors to 
!  update the fake zones.
!
!-----------------------------------------------------------------------
!
!
!  AUTHOR: Yunheng Wang
!  11/24/2003
!
!  MODIFICATION HISTORY:
!
!-----------------------------------------------------------------------
!
!  INPUT :
!
!    nx       Number of grid points in the x-direction (east/west)
!    ny       Number of grid points in the y-direction (north/south)
!
!    nbc      North boundary condition
!    sbc      South boundary condition
!
!    stagdim  Dimension of grid staggering:
!              =0, no staggering;
!              =1, staggered in the x-direction (e.g. u);
!              =2, staggered in the y-direction (e.g. v);
!              =3, staggered in the z-direction (e.g. w).
!
!  INPUT & OUTPUT:
!
!    var      Variable for which boundaries need updating.
!
!    tem      Work array (dimensioned at least nx x 2).
!
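!  EXAMPLE:
!
!    Hypothetical usage for a 2D INTEGER field (names illustrative):
!
!      INTEGER :: soiltyp(nx,ny), item(nx,2)
!      CALL mpsendrecv1dins(soiltyp,nx,ny,nbc,sbc,0,item)
!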
!-----------------------------------------------------------------------
!
!  Variable declarations.
!
!-----------------------------------------------------------------------

  IMPLICIT NONE

  INTEGER, INTENT(IN) :: nx,ny         ! Number of grid points in 
                                       ! x, and y directions
  INTEGER, INTENT(IN) :: nbc,sbc
  INTEGER, INTENT(IN) :: stagdim       ! Dimension of grid staggering:
                               !  =0, no staggering;
                               !  =1, staggered in the x-direction (e.g. u);
                               !  =2, staggered in the y-direction (e.g. v);
  INTEGER, INTENT(INOUT) :: var(nx,ny)
  INTEGER, INTENT(INOUT) :: tem(nx,2)   ! Work array.

!-----------------------------------------------------------------------
!
!  Include files.
!
!-----------------------------------------------------------------------

  INCLUDE 'mpif.h'
  INCLUDE 'globcst.inc'
  INCLUDE 'mp.inc'

!-----------------------------------------------------------------------
!
!  Local variable declarations.
!
!-----------------------------------------------------------------------

  INTEGER :: mpi_status(MPI_STATUS_SIZE)
  INTEGER :: imstat
  INTEGER :: i
  INTEGER :: si,sj

  INTEGER :: source, dest
  INTEGER :: mptag           ! Unique MPI id used for this BC update.
                             ! mptag + tag_n for north boundary
                             ! mptag + tag_s for south boundary

!@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@
!
!  Beginning of executable code...
!
!@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@

  si = 0
  sj = 0
  IF (stagdim == 1) si = 1
  IF (stagdim == 2) sj = 1

  CALL inctag
  mptag = gentag

!-----------------------------------------------------------------------
!
!  Set the north boundary conditions
!
!-----------------------------------------------------------------------

  !
  ! send destination
  !
  IF(loc_y == 1) THEN             ! the southernmost processor in a column
    IF(sbc == 2) THEN             ! periodic boundary
      dest = proc(loc_x+nproc_x*(nproc_y-1))
    ELSE
      dest = MPI_PROC_NULL
    END IF
  ELSE
    dest = proc(loc_x+nproc_x*(loc_y-2))
  END IF

  ! 
  ! receive from
  !
  IF(loc_y == nproc_y) THEN       ! The northernmost processor in a column
    IF(nbc == 2) THEN
      source = proc(loc_x)
    ELSE
      source = MPI_PROC_NULL
    END IF
  ELSE
    source = proc(loc_x+nproc_x*loc_y)
  END IF

  !
  ! Pack send buffer, send south slice to update north boundary of 
  ! the south neighbor
  !
  DO i=1,nx-1+si
    tem(i,1) = var(i,2+sj)
  END DO

  CALL mpi_sendrecv(tem(:,1),nx,MPI_INTEGER,dest,  mptag+tag_n,       &
                    tem(:,2),nx,MPI_INTEGER,source,mptag+tag_n,       &
                    MPI_COMM_WORLD,mpi_status,imstat)

  !
  ! Unpack receive buffer, update north boundary data
  !
  IF ( loc_y /= nproc_y .OR. nbc == 2 )  THEN
                               ! i.e. .NOT. (loc_y == nproc_y .AND. nbc /= 2)
    DO i=1,nx-1+si
      var(i,ny-1+sj) = tem(i,2)
    END DO
  END IF

!-----------------------------------------------------------------------
!
!  Set the south boundary conditions
!
!-----------------------------------------------------------------------

  !
  ! send destination
  !
  IF(loc_y == nproc_y) THEN       ! The northernmost processor
    IF(nbc == 2) THEN             ! periodic boundary
      dest = proc(loc_x)
    ELSE
      dest = MPI_PROC_NULL
    END IF
  ELSE
    dest = proc(loc_x+nproc_x*loc_y)
  END IF

  ! 
  ! receive from
  !
  IF(loc_y == 1) THEN            ! The southernmost processor
    IF(sbc == 2) THEN
      source = proc(loc_x+nproc_x*(nproc_y-1))
    ELSE
      source = MPI_PROC_NULL
    END IF
  ELSE
    source = proc(loc_x+nproc_x*(loc_y-2))
  END IF

  !
  ! Pack send buffer, north slice for south boundary of 
  ! the north neighbor
  !
  DO i=1,nx-1+si
    tem(i,1) = var(i,ny-2)
  END DO

  CALL mpi_sendrecv(tem(:,1),nx,MPI_INTEGER,dest,  mptag+tag_s,       &
                    tem(:,2),nx,MPI_INTEGER,source,mptag+tag_s,       &
                    MPI_COMM_WORLD,mpi_status,imstat)

  !
  ! Unpack receive buffer, update south boundary data
  !
  IF ( loc_y /= 1 .OR. sbc == 2 )  THEN
                                    ! i.e. .NOT. (loc_y == 1 .AND. sbc /= 2)
    DO i=1,nx-1+si
      var(i,1) = tem(i,2)
    END DO
  END IF

  RETURN
END SUBROUTINE mpsendrecv1dins
!
!##################################################################
!##################################################################
!######                                                      ######
!######                SUBROUTINE MPSENDRECVEXTEW            ######
!######                                                      ######
!######                     Developed by                     ######
!######     Center for Analysis and Prediction of Storms     ######
!######                University of Oklahoma                ######
!######                                                      ######
!##################################################################
!##################################################################
!

SUBROUTINE mpsendrecvextew(var,nx,ny,nz,ebc,wbc,tem)

!
!-----------------------------------------------------------------------
!
!  PURPOSE:
!
!  Send & receive east/west boundary data between processors to 
!  update the fake zones. This is for the extended arrays, which
!  have two fake zones instead of one on each boundary (arrays run
!  from 0:nx,0:ny,0:nz).
!
!-----------------------------------------------------------------------
!
!
!  AUTHOR: Yunheng Wang
!  11/24/2003
!
!  MODIFICATION HISTORY:
!
!-----------------------------------------------------------------------
!
!  INPUT :
!
!    nx       Number of grid points in the x-direction (east/west)
!    ny       Number of grid points in the y-direction (north/south)
!    nz       Number of grid points in the vertical
!
!    ebc      East boundary condition
!    wbc      West boundary condition
!
!  INPUT & OUTPUT
!
!    var      Variable for which boundaries need updating.
!
!  WORK array
!
!    tem      Work array (dimensioned at least (ny+1) x (nz+1) x 2).
!
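!  EXAMPLE:
!
!    Hypothetical usage for an extended (two-fake-zone) array
!    (names illustrative):
!
!      REAL :: vext(0:nx,0:ny,0:nz), tem(0:ny,0:nz,2)
!      CALL mpsendrecvextew(vext,nx,ny,nz,ebc,wbc,tem)
!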
!-----------------------------------------------------------------------
!
!  Variable declarations.
!
!-----------------------------------------------------------------------

  IMPLICIT NONE

  INTEGER, INTENT(IN) :: nx,ny,nz      ! Number of grid points in 
                                       ! x, y and z directions
  INTEGER, INTENT(IN) :: ebc,wbc

  REAL, INTENT(INOUT) :: var(0:nx,0:ny,0:nz)
  REAL, INTENT(INOUT) :: tem(0:ny,0:nz,2)   ! Work array.


!-----------------------------------------------------------------------
!
!  Include files.
!
!-----------------------------------------------------------------------

  INCLUDE 'mpif.h'
  INCLUDE 'globcst.inc'
  INCLUDE 'mp.inc'

!-----------------------------------------------------------------------
!
!  Local variable declarations.
!
!-----------------------------------------------------------------------

  INTEGER :: mpi_status(MPI_STATUS_SIZE)
  INTEGER :: imstat
  INTEGER :: j, k
  INTEGER :: si,sj,sk

  INTEGER :: source, dest
  INTEGER :: mptag           ! Unique MPI id used for this BC update.
                             ! mptag + tag_w for west boundary
                             ! mptag + tag_e for east boundary

!@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@
!
!  Beginning of executable code...
!
!@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@

  CALL inctag
  mptag = gentag

!-----------------------------------------------------------------------
!
!  Set the west boundary conditions
!
!-----------------------------------------------------------------------

  !
  ! send destination
  !
  IF(loc_x == nproc_x) THEN       ! last processor in a row
    IF(ebc == 2) THEN             ! periodic boundary
      dest = proc(1+nproc_x*(loc_y-1))
    ELSE
      dest = MPI_PROC_NULL
    END IF
  ELSE
    dest = proc(loc_x+1+nproc_x*(loc_y-1))
  END IF

  ! 
  ! receive from
  !
  IF(loc_x == 1) THEN             ! First processor in a row
    IF(wbc == 2) THEN
      source = proc(nproc_x+nproc_x*(loc_y-1))
    ELSE
      source = MPI_PROC_NULL
    END IF
  ELSE
    source = proc(loc_x-1+nproc_x*(loc_y-1))
  END IF

  !
  ! Pack send buffer, send east slice to update west boundary of 
  ! the east neighbor
  !
  DO k=0,nz
    DO j=0,ny
      tem(j,k,1) = var(nx-3,j,k)
    END DO
  END DO

  CALL mpi_sendrecv(tem(:,:,1),(ny+1)*(nz+1),MPI_REAL,dest,  mptag+tag_w, &
                    tem(:,:,2),(ny+1)*(nz+1),MPI_REAL,source,mptag+tag_w, &
                    MPI_COMM_WORLD,mpi_status,imstat)

  !
  ! Unpack receive buffer, update West boundary data
  !
  IF ( loc_x /= 1 .OR. wbc == 2 ) THEN  ! i.e. .NOT. (loc_x == 1 .AND. wbc /= 2)
    DO k=0,nz
      DO j=0,ny
        var(0,j,k) = tem(j,k,2)
      END DO
    END DO
  END IF

!-----------------------------------------------------------------------
!
!  Set the east boundary conditions
!
!-----------------------------------------------------------------------

  !
  ! send destination
  !
  IF(loc_x == 1) THEN             ! First processor in a row
    IF(wbc == 2) THEN             ! periodic boundary
      dest = proc(nproc_x+nproc_x*(loc_y-1))
    ELSE
      dest = MPI_PROC_NULL
    END IF
  ELSE
    dest = proc(loc_x-1+nproc_x*(loc_y-1))
  END IF

  ! 
  ! receive from
  !
  IF(loc_x == nproc_x) THEN        ! Last processor in a row
    IF(ebc == 2) THEN
      source = proc(1+nproc_x*(loc_y-1))
    ELSE
      source = MPI_PROC_NULL
    END IF
  ELSE
    source = proc(loc_x+1+nproc_x*(loc_y-1))
  END IF

  !
  ! Pack send buffer, west slice for east boundary of 
  ! the west neighbor
  !
  DO k=0,nz
    DO j=0,ny
      tem(j,k,1) = var(3,j,k)
    END DO
  END DO

  CALL mpi_sendrecv(tem(:,:,1),(ny+1)*(nz+1),MPI_REAL,dest,  mptag+tag_e, &
                    tem(:,:,2),(ny+1)*(nz+1),MPI_REAL,source,mptag+tag_e, &
                    MPI_COMM_WORLD,mpi_status,imstat)

  !
  ! Unpack receive buffer, update east boundary data
  !
  IF ( loc_x /= nproc_x .OR. ebc == 2 )  THEN
                              ! i.e. .NOT. (loc_x == nproc_x .AND. ebc /= 2)
    DO k=0,nz
      DO j=0,ny
        var(nx,j,k) = tem(j,k,2)
      END DO
    END DO
  END IF

  RETURN
END SUBROUTINE mpsendrecvextew
!
!##################################################################
!##################################################################
!######                                                      ######
!######                SUBROUTINE MPSENDRECVEXTNS            ######
!######                                                      ######
!######                     Developed by                     ######
!######     Center for Analysis and Prediction of Storms     ######
!######                University of Oklahoma                ######
!######                                                      ######
!##################################################################
!##################################################################
!

SUBROUTINE mpsendrecvextns(var,nx,ny,nz,nbc,sbc,tem)

!
!-----------------------------------------------------------------------
!
!  PURPOSE:
!
!  Send & receive north/south boundary data between processors to 
!  update the fake zones. This is for the extended arrays, which
!  have two fake zones instead of one on each boundary (arrays run
!  from 0:nx,0:ny,0:nz).
!
!-----------------------------------------------------------------------
!
!
!  AUTHOR: Yunheng Wang
!  11/24/2003
!
!  MODIFICATION HISTORY:
!
!-----------------------------------------------------------------------
!
!  INPUT :
!
!    nx       Number of grid points in the x-direction (east/west)
!    ny       Number of grid points in the y-direction (north/south)
!    nz       Number of grid points in the vertical
!
!    nbc      North boundary condition
!    sbc      South boundary condition
!
!  INPUT & OUTPUT:
!
!    var      Variable for which boundaries need updating.
!
!    tem      Work array (dimensioned at least (nx+1) x (nz+1) x 2).
!
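!  EXAMPLE:
!
!    Hypothetical usage for an extended (two-fake-zone) array
!    (names illustrative):
!
!      REAL :: vext(0:nx,0:ny,0:nz), tem(0:nx,0:nz,2)
!      CALL mpsendrecvextns(vext,nx,ny,nz,nbc,sbc,tem)
!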
!-----------------------------------------------------------------------
!
!  Variable declarations.
!
!-----------------------------------------------------------------------

  IMPLICIT NONE

  INTEGER, INTENT(IN) :: nx,ny,nz      ! Number of grid points in 
                                       ! x, y and z directions
  INTEGER, INTENT(IN) :: nbc,sbc

  REAL, INTENT(INOUT) :: var(0:nx,0:ny,0:nz)
  REAL, INTENT(INOUT) :: tem(0:nx,0:nz,2)   ! Work array.

!-----------------------------------------------------------------------
!
!  Include files.
!
!-----------------------------------------------------------------------

  INCLUDE 'mpif.h'
  INCLUDE 'globcst.inc'
  INCLUDE 'mp.inc'

!-----------------------------------------------------------------------
!
!  Local variable declarations.
!
!-----------------------------------------------------------------------

  INTEGER :: mpi_status(MPI_STATUS_SIZE)
  INTEGER :: imstat
  INTEGER :: i,k

  INTEGER :: source, dest
  INTEGER :: mptag           ! Unique MPI id used for this BC update.
                             ! mptag + tag_n for north boundary
                             ! mptag + tag_s for south boundary

!@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@
!
!  Beginning of executable code...
!
!@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@

  CALL inctag
  mptag = gentag

!-----------------------------------------------------------------------
!
!  Set the north boundary conditions
!
!-----------------------------------------------------------------------

  !
  ! send destination
  !
  IF(loc_y == 1) THEN             ! the southernmost processor in a column
    IF(sbc == 2) THEN             ! periodic boundary
      dest = proc(loc_x+nproc_x*(nproc_y-1))
    ELSE
      dest = MPI_PROC_NULL
    END IF
  ELSE
    dest = proc(loc_x+nproc_x*(loc_y-2))
  END IF

  ! 
  ! receive from
  !
  IF(loc_y == nproc_y) THEN       ! The northernmost processor in a column
    IF(nbc == 2) THEN
      source = proc(loc_x)
    ELSE
      source = MPI_PROC_NULL
    END IF
  ELSE
    source = proc(loc_x+nproc_x*loc_y)
  END IF

  !
  ! Pack send buffer, send south slice to update north boundary of 
  ! the south neighbor
  !
  DO k=0,nz
    DO i=0,nx
      tem(i,k,1) = var(i,3,k)
    END DO
  END DO

  CALL mpi_sendrecv(tem(:,:,1),(nx+1)*(nz+1),MPI_REAL,dest,  mptag+tag_n,&
                    tem(:,:,2),(nx+1)*(nz+1),MPI_REAL,source,mptag+tag_n,&
                    MPI_COMM_WORLD,mpi_status,imstat)

  !
  ! Unpack receive buffer, update north boundary data
  !
  IF ( loc_y /= nproc_y .OR. nbc == 2 )  THEN
                               ! i.e. .NOT. (loc_y == nproc_y .AND. nbc /= 2)
    DO k=0,nz
      DO i=0,nx
        var(i,ny,k) = tem(i,k,2)
      END DO
    END DO
  END IF

!-----------------------------------------------------------------------
!
!  Set the south boundary conditions
!
!-----------------------------------------------------------------------

  !
  ! send destination
  !
  IF(loc_y == nproc_y) THEN       ! The northernmost processor
    IF(nbc == 2) THEN             ! periodic boundary
      dest = proc(loc_x)
    ELSE
      dest = MPI_PROC_NULL
    END IF
  ELSE
    dest = proc(loc_x+nproc_x*loc_y)
  END IF

  ! 
  ! receive from
  !
  IF(loc_y == 1) THEN            ! The southernmost processor
    IF(sbc == 2) THEN
      source = proc(loc_x+nproc_x*(nproc_y-1))
    ELSE
      source = MPI_PROC_NULL
    END IF
  ELSE
    source = proc(loc_x+nproc_x*(loc_y-2))
  END IF

  !
  ! Pack send buffer, north slice for south boundary of 
  ! the north neighbor
  !
  DO k=0,nz
    DO i=0,nx
      tem(i,k,1) = var(i,ny-3,k)
    END DO
  END DO

  CALL mpi_sendrecv(tem(:,:,1),(nx+1)*(nz+1),MPI_REAL,dest,  mptag+tag_s,&
                    tem(:,:,2),(nx+1)*(nz+1),MPI_REAL,source,mptag+tag_s,&
                    MPI_COMM_WORLD,mpi_status,imstat)

  !
  ! Unpack receive buffer, update south boundary data
  !
  IF ( loc_y /= 1 .OR. sbc == 2 )  THEN
                                    ! i.e. .NOT. (loc_y == 1 .AND. sbc /= 2)
    DO k=0,nz
      DO i=0,nx
        var(i,0,k) = tem(i,k,2)
      END DO
    END DO
  END IF

  RETURN
END SUBROUTINE mpsendrecvextns
!
!##################################################################
!##################################################################
!######                                                      ######
!######                SUBROUTINE MPUPDATER                  ######
!######                                                      ######
!######                     Developed by                     ######
!######     Center for Analysis and Prediction of Storms     ######
!######                University of Oklahoma                ######
!######                                                      ######
!##################################################################
!##################################################################
!


SUBROUTINE mpupdater(var,num)
!
!-----------------------------------------------------------------------
!
!  PURPOSE:
!
!  Broadcast the value of var from process 0 to all other processes.
!
!-----------------------------------------------------------------------
!
!
!  AUTHOR: Gene Bassett
!  2000/04/24
!
!  MODIFICATION HISTORY:
!
!-----------------------------------------------------------------------
!
!  INPUT/OUTPUT :
!
!    var      Array to update (INPUT on proc 0, OUTPUT for rest).
!    num      Number of elements in the array.
!
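!  EXAMPLE:
!
!    Hypothetical usage after processor 0 reads the input file
!    (names illustrative):
!
!      CALL mpupdater(zsoil,nzsoil)   ! broadcast a REAL array
!      CALL mpupdater(dx,1)           ! a scalar passed with num=1 relies
!                                     ! on the old-style implicit interface
!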
!-----------------------------------------------------------------------
!
!  Variable declarations.
!
!-----------------------------------------------------------------------

  IMPLICIT NONE

  INTEGER :: num
  REAL :: var(num)

!-----------------------------------------------------------------------
!
!  Include files.
!
!-----------------------------------------------------------------------

  INCLUDE 'mpif.h'
  INCLUDE 'globcst.inc'
  INCLUDE 'mp.inc'

!-----------------------------------------------------------------------
!
!  Local variable declarations.
!
!-----------------------------------------------------------------------

  INTEGER :: imstat

!@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@
!
!  Beginning of executable code...
!
!@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@

  CALL mpi_bcast(var,num,mpi_real,0,mpi_comm_world,imstat)
  IF (imstat /= 0) THEN
    WRITE (6,*) "MPUPDATER: error on processor ",myproc
  END IF

  RETURN
END SUBROUTINE mpupdater
!
!##################################################################
!##################################################################
!######                                                      ######
!######                SUBROUTINE MPUPDATEI                  ######
!######                                                      ######
!######                     Developed by                     ######
!######     Center for Analysis and Prediction of Storms     ######
!######                University of Oklahoma                ######
!######                                                      ######
!##################################################################
!##################################################################
!


SUBROUTINE mpupdatei(var,num)
!
!-----------------------------------------------------------------------
!
!  PURPOSE:
!
!  Broadcast the value of var from process 0 to all other processes.
!
!-----------------------------------------------------------------------
!
!
!  AUTHOR: Gene Bassett
!  2000/04/24
!
!  MODIFICATION HISTORY:
!
!-----------------------------------------------------------------------
!
!  INPUT/OUTPUT :
!
!    var      Variable to update (INPUT on proc 0, OUTPUT for rest).
!    num      Number of elements in the array.
!
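!  EXAMPLE:
!
!    Hypothetical usage (names illustrative):
!
!      CALL mpupdatei(nstyp,1)        ! a scalar, passed with num=1
!      CALL mpupdatei(soiltyp,nx*ny)  ! or a whole INTEGER array
!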
!-----------------------------------------------------------------------
!
!  Variable declarations.
!
!-----------------------------------------------------------------------

  IMPLICIT NONE

  INTEGER :: num
  INTEGER :: var(num)

!-----------------------------------------------------------------------
!
!  Include files.
!
!-----------------------------------------------------------------------

  INCLUDE 'mpif.h'
  INCLUDE 'globcst.inc'
  INCLUDE 'mp.inc'

!-----------------------------------------------------------------------
!
!  Local variable declarations.
!
!-----------------------------------------------------------------------

  INTEGER :: imstat

!@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@
!
!  Beginning of executable code...
!
!@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@

  CALL mpi_bcast(var,num,mpi_integer,0,mpi_comm_world,imstat)
  IF (imstat /= 0) THEN
    WRITE (6,*) "MPUPDATEI: error on processor ",myproc
  END IF

  RETURN
END SUBROUTINE mpupdatei
!
!##################################################################
!##################################################################
!######                                                      ######
!######                SUBROUTINE MPUPDATEL                  ######
!######                                                      ######
!######                     Developed by                     ######
!######     Center for Analysis and Prediction of Storms     ######
!######                University of Oklahoma                ######
!######                                                      ######
!##################################################################
!##################################################################
!

SUBROUTINE mpupdatel(var,num)
!
!-----------------------------------------------------------------------
!
!  PURPOSE:
!
!  Broadcast the value of var from process 0 to all other processes.
!
!-----------------------------------------------------------------------
!
!
!  AUTHOR: 
!
!  MODIFICATION HISTORY:
!
!-----------------------------------------------------------------------
!
!  INPUT/OUTPUT :
!
!    var      Variable to update (INPUT on proc 0, OUTPUT for rest).
!    num      Number of elements in the array.
!
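!  EXAMPLE:
!
!    Hypothetical usage (names illustrative):
!
!      LOGICAL :: opts(3)
!      CALL mpupdatel(opts,3)
!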
!-----------------------------------------------------------------------
!
!  Variable declarations.
!
!-----------------------------------------------------------------------

  IMPLICIT NONE

  INTEGER :: num
  LOGICAL :: var(num)

!-----------------------------------------------------------------------
!
!  Include files.
!
!-----------------------------------------------------------------------

  INCLUDE 'mpif.h'
  INCLUDE 'mp.inc'

!-----------------------------------------------------------------------
!
!  Local variable declarations.
!
!-----------------------------------------------------------------------

  INTEGER :: imstat

!@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@
!
!  Beginning of executable code...
!
!@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@

  CALL mpi_bcast(var,num,MPI_LOGICAL,0,mpi_comm_world,imstat)
  IF (imstat /= 0) THEN
    WRITE (6,*) 'MPUPDATEL: error on processor ',myproc
  END IF

  RETURN
END SUBROUTINE mpupdatel
!
!##################################################################
!##################################################################
!######                                                      ######
!######                SUBROUTINE MPUPDATEC                  ######
!######                                                      ######
!######                     Developed by                     ######
!######     Center for Analysis and Prediction of Storms     ######
!######                University of Oklahoma                ######
!######                                                      ######
!##################################################################
!##################################################################
!


SUBROUTINE mpupdatec(str,lenstr)
!
!-----------------------------------------------------------------------
!
!  PURPOSE:
!
!  Broadcast the string str from process 0 to all other processes.
!
!-----------------------------------------------------------------------
!
!
!  AUTHOR: Gene Bassett
!  2000/04/24
!
!  MODIFICATION HISTORY:
!
!-----------------------------------------------------------------------
!
!  INPUT/OUTPUT :
!
!    str      String to update (INPUT on proc 0, OUTPUT for rest).
!    lenstr   Length of str.
!
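!  NOTE:
!
!  lenstr must be the same on every process.  A hypothetical use
!  (fname is an illustrative name):
!
!    IF (myproc == 0) READ(5,'(a)') fname   ! only processor 0 reads
!    CALL mpupdatec(fname,LEN(fname))       ! now valid everywhere
!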
!-----------------------------------------------------------------------
!
!  Variable declarations.
!
!-----------------------------------------------------------------------

  IMPLICIT NONE

  INTEGER, INTENT(IN) :: lenstr
  CHARACTER (LEN=lenstr), INTENT(INOUT) :: str

!-----------------------------------------------------------------------
!
!  Include files.
!
!-----------------------------------------------------------------------

  INCLUDE 'mpif.h'
  INCLUDE 'globcst.inc'
  INCLUDE 'mp.inc'

!-----------------------------------------------------------------------
!
!  Local variable declarations.
!
!-----------------------------------------------------------------------

  INTEGER :: imstat

!@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@
!
!  Beginning of executable code...
!
!@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@

  CALL mpi_bcast(str,lenstr,mpi_character,0,mpi_comm_world,imstat)
  IF (imstat /= 0) THEN
    WRITE (6,*) "MPUPDATEC: error on processor",myproc
  END IF

  RETURN
END SUBROUTINE mpupdatec

!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
!     Miscellaneous MPI subroutines (not in ARPS standard format)
!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!


SUBROUTINE mpexit(errcode)

  IMPLICIT NONE

  INCLUDE 'mpif.h'
  INTEGER :: errcode
  INTEGER :: imstat

  IF (errcode == 0) THEN
    CALL mpi_finalize (imstat)
  ELSE
    CALL mpi_abort (mpi_comm_world, errcode, imstat)
  ENDIF

  RETURN
END SUBROUTINE mpexit



SUBROUTINE inctag

  IMPLICIT NONE

  INCLUDE 'mp.inc'

  !
  ! The MPI standard only requires that MPI_TAG_UB be no less than 32767.
  !
  IF (gentag < 100 .OR. gentag > 32700) gentag = 100
  gentag = gentag + 100
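
  !
  ! A hypothetical calling sequence: a communication phase calls inctag
  ! once and then derives its message tags from the new gentag, e.g.
  !
  !   CALL inctag
  !   mytag = gentag + 1          ! a tag unique to this phase
  !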

  RETURN
END SUBROUTINE inctag



SUBROUTINE mpbarrier

  IMPLICIT NONE

  INCLUDE 'mpif.h'
  INTEGER :: imstat

  CALL mpi_barrier (mpi_comm_world, imstat)

  RETURN
END SUBROUTINE mpbarrier




SUBROUTINE mptotal(var)
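
!
!  Sum a real scalar over all processes; on return every process holds
!  the global total.  A hypothetical use (psum is an illustrative
!  name), turning per-processor partial sums into a domain-wide total:
!
!    psum = ...sum over the local subdomain...
!    CALL mptotal(psum)           ! psum now holds the global sum
!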

  IMPLICIT NONE

  INCLUDE 'mpif.h'

  REAL :: var, vartm
  INTEGER :: imstat

  CALL mpi_allreduce (var, vartm, 1, mpi_real, mpi_sum,                 &
       mpi_comm_world, imstat)

  var = vartm

  RETURN
END SUBROUTINE mptotal


SUBROUTINE mptotali(var)

  IMPLICIT NONE

  INCLUDE 'mpif.h'

  INTEGER :: var, vartm
  INTEGER :: imstat

  CALL mpi_allreduce(var, vartm, 1, MPI_INTEGER, MPI_SUM,               &
                     mpi_comm_world, imstat)

  var = vartm

  RETURN
END SUBROUTINE mptotali


SUBROUTINE mpmax0(amax,amin)

!
!  Modified by Dan Weber, May 4, 1998.
!  Replaces an earlier mpi_allreduce version for use on the T3D/T3E
!  systems, where mpi_allreduce was not working properly.
!
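!
!  On entry amax/amin hold the local extrema; on exit every process
!  holds the global extrema.  A hypothetical use (u, umax, umin are
!  illustrative names):
!
!    umax = MAXVAL(u(1:nx,1:ny,1:nz))   ! local maximum
!    umin = MINVAL(u(1:nx,1:ny,1:nz))   ! local minimum
!    CALL mpmax0(umax,umin)             ! global maximum/minimum
!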
  IMPLICIT NONE

  REAL, INTENT(INOUT) :: amax,amin

  INCLUDE 'mpif.h'

  REAL    :: maxtm, mintm
  INTEGER :: imstat

!@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@
!
!    start of executable code....
!
!@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@

!  CALL mpi_allreduce (amax, maxtm, 1, MPI_REAL, MPI_MAX,
!    :     MPI_COMM_WORLD, imstat)  ! commented out because the T3E
                                 ! has trouble with mpi_allreduce
  CALL mpi_reduce(amax,maxtm,1,mpi_real,mpi_max,0,                      &
                  mpi_comm_world,imstat)
  CALL mpi_bcast(maxtm,1,mpi_real,0,mpi_comm_world,imstat)

  amax = maxtm

!  CALL mpi_allreduce (amin, mintm, 1, MPI_REAL, MPI_MIN,
!    :     MPI_COMM_WORLD, imstat)  ! commented out because the T3E
                                 ! has trouble with mpi_allreduce
  CALL mpi_reduce(amin,mintm,1,mpi_real,mpi_min,0,                      &
                  mpi_comm_world,imstat)
  CALL mpi_bcast(mintm,1,mpi_real,0,mpi_comm_world,imstat)

  amin = mintm

  RETURN
END SUBROUTINE mpmax0


SUBROUTINE mpmax(amax,amin,nx,ny,nz,imax,jmax,kmax,imin,jmin,kmin)

!
!  Modified by Dan Weber, October 23, 1997
!
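!
!  mpmax also reports *where* the global extrema occur: the winning
!  processor converts its local indices to global ones via
!  iglob = iloc + (nx-3)*(loc_x-1), since neighboring subdomains
!  overlap by 3 fake zones.  For example (hypothetical numbers), with
!  nx=67 and loc_x=2, a local imax=10 becomes global imax = 10+64 = 74.
!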

  IMPLICIT NONE

  INTEGER :: nx,ny,nz,imax,jmax,kmax,imin,jmin,kmin,itema,itemb
  REAL    :: amax,amin
  INTEGER :: imstat

  INCLUDE 'mpif.h'
  INCLUDE 'globcst.inc'
  INCLUDE 'mp.inc'

  INTEGER :: mpi_status(mpi_status_size)
  REAL    :: maxarr (2), minarr(2)
  REAL    :: maxtm  (2), mintm(2)
  INTEGER :: maxpack (3), maxunpack(3)
  INTEGER :: minpack (3), minunpack(3)

!@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@
!
!    start of executable code....
!
!@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@

  CALL inctag
  maxtm(1)  = 0.0
  maxtm(2)  = 0.0
  maxarr(1) = amax
  maxarr(2) = FLOAT(myproc)

!  CALL mpi_allreduce (maxarr, maxtm, 1, MPI_2REAL, MPI_MAXLOC,
!    :     MPI_COMM_WORLD, imstat)  ! commented out because the T3E
                                   ! has trouble with mpi_allreduce
  CALL mpi_reduce(maxarr,maxtm,1,MPI_2REAL,MPI_MAXLOC,0,                &
                  MPI_COMM_WORLD,imstat)
  CALL mpi_bcast (maxtm,1,MPI_2REAL,0,MPI_COMM_WORLD,imstat)

  itema = nint(maxtm(2))
  IF(myproc == itema .AND. itema /= 0)THEN    ! send only if the max is
    maxpack(1) = imax + (nx-3)*(loc_x-1)      ! on a processor other
    maxpack(2) = jmax + (ny-3)*(loc_y-1)      ! than 0
    maxpack(3) = kmax
    CALL mpi_send (maxpack,3,MPI_INTEGER,0,                             &
                   gentag,MPI_COMM_WORLD,imstat)

    !wdt forced buffering
    !CALL mpi_bsend (maxpack,3,mpi_integer,0,                           &
    !               gentag,mpi_comm_world,imstat)

  END IF

  IF(myproc == 0 .AND. myproc /= itema)THEN ! receive only if
                                            ! itema .ne. myproc=0
    CALL mpi_recv (maxunpack,3,mpi_integer,itema,                       &
                   gentag,mpi_comm_world,mpi_status,imstat)
    imax = maxunpack(1)
    jmax = maxunpack(2)
    kmax = maxunpack(3)
    amax = maxtm(1)
  END IF


  mintm(1) = 0.0
  mintm(2) = 0.0
  minarr(1) = amin
  minarr(2) = FLOAT(myproc)

!  CALL mpi_allreduce (minarr, mintm, 1, MPI_2REAL, MPI_MINLOC,
!    :     MPI_COMM_WORLD, imstat)  ! commented out because the T3E
                                    ! has trouble with mpi_allreduce

  CALL mpi_reduce(minarr,mintm,1,MPI_2REAL,MPI_MINLOC,0,                &
                  MPI_COMM_WORLD,imstat)
  CALL mpi_bcast (mintm,1,MPI_2REAL,0,MPI_COMM_WORLD,imstat)

  itemb = nint(mintm(2))
  IF (myproc == itemb .AND. itemb /= 0) THEN    ! send only if the min is
    minpack(1) = imin + (nx-3)*(loc_x-1)        ! on a processor other
    minpack(2) = jmin + (ny-3)*(loc_y-1)        ! than 0
    minpack(3) = kmin
    CALL mpi_send (minpack,3,mpi_integer,0,                             &
                     gentag+1,mpi_comm_world,imstat)

    !wdt forced buffering
    !CALL mpi_bsend (minpack,3,mpi_integer,0,                           &
    !                 gentag+1,mpi_comm_world,imstat)
  END IF

  IF (myproc == 0 .AND. myproc /= itemb) THEN ! receive only if
                                              ! itemb .ne. myproc=0
    CALL mpi_recv (minunpack,3,MPI_INTEGER,itemb,                       &
                   gentag+1,MPI_COMM_WORLD,mpi_status,imstat)

    imin = minunpack(1)
    jmin = minunpack(2)
    kmin = minunpack(3)
    amin = mintm(1)
  END IF

  RETURN
END SUBROUTINE mpmax


SUBROUTINE mpinit_proc

  IMPLICIT NONE

  INCLUDE 'mpif.h'
  INCLUDE 'globcst.inc'
  INCLUDE 'mp.inc'

  INTEGER :: imstat

  mp_opt = 1

  CALL mpi_init( imstat )

  CALL mpi_comm_rank( mpi_comm_world, myproc, imstat )

  RETURN
END SUBROUTINE mpinit_proc


SUBROUTINE mpinit_var

  IMPLICIT NONE

  INCLUDE 'mpif.h'
  INCLUDE 'globcst.inc'
  INCLUDE 'bndry.inc'
  INCLUDE 'mp.inc'

  INTEGER :: i,j,l
  INTEGER :: nprocs0
  INTEGER :: imstat

  CALL mpi_comm_size( mpi_comm_world, nprocs0, imstat )

  nprocs = nproc_x * nproc_y

  IF(nprocs > max_proc) THEN

    WRITE (6,*) "ERROR: number of processors exceeds maximum ",     &
                "specified in mp.inc:"
    WRITE (6,*) "nprocs =",nprocs
    WRITE (6,*) "max_proc (in mp.inc) =",max_proc
    CALL arpsstop ("arpsstop called from mpinit_var mismatch in   &
                   & number of processors-too many",1)

  END IF

!
!  Define the proc array (dimensioned nproc_x*nproc_y) and the logical
!  processor coordinates (loc_x,loc_y) of each process.
!
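!  Processes are numbered row by row; for example (hypothetical
!  numbers), with nproc_x=4 and nproc_y=2, process myproc=6 sits at
!  loc_x = MOD(6,4)+1 = 3 and loc_y = 6/4+1 = 2.
!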
  IF(nprocs /= nprocs0)THEN  ! check that the processor count given on
                             ! the command line matches nproc_x*nproc_y
                             ! from the input file.
    IF(myproc == 0)THEN
      PRINT *,'Number of processors chosen on the command line '
      PRINT *,'is different from that given in arps.input, EXITING'
      PRINT *,'requested: ', nprocs0
      PRINT *,'in arps.input: ', nprocs, ' = ',nproc_x,' * ',nproc_y
    END IF
    CALL arpsstop ("arpsstop called from mpinit_var mismatch in   &
                   & number of processors",1)

  END IF

  l = 0
  DO j = 1, nproc_y
    DO i = 1, nproc_x
      proc(i+nproc_x*(j-1)) = l
      l = l + 1
    END DO
  END DO

  loc_x = MOD(myproc, nproc_x) + 1
  loc_y = myproc / nproc_x + 1

  gentag = 0

  RETURN
END SUBROUTINE mpinit_var
!
!##################################################################
!##################################################################
!######                                                      ######
!######                SUBROUTINE MPIMERGE                   ######
!######                                                      ######
!######                     Developed by                     ######
!######     Center for Analysis and Prediction of Storms     ######
!######                University of Oklahoma                ######
!######                                                      ######
!##################################################################
!##################################################################
!

SUBROUTINE mpimerge(locvar,nx,ny,nz,nt,fzone,globvar,tem1)
!
!-----------------------------------------------------------------------
!
!  PURPOSE:
!
!  Assemble global data from a multiprocessor run so that it can be
!  compared with output from a single-processor run.
!
!-----------------------------------------------------------------------
!
!
!  AUTHOR: Dan Weber
!  2001/04/11
!
!  MODIFICATION HISTORY:
!  2004/04/02 (Yunheng Wang)
!  Added the fzone and globvar arguments to make the code more flexible.
!
!-----------------------------------------------------------------------
!
!  INPUT :
!
!    locvar   Local array on each processor.
!
!    nx       Number of grid points in the x-direction (east/west)
!    ny       Number of grid points in the y-direction (north/south)
!    nz       Number of grid points in the vertical
!    nt       Number of time levels (the nt-th level is merged)
!
!    fzone    Number of fake (ghost) zones: 3 for ARPS, 1 for WRF.
!
!  OUTPUT:
!
!    globvar  Global array (complete only on processor 0).
!
!  WORK array:
!
!    tem1     Work array.
!
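!  NOTE:
!
!  A local point (i,j) on processor (ic,jc) lands at the global indices
!  ia = i + (ic-1)*(nx-fzone), ja = j + (jc-1)*(ny-fzone).  For example
!  (hypothetical numbers), with nx=67, fzone=3 and ic=2, local i=4 maps
!  to ia = 4 + 64 = 68 in a global array of (67-3)*nproc_x+3 points.
!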
!-----------------------------------------------------------------------
!
!  Variable declarations.
!
!-----------------------------------------------------------------------

  IMPLICIT NONE

  INTEGER :: nx,ny,nz          ! Number of grid points in x, y and z
                               ! directions
  INTEGER :: nt                ! Number of time levels
  INTEGER :: fzone             ! Number of fake (ghost) zones
  REAL    :: locvar(nx,ny,nz,nt)
  REAL    :: tem1(nx,ny,nz)

!-----------------------------------------------------------------------
!
!  Include files.
!
!-----------------------------------------------------------------------

  INCLUDE 'mpif.h'
  INCLUDE 'mp.inc'

  REAL    :: globvar((nx-fzone)*nproc_x+fzone,(ny-fzone)*nproc_y+fzone,nz)

!-----------------------------------------------------------------------
!
!  Local variable declarations.
!
!-----------------------------------------------------------------------

  INTEGER :: stat(mpi_status_size)
  INTEGER :: imstat
  INTEGER :: i, j, k
  INTEGER :: mptag             ! Unique MPI id used for this BC update.
  INTEGER :: ia,ja, ic,jc,itemc,itemb,itema

!@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@
!
!  Beginning of executable code...
!
!@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@

  CALL inctag
  mptag = gentag

!-----------------------------------------------------------------------
!
!  fill the globvar array
!
!-----------------------------------------------------------------------

  DO k=1,nz        ! each processor stores the locvar into tem1
    DO j=1,ny
      DO i=1,nx
        tem1(i,j,k) = locvar(i,j,k,nt)
      END DO
    END DO
  END DO
 
  DO jc=1,nproc_y
    DO ic=1,nproc_x

!   message passing section...

      itemb = mptag + 100 + ic + jc
      IF(ic /= 1 .OR. jc /= 1) THEN   !  pass data to processor 0

        IF( myproc == (ic+(jc-1)*nproc_x-1) ) THEN
          itema = 0
          ! print *,'sending data',itema,itemb,myproc
          CALL mpi_send (tem1,nx*ny*nz,MPI_REAL,itema,                  &
                         itemb,MPI_COMM_WORLD,imstat)
        END IF

        itemc = ic+(jc-1)*nproc_x-1
        IF(myproc == 0)THEN            ! receive data
          ! print *,'receiving data',itemc,itemb,myproc
          CALL mpi_recv (tem1,nx*ny*nz,MPI_REAL,itemc,                  &
                         itemb,MPI_COMM_WORLD,stat,imstat)
        END IF

      END IF

!  storage section

      IF(myproc == 0)THEN  ! store data into globvar
        DO k=1,nz
          DO j=1,ny
            ja = j + (jc-1)*(ny-fzone)
            DO i=1,nx
              ia = i + (ic-1)*(nx-fzone)
              ! print *,ic,jc,ia,ja,i,j,k
              globvar(ia,ja,k) = tem1(i,j,k)
            END DO          
          END DO      
        END DO      
      END IF

      CALL mpbarrier

    END DO      
  END DO      

!  IF(myproc ==0 ) THEN   !  write the file.....
!
!     write(char1(length+1:length+5),'(a5)')  '.form'
!!    itemc = 80
!!    CALL strlnth(char1,itemc)
!!    CALL comlnth(char1,itemc)
!!    print *,'inside mpimerge', length,char1(1:length+5)
!     open(10,file=char1(1:length+5),form= 'formatted',status='unknown')
!     DO k=1,nz
!     DO j=1,(ny-3)*nproc_y+3
!     DO i=1,(nx-3)*nproc_x+3
!     write(10,'(3(i5),2x,g17.11)') i,j,k,globvar(i,j,k)
!     END DO
!     END DO
!     END DO
!     close (10)
!
!     write(char1(length+1:length+7),'(a7)')  '.unform'
!!    itemc = 80
!!    CALL comlnth(char1,itemc)
!!    CALL strlnth(char1,itemc)
!!    print *,'inside mpimerge', itemc,length,char1(1:itemc)
!!    print *,'inside mpimerge', length,char1(1:length+7)
!     open(11,file=char1(1:length+7),form= 'unformatted',status='unknown')
!     write (11) globvar
!     close (11)
!
!  END IF

  RETURN
END SUBROUTINE mpimerge
!
!##################################################################
!##################################################################
!######                                                      ######
!######                SUBROUTINE MPIMERGE1dx                ######
!######                                                      ######
!######                     Developed by                     ######
!######     Center for Analysis and Prediction of Storms     ######
!######                University of Oklahoma                ######
!######                                                      ######
!##################################################################
!##################################################################
!

SUBROUTINE mpimerge1dx(locvar,nx,globvar)
!
!-----------------------------------------------------------------------
!
!  PURPOSE:
!
!  Generate global array from a multiprocessor run 
!
!-----------------------------------------------------------------------
!
!
!  AUTHOR: Yunheng Wang
!  2002/08/15
!  Based on subroutine mpimerge
!
!  MODIFICATION HISTORY:
!
!-----------------------------------------------------------------------
!
!  INPUT :
!
!    locvar   Local array on each processor.
!
!    nx       Dimension of the array
!
!  OUTPUT:
!
!    globvar  Global array (complete only on processor 0).
!
!-----------------------------------------------------------------------
!
!  Variable declarations.
!
!-----------------------------------------------------------------------

  IMPLICIT NONE

  INTEGER, INTENT(IN) :: nx
  REAL, INTENT(IN) :: locvar(nx)

!-----------------------------------------------------------------------
!
!  Include files.
!
!-----------------------------------------------------------------------

  INCLUDE 'mpif.h'
  INCLUDE 'globcst.inc'
  INCLUDE 'mp.inc'

  REAL, INTENT(OUT):: globvar((nx-3)*nproc_x+3)
                            ! Output array in global domain.

!-----------------------------------------------------------------------
!
!  Local variable declarations.
!
!-----------------------------------------------------------------------

  REAL :: tem(nx)

  INTEGER :: mptag        ! Unique MPI id.
  INTEGER :: mytag
  INTEGER, PARAMETER :: master = 0
  INTEGER :: source
  INTEGER :: ia, ic,jc, i0, fzone
  INTEGER :: stat(mpi_status_size)
  INTEGER :: imstat
  INTEGER :: i 

!@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@
!
!  Beginning of executable code...
!
!@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@

  CALL inctag
  mptag = gentag

!-----------------------------------------------------------------------
!
!  fill the globvar array
!
!-----------------------------------------------------------------------

  fzone = 3 !  arps.

  tem(:) = locvar(:) ! each processor stores the locvar into tem
 
  DO jc=1,nproc_y
    DO ic=1,nproc_x

      ! message passing section...

      IF(jc == 1) THEN

        IF(ic /= 1) THEN     !  pass data to processor 0

          mytag = mptag + 100 + ic + 1
          IF( myproc == ic-1 )THEN
            CALL mpi_send (tem,nx,MPI_REAL,master,                      &
                           mytag,MPI_COMM_WORLD,imstat)

            !CALL mpi_bsend (tem,nx,MPI_REAL,master,                    &
            !                mytag,MPI_COMM_WORLD,imstat)
                                                      !forced buffering
          END IF

          IF(myproc == 0)THEN          ! receive data
            source = ic - 1
            CALL mpi_recv (tem,nx,MPI_REAL,source,                      &
                           mytag, MPI_COMM_WORLD,stat,imstat)
          END IF

        END IF

        !  storage section

        IF(myproc == 0)THEN  ! store data into globvar

          IF (ic == 1) THEN
            i0 = 1
          ELSE
            i0 = 2
          END IF

          DO i=i0,nx
            ia = i + (ic-1)*(nx-fzone)
            globvar(ia) = tem(i)
          END DO

        END IF

      END IF    ! jc == 1

      CALL mpbarrier

    END DO      
  END DO      

  RETURN
END SUBROUTINE mpimerge1dx
!
!##################################################################
!##################################################################
!######                                                      ######
!######                SUBROUTINE MPIMERGE1dy                ######
!######                                                      ######
!######                     Developed by                     ######
!######     Center for Analysis and Prediction of Storms     ######
!######                University of Oklahoma                ######
!######                                                      ######
!##################################################################
!##################################################################
!

SUBROUTINE mpimerge1dy(locvar,ny,globvar)
!
!-----------------------------------------------------------------------
!
!  PURPOSE:
!
!  Generate global array from a multiprocessor run 
!
!-----------------------------------------------------------------------
!
!
!  AUTHOR: Yunheng Wang
!  2002/08/15
!  Based on subroutine mpimerge
!
!  MODIFICATION HISTORY:
!
!-----------------------------------------------------------------------
!
!  INPUT :
!
!    locvar   Local array on each processor.
!
!    ny       Dimension of the array
!
!  OUTPUT:
!
!    globvar  Global array (complete only on processor 0).
!
!-----------------------------------------------------------------------
!
!  Variable declarations.
!
!-----------------------------------------------------------------------

  IMPLICIT NONE

  INTEGER, INTENT(IN) :: ny
  REAL, INTENT(IN) :: locvar(ny)

!-----------------------------------------------------------------------
!
!  Include files.
!
!-----------------------------------------------------------------------

  INCLUDE 'mpif.h'
  INCLUDE 'globcst.inc'
  INCLUDE 'mp.inc'

  REAL, INTENT(OUT):: globvar((ny-3)*nproc_y+3)
                            ! Output array in global domain.

!-----------------------------------------------------------------------
!
!  Local variable declarations.
!
!-----------------------------------------------------------------------

  REAL :: tem(ny)

  INTEGER :: mptag        ! Unique MPI id.
  INTEGER :: mytag
  INTEGER, PARAMETER :: master = 0
  INTEGER :: source
  INTEGER :: ja, ic,jc, j0, fzone
  INTEGER :: stat(mpi_status_size)
  INTEGER :: imstat
  INTEGER :: j 

!@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@
!
!  Beginning of executable code...
!
!@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@

  CALL inctag
  mptag = gentag

!-----------------------------------------------------------------------
!
!  fill the globvar array
!
!-----------------------------------------------------------------------

  fzone = 3 !  arps.

  tem(:) = locvar(:) ! each processor stores the locvar into tem
 
  DO jc=1,nproc_y
    DO ic=1,nproc_x

      ! message passing section...

      IF(ic == 1) THEN

        IF(jc /= 1) THEN     !  pass data to processor 0

          mytag = mptag + 100 + jc + 1
          IF(myproc == (jc-1)*nproc_x )THEN
            CALL mpi_send (tem,ny,MPI_REAL,master,                      &
                           mytag,MPI_COMM_WORLD,imstat)

            !CALL mpi_bsend (tem,ny,MPI_REAL,master,                    &
            !                mytag,MPI_COMM_WORLD,imstat)
                                                      !forced buffering
          END IF

          IF(myproc == 0)THEN          ! receive data
            source = (jc-1)*nproc_x
            CALL mpi_recv (tem,ny,MPI_REAL,source,                      &
                           mytag, MPI_COMM_WORLD,stat,imstat)
          END IF

        END IF

        !  storage section

        IF(myproc == 0)THEN  ! store data into globvar

          IF (jc == 1) THEN
            j0 = 1
          ELSE
            j0 = 2
          END IF

          DO j=j0,ny
            ja = j + (jc-1)*(ny-fzone)
            globvar(ja) = tem(j)
          END DO

        END IF

      END IF    ! ic == 1

      CALL mpbarrier

    END DO      
  END DO      

  RETURN
END SUBROUTINE mpimerge1dy

!##################################################################
!##################################################################
!######                                                      ######
!######                SUBROUTINE MPIMERGE2D                 ######
!######                                                      ######
!######                     Developed by                     ######
!######     Center for Analysis and Prediction of Storms     ######
!######                University of Oklahoma                ######
!######                                                      ######
!##################################################################
!##################################################################
!

SUBROUTINE mpimerge2d(locvar,nx,ny,globvar)
!
!-----------------------------------------------------------------------
!
!  PURPOSE:
!
!  Generate global array from a multiprocessor run 
!
!-----------------------------------------------------------------------
!
!
!  AUTHOR: Yunheng Wang
!  2002/08/15
!  Based on subroutine mpimerge
!
!  MODIFICATION HISTORY:
!
!-----------------------------------------------------------------------
!
!  INPUT :
!
!    locvar   Local array on each processor.
!
!    nx       Number of grid points in the x-direction (east/west)
!    ny       Number of grid points in the y-direction (north/south)
!
!  OUTPUT:
!
!    globvar  Global array (complete only on processor 0).
!
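!  NOTE:
!
!  Processors with ic>1 (or jc>1) start storing at i0=2 (or j0=2): the
!  first column (row) of their subdomain duplicates fake-zone points
!  already stored by the neighbor to the west (south).
!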
!-----------------------------------------------------------------------
!
!  Variable declarations.
!
!-----------------------------------------------------------------------

  IMPLICIT NONE

  INTEGER, INTENT(IN) :: nx,ny
                            ! Number of grid points in x and y
  REAL, INTENT(IN) :: locvar(nx,ny)

!-----------------------------------------------------------------------
!
!  Include files.
!
!-----------------------------------------------------------------------

  INCLUDE 'mpif.h'
  INCLUDE 'globcst.inc'
  INCLUDE 'mp.inc'


  REAL, INTENT(OUT):: globvar((nx-3)*nproc_x+3,(ny-3)*nproc_y+3)
                            ! Output array in global domain.
!-----------------------------------------------------------------------
!
!  Local variable declarations.
!
!-----------------------------------------------------------------------

  REAL :: tem(nx,ny)

  INTEGER :: mptag        ! Unique MPI id.
  INTEGER :: mytag
  INTEGER, PARAMETER :: master = 0
  INTEGER :: source
  INTEGER :: ia,ja, ic,jc, i0,j0, fzone
  INTEGER :: stat(mpi_status_size)
  INTEGER :: imstat
  INTEGER :: i, j

!@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@
!
!  Beginning of executable code...
!
!@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@

  CALL inctag
  mptag = gentag

!-----------------------------------------------------------------------
!
!  fill the globvar array
!
!-----------------------------------------------------------------------

  fzone = 3 !  arps.

  tem(:,:) = locvar(:,:) ! each processor stores the locvar into tem
 
  DO jc=1,nproc_y
    DO ic=1,nproc_x

      ! message passing section...

      IF(ic /=1 .OR. jc /=1)THEN     !  pass data to processor 0

        mytag = mptag + 100 + ic + jc

        IF(myproc == (ic+(jc-1)*nproc_x-1))THEN
       
          CALL mpi_send (tem,nx*ny,MPI_REAL,master,               &
                         mytag,MPI_COMM_WORLD,imstat) 

          !CALL mpi_bsend (tem,nx*ny,MPI_REAL,master,             &
          !                mytag,MPI_COMM_WORLD,imstat) 
                                                    !forced buffering
        END IF

        IF(myproc == 0)THEN          ! receive data

          source = ic+(jc-1)*nproc_x-1

          CALL mpi_recv (tem,nx*ny,MPI_REAL,source,               &
                         mytag, MPI_COMM_WORLD,stat,imstat)
        END IF

      END IF

      !  storage section

      IF(myproc == 0)THEN  ! store data into globvar

        IF (ic == 1) THEN
          i0 = 1
        ELSE
          i0 = 2
        END IF

        IF (jc == 1) THEN
          j0 = 1
        ELSE
          j0 = 2
        END IF

        DO j=j0,ny
          ja = j + (jc-1)*(ny-fzone)
          DO i=i0,nx
            ia = i + (ic-1)*(nx-fzone)
            globvar(ia,ja) = tem(i,j)
          END DO
        END DO

      END IF

      CALL mpbarrier

    END DO      
  END DO      

  RETURN
END SUBROUTINE mpimerge2d
!
!##################################################################
!##################################################################
!######                                                      ######
!######                SUBROUTINE MPIMERGE2di                ######
!######                                                      ######
!######                     Developed by                     ######
!######     Center for Analysis and Prediction of Storms     ######
!######                University of Oklahoma                ######
!######                                                      ######
!##################################################################
!##################################################################
!

SUBROUTINE mpimerge2di(locvar,nx,ny,globvar)
!
!-----------------------------------------------------------------------
!
!  PURPOSE:
!
!  Generate global array from a multiprocessor run 
!
!-----------------------------------------------------------------------
!
!
!  AUTHOR: Yunheng Wang
!  2002/08/15
!  Based on subroutine mpimerge
!
!  MODIFICATION HISTORY:
!
!-----------------------------------------------------------------------
!
!  INPUT :
!
!    locvar   Local array on each processor.
!
!    nx       Number of grid points in the x-direction (east/west)
!    ny       Number of grid points in the y-direction (north/south)
!
!  OUTPUT:
!
!    globvar  Global array (complete only on processor 0).
!
!-----------------------------------------------------------------------

  IMPLICIT NONE

!-----------------------------------------------------------------------
!
!  Include files.
!
!-----------------------------------------------------------------------

  INCLUDE 'mpif.h'
  INCLUDE 'globcst.inc'
  INCLUDE 'mp.inc'

!-----------------------------------------------------------------------
!
!  Variable declarations.
!
!-----------------------------------------------------------------------

  INTEGER, INTENT(IN) :: nx,ny
                            ! Number of grid points in x and y
  INTEGER, INTENT(IN) :: locvar(nx,ny)

  INTEGER, INTENT(OUT):: globvar((nx-3)*nproc_x+3,(ny-3)*nproc_y+3)
                            ! Output array in global domain.

!-----------------------------------------------------------------------
!
!  Local variable declarations.
!
!-----------------------------------------------------------------------

  INTEGER :: tem(nx,ny)

  INTEGER :: mptag        ! Unique MPI id.
  INTEGER :: mytag
  INTEGER, PARAMETER :: master = 0
  INTEGER :: source
  INTEGER :: ia,ja, ic,jc, i0,j0, fzone
  INTEGER :: stat(mpi_status_size)
  INTEGER :: imstat
  INTEGER :: i, j

!@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@
!
!  Beginning of executable code...
!
!@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@

  CALL inctag
  mptag = gentag

!-----------------------------------------------------------------------
!
!  fill the globvar array
!
!-----------------------------------------------------------------------

  fzone = 3 !  arps.

  tem(:,:) = locvar(:,:) ! each processor stores the locvar into tem
 
  DO jc=1,nproc_y
    DO ic=1,nproc_x

      ! message passing section...

      IF(ic /=1 .OR. jc /=1)THEN     !  pass data to processor 0
        
        mytag = mptag + 100 + ic + jc
        IF(myproc == (ic+(jc-1)*nproc_x-1))THEN
       
          CALL mpi_send (tem,nx*ny,MPI_INTEGER,master,               &
                         mytag,MPI_COMM_WORLD,imstat) 

          !CALL mpi_bsend (tem,nx*ny,MPI_INTEGER,master,             &
          !                mytag,MPI_COMM_WORLD,imstat) 
                                                    !forced buffering
        END IF

        IF(myproc == 0)THEN          ! receive data

          source = ic+(jc-1)*nproc_x-1

          CALL mpi_recv (tem,nx*ny,MPI_INTEGER,source,               &
                         mytag, MPI_COMM_WORLD,stat,imstat)
        END IF

      END IF

      !  storage section

      IF(myproc == 0)THEN  ! store data into globvar

        IF (ic == 1) THEN
          i0 = 1
        ELSE
          i0 = 2
        END IF

        IF (jc == 1) THEN
          j0 = 1
        ELSE
          j0 = 2
        END IF

        DO j=j0,ny
          ja = j + (jc-1)*(ny-fzone)
          DO i=i0,nx
            ia = i + (ic-1)*(nx-fzone)
            globvar(ia,ja) = tem(i,j)
          END DO
        END DO

      END IF

      CALL mpbarrier

    END DO      
  END DO      

  RETURN
END SUBROUTINE mpimerge2di
!
!##################################################################
!##################################################################
!######                                                      ######
!######                SUBROUTINE MPIMERGE3d                 ######
!######                                                      ######
!######                     Developed by                     ######
!######     Center for Analysis and Prediction of Storms     ######
!######                University of Oklahoma                ######
!######                                                      ######
!##################################################################
!##################################################################
!

SUBROUTINE mpimerge3d(locvar,nx,ny,nz,globvar)
!
!-----------------------------------------------------------------------
!
!  PURPOSE:
!
!  Generate global array from a multiprocessor run 
!
!-----------------------------------------------------------------------
!
!
!  AUTHOR: Yunheng Wang
!  2002/08/15
!  Based on subroutine mpimerge
!
!  MODIFICATION HISTORY:
!
!-----------------------------------------------------------------------
!
!  INPUT :
!
!    locvar   Local array on each processor.
!
!    nx       Number of grid points in the x-direction (east/west)
!    ny       Number of grid points in the y-direction (north/south)
!    nz       Number of grid points in the vertical
!
!  OUTPUT:
!
!    globvar  Global array (complete only on processor 0).
!
!-----------------------------------------------------------------------
!
!  Variable declarations.
!
!-----------------------------------------------------------------------

  IMPLICIT NONE

  INTEGER, INTENT(IN) :: nx,ny,nz          
                            ! Number of grid points in x, y and z
  REAL, INTENT(IN) :: locvar(nx,ny,nz)

!-----------------------------------------------------------------------
!
!  Include files.
!
!-----------------------------------------------------------------------

  INCLUDE 'mpif.h'
  INCLUDE 'globcst.inc'
  INCLUDE 'mp.inc'


  REAL, INTENT(OUT):: globvar((nx-3)*nproc_x+3,(ny-3)*nproc_y+3,nz)
                            ! Output array in global domain.
!-----------------------------------------------------------------------
!
!  Local variable declarations.
!
!-----------------------------------------------------------------------

  REAL :: tem(nx,ny,nz)

  INTEGER :: mptag        ! Unique MPI id.
  INTEGER :: mytag
  INTEGER, PARAMETER :: master = 0
  INTEGER :: source
  INTEGER :: ia,ja, ic,jc, i0,j0, fzone
  INTEGER :: stat(mpi_status_size)
  INTEGER :: imstat
  INTEGER :: i, j, k

!@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@
!
!  Beginning of executable code...
!
!@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@

  CALL inctag
  mptag = gentag

!-----------------------------------------------------------------------
!
!  fill the globvar array
!
!-----------------------------------------------------------------------

  fzone = 3 !  arps.

  tem(:,:,:) = locvar(:,:,:) ! each processor stores the locvar into tem
 
  DO jc=1,nproc_y
    DO ic=1,nproc_x

      ! message passing section...

      IF(ic /=1 .OR. jc /=1)THEN     !  pass data to processor 0

        mytag = mptag + 100 + ic + jc

        IF(myproc == (ic+(jc-1)*nproc_x-1))THEN
       
          CALL mpi_send (tem,nx*ny*nz,MPI_REAL,master,               &
                         mytag,MPI_COMM_WORLD,imstat) 

          !CALL mpi_bsend (tem,nx*ny*nz,MPI_REAL,master,             &
          !                mytag,MPI_COMM_WORLD,imstat) 
                                                    !forced buffering
        END IF

        IF(myproc == 0)THEN          ! receive data

          source = ic+(jc-1)*nproc_x-1

          CALL mpi_recv (tem,nx*ny*nz,MPI_REAL,source,               &
                         mytag, MPI_COMM_WORLD,stat,imstat)
        END IF

      END IF

      !  storage section

      IF(myproc == 0)THEN  ! store data into globvar

        IF (ic == 1) THEN
          i0 = 1
        ELSE
          i0 = 2
        END IF

        IF (jc == 1) THEN
          j0 = 1
        ELSE
          j0 = 2
        END IF

        DO k=1,nz
          DO j=j0,ny
            ja = j + (jc-1)*(ny-fzone)
            DO i=i0,nx
              ia = i + (ic-1)*(nx-fzone)
              globvar(ia,ja,k) = tem(i,j,k)
            END DO          
          END DO      
        END DO      

      END IF

      CALL mpbarrier

    END DO      
  END DO      

  RETURN
END SUBROUTINE mpimerge3d
!
!##################################################################
!##################################################################
!######                                                      ######
!######                SUBROUTINE MPIMERGE3di                ######
!######                                                      ######
!######                     Developed by                     ######
!######     Center for Analysis and Prediction of Storms     ######
!######                University of Oklahoma                ######
!######                                                      ######
!##################################################################
!##################################################################
!

SUBROUTINE mpimerge3di(locvar,nx,ny,nz,globvar)
!
!-----------------------------------------------------------------------
!
!  PURPOSE:
!
!  Generate global array from a multiprocessor run 
!
!-----------------------------------------------------------------------
!
!
!  AUTHOR: Yunheng Wang
!  2002/08/15
!  Based on subroutine mpimerge
!
!  MODIFICATION HISTORY:
!
!-----------------------------------------------------------------------
!
!  INPUT :
!
!    locvar   Local array on each processor.
!
!    nx       Number of grid points in the x-direction (east/west)
!    ny       Number of grid points in the y-direction (north/south)
!    nz       Number of grid points in the vertical
!
!  OUTPUT:
!
!    globvar  Global array (complete only on processor 0).
!
!-----------------------------------------------------------------------

  IMPLICIT NONE

!-----------------------------------------------------------------------
!
!  Include files.
!
!-----------------------------------------------------------------------

  INCLUDE 'mpif.h'
  INCLUDE 'globcst.inc'
  INCLUDE 'mp.inc'

!-----------------------------------------------------------------------
!
!  Variable declarations.
!
!-----------------------------------------------------------------------

  INTEGER, INTENT(IN) :: nx,ny,nz          
                            ! Number of grid points in x, y and z
  INTEGER, INTENT(IN) :: locvar(nx,ny,nz)

  INTEGER, INTENT(OUT):: globvar((nx-3)*nproc_x+3,(ny-3)*nproc_y+3,nz)
                            ! Output array in global domain.

!-----------------------------------------------------------------------
!
!  Local variable declarations.
!
!-----------------------------------------------------------------------

  INTEGER :: tem(nx,ny,nz)

  INTEGER :: mptag        ! Unique MPI id.
  INTEGER :: mytag
  INTEGER, PARAMETER :: master = 0
  INTEGER :: source
  INTEGER :: ia,ja, ic,jc, i0,j0, fzone
  INTEGER :: stat(mpi_status_size)
  INTEGER :: imstat
  INTEGER :: i, j, k

!@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@
!
!  Beginning of executable code...
!
!@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@

  CALL inctag
  mptag = gentag

!-----------------------------------------------------------------------
!
!  fill the globvar array
!
!-----------------------------------------------------------------------

  fzone = 3 !  arps.

  tem(:,:,:) = locvar(:,:,:) ! each processor stores the locvar into tem
 
  DO jc=1,nproc_y
    DO ic=1,nproc_x

      ! message passing section...

      IF(ic /=1 .OR. jc /=1)THEN     !  pass data to processor 0
        
        mytag = mptag + 100 + ic + jc
        IF(myproc == (ic+(jc-1)*nproc_x-1))THEN
       
          CALL mpi_send (tem,nx*ny*nz,MPI_INTEGER,master,               &
                         mytag,MPI_COMM_WORLD,imstat) 

          !CALL mpi_bsend (tem,nx*ny*nz,MPI_INTEGER,master,             &
          !                mytag,MPI_COMM_WORLD,imstat) 
                                                    !forced buffering
        END IF

        IF(myproc == 0)THEN          ! receive data

          source = ic+(jc-1)*nproc_x-1

          CALL mpi_recv (tem,nx*ny*nz,MPI_INTEGER,source,               &
                         mytag, MPI_COMM_WORLD,stat,imstat)
        END IF

      END IF

      !  storage section

      IF(myproc == 0)THEN  ! store data into globvar

        IF (ic == 1) THEN
          i0 = 1
        ELSE
          i0 = 2
        END IF

        IF (jc == 1) THEN
          j0 = 1
        ELSE
          j0 = 2
        END IF

        DO k=1,nz
          DO j=j0,ny
            ja = j + (jc-1)*(ny-fzone)
            DO i=i0,nx
              ia = i + (ic-1)*(nx-fzone)
              globvar(ia,ja,k) = tem(i,j,k)
            END DO          
          END DO      
        END DO      

      END IF

      CALL mpbarrier

    END DO      
  END DO      

  RETURN
END SUBROUTINE mpimerge3di
!
!##################################################################
!##################################################################
!######                                                      ######
!######                SUBROUTINE MPIMERGE4d                 ######
!######                                                      ######
!######                     Developed by                     ######
!######     Center for Analysis and Prediction of Storms     ######
!######                University of Oklahoma                ######
!######                                                      ######
!##################################################################
!##################################################################
!


SUBROUTINE mpimerge4d(locvar,nx,ny,nzsoil,nstyps,globvar)

!
!-----------------------------------------------------------------------
!
!  PURPOSE:
!
!  Generate global array from a multiprocessor run 
!
!-----------------------------------------------------------------------
!
!
!  AUTHOR: Yunheng Wang
!  2002/08/15
!  Based on subroutine mpimerge
!
!  MODIFICATION HISTORY:
!
!-----------------------------------------------------------------------
!
!  INPUT :
!
!    locvar   Local array on each processor.
!
!    nx       Number of grid points in the x-direction (east/west)
!    ny       Number of grid points in the y-direction (north/south)
!    nzsoil   Number of soil levels in the vertical
!    nstyps   Number of soil types
!
!  OUTPUT:
!
!    globvar  Global array (complete only on processor 0).
!
!-----------------------------------------------------------------------

  IMPLICIT NONE

!-----------------------------------------------------------------------
!
!  Include files.
!
!-----------------------------------------------------------------------

  INCLUDE 'mpif.h'
  INCLUDE 'globcst.inc'
  INCLUDE 'mp.inc'

!-----------------------------------------------------------------------
!
!  Variable declarations.
!
!-----------------------------------------------------------------------

  INTEGER, INTENT(IN) :: nx,ny,nzsoil, nstyps
                            ! Number of grid points in x and y, number
                            ! of soil levels, and number of soil types
  REAL, INTENT(IN) :: locvar(nx,ny,nzsoil,nstyps)

  REAL, INTENT(OUT):: globvar((nx-3)*nproc_x+3,(ny-3)*nproc_y+3,nzsoil,nstyps)
                            ! Output array in global domain.

!-----------------------------------------------------------------------
!
!  Local variable declarations.
!
!-----------------------------------------------------------------------

  REAL :: tem(nx,ny,nzsoil,nstyps)

  INTEGER :: mptag        ! Unique MPI id.
  INTEGER :: mytag
  INTEGER, PARAMETER :: master = 0
  INTEGER :: source
  INTEGER :: ia,ja, ic,jc, i0,j0, fzone
  INTEGER :: stat(mpi_status_size)
  INTEGER :: imstat
  INTEGER :: i, j, k, n

!@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@
!
!  Beginning of executable code...
!
!@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@

  CALL inctag
  mptag = gentag

!-----------------------------------------------------------------------
!
!  fill the globvar array
!
!-----------------------------------------------------------------------

  fzone = 3 !  arps.

  tem(:,:,:,:) = locvar(:,:,:,:) ! each processor stores the locvar into tem
 
  DO jc=1,nproc_y
    DO ic=1,nproc_x

      ! message passing section...

      IF(ic /=1 .OR. jc /=1)THEN     !  pass data to processor 0

        mytag = mptag + 100 + ic + jc
        IF(myproc == (ic+(jc-1)*nproc_x-1))THEN
       
          CALL mpi_send (tem,nx*ny*nzsoil*nstyps,MPI_REAL,master,    &
                         mytag,MPI_COMM_WORLD,imstat) 

          !CALL mpi_bsend (tem,nx*ny*nzsoil*nstyps,MPI_REAL,master,  &
          !                mytag,MPI_COMM_WORLD,imstat)
                                                    !forced buffering
        END IF

        IF(myproc == 0)THEN          ! receive data

          source = ic+(jc-1)*nproc_x-1

          CALL mpi_recv (tem,nx*ny*nzsoil*nstyps,MPI_REAL,source,    &
                         mytag, MPI_COMM_WORLD,stat,imstat)
        END IF

      END IF

      !  storage section

      IF(myproc == 0)THEN  ! store data into globvar

        IF (ic == 1) THEN
          i0 = 1
        ELSE
          i0 = 2
        END IF

        IF (jc == 1) THEN
          j0 = 1
        ELSE
          j0 = 2
        END IF

        DO n=1,nstyps
          DO k=1,nzsoil
            DO j=j0,ny
              ja = j + (jc-1)*(ny-fzone)
              DO i=i0,nx
                ia = i + (ic-1)*(nx-fzone)
                globvar(ia,ja,k,n) = tem(i,j,k,n)
              END DO
            END DO
          END DO
        END DO

      END IF

      CALL mpbarrier

    END DO      
  END DO      

  RETURN
END SUBROUTINE mpimerge4d
!
!
!##################################################################
!##################################################################
!######                                                      ######
!######                SUBROUTINE MPISPLIT1DX                ######
!######                                                      ######
!######                     Developed by                     ######
!######     Center for Analysis and Prediction of Storms     ######
!######                University of Oklahoma                ######
!######                                                      ######
!##################################################################
!##################################################################
!


SUBROUTINE mpisplit1dx(globvar,nx,var)

!
!-----------------------------------------------------------------------
!
!  PURPOSE:
!
!  Split the global array and scatter to each processor.
!
!-----------------------------------------------------------------------
!
!
!  AUTHOR: Yunheng Wang
!  2002/08/20
!
!  MODIFICATION HISTORY:
!
!-----------------------------------------------------------------------
!
!  INPUT :
!
!    globvar  Global array passed in from processor 0.
!
!    nx       Dimension of the array in subdomain.
!
!  OUTPUT:
!
!    var      Subdomain variable in each process.
!
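!  NOTE:
!
!  mpisplit1dx is the inverse of mpimerge1dx: processor 0 carves the
!  global vector into overlapping nx-point pieces and sends one to each
!  processor column.  A hypothetical round trip (x and xglob are
!  illustrative names) leaves the data unchanged:
!
!    CALL mpisplit1dx(xglob,nx,x)    ! scatter global x-coordinates
!    CALL mpimerge1dx(x,nx,xglob)    ! gather them back on processor 0
!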
!-----------------------------------------------------------------------

  IMPLICIT NONE

!-----------------------------------------------------------------------
!
!  Include files.
!
!-----------------------------------------------------------------------

  INCLUDE 'mpif.h'
  INCLUDE 'globcst.inc'
  INCLUDE 'mp.inc'

!-----------------------------------------------------------------------
!
!  Variable declarations.
!
!-----------------------------------------------------------------------

  INTEGER, INTENT(IN) :: nx
  REAL, INTENT(IN):: globvar((nx-3)*nproc_x+3)

  REAL, INTENT(OUT) :: var(nx)


!-----------------------------------------------------------------------
!
!  Local variable declarations.
!
!-----------------------------------------------------------------------

  INTEGER, PARAMETER :: master = 0

  INTEGER :: mptag        ! Unique MPI id.
  INTEGER :: mytag
  INTEGER :: target
  INTEGER :: ia,ic,jc, fzone
  INTEGER :: stat(mpi_status_size)
  INTEGER :: imstat
  INTEGER :: i

!@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@
!
!  Beginning of executable code...
!
!@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@

  CALL inctag
  mptag = gentag

!-----------------------------------------------------------------------
!
!  fill the globvar array
!
!-----------------------------------------------------------------------

  fzone = 3        !  arps.

  DO jc=1,nproc_y
    DO ic=1,nproc_x

      !  storage section

      IF(myproc == 0)THEN  ! store data into var

        DO i=1,nx
           ia = i + (ic-1)*(nx-fzone)
           var(i) = globvar(ia)
        END DO      

      END IF

      ! message passing section...

      IF(ic /=1 .OR. jc /= 1)THEN     !  receive data from processor 0

        mytag = mptag + 100 + ic + jc

        IF(myproc == 0)THEN          ! send data

          target = ic+(jc-1)*nproc_x-1

          CALL mpi_send (var,nx,MPI_REAL,target,                 &
                         mytag, MPI_COMM_WORLD,imstat)
        END IF

        IF(myproc == (ic+(jc-1)*nproc_x-1))THEN
       
          CALL mpi_recv (var,nx,MPI_REAL,master,               &
                         mytag,MPI_COMM_WORLD,stat,imstat) 

        END IF

      END IF

      CALL mpbarrier
 
    END DO      
  END DO      

! At the end, make sure processor 0 holds the correct values (its own
! ic=1, jc=1 block).
  IF(myproc == 0) var(:) = globvar(1:nx)

  RETURN
END SUBROUTINE mpisplit1dx
!
!##################################################################
!##################################################################
!######                                                      ######
!######                SUBROUTINE MPISPLIT1DY                ######
!######                                                      ######
!######                     Developed by                     ######
!######     Center for Analysis and Prediction of Storms     ######
!######                University of Oklahoma                ######
!######                                                      ######
!##################################################################
!##################################################################
!


SUBROUTINE mpisplit1dy(globvar,ny,var)

!
!-----------------------------------------------------------------------
!
!  PURPOSE:
!
!  Split the global array and scatter to each processor.
!
!-----------------------------------------------------------------------
!
!
!  AUTHOR: Yunheng Wang
!  2002/08/20
!
!  MODIFICATION HISTORY:
!
!-----------------------------------------------------------------------
!
!  INPUT :
!
!    globvar  Global array passed in from processor 0.
!
!    ny       Number of grid points in the y-direction (north/south)
!
!  OUTPUT:
!
!    var      Subdomain variable in each process.
!
!-----------------------------------------------------------------------

  IMPLICIT NONE

!-----------------------------------------------------------------------
!
!  Include files.
!
!-----------------------------------------------------------------------

  INCLUDE 'mpif.h'
  INCLUDE 'globcst.inc'
  INCLUDE 'mp.inc'

!-----------------------------------------------------------------------
!
!  Variable declarations.
!
!-----------------------------------------------------------------------

  INTEGER, INTENT(IN) :: ny
  REAL, INTENT(IN):: globvar((ny-3)*nproc_y+3)

  REAL, INTENT(OUT) :: var(ny)

!-----------------------------------------------------------------------
!
!  Local variable declarations.
!
!-----------------------------------------------------------------------

  INTEGER, PARAMETER :: master = 0

  INTEGER :: mptag        ! Unique MPI id.
  INTEGER :: mytag
  INTEGER :: target
  INTEGER :: ja, ic,jc, fzone
  INTEGER :: stat(mpi_status_size)
  INTEGER :: imstat
  INTEGER :: j

!@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@
!
!  Beginning of executable code...
!
!@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@

  CALL inctag
  mptag = gentag

!-----------------------------------------------------------------------
!
!  fill the globvar array
!
!-----------------------------------------------------------------------

  fzone = 3        !  arps.

  DO jc=1,nproc_y
    DO ic=1,nproc_x

      !  storage section

      IF(myproc == 0)THEN  ! store data into var

        DO j=1,ny
          ja = j + (jc-1)*(ny-fzone)
          var(j) = globvar(ja)
        END DO

      END IF

      ! message passing section...

      IF(ic /=1 .OR. jc /=1)THEN     !  receive data from processor 0

        mytag = mptag + 100 + ic + jc

        IF(myproc == 0)THEN          ! send data

          target = ic+(jc-1)*nproc_x-1

          CALL mpi_send (var,ny,MPI_REAL,target,               &
                         mytag, MPI_COMM_WORLD,imstat)
        END IF

        IF(myproc == (ic+(jc-1)*nproc_x-1))THEN
       
          CALL mpi_recv (var,ny,MPI_REAL,master,               &
                         mytag,MPI_COMM_WORLD,stat,imstat) 

        END IF

      END IF

      CALL mpbarrier
 
    END DO      
  END DO      

! At the end, make sure processor 0 holds the correct values (its own
! ic=1, jc=1 block).
  IF(myproc == 0) var(:) = globvar(1:ny)

  RETURN
END SUBROUTINE mpisplit1dy
!
!##################################################################
!##################################################################
!######                                                      ######
!######                SUBROUTINE MPISPLIT2d                 ######
!######                                                      ######
!######                     Developed by                     ######
!######     Center for Analysis and Prediction of Storms     ######
!######                University of Oklahoma                ######
!######                                                      ######
!##################################################################
!##################################################################
!


SUBROUTINE mpisplit2d(globvar,nx,ny,var)

!
!-----------------------------------------------------------------------
!
!  PURPOSE:
!
!  Split the global array and scatter to each processor.
!
!-----------------------------------------------------------------------
!
!
!  AUTHOR: Yunheng Wang
!  2002/08/20
!
!  MODIFICATION HISTORY:
!
!-----------------------------------------------------------------------
!
!  INPUT :
!
!    globvar  Global array passed in from processor 0.
!
!    nx       Number of grid points in the x-direction (east/west)
!    ny       Number of grid points in the y-direction (north/south)
!
!  OUTPUT:
!
!    var      Subdomain variable in each process.
!
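!  NOTE:
!
!  A hypothetical use (hterain_glob and hterain are illustrative
!  names), distributing a 2-D field read on processor 0:
!
!    IF (myproc == 0) hterain_glob = ...read global terrain field...
!    CALL mpisplit2d(hterain_glob,nx,ny,hterain)   ! local subdomains
!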
!-----------------------------------------------------------------------

  IMPLICIT NONE

!-----------------------------------------------------------------------
!
!  Include files.
!
!-----------------------------------------------------------------------

  INCLUDE 'mpif.h'
  INCLUDE 'globcst.inc'
  INCLUDE 'mp.inc'

!-----------------------------------------------------------------------
!
!  Variable declarations.
!
!-----------------------------------------------------------------------

  INTEGER, INTENT(IN) :: nx,ny
                            ! Number of grid points in x and y
  REAL, INTENT(IN):: globvar((nx-3)*nproc_x+3,(ny-3)*nproc_y+3)

  REAL, INTENT(OUT) :: var(nx,ny)


!-----------------------------------------------------------------------
!
!  Local variable declarations.
!
!-----------------------------------------------------------------------

  INTEGER, PARAMETER :: master = 0

  INTEGER :: mptag        ! Unique MPI id.
  INTEGER :: mytag
  INTEGER :: target
  INTEGER :: ia,ja, ic,jc, fzone
  INTEGER :: stat(mpi_status_size)
  INTEGER :: imstat
  INTEGER :: i, j

!@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@
!
!  Beginning of executable code...
!
!@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@

  CALL inctag
  mptag = gentag

!-----------------------------------------------------------------------
!
!  fill the var array from globvar
!
!-----------------------------------------------------------------------

  fzone = 3        ! ARPS: subdomains overlap by a 3-point fake zone.

  DO jc=1,nproc_y
    DO ic=1,nproc_x

      !  storage section

      IF(myproc == 0)THEN  ! store data into var

        DO j=1,ny
          ja = j + (jc-1)*(ny-fzone)
          DO i=1,nx
            ia = i + (ic-1)*(nx-fzone)
            var(i,j) = globvar(ia,ja)
          END DO          
        END DO      

      END IF

      ! message passing section...

      IF(ic /=1 .OR. jc /=1)THEN     !  receive data from processor 0

        mytag = mptag + 100 + ic + jc

        IF(myproc == 0)THEN          ! send data

          target = ic+(jc-1)*nproc_x-1

          CALL mpi_send (var,nx*ny,MPI_REAL,target,               &
                         mytag, MPI_COMM_WORLD,imstat)
        END IF

        IF(myproc == (ic+(jc-1)*nproc_x-1))THEN
       
          CALL mpi_recv (var,nx*ny,MPI_REAL,master,               &
                         mytag,MPI_COMM_WORLD,stat,imstat) 

        END IF

      END IF

      CALL mpbarrier
 
    END DO      
  END DO      

! At the end, make sure processor 0 contains the correct values (ic=1,jc=1)
  IF(myproc == 0) var(:,:) = globvar(1:nx,1:ny)     

  RETURN
END SUBROUTINE mpisplit2d
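
!-----------------------------------------------------------------------
!
!  Editorial usage sketch (not part of the original ARPS source):
!  how a caller might scatter a 2D global field with mpisplit2d.
!  Assumes mp.inc supplies nproc_x, nproc_y and myproc, as in the
!  routines above; the analytic fill below stands in for data that
!  would normally be read from a file on processor 0.
!
!-----------------------------------------------------------------------

SUBROUTINE example_mpisplit2d(nx,ny)

  IMPLICIT NONE

  INCLUDE 'mp.inc'

  INTEGER, INTENT(IN) :: nx,ny     ! subdomain dimensions

  REAL :: globvar((nx-3)*nproc_x+3,(ny-3)*nproc_y+3)
  REAL :: var(nx,ny)
  INTEGER :: i,j

  IF (myproc == 0) THEN            ! only the master holds global data
    DO j=1,(ny-3)*nproc_y+3
      DO i=1,(nx-3)*nproc_x+3
        globvar(i,j) = REAL(i+j)   ! stand-in for data read from disk
      END DO
    END DO
  END IF

  CALL mpisplit2d(globvar,nx,ny,var)  ! every process gets its patch,
                                      ! including the 3-point overlap

  RETURN
END SUBROUTINE example_mpisplit2d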
!
!##################################################################
!##################################################################
!######                                                      ######
!######                SUBROUTINE MPISPLIT2DI                ######
!######                                                      ######
!######                     Developed by                     ######
!######     Center for Analysis and Prediction of Storms     ######
!######                University of Oklahoma                ######
!######                                                      ######
!##################################################################
!##################################################################
!


SUBROUTINE mpisplit2di(globvar,nx,ny,var) 6,2

!
!-----------------------------------------------------------------------
!
!  PURPOSE:
!
!  Split the global array and scatter to each processor.
!
!-----------------------------------------------------------------------
!
!
!  AUTHOR: Yunheng Wang
!  2002/08/20
!
!  MODIFICATION HISTORY:
!
!-----------------------------------------------------------------------
!
!  INPUT :
!
!    globvar  Global array passed in from processor 0.
!
!    nx       Number of grid points in the x-direction (east/west)
!    ny       Number of grid points in the y-direction (north/south)
!
!  OUTPUT:
!
!    var      Subdomain variable in each process.
!
!-----------------------------------------------------------------------

  IMPLICIT NONE

!-----------------------------------------------------------------------
!
!  Include files.
!
!-----------------------------------------------------------------------

  INCLUDE 'mpif.h'
  INCLUDE 'globcst.inc'
  INCLUDE 'mp.inc'

!-----------------------------------------------------------------------
!
!  Variable declarations.
!
!-----------------------------------------------------------------------

  INTEGER, INTENT(IN) :: nx,ny
                            ! Number of grid points in x and y
  INTEGER, INTENT(IN):: globvar((nx-3)*nproc_x+3,(ny-3)*nproc_y+3)

  INTEGER, INTENT(OUT) :: var(nx,ny)


!-----------------------------------------------------------------------
!
!  Local variable declarations.
!
!-----------------------------------------------------------------------

  INTEGER, PARAMETER :: master = 0

  INTEGER :: mptag        ! Unique MPI id.
  INTEGER :: mytag
  INTEGER :: target
  INTEGER :: ia,ja, ic,jc, fzone
  INTEGER :: stat(mpi_status_size)
  INTEGER :: imstat
  INTEGER :: i, j

!@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@
!
!  Beginning of executable code...
!
!@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@

  CALL inctag
  mptag = gentag

!-----------------------------------------------------------------------
!
!  fill the var array from globvar
!
!-----------------------------------------------------------------------

  fzone = 3        ! ARPS: subdomains overlap by a 3-point fake zone.

  DO jc=1,nproc_y
    DO ic=1,nproc_x

      !  storage section

      IF(myproc == 0)THEN  ! store data into var

        DO j=1,ny
          ja = j + (jc-1)*(ny-fzone)
          DO i=1,nx
            ia = i + (ic-1)*(nx-fzone)
            var(i,j) = globvar(ia,ja)
          END DO          
        END DO      

      END IF

      ! message passing section...

      IF(ic /=1 .OR. jc /=1)THEN     !  receive data from processor 0

        mytag = mptag + 100 + ic + jc

        IF(myproc == 0)THEN          ! send data

          target = ic+(jc-1)*nproc_x-1

          CALL mpi_send (var,nx*ny,MPI_INTEGER,target,               &
                         mytag, MPI_COMM_WORLD,imstat)
        END IF

        IF(myproc == (ic+(jc-1)*nproc_x-1))THEN
       
          CALL mpi_recv (var,nx*ny,MPI_INTEGER,master,               &
                         mytag,MPI_COMM_WORLD,stat,imstat) 

        END IF

      END IF

      CALL mpbarrier
 
    END DO      
  END DO      

! At the end, make sure processor 0 contains the correct values (ic=1,jc=1)
  IF(myproc == 0) var(:,:) = globvar(1:nx,1:ny)     

  RETURN
END SUBROUTINE mpisplit2di
!
!##################################################################
!##################################################################
!######                                                      ######
!######                SUBROUTINE MPISPLIT3d                 ######
!######                                                      ######
!######                     Developed by                     ######
!######     Center for Analysis and Prediction of Storms     ######
!######                University of Oklahoma                ######
!######                                                      ######
!##################################################################
!##################################################################
!


SUBROUTINE mpisplit3d(globvar,nx,ny,nz,var) 239,2

!
!-----------------------------------------------------------------------
!
!  PURPOSE:
!
!  Split the global array and scatter to each processor.
!
!-----------------------------------------------------------------------
!
!
!  AUTHOR: Yunheng Wang
!  2002/08/20
!
!  MODIFICATION HISTORY:
!
!-----------------------------------------------------------------------
!
!  INPUT :
!
!    globvar  Global array passed in from processor 0.
!
!    nx       Number of grid points in the x-direction (east/west)
!    ny       Number of grid points in the y-direction (north/south)
!    nz       3rd dimension of the subdomain array; possible values are
!             the number of vertical grid points (nz in other subroutines),
!             nzsoil, nstyps+1, 4 (prcrate), or 1 (for 2D arrays)
!
!  OUTPUT:
!
!    var      Subdomain variable in each process.
!
!-----------------------------------------------------------------------

  IMPLICIT NONE

!-----------------------------------------------------------------------
!
!  Include files.
!
!-----------------------------------------------------------------------

  INCLUDE 'mpif.h'
  INCLUDE 'globcst.inc'
  INCLUDE 'mp.inc'

!-----------------------------------------------------------------------
!
!  Variable declarations.
!
!-----------------------------------------------------------------------

  INTEGER, INTENT(IN) :: nx,ny,nz          
                            ! Number of grid points in x, y and z
  REAL, INTENT(IN):: globvar((nx-3)*nproc_x+3,(ny-3)*nproc_y+3,nz)

  REAL, INTENT(OUT) :: var(nx,ny,nz)


!-----------------------------------------------------------------------
!
!  Local variable declarations.
!
!-----------------------------------------------------------------------

  INTEGER, PARAMETER :: master = 0

  INTEGER :: mptag        ! Unique MPI id.
  INTEGER :: mytag
  INTEGER :: target
  INTEGER :: ia,ja, ic,jc, fzone
  INTEGER :: stat(mpi_status_size)
  INTEGER :: imstat
  INTEGER :: i, j, k

!@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@
!
!  Beginning of executable code...
!
!@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@

  CALL inctag
  mptag = gentag

!-----------------------------------------------------------------------
!
!  fill the var array from globvar
!
!-----------------------------------------------------------------------

  fzone = 3        ! ARPS: subdomains overlap by a 3-point fake zone.

  DO jc=1,nproc_y
    DO ic=1,nproc_x

      !  storage section

      IF(myproc == 0)THEN  ! store data into var

        DO k=1,nz
          DO j=1,ny
            ja = j + (jc-1)*(ny-fzone)
            DO i=1,nx
              ia = i + (ic-1)*(nx-fzone)
              var(i,j,k) = globvar(ia,ja,k)
            END DO          
          END DO      
        END DO      

      END IF

      ! message passing section...

      IF(ic /=1 .OR. jc /=1)THEN     !  receive data from processor 0

        mytag = mptag + 100 + ic + jc

        IF(myproc == 0)THEN          ! send data

          target = ic+(jc-1)*nproc_x-1

          CALL mpi_send (var,nx*ny*nz,MPI_REAL,target,               &
                         mytag, MPI_COMM_WORLD,imstat)
        END IF

        IF(myproc == (ic+(jc-1)*nproc_x-1))THEN
       
          CALL mpi_recv (var,nx*ny*nz,MPI_REAL,master,               &
                         mytag,MPI_COMM_WORLD,stat,imstat) 

        END IF

      END IF

      CALL mpbarrier
 
    END DO      
  END DO      

! At the end, make sure processor 0 contains the correct values (ic=1,jc=1)
  IF(myproc == 0) var(:,:,:) = globvar(1:nx,1:ny,1:nz)     

  RETURN
END SUBROUTINE mpisplit3d

!##################################################################
!##################################################################
!######                                                      ######
!######                SUBROUTINE MPISPLIT3DI                ######
!######                                                      ######
!######                     Developed by                     ######
!######     Center for Analysis and Prediction of Storms     ######
!######                University of Oklahoma                ######
!######                                                      ######
!##################################################################
!##################################################################
!


SUBROUTINE mpisplit3di(globvar,nx,ny,nz,var) 15,2

!
!-----------------------------------------------------------------------
!
!  PURPOSE:
!
!  Split the global array and scatter to each processor.
!
!-----------------------------------------------------------------------
!
!
!  AUTHOR: Yunheng Wang
!  2002/08/20
!
!  MODIFICATION HISTORY:
!
!-----------------------------------------------------------------------
!
!  INPUT :
!
!    globvar  Global array passed in from processor 0.
!
!    nx       Number of grid points in the x-direction (east/west)
!    ny       Number of grid points in the y-direction (north/south)
!    nz       3rd dimension of the subdomain array; possible values are
!             the number of vertical grid points (nz in other subroutines),
!             nzsoil, nstyps+1, 4 (prcrate), or 1 (for 2D arrays)
!
!  OUTPUT:
!
!    var      Subdomain variable in each process.
!
!-----------------------------------------------------------------------

  IMPLICIT NONE

!-----------------------------------------------------------------------
!
!  Include files.
!
!-----------------------------------------------------------------------

  INCLUDE 'mpif.h'
  INCLUDE 'globcst.inc'
  INCLUDE 'mp.inc'

!-----------------------------------------------------------------------
!
!  Variable declarations.
!
!-----------------------------------------------------------------------

  INTEGER, INTENT(IN) :: nx,ny,nz          
                            ! Number of grid points in x, y and z
  INTEGER, INTENT(IN):: globvar((nx-3)*nproc_x+3,(ny-3)*nproc_y+3,nz)

  INTEGER, INTENT(OUT) :: var(nx,ny,nz)

!-----------------------------------------------------------------------
!
!  Local variable declarations.
!
!-----------------------------------------------------------------------

  INTEGER, PARAMETER :: master = 0

  INTEGER :: mptag        ! Unique MPI id.
  INTEGER :: mytag
  INTEGER :: target
  INTEGER :: ia,ja, ic,jc, fzone
  INTEGER :: stat(mpi_status_size)
  INTEGER :: imstat
  INTEGER :: i, j, k

!@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@
!
!  Beginning of executable code...
!
!@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@

  CALL inctag
  mptag = gentag

!-----------------------------------------------------------------------
!
!  fill the var array from globvar
!
!-----------------------------------------------------------------------

  fzone = 3        ! ARPS: subdomains overlap by a 3-point fake zone.

  DO jc=1,nproc_y
    DO ic=1,nproc_x

      !  storage section

      IF(myproc == 0)THEN  ! store data into var

        DO k=1,nz
          DO j=1,ny
            ja = j + (jc-1)*(ny-fzone)
            DO i=1,nx
              ia = i + (ic-1)*(nx-fzone)
              var(i,j,k) = globvar(ia,ja,k)
            END DO          
          END DO      
        END DO      

      END IF

      ! message passing section...

      IF(ic /=1 .OR. jc /=1)THEN     !  receive data from processor 0

        mytag = mptag + 100 + ic + jc

        IF(myproc == 0)THEN          ! send data

          target = ic+(jc-1)*nproc_x-1

          CALL mpi_send (var,nx*ny*nz,MPI_INTEGER,target,               &
                         mytag, MPI_COMM_WORLD,imstat)
        END IF

        IF(myproc == (ic+(jc-1)*nproc_x-1))THEN
       
          CALL mpi_recv (var,nx*ny*nz,MPI_INTEGER,master,               &
                         mytag,MPI_COMM_WORLD,stat,imstat) 

        END IF

      END IF

      CALL mpbarrier
 
    END DO      
  END DO      

! At the end, make sure processor 0 contains the correct values (ic=1,jc=1)
  IF(myproc == 0) var(:,:,:) = globvar(1:nx,1:ny,1:nz)     

  RETURN
END SUBROUTINE mpisplit3di
!
!##################################################################
!##################################################################
!######                                                      ######
!######                SUBROUTINE MPISPLIT4d                 ######
!######                                                      ######
!######                     Developed by                     ######
!######     Center for Analysis and Prediction of Storms     ######
!######                University of Oklahoma                ######
!######                                                      ######
!##################################################################
!##################################################################
!


SUBROUTINE mpisplit4d(globvar,nx,ny,nzsoil,nstyps,var) 13,2

!
!-----------------------------------------------------------------------
!
!  PURPOSE:
!
!  Split the global array and scatter to each processor.
!
!-----------------------------------------------------------------------
!
!
!  AUTHOR: Yunheng Wang
!  2002/08/20
!
!  MODIFICATION HISTORY:
!
!-----------------------------------------------------------------------
!
!  INPUT :
!
!    globvar  Global array passed in from processor 0.
!
!    nx       Number of grid points in the x-direction (east/west)
!    ny       Number of grid points in the y-direction (north/south)
!    nzsoil   3rd dimension of the subdomain array, typically the same
!             as nzsoil in other subroutines.
!    nstyps   4th dimension of the 4D array, typically
!             nstyps (in other subroutines) + 1.
!
!  OUTPUT:
!
!    var      Subdomain variable in each process.
!
!-----------------------------------------------------------------------

  IMPLICIT NONE

!-----------------------------------------------------------------------
!
!  Include files.
!
!-----------------------------------------------------------------------

  INCLUDE 'mpif.h'
  INCLUDE 'globcst.inc'
  INCLUDE 'mp.inc'

!-----------------------------------------------------------------------
!
!  Variable declarations.
!
!-----------------------------------------------------------------------

  INTEGER, INTENT(IN) :: nx,ny,nzsoil, nstyps
  REAL,    INTENT(IN) :: globvar((nx-3)*nproc_x+3,(ny-3)*nproc_y+3,nzsoil,nstyps)

  REAL,    INTENT(OUT):: var(nx,ny,nzsoil,nstyps)

!-----------------------------------------------------------------------
!
!  Local variable declarations.
!
!-----------------------------------------------------------------------

  INTEGER, PARAMETER :: master = 0

  INTEGER :: mptag        ! Unique MPI id.
  INTEGER :: mytag
  INTEGER :: target
  INTEGER :: ia,ja, ic,jc, fzone
  INTEGER :: stat(mpi_status_size)
  INTEGER :: imstat
  INTEGER :: i, j, k, l

!@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@
!
!  Beginning of executable code...
!
!@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@

  CALL inctag
  mptag = gentag

!-----------------------------------------------------------------------
!
!  fill the var array from globvar
!
!-----------------------------------------------------------------------

  fzone = 3        ! ARPS: subdomains overlap by a 3-point fake zone.

  DO jc=1,nproc_y
    DO ic=1,nproc_x

      !  storage section

      IF(myproc == 0)THEN  ! store data into var

        DO l=1,nstyps
        DO k=1,nzsoil
          DO j=1,ny
            ja = j + (jc-1)*(ny-fzone)
            DO i=1,nx
              ia = i + (ic-1)*(nx-fzone)
              var(i,j,k,l) = globvar(ia,ja,k,l)
            END DO          
          END DO      
        END DO      
        END DO      

      END IF

      ! message passing section...

      IF(ic /=1 .OR. jc /=1)THEN     !  receive data from processor 0

        mytag = mptag + 100 + ic + jc

        IF(myproc == 0)THEN          ! send data

          target = ic+(jc-1)*nproc_x-1

          CALL mpi_send (var,nx*ny*nzsoil*nstyps,MPI_REAL,target,     &
                         mytag, MPI_COMM_WORLD,imstat)
        END IF

        IF(myproc == (ic+(jc-1)*nproc_x-1))THEN

          CALL mpi_recv (var,nx*ny*nzsoil*nstyps,MPI_REAL,master,     &
                         mytag,MPI_COMM_WORLD,stat,imstat) 

        END IF

      END IF

      CALL mpbarrier
 
    END DO      
  END DO      

  ! Finally, make sure processor 0 contains the correct values (ic=1,jc=1)
  IF(myproc == 0) var(:,:,:,:) = globvar(1:nx,1:ny,1:nzsoil,1:nstyps)  

  RETURN
END SUBROUTINE mpisplit4d
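
!-----------------------------------------------------------------------
!
!  Editorial usage sketch (not part of the original ARPS source):
!  a typical mpisplit4d call for a soil array.  ARPS soil arrays
!  typically carry a 0:nstyps soil-type axis, hence the fourth extent
!  of nstyps+1 in the call, as the header above notes.  globtsoil and
!  tsoil here are hypothetical stand-ins.
!
!-----------------------------------------------------------------------

SUBROUTINE example_mpisplit4d(nx,ny,nzsoil,nstyps)

  IMPLICIT NONE

  INCLUDE 'mp.inc'

  INTEGER, INTENT(IN) :: nx,ny,nzsoil,nstyps

  REAL :: globtsoil((nx-3)*nproc_x+3,(ny-3)*nproc_y+3,nzsoil,0:nstyps)
  REAL :: tsoil(nx,ny,nzsoil,0:nstyps)

  IF (myproc == 0) globtsoil = 0.0   ! stand-in for data read on master

  ! The 0:nstyps axis has extent nstyps+1, which is what is passed.
  CALL mpisplit4d(globtsoil,nx,ny,nzsoil,nstyps+1,tsoil)

  RETURN
END SUBROUTINE example_mpisplit4d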
!
!##################################################################
!##################################################################
!######                                                      ######
!######                SUBROUTINE MPIMERGE2dns               ######
!######                                                      ######
!######                     Developed by                     ######
!######     Center for Analysis and Prediction of Storms     ######
!######                University of Oklahoma                ######
!######                                                      ######
!##################################################################
!##################################################################
!

SUBROUTINE mpimerge2dns(locvar,nx,nz,globvar) 4,2
!
!-----------------------------------------------------------------------
!
!  PURPOSE:
!
!  Generate global array from a multiprocessor run.
!  for North/South boundary 2D array. 
!
!-----------------------------------------------------------------------
!
!
!  AUTHOR: Yunheng Wang
!  2003/02/25
!  Based on subroutine mpimerge
!
!  MODIFICATION HISTORY:
!
!-----------------------------------------------------------------------
!
!  INPUT :
!
!    locvar   Local array to be merged.
!
!    nx       Local dimension of the array in the x-direction.
!    nz       Third dimension of the array; unchanged by the merge.
!
!  OUTPUT:
!
!    globvar  Global array, assembled on processor 0.
!
!-----------------------------------------------------------------------
!
!  Variable declarations.
!
!-----------------------------------------------------------------------

  IMPLICIT NONE

  INTEGER, INTENT(IN) :: nx
  INTEGER, INTENT(IN) :: nz
  REAL, INTENT(IN) :: locvar(nx,nz)

!-----------------------------------------------------------------------
!
!  Include files.
!
!-----------------------------------------------------------------------

  INCLUDE 'mpif.h'
  INCLUDE 'globcst.inc'
  INCLUDE 'mp.inc'

  REAL, INTENT(OUT):: globvar((nx-3)*nproc_x+3,nz)
                            ! Output array in global domain.

!-----------------------------------------------------------------------
!
!  Local variable declarations.
!
!-----------------------------------------------------------------------

  REAL :: tem(nx,nz)

  INTEGER :: mptag        ! Unique MPI id.
  INTEGER :: mytag
  INTEGER, PARAMETER :: master = 0
  INTEGER :: source
  INTEGER :: ia, ic,jc, i0, fzone
  INTEGER :: stat(mpi_status_size)
  INTEGER :: imstat
  INTEGER :: i,k 

!@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@
!
!  Beginning of executable code...
!
!@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@

  CALL inctag
  mptag = gentag

!-----------------------------------------------------------------------
!
!  fill the globvar array
!
!-----------------------------------------------------------------------

  fzone = 3 ! ARPS: subdomains overlap by a 3-point fake zone.

  tem(:,:) = locvar(:,:) ! each processor stores the locvar into tem
 
  DO jc=1,nproc_y
    DO ic=1,nproc_x

      ! message passing section...

      IF(jc == 1) THEN

      IF(ic /= 1) THEN     !  pass data to processor 0
      
        mytag = mptag + 100 + ic + 1
        IF( myproc == ic-1 )THEN
          CALL mpi_send (tem,nx*nz,MPI_REAL,master,            &
                         mytag,MPI_COMM_WORLD,imstat) 

        END IF

        IF(myproc == 0)THEN          ! receive data
          source = ic - 1
          CALL mpi_recv (tem,nx*nz,MPI_REAL,source,            &
                         mytag, MPI_COMM_WORLD,stat,imstat)
        END IF

      END IF

      !  storage section

      IF(myproc == 0)THEN  ! store data into globvar
      
        IF (ic == 1) THEN
          i0 = 1
        ELSE
          i0 = 2
        END IF

        DO k=1,nz
        DO i=i0,nx
          ia = i + (ic-1)*(nx-fzone)
          globvar(ia,k) = tem(i,k)
        END DO          
        END DO
      
      END IF

      END IF    ! jc == 1

      CALL mpbarrier

    END DO      
  END DO      

  RETURN
END SUBROUTINE mpimerge2dns
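
!-----------------------------------------------------------------------
!
!  Editorial usage sketch (not part of the original ARPS source):
!  gathering a north/south boundary slab onto processor 0 with
!  mpimerge2dns.  Only the jc=1 row of processors contributes, as the
!  routine above shows, and globvar is complete on processor 0 only.
!
!-----------------------------------------------------------------------

SUBROUTINE example_mpimerge2dns(nx,nz)

  IMPLICIT NONE

  INCLUDE 'mp.inc'

  INTEGER, INTENT(IN) :: nx,nz

  REAL :: locvar(nx,nz)
  REAL :: globvar((nx-3)*nproc_x+3,nz)

  locvar(:,:) = REAL(myproc)      ! stand-in for a local boundary slab

  CALL mpimerge2dns(locvar,nx,nz,globvar)

  ! Processor 0 now holds the assembled boundary; the other processes
  ! must not use globvar.

  RETURN
END SUBROUTINE example_mpimerge2dns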
!
!##################################################################
!##################################################################
!######                                                      ######
!######                SUBROUTINE MPIMERGE2dew               ######
!######                                                      ######
!######                     Developed by                     ######
!######     Center for Analysis and Prediction of Storms     ######
!######                University of Oklahoma                ######
!######                                                      ######
!##################################################################
!##################################################################
!

SUBROUTINE mpimerge2dew(locvar,ny,nz,globvar) 4,2
!
!-----------------------------------------------------------------------
!
!  PURPOSE:
!
!  Generate global array from a multiprocessor run 
!  for East/West boundary 2D array.
!
!-----------------------------------------------------------------------
!
!
!  AUTHOR: Yunheng Wang
!  2003/02/25
!  Based on subroutine mpimerge
!
!  MODIFICATION HISTORY:
!
!-----------------------------------------------------------------------
!
!  INPUT :
!
!    locvar   Local array to be merged.
!
!    ny       Local dimension of the array in the y-direction.
!    nz       Third dimension of the array; unchanged by the merge.
!
!  OUTPUT:
!
!    globvar  Global array, assembled on processor 0.
!
!-----------------------------------------------------------------------
!
!  Variable declarations.
!
!-----------------------------------------------------------------------

  IMPLICIT NONE

  INTEGER, INTENT(IN) :: ny
  INTEGER, INTENT(IN) :: nz
  REAL, INTENT(IN) :: locvar(ny,nz)

!-----------------------------------------------------------------------
!
!  Include files.
!
!-----------------------------------------------------------------------

  INCLUDE 'mpif.h'
  INCLUDE 'globcst.inc'
  INCLUDE 'mp.inc'

  REAL, INTENT(OUT):: globvar((ny-3)*nproc_y+3,nz)
                            ! Output array in global domain.

!-----------------------------------------------------------------------
!
!  Local variable declarations.
!
!-----------------------------------------------------------------------

  REAL :: tem(ny,nz)

  INTEGER :: mptag        ! Unique MPI id.
  INTEGER :: mytag
  INTEGER, PARAMETER :: master = 0
  INTEGER :: source
  INTEGER :: ja, ic,jc, j0, fzone
  INTEGER :: stat(mpi_status_size)
  INTEGER :: imstat
  INTEGER :: j,k 

!@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@
!
!  Beginning of executable code...
!
!@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@

  CALL inctag
  mptag = gentag

!-----------------------------------------------------------------------
!
!  fill the globvar array
!
!-----------------------------------------------------------------------

  fzone = 3 ! ARPS: subdomains overlap by a 3-point fake zone.

  tem(:,:) = locvar(:,:) ! each processor stores the locvar into tem
 
  DO jc=1,nproc_y
    DO ic=1,nproc_x

      ! message passing section...

      IF(ic == 1) THEN 

      IF(jc /= 1) THEN     !  pass data to processor 0
      
        mytag = mptag + 100 + jc + 1
        IF(myproc ==  (jc-1)*nproc_x )THEN
          CALL mpi_send (tem,ny*nz,MPI_REAL,master,                  &
                         mytag,MPI_COMM_WORLD,imstat) 
        END IF

        IF(myproc == 0)THEN          ! receive data
          source = (jc-1)*nproc_x
          CALL mpi_recv (tem,ny*nz,MPI_REAL,source,                 &
                         mytag, MPI_COMM_WORLD,stat,imstat)
        END IF

      END IF

      !  storage section

      IF(myproc == 0)THEN  ! store data into globvar
      
        IF (jc == 1) THEN
          j0 = 1
        ELSE
          j0 = 2
        END IF

        DO k=1,nz
        DO j=j0,ny
          ja = j + (jc-1)*(ny-fzone)
          globvar(ja,k) = tem(j,k)
        END DO          
        END DO
      
      END IF

      END IF    ! ic == 1

      CALL mpbarrier

    END DO      
  END DO      

  RETURN
END SUBROUTINE mpimerge2dew
!
!
!##################################################################
!##################################################################
!######                                                      ######
!######                SUBROUTINE MPISPLIT2DNS               ######
!######                                                      ######
!######                     Developed by                     ######
!######     Center for Analysis and Prediction of Storms     ######
!######                University of Oklahoma                ######
!######                                                      ######
!##################################################################
!##################################################################
!


SUBROUTINE mpisplit2dns(globvar,nx,nz,var) 4,2

!
!-----------------------------------------------------------------------
!
!  PURPOSE:
!
!  Split the global array and scatter to each processor.
!  for North/South boundary arrays.
!
!-----------------------------------------------------------------------
!
!
!  AUTHOR: Yunheng Wang
!  2003/02/26
!
!  MODIFICATION HISTORY:
!
!-----------------------------------------------------------------------
!
!  INPUT :
!
!    globvar  Global array passed in from processor 0.
!
!    nx       Dimension of the array in the subdomain (x-direction).
!    nz       Third dimension of the array; unchanged by the split.
!
!  OUTPUT:
!
!    var      Subdomain variable in each process.
!
!-----------------------------------------------------------------------

  IMPLICIT NONE

!-----------------------------------------------------------------------
!
!  Include files.
!
!-----------------------------------------------------------------------

  INCLUDE 'mpif.h'
  INCLUDE 'globcst.inc'
  INCLUDE 'mp.inc'

!-----------------------------------------------------------------------
!
!  Variable declarations.
!
!-----------------------------------------------------------------------

  INTEGER, INTENT(IN) :: nx
  INTEGER, INTENT(IN) :: nz
  REAL, INTENT(IN):: globvar((nx-3)*nproc_x+3,nz)

  REAL, INTENT(OUT) :: var(nx,nz)


!-----------------------------------------------------------------------
!
!  Local variable declarations.
!
!-----------------------------------------------------------------------

  INTEGER, PARAMETER :: master = 0

  INTEGER :: mptag        ! Unique MPI id.
  INTEGER :: mytag
  INTEGER :: target
  INTEGER :: ia,ic,jc, fzone
  INTEGER :: stat(mpi_status_size)
  INTEGER :: imstat
  INTEGER :: i,k

!@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@
!
!  Beginning of executable code...
!
!@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@

  CALL inctag
  mptag = gentag

!-----------------------------------------------------------------------
!
!  fill the var array from globvar
!
!-----------------------------------------------------------------------

  fzone = 3        ! ARPS: subdomains overlap by a 3-point fake zone.

  DO jc=1,nproc_y
    DO ic=1,nproc_x

      !  storage section

      IF(myproc == 0)THEN  ! store data into var

        DO k=1,nz
        DO i=1,nx
           ia = i + (ic-1)*(nx-fzone)
           var(i,k) = globvar(ia,k)
        END DO      
        END DO

      END IF

      ! message passing section...

      IF(ic /=1 .OR. jc /= 1)THEN     !  receive data from processor 0

        mytag = mptag + 100 + ic + jc

        IF(myproc == 0)THEN          ! send data

          target = ic+(jc-1)*nproc_x-1

          CALL mpi_send (var,nx*nz,MPI_REAL,target,                &
                         mytag, MPI_COMM_WORLD,imstat)
        END IF

        IF(myproc == (ic+(jc-1)*nproc_x-1))THEN
       
          CALL mpi_recv (var,nx*nz,MPI_REAL,master,               &
                         mytag,MPI_COMM_WORLD,stat,imstat) 

        END IF

      END IF

      CALL mpbarrier
 
    END DO      
  END DO      

! At the end, make sure processor 0 contains the correct values (ic=1,jc=1)
  IF(myproc == 0) var(:,:) = globvar(1:nx,:)     

  RETURN
END SUBROUTINE mpisplit2dns
!
!##################################################################
!##################################################################
!######                                                      ######
!######                SUBROUTINE MPISPLIT2DEW               ######
!######                                                      ######
!######                     Developed by                     ######
!######     Center for Analysis and Prediction of Storms     ######
!######                University of Oklahoma                ######
!######                                                      ######
!##################################################################
!##################################################################
!


SUBROUTINE mpisplit2dew(globvar,ny,nz,var) 4,2

!
!-----------------------------------------------------------------------
!
!  PURPOSE:
!
!  Split the global array and scatter to each processor,
!  for East/West boundary arrays.
!
!-----------------------------------------------------------------------
!
!
!  AUTHOR: Yunheng Wang
!  2002/08/20
!
!  MODIFICATION HISTORY:
!
!-----------------------------------------------------------------------
!
!  INPUT :
!
!    globvar  Global array passed in from processor 0.
!
!    ny       Number of grid points in the y-direction (north/south)
!    nz       Number of grid points in the third (vertical) dimension
!
!  OUTPUT:
!
!    var      Subdomain variable in each process.
!
!-----------------------------------------------------------------------

  IMPLICIT NONE

!-----------------------------------------------------------------------
!
!  Include files.
!
!-----------------------------------------------------------------------

  INCLUDE 'mpif.h'
  INCLUDE 'globcst.inc'
  INCLUDE 'mp.inc'

!-----------------------------------------------------------------------
!
!  Variable declarations.
!
!-----------------------------------------------------------------------

  INTEGER, INTENT(IN) :: ny
  INTEGER, INTENT(IN) :: nz
  REAL, INTENT(IN):: globvar((ny-3)*nproc_y+3,nz)

  REAL, INTENT(OUT) :: var(ny,nz)

!-----------------------------------------------------------------------
!
!  Local variable declarations.
!
!-----------------------------------------------------------------------

  INTEGER, PARAMETER :: master = 0

  INTEGER :: mptag        ! Unique MPI id.
  INTEGER :: mytag
  INTEGER :: target
  INTEGER :: ja, ic,jc, fzone
  INTEGER :: stat(mpi_status_size)
  INTEGER :: imstat
  INTEGER :: j,k

!@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@
!
!  Beginning of executable code...
!
!@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@

  CALL inctag
  mptag = gentag

!-----------------------------------------------------------------------
!
!  fill the var array from globvar
!
!-----------------------------------------------------------------------

  fzone = 3        ! ARPS: subdomains overlap by a 3-point fake zone.

  DO jc=1,nproc_y
    DO ic=1,nproc_x

      !  storage section

      IF(myproc == 0)THEN  ! store data into var

        DO k=1,nz
          DO j=1,ny
            ja = j + (jc-1)*(ny-fzone)
            var(j,k) = globvar(ja,k)
          END DO
        END DO

      END IF

      ! message passing section...

      IF(ic /=1 .OR. jc /=1)THEN     !  receive data from processor 0

        mytag = mptag + 100 + ic + jc

        IF(myproc == 0)THEN          ! send data

          target = ic+(jc-1)*nproc_x-1

          CALL mpi_send (var,ny*nz,MPI_REAL,target,               &
                         mytag, MPI_COMM_WORLD,imstat)
        END IF

        IF(myproc == (ic+(jc-1)*nproc_x-1))THEN
       
          CALL mpi_recv (var,ny*nz,MPI_REAL,master,               &
                         mytag,MPI_COMM_WORLD,stat,imstat) 

        END IF

      END IF

      CALL mpbarrier
 
    END DO      
  END DO      

! At the end, make sure processor 0 contains the correct values (ic=1,jc=1)
  IF(myproc == 0) var(:,:) = globvar(1:ny,:)     

  RETURN
END SUBROUTINE mpisplit2dew
!
!
!######################################################################
!
! Wrap subroutines added for ARPSPLT_mpi
! 
!   mpsendr  -- CALL mpi_send, REAL array
!   mprecvr  -- CALL mpi_recv, REAL array
!   mpsendi  -- CALL mpi_send, INTEGER scalar
!   mprecvi  -- CALL mpi_recv, INTEGER scalar
!
!#####################################################################


SUBROUTINE mpsendr(a,size,dest,itag,ierror) 68

  IMPLICIT NONE

  INCLUDE 'mpif.h'

  INTEGER :: size, dest, itag, ierror
  REAL :: a(size)

  CALL mpi_send(a,size,MPI_REAL,dest,itag,MPI_COMM_WORLD,ierror)

  RETURN

END SUBROUTINE mpsendr


SUBROUTINE mpsendi(m,dest,itag,ierror) 10

  IMPLICIT NONE

  INCLUDE 'mpif.h'

  INTEGER :: m
  INTEGER :: dest, itag, ierror

  CALL mpi_send(m,1,MPI_INTEGER,dest,itag,MPI_COMM_WORLD,ierror)

  RETURN

END SUBROUTINE mpsendi


SUBROUTINE mprecvr(a,size,source,itag,ierror) 68

  IMPLICIT NONE

  INCLUDE 'mpif.h'

  INTEGER :: size, source, itag, ierror
  REAL :: a(size)

  INTEGER :: stat(MPI_STATUS_SIZE)

  CALL mpi_recv(a,size,MPI_REAL,source,itag,MPI_COMM_WORLD,stat,ierror)

  RETURN

END SUBROUTINE mprecvr


SUBROUTINE mprecvi(m,source,itag,ierror) 10

  IMPLICIT NONE

  INCLUDE 'mpif.h'

  INTEGER :: m
  INTEGER :: source, itag, ierror

  INTEGER :: stat(MPI_STATUS_SIZE)

  CALL mpi_recv(m,1,MPI_INTEGER,source,itag,MPI_COMM_WORLD,stat,ierror)

  RETURN

END SUBROUTINE mprecvi
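
!-----------------------------------------------------------------------
!
!  Editorial usage sketch (not part of the original ARPS source):
!  the wrappers above are used in matched pairs with an agreed-upon
!  tag, e.g. shipping a REAL buffer from process 1 to process 0.
!  Requires at least two MPI processes; the tag value is arbitrary.
!
!-----------------------------------------------------------------------

SUBROUTINE example_sendrecv(n)

  IMPLICIT NONE

  INCLUDE 'mp.inc'

  INTEGER, INTENT(IN) :: n
  REAL    :: buf(n)
  INTEGER :: ierr
  INTEGER, PARAMETER :: itag = 900   ! any tag agreed by both sides

  IF (myproc == 1) THEN
    buf(:) = 1.0
    CALL mpsendr(buf,n,0,itag,ierr)      ! process 1 sends ...
  ELSE IF (myproc == 0) THEN
    CALL mprecvr(buf,n,1,itag,ierr)      ! ... process 0 receives
  END IF

  RETURN
END SUBROUTINE example_sendrecv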


SUBROUTINE mpmaxi(imax) 13

!
!  Find the maximum of an integer value across all processes.
!
  IMPLICIT NONE

  INTEGER :: imax

  INTEGER :: imstat

  INCLUDE 'mpif.h'

  INTEGER :: maxtm

!---------------------------------------------------------
!
! Start of executable code....
!
!---------------------------------------------------------

  CALL MPI_REDUCE(imax,maxtm,1,MPI_INTEGER,MPI_MAX,0,     &
                  MPI_COMM_WORLD,imstat)
  CALL MPI_BCAST(maxtm,1,MPI_INTEGER,0,MPI_COMM_WORLD,imstat)

  imax = maxtm

  RETURN
END SUBROUTINE mpmaxi
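
!-----------------------------------------------------------------------
!
!  Editorial note (not part of the original ARPS source): the
!  REDUCE-then-BCAST pair in mpmaxi is equivalent to a single
!  MPI_ALLREDUCE with MPI_MAX, sketched below for comparison.
!
!-----------------------------------------------------------------------

SUBROUTINE example_allreduce_max(imax)

  IMPLICIT NONE

  INCLUDE 'mpif.h'

  INTEGER :: imax
  INTEGER :: maxtm, imstat

  CALL MPI_ALLREDUCE(imax,maxtm,1,MPI_INTEGER,MPI_MAX,              &
                     MPI_COMM_WORLD,imstat)

  imax = maxtm        ! every process now holds the global maximum

  RETURN
END SUBROUTINE example_allreduce_max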
!
!##################################################################
!##################################################################
!######                                                      ######
!######                SUBROUTINE MPBCASTR                   ######
!######                                                      ######
!######                     Developed by                     ######
!######     Center for Analysis and Prediction of Storms     ######
!######                University of Oklahoma                ######
!######                                                      ######
!##################################################################
!##################################################################
!


SUBROUTINE mpbcastr(var,source) 10
!
!-----------------------------------------------------------------------
!
!  PURPOSE:
!
!  Broadcast a real value from source processor to all other processes.
!
!-----------------------------------------------------------------------
!
!
!  AUTHOR: Yunheng Wang
!  2003/07/31
!
!  MODIFICATION HISTORY:
!
!-----------------------------------------------------------------------
!
!  INPUT/OUTPUT :
!
!    var      Real value to broadcast
!    source   source processor rank
!
!-----------------------------------------------------------------------
!
!  Variable declarations.
!
!-----------------------------------------------------------------------

  IMPLICIT NONE

  INTEGER, INTENT(IN) :: source
  REAL,    INTENT(INOUT) :: var   ! set by the broadcast on non-source processes

!-----------------------------------------------------------------------
!
!  Include files.
!
!-----------------------------------------------------------------------

  INCLUDE 'mpif.h'
  INCLUDE 'mp.inc'

!-----------------------------------------------------------------------
!
!  Local variable declarations.
!
!-----------------------------------------------------------------------

  INTEGER :: imstat

!@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@
!
!  Beginning of executable code...
!
!@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@

  CALL mpi_bcast(var,1,MPI_REAL,source,mpi_comm_world,imstat)

  IF (imstat /= 0) THEN
    WRITE (6,*) "MPBCASTR: error on processor",myproc
  END IF

  RETURN
END SUBROUTINE mpbcastr
!
!##################################################################
!##################################################################
!######                                                      ######
!######                SUBROUTINE MPSUMR                     ######
!######                                                      ######
!######                     Developed by                     ######
!######     Center for Analysis and Prediction of Storms     ######
!######                University of Oklahoma                ######
!######                                                      ######
!##################################################################
!##################################################################
!

SUBROUTINE mpsumr(var,ndim)

  IMPLICIT NONE

  INCLUDE 'mpif.h'

  INTEGER :: ndim
  REAL    :: var(ndim)
  REAL    :: vartm(ndim)
  INTEGER :: imstat

  CALL mpi_allreduce(var, vartm, ndim, MPI_REAL, MPI_SUM,               &
                     mpi_comm_world, imstat)

  var(1:ndim) = vartm(1:ndim)

  RETURN
END SUBROUTINE mpsumr
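
!-----------------------------------------------------------------------
!
!  Editorial usage sketch (not part of the original ARPS source):
!  summing per-process partial sums in place with mpsumr.  With every
!  element initialized to 1.0, each element equals the number of MPI
!  processes on return, since mpsumr sums across all processes.
!
!-----------------------------------------------------------------------

SUBROUTINE example_mpsumr(ndim)

  IMPLICIT NONE

  INTEGER, INTENT(IN) :: ndim
  REAL :: partial(ndim)

  partial(:) = 1.0            ! one contribution per process
  CALL mpsumr(partial,ndim)   ! each element now equals the process count

  RETURN
END SUBROUTINE example_mpsumr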
!
!##################################################################
!##################################################################
!######                                                      ######
!######                SUBROUTINE MPSUMDP                    ######
!######                                                      ######
!######                     Developed by                     ######
!######     Center for Analysis and Prediction of Storms     ######
!######                University of Oklahoma                ######
!######                                                      ######
!##################################################################
!##################################################################
!

SUBROUTINE mpsumdp(var,ndim) 6

  IMPLICIT NONE

  INCLUDE 'mpif.h'

  INTEGER :: ndim
  DOUBLE PRECISION    :: var(ndim)
  DOUBLE PRECISION    :: vartm(ndim)
  INTEGER :: imstat

  CALL mpi_allreduce(var, vartm, ndim, MPI_DOUBLE_PRECISION, MPI_SUM,   &
                     mpi_comm_world, imstat)

  var(1:ndim) = vartm(1:ndim)

  RETURN
END SUBROUTINE mpsumdp