From d829d218ad354f2b2e81518e0205348aedb62f9f Mon Sep 17 00:00:00 2001 From: johnryantaylor Date: Thu, 28 Jul 2016 15:33:31 +0100 Subject: [PATCH 01/66] Update README.md --- README.md | 3 +++ 1 file changed, 3 insertions(+) diff --git a/README.md b/README.md index b67be54..f3f0867 100644 --- a/README.md +++ b/README.md @@ -1,6 +1,9 @@ # DIABLO DNS In A Box...Laptop Optimized! +This branch includes background density gradients and thermal wind +and is designed for frontal zone simulations + A brief description of DIABLO diablo.f Main program, define grid, read/write flow, statistics From 5af2b30605a6504560dfa088c00eae9de4fbf941 Mon Sep 17 00:00:00 2001 From: johnryantaylor Date: Thu, 28 Jul 2016 16:36:48 +0100 Subject: [PATCH 02/66] Added case for surface boundary layer --- diablo_2.0/pre_process/create_grid_h5.m | 7 +++++++ 1 file changed, 7 insertions(+) diff --git a/diablo_2.0/pre_process/create_grid_h5.m b/diablo_2.0/pre_process/create_grid_h5.m index b6b3de6..95bcd53 100644 --- a/diablo_2.0/pre_process/create_grid_h5.m +++ b/diablo_2.0/pre_process/create_grid_h5.m @@ -14,6 +14,7 @@ disp('1) High resolution at both ends (tanh stretching function)'); disp('2) High resolution in center (tanh stretching function)'); disp('3) High resolution at both ends (starting at 0)'); + disp('4) High resolution at the top for surface boundary layer'); GRID_TYPE=input('Select a grid type: '); % Set the dimensions of the grid @@ -40,6 +41,12 @@ for J=1:N+1 G(J+1)=(L/2.0)*tanh(CS*((2.0*(J-1))/(N)-1.0))/tanh(CS)+L/2.0; end +elseif (GRID_TYPE==4) +% Surface boundary layer + for J=1:N+1 + G(J+1)=L*tanh(CS*((N-(J-1))/N-1.0))/tanh(CS)+L; + end + G(:)=G(:)*-1; else disp('Error, entered grid type unknown'); end From 3f7c37a41a63be5438ff9f8beab87ac31c08ad10 Mon Sep 17 00:00:00 2001 From: johnryantaylor Date: Fri, 29 Jul 2016 12:49:54 +0100 Subject: [PATCH 03/66] updated to include thermal wind --- diablo_2.0/for/courant.f | 20 ++++++++++++++++++++ 1 file changed, 20 insertions(+) diff 
--git a/diablo_2.0/for/courant.f b/diablo_2.0/for/courant.f index 0a96229..e75f124 100644 --- a/diablo_2.0/for/courant.f +++ b/diablo_2.0/for/courant.f @@ -32,6 +32,24 @@ subroutine courant end do ! Use the model velocity to calculate the CFL number + + IF (FLAVOR.eq.'Front') THEN +! Add thermal wind to velocity when calculating the CFL number + do n=1,N_TH + do j=JSTART,JEND + do k=0,NZP-1 + do i=0,NXM + dt_x=cfl*dx(i)/abs(U1(i,k,j)-1.d0*DRHODZ(N)*RI(N) + & *GYF(J)/I_RO) + dt_y=cfl*dy(j)/abs(U2(i,k,j)) + dt_z=cfl*dz(j)/abs(U3(i,k,j)+(RI(N)/I_RO) + & *DRHODX(N)*GYF(J)) + dt=min(dt,dt_x,dt_y,dt_z) + end do + end do + end do + end do + ELSE do j=1,NY do k=0,NZP-1 do i=0,NXM @@ -42,6 +60,8 @@ subroutine courant end do end do end do + END IF + if (USE_MPI) then call get_minimum_mpi(dt) end if From 0ec6c691b65d450eaae34b5d12c6725052fdd1ff Mon Sep 17 00:00:00 2001 From: johnryantaylor Date: Fri, 29 Jul 2016 12:50:18 +0100 Subject: [PATCH 04/66] Added case for thermal wind at front --- diablo_2.0/for/set_ics.f | 37 +++++++++++++++++++++++++++++++++++++ 1 file changed, 37 insertions(+) diff --git a/diablo_2.0/for/set_ics.f b/diablo_2.0/for/set_ics.f index d7130da..7e56be9 100644 --- a/diablo_2.0/for/set_ics.f +++ b/diablo_2.0/for/set_ics.f @@ -71,6 +71,18 @@ SUBROUTINE CREATE_FLOW_CHAN END DO END DO END DO + else if (IC_TYPE.eq.4) then +C For Front +C Initialize in thermal wind balance: + DO J=0,NY + DO K=0,NZP-1 + DO I=0,NXM + U1(I,K,J)=0.d0 + U2(I,K,J)=0.d0 + U3(I,K,J)=0.d0 + END DO + END DO + END DO else WRITE(*,*) 'WARNING, unsupported IC_TYPE in CREATE_FLOW' end if @@ -194,6 +206,8 @@ SUBROUTINE CREATE_TH_CHAN INCLUDE 'header' INTEGER I,J,K,N +! A variable for Front case... + REAL*8 RI_B(0:NY+1) DO N=1,N_TH IF (CREATE_NEW_TH(N)) THEN @@ -243,6 +257,29 @@ SUBROUTINE CREATE_TH_CHAN END DO END DO END DO + ELSE IF (IC_TYPE.eq.4) THEN +! 
For Front case, specify given RI_B profile + DO K=0,NZP-1 + DO I=0,NXM + TH(I,K,0,N)=0.d0 + DO J=1,NY + if (GYF(J).lt.-60.d0) then + RI_B(J)=20.d0 + TH(I,K,J,N)=(GYF(J)-GYF(1))* + & RI_B(J)*(RI(N)*DRHODX(N))**2.d0 + & /I_RO**2.d0/RI(N) + else + RI_B(J)=1.0d0 + TH(I,K,J,N)=(GYF(J)+60.d0)* + & RI_B(J)*(RI(N)*DRHODX(N))**2.d0 + & /I_RO**2.d0/RI(N) + & +(-60+140.d0)*20.d0*(RI(N)*DRHODX(N))**2.d0 + & /I_RO**2.d0/RI(N) + end if + END DO + END DO + END DO + ELSE WRITE(*,*) 'WARNING, unsupported IC_TYPE in CREATE_FLOW' END IF From 5553e9bf1710b2f671e71d217b0eadf8cee45247 Mon Sep 17 00:00:00 2001 From: johnryantaylor Date: Fri, 29 Jul 2016 12:50:35 +0100 Subject: [PATCH 05/66] Added thermal wind advection and sponge layers --- diablo_2.0/for/user_rhs.f | 224 +++++++++++++++++++++++++++++++++----- 1 file changed, 196 insertions(+), 28 deletions(-) diff --git a/diablo_2.0/for/user_rhs.f b/diablo_2.0/for/user_rhs.f index 6a1c24d..9a0bb4c 100644 --- a/diablo_2.0/for/user_rhs.f +++ b/diablo_2.0/for/user_rhs.f @@ -85,37 +85,68 @@ SUBROUTINE USER_RHS_CHAN_FOURIER integer i,j,k,n - real*8 alpha +! Advection owing to thermal wind + IF ((FLAVOR.eq.'Front').and.(I_RO.ne.0.d0)) THEN + DO N=1,N_TH +! Loop over all scalar -! For example, to add a linear damping term (e.g. -alpha*U) to the RHS: -! alpha=-0.1d0 -! DO J=JSTART,JEND -! DO K=0,TNKY -! DO I=0,NXP-1 -! CF1(I,K,J)=CF1(I,K,J)-alpha*CU1(I,K,J) -! END DO -! END DO -! END DO +! Add thermal wind advection to the momentum equations + do j=JSTART,JEND + do k=0,TNKZ + do i=0,NXP-1 + CF1(I,K,J)=CF1(I,K,J) + & -(DRHODX(N)*RI(N)*GYF(J)/I_RO) + & *CIKZ(K)*CU1(I,K,J) + & -(-1.d0*DRHODZ(N)*RI(N)*GYF(J)/I_RO) + & *CIKX(I)*CU1(I,K,J) + & -(-1.d0*DRHODZ(N)*RI(N)/I_RO) + & *0.5d0*(CU2(I,K,J)+CU2(I,K,J+1)) + CF3(I,K,J)=CF3(I,K,J) + & -(DRHODX(N)*RI(N)*GYF(J)/I_RO) + & *CIKZ(K)*CU3(I,K,J) + & -(-1.d0*DRHODZ(N)*RI(N)*GYF(J)/I_RO) + & *CIKX(I)*CU3(I,K,J) + & -(DRHODX(N)*RI(N)/I_RO) + & *0.5d0*(CU2(I,K,J)+CU2(I,K,J+1)) + end do + end do + end do -! 
For U2 do this... -! Note that the only thing that changes are the bounds of the J index -! DO J=2,NY -! DO K=0,TNKY -! DO I=0,NXP-1 -! CF2(I,K,J)=CF2(I,K,J)-alpha*CU2(I,K,J) -! END DO -! END DO -! END DO + do j=2,NY + do k=0,TNKZ + do i=0,NXP-1 + CF2(I,K,J)=CF2(I,K,J) + & -(DRHODX(N)*RI(N)*GY(J)/I_RO) + & *CIKZ(K)*CU2(I,K,J) + & -(-1.d0*DRHODZ(N)*RI(N)*GY(J)/I_RO) + & *CIKX(I)*CU2(I,K,J) + end do + end do + end do -! For scalars, do this... -! DO J=JSTART,JEND -! DO K=0,TNKY -! DO I=0,NXP-1 -! CFTH(I,K,J,N)=CFTH(I,K,J,N)-alpha*CTH(I,K,J,N) -! END DO -! END DO -! END DO -! END DO +! Add advection by thermal wind to the scalar equations + DO J=JSTART_TH(N),JEND_TH(N) + DO K=0,TNKZ + DO I=0,NXP-1 + CFTH(I,K,J,N)=CFTH(I,K,J,N) + & -(RI(N)/I_RO)*DRHODX(N)*GYF(J) + & *CIKZ(K)*CTH(I,K,J,N) + & -(RI(N)/I_RO)*-1.d0*DRHODZ(N)*GYF(J) + & *CIKX(I)*CTH(I,K,J,N) + END DO + END DO + END DO + +! End do N_TH + END DO + + END IF + +! Add sponge layer forcing + DO N=1,N_TH + CALL SPONGE_TH(N) + END DO + CALL SPONGE_VEL RETURN END @@ -142,6 +173,7 @@ SUBROUTINE USER_RHS_PER_PHYSICAL END DO C Note, that each cell has the same volume, so we can just average over all points EK=EK/dble(NX*NY*NZ) + ! Scale EK by an amount to compensate for dissipation from 2/3 de-aliasing: EK=0.8d0*EK DO J=0,NYM @@ -204,7 +236,143 @@ SUBROUTINE USER_RHS_CAVITY_PHYSICAL END + SUBROUTINE SPONGE_TH(N) +! This subroutine applies a sponge relaxation (Rayleigh damping) towards a +! specified background state for the temperature field +! The intention is to allow an open boundary + include 'header' + integer i,j,k,n + real*8 L_sponge,L_bottom + real*8 SPONGE_AMP + +! The following variables will store the background state + real*8 TH_0(-1:NY+1) + + real*8 RI_B(0:NY+1) + +! This variable will hold the forcing rate + real*8 SPONGE_SIGMA(0:NY+1) + +! Set the amplitude of the sponge + SPONGE_AMP=0.0001d0 +! Set the top of the sponge layer in physical units + L_sponge=-120.d0 +! 
Set the bottom of the computational domain in physical units + L_bottom=-140.d0 + DO J=0,NY+1 +! Quadratic damping at lower wall + if (GYF(J).lt.L_sponge) then + SPONGE_SIGMA(j)=SPONGE_AMP*((L_sponge-GYF(J)) + & /(L_sponge-L_bottom))**2.d0 + else + SPONGE_SIGMA(j)=0.d0 + end if + END DO + +! For MLI latmix + if (n.eq.1) then + TH_0(0)=0.d0 + do j=1,NY+1 + RI_B(J)=20.d0 + TH_0(J)=TH_0(J-1) + & +DY(J)*RI_B(J)*(RI(N)*DRHODX(N))**2.d0 + & /I_RO**2.d0/RI(N) + end do + else + do j=0,NY+1 + TH_0(j)=0.d0 + end do + end if + +! Add damping to R-K terms +! Damp the perturbations towards 0 + do k=0,TNKZ + do i=0,NKX + if ((i.ne.0).or.(k.ne.0)) then + do j=0,NY+1 + CFTH(i,k,j,n)=CFTH(i,k,j,n) + & -SPONGE_SIGMA(j)*(CTH(i,k,j,n)-0.) + end do + end if + end do + end do +! Damp the mean gradient towards TH_0 + do j=0,NY+1 + CFTH(0,0,j,n)=CFTH(0,0,j,n)-SPONGE_SIGMA(j) + & *(CTH(0,0,j,n)-TH_0(J)) + end do + + return + end + + SUBROUTINE SPONGE_VEL +! This subroutine applies a sponge relaxation (Rayleigh damping) towards a +! specified background state +! The intention is to allow an open boundary + include 'header' + integer i,j,k + + real*8 L_sponge,L_bottom + real*8 SPONGE_AMP + +! The following variables will store the background state + real*8 U1_0(-1:NY+1), U2_0(0:NY+1), U3_0(-1:NY+1) + +! This variable will hold the forcing rate + real*8 SPONGE_SIGMA(0:NY+1) + +! Set the amplitude of the sponge + SPONGE_AMP=0.0001d0 +! Set the top of the sponge layer in physical units + L_sponge=-120.d0 +! Set the bottom of the computational domain in physical units + L_bottom=-140.d0 + DO J=0,NY+1 +! Quadratic damping at lower wall + if (GYF(J).lt.L_sponge) then + SPONGE_SIGMA(j)=SPONGE_AMP*((L_sponge-GYF(J)) + & /(L_sponge-L_bottom))**2.d0 + else + SPONGE_SIGMA(j)=0.d0 + end if + END DO + +! Set the background state +! Here, set the background to be geostrophic, with a linear temperature profile + do j=0,NY+1 + U1_0(j)=0.d0 + U3_0(j)=0.d0 + end do + do j=0,NY+1 + U2_0(j)=0.d0 + end do + +! 
Add damping function to explicit R-K + do k=0,TNKZ + do i=0,NKX + if ((i.ne.0).or.(k.ne.0)) then + do j=jstart,jend + CF1(I,K,J)=CF1(I,K,J)-SPONGE_SIGMA(j)*(CU1(i,k,j)-0.d0) + CF3(I,K,J)=CF3(I,K,J)-SPONGE_SIGMA(j)*(CU3(i,k,j)-0.d0) + end do + do j=1,NY + CF2(I,K,J)=CF2(I,K,J)- + & 0.5*(SPONGE_SIGMA(j)+SPONGE_SIGMA(j+1))*(CU2(i,k,j)-0.d0) + end do + end if + end do + end do +! Damp mean flow + do j=jstart,jend + CF1(0,0,j)=CF1(0,0,j)-SPONGE_SIGMA(j)*(CU1(0,0,j)-U1_0(j)) + CF3(0,0,j)=CF3(0,0,j)-SPONGE_SIGMA(j)*(CU3(0,0,j)-U3_0(j)) + end do + do j=1,NY + CF2(0,0,j)=CF2(0,0,j)-SPONGE_SIGMA(j)*(CU2(0,0,j)-U2_0(j)) + end do + return + end From d403338db3e9568642d9f9dca05218acf36f1386 Mon Sep 17 00:00:00 2001 From: johnryantaylor Date: Fri, 29 Jul 2016 16:30:18 +0100 Subject: [PATCH 06/66] corrected lower Neumann BC --- diablo_2.0/for/channel.f | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/diablo_2.0/for/channel.f b/diablo_2.0/for/channel.f index 77a0c94..0cf554e 100644 --- a/diablo_2.0/for/channel.f +++ b/diablo_2.0/for/channel.f @@ -2254,8 +2254,8 @@ SUBROUTINE APPLY_BC_VEL_LOWER END DO C Now, Apply BC to mean IF (RANKZ.EQ.0) THEN - CU1(0,0,1)=CU1(0,0,2)+DY(2)*U_BC_YMIN_C1 - CU1(0,0,0)=CU1(0,0,1)+DY(1)*U_BC_YMIN_C1 + CU1(0,0,1)=CU1(0,0,2)-DY(2)*U_BC_YMIN_C1 + CU1(0,0,0)=CU1(0,0,1)-DY(1)*U_BC_YMIN_C1 END IF ELSE STOP 'Error: U_BC_YMIN must be 0, or 1' @@ -2285,8 +2285,8 @@ SUBROUTINE APPLY_BC_VEL_LOWER END DO C Now, Apply BC to mean IF (RANKZ.EQ.0) THEN - CU3(0,0,1)=CU3(0,0,2)+DY(2)*W_BC_YMIN_C1 - CU3(0,0,0)=CU3(0,0,1)+DY(1)*W_BC_YMIN_C1 + CU3(0,0,1)=CU3(0,0,2)-DY(2)*W_BC_YMIN_C1 + CU3(0,0,0)=CU3(0,0,1)-DY(1)*W_BC_YMIN_C1 END IF ELSE STOP 'Error: W_BC_YMIN must be 0, or 1' From 4d063555f00c3b60010f2e2741e85a6e8efd6b43 Mon Sep 17 00:00:00 2001 From: johnryantaylor Date: Mon, 1 Aug 2016 21:14:36 +0100 Subject: [PATCH 07/66] Fixed bug in calculation of |S| --- diablo_2.0/for/les.f | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git 
a/diablo_2.0/for/les.f b/diablo_2.0/for/les.f index 24530ea..e61afd8 100644 --- a/diablo_2.0/for/les.f +++ b/diablo_2.0/for/les.f @@ -88,7 +88,7 @@ subroutine les_chan & +4.d0*(0.5d0*(Sij4(I,K,J+1)+Sij4(I,K,J)))**2.d0 & +4.d0*Sij5(I,K,J)**2.d0 & +2.d0*Sij2(I,K,J)**2.d0 - & +4.d0*(0.5d0*(Sij6(I,K,J+1)+Sij4(I,K,J)))**2.d0 + & +4.d0*(0.5d0*(Sij6(I,K,J+1)+Sij6(I,K,J)))**2.d0 & +2.d0*Sij3(I,K,J)**2.d0 ) END DO END DO From 64f45f2b9e0f25d236ef73e5e6f381f2a297e7c8 Mon Sep 17 00:00:00 2001 From: johnryantaylor Date: Tue, 2 Aug 2016 12:53:11 +0100 Subject: [PATCH 08/66] Updated les to fix Neumann BCs --- diablo_2.0/for/les.f | 131 ++++++++++++++++--------------------------- 1 file changed, 49 insertions(+), 82 deletions(-) diff --git a/diablo_2.0/for/les.f b/diablo_2.0/for/les.f index e61afd8..e587f3d 100644 --- a/diablo_2.0/for/les.f +++ b/diablo_2.0/for/les.f @@ -72,6 +72,7 @@ subroutine les_chan call compute_strain_chan + ! Convert the velocity to physical space call FFT_XZ_TO_PHYSICAL(CU1,U1,0,NY+1) call FFT_XZ_TO_PHYSICAL(CU2,U2,0,NY+1) @@ -128,6 +129,7 @@ subroutine les_chan END DO END DO + ! Now, compute at |S|*S_ij at GY points DO J=2,NY+1 DO K=0,NZP-1 @@ -187,6 +189,7 @@ subroutine les_chan ! Convert the stress tensor to Fourier space + CALL FFT_XZ_TO_FOURIER(Sij1,CSij1,0,NY+1) ! Sij2 is added through an implicit eddy viscosity ! CALL FFT_XZ_TO_FOURIER(Sij2,CSij2,0,NY+1) @@ -229,28 +232,29 @@ subroutine les_chan ! Now, add the subgrid scale forcing to CFi ! (This includes the subgrid scale stress as an explicit R-K term + DO J=J1,J2 DO K=0,TNKZ DO I=0,NXP-1 CF1(I,K,J)=CF1(I,K,J) & -CIKX(I)*CSij1(I,K,J) - & -(CSij4(I,K,J+1)-CSij4(I,K,J))/DYF(j) - & -CIKZ(K)*CSij5(I,K,J) - CF3(I,K,J)=CF3(I,K,J) - & -CIKX(I)*CSij5(I,K,J) - & -(CSij6(I,K,J+1)-CSij6(I,K,J))/DYF(J) - & -CIKZ(K)*CSij3(I,K,J) +! & -(CSij4(I,K,J+1)-CSij4(I,K,J))/DYF(j) +! & -CIKZ(K)*CSij5(I,K,J) +! CF3(I,K,J)=CF3(I,K,J) +! & -CIKX(I)*CSij5(I,K,J) +! & -(CSij6(I,K,J+1)-CSij6(I,K,J))/DYF(J) +! 
& -CIKZ(K)*CSij3(I,K,J) END DO END DO END DO DO J=2,NY DO K=0,TNKZ DO I=0,NXP-1 - CF2(I,K,J)=CF2(I,K,J) - & -CIKX(I)*CSij4(I,K,J) +! CF2(I,K,J)=CF2(I,K,J) +! & -CIKX(I)*CSij4(I,K,J) ! Sij2 is added through an implict eddy viscosity ! & -(CSij2(I,K,J)-CSij2(I,K,J-1))/DY(j) - & -CIKZ(K)*CSij6(I,K,J) +! & -CIKZ(K)*CSij6(I,K,J) END DO END DO END DO @@ -267,7 +271,7 @@ subroutine les_chan DO K=0,TNKZ DO I=0,NXP-1 DO J=2,NY - CF2(I,K,J)=CF2(I,K,J)+CIKX(I)*CTEMP(I,K,J) +! CF2(I,K,J)=CF2(I,K,J)+CIKX(I)*CTEMP(I,K,J) END DO END DO END DO @@ -282,7 +286,7 @@ subroutine les_chan DO K=0,TNKZ DO I=0,NXP-1 DO J=2,NY - CF2(I,K,J)=CF2(I,K,J)+CIKZ(K)*CTEMP(I,K,J) +! CF2(I,K,J)=CF2(I,K,J)+CIKZ(K)*CTEMP(I,K,J) END DO END DO END DO @@ -370,6 +374,7 @@ subroutine compute_strain_chan END DO END DO + CALL FFT_XZ_TO_PHYSICAL(CSij1,Sij1,0,NY+1) CALL FFT_XZ_TO_PHYSICAL(CSij2,Sij2,0,NY+1) CALL FFT_XZ_TO_PHYSICAL(CSij3,Sij3,0,NY+1) @@ -624,89 +629,51 @@ SUBROUTINE APPLY_BC_LES ! velocity at the ghost cells so that the LES model doesn't use the large ! velocity gradient IF (U_BC_YMAX.eq.1) THEN - IF (USE_MPI) THEN - IF (RANKY.eq.NPROCY-1) THEN + IF ((RANKY.eq.NPROCY-1).or.(.NOT.USE_MPI)) THEN ! We are on process at the upper wall - DO K=0,NZP-1 - DO I=0,NXM - U1(I,K,NY)=U1(I,K,NY-1) - U1(I,K,NY+1)=U1(I,K,NY-1) - END DO - END DO - END IF - ELSE -! For serial version: - DO K=0,NZM - DO I=0,NXM - U1(I,K,NY)=U1(I,K,NY-1) - U1(I,K,NY+1)=U1(I,K,NY-1) - END DO - END DO - END IF - END IF - IF (U_BC_YMIN.eq.1) THEN - IF (USE_MPI) THEN - IF (RANKY.eq.0) THEN -! We are on a process at the bottom wall - DO K=0,NZP-1 - DO I=0,NXM - U1(I,K,1)=U1(I,K,2) - U1(I,K,0)=U1(I,K,2) - END DO - END DO - END IF - ELSE -! 
For serial version: - DO K=0,NZM - DO I=0,NXM - U1(I,K,1)=U1(I,K,2) - U1(I,K,0)=U1(I,K,2) - END DO + DO K=0,TNKZ + DO I=0,NXP-1 + CU1(I,K,NY)=CU1(I,K,NY-1) + CU1(I,K,NY+1)=CU1(I,K,NY) + END DO END DO END IF END IF IF (W_BC_YMAX.eq.1) THEN - IF (USE_MPI) THEN - IF (RANKY.eq.NPROCY-1) THEN + IF ((RANKY.eq.NPROCY-1).or.(.NOT.USE_MPI)) THEN ! We are on process at the upper wall - DO K=0,NZP-1 - DO I=0,NXM - U3(I,K,NY)=U3(I,K,NY-1) - U3(I,K,NY+1)=U3(I,K,NY-1) - END DO - END DO - END IF - ELSE -! For serial version: - DO K=0,NZM - DO I=0,NXM - U3(I,K,NY)=U3(I,K,NY-1) - U3(I,K,NY+1)=U3(I,K,NY-1) - END DO + DO K=0,TNKZ + DO I=0,NXP-1 + CU3(I,K,NY)=CU3(I,K,NY-1) + CU3(I,K,NY+1)=CU3(I,K,NY) + END DO END DO END IF END IF + + IF (U_BC_YMIN.eq.1) THEN + IF ((RANKY.eq.0).or.(.NOT.USE_MPI)) THEN +! We are on a process at the bottom wall + DO K=0,TNKZ + DO I=0,NXP-1 + CU1(I,K,1)=CU1(I,K,2) + CU1(I,K,0)=CU1(I,K,1) + END DO + END DO + END IF + END IF + IF (W_BC_YMIN.eq.1) THEN - IF (USE_MPI) THEN - IF (RANKY.eq.0) THEN + IF ((RANKY.eq.0).or.(.NOT.USE_MPI)) THEN ! We are on a process at the bottom wall - DO K=0,NZP-1 - DO I=0,NXM - U3(I,K,1)=U3(I,K,2) - U3(I,K,0)=U3(I,K,2) - END DO - END DO - END IF - ELSE -! 
For serial version: - DO K=0,NZM - DO I=0,NXM - U3(I,K,1)=U3(I,K,2) - U3(I,K,0)=U3(I,K,2) - END DO - END DO - END IF + DO K=0,TNKZ + DO I=0,NXP-1 + CU3(I,K,1)=CU3(I,K,2) + CU3(I,K,0)=CU3(I,K,1) + END DO + END DO + END IF END IF RETURN From 1140bc841b490592eb3ac4a09168274d0c6a6499 Mon Sep 17 00:00:00 2001 From: johnryantaylor Date: Tue, 2 Aug 2016 16:22:00 +0100 Subject: [PATCH 09/66] Removed temporary lines --- diablo_2.0/for/diablo_io.f | 62 ++++++++++++-------------------------- 1 file changed, 20 insertions(+), 42 deletions(-) diff --git a/diablo_2.0/for/diablo_io.f b/diablo_2.0/for/diablo_io.f index 3efaafd..172c695 100644 --- a/diablo_2.0/for/diablo_io.f +++ b/diablo_2.0/for/diablo_io.f @@ -242,47 +242,27 @@ SUBROUTINE READ_FLOW IF (RANK.EQ.0) & write(*,*) 'NX_T, NY_T, NZ_T: ',NX_T,NY_T,NZ_T -! TEMPORARY, UNCOMMENT!!! -! IF ((NX .NE. NX_T) .OR. (NY .NE. NY_T) .OR. (NZ .NE. NZ_T)) -! * STOP 'Error: old flowfield wrong dimensions. ' -! IF (NUM_PER_DIR .NE. NUM_PER_DIR_T) -! * STOP 'Error: old flowfield wrong NUM_PER_DIR. ' + IF ((NX .NE. NX_T) .OR. (NY .NE. NY_T) .OR. (NZ .NE. NZ_T)) + * STOP 'Error: old flowfield wrong dimensions. ' + IF (NUM_PER_DIR .NE. NUM_PER_DIR_T) + * STOP 'Error: old flowfield wrong NUM_PER_DIR. ' IF (RANK.EQ.0) & write(*,*) 'READING FLOW' IF (NUM_PER_DIR.EQ.3) THEN -! TEMPORARY!!!!! -! READ (10) (((CU1(I,K,J),I=0,NKX),K=0,TNKZ),J=0,TNKY), -! * (((CU2(I,K,J),I=0,NKX),K=0,TNKZ),J=0,TNKY), -! * (((CU3(I,K,J),I=0,NKX),K=0,TNKZ),J=0,TNKY) -! DO N=1,NUM_READ_TH + READ (10) (((CU1(I,K,J),I=0,NKX),K=0,TNKZ),J=0,TNKY), + * (((CU2(I,K,J),I=0,NKX),K=0,TNKZ),J=0,TNKY), + * (((CU3(I,K,J),I=0,NKX),K=0,TNKZ),J=0,TNKY) + DO N=1,NUM_READ_TH ! Specify in input.dat which scalars are to be read -! OPEN(UNIT=11,FILE=FNAME_TH(READ_TH_INDEX(N)),STATUS="OLD" -! & ,FORM="UNFORMATTED") -! READ (11) NX_T, NY_T, NZ_T, NUM_PER_DIR_T, TIME, TIME_STEP -! READ (11) (((CTH(I,K,J,READ_TH_INDEX(N)) -! & ,I=0,NKX),K=0,TNKZ),J=0,TNKY) -! CLOSE(11) -! END DO - -! 
TEMPORARY, REMOVE THESE LINES !!!! - READ (10) - * (((CU1(I,K,J),I=0,256/3),K=0,2*256/3),J=0,2*256/3), - * (((CU2(I,K,J),I=0,256/3),K=0,2*256/3),J=0,2*256/3), - * (((CU3(I,K,J),I=0,256/3),K=0,2*256/3),J=0,2*256/3) - DO N=1,NUM_READ_TH -! Specify in input.dat which scalars are to be read - OPEN(UNIT=11,FILE=FNAME_TH(READ_TH_INDEX(N)),STATUS="OLD" - & ,FORM="UNFORMATTED") - READ (11) NX_T, NY_T, NZ_T, NUM_PER_DIR_T, - * TIME, TIME_STEP - READ (11) (((CTH(I,K,J,READ_TH_INDEX(N)) - & ,I=0,256/3),K=0,2*256/3),J=0,2*256/3) - CLOSE(11) - END DO - - + OPEN(UNIT=11,FILE=FNAME_TH(READ_TH_INDEX(N)),STATUS="OLD" + & ,FORM="UNFORMATTED") + READ (11) NX_T, NY_T, NZ_T, NUM_PER_DIR_T, TIME, TIME_STEP + READ (11) (((CTH(I,K,J,READ_TH_INDEX(N)) + & ,I=0,NKX),K=0,TNKZ),J=0,TNKY) + CLOSE(11) + END DO ELSEIF (NUM_PER_DIR.EQ.2) THEN READ (10) (((CU1(I,K,J),I=0,NKX),K=0,TNKZ),J=1,NY), @@ -440,14 +420,14 @@ SUBROUTINE END_RUN(FLAG) call WALL_TIME(END_TIME) if (END_TIME-START_TIME.gt.TIME_LIMIT) THEN IF (RANK.EQ.0) - & write(*,*) ' STOP beacuse of wall-time hit!' + & write(*,*) ' STOP because of wall-time hit!' FLAG=.TRUE. END IF INQUIRE(FILE="stop.now", EXIST=FILE_EXISTS) IF ( FILE_EXISTS ) THEN IF (RANK.EQ.0) - & write(*,*) ' STOP beacuse of stop.now file!' + & write(*,*) ' STOP because of stop.now file!' FLAG=.TRUE. END IF @@ -458,11 +438,9 @@ SUBROUTINE END_RUN(FLAG) subroutine wall_time(wt) c -c Return wall-clock time as seconds after Jan. 1, 2014. +c Return wall-clock time as seconds after Jan. 1, 2016. c Support for leap year is not included anymore. c -c Next leap year is 2016! -c c By using a 'save' statement, the wall-time after the first c call to the subroutine could be computed, but that is not c intended with the present subroutine (e.g. the history file) @@ -514,8 +492,8 @@ subroutine wall_time(wt) c c And compute wall-clock time c - wt = (val(1)-2014)*365*86400+ - & day*86400+val(5)*3600+val(6)*60+val(7)+val(8)/1000. 
+ wt = (val(1)-2016)*365*86400+ + & day*86400+val(5)*3600+val(6)*60+val(7)+dble(val(8)/1000.d0) end From 3d05620a0502691579928207d6467e817dce7823 Mon Sep 17 00:00:00 2001 From: johnryantaylor Date: Tue, 2 Aug 2016 17:24:45 +0100 Subject: [PATCH 10/66] Now allow only some scalars to be restarted --- diablo_2.0/for/hdf5.f | 12 +++++++++++- 1 file changed, 11 insertions(+), 1 deletion(-) diff --git a/diablo_2.0/for/hdf5.f b/diablo_2.0/for/hdf5.f index 5e1785d..b44f3df 100644 --- a/diablo_2.0/for/hdf5.f +++ b/diablo_2.0/for/hdf5.f @@ -422,6 +422,10 @@ SUBROUTINE ReadHDF5(FNAME) dname="TH"//CHAR(ith+45) end select + if ((ith.lt.3).or.(.NOT.CREATE_NEW_TH(max(0,ith-3)))) then + +! Check to make sure that we should read in this scalar + call h5dopen_f(gid,trim(dname),dset_id,error) call h5dget_space_f(dset_id,filspace_id,error) @@ -455,8 +459,12 @@ SUBROUTINE ReadHDF5(FNAME) ! Close dateset call h5sclose_f(filspace_id, error) call h5dclose_f(dset_id, error) + + end if + end do + ! Decide whether to compute the pressure or to read call h5lexists_f(gid, 'P', READ_PRESSURE, error) if (READ_PRESSURE) then @@ -505,9 +513,11 @@ SUBROUTINE ReadHDF5(FNAME) call fft_xz_to_fourier(U2,CU2,0,NY+1) call fft_xz_to_fourier(U3,CU3,0,NY+1) do ith=1,N_TH + if (.NOT.CREATE_NEW_TH(ith)) then S1(:,:,:)=TH(:,:,:,ith) call fft_xz_to_fourier(S1,CS1,0,NY+1) - CS1(:,:,:)=CTH(:,:,:,ith) + CTH(:,:,:,ith)=CS1(:,:,:) + end if end do IF (USE_MPI) THEN From c9eee9fc25741af098d5e687c6b5332e9855fa42 Mon Sep 17 00:00:00 2001 From: johnryantaylor Date: Wed, 3 Aug 2016 10:20:35 +0100 Subject: [PATCH 11/66] Added the SGS cross terms back --- diablo_2.0/for/les.f | 22 +++++++++++----------- 1 file changed, 11 insertions(+), 11 deletions(-) diff --git a/diablo_2.0/for/les.f b/diablo_2.0/for/les.f index e587f3d..c4a13f2 100644 --- a/diablo_2.0/for/les.f +++ b/diablo_2.0/for/les.f @@ -238,23 +238,23 @@ subroutine les_chan DO I=0,NXP-1 CF1(I,K,J)=CF1(I,K,J) & -CIKX(I)*CSij1(I,K,J) -! 
& -(CSij4(I,K,J+1)-CSij4(I,K,J))/DYF(j) -! & -CIKZ(K)*CSij5(I,K,J) -! CF3(I,K,J)=CF3(I,K,J) -! & -CIKX(I)*CSij5(I,K,J) -! & -(CSij6(I,K,J+1)-CSij6(I,K,J))/DYF(J) -! & -CIKZ(K)*CSij3(I,K,J) + & -(CSij4(I,K,J+1)-CSij4(I,K,J))/DYF(j) + & -CIKZ(K)*CSij5(I,K,J) + CF3(I,K,J)=CF3(I,K,J) + & -CIKX(I)*CSij5(I,K,J) + & -(CSij6(I,K,J+1)-CSij6(I,K,J))/DYF(J) + & -CIKZ(K)*CSij3(I,K,J) END DO END DO END DO DO J=2,NY DO K=0,TNKZ DO I=0,NXP-1 -! CF2(I,K,J)=CF2(I,K,J) -! & -CIKX(I)*CSij4(I,K,J) + CF2(I,K,J)=CF2(I,K,J) + & -CIKX(I)*CSij4(I,K,J) ! Sij2 is added through an implict eddy viscosity ! & -(CSij2(I,K,J)-CSij2(I,K,J-1))/DY(j) -! & -CIKZ(K)*CSij6(I,K,J) + & -CIKZ(K)*CSij6(I,K,J) END DO END DO END DO @@ -271,7 +271,7 @@ subroutine les_chan DO K=0,TNKZ DO I=0,NXP-1 DO J=2,NY -! CF2(I,K,J)=CF2(I,K,J)+CIKX(I)*CTEMP(I,K,J) + CF2(I,K,J)=CF2(I,K,J)+CIKX(I)*CTEMP(I,K,J) END DO END DO END DO @@ -286,7 +286,7 @@ subroutine les_chan DO K=0,TNKZ DO I=0,NXP-1 DO J=2,NY -! CF2(I,K,J)=CF2(I,K,J)+CIKZ(K)*CTEMP(I,K,J) + CF2(I,K,J)=CF2(I,K,J)+CIKZ(K)*CTEMP(I,K,J) END DO END DO END DO From f337b037994dba5af6c50e660a6179df002ac46d Mon Sep 17 00:00:00 2001 From: johnryantaylor Date: Wed, 3 Aug 2016 14:48:56 +0100 Subject: [PATCH 12/66] Fixed bug in calculating CFL number for rotating flow --- diablo_2.0/for/courant.f | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/diablo_2.0/for/courant.f b/diablo_2.0/for/courant.f index e75f124..968d7ec 100644 --- a/diablo_2.0/for/courant.f +++ b/diablo_2.0/for/courant.f @@ -12,7 +12,7 @@ subroutine courant integer imin,jmin,kmin ! Set the initial dt to some arbitrary large number - dt=999.d0 + dt=50.d0 ! 
Set the timestep based on viscosity and diffusivity dt=min(dt,0.5d0*min(dx(1),dy(1))/NU) @@ -42,7 +42,7 @@ subroutine courant dt_x=cfl*dx(i)/abs(U1(i,k,j)-1.d0*DRHODZ(N)*RI(N) & *GYF(J)/I_RO) dt_y=cfl*dy(j)/abs(U2(i,k,j)) - dt_z=cfl*dz(j)/abs(U3(i,k,j)+(RI(N)/I_RO) + dt_z=cfl*dz(k)/abs(U3(i,k,j)+(RI(N)/I_RO) & *DRHODX(N)*GYF(J)) dt=min(dt,dt_x,dt_y,dt_z) end do From f98d058429fe3197f755dca0ef91d1658a6876a3 Mon Sep 17 00:00:00 2001 From: johnryantaylor Date: Thu, 4 Aug 2016 14:47:57 +0100 Subject: [PATCH 13/66] Fixed calculation of nk_th --- diablo_2.0/post_process/matlab/readmean.m | 26 +++++++++++++++-------- 1 file changed, 17 insertions(+), 9 deletions(-) diff --git a/diablo_2.0/post_process/matlab/readmean.m b/diablo_2.0/post_process/matlab/readmean.m index 1d313b3..304da43 100644 --- a/diablo_2.0/post_process/matlab/readmean.m +++ b/diablo_2.0/post_process/matlab/readmean.m @@ -6,18 +6,18 @@ end % Set the grid and domain size in the y-direction %NP=input('Enter the number of processes in the y-direction: '); -NP=4; +NP=2; %NY_S=input('Insert the number of points per process in the y-direction: '); -NY_S=17; +NY_S=33; % Enter the number of scalars -N_TH=1; +N_TH=5; % Enter the viscosity from input.dat NU=0.001; % Enter the Prandtl number Pr=1; kappa=NU/Pr; % Enter the richardson number for each scalar -RI(1:N_TH)=0.15; +RI(1:N_TH)=1.0; % Set the start and end time in code units for start of averaging tstart=0; %tend=999; % If tend isn't defined, tend will default to the final time @@ -86,7 +86,7 @@ % Now read in scalars if (N_TH>0) % Determine the number of records in the file based on its length - nk_th=floor(length(turbo_th(:,1))/(N_TH*(NY_S+2))); + nk_th=floor(length(turbo_th(:,1))/(N_TH*(NY_S)+2)); else nk_th=0; end @@ -236,10 +236,10 @@ end thv_int=trapz(gyf,thv,1); thrms_int=trapz(gyf,thrms,1); - -for j=2:NY - gy(j)=(gyf(j)+gyf(j-1))/2; -end + +% Get GY from hdf5 grid file +gy=h5read([base_dir '/grid.h5'],'/grids/y'); + for j=2:NY-1 
dyf(j)=(gy(j+1)-gy(j)); end @@ -255,4 +255,12 @@ end end +for n=1:N_TH +for k=1:nk + thme_int(k,n)=0; + for j=2:NY-1 + thme_int(k,n)=thme_int(k,n)+thme(j,k,n)*(gy(j+1)-gy(j)); + end +end +end From 2eaf3ebcf3c7a00725c95eb19dbc6e9744571a1c Mon Sep 17 00:00:00 2001 From: johnryantaylor Date: Tue, 9 Aug 2016 16:14:52 +0100 Subject: [PATCH 14/66] Fixed bug in ReadHDF5 --- diablo_2.0/for/hdf5.f | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/diablo_2.0/for/hdf5.f b/diablo_2.0/for/hdf5.f index b44f3df..e6b7339 100644 --- a/diablo_2.0/for/hdf5.f +++ b/diablo_2.0/for/hdf5.f @@ -422,7 +422,8 @@ SUBROUTINE ReadHDF5(FNAME) dname="TH"//CHAR(ith+45) end select - if ((ith.lt.3).or.(.NOT.CREATE_NEW_TH(max(0,ith-3)))) then + if ((ith.le.3).or. + & ((ith.gt.3).and.(.NOT.CREATE_NEW_TH(max(1,ith-3))))) then ! Check to make sure that we should read in this scalar From cad9316eb38d00c05938c47f0d4c0b6ff98c7c07 Mon Sep 17 00:00:00 2001 From: johnryantaylor Date: Thu, 28 Jul 2016 15:33:31 +0100 Subject: [PATCH 15/66] Update README.md --- README.md | 3 +++ 1 file changed, 3 insertions(+) diff --git a/README.md b/README.md index b67be54..f3f0867 100644 --- a/README.md +++ b/README.md @@ -1,6 +1,9 @@ # DIABLO DNS In A Box...Laptop Optimized! 
+This branch includes background density gradients and thermal wind +and is designed for frontal zone simulations + A brief description of DIABLO diablo.f Main program, define grid, read/write flow, statistics From 62ca157d86019eef8363848dbde5df98b785f72e Mon Sep 17 00:00:00 2001 From: johnryantaylor Date: Thu, 28 Jul 2016 16:36:48 +0100 Subject: [PATCH 16/66] Added case for surface boundary layer --- diablo_2.0/pre_process/create_grid_h5.m | 7 +++++++ 1 file changed, 7 insertions(+) diff --git a/diablo_2.0/pre_process/create_grid_h5.m b/diablo_2.0/pre_process/create_grid_h5.m index b6b3de6..95bcd53 100644 --- a/diablo_2.0/pre_process/create_grid_h5.m +++ b/diablo_2.0/pre_process/create_grid_h5.m @@ -14,6 +14,7 @@ disp('1) High resolution at both ends (tanh stretching function)'); disp('2) High resolution in center (tanh stretching function)'); disp('3) High resolution at both ends (starting at 0)'); + disp('4) High resolution at the top for surface boundary layer'); GRID_TYPE=input('Select a grid type: '); % Set the dimensions of the grid @@ -40,6 +41,12 @@ for J=1:N+1 G(J+1)=(L/2.0)*tanh(CS*((2.0*(J-1))/(N)-1.0))/tanh(CS)+L/2.0; end +elseif (GRID_TYPE==4) +% Surface boundary layer + for J=1:N+1 + G(J+1)=L*tanh(CS*((N-(J-1))/N-1.0))/tanh(CS)+L; + end + G(:)=G(:)*-1; else disp('Error, entered grid type unknown'); end From 2c6bddd8cf14d44268aec12657a42f89f6e06b15 Mon Sep 17 00:00:00 2001 From: johnryantaylor Date: Fri, 29 Jul 2016 12:49:54 +0100 Subject: [PATCH 17/66] updated to include thermal wind --- diablo_2.0/for/courant.f | 20 ++++++++++++++++++++ 1 file changed, 20 insertions(+) diff --git a/diablo_2.0/for/courant.f b/diablo_2.0/for/courant.f index 0a96229..e75f124 100644 --- a/diablo_2.0/for/courant.f +++ b/diablo_2.0/for/courant.f @@ -32,6 +32,24 @@ subroutine courant end do ! Use the model velocity to calculate the CFL number + + IF (FLAVOR.eq.'Front') THEN +! 
Add thermal wind to velocity when calculating the CFL number + do n=1,N_TH + do j=JSTART,JEND + do k=0,NZP-1 + do i=0,NXM + dt_x=cfl*dx(i)/abs(U1(i,k,j)-1.d0*DRHODZ(N)*RI(N) + & *GYF(J)/I_RO) + dt_y=cfl*dy(j)/abs(U2(i,k,j)) + dt_z=cfl*dz(j)/abs(U3(i,k,j)+(RI(N)/I_RO) + & *DRHODX(N)*GYF(J)) + dt=min(dt,dt_x,dt_y,dt_z) + end do + end do + end do + end do + ELSE do j=1,NY do k=0,NZP-1 do i=0,NXM @@ -42,6 +60,8 @@ subroutine courant end do end do end do + END IF + if (USE_MPI) then call get_minimum_mpi(dt) end if From 07cf3432d627246b77117ca84c52d0462733b6f7 Mon Sep 17 00:00:00 2001 From: johnryantaylor Date: Fri, 29 Jul 2016 12:50:18 +0100 Subject: [PATCH 18/66] Added case for thermal wind at front --- diablo_2.0/for/set_ics.f | 37 +++++++++++++++++++++++++++++++++++++ 1 file changed, 37 insertions(+) diff --git a/diablo_2.0/for/set_ics.f b/diablo_2.0/for/set_ics.f index d7130da..7e56be9 100644 --- a/diablo_2.0/for/set_ics.f +++ b/diablo_2.0/for/set_ics.f @@ -71,6 +71,18 @@ SUBROUTINE CREATE_FLOW_CHAN END DO END DO END DO + else if (IC_TYPE.eq.4) then +C For Front +C Initialize in thermal wind balance: + DO J=0,NY + DO K=0,NZP-1 + DO I=0,NXM + U1(I,K,J)=0.d0 + U2(I,K,J)=0.d0 + U3(I,K,J)=0.d0 + END DO + END DO + END DO else WRITE(*,*) 'WARNING, unsupported IC_TYPE in CREATE_FLOW' end if @@ -194,6 +206,8 @@ SUBROUTINE CREATE_TH_CHAN INCLUDE 'header' INTEGER I,J,K,N +! A variable for Front case... + REAL*8 RI_B(0:NY+1) DO N=1,N_TH IF (CREATE_NEW_TH(N)) THEN @@ -243,6 +257,29 @@ SUBROUTINE CREATE_TH_CHAN END DO END DO END DO + ELSE IF (IC_TYPE.eq.4) THEN +! 
For Front case, specify given RI_B profile + DO K=0,NZP-1 + DO I=0,NXM + TH(I,K,0,N)=0.d0 + DO J=1,NY + if (GYF(J).lt.-60.d0) then + RI_B(J)=20.d0 + TH(I,K,J,N)=(GYF(J)-GYF(1))* + & RI_B(J)*(RI(N)*DRHODX(N))**2.d0 + & /I_RO**2.d0/RI(N) + else + RI_B(J)=1.0d0 + TH(I,K,J,N)=(GYF(J)+60.d0)* + & RI_B(J)*(RI(N)*DRHODX(N))**2.d0 + & /I_RO**2.d0/RI(N) + & +(-60+140.d0)*20.d0*(RI(N)*DRHODX(N))**2.d0 + & /I_RO**2.d0/RI(N) + end if + END DO + END DO + END DO + ELSE WRITE(*,*) 'WARNING, unsupported IC_TYPE in CREATE_FLOW' END IF From 4c5fe42b8a8e0c837b8b00871006aaaaa62221e6 Mon Sep 17 00:00:00 2001 From: johnryantaylor Date: Tue, 20 Jun 2017 16:56:55 +0100 Subject: [PATCH 19/66] Resolving conflict with user_rhs --- diablo_2.0/for/user_rhs.f | 224 +++++++++++++++++++++++++++++++++----- 1 file changed, 196 insertions(+), 28 deletions(-) diff --git a/diablo_2.0/for/user_rhs.f b/diablo_2.0/for/user_rhs.f index 778ec8e..7fb5e68 100644 --- a/diablo_2.0/for/user_rhs.f +++ b/diablo_2.0/for/user_rhs.f @@ -85,37 +85,68 @@ SUBROUTINE USER_RHS_CHAN_FOURIER integer i,j,k,n - real*8 alpha +! Advection owing to thermal wind + IF ((FLAVOR.eq.'Front').and.(I_RO.ne.0.d0)) THEN + DO N=1,N_TH +! Loop over all scalar -! For example, to add a linear damping term (e.g. -alpha*U) to the RHS: -! alpha=-0.1d0 -! DO J=JSTART,JEND -! DO K=0,TNKZ -! DO I=0,NXP-1 -! CF1(I,K,J)=CF1(I,K,J)-alpha*CU1(I,K,J) -! END DO -! END DO -! END DO +! Add thermal wind advection to the momentum equations + do j=JSTART,JEND + do k=0,TNKZ + do i=0,NXP-1 + CF1(I,K,J)=CF1(I,K,J) + & -(DRHODX(N)*RI(N)*GYF(J)/I_RO) + & *CIKZ(K)*CU1(I,K,J) + & -(-1.d0*DRHODZ(N)*RI(N)*GYF(J)/I_RO) + & *CIKX(I)*CU1(I,K,J) + & -(-1.d0*DRHODZ(N)*RI(N)/I_RO) + & *0.5d0*(CU2(I,K,J)+CU2(I,K,J+1)) + CF3(I,K,J)=CF3(I,K,J) + & -(DRHODX(N)*RI(N)*GYF(J)/I_RO) + & *CIKZ(K)*CU3(I,K,J) + & -(-1.d0*DRHODZ(N)*RI(N)*GYF(J)/I_RO) + & *CIKX(I)*CU3(I,K,J) + & -(DRHODX(N)*RI(N)/I_RO) + & *0.5d0*(CU2(I,K,J)+CU2(I,K,J+1)) + end do + end do + end do -! 
For U2 do this... -! Note that the only thing that changes are the bounds of the J index -! DO J=2,NY -! DO K=0,TNKZ -! DO I=0,NXP-1 -! CF2(I,K,J)=CF2(I,K,J)-alpha*CU2(I,K,J) -! END DO -! END DO -! END DO + do j=2,NY + do k=0,TNKZ + do i=0,NXP-1 + CF2(I,K,J)=CF2(I,K,J) + & -(DRHODX(N)*RI(N)*GY(J)/I_RO) + & *CIKZ(K)*CU2(I,K,J) + & -(-1.d0*DRHODZ(N)*RI(N)*GY(J)/I_RO) + & *CIKX(I)*CU2(I,K,J) + end do + end do + end do -! For scalars, do this... -! DO J=JSTART,JEND -! DO K=0,TNKZ -! DO I=0,NXP-1 -! CFTH(I,K,J,N)=CFTH(I,K,J,N)-alpha*CTH(I,K,J,N) -! END DO -! END DO -! END DO -! END DO +! Add advection by thermal wind to the scalar equations + DO J=JSTART_TH(N),JEND_TH(N) + DO K=0,TNKZ + DO I=0,NXP-1 + CFTH(I,K,J,N)=CFTH(I,K,J,N) + & -(RI(N)/I_RO)*DRHODX(N)*GYF(J) + & *CIKZ(K)*CTH(I,K,J,N) + & -(RI(N)/I_RO)*-1.d0*DRHODZ(N)*GYF(J) + & *CIKX(I)*CTH(I,K,J,N) + END DO + END DO + END DO + +! End do N_TH + END DO + + END IF + +! Add sponge layer forcing + DO N=1,N_TH + CALL SPONGE_TH(N) + END DO + CALL SPONGE_VEL RETURN END @@ -142,6 +173,7 @@ SUBROUTINE USER_RHS_PER_PHYSICAL END DO C Note, that each cell has the same volume, so we can just average over all points EK=EK/dble(NX*NY*NZ) + ! Scale EK by an amount to compensate for dissipation from 2/3 de-aliasing: EK=0.8d0*EK DO J=0,NYM @@ -204,7 +236,143 @@ SUBROUTINE USER_RHS_CAVITY_PHYSICAL END + SUBROUTINE SPONGE_TH(N) +! This subroutine applies a sponge relaxation (Rayleigh damping) towards a +! specified background state for the temperature field +! The intention is to allow an open boundary + include 'header' + integer i,j,k,n + real*8 L_sponge,L_bottom + real*8 SPONGE_AMP + +! The following variables will store the background state + real*8 TH_0(-1:NY+1) + + real*8 RI_B(0:NY+1) + +! This variable will hold the forcing rate + real*8 SPONGE_SIGMA(0:NY+1) + +! Set the amplitude of the sponge + SPONGE_AMP=0.0001d0 +! Set the top of the sponge layer in physical units + L_sponge=-120.d0 +! 
Set the bottom of the computational domain in physical units + L_bottom=-140.d0 + DO J=0,NY+1 +! Quadratic damping at lower wall + if (GYF(J).lt.L_sponge) then + SPONGE_SIGMA(j)=SPONGE_AMP*((L_sponge-GYF(J)) + & /(L_sponge-L_bottom))**2.d0 + else + SPONGE_SIGMA(j)=0.d0 + end if + END DO + +! For MLI latmix + if (n.eq.1) then + TH_0(0)=0.d0 + do j=1,NY+1 + RI_B(J)=20.d0 + TH_0(J)=TH_0(J-1) + & +DY(J)*RI_B(J)*(RI(N)*DRHODX(N))**2.d0 + & /I_RO**2.d0/RI(N) + end do + else + do j=0,NY+1 + TH_0(j)=0.d0 + end do + end if + +! Add damping to R-K terms +! Damp the perturbations towards 0 + do k=0,TNKZ + do i=0,NKX + if ((i.ne.0).or.(k.ne.0)) then + do j=0,NY+1 + CFTH(i,k,j,n)=CFTH(i,k,j,n) + & -SPONGE_SIGMA(j)*(CTH(i,k,j,n)-0.) + end do + end if + end do + end do +! Damp the mean gradient towards TH_0 + do j=0,NY+1 + CFTH(0,0,j,n)=CFTH(0,0,j,n)-SPONGE_SIGMA(j) + & *(CTH(0,0,j,n)-TH_0(J)) + end do + + return + end + + SUBROUTINE SPONGE_VEL +! This subroutine applies a sponge relaxation (Rayleigh damping) towards a +! specified background state +! The intention is to allow an open boundary + include 'header' + integer i,j,k + + real*8 L_sponge,L_bottom + real*8 SPONGE_AMP + +! The following variables will store the background state + real*8 U1_0(-1:NY+1), U2_0(0:NY+1), U3_0(-1:NY+1) + +! This variable will hold the forcing rate + real*8 SPONGE_SIGMA(0:NY+1) + +! Set the amplitude of the sponge + SPONGE_AMP=0.0001d0 +! Set the top of the sponge layer in physical units + L_sponge=-120.d0 +! Set the bottom of the computational domain in physical units + L_bottom=-140.d0 + DO J=0,NY+1 +! Quadratic damping at lower wall + if (GYF(J).lt.L_sponge) then + SPONGE_SIGMA(j)=SPONGE_AMP*((L_sponge-GYF(J)) + & /(L_sponge-L_bottom))**2.d0 + else + SPONGE_SIGMA(j)=0.d0 + end if + END DO + +! Set the background state +! Here, set the background to be geostrophic, with a linear temperature profile + do j=0,NY+1 + U1_0(j)=0.d0 + U3_0(j)=0.d0 + end do + do j=0,NY+1 + U2_0(j)=0.d0 + end do + +! 
Add damping function to explicit R-K + do k=0,TNKZ + do i=0,NKX + if ((i.ne.0).or.(k.ne.0)) then + do j=jstart,jend + CF1(I,K,J)=CF1(I,K,J)-SPONGE_SIGMA(j)*(CU1(i,k,j)-0.d0) + CF3(I,K,J)=CF3(I,K,J)-SPONGE_SIGMA(j)*(CU3(i,k,j)-0.d0) + end do + do j=1,NY + CF2(I,K,J)=CF2(I,K,J)- + & 0.5*(SPONGE_SIGMA(j)+SPONGE_SIGMA(j+1))*(CU2(i,k,j)-0.d0) + end do + end if + end do + end do +! Damp mean flow + do j=jstart,jend + CF1(0,0,j)=CF1(0,0,j)-SPONGE_SIGMA(j)*(CU1(0,0,j)-U1_0(j)) + CF3(0,0,j)=CF3(0,0,j)-SPONGE_SIGMA(j)*(CU3(0,0,j)-U3_0(j)) + end do + do j=1,NY + CF2(0,0,j)=CF2(0,0,j)-SPONGE_SIGMA(j)*(CU2(0,0,j)-U2_0(j)) + end do + return + end From cdd7f8dfc99a7150d6ece4fee83b7d54f71d6800 Mon Sep 17 00:00:00 2001 From: johnryantaylor Date: Tue, 20 Jun 2017 17:05:46 +0100 Subject: [PATCH 20/66] Resolved conflict --- diablo_2.0/for/les.f | 27 +++++++++++---------------- 1 file changed, 11 insertions(+), 16 deletions(-) diff --git a/diablo_2.0/for/les.f b/diablo_2.0/for/les.f index 543bb9a..92ec5d2 100644 --- a/diablo_2.0/for/les.f +++ b/diablo_2.0/for/les.f @@ -94,11 +94,6 @@ subroutine les_chan CALL MPI_BCAST(U3_bar,NY+2,MPI_DOUBLE_PRECISION,0, & MPI_COMM_Z,ierror) -! IF (MOD(TIME_STEP,SAVE_STATS_INT).EQ.0) THEN -! WRITE(*,*) RANK,GYF(78),U1_bar(78),U3_bar(78) -! END IF - - ! Convert the velocity to physical space call FFT_XZ_TO_PHYSICAL(CU1,U1,0,NY+1) call FFT_XZ_TO_PHYSICAL(CU2,U2,0,NY+1) @@ -282,23 +277,23 @@ subroutine les_chan DO I=0,NXP-1 CF1(I,K,J)=CF1(I,K,J) & -CIKX(I)*CSij1(I,K,J) - & -(CSij4(I,K,J+1)-CSij4(I,K,J))/DYF(j) - & -CIKZ(K)*CSij5(I,K,J) - CF3(I,K,J)=CF3(I,K,J) - & -CIKX(I)*CSij5(I,K,J) - & -(CSij6(I,K,J+1)-CSij6(I,K,J))/DYF(J) - & -CIKZ(K)*CSij3(I,K,J) +! & -(CSij4(I,K,J+1)-CSij4(I,K,J))/DYF(j) +! & -CIKZ(K)*CSij5(I,K,J) +! CF3(I,K,J)=CF3(I,K,J) +! & -CIKX(I)*CSij5(I,K,J) +! & -(CSij6(I,K,J+1)-CSij6(I,K,J))/DYF(J) +! & -CIKZ(K)*CSij3(I,K,J) END DO END DO END DO DO J=2,NY DO K=0,TNKZ DO I=0,NXP-1 - CF2(I,K,J)=CF2(I,K,J) - & -CIKX(I)*CSij4(I,K,J) +! 
CF2(I,K,J)=CF2(I,K,J) +! & -CIKX(I)*CSij4(I,K,J) ! Sij2 is added through an implict eddy viscosity ! & -(CSij2(I,K,J)-CSij2(I,K,J-1))/DY(j) - & -CIKZ(K)*CSij6(I,K,J) +! & -CIKZ(K)*CSij6(I,K,J) END DO END DO END DO @@ -315,7 +310,7 @@ subroutine les_chan DO K=0,TNKZ DO I=0,NXP-1 DO J=2,NY - CF2(I,K,J)=CF2(I,K,J)+CIKX(I)*CTEMP(I,K,J) +! CF2(I,K,J)=CF2(I,K,J)+CIKX(I)*CTEMP(I,K,J) END DO END DO END DO @@ -330,7 +325,7 @@ subroutine les_chan DO K=0,TNKZ DO I=0,NXP-1 DO J=2,NY - CF2(I,K,J)=CF2(I,K,J)+CIKZ(K)*CTEMP(I,K,J) +! CF2(I,K,J)=CF2(I,K,J)+CIKZ(K)*CTEMP(I,K,J) END DO END DO END DO From 81fde974e0e8ac89a7a7e227de7e01237d5e6c3a Mon Sep 17 00:00:00 2001 From: johnryantaylor Date: Wed, 3 Aug 2016 14:48:56 +0100 Subject: [PATCH 21/66] Fixed bug in calculating CFL number for rotating flow --- diablo_2.0/for/courant.f | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/diablo_2.0/for/courant.f b/diablo_2.0/for/courant.f index e75f124..968d7ec 100644 --- a/diablo_2.0/for/courant.f +++ b/diablo_2.0/for/courant.f @@ -12,7 +12,7 @@ subroutine courant integer imin,jmin,kmin ! Set the initial dt to some arbitrary large number - dt=999.d0 + dt=50.d0 ! 
Set the timestep based on viscosity and diffusivity dt=min(dt,0.5d0*min(dx(1),dy(1))/NU) @@ -42,7 +42,7 @@ subroutine courant dt_x=cfl*dx(i)/abs(U1(i,k,j)-1.d0*DRHODZ(N)*RI(N) & *GYF(J)/I_RO) dt_y=cfl*dy(j)/abs(U2(i,k,j)) - dt_z=cfl*dz(j)/abs(U3(i,k,j)+(RI(N)/I_RO) + dt_z=cfl*dz(k)/abs(U3(i,k,j)+(RI(N)/I_RO) & *DRHODX(N)*GYF(J)) dt=min(dt,dt_x,dt_y,dt_z) end do From dba3273e0c2a1923e222273a5fbf6d38628d964a Mon Sep 17 00:00:00 2001 From: johnryantaylor Date: Tue, 20 Jun 2017 17:09:35 +0100 Subject: [PATCH 22/66] Resolved conflict in readmean.m --- diablo_2.0/T2016/MOVIE.dat | 1 + diablo_2.0/T2016/grid.h5 | Bin 0 -> 2960 bytes diablo_2.0/T2016/grid_def | 8 + diablo_2.0/T2016/grid_def.all | 8 + diablo_2.0/T2016/grid_mpi | 3 + diablo_2.0/T2016/input.dat | 35 ++++ diablo_2.0/T2016/input_chan.dat | 60 ++++++ diablo_2.0/T2016/input_per.dat | 35 ++++ diablo_2.0/for/Makefile | 7 +- diablo_2.0/for/channel.f | 2 + diablo_2.0/for/diablo.f | 8 + diablo_2.0/for/diablo_io.f | 10 +- diablo_2.0/for/filter.f | 95 +++++---- diablo_2.0/for/grid_def | 8 +- diablo_2.0/for/grid_def.all | 8 +- diablo_2.0/for/grid_mpi | 2 +- diablo_2.0/for/les.f | 6 +- diablo_2.0/for/mpi.f | 43 ++++- diablo_2.0/for/save_stats.f | 51 +---- diablo_2.0/for/set_ics.f | 32 ++++ diablo_2.0/for/user_rhs.f | 180 +++++++++++------- .../post_process/matlab/plot_movie_xy.m | 23 ++- .../post_process/matlab/plot_movie_xz.m | 67 +++++-- diablo_2.0/post_process/matlab/readmean.m | 17 +- diablo_2.0/post_process/matlab/readmean_all.m | 71 ++++++- diablo_2.0/setup_run | 7 +- 26 files changed, 562 insertions(+), 225 deletions(-) create mode 100644 diablo_2.0/T2016/MOVIE.dat create mode 100644 diablo_2.0/T2016/grid.h5 create mode 100644 diablo_2.0/T2016/grid_def create mode 100644 diablo_2.0/T2016/grid_def.all create mode 100644 diablo_2.0/T2016/grid_mpi create mode 100644 diablo_2.0/T2016/input.dat create mode 100644 diablo_2.0/T2016/input_chan.dat create mode 100644 diablo_2.0/T2016/input_per.dat mode change 100644 => 
100755 diablo_2.0/setup_run diff --git a/diablo_2.0/T2016/MOVIE.dat b/diablo_2.0/T2016/MOVIE.dat new file mode 100644 index 0000000..1a2d99e --- /dev/null +++ b/diablo_2.0/T2016/MOVIE.dat @@ -0,0 +1 @@ +0.0 -10.0 0.0 diff --git a/diablo_2.0/T2016/grid.h5 b/diablo_2.0/T2016/grid.h5 new file mode 100644 index 0000000000000000000000000000000000000000..fd67527a0af48bf08ef61c7c095f610696883af4 GIT binary patch literal 2960 zcmeD5aB<`1lHy_j0S*oZ76t(@6Gr@pf(hIZ5f~pPp8#brLg@}Dy@CnCU}OM61_lYJ zxFFPgbaf#?uC5F~l`!*RG*lbI16Bx&112y^kEjsvaCHm-c{l>n-J6#km4q_S>X%}6%b_uM53oF z4fX+Y$Pl+yhKrB+#oELJPriQqBOa7^AaWa%Sn#)m13|`I8B1m*957)OViLDcIB?Zc z=~(vh_yZirgI|gZ#vl0S^oQFcI_|*4_13+D%VH1YaqBd9evCQrz}{oy5!;vp-qou0 z*Sn$*94%{qfAvPx0rlSN(=V$>9oYDkb;jfJ$OE44w zFJ)Zjcpzd)1>=qN4hN2inoZ=du|KfzVpog8LfZqU`0LZ|pR+!|X|8?2jn(SF{j!WY ze;11bkLz92K6INM*!^_zNx3^F2Vw+Xd!I8gJ`lHjAKS;Nh6gwv``Nz#r++{{*LMA) zM>+?(Cf^FWaZU5U#7_&BXWmvj;9aueiS7sG1M@#WmH#E8cp%iJ=!Mf{nFFh8XZyX9 t7C*2e(Qi)YVZj5p689ah6y`io<)W2Wy5sNuSL)^~c$*&YXV3)G_5jTEQG);g literal 0 HcmV?d00001 diff --git a/diablo_2.0/T2016/grid_def b/diablo_2.0/T2016/grid_def new file mode 100644 index 0000000..7a24307 --- /dev/null +++ b/diablo_2.0/T2016/grid_def @@ -0,0 +1,8 @@ +!----*|--.---------.---------.---------.---------.---------.---------.-|-------| +! grid_def, the portion of the header that defines the grid size for diablo. +!----*|--.---------.---------.---------.---------.---------.---------.-|-------| + PARAMETER (NX=512) + PARAMETER (NY=33) + PARAMETER (NZ=512) + PARAMETER (N_TH=1) +!----*|--.---------.---------.---------.---------.---------.---------.-|-------| diff --git a/diablo_2.0/T2016/grid_def.all b/diablo_2.0/T2016/grid_def.all new file mode 100644 index 0000000..95ffef7 --- /dev/null +++ b/diablo_2.0/T2016/grid_def.all @@ -0,0 +1,8 @@ +!----*|--.---------.---------.---------.---------.---------.---------.-|-------| +! grid_def, the portion of the header that defines the grid size for diablo. 
+!----*|--.---------.---------.---------.---------.---------.---------.-|-------| + PARAMETER (NX=512) + PARAMETER (NY=65) + PARAMETER (NZ=512) + PARAMETER (N_TH=1) +!----*|--.---------.---------.---------.---------.---------.---------.-|-------| diff --git a/diablo_2.0/T2016/grid_mpi b/diablo_2.0/T2016/grid_mpi new file mode 100644 index 0000000..a1f6b2f --- /dev/null +++ b/diablo_2.0/T2016/grid_mpi @@ -0,0 +1,3 @@ + PARAMETER(NPROCS=16) + PARAMETER(NPROCY=2) + PARAMETER(NPROCZ=NPROCS/NPROCY) diff --git a/diablo_2.0/T2016/input.dat b/diablo_2.0/T2016/input.dat new file mode 100644 index 0000000..218412c --- /dev/null +++ b/diablo_2.0/T2016/input.dat @@ -0,0 +1,35 @@ +C A data file for diablo. This is a generic, easy method of data +C input, as long as you don't change the number of comment lines. +C Note that the grid size is defined in the file grid_def. +C FLAVOR VERSION + 'Front' 2.0 +C USE_MPI USE_LES (note, also change these flags in Makefile): + .TRUE. .TRUE. +C Parameters: NU, BETA, LX, LY, LZ + 0.000001 1.0 1000.0 140.0 1000.0 +C Vertical viscosity scale factor NU_V_SCALE (NU_V=NU*NU_V_SCALE) + 1.0 +C Method: NUM_PER_DIR, CREATE_NEW_FLOW + 2 .TRUE. +C Time March: N_TIME_STEPS, TIME_LIMIT, DELTA_T, RESET_TIME, VARIABLE_DT, CFL, UPDATE_DT + 20000 500000 0.1 .FALSE. .TRUE. 0.5 1 +C I/O: VERBOSITY, SAVE_FLOW_INT, SAVE_STATS_INT, MOVIE + 4 100 10 .TRUE. +C Here include 6*N_TH lines, see below for format +C CREATE_FLOW_TH(1) Create new field or Read from DIABLO_TH.START + .TRUE. +C FILTER_TH(1) FILTER_INT(1) (If and how often to filter) + .FALSE. 10 +C RI_TAU(1) PR(1) + 1.0 1.0 + + +C When including scalar advection, include 6 lines like the following for each scalar + +C CREATE_FLOW_TH(1) Create new field or Read from DIABLO_TH.START + .TRUE. +C FILTER_TH(1) FILTER_INT(1) (If and how often to filter) + .TRUE. 10 +C RI_TAU(1) PR(1) BACKGROUND_TH(N) + 0.0 1.0 .FALSE. 
+ diff --git a/diablo_2.0/T2016/input_chan.dat b/diablo_2.0/T2016/input_chan.dat new file mode 100644 index 0000000..8b0ca40 --- /dev/null +++ b/diablo_2.0/T2016/input_chan.dat @@ -0,0 +1,60 @@ +C A data file for diablo. This is a generic, easy method of data +C input, as long as you don't change the number of comment lines. +C Note that the grid size is defined in the file grid_def. +C VERSION + 2.0 +C TIME_AD_METH + 1 +C LES: LES_MODEL_TYPE + 1 +C ICs: IC_TYPE, KICK + 4 0.001 +C Rotation: I_RO (or Coriolis parameter, f) + 0.0001 +C Upward vertical vector components, GRAV_X, GRAV_Y, GRAV_Z + 0.0 1.0 0.0 +C Forcing: F_TYPE, UBULK0, PX0, OMEGA0, AMP_OMEGA0 + 3 0.0 -1.0 6.28 0.0 +C BCs: U_BC_YMIN, U_BC_YMIN_C1, U_BC_YMIN_C2, U_BC_YMIN_C3 + 1 0.0 0.0 0.0 +C BCs: V_BC_YMIN, V_BC_YMIN_C1, V_BC_YMIN_C2, V_BC_YMIN_C3 + 0 0.0 0.0 0.0 +C BCs: W_BC_YMIN, W_BC_YMIN_C1, W_BC_YMIN_C2, W_BC_YMIN_C3 + 1 0.0 0.0 0.0 +C BCs: U_BC_YMAX, U_BC_YMAX_C1, U_BC_YMAX_C2, U_BC_YMAX_C3 + 1 0.0 0.0 0.0 +C BCs: V_BC_YMAX, V_BC_YMAX_C1, V_BC_YMAX_C2, V_BC_YMAX_C3 + 0 0.0 0.0 0.0 +C BCs: W_BC_YMAX, W_BC_YMAX_C1, W_BC_YMAX_C2, W_BC_YMAX_C3 + 1 0.0 0.0 0.0 +C Here include 6*N_TH lines, see below for format +C BCs: TH_BC_YMIN(1), TH_BC_YMIN_C1(1), TH_BC_YMIN_C2(1), TH_BC_YMIN_C3(1) + 1 0.0000018 0.0 0.0 +C BCs: TH_BC_YMAX(1), TH_BC_YMAX_C1(1), TH_BC_YMAX_C2(1), TH_BC_YMAX_C3(1) + 1 -0.0019 0.0 0.0 +C Background scalar gradients, DRHODX(1), DRHODZ(1) + 0.00000003 0.0 + + +C Description +C For channnel flows (NUM_PER_DIR=2): +C IC_TYPE specifies the functional form for the initial velocity +C KICK is a scale factor on the noise added when creating a new flow. +C F_TYPE=0 gives constant mass flux flow (maintaining UBULK0). +C F_TYPE=1 gives constant pressure gradient flow (PX0 constant). 
+C F_TYPE=2 is for an oscillatory pressure gradient of the form: +C PX0+AMP_OMEGA0*cos(OMEGA0*TIME) +C U_BC_YMIN is the BC TYPE on the U velocity component at the lower wall +C (0 for Dirichlet, 1 for Neumann) +C U_BC_YMIN_C1 is the value of the velocity (if Dirichlet) or it's gradient (if Neumann) + +C When including scalar advection, include 4 lines like the following for each scalar + +C BCs: TH_BC_YMIN(1), TH_BC_YMIN_C1(1), TH_BC_YMIN_C2(1), TH_BC_YMIN_C3(1) + 1 0.0 0.0 0.0 +C BCs: TH_BC_YMAX(1), TH_BC_YMAX_C1(1), TH_BC_YMAX_C2(1), TH_BC_YMAX_C3(1) + 1 0.0 0.0 0.0 + + + + diff --git a/diablo_2.0/T2016/input_per.dat b/diablo_2.0/T2016/input_per.dat new file mode 100644 index 0000000..ae29b42 --- /dev/null +++ b/diablo_2.0/T2016/input_per.dat @@ -0,0 +1,35 @@ +C A data file for diablo. This is a generic, easy method of data +C input, as long as you don't change the number of comment lines. +C Note that the grid size is defined in the file grid_def. +C VERSION + 1.0 +C TIME_AD_METH + 1 +C LES: LES_MODEL_TYPE + 2 +C ICs: IC_TYPE, KICK + 0 .0010 +C FLAGS: TRUCK, GUST, + .FALSE. .FALSE. +C BACKGROUND_GRAD(N): Include this and the next three lines for each scalar + .FALSE. +C BACTERIAL MOTILITY CHI(N): + 0.0 +C BACKGROUND_GRAD(N): Include one of the following lines for each scalar + .FALSE. +C BACTERIAL MOTILITY CHI(N): + 0.001 +C BACKGROUND_GRAD(N): Include one of the following lines for each scalar + .FALSE. +C BACTERIAL MOTILITY CHI(N): + 0.01 + + + + + +C Description +C For triply-periodic flows (NUM_PER_DIR=3): +C IC_TYPE specifies the functional form for the initial velocity +C KICK is a scale factor on the noise added when creating a new flow. +C BACKGROUND_GRAD(N) specifies if the scalar should be considered as a perturbation to a linear background gradient. This is designed to allow simulations of stratified flows in the triply periodic flow geometry. 
diff --git a/diablo_2.0/for/Makefile b/diablo_2.0/for/Makefile index da8e565..46d6175 100644 --- a/diablo_2.0/for/Makefile +++ b/diablo_2.0/for/Makefile @@ -77,11 +77,11 @@ NETCDF_o = no_netcdf.o endif diablo: $(MAIN) diablo_io.o periodic.o channel.o courant.o $(LES_o) \ - duct.o cavity.o fft.o rand.o set_ics.o save_stats.o user_rhs.o $(MPI) \ + duct.o cavity.o fft.o rand.o set_ics.o save_stats.o filter.o user_rhs.o $(MPI) \ $(HEADER) grid_def $(HDF5_o) $(COMPILER) $(COMPOPTS) $(MAIN) -o diablo \ diablo_io.o periodic.o channel.o courant.o $(LES_o) \ - duct.o cavity.o fft.o rand.o set_ics.o save_stats.o user_rhs.o \ + duct.o cavity.o fft.o rand.o set_ics.o save_stats.o filter.o user_rhs.o \ $(MPI) $(LINKOPTS) $(HDF5_o) diablo_io.o: diablo_io.f header grid_def @@ -99,6 +99,9 @@ set_ics.o: set_ics.f channel.o fft.o $(MPI) header grid_def save_stats.o: save_stats.f channel.o fft.o $(MPI) header grid_def $(COMPILER) $(COMPOPTS) -c save_stats.f +filter.o: filter.f channel.o fft.o $(MPI) header grid_def + $(COMPILER) $(COMPOPTS) -c filter.f + user_rhs.o: user_rhs.f channel.o fft.o $(MPI) header grid_def $(COMPILER) $(COMPOPTS) -c user_rhs.f diff --git a/diablo_2.0/for/channel.f b/diablo_2.0/for/channel.f index 955346c..b0a5f00 100644 --- a/diablo_2.0/for/channel.f +++ b/diablo_2.0/for/channel.f @@ -1447,6 +1447,8 @@ SUBROUTINE INPUT_CHAN READ(11,*) DRHODX(N), DRHODZ(N) END DO + if (RANK.eq.0) write(*,*) 'I_RO: ',I_RO + RETURN END diff --git a/diablo_2.0/for/diablo.f b/diablo_2.0/for/diablo.f index 80e9cae..4c19491 100644 --- a/diablo_2.0/for/diablo.f +++ b/diablo_2.0/for/diablo.f @@ -85,6 +85,14 @@ PROGRAM DIABLO TIME=TIME+DELTA_T FIRST_TIME=.FALSE. +! Optionally apply a filter to the scalar field + DO N=1,N_TH + IF (FILTER_TH(N) + & .AND.(MOD(TIME_STEP,FILTER_INT(N)).EQ.0)) THEN + CALL FILTER_CHAN(N) + END IF + END DO + ! Save statistics to an output file IF (MOD(TIME_STEP,SAVE_STATS_INT).EQ.0) THEN CALL SAVE_STATS(.FALSE.) 
diff --git a/diablo_2.0/for/diablo_io.f b/diablo_2.0/for/diablo_io.f index 172c695..dd5dbb4 100644 --- a/diablo_2.0/for/diablo_io.f +++ b/diablo_2.0/for/diablo_io.f @@ -205,7 +205,7 @@ SUBROUTINE READ_FLOW CHARACTER*55 FNAME_TH(N_TH) INTEGER I, J, K, N, NUM_PER_DIR_T - FNAME='diablo.start' +! FNAME='diablo.start' FNAME='start.h5' IF (RANK.EQ.0) & WRITE(6,*) 'Reading flow from ',FNAME @@ -317,7 +317,13 @@ SUBROUTINE SAVE_FLOW(FINAL) FNAME='end.h5' SAVE_PRESSURE=.TRUE. else - FNAME='out.h5' + FNAME='out.' + & //CHAR(MOD(TIME_STEP,100000)/10000+48) + & //CHAR(MOD(TIME_STEP,10000)/1000+48) + & //CHAR(MOD(TIME_STEP,1000)/100+48) + & //CHAR(MOD(TIME_STEP,100)/10+48) + & //CHAR(MOD(TIME_STEP,10)+48) + & //'.h5' end if if (FNAME(len_trim(FNAME)-2:len_trim(FNAME)).eq.".h5") then IF (NUM_PER_DIR.NE.2) THEN diff --git a/diablo_2.0/for/filter.f b/diablo_2.0/for/filter.f index 438af07..a967097 100644 --- a/diablo_2.0/for/filter.f +++ b/diablo_2.0/for/filter.f @@ -10,7 +10,7 @@ subroutine filter_chan(n) integer I,J,K,js,je,n ! Variables for horizontal filtering - real*8 sigma(0:NKX,0:TNKZ),sigma0 + real*8 sigma(0:NXP-1,0:TNKZ),sigma0 ! Variables for vertical filtering real*8 alpha @@ -18,11 +18,8 @@ subroutine filter_chan(n) ! Parameters for a larger stencil filter real*8 f_a,f_b,f_c - js=-1 - je=NY+1 - C Set the filtering constants for the horizontal direction - DO i=0,NKX + DO i=0,NXP-1 DO k=0,TNKZ sigma0=0.5d0*(1.d0+ & cos(sqrt((KX(i)*LX*1.d0/float(NX))**2.d0 @@ -35,63 +32,59 @@ subroutine filter_chan(n) C Do the spectral filtering in the horizontal DO K=0,TNKZ - DO I=0,NKX - DO J=js+1,je-1 + DO I=0,NXP-1 + DO J=JSTART_TH(N),JEND_TH(N) CTH(I,K,J,n)=CTH(I,K,J,n)*sigma(i,k) END DO END DO END DO +C Filter the passive scalar, TH in the vertical direction C Set the filtering constants - f_a=(1.d0/8.d0)*(5.d0+6.d0*alpha) - f_b=0.5d0*(1.d0+2.d0*alpha) - f_c=(-1.d0/8.d0)*(1.d0-2.d0*alpha) - - +! f_a=(1.d0/8.d0)*(5.d0+6.d0*alpha) +! f_b=0.5d0*(1.d0+2.d0*alpha) +! 
f_c=(-1.d0/8.d0)*(1.d0-2.d0*alpha) C First, zero the tridiagonal matrix components - DO I=0,NKX - DO J=1,NY - MATD_C(I,J)=1.d0 - MATL_C(I,J)=0.d0 - MATU_C(I,J)=0.d0 - VEC_C(I,J)=0.d0 - END DO - END DO - - -C Filter the passive scalar, TH in the vertical direction - DO K=1,TNKZ - DO I=1,NKX +! DO I=0,NKX +! DO J=1,NY +! MATD_C(I,J)=1.d0 +! MATL_C(I,J)=0.d0 +! MATU_C(I,J)=0.d0 +! VEC_C(I,J)=0.d0 +! END DO +! END DO +! DO K=1,TNKZ +! DO I=1,NKX C Construct the centered difference terms - DO J=2,NY-1 - MATL_C(I,J)=alpha - MATD_C(I,J)=1.d0 - MATU_C(I,J)=alpha - VEC_C(I,J)=f_a*CTH(I,K,J,n) - & +(f_b/2.d0)*(CTH(I,K,J+1,n)+CTH(I,K,J-1,n)) - & +(f_c/2.d0)*(CTH(I,K,J+2,n)+CTH(I,K,J-2,n)) - END DO +! DO J=2,NY-1 +! MATL_C(I,J)=alpha +! MATD_C(I,J)=1.d0 +! MATU_C(I,J)=alpha +! VEC_C(I,J)=f_a*CTH(I,K,J,n) +! & +(f_b/2.d0)*(CTH(I,K,J+1,n)+CTH(I,K,J-1,n)) +! & +(f_c/2.d0)*(CTH(I,K,J+2,n)+CTH(I,K,J-2,n)) +! END DO C Now, construct the equations for the boundary nodes - J=1 - MATL_C(I,J)=0.d0 - MATD_C(I,J)=1.d0 - MATU_C(I,J)=0.d0 - VEC_C(I,J)=CTH(I,K,J,n) - J=NY - MATL_C(I,J)=0.d0 - MATD_C(I,J)=1.d0 - MATU_C(I,J)=0.d0 - VEC_C(I,J)=CTH(I,K,J,n) - END DO +! J=1 +! MATL_C(I,J)=0.d0 +! MATD_C(I,J)=1.d0 +! MATU_C(I,J)=0.d0 +! VEC_C(I,J)=CTH(I,K,J,n) +! J=NY +! MATL_C(I,J)=0.d0 +! MATD_C(I,J)=1.d0 +! MATU_C(I,J)=0.d0 +! VEC_C(I,J)=CTH(I,K,J,n) +! END DO C Now, solve the tridiagonal system - CALL THOMAS_COMPLEX(MATL_C,MATD_C,MATU_C,VEC_C,NY,NKX) - DO I=1,NKX - DO J=js+1,je-1 - CTH(I,K,J,n)=VEC_C(I,J) - END DO - END DO +! CALL THOMAS_COMPLEX(MATL_C,MATD_C,MATU_C,VEC_C,NY,NKX) +! DO I=1,NKX +! DO J=JSTART_TH(N),JEND_TH(N) +! CTH(I,K,J,n)=VEC_C(I,J) +! END DO +! END DO C END DO K - END DO +! END DO return diff --git a/diablo_2.0/for/grid_def b/diablo_2.0/for/grid_def index e5a4616..76cc6d5 100644 --- a/diablo_2.0/for/grid_def +++ b/diablo_2.0/for/grid_def @@ -1,8 +1,8 @@ !----*|--.---------.---------.---------.---------.---------.---------.-|-------| ! 
grid_def, the portion of the header that defines the grid size for diablo. !----*|--.---------.---------.---------.---------.---------.---------.-|-------| - PARAMETER (NX=128) - PARAMETER (NY=33) - PARAMETER (NZ=16) - PARAMETER (N_TH=1) + PARAMETER (NX=256) + PARAMETER (NY=26) + PARAMETER (NZ=64) + PARAMETER (N_TH=9) !----*|--.---------.---------.---------.---------.---------.---------.-|-------| diff --git a/diablo_2.0/for/grid_def.all b/diablo_2.0/for/grid_def.all index ebaebad..ddc6b36 100644 --- a/diablo_2.0/for/grid_def.all +++ b/diablo_2.0/for/grid_def.all @@ -1,8 +1,8 @@ !----*|--.---------.---------.---------.---------.---------.---------.-|-------| ! grid_def, the portion of the header that defines the grid size for diablo. !----*|--.---------.---------.---------.---------.---------.---------.-|-------| - PARAMETER (NX=128) - PARAMETER (NY=65) - PARAMETER (NZ=16) - PARAMETER (N_TH=1) + PARAMETER (NX=256) + PARAMETER (NY=51) + PARAMETER (NZ=64) + PARAMETER (N_TH=9) !----*|--.---------.---------.---------.---------.---------.---------.-|-------| diff --git a/diablo_2.0/for/grid_mpi b/diablo_2.0/for/grid_mpi index c87fde0..a1f6b2f 100644 --- a/diablo_2.0/for/grid_mpi +++ b/diablo_2.0/for/grid_mpi @@ -1,3 +1,3 @@ - PARAMETER(NPROCS=4) + PARAMETER(NPROCS=16) PARAMETER(NPROCY=2) PARAMETER(NPROCZ=NPROCS/NPROCY) diff --git a/diablo_2.0/for/les.f b/diablo_2.0/for/les.f index 92ec5d2..53e9f37 100644 --- a/diablo_2.0/for/les.f +++ b/diablo_2.0/for/les.f @@ -21,7 +21,7 @@ subroutine les_chan real*8 U1_bar(0:NY+1) real*8 C_SMAG - parameter (C_SMAG=0.13d0) + parameter (C_SMAG=0.17d0) real*8 DELTA_Y(0:NY+1),DELTA_YF(0:NY+1) real*8 alpha_sgs,beta_sgs real*8 denominator_sum @@ -39,7 +39,7 @@ subroutine les_chan ! Here, alpha is the test/LES filter width ratio parameter (alpha_sgs=2.449d0) ! beta is the LES/grid filter width ratio - parameter (beta_sgs=1.d0) + parameter (beta_sgs=1.5d0) ! 
Set the indices that are used when adding the off-diagnoal SGS stress terms IF (RANKY.eq.NPROCY-1) then @@ -192,7 +192,7 @@ subroutine les_chan ! At GYF points: ! Constant Smagorinsky DELTA_YF(J)=-2.d0*C_SMAG**2.d0 - & *(DX(1)*beta_sgs*DYF(J)*2.d0*DZ(1)*beta_sgs)**(2.d0/3.d0) + & *(DX(1)*beta_sgs*DYF(J)*1.d0*DZ(1)*beta_sgs)**(2.d0/3.d0) ! Wall Damping ! DELTA_YF(J)= ! & -2.d0*(0.1d0*(1.d0-exp((-GYF(J)/(NU*25.d0))**3.d0)))**2.d0 diff --git a/diablo_2.0/for/mpi.f b/diablo_2.0/for/mpi.f index 70b1f9d..97fd5a8 100644 --- a/diablo_2.0/for/mpi.f +++ b/diablo_2.0/for/mpi.f @@ -325,12 +325,17 @@ SUBROUTINE GHOST_LES_MPI END IF ELSE -! HERE, NPROCY=1, so just apply the boundary conditions to set NU=0 at the -! top and bottom walls +! Here, NPROCY=1, so we just need to set the boundary values +! Set NU_T=0 at the lower wall DO K=0,NZP-1 DO I=0,NXM NU_T(I,K,1)=0.d0 NU_T(I,K,2)=0.d0 + END DO + END DO +! Set NU_T=0 at the upper wall + DO K=0,NZP-1 + DO I=0,NXM NU_T(I,K,NY)=0.d0 NU_T(I,K,NY+1)=0.d0 END DO @@ -1439,6 +1444,40 @@ SUBROUTINE INTEGRATE_Y_VAR(VAR,RES,COMM) END SUBROUTINE + SUBROUTINE INTEGRATE_Z_VAR(VAR,RES,COMM) + + INCLUDE 'header' + + INTEGER i,k,j,COMM + REAL*8 VAR(0:NX+1,0:NZP+1,0:NY+1),RES(0:NXM,1:NY) + +! Integrat the instantaneous mean profile numerically at GY points + IF (USE_MPI) THEN + do i=0,NXM + do j=1,NY + RES(i,j)=0. 
+ do k=0,NZP + RES(i,j)=RES(i,j)+VAR(i,k,j)*DZ(k) + end do + end do + end do + call MPI_ALLREDUCE(MPI_IN_PLACE,RES,NX*NY, + & MPI_DOUBLE_PRECISION,MPI_SUM,COMM,ierror) + call MPI_BARRIER(MPI_COMM_WORLD,IERROR) + ELSE + do i=0,NXM + do j=1,NY + do k=0,NZM + RES(i,j)=RES(i,j)+VAR(i,k,j)*DZ(k) + end do + end do + end do + END IF + RES=RES/LZ + + END SUBROUTINE + + SUBROUTINE END_RUN_MPI(FLAG) INCLUDE 'header' diff --git a/diablo_2.0/for/save_stats.f b/diablo_2.0/for/save_stats.f index 095436c..11a1f20 100644 --- a/diablo_2.0/for/save_stats.f +++ b/diablo_2.0/for/save_stats.f @@ -9,7 +9,8 @@ SUBROUTINE SAVE_STATS_CHAN(FINAL) CHARACTER*20 GNAME LOGICAL FINAL integer i,j,k,n - real*8 uc, ubulk + real*8 uc, ubulk,zmean + real*8 zvar(0:NZP) ! This variable is used to add up scalar diagnostics real*8 thsum(0:NY+1) @@ -533,17 +534,17 @@ SUBROUTINE SAVE_STATS_CHAN(FINAL) #ifdef HDF5 if (MOVIE) then FNAME='movie.h5' - if (n.eq.1) then if (USE_MPI) then call mpi_barrier(MPI_COMM_WORLD,ierror) end if +! 
CALL INTEGRATE_Z_VAR(TH(:,:,:,n),varxy,MPI_COMM_Z) IF (RANKZ.EQ.RANKZMOVIE) THEN do I=0,NXM do J=1,NY varxy(i,j)=TH(i,NzMovie,j,n) end do end do - GNAME='th1_xy' + GNAME='th'//CHAR(n+48)//'_xy' call writeHDF5_xyplane(FNAME,GNAME,varxy) END IF if (USE_MPI) then @@ -555,7 +556,7 @@ SUBROUTINE SAVE_STATS_CHAN(FINAL) varxz(i,j)=TH(i,j,NyMovie,n) end do end do - GNAME='th1_xz' + GNAME='th'//CHAR(n+48)//'_xz' call writeHDF5_xzplane(FNAME,GNAME,varxz) END IF if (USE_MPI) then @@ -566,44 +567,8 @@ SUBROUTINE SAVE_STATS_CHAN(FINAL) varzy(i,j)=TH(NxMovie,i,j,n) end do end do - GNAME='th1_zy' + GNAME='th'//CHAR(n+48)//'_zy' call writeHDF5_zyplane(FNAME,GNAME,varzy) - else if (n.eq.2) then - if (USE_MPI) then - call mpi_barrier(MPI_COMM_WORLD,ierror) - end if - IF (RANKZ.EQ.RANKZMOVIE) THEN - do I=0,NXM - do J=1,NY - varxy(i,j)=TH(i,NzMovie,j,n) - end do - end do - GNAME='th2_xy' - call writeHDF5_xyplane(FNAME,GNAME,varxy) - END IF - if (USE_MPI) then - call mpi_barrier(MPI_COMM_WORLD,ierror) - end if - IF (RANKY.EQ.RANKYMOVIE) THEN - do I=0,NXM - do J=0,NZP-1 - varxz(i,j)=TH(i,j,NyMovie,n) - end do - end do - GNAME='th2_xz' - call writeHDF5_xzplane(FNAME,GNAME,varxz) - END IF - if (USE_MPI) then - call mpi_barrier(MPI_COMM_WORLD,ierror) - end if - do I=0,NZP-1 - do J=1,NY - varzy(i,j)=TH(NxMovie,i,j,n) - end do - end do - GNAME='th2_zy' - call writeHDF5_zyplane(FNAME,GNAME,varzy) - end if END IF #endif @@ -703,6 +668,7 @@ SUBROUTINE SAVE_STATS_CHAN(FINAL) if (USE_MPI) then call mpi_barrier(MPI_COMM_WORLD,ierror) end if +! CALL INTEGRATE_Z_VAR(U1(:,:,:),varxy,MPI_COMM_Z) IF (RANKZ.EQ.RANKZMOVIE) THEN do I=0,NXM do J=1,NY @@ -716,6 +682,7 @@ SUBROUTINE SAVE_STATS_CHAN(FINAL) if (USE_MPI) then call mpi_barrier(MPI_COMM_WORLD,ierror) end if +! CALL INTEGRATE_Z_VAR(U2(:,:,:),varxy,MPI_COMM_Z) IF (RANKZ.EQ.RANKZMOVIE) THEN do I=0,NXM do J=1,NY @@ -729,6 +696,7 @@ SUBROUTINE SAVE_STATS_CHAN(FINAL) if (USE_MPI) then call mpi_barrier(MPI_COMM_WORLD,ierror) end if +! 
CALL INTEGRATE_Z_VAR(U3(:,:,:),varxy,MPI_COMM_Z) IF (RANKZ.EQ.RANKZMOVIE) THEN do I=0,NXM do J=1,NY @@ -743,6 +711,7 @@ SUBROUTINE SAVE_STATS_CHAN(FINAL) if (USE_MPI) then call mpi_barrier(MPI_COMM_WORLD,ierror) end if +! CALL INTEGRATE_Z_VAR(NU_T(:,:,:),varxy,MPI_COMM_Z) IF (RANKZ.EQ.RANKZMOVIE) THEN do I=0,NXM do J=1,NY diff --git a/diablo_2.0/for/set_ics.f b/diablo_2.0/for/set_ics.f index 7e56be9..3926b17 100644 --- a/diablo_2.0/for/set_ics.f +++ b/diablo_2.0/for/set_ics.f @@ -83,6 +83,18 @@ SUBROUTINE CREATE_FLOW_CHAN END DO END DO END DO + ELSE IF (IC_TYPE.eq.5) THEN + DO J=0,NY + DO K=0,NZP-1 + DO I=0,NXM + U3(i,k,j)=GYF(J)*(1.56d-5/250.d0*(1.d0-(tanh((GX(i)-LX/2.d0) + & /250.d0))**2.d0)-DRHODX(1))*RI(1)/I_RO + U1(i,k,j)=0.d0 + U2(i,k,j)=0.d0 +! P(i,k,j)=GYF(J)*1.d-3*tanh((GX(i)-LX/2.d0)/250.d0)*RI(1) + end do + end do + end do else WRITE(*,*) 'WARNING, unsupported IC_TYPE in CREATE_FLOW' end if @@ -258,6 +270,7 @@ SUBROUTINE CREATE_TH_CHAN END DO END DO ELSE IF (IC_TYPE.eq.4) THEN + IF (N.eq.1) THEN ! For Front case, specify given RI_B profile DO K=0,NZP-1 DO I=0,NXM @@ -279,6 +292,25 @@ SUBROUTINE CREATE_TH_CHAN END DO END DO END DO + ELSE +! Passive tracers + DO K=0,NZP-1 + DO I=0,NXM + DO J=1,NY + TH(I,K,J,N)=exp(GYF(J)/10.d0) + END DO + END DO + END DO + END IF + ELSE IF (IC_TYPE.eq.5) THEN + DO K=0,NZP-1 + DO I=0,NXM + DO J=1,NY + TH(I,K,J,N)=1.56d-5*tanh((GX(i)-LX/2.d0)/250.d0) + & -DRHODX(N)*GX(i) + END DO + END DO + END DO ELSE WRITE(*,*) 'WARNING, unsupported IC_TYPE in CREATE_FLOW' diff --git a/diablo_2.0/for/user_rhs.f b/diablo_2.0/for/user_rhs.f index 7fb5e68..836cce6 100644 --- a/diablo_2.0/for/user_rhs.f +++ b/diablo_2.0/for/user_rhs.f @@ -11,64 +11,7 @@ SUBROUTINE USER_RHS_CHAN_PHYSICAL real*8 alpha -! For example, to add a linear damping term (e.g. -alpha*U) to the RHS: -! alpha=-0.1d0 -! DO J=JSTART,JEND -! DO K=0,NZP-1 -! DO I=0,NXM -! S1(I,K,J)=-alpha*U1(I,K,J) -! END DO -! END DO -! END DO -! Convert to Fourier space -! 
CALL FFT_XZ_TO_FOURIER(S1,CS1,0,NY+1) -! DO J=JSTART,JEND -! DO K=0,TNKZ -! DO I=0,NXP-1 -! CF1(I,K,J)=CF1(I,K,J)+CS1(I,K,J) -! END DO -! END DO -! END DO - -! For U2 do this... -! Note that the only thing that changes are the bounds of the J index -! DO J=2,NY -! DO K=0,NZP-1 -! DO I=0,NXM -! S1(I,K,J)=-alpha*U2(I,K,J) -! END DO -! END DO -! END DO -! Convert to Fourier space -! CALL FFT_XZ_TO_FOURIER(S1,CS1,0,NY+1) -! DO J=2,NY -! DO K=0,TNKZ -! DO I=0,NXP-1 -! CF2(I,K,J)=CF2(I,K,J)+CS1(I,K,J) -! END DO -! END DO -! END DO - -! For scalars, do this... -! Loop over all scalars -! DO N=1,N_TH -! DO J=JSTART,JEND -! DO K=0,NZP-1 -! DO I=0,NXM -! S1(I,K,J)=-alpha*TH(I,K,J,N) -! END DO -! END DO -! END DO -! Convert to Fourier space -! CALL FFT_XZ_TO_FOURIER(S1,CS1,0,NY+1) -! DO J=JSTART,JEND -! DO K=0,TNKZ -! DO I=0,NXP-1 -! CFTH(I,K,J,N)=CFTH(I,K,J,N)+CS1(I,K,J) -! END DO -! END DO -! END DO -! END DO +! CALL SLIP_VEL RETURN END @@ -88,7 +31,7 @@ SUBROUTINE USER_RHS_CHAN_FOURIER ! Advection owing to thermal wind IF ((FLAVOR.eq.'Front').and.(I_RO.ne.0.d0)) THEN DO N=1,N_TH -! Loop over all scalar +! Loop over all scalars ! Add thermal wind advection to the momentum equations do j=JSTART,JEND @@ -143,10 +86,10 @@ SUBROUTINE USER_RHS_CHAN_FOURIER END IF ! Add sponge layer forcing - DO N=1,N_TH - CALL SPONGE_TH(N) - END DO - CALL SPONGE_VEL +! DO N=1,N_TH +! CALL SPONGE_TH(N) +! END DO +! CALL SPONGE_VEL RETURN END @@ -274,9 +217,9 @@ SUBROUTINE SPONGE_TH(N) TH_0(0)=0.d0 do j=1,NY+1 RI_B(J)=20.d0 - TH_0(J)=TH_0(J-1) - & +DY(J)*RI_B(J)*(RI(N)*DRHODX(N))**2.d0 - & /I_RO**2.d0/RI(N) + TH_0(J)=(GYF(J)-GYF(1))* + & RI_B(J)*(RI(N)*DRHODX(N))**2.d0 + & /I_RO**2.d0/RI(N) end do else do j=0,NY+1 @@ -287,9 +230,9 @@ SUBROUTINE SPONGE_TH(N) ! Add damping to R-K terms ! 
Damp the perturbations towards 0 do k=0,TNKZ - do i=0,NKX - if ((i.ne.0).or.(k.ne.0)) then - do j=0,NY+1 + do i=0,NXP-1 + if ((RANKZ.ne.0).or.(i.ne.0).or.(k.ne.0)) then + do j=JSTART_TH(N),JEND_TH(N) CFTH(i,k,j,n)=CFTH(i,k,j,n) & -SPONGE_SIGMA(j)*(CTH(i,k,j,n)-0.) end do @@ -297,10 +240,12 @@ SUBROUTINE SPONGE_TH(N) end do end do ! Damp the mean gradient towards TH_0 - do j=0,NY+1 + if (RANKZ.eq.0) then + do j=JSTART_TH(N),JEND_TH(N) CFTH(0,0,j,n)=CFTH(0,0,j,n)-SPONGE_SIGMA(j) & *(CTH(0,0,j,n)-TH_0(J)) end do + end if return end @@ -349,13 +294,13 @@ SUBROUTINE SPONGE_VEL ! Add damping function to explicit R-K do k=0,TNKZ - do i=0,NKX - if ((i.ne.0).or.(k.ne.0)) then + do i=0,NXP-1 + if ((RANKZ.ne.0).or.(i.ne.0).or.(k.ne.0)) then do j=jstart,jend CF1(I,K,J)=CF1(I,K,J)-SPONGE_SIGMA(j)*(CU1(i,k,j)-0.d0) CF3(I,K,J)=CF3(I,K,J)-SPONGE_SIGMA(j)*(CU3(i,k,j)-0.d0) end do - do j=1,NY + do j=2,NY CF2(I,K,J)=CF2(I,K,J)- & 0.5*(SPONGE_SIGMA(j)+SPONGE_SIGMA(j+1))*(CU2(i,k,j)-0.d0) end do @@ -363,16 +308,103 @@ SUBROUTINE SPONGE_VEL end do end do ! Damp mean flow + if (RANKZ.eq.0) then do j=jstart,jend CF1(0,0,j)=CF1(0,0,j)-SPONGE_SIGMA(j)*(CU1(0,0,j)-U1_0(j)) CF3(0,0,j)=CF3(0,0,j)-SPONGE_SIGMA(j)*(CU3(0,0,j)-U3_0(j)) end do - do j=1,NY + do j=2,NY CF2(0,0,j)=CF2(0,0,j)-SPONGE_SIGMA(j)*(CU2(0,0,j)-U2_0(j)) end do + end if return end + SUBROUTINE SLIP_VEL +! This subroutine adds advection by a slip velocity to some scalars + include 'header' + integer i,j,k,n,J1_TH(1:N_TH),J2_TH(1:N_TH) + real*8 W_S(0:NY+1,1:N_TH) + +! Set indices corresponding to start and end of GYF grid + do n=1,N_TH + IF (RANKY.eq.NPROCY-1) then +! We are at the upper wall + J1_TH(n)=JSTART_TH(n) + J2_TH(n)=NY-1 + ELSE IF (RANKY.eq.0) then +! We are at the lower wall + J1_TH(n)=2 + J2_TH(n)=JEND_TH(n) + ELSE +! We are on a middle process + J1_TH(n)=JSTART_TH(n) + J2_TH(n)=JEND_TH(n) + END IF + end do + +! 
First, set the slip velocity + do j=0,NY+1 + W_S(j,1)=0.d0 + W_S(j,2)=0.d0 + W_S(j,3)=0.00005d0 + W_S(j,4)=0.0005d0 + W_S(j,5)=0.005d0 + end do + + IF (RANKY.eq.NPROCY-1) THEN +! We are on a process at the top boundary +! Set the slip velocity to zero at GY(NY) (and ghost cells) + do n=1,N_TH + W_S(NY,n)=0.d0 + W_S(NY+1,n)=0.d0 + end do + ELSE IF (RANKY.eq.0) THEN +! We are on a process at the bottom boundary +! Set the slip velocit to zero at GY(2) (and ghost cells) + do n=1,N_TH + W_S(0,n)=0.d0 + W_S(1,n)=0.d0 + W_S(2,n)=0.d0 + end do + END IF + + do n=1,N_TH + DO J=J1_TH(N),J2_TH(N) + DO K=0,NZP-1 + DO I=0,NXM +! Central differencing +! S1(I,K,J)= +! & ((TH(I,K,J+1,N)*W_S(J+1,N) + TH(I,K,J,N)*W_S(J+1,N) +! & -TH(I,K,J,N)*W_S(J,N)-TH(I,K,J-1,N)*W_S(J,n))/(2.d0*DYF(J))) +! Second order Upwinding +! S1(I,K,J)=(W_S(J+1,N)*TH(I,K,J,N) +! & -W_S(J,N)*(TH(I,K,J,N)+TH(I,K,J-1,N))/2.d0) +! & /(GYF(j)-GY(j)) +! First order upwinding + S1(I,K,J)=(W_S(J+1,N)*TH(I,K,J,N) + & -W_S(J,N)*TH(I,K,J-1,N)) + & /(GYF(j)-GYF(j-1)) + +! S1(I,K,J)=0.5d0*(W_S(J+1,N)+W_S(J,N)) +! 
& *(TH(I,K,J,N)-TH(I,K,J-1,N))/(GYF(J)-GYF(J-1)) + END DO + END DO + END DO + CALL FFT_XZ_TO_FOURIER(S1,CS1,0,NY+1) + DO J=J1_TH(N),J2_TH(N) + DO K=0,TNKZ + DO I=0,NXP-1 + CFTH(I,K,J,N)=CFTH(I,K,J,N) - CS1(I,K,J) + END DO + END DO + END DO + end do + + RETURN + END + + diff --git a/diablo_2.0/post_process/matlab/plot_movie_xy.m b/diablo_2.0/post_process/matlab/plot_movie_xy.m index 1356d26..9b36a42 100644 --- a/diablo_2.0/post_process/matlab/plot_movie_xy.m +++ b/diablo_2.0/post_process/matlab/plot_movie_xy.m @@ -1,12 +1,14 @@ % This script shows how to load in 2D slices and make a movie of the simulation output % Run after readmean.m -LX=30; -NX=128; +LX=1000; +NX=512; x=linspace(0,LX,NX); filename=[base_dir '/movie.h5']; +drhodx=3e-8; + for k=1:nk k if (k<10) @@ -19,13 +21,22 @@ timename=[int2str(k)]; end -varname=['/th1_xy/' timename]; +varname_th=['/th1_xy/' timename]; +varname_u=['/u_xy/' timename]; %varname=['/nu_t_xy/' timename]; -A=h5read(filename,varname); +A_th=h5read(filename,varname_th); +A_u=h5read(filename,varname_u); + +for i=1:size(A_th,1) + A_th(i,:)=A_th(i,:)+drhodx*x(i); +end -pcolor(x,gyf,A'); shading interp; -%caxis([-1.5 1.5]); +pcolor(x,gyf,A_u'); shading interp; +hold on +contour(x,gyf,A_th',linspace(0,3e-4,40),'w-'); +caxis([-0.02 0.02]); +title(['time=' num2str(tii(k)/3600) ' hours']); axis tight shading interp diff --git a/diablo_2.0/post_process/matlab/plot_movie_xz.m b/diablo_2.0/post_process/matlab/plot_movie_xz.m index afb9bca..90cdd96 100644 --- a/diablo_2.0/post_process/matlab/plot_movie_xz.m +++ b/diablo_2.0/post_process/matlab/plot_movie_xz.m @@ -1,19 +1,20 @@ % Run after readmean.m -LX=30; -NX=128; -LZ=4; -NZ=16; +LX=1000; +NX=512; +LZ=1000; +NZ=512; filename=[base_dir '/movie.h5']; % Background density gradient -drhodx=0.0; +drhodx=0.00000003; +%drhodx=0.0; drhodz=0.0; x=linspace(0,LX,NX); z=linspace(0,LZ,NZ); -for k=1:nk +for k=400:400 k if (k<10) timename=['000' int2str(k)]; @@ -26,36 +27,66 @@ end varname=['/th1_xz/' 
timename]; -A_th=h5read(filename,varname); -for j=1:size(A_th,2) - A_th(:,j)=A_th(:,j)+drhodz*z(j); - end +A_th1=h5read(filename,varname); +for j=1:size(A_th1,2) + A_th1(:,j)=A_th1(:,j)+drhodz*z(j); +end +for i=1:size(A_th1,1) + A_th1(i,:)=A_th1(i,:)+drhodx*x(i); +end +varname=['/th4_xz/' timename]; +A_th4=h5read(filename,varname); +varname=['/th2_xz/' timename]; +A_th2=h5read(filename,varname); + varname=['/v_xz/' timename]; A_v=h5read(filename,varname); varname=['/w_xz/' timename]; A_w=h5read(filename,varname); +varname=['/u_xz/' timename]; +A_u=h5read(filename,varname); -subplot(2,1,1) -pcolor(x,z,A_th'); shading interp; +subplot(2,2,1) +pcolor(x,z,A_th1'); shading interp; set(gca,'FontName','Times','FontSize',14); -xlabel('x'); ylabel('z'); title(['b, t=' num2str(tii(k)) ]); -caxis([-1 1]); +xlabel('x'); ylabel('z'); title(['b, t=' num2str(floor(tii(k)/3600)) ' hours' ]); +axis equal; axis tight; +caxis([1.4e-4 1.8e-4]); colormap(jet(256)); colorbar -subplot(2,1,2) +subplot(2,2,2) pcolor(x,z,A_v'); shading interp; set(gca,'FontName','Times','FontSize',14); -xlabel('x'); ylabel('z'); title(['v, t=' num2str(tii(k)) ]); -caxis([-0.15 0.15]); +xlabel('x'); ylabel('z'); title(['v']); +axis equal; axis tight; +caxis([-0.01 0.01]); +colormap(jet(256)); +colorbar + +subplot(2,2,3) +pcolor(x,z,A_th2'); shading interp; +set(gca,'FontName','Times','FontSize',14); +xlabel('x'); ylabel('z'); title(['th2']); +axis equal; axis tight; +caxis([0 1]); +colormap(jet(256)); +colorbar + +subplot(2,2,4) +pcolor(x,z,A_th4'); shading interp; +set(gca,'FontName','Times','FontSize',14); +xlabel('x'); ylabel('z'); title(['th4']); +axis equal; axis tight; +caxis([0 1]); colormap(jet(256)); colorbar colorbar M(k)=getframe(gcf); -clf; +%clf; end diff --git a/diablo_2.0/post_process/matlab/readmean.m b/diablo_2.0/post_process/matlab/readmean.m index f3e5770..8746080 100644 --- a/diablo_2.0/post_process/matlab/readmean.m +++ b/diablo_2.0/post_process/matlab/readmean.m @@ -1,18 +1,19 @@ % 
Reads in statistics outputted by diablo % User settings.... % Set the run directory if it hasn't already been defined -if (~exist(base_dir)) - base_dir='../../KH_test'; +if (~exist('base_dir')) + base_dir='/data/oceanus/jrt51/Crowe/bz0p001'; end +run_dir='/data/oceanus/jrt51/Crowe'; % Set the grid and domain size in the y-direction %NP=input('Enter the number of processes in the y-direction: '); -NP=2; +NP=1; %NY_S=input('Insert the number of points per process in the y-direction: '); -NY_S=26; +NY_S=51; % Enter the number of scalars N_TH=1; % Enter the viscosity from input.dat -NU=0.001; +NU=1e-6; % Enter the Prandtl number Pr=1; kappa=NU/Pr; @@ -232,7 +233,7 @@ hke_int=trapz(gyf,(urms.^2+wrms.^2)/2,1); vke_int=trapz(gyf,vrms.^2/2,1); for n=1:N_TH - tpe_int(:,n)=RI(n)*trapz(gyf,thrms(:,:,n).^2,1)./(thme(end,:,n)-thme(1,:,n))/2; + tpe_int(1:nk,n)=RI(n)*trapz(gyf,thrms(:,1:nk,n).^2,1)./(thme(end,1:nk,n)-thme(1,1:nk,n))/2; end thv_int=trapz(gyf,thv,1); thrms_int=trapz(gyf,thrms,1); @@ -240,6 +241,10 @@ for j=2:NY gy(j)=(gyf(j)+gyf(j-1))/2; end + +% Optionally, get GY from hdf5 grid file +%gy=h5read([run_dir '/grid.h5'],'/grids/y'); + for j=2:NY-1 dyf(j)=(gy(j+1)-gy(j)); end diff --git a/diablo_2.0/post_process/matlab/readmean_all.m b/diablo_2.0/post_process/matlab/readmean_all.m index c3d61c7..9e46142 100644 --- a/diablo_2.0/post_process/matlab/readmean_all.m +++ b/diablo_2.0/post_process/matlab/readmean_all.m @@ -1,15 +1,17 @@ -% This script illustrates how to combine several mean files -% Before running this, comment out the definition of "base_dir" in readmean.m +%nruns=4; +%run_dir='./double_front_jet_thermocline'; +nruns=2; +run_dir='/local/data/public/jrt51/T2016_slip'; -nruns=3; -run_dir='../../KH_test'; +tstart_save=4*3600*24; +tend_save=5*3600*24; kindex=1; for irun=1:nruns irun - base_dir=[run_dir '/run' num2str(irun) '/']; + base_dir=[run_dir '/run' num2str(irun)]; readmean; - + nk % Do the folowing for each variable you want to save 
tii_save(kindex:kindex+nk-1)=tii(1:nk); clear tii; @@ -26,6 +28,21 @@ urms_save(:,kindex:kindex+nk-1)=urms(:,1:nk); clear urms; + ume_save(:,kindex:kindex+nk-1)=ume(:,1:nk); + clear ume; + + wme_save(:,kindex:kindex+nk-1)=wme(:,1:nk); + clear wme; + + thme_save(:,kindex:kindex+nk-1,1:N_TH)=thme(:,1:nk,1:N_TH); + clear thme; + + thrms_save(:,kindex:kindex+nk-1,1:N_TH)=thrms(:,1:nk,1:N_TH); + clear thrms; + + thv_save(:,kindex:kindex+nk-1,1:N_TH)=thv(:,1:nk,1:N_TH); + clear thv; + uw_save(:,kindex:kindex+nk-1)=uw(:,1:nk); clear uw; @@ -35,10 +52,48 @@ dudy_save(:,kindex:kindex+nk-1)=dudy(:,1:nk); clear dudy; - dthdy_save(:,kindex:kindex+nk-1,1)=dthdy(:,1:nk,1); + dthdy_save(:,kindex:kindex+nk-1,1:N_TH)=dthdy(:,1:nk,1:N_TH); clear dthdy; + kappa_t_save(:,kindex:kindex+nk-1,1:N_TH)=kappa_t(:,1:nk,1:N_TH); + clear kappa_t; + kindex=kindex+nk - nk end + +% Reset nk to the full number of timesteps +nk=length(tii_save); + +% Get the time index based on start time +kstart=0; +for k=1:nk + if (tii_save(k) <= tstart_save) + kstart=k; + end +end +if (kstart == 0) + kstart=1; +end +'Start of time averaging window: ',tii_save(kstart) + +% Get the time index based on end time (if defined) +if exist('tend_save') +kend=0; +for k=1:nk + if (tii_save(k) <= tend_save) + kend=k; + end +end +if (kend == 0) + kend=1; +end +else +kend=nk; +end +'End of time averaging window: ',tii_save(kend) + +thv_mean(:,:)=squeeze(trapz(tii_save(kstart:kend),thv_save(:,kstart:kend,:),2)); +dthdy_mean(:,:)=squeeze(trapz(tii_save(kstart:kend),dthdy_save(:,kstart:kend,:),2)); +kappa_t_mean(:,:)=squeeze(trapz(tii_save(kstart:kend),kappa_t_save(:,kstart:kend,:),2)); + diff --git a/diablo_2.0/setup_run b/diablo_2.0/setup_run old mode 100644 new mode 100755 index 91b6d00..2266473 --- a/diablo_2.0/setup_run +++ b/diablo_2.0/setup_run @@ -1,5 +1,5 @@ #!/bin/bash -rundir=/data/proteus/jrt51/KH_test +rundir=/local/data/public/jrt51/T2016_slip codedir=./for echo $rundir @@ -15,14 +15,15 @@ module load hdf5/parallel 
module load mpich/3.0.4 # compile the code +make clean make # copy the executable back to the run dir cp diablo $rundir/diablo # to run the code, you could do one of the following -# cd $rundir +cd $rundir # for parallel run: -# mpirun -np 4 diablo >output.dat & +mpirun -np 4 diablo >output.dat & # for serial run: # diablo >output.dat & From 99e04ee9ea8f23b3eca207f7e867bf635db5c76c Mon Sep 17 00:00:00 2001 From: johnryantaylor Date: Thu, 22 Jun 2017 09:20:08 +0100 Subject: [PATCH 23/66] Added another index to output time --- diablo_2.0/for/diablo_io.f | 1 + 1 file changed, 1 insertion(+) diff --git a/diablo_2.0/for/diablo_io.f b/diablo_2.0/for/diablo_io.f index dd5dbb4..03aa424 100644 --- a/diablo_2.0/for/diablo_io.f +++ b/diablo_2.0/for/diablo_io.f @@ -318,6 +318,7 @@ SUBROUTINE SAVE_FLOW(FINAL) SAVE_PRESSURE=.TRUE. else FNAME='out.' + & //CHAR(MOD(TIME_STEP,1000000)/100000+48) & //CHAR(MOD(TIME_STEP,100000)/10000+48) & //CHAR(MOD(TIME_STEP,10000)/1000+48) & //CHAR(MOD(TIME_STEP,1000)/100+48) From d6536d1dcc941a88a7bfae119849bbdc0eea90e1 Mon Sep 17 00:00:00 2001 From: johnryantaylor Date: Thu, 22 Jun 2017 09:21:11 +0100 Subject: [PATCH 24/66] Changed user_rhs to add sponge layer for TF2010 case --- diablo_2.0/for/set_ics.f | 4 ++-- diablo_2.0/for/user_rhs.f | 33 +++++++++++---------------------- 2 files changed, 13 insertions(+), 24 deletions(-) diff --git a/diablo_2.0/for/set_ics.f b/diablo_2.0/for/set_ics.f index 3926b17..25e2b87 100644 --- a/diablo_2.0/for/set_ics.f +++ b/diablo_2.0/for/set_ics.f @@ -249,8 +249,8 @@ SUBROUTINE CREATE_TH_CHAN ELSE IF ((TH_BC_YMIN(N).EQ.1) & .AND.(TH_BC_YMAX(N).EQ.1)) THEN DO J=1,NY -! Linear profile with slope corresponding to upper value - TH(I,K,J,N)=TH_BC_YMAX_C1(N)*GYF(J) +! 
Linear profile with slope corresponding to lower value + TH(I,K,J,N)=TH_BC_YMIN_C1(N)*GYF(J) END DO ELSE IF (RANK.EQ.0) then diff --git a/diablo_2.0/for/user_rhs.f b/diablo_2.0/for/user_rhs.f index 836cce6..70dd316 100644 --- a/diablo_2.0/for/user_rhs.f +++ b/diablo_2.0/for/user_rhs.f @@ -191,17 +191,15 @@ SUBROUTINE SPONGE_TH(N) ! The following variables will store the background state real*8 TH_0(-1:NY+1) - real*8 RI_B(0:NY+1) - ! This variable will hold the forcing rate real*8 SPONGE_SIGMA(0:NY+1) ! Set the amplitude of the sponge - SPONGE_AMP=0.0001d0 + SPONGE_AMP=0.005d0 ! Set the top of the sponge layer in physical units - L_sponge=-120.d0 + L_sponge=-40.d0 ! Set the bottom of the computational domain in physical units - L_bottom=-140.d0 + L_bottom=-50.d0 DO J=0,NY+1 ! Quadratic damping at lower wall if (GYF(J).lt.L_sponge) then @@ -212,20 +210,11 @@ SUBROUTINE SPONGE_TH(N) end if END DO -! For MLI latmix - if (n.eq.1) then - TH_0(0)=0.d0 - do j=1,NY+1 - RI_B(J)=20.d0 - TH_0(J)=(GYF(J)-GYF(1))* - & RI_B(J)*(RI(N)*DRHODX(N))**2.d0 - & /I_RO**2.d0/RI(N) - end do - else - do j=0,NY+1 - TH_0(j)=0.d0 - end do - end if +! Set the profile for relaxing the mean TH + DO J=0,NY+1 + TH_0(J)=TH_BC_YMIN_C1(N)*GYF(J) + END DO + ! Add damping to R-K terms ! Damp the perturbations towards 0 @@ -267,11 +256,11 @@ SUBROUTINE SPONGE_VEL real*8 SPONGE_SIGMA(0:NY+1) ! Set the amplitude of the sponge - SPONGE_AMP=0.0001d0 + SPONGE_AMP=0.005d0 ! Set the top of the sponge layer in physical units - L_sponge=-120.d0 + L_sponge=-40.d0 ! Set the bottom of the computational domain in physical units - L_bottom=-140.d0 + L_bottom=-50.d0 DO J=0,NY+1 ! 
Quadratic damping at lower wall if (GYF(J).lt.L_sponge) then From ba711a2abea8f5dfdab798450bdaaeace0979549 Mon Sep 17 00:00:00 2001 From: johnryantaylor Date: Thu, 22 Jun 2017 09:24:44 +0100 Subject: [PATCH 25/66] Added input files for T+F2010 test case --- diablo_2.0/TF2010/grid.h5 | Bin 0 -> 2848 bytes diablo_2.0/TF2010/grid_def | 8 +++++ diablo_2.0/TF2010/grid_def.all | 8 +++++ diablo_2.0/TF2010/grid_mpi | 3 ++ diablo_2.0/TF2010/input.dat | 35 ++++++++++++++++++ diablo_2.0/TF2010/input_chan.dat | 60 +++++++++++++++++++++++++++++++ diablo_2.0/TF2010/input_per.dat | 35 ++++++++++++++++++ 7 files changed, 149 insertions(+) create mode 100644 diablo_2.0/TF2010/grid.h5 create mode 100644 diablo_2.0/TF2010/grid_def create mode 100644 diablo_2.0/TF2010/grid_def.all create mode 100644 diablo_2.0/TF2010/grid_mpi create mode 100644 diablo_2.0/TF2010/input.dat create mode 100644 diablo_2.0/TF2010/input_chan.dat create mode 100644 diablo_2.0/TF2010/input_per.dat diff --git a/diablo_2.0/TF2010/grid.h5 b/diablo_2.0/TF2010/grid.h5 new file mode 100644 index 0000000000000000000000000000000000000000..bb3d1a772ff817879db8aea9430c1c80adce0ad4 GIT binary patch literal 2848 zcmeD5aB<`1lHy_j0S*oZ76t(@6Gr@p0tIe}2#gPtPk=HQp>zk7Ucm%mFfxE31A_!q zTo7tLy1I}cS62q0N|^aD8mf)q0V{;X0TURdM^p%SxH<-aJRAY_H7u2$fTlB8V5Api zrW6A?u=MBwlOU7Em3}>#AqHkZ(xfO#unEp7x4lV5oq~3=D;y zt~A&O#vMc5){1QNtA8^+5424=p~W=K<3M+QG>?{?snU)|L$vTg_N zEJ_uO^mRQjH{w9Z(K?p{?9$)<`)+bR@N!bYW9Ju62j0!Pa^kas(}4#I1-O>QI3AGw zVrxEcw!?vgrx|CM?y)~$=+^RbrH9>t<3TfyGu*W~z&%qcXIiE8fyU=w&hD18Iw1RM z$&FwkZYb^mfc03E?Bi}d zvIo{4HnsScE_GmWf!(zDH{u6uxAQ08?G-&RWBHAhW!}OEyz_TE8mb5$=vDsDsiwzw zKq&3h1y?t&1KXx-dw#i(?ZB?(Vm2>WnGei}axruX{=fe^xB26s4?p&^eh*|!xc_PY jd8TFOPguU%f8%oEy!}(}?ca0J)hJ}`;r$Gkz_dL8e03W_ literal 0 HcmV?d00001 diff --git a/diablo_2.0/TF2010/grid_def b/diablo_2.0/TF2010/grid_def new file mode 100644 index 0000000..3e44889 --- /dev/null +++ b/diablo_2.0/TF2010/grid_def @@ -0,0 +1,8 @@ 
+!----*|--.---------.---------.---------.---------.---------.---------.-|-------| +! grid_def, the portion of the header that defines the grid size for diablo. +!----*|--.---------.---------.---------.---------.---------.---------.-|-------| + PARAMETER (NX=256) + PARAMETER (NY=26) + PARAMETER (NZ=64) + PARAMETER (N_TH=1) +!----*|--.---------.---------.---------.---------.---------.---------.-|-------| diff --git a/diablo_2.0/TF2010/grid_def.all b/diablo_2.0/TF2010/grid_def.all new file mode 100644 index 0000000..c7cf5bf --- /dev/null +++ b/diablo_2.0/TF2010/grid_def.all @@ -0,0 +1,8 @@ +!----*|--.---------.---------.---------.---------.---------.---------.-|-------| +! grid_def, the portion of the header that defines the grid size for diablo. +!----*|--.---------.---------.---------.---------.---------.---------.-|-------| + PARAMETER (NX=256) + PARAMETER (NY=51) + PARAMETER (NZ=64) + PARAMETER (N_TH=1) +!----*|--.---------.---------.---------.---------.---------.---------.-|-------| diff --git a/diablo_2.0/TF2010/grid_mpi b/diablo_2.0/TF2010/grid_mpi new file mode 100644 index 0000000..c87fde0 --- /dev/null +++ b/diablo_2.0/TF2010/grid_mpi @@ -0,0 +1,3 @@ + PARAMETER(NPROCS=4) + PARAMETER(NPROCY=2) + PARAMETER(NPROCZ=NPROCS/NPROCY) diff --git a/diablo_2.0/TF2010/input.dat b/diablo_2.0/TF2010/input.dat new file mode 100644 index 0000000..5a9a090 --- /dev/null +++ b/diablo_2.0/TF2010/input.dat @@ -0,0 +1,35 @@ +C A data file for diablo. This is a generic, easy method of data +C input, as long as you don't change the number of comment lines. +C Note that the grid size is defined in the file grid_def. +C FLAVOR VERSION + 'Front' 2.0 +C USE_MPI USE_LES (note, also change these flags in Makefile): + .TRUE. .TRUE. +C Parameters: NU, BETA, LX, LY, LZ + 0.000001 1.0 1000.0 50.0 250.0 +C Vertical viscosity scale factor NU_V_SCALE (NU_V=NU*NU_V_SCALE) + 1.0 +C Method: NUM_PER_DIR, CREATE_NEW_FLOW + 2 .TRUE. 
+C Time March: N_TIME_STEPS, TIME_LIMIT, DELTA_T, RESET_TIME, VARIABLE_DT, CFL, UPDATE_DT + 10000 500000 0.1 .FALSE. .TRUE. 0.5 1 +C I/O: VERBOSITY, SAVE_FLOW_INT, SAVE_STATS_INT, MOVIE + 4 10000 100 .TRUE. +C Here include 6*N_TH lines, see below for format +C CREATE_FLOW_TH(1) Create new field or Read from DIABLO_TH.START + .TRUE. +C FILTER_TH(1) FILTER_INT(1) (If and how often to filter) + .FALSE. 10 +C RI_TAU(1) PR(1) + -0.00981 7.0 + + +C When including scalar advection, include 6 lines like the following for each scalar + +C CREATE_FLOW_TH(1) Create new field or Read from DIABLO_TH.START + .TRUE. +C FILTER_TH(1) FILTER_INT(1) (If and how often to filter) + .TRUE. 10 +C RI_TAU(1) PR(1) BACKGROUND_TH(N) + 0.0 1.0 .FALSE. + diff --git a/diablo_2.0/TF2010/input_chan.dat b/diablo_2.0/TF2010/input_chan.dat new file mode 100644 index 0000000..c4edf90 --- /dev/null +++ b/diablo_2.0/TF2010/input_chan.dat @@ -0,0 +1,60 @@ +C A data file for diablo. This is a generic, easy method of data +C input, as long as you don't change the number of comment lines. +C Note that the grid size is defined in the file grid_def. 
+C VERSION + 2.0 +C TIME_AD_METH + 1 +C LES: LES_MODEL_TYPE + 1 +C ICs: IC_TYPE, KICK + 1 0.001 +C Rotation: I_RO (or Coriolis parameter, f) + 0.0001 +C Upward vertical vector components, GRAV_X, GRAV_Y, GRAV_Z + 0.0 1.0 0.0 +C Forcing: F_TYPE, UBULK0, PX0, OMEGA0, AMP_OMEGA0 + 3 0.0 -1.0 6.28 0.0 +C BCs: U_BC_YMIN, U_BC_YMIN_C1, U_BC_YMIN_C2, U_BC_YMIN_C3 + 1 0.0 0.0 0.0 +C BCs: V_BC_YMIN, V_BC_YMIN_C1, V_BC_YMIN_C2, V_BC_YMIN_C3 + 0 0.0 0.0 0.0 +C BCs: W_BC_YMIN, W_BC_YMIN_C1, W_BC_YMIN_C2, W_BC_YMIN_C3 + 1 0.0 0.0 0.0 +C BCs: U_BC_YMAX, U_BC_YMAX_C1, U_BC_YMAX_C2, U_BC_YMAX_C3 + 1 0.0 0.0 0.0 +C BCs: V_BC_YMAX, V_BC_YMAX_C1, V_BC_YMAX_C2, V_BC_YMAX_C3 + 0 0.0 0.0 0.0 +C BCs: W_BC_YMAX, W_BC_YMAX_C1, W_BC_YMAX_C2, W_BC_YMAX_C3 + 1 0.0 0.0 0.0 +C Here include 6*N_TH lines, see below for format +C BCs: TH_BC_YMIN(1), TH_BC_YMIN_C1(1), TH_BC_YMIN_C2(1), TH_BC_YMIN_C3(1) + 1 -0.00917 0.0 0.0 +C BCs: TH_BC_YMAX(1), TH_BC_YMAX_C1(1), TH_BC_YMAX_C2(1), TH_BC_YMAX_C3(1) + 1 30.24 0.0 0.0 +C Background scalar gradients, DRHODX(1), DRHODZ(1) + 0.0000432 0.0 + + +C Description +C For channnel flows (NUM_PER_DIR=2): +C IC_TYPE specifies the functional form for the initial velocity +C KICK is a scale factor on the noise added when creating a new flow. +C F_TYPE=0 gives constant mass flux flow (maintaining UBULK0). +C F_TYPE=1 gives constant pressure gradient flow (PX0 constant). 
+C F_TYPE=2 is for an oscillatory pressure gradient of the form: +C PX0+AMP_OMEGA0*cos(OMEGA0*TIME) +C U_BC_YMIN is the BC TYPE on the U velocity component at the lower wall +C (0 for Dirichlet, 1 for Neumann) +C U_BC_YMIN_C1 is the value of the velocity (if Dirichlet) or it's gradient (if Neumann) + +C When including scalar advection, include 4 lines like the following for each scalar + +C BCs: TH_BC_YMIN(1), TH_BC_YMIN_C1(1), TH_BC_YMIN_C2(1), TH_BC_YMIN_C3(1) + 1 0.0 0.0 0.0 +C BCs: TH_BC_YMAX(1), TH_BC_YMAX_C1(1), TH_BC_YMAX_C2(1), TH_BC_YMAX_C3(1) + 1 0.0 0.0 0.0 + + + + diff --git a/diablo_2.0/TF2010/input_per.dat b/diablo_2.0/TF2010/input_per.dat new file mode 100644 index 0000000..ae29b42 --- /dev/null +++ b/diablo_2.0/TF2010/input_per.dat @@ -0,0 +1,35 @@ +C A data file for diablo. This is a generic, easy method of data +C input, as long as you don't change the number of comment lines. +C Note that the grid size is defined in the file grid_def. +C VERSION + 1.0 +C TIME_AD_METH + 1 +C LES: LES_MODEL_TYPE + 2 +C ICs: IC_TYPE, KICK + 0 .0010 +C FLAGS: TRUCK, GUST, + .FALSE. .FALSE. +C BACKGROUND_GRAD(N): Include this and the next three lines for each scalar + .FALSE. +C BACTERIAL MOTILITY CHI(N): + 0.0 +C BACKGROUND_GRAD(N): Include one of the following lines for each scalar + .FALSE. +C BACTERIAL MOTILITY CHI(N): + 0.001 +C BACKGROUND_GRAD(N): Include one of the following lines for each scalar + .FALSE. +C BACTERIAL MOTILITY CHI(N): + 0.01 + + + + + +C Description +C For triply-periodic flows (NUM_PER_DIR=3): +C IC_TYPE specifies the functional form for the initial velocity +C KICK is a scale factor on the noise added when creating a new flow. +C BACKGROUND_GRAD(N) specifies if the scalar should be considered as a perturbation to a linear background gradient. This is designed to allow simulations of stratified flows in the triply periodic flow geometry. 
From 341b26db8213aa8cce2485bbdb78b4438ae24496 Mon Sep 17 00:00:00 2001 From: johnryantaylor Date: Thu, 28 Jul 2016 15:33:31 +0100 Subject: [PATCH 26/66] Update README.md --- README.md | 3 +++ 1 file changed, 3 insertions(+) diff --git a/README.md b/README.md index b67be54..f3f0867 100644 --- a/README.md +++ b/README.md @@ -1,6 +1,9 @@ # DIABLO DNS In A Box...Laptop Optimized! +This branch includes background density gradients and thermal wind +and is designed for frontal zone simulations + A brief description of DIABLO diablo.f Main program, define grid, read/write flow, statistics From 04449e7bf1c725be3707adc13220b1fce4e9c5e1 Mon Sep 17 00:00:00 2001 From: johnryantaylor Date: Thu, 28 Jul 2016 16:36:48 +0100 Subject: [PATCH 27/66] Added case for surface boundary layer --- diablo_2.0/pre_process/create_grid_h5.m | 7 +++++++ 1 file changed, 7 insertions(+) diff --git a/diablo_2.0/pre_process/create_grid_h5.m b/diablo_2.0/pre_process/create_grid_h5.m index b6b3de6..95bcd53 100644 --- a/diablo_2.0/pre_process/create_grid_h5.m +++ b/diablo_2.0/pre_process/create_grid_h5.m @@ -14,6 +14,7 @@ disp('1) High resolution at both ends (tanh stretching function)'); disp('2) High resolution in center (tanh stretching function)'); disp('3) High resolution at both ends (starting at 0)'); + disp('4) High resolution at the top for surface boundary layer'); GRID_TYPE=input('Select a grid type: '); % Set the dimensions of the grid @@ -40,6 +41,12 @@ for J=1:N+1 G(J+1)=(L/2.0)*tanh(CS*((2.0*(J-1))/(N)-1.0))/tanh(CS)+L/2.0; end +elseif (GRID_TYPE==4) +% Surface boundary layer + for J=1:N+1 + G(J+1)=L*tanh(CS*((N-(J-1))/N-1.0))/tanh(CS)+L; + end + G(:)=G(:)*-1; else disp('Error, entered grid type unknown'); end From 1d88ed8bf866fa6aa95a1f09fba9184a0ee6031f Mon Sep 17 00:00:00 2001 From: johnryantaylor Date: Fri, 29 Jul 2016 12:49:54 +0100 Subject: [PATCH 28/66] updated to include thermal wind --- diablo_2.0/for/courant.f | 20 ++++++++++++++++++++ 1 file changed, 20 insertions(+) diff 
--git a/diablo_2.0/for/courant.f b/diablo_2.0/for/courant.f index 0a96229..e75f124 100644 --- a/diablo_2.0/for/courant.f +++ b/diablo_2.0/for/courant.f @@ -32,6 +32,24 @@ subroutine courant end do ! Use the model velocity to calculate the CFL number + + IF (FLAVOR.eq.'Front') THEN +! Add thermal wind to velocity when calculating the CFL number + do n=1,N_TH + do j=JSTART,JEND + do k=0,NZP-1 + do i=0,NXM + dt_x=cfl*dx(i)/abs(U1(i,k,j)-1.d0*DRHODZ(N)*RI(N) + & *GYF(J)/I_RO) + dt_y=cfl*dy(j)/abs(U2(i,k,j)) + dt_z=cfl*dz(j)/abs(U3(i,k,j)+(RI(N)/I_RO) + & *DRHODX(N)*GYF(J)) + dt=min(dt,dt_x,dt_y,dt_z) + end do + end do + end do + end do + ELSE do j=1,NY do k=0,NZP-1 do i=0,NXM @@ -42,6 +60,8 @@ subroutine courant end do end do end do + END IF + if (USE_MPI) then call get_minimum_mpi(dt) end if From ad45f1432bd8f7ca1afffbac5da56e2977a4795f Mon Sep 17 00:00:00 2001 From: johnryantaylor Date: Fri, 29 Jul 2016 12:50:18 +0100 Subject: [PATCH 29/66] Added case for thermal wind at front --- diablo_2.0/for/set_ics.f | 37 +++++++++++++++++++++++++++++++++++++ 1 file changed, 37 insertions(+) diff --git a/diablo_2.0/for/set_ics.f b/diablo_2.0/for/set_ics.f index d7130da..7e56be9 100644 --- a/diablo_2.0/for/set_ics.f +++ b/diablo_2.0/for/set_ics.f @@ -71,6 +71,18 @@ SUBROUTINE CREATE_FLOW_CHAN END DO END DO END DO + else if (IC_TYPE.eq.4) then +C For Front +C Initialize in thermal wind balance: + DO J=0,NY + DO K=0,NZP-1 + DO I=0,NXM + U1(I,K,J)=0.d0 + U2(I,K,J)=0.d0 + U3(I,K,J)=0.d0 + END DO + END DO + END DO else WRITE(*,*) 'WARNING, unsupported IC_TYPE in CREATE_FLOW' end if @@ -194,6 +206,8 @@ SUBROUTINE CREATE_TH_CHAN INCLUDE 'header' INTEGER I,J,K,N +! A variable for Front case... + REAL*8 RI_B(0:NY+1) DO N=1,N_TH IF (CREATE_NEW_TH(N)) THEN @@ -243,6 +257,29 @@ SUBROUTINE CREATE_TH_CHAN END DO END DO END DO + ELSE IF (IC_TYPE.eq.4) THEN +! 
For Front case, specify given RI_B profile + DO K=0,NZP-1 + DO I=0,NXM + TH(I,K,0,N)=0.d0 + DO J=1,NY + if (GYF(J).lt.-60.d0) then + RI_B(J)=20.d0 + TH(I,K,J,N)=(GYF(J)-GYF(1))* + & RI_B(J)*(RI(N)*DRHODX(N))**2.d0 + & /I_RO**2.d0/RI(N) + else + RI_B(J)=1.0d0 + TH(I,K,J,N)=(GYF(J)+60.d0)* + & RI_B(J)*(RI(N)*DRHODX(N))**2.d0 + & /I_RO**2.d0/RI(N) + & +(-60+140.d0)*20.d0*(RI(N)*DRHODX(N))**2.d0 + & /I_RO**2.d0/RI(N) + end if + END DO + END DO + END DO + ELSE WRITE(*,*) 'WARNING, unsupported IC_TYPE in CREATE_FLOW' END IF From 42d034c410f09ef5cb0fff55a9ee4d705995c89c Mon Sep 17 00:00:00 2001 From: johnryantaylor Date: Wed, 1 Nov 2017 17:04:39 +0000 Subject: [PATCH 30/66] Added changes to branch front in user_rhs after rebase --- diablo_2.0/for/user_rhs.f | 224 +++++++++++++++++++++++++++++++++----- 1 file changed, 196 insertions(+), 28 deletions(-) diff --git a/diablo_2.0/for/user_rhs.f b/diablo_2.0/for/user_rhs.f index 778ec8e..68203d3 100644 --- a/diablo_2.0/for/user_rhs.f +++ b/diablo_2.0/for/user_rhs.f @@ -85,37 +85,68 @@ SUBROUTINE USER_RHS_CHAN_FOURIER integer i,j,k,n - real*8 alpha +! Advection owing to thermal wind + IF ((FLAVOR.eq.'Front').and.(I_RO.ne.0.d0)) THEN + DO N=1,N_TH +! Loop over all scalars +! Add thermal wind advection to the momentum equations + do j=JSTART,JEND + do k=0,TNKZ + do i=0,NXP-1 + CF1(I,K,J)=CF1(I,K,J) + & -(DRHODX(N)*RI(N)*GYF(J)/I_RO) + & *CIKZ(K)*CU1(I,K,J) + & -(-1.d0*DRHODZ(N)*RI(N)*GYF(J)/I_RO) + & *CIKX(I)*CU1(I,K,J) + & -(-1.d0*DRHODZ(N)*RI(N)/I_RO) + & *0.5d0*(CU2(I,K,J)+CU2(I,K,J+1)) + CF3(I,K,J)=CF3(I,K,J) + & -(DRHODX(N)*RI(N)*GYF(J)/I_RO) + & *CIKZ(K)*CU3(I,K,J) + & -(-1.d0*DRHODZ(N)*RI(N)*GYF(J)/I_RO) + & *CIKX(I)*CU3(I,K,J) + & -(DRHODX(N)*RI(N)/I_RO) + & *0.5d0*(CU2(I,K,J)+CU2(I,K,J+1)) + end do + end do + end do -! For example, to add a linear damping term (e.g. -alpha*U) to the RHS: -! alpha=-0.1d0 -! DO J=JSTART,JEND -! DO K=0,TNKZ -! DO I=0,NXP-1 -! CF1(I,K,J)=CF1(I,K,J)-alpha*CU1(I,K,J) -! END DO -! END DO -! 
END DO + do j=2,NY + do k=0,TNKZ + do i=0,NXP-1 + CF2(I,K,J)=CF2(I,K,J) + & -(DRHODX(N)*RI(N)*GY(J)/I_RO) + & *CIKZ(K)*CU2(I,K,J) + & -(-1.d0*DRHODZ(N)*RI(N)*GY(J)/I_RO) + & *CIKX(I)*CU2(I,K,J) + end do + end do + end do -! For U2 do this... -! Note that the only thing that changes are the bounds of the J index -! DO J=2,NY -! DO K=0,TNKZ -! DO I=0,NXP-1 -! CF2(I,K,J)=CF2(I,K,J)-alpha*CU2(I,K,J) -! END DO -! END DO -! END DO +! Add advection by thermal wind to the scalar equations + DO J=JSTART_TH(N),JEND_TH(N) + DO K=0,TNKZ + DO I=0,NXP-1 + CFTH(I,K,J,N)=CFTH(I,K,J,N) + & -(RI(N)/I_RO)*DRHODX(N)*GYF(J) + & *CIKZ(K)*CTH(I,K,J,N) + & -(RI(N)/I_RO)*-1.d0*DRHODZ(N)*GYF(J) + & *CIKX(I)*CTH(I,K,J,N) + END DO + END DO + END DO -! For scalars, do this... -! DO J=JSTART,JEND -! DO K=0,TNKZ -! DO I=0,NXP-1 -! CFTH(I,K,J,N)=CFTH(I,K,J,N)-alpha*CTH(I,K,J,N) -! END DO -! END DO -! END DO -! END DO +! End do N_TH + END DO + + END IF + +! Add sponge layer forcing + DO N=1,N_TH + CALL SPONGE_TH(N) + END DO + CALL SPONGE_VEL +>>>>>>> Added thermal wind advection and sponge layers RETURN END @@ -142,6 +173,7 @@ SUBROUTINE USER_RHS_PER_PHYSICAL END DO C Note, that each cell has the same volume, so we can just average over all points EK=EK/dble(NX*NY*NZ) + ! Scale EK by an amount to compensate for dissipation from 2/3 de-aliasing: EK=0.8d0*EK DO J=0,NYM @@ -204,7 +236,143 @@ SUBROUTINE USER_RHS_CAVITY_PHYSICAL END + SUBROUTINE SPONGE_TH(N) +! This subroutine applies a sponge relaxation (Rayleigh damping) towards a +! specified background state for the temperature field +! The intention is to allow an open boundary + include 'header' + integer i,j,k,n + real*8 L_sponge,L_bottom + real*8 SPONGE_AMP + +! The following variables will store the background state + real*8 TH_0(-1:NY+1) + + real*8 RI_B(0:NY+1) + +! This variable will hold the forcing rate + real*8 SPONGE_SIGMA(0:NY+1) + +! Set the amplitude of the sponge + SPONGE_AMP=0.0001d0 +! 
Set the top of the sponge layer in physical units + L_sponge=-120.d0 +! Set the bottom of the computational domain in physical units + L_bottom=-140.d0 + DO J=0,NY+1 +! Quadratic damping at lower wall + if (GYF(J).lt.L_sponge) then + SPONGE_SIGMA(j)=SPONGE_AMP*((L_sponge-GYF(J)) + & /(L_sponge-L_bottom))**2.d0 + else + SPONGE_SIGMA(j)=0.d0 + end if + END DO + +! For MLI latmix + if (n.eq.1) then + TH_0(0)=0.d0 + do j=1,NY+1 + RI_B(J)=20.d0 + TH_0(J)=TH_0(J-1) + & +DY(J)*RI_B(J)*(RI(N)*DRHODX(N))**2.d0 + & /I_RO**2.d0/RI(N) + end do + else + do j=0,NY+1 + TH_0(j)=0.d0 + end do + end if + +! Add damping to R-K terms +! Damp the perturbations towards 0 + do k=0,TNKZ + do i=0,NKX + if ((i.ne.0).or.(k.ne.0)) then + do j=0,NY+1 + CFTH(i,k,j,n)=CFTH(i,k,j,n) + & -SPONGE_SIGMA(j)*(CTH(i,k,j,n)-0.) + end do + end if + end do + end do +! Damp the mean gradient towards TH_0 + do j=0,NY+1 + CFTH(0,0,j,n)=CFTH(0,0,j,n)-SPONGE_SIGMA(j) + & *(CTH(0,0,j,n)-TH_0(J)) + end do + + return + end + + SUBROUTINE SPONGE_VEL +! This subroutine applies a sponge relaxation (Rayleigh damping) towards a +! specified background state +! The intention is to allow an open boundary + include 'header' + integer i,j,k + + real*8 L_sponge,L_bottom + real*8 SPONGE_AMP + +! The following variables will store the background state + real*8 U1_0(-1:NY+1), U2_0(0:NY+1), U3_0(-1:NY+1) + +! This variable will hold the forcing rate + real*8 SPONGE_SIGMA(0:NY+1) + +! Set the amplitude of the sponge + SPONGE_AMP=0.0001d0 +! Set the top of the sponge layer in physical units + L_sponge=-120.d0 +! Set the bottom of the computational domain in physical units + L_bottom=-140.d0 + DO J=0,NY+1 +! Quadratic damping at lower wall + if (GYF(J).lt.L_sponge) then + SPONGE_SIGMA(j)=SPONGE_AMP*((L_sponge-GYF(J)) + & /(L_sponge-L_bottom))**2.d0 + else + SPONGE_SIGMA(j)=0.d0 + end if + END DO + +! Set the background state +! 
Here, set the background to be geostrophic, with a linear temperature profile + do j=0,NY+1 + U1_0(j)=0.d0 + U3_0(j)=0.d0 + end do + do j=0,NY+1 + U2_0(j)=0.d0 + end do + +! Add damping function to explicit R-K + do k=0,TNKZ + do i=0,NKX + if ((i.ne.0).or.(k.ne.0)) then + do j=jstart,jend + CF1(I,K,J)=CF1(I,K,J)-SPONGE_SIGMA(j)*(CU1(i,k,j)-0.d0) + CF3(I,K,J)=CF3(I,K,J)-SPONGE_SIGMA(j)*(CU3(i,k,j)-0.d0) + end do + do j=1,NY + CF2(I,K,J)=CF2(I,K,J)- + & 0.5*(SPONGE_SIGMA(j)+SPONGE_SIGMA(j+1))*(CU2(i,k,j)-0.d0) + end do + end if + end do + end do +! Damp mean flow + do j=jstart,jend + CF1(0,0,j)=CF1(0,0,j)-SPONGE_SIGMA(j)*(CU1(0,0,j)-U1_0(j)) + CF3(0,0,j)=CF3(0,0,j)-SPONGE_SIGMA(j)*(CU3(0,0,j)-U3_0(j)) + end do + do j=1,NY + CF2(0,0,j)=CF2(0,0,j)-SPONGE_SIGMA(j)*(CU2(0,0,j)-U2_0(j)) + end do + return + end From 6813086a678084acaf207fcfc0f09e7d412501d3 Mon Sep 17 00:00:00 2001 From: johnryantaylor Date: Wed, 1 Nov 2017 17:10:03 +0000 Subject: [PATCH 31/66] fixed conflict in les.f --- diablo_2.0/for/les.f | 1 - 1 file changed, 1 deletion(-) diff --git a/diablo_2.0/for/les.f b/diablo_2.0/for/les.f index 543bb9a..65e74c9 100644 --- a/diablo_2.0/for/les.f +++ b/diablo_2.0/for/les.f @@ -120,7 +120,6 @@ subroutine les_chan & -0.5d0*(U1_bar(J)-U1_bar(J-1))/DY(J) & -0.5d0*(U1_bar(J+1)-U1_bar(J))/DY(J+1) & ))**2.d0 -! end edit & +4.d0*Sij5(I,K,J)**2.d0 & +2.d0*Sij2(I,K,J)**2.d0 & +4.d0*(0.5d0*(Sij6(I,K,J+1)+Sij6(I,K,J) From 8aca6a625fb57c9ffe1b7e396fe3b52d6f41e958 Mon Sep 17 00:00:00 2001 From: johnryantaylor Date: Wed, 1 Nov 2017 17:11:42 +0000 Subject: [PATCH 32/66] Fixed LES conflict --- diablo_2.0/for/les.f | 23 +++++++++++------------ 1 file changed, 11 insertions(+), 12 deletions(-) diff --git a/diablo_2.0/for/les.f b/diablo_2.0/for/les.f index 65e74c9..6e41bdc 100644 --- a/diablo_2.0/for/les.f +++ b/diablo_2.0/for/les.f @@ -98,7 +98,6 @@ subroutine les_chan ! WRITE(*,*) RANK,GYF(78),U1_bar(78),U3_bar(78) ! END IF - ! 
Convert the velocity to physical space call FFT_XZ_TO_PHYSICAL(CU1,U1,0,NY+1) call FFT_XZ_TO_PHYSICAL(CU2,U2,0,NY+1) @@ -281,23 +280,23 @@ subroutine les_chan DO I=0,NXP-1 CF1(I,K,J)=CF1(I,K,J) & -CIKX(I)*CSij1(I,K,J) - & -(CSij4(I,K,J+1)-CSij4(I,K,J))/DYF(j) - & -CIKZ(K)*CSij5(I,K,J) - CF3(I,K,J)=CF3(I,K,J) - & -CIKX(I)*CSij5(I,K,J) - & -(CSij6(I,K,J+1)-CSij6(I,K,J))/DYF(J) - & -CIKZ(K)*CSij3(I,K,J) +! & -(CSij4(I,K,J+1)-CSij4(I,K,J))/DYF(j) +! & -CIKZ(K)*CSij5(I,K,J) +! CF3(I,K,J)=CF3(I,K,J) +! & -CIKX(I)*CSij5(I,K,J) +! & -(CSij6(I,K,J+1)-CSij6(I,K,J))/DYF(J) +! & -CIKZ(K)*CSij3(I,K,J) END DO END DO END DO DO J=2,NY DO K=0,TNKZ DO I=0,NXP-1 - CF2(I,K,J)=CF2(I,K,J) - & -CIKX(I)*CSij4(I,K,J) +! CF2(I,K,J)=CF2(I,K,J) +! & -CIKX(I)*CSij4(I,K,J) ! Sij2 is added through an implict eddy viscosity ! & -(CSij2(I,K,J)-CSij2(I,K,J-1))/DY(j) - & -CIKZ(K)*CSij6(I,K,J) +! & -CIKZ(K)*CSij6(I,K,J) END DO END DO END DO @@ -314,7 +313,7 @@ subroutine les_chan DO K=0,TNKZ DO I=0,NXP-1 DO J=2,NY - CF2(I,K,J)=CF2(I,K,J)+CIKX(I)*CTEMP(I,K,J) +! CF2(I,K,J)=CF2(I,K,J)+CIKX(I)*CTEMP(I,K,J) END DO END DO END DO @@ -329,7 +328,7 @@ subroutine les_chan DO K=0,TNKZ DO I=0,NXP-1 DO J=2,NY - CF2(I,K,J)=CF2(I,K,J)+CIKZ(K)*CTEMP(I,K,J) +! CF2(I,K,J)=CF2(I,K,J)+CIKZ(K)*CTEMP(I,K,J) END DO END DO END DO From 3d818acffecb8538a028da371fcbfe03337a3750 Mon Sep 17 00:00:00 2001 From: johnryantaylor Date: Wed, 3 Aug 2016 10:20:35 +0100 Subject: [PATCH 33/66] Added the SGS cross terms back --- diablo_2.0/for/les.f | 22 +++++++++++----------- 1 file changed, 11 insertions(+), 11 deletions(-) diff --git a/diablo_2.0/for/les.f b/diablo_2.0/for/les.f index 6e41bdc..65ea9c0 100644 --- a/diablo_2.0/for/les.f +++ b/diablo_2.0/for/les.f @@ -280,23 +280,23 @@ subroutine les_chan DO I=0,NXP-1 CF1(I,K,J)=CF1(I,K,J) & -CIKX(I)*CSij1(I,K,J) -! & -(CSij4(I,K,J+1)-CSij4(I,K,J))/DYF(j) -! & -CIKZ(K)*CSij5(I,K,J) -! CF3(I,K,J)=CF3(I,K,J) -! & -CIKX(I)*CSij5(I,K,J) -! & -(CSij6(I,K,J+1)-CSij6(I,K,J))/DYF(J) -! 
& -CIKZ(K)*CSij3(I,K,J) + & -(CSij4(I,K,J+1)-CSij4(I,K,J))/DYF(j) + & -CIKZ(K)*CSij5(I,K,J) + CF3(I,K,J)=CF3(I,K,J) + & -CIKX(I)*CSij5(I,K,J) + & -(CSij6(I,K,J+1)-CSij6(I,K,J))/DYF(J) + & -CIKZ(K)*CSij3(I,K,J) END DO END DO END DO DO J=2,NY DO K=0,TNKZ DO I=0,NXP-1 -! CF2(I,K,J)=CF2(I,K,J) -! & -CIKX(I)*CSij4(I,K,J) + CF2(I,K,J)=CF2(I,K,J) + & -CIKX(I)*CSij4(I,K,J) ! Sij2 is added through an implict eddy viscosity ! & -(CSij2(I,K,J)-CSij2(I,K,J-1))/DY(j) -! & -CIKZ(K)*CSij6(I,K,J) + & -CIKZ(K)*CSij6(I,K,J) END DO END DO END DO @@ -313,7 +313,7 @@ subroutine les_chan DO K=0,TNKZ DO I=0,NXP-1 DO J=2,NY -! CF2(I,K,J)=CF2(I,K,J)+CIKX(I)*CTEMP(I,K,J) + CF2(I,K,J)=CF2(I,K,J)+CIKX(I)*CTEMP(I,K,J) END DO END DO END DO @@ -328,7 +328,7 @@ subroutine les_chan DO K=0,TNKZ DO I=0,NXP-1 DO J=2,NY -! CF2(I,K,J)=CF2(I,K,J)+CIKZ(K)*CTEMP(I,K,J) + CF2(I,K,J)=CF2(I,K,J)+CIKZ(K)*CTEMP(I,K,J) END DO END DO END DO From aaf119b63d552072c5c0e34b6cb114ee65420f57 Mon Sep 17 00:00:00 2001 From: johnryantaylor Date: Wed, 3 Aug 2016 14:48:56 +0100 Subject: [PATCH 34/66] Fixed bug in calculating CFL number for rotating flow --- diablo_2.0/for/courant.f | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/diablo_2.0/for/courant.f b/diablo_2.0/for/courant.f index e75f124..968d7ec 100644 --- a/diablo_2.0/for/courant.f +++ b/diablo_2.0/for/courant.f @@ -12,7 +12,7 @@ subroutine courant integer imin,jmin,kmin ! Set the initial dt to some arbitrary large number - dt=999.d0 + dt=50.d0 ! 
Set the timestep based on viscosity and diffusivity dt=min(dt,0.5d0*min(dx(1),dy(1))/NU) @@ -42,7 +42,7 @@ subroutine courant dt_x=cfl*dx(i)/abs(U1(i,k,j)-1.d0*DRHODZ(N)*RI(N) & *GYF(J)/I_RO) dt_y=cfl*dy(j)/abs(U2(i,k,j)) - dt_z=cfl*dz(j)/abs(U3(i,k,j)+(RI(N)/I_RO) + dt_z=cfl*dz(k)/abs(U3(i,k,j)+(RI(N)/I_RO) & *DRHODX(N)*GYF(J)) dt=min(dt,dt_x,dt_y,dt_z) end do From 60dda58200dae89ce42a2134b59e6c2a0dd674e8 Mon Sep 17 00:00:00 2001 From: johnryantaylor Date: Wed, 1 Nov 2017 17:15:26 +0000 Subject: [PATCH 35/66] fixed conflict --- diablo_2.0/post_process/matlab/readmean.m | 20 ++++++++++++++------ 1 file changed, 14 insertions(+), 6 deletions(-) diff --git a/diablo_2.0/post_process/matlab/readmean.m b/diablo_2.0/post_process/matlab/readmean.m index f3e5770..33af225 100644 --- a/diablo_2.0/post_process/matlab/readmean.m +++ b/diablo_2.0/post_process/matlab/readmean.m @@ -10,14 +10,14 @@ %NY_S=input('Insert the number of points per process in the y-direction: '); NY_S=26; % Enter the number of scalars -N_TH=1; +N_TH=5; % Enter the viscosity from input.dat NU=0.001; % Enter the Prandtl number Pr=1; kappa=NU/Pr; % Enter the richardson number for each scalar -RI(1:N_TH)=0.15; +RI(1:N_TH)=1.0; % Set the start and end time in code units for start of averaging tstart=0; %tend=999; % If tend isn't defined, tend will default to the final time @@ -236,10 +236,10 @@ end thv_int=trapz(gyf,thv,1); thrms_int=trapz(gyf,thrms,1); - -for j=2:NY - gy(j)=(gyf(j)+gyf(j-1))/2; -end + +% Get GY from hdf5 grid file +gy=h5read([base_dir '/grid.h5'],'/grids/y'); + for j=2:NY-1 dyf(j)=(gy(j+1)-gy(j)); end @@ -255,4 +255,12 @@ end end +for n=1:N_TH +for k=1:nk + thme_int(k,n)=0; + for j=2:NY-1 + thme_int(k,n)=thme_int(k,n)+thme(j,k,n)*(gy(j+1)-gy(j)); + end +end +end From 314035b166c0aa7f580b9717b54a4f343f9344a1 Mon Sep 17 00:00:00 2001 From: johnryantaylor Date: Wed, 1 Nov 2017 17:18:39 +0000 Subject: [PATCH 36/66] fixed conflict --- diablo_2.0/for/user_rhs.f | 1 - 1 file changed, 1 
deletion(-) diff --git a/diablo_2.0/for/user_rhs.f b/diablo_2.0/for/user_rhs.f index 68203d3..085835e 100644 --- a/diablo_2.0/for/user_rhs.f +++ b/diablo_2.0/for/user_rhs.f @@ -146,7 +146,6 @@ SUBROUTINE USER_RHS_CHAN_FOURIER CALL SPONGE_TH(N) END DO CALL SPONGE_VEL ->>>>>>> Added thermal wind advection and sponge layers RETURN END From 34f8d481fe0bd482f1a33e02090bcc8f5364ca8d Mon Sep 17 00:00:00 2001 From: johnryantaylor Date: Wed, 1 Nov 2017 17:19:16 +0000 Subject: [PATCH 37/66] fixed commit --- diablo_2.0/for/les.f | 26 +++++++++++--------------- 1 file changed, 11 insertions(+), 15 deletions(-) diff --git a/diablo_2.0/for/les.f b/diablo_2.0/for/les.f index 65ea9c0..27d5be5 100644 --- a/diablo_2.0/for/les.f +++ b/diablo_2.0/for/les.f @@ -94,10 +94,6 @@ subroutine les_chan CALL MPI_BCAST(U3_bar,NY+2,MPI_DOUBLE_PRECISION,0, & MPI_COMM_Z,ierror) -! IF (MOD(TIME_STEP,SAVE_STATS_INT).EQ.0) THEN -! WRITE(*,*) RANK,GYF(78),U1_bar(78),U3_bar(78) -! END IF - ! Convert the velocity to physical space call FFT_XZ_TO_PHYSICAL(CU1,U1,0,NY+1) call FFT_XZ_TO_PHYSICAL(CU2,U2,0,NY+1) @@ -280,23 +276,23 @@ subroutine les_chan DO I=0,NXP-1 CF1(I,K,J)=CF1(I,K,J) & -CIKX(I)*CSij1(I,K,J) - & -(CSij4(I,K,J+1)-CSij4(I,K,J))/DYF(j) - & -CIKZ(K)*CSij5(I,K,J) - CF3(I,K,J)=CF3(I,K,J) - & -CIKX(I)*CSij5(I,K,J) - & -(CSij6(I,K,J+1)-CSij6(I,K,J))/DYF(J) - & -CIKZ(K)*CSij3(I,K,J) +! & -(CSij4(I,K,J+1)-CSij4(I,K,J))/DYF(j) +! & -CIKZ(K)*CSij5(I,K,J) +! CF3(I,K,J)=CF3(I,K,J) +! & -CIKX(I)*CSij5(I,K,J) +! & -(CSij6(I,K,J+1)-CSij6(I,K,J))/DYF(J) +! & -CIKZ(K)*CSij3(I,K,J) END DO END DO END DO DO J=2,NY DO K=0,TNKZ DO I=0,NXP-1 - CF2(I,K,J)=CF2(I,K,J) - & -CIKX(I)*CSij4(I,K,J) +! CF2(I,K,J)=CF2(I,K,J) +! & -CIKX(I)*CSij4(I,K,J) ! Sij2 is added through an implict eddy viscosity ! & -(CSij2(I,K,J)-CSij2(I,K,J-1))/DY(j) - & -CIKZ(K)*CSij6(I,K,J) +! 
& -CIKZ(K)*CSij6(I,K,J) END DO END DO END DO @@ -313,7 +309,7 @@ subroutine les_chan DO K=0,TNKZ DO I=0,NXP-1 DO J=2,NY - CF2(I,K,J)=CF2(I,K,J)+CIKX(I)*CTEMP(I,K,J) +! CF2(I,K,J)=CF2(I,K,J)+CIKX(I)*CTEMP(I,K,J) END DO END DO END DO @@ -328,7 +324,7 @@ subroutine les_chan DO K=0,TNKZ DO I=0,NXP-1 DO J=2,NY - CF2(I,K,J)=CF2(I,K,J)+CIKZ(K)*CTEMP(I,K,J) +! CF2(I,K,J)=CF2(I,K,J)+CIKZ(K)*CTEMP(I,K,J) END DO END DO END DO From c82c5763ba3667c6224094db7a3c49cb401cbd72 Mon Sep 17 00:00:00 2001 From: johnryantaylor Date: Wed, 1 Nov 2017 17:21:35 +0000 Subject: [PATCH 38/66] fixed conflict --- diablo_2.0/for/diablo_io.f | 9 ++++++++- 1 file changed, 8 insertions(+), 1 deletion(-) diff --git a/diablo_2.0/for/diablo_io.f b/diablo_2.0/for/diablo_io.f index 172c695..5b56a88 100644 --- a/diablo_2.0/for/diablo_io.f +++ b/diablo_2.0/for/diablo_io.f @@ -317,7 +317,14 @@ SUBROUTINE SAVE_FLOW(FINAL) FNAME='end.h5' SAVE_PRESSURE=.TRUE. else - FNAME='out.h5' + FNAME='out.' + & //CHAR(MOD(TIME_STEP,1000000)/100000+48) + & //CHAR(MOD(TIME_STEP,100000)/10000+48) + & //CHAR(MOD(TIME_STEP,10000)/1000+48) + & //CHAR(MOD(TIME_STEP,1000)/100+48) + & //CHAR(MOD(TIME_STEP,100)/10+48) + & //CHAR(MOD(TIME_STEP,10)+48) + & //'.h5' end if if (FNAME(len_trim(FNAME)-2:len_trim(FNAME)).eq.".h5") then IF (NUM_PER_DIR.NE.2) THEN From 389ceadbb35fe968e50066faefad05e7a70ca07d Mon Sep 17 00:00:00 2001 From: johnryantaylor Date: Wed, 1 Nov 2017 17:22:36 +0000 Subject: [PATCH 39/66] fixed conflict --- diablo_2.0/for/set_ics.f | 4 ++-- diablo_2.0/for/user_rhs.f | 14 ++++++-------- 2 files changed, 8 insertions(+), 10 deletions(-) diff --git a/diablo_2.0/for/set_ics.f b/diablo_2.0/for/set_ics.f index 7e56be9..807b87d 100644 --- a/diablo_2.0/for/set_ics.f +++ b/diablo_2.0/for/set_ics.f @@ -237,8 +237,8 @@ SUBROUTINE CREATE_TH_CHAN ELSE IF ((TH_BC_YMIN(N).EQ.1) & .AND.(TH_BC_YMAX(N).EQ.1)) THEN DO J=1,NY -! Linear profile with slope corresponding to upper value - TH(I,K,J,N)=TH_BC_YMAX_C1(N)*GYF(J) +! 
Linear profile with slope corresponding to lower value + TH(I,K,J,N)=TH_BC_YMIN_C1(N)*GYF(J) END DO ELSE IF (RANK.EQ.0) then diff --git a/diablo_2.0/for/user_rhs.f b/diablo_2.0/for/user_rhs.f index 085835e..837d5ec 100644 --- a/diablo_2.0/for/user_rhs.f +++ b/diablo_2.0/for/user_rhs.f @@ -247,17 +247,15 @@ SUBROUTINE SPONGE_TH(N) ! The following variables will store the background state real*8 TH_0(-1:NY+1) - real*8 RI_B(0:NY+1) - ! This variable will hold the forcing rate real*8 SPONGE_SIGMA(0:NY+1) ! Set the amplitude of the sponge - SPONGE_AMP=0.0001d0 + SPONGE_AMP=0.005d0 ! Set the top of the sponge layer in physical units - L_sponge=-120.d0 + L_sponge=-40.d0 ! Set the bottom of the computational domain in physical units - L_bottom=-140.d0 + L_bottom=-50.d0 DO J=0,NY+1 ! Quadratic damping at lower wall if (GYF(J).lt.L_sponge) then @@ -321,11 +319,11 @@ SUBROUTINE SPONGE_VEL real*8 SPONGE_SIGMA(0:NY+1) ! Set the amplitude of the sponge - SPONGE_AMP=0.0001d0 + SPONGE_AMP=0.005d0 ! Set the top of the sponge layer in physical units - L_sponge=-120.d0 + L_sponge=-40.d0 ! Set the bottom of the computational domain in physical units - L_bottom=-140.d0 + L_bottom=-50.d0 DO J=0,NY+1 ! 
Quadratic damping at lower wall if (GYF(J).lt.L_sponge) then From 46160254fc332d0d626d9a0c0eac59e38bda9557 Mon Sep 17 00:00:00 2001 From: johnryantaylor Date: Thu, 22 Jun 2017 09:24:44 +0100 Subject: [PATCH 40/66] Added input files for T+F2010 test case --- diablo_2.0/TF2010/grid.h5 | Bin 0 -> 2848 bytes diablo_2.0/TF2010/grid_def | 8 +++++ diablo_2.0/TF2010/grid_def.all | 8 +++++ diablo_2.0/TF2010/grid_mpi | 3 ++ diablo_2.0/TF2010/input.dat | 35 ++++++++++++++++++ diablo_2.0/TF2010/input_chan.dat | 60 +++++++++++++++++++++++++++++++ diablo_2.0/TF2010/input_per.dat | 35 ++++++++++++++++++ 7 files changed, 149 insertions(+) create mode 100644 diablo_2.0/TF2010/grid.h5 create mode 100644 diablo_2.0/TF2010/grid_def create mode 100644 diablo_2.0/TF2010/grid_def.all create mode 100644 diablo_2.0/TF2010/grid_mpi create mode 100644 diablo_2.0/TF2010/input.dat create mode 100644 diablo_2.0/TF2010/input_chan.dat create mode 100644 diablo_2.0/TF2010/input_per.dat diff --git a/diablo_2.0/TF2010/grid.h5 b/diablo_2.0/TF2010/grid.h5 new file mode 100644 index 0000000000000000000000000000000000000000..bb3d1a772ff817879db8aea9430c1c80adce0ad4 GIT binary patch literal 2848 zcmeD5aB<`1lHy_j0S*oZ76t(@6Gr@p0tIe}2#gPtPk=HQp>zk7Ucm%mFfxE31A_!q zTo7tLy1I}cS62q0N|^aD8mf)q0V{;X0TURdM^p%SxH<-aJRAY_H7u2$fTlB8V5Api zrW6A?u=MBwlOU7Em3}>#AqHkZ(xfO#unEp7x4lV5oq~3=D;y zt~A&O#vMc5){1QNtA8^+5424=p~W=K<3M+QG>?{?snU)|L$vTg_N zEJ_uO^mRQjH{w9Z(K?p{?9$)<`)+bR@N!bYW9Ju62j0!Pa^kas(}4#I1-O>QI3AGw zVrxEcw!?vgrx|CM?y)~$=+^RbrH9>t<3TfyGu*W~z&%qcXIiE8fyU=w&hD18Iw1RM z$&FwkZYb^mfc03E?Bi}d zvIo{4HnsScE_GmWf!(zDH{u6uxAQ08?G-&RWBHAhW!}OEyz_TE8mb5$=vDsDsiwzw zKq&3h1y?t&1KXx-dw#i(?ZB?(Vm2>WnGei}axruX{=fe^xB26s4?p&^eh*|!xc_PY jd8TFOPguU%f8%oEy!}(}?ca0J)hJ}`;r$Gkz_dL8e03W_ literal 0 HcmV?d00001 diff --git a/diablo_2.0/TF2010/grid_def b/diablo_2.0/TF2010/grid_def new file mode 100644 index 0000000..3e44889 --- /dev/null +++ b/diablo_2.0/TF2010/grid_def @@ -0,0 +1,8 @@ 
+!----*|--.---------.---------.---------.---------.---------.---------.-|-------| +! grid_def, the portion of the header that defines the grid size for diablo. +!----*|--.---------.---------.---------.---------.---------.---------.-|-------| + PARAMETER (NX=256) + PARAMETER (NY=26) + PARAMETER (NZ=64) + PARAMETER (N_TH=1) +!----*|--.---------.---------.---------.---------.---------.---------.-|-------| diff --git a/diablo_2.0/TF2010/grid_def.all b/diablo_2.0/TF2010/grid_def.all new file mode 100644 index 0000000..c7cf5bf --- /dev/null +++ b/diablo_2.0/TF2010/grid_def.all @@ -0,0 +1,8 @@ +!----*|--.---------.---------.---------.---------.---------.---------.-|-------| +! grid_def, the portion of the header that defines the grid size for diablo. +!----*|--.---------.---------.---------.---------.---------.---------.-|-------| + PARAMETER (NX=256) + PARAMETER (NY=51) + PARAMETER (NZ=64) + PARAMETER (N_TH=1) +!----*|--.---------.---------.---------.---------.---------.---------.-|-------| diff --git a/diablo_2.0/TF2010/grid_mpi b/diablo_2.0/TF2010/grid_mpi new file mode 100644 index 0000000..c87fde0 --- /dev/null +++ b/diablo_2.0/TF2010/grid_mpi @@ -0,0 +1,3 @@ + PARAMETER(NPROCS=4) + PARAMETER(NPROCY=2) + PARAMETER(NPROCZ=NPROCS/NPROCY) diff --git a/diablo_2.0/TF2010/input.dat b/diablo_2.0/TF2010/input.dat new file mode 100644 index 0000000..5a9a090 --- /dev/null +++ b/diablo_2.0/TF2010/input.dat @@ -0,0 +1,35 @@ +C A data file for diablo. This is a generic, easy method of data +C input, as long as you don't change the number of comment lines. +C Note that the grid size is defined in the file grid_def. +C FLAVOR VERSION + 'Front' 2.0 +C USE_MPI USE_LES (note, also change these flags in Makefile): + .TRUE. .TRUE. +C Parameters: NU, BETA, LX, LY, LZ + 0.000001 1.0 1000.0 50.0 250.0 +C Vertical viscosity scale factor NU_V_SCALE (NU_V=NU*NU_V_SCALE) + 1.0 +C Method: NUM_PER_DIR, CREATE_NEW_FLOW + 2 .TRUE. 
+C Time March: N_TIME_STEPS, TIME_LIMIT, DELTA_T, RESET_TIME, VARIABLE_DT, CFL, UPDATE_DT + 10000 500000 0.1 .FALSE. .TRUE. 0.5 1 +C I/O: VERBOSITY, SAVE_FLOW_INT, SAVE_STATS_INT, MOVIE + 4 10000 100 .TRUE. +C Here include 6*N_TH lines, see below for format +C CREATE_FLOW_TH(1) Create new field or Read from DIABLO_TH.START + .TRUE. +C FILTER_TH(1) FILTER_INT(1) (If and how often to filter) + .FALSE. 10 +C RI_TAU(1) PR(1) + -0.00981 7.0 + + +C When including scalar advection, include 6 lines like the following for each scalar + +C CREATE_FLOW_TH(1) Create new field or Read from DIABLO_TH.START + .TRUE. +C FILTER_TH(1) FILTER_INT(1) (If and how often to filter) + .TRUE. 10 +C RI_TAU(1) PR(1) BACKGROUND_TH(N) + 0.0 1.0 .FALSE. + diff --git a/diablo_2.0/TF2010/input_chan.dat b/diablo_2.0/TF2010/input_chan.dat new file mode 100644 index 0000000..c4edf90 --- /dev/null +++ b/diablo_2.0/TF2010/input_chan.dat @@ -0,0 +1,60 @@ +C A data file for diablo. This is a generic, easy method of data +C input, as long as you don't change the number of comment lines. +C Note that the grid size is defined in the file grid_def. 
+C VERSION + 2.0 +C TIME_AD_METH + 1 +C LES: LES_MODEL_TYPE + 1 +C ICs: IC_TYPE, KICK + 1 0.001 +C Rotation: I_RO (or Coriolis parameter, f) + 0.0001 +C Upward vertical vector components, GRAV_X, GRAV_Y, GRAV_Z + 0.0 1.0 0.0 +C Forcing: F_TYPE, UBULK0, PX0, OMEGA0, AMP_OMEGA0 + 3 0.0 -1.0 6.28 0.0 +C BCs: U_BC_YMIN, U_BC_YMIN_C1, U_BC_YMIN_C2, U_BC_YMIN_C3 + 1 0.0 0.0 0.0 +C BCs: V_BC_YMIN, V_BC_YMIN_C1, V_BC_YMIN_C2, V_BC_YMIN_C3 + 0 0.0 0.0 0.0 +C BCs: W_BC_YMIN, W_BC_YMIN_C1, W_BC_YMIN_C2, W_BC_YMIN_C3 + 1 0.0 0.0 0.0 +C BCs: U_BC_YMAX, U_BC_YMAX_C1, U_BC_YMAX_C2, U_BC_YMAX_C3 + 1 0.0 0.0 0.0 +C BCs: V_BC_YMAX, V_BC_YMAX_C1, V_BC_YMAX_C2, V_BC_YMAX_C3 + 0 0.0 0.0 0.0 +C BCs: W_BC_YMAX, W_BC_YMAX_C1, W_BC_YMAX_C2, W_BC_YMAX_C3 + 1 0.0 0.0 0.0 +C Here include 6*N_TH lines, see below for format +C BCs: TH_BC_YMIN(1), TH_BC_YMIN_C1(1), TH_BC_YMIN_C2(1), TH_BC_YMIN_C3(1) + 1 -0.00917 0.0 0.0 +C BCs: TH_BC_YMAX(1), TH_BC_YMAX_C1(1), TH_BC_YMAX_C2(1), TH_BC_YMAX_C3(1) + 1 30.24 0.0 0.0 +C Background scalar gradients, DRHODX(1), DRHODZ(1) + 0.0000432 0.0 + + +C Description +C For channnel flows (NUM_PER_DIR=2): +C IC_TYPE specifies the functional form for the initial velocity +C KICK is a scale factor on the noise added when creating a new flow. +C F_TYPE=0 gives constant mass flux flow (maintaining UBULK0). +C F_TYPE=1 gives constant pressure gradient flow (PX0 constant). 
+C F_TYPE=2 is for an oscillatory pressure gradient of the form: +C PX0+AMP_OMEGA0*cos(OMEGA0*TIME) +C U_BC_YMIN is the BC TYPE on the U velocity component at the lower wall +C (0 for Dirichlet, 1 for Neumann) +C U_BC_YMIN_C1 is the value of the velocity (if Dirichlet) or it's gradient (if Neumann) + +C When including scalar advection, include 4 lines like the following for each scalar + +C BCs: TH_BC_YMIN(1), TH_BC_YMIN_C1(1), TH_BC_YMIN_C2(1), TH_BC_YMIN_C3(1) + 1 0.0 0.0 0.0 +C BCs: TH_BC_YMAX(1), TH_BC_YMAX_C1(1), TH_BC_YMAX_C2(1), TH_BC_YMAX_C3(1) + 1 0.0 0.0 0.0 + + + + diff --git a/diablo_2.0/TF2010/input_per.dat b/diablo_2.0/TF2010/input_per.dat new file mode 100644 index 0000000..ae29b42 --- /dev/null +++ b/diablo_2.0/TF2010/input_per.dat @@ -0,0 +1,35 @@ +C A data file for diablo. This is a generic, easy method of data +C input, as long as you don't change the number of comment lines. +C Note that the grid size is defined in the file grid_def. +C VERSION + 1.0 +C TIME_AD_METH + 1 +C LES: LES_MODEL_TYPE + 2 +C ICs: IC_TYPE, KICK + 0 .0010 +C FLAGS: TRUCK, GUST, + .FALSE. .FALSE. +C BACKGROUND_GRAD(N): Include this and the next three lines for each scalar + .FALSE. +C BACTERIAL MOTILITY CHI(N): + 0.0 +C BACKGROUND_GRAD(N): Include one of the following lines for each scalar + .FALSE. +C BACTERIAL MOTILITY CHI(N): + 0.001 +C BACKGROUND_GRAD(N): Include one of the following lines for each scalar + .FALSE. +C BACTERIAL MOTILITY CHI(N): + 0.01 + + + + + +C Description +C For triply-periodic flows (NUM_PER_DIR=3): +C IC_TYPE specifies the functional form for the initial velocity +C KICK is a scale factor on the noise added when creating a new flow. +C BACKGROUND_GRAD(N) specifies if the scalar should be considered as a perturbation to a linear background gradient. This is designed to allow simulations of stratified flows in the triply periodic flow geometry. 
From 4975f8298fdd750689ee68ece9c2c99566013cea Mon Sep 17 00:00:00 2001 From: johnryantaylor Date: Wed, 1 Nov 2017 17:33:46 +0000 Subject: [PATCH 41/66] fixed spurious lines left over from rebase --- diablo_2.0/for/user_rhs.f | 8 -------- 1 file changed, 8 deletions(-) diff --git a/diablo_2.0/for/user_rhs.f b/diablo_2.0/for/user_rhs.f index 353c4ac..562dbf3 100644 --- a/diablo_2.0/for/user_rhs.f +++ b/diablo_2.0/for/user_rhs.f @@ -435,14 +435,6 @@ SUBROUTINE SLIP_VEL RETURN END -======= - do j=0,NY+1 - CFTH(0,0,j,n)=CFTH(0,0,j,n)-SPONGE_SIGMA(j) - & *(CTH(0,0,j,n)-TH_0(J)) - end do - - return - end SUBROUTINE SPONGE_VEL ! This subroutine applies a sponge relaxation (Rayleigh damping) towards a From 87dc42b041872d119f40d5669c03d3e342f0d705 Mon Sep 17 00:00:00 2001 From: johnryantaylor Date: Thu, 2 Nov 2017 15:01:19 +0000 Subject: [PATCH 42/66] updated on pontus --- diablo_2.0/post_process/matlab/read_tke_h5.m | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/diablo_2.0/post_process/matlab/read_tke_h5.m b/diablo_2.0/post_process/matlab/read_tke_h5.m index e65b564..d4a53b4 100644 --- a/diablo_2.0/post_process/matlab/read_tke_h5.m +++ b/diablo_2.0/post_process/matlab/read_tke_h5.m @@ -14,7 +14,7 @@ tstart=0; % Set the filename -filename=[base_dir '/mean.h5']; +filename=[base_dir '/tke.h5']; % Read in the number of samples (store in nk) file_info=h5info(filename); From fb6450543bce3809f27dee2097fa26d89a7dd4f7 Mon Sep 17 00:00:00 2001 From: johnryantaylor Date: Mon, 27 Nov 2017 09:25:06 +0000 Subject: [PATCH 43/66] Fixed bug in writing wv to hdf5 --- diablo_2.0/for/save_stats.f | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/diablo_2.0/for/save_stats.f b/diablo_2.0/for/save_stats.f index 11a1f20..bd076fb 100644 --- a/diablo_2.0/for/save_stats.f +++ b/diablo_2.0/for/save_stats.f @@ -384,7 +384,7 @@ SUBROUTINE SAVE_STATS_CHAN(FINAL) call WriteStatH5(FNAME,gname,Diag) gname='wv' - Diag=uw(1:NY) + Diag=wv(1:NY) call 
WriteStatH5(FNAME,gname,Diag) gname='dudy' From 2309c71260c8415aaf5b003071fe96790c77df33 Mon Sep 17 00:00:00 2001 From: johnryantaylor Date: Tue, 10 Jul 2018 13:16:49 +0100 Subject: [PATCH 44/66] Fixed a bug in the calculation of epsilon --- diablo_2.0/for/save_stats.f | 92 ++++++++++++++++++++----------------- 1 file changed, 49 insertions(+), 43 deletions(-) diff --git a/diablo_2.0/for/save_stats.f b/diablo_2.0/for/save_stats.f index bd076fb..bc4c0f4 100644 --- a/diablo_2.0/for/save_stats.f +++ b/diablo_2.0/for/save_stats.f @@ -347,9 +347,14 @@ SUBROUTINE SAVE_STATS_CHAN(FINAL) IF (RANKZ.eq.0) then + + write(*,*) RANK,'Writing GYF' gname='gyf' Diag=gyf(1:NY) call WriteStatH5(FNAME,gname,Diag) + write(*,*) RANK,'Done writing GYF' + + pause gname='ume' Diag=ume(1:NY) @@ -937,12 +942,12 @@ subroutine tkebudget_chan integer i,j,k ! Compute the turbulent dissipation rate, epsilon=nu* - - do j=2,NYM +! epsilon is calculated at GY points + do j=1,NY epsilon(j)=0. end do ! Store du/dx in CS1 - do j=2,NYM + do j=1,NY do k=0,TNKZ do i=0,NXP-1 CS1(i,k,j)=CIKX(i)*CR1(i,k,j) @@ -951,50 +956,50 @@ subroutine tkebudget_chan end do ! Convert to physical space call fft_xz_to_physical(CS1,S1,0,NY+1) - do j=2,NYM + do j=1,NY do k=0,NZP-1 do i=0,NXM - epsilon(j)=epsilon(j)+(S1(i,k,j)**2.0) + epsilon(j)=epsilon(j)+(DYF(j-1)*S1(i,k,j)**2.d0 + & +DYF(j)*S1(i,k,j-1)**2.d0)/(2.d0*DY(j)) end do end do end do ! Store dv/dx in CS1 - do j=2,NYM + do j=1,NY do k=0,TNKZ do i=0,NXP-1 - CS1(i,k,j)=CIKX(i)*(CR2(i,k,j)+CR2(i,k,j+1))/2.0 + CS1(i,k,j)=CIKX(i)*CR2(i,k,j) end do end do end do ! Convert to physical space call fft_xz_to_physical(CS1,S1,0,NY+1) - do j=2,NYM + do j=1,NY do k=0,NZP-1 do i=0,NXM - epsilon(j)=epsilon(j)+0.5*(S1(i,k,j)**2.0) + epsilon(j)=epsilon(j)+(S1(i,k,j)**2.0) end do end do end do ! 
Compute du/dy at GYF gridpoints, note remove mean - do j=2,NYM + do j=1,NY do k=0,NZP-1 do i=0,NXM - F1(i,k,j)=((U1(i,k,j+1)-CR1(0,0,j+1)) - & -(U1(i,k,j-1)-CR1(0,0,j-1)))/(GY(j)+GY(j+1)) + F1(i,k,j)=((U1(i,k,j)-dble(CR1(0,0,j))) + & -(U1(i,k,j-1)-dble(CR1(0,0,j-1)))) + & /DY(j) end do end do end do - do j=2,NYM + do j=1,NY do k=0,NZP-1 do i=0,NXM - epsilon(j)=epsilon(j)+0.5*(F1(i,k,j)**2.0) -! Cross term dvdx*dudy - epsilon(j)=epsilon(j)+(S1(i,k,j)*F1(i,k,j)) + epsilon(j)=epsilon(j)+(F1(i,k,j)**2.0) end do end do end do ! Store dw/dx in CS1 - do j=2,NYM + do j=1,NY do k=0,TNKZ do i=0,NXP-1 CS1(i,k,j)=CIKX(i)*CR3(i,k,j) @@ -1003,16 +1008,17 @@ subroutine tkebudget_chan end do ! Convert to physical space call fft_xz_to_physical(CS1,S1,0,NY+1) - do j=2,NYM + do j=1,NY do k=0,NZP-1 do i=0,NXM - epsilon(j)=epsilon(j)+0.5*(S1(i,k,j)**2.0) + epsilon(j)=epsilon(j)+(DYF(j-1)*S1(i,k,j)**2.d0 + & +DYF(j)*S1(i,k,j-1)**2.d0)/(2.d0*DY(j)) end do end do end do ! Compute du/dz at GYF gridpoints, note remove mean ! Store du/dz in CS1 - do j=2,NYM + do j=1,NY do k=0,TNKZ do i=0,NXP-1 CF1(i,k,j)=CIKZ(k)*CR1(i,k,j) @@ -1021,12 +1027,11 @@ subroutine tkebudget_chan end do ! Convert to physical space call fft_xz_to_physical(CF1,F1,0,NY+1) - do j=2,NYM + do j=1,NY do k=0,NZP-1 do i=0,NXM - epsilon(j)=epsilon(j)+0.5*(F1(i,k,j)**2.0) -! Cross term dudz*dwdx - epsilon(j)=epsilon(j)+S1(i,k,j)*F1(i,k,j) + epsilon(j)=epsilon(j)+(DYF(j-1)*F1(i,k,j)**2.d0 + & +DYF(j)*F1(i,k,j-1)**2.d0)/(2.d0*DY(j)) end do end do end do @@ -1034,12 +1039,13 @@ subroutine tkebudget_chan do j=2,NYM do k=0,NZP-1 do i=0,NXM - S1(i,k,j)=((U2(i,k,j+1)-CR2(0,0,j+1))-(U2(i,k,j)-CR2(0,0,j))) - & /GYF(j) + S1(i,k,j)=((U2(i,k,j+1)-dble(CR2(0,0,j+1))) + & -(U2(i,k,j-1)-dble(CR2(0,0,j-1)))) + & /(GY(j+1)-GY(j-1)) end do end do end do - do j=2,NYM + do j=1,NY do k=0,NZP-1 do i=0,NXM epsilon(j)=epsilon(j)+(S1(i,k,j)**2.0) @@ -1047,42 +1053,41 @@ subroutine tkebudget_chan end do end do ! 
Compute dw/dy at GYF gridpoints, note remove mean - do j=2,NYM + do j=1,NY do k=0,NZP-1 do i=0,NXM - S1(i,k,j)=((U3(i,k,j+1)-CR3(0,0,j+1)) - & -(U3(i,k,j-1)-CR3(0,0,j-1)))/(GY(j)+GY(j+1)) + S1(i,k,j)=((U3(i,k,j)-dble(CR3(0,0,j))) + & -(U3(i,k,j-1)-dble(CR3(0,0,j-1)))) + & /DY(j) end do end do end do - do j=2,NYM + do j=1,NY do k=0,NZP-1 do i=0,NXM - epsilon(j)=epsilon(j)+0.5*(S1(i,k,j)**2.0) + epsilon(j)=epsilon(j)+(S1(i,k,j)**2.0) end do end do end do ! Store dv/dz in CF1 - do j=2,NYM + do j=1,NY do k=0,TNKZ do i=0,NXP-1 - CF1(i,k,j)=CIKZ(k)*(CR2(i,k,j)+CR2(i,k,j+1))/2.0 + CF1(i,k,j)=CIKZ(k)*CR2(i,k,j) end do end do end do ! Convert to physical space call fft_xz_to_physical(CF1,F1,0,NY+1) - do j=2,NYM + do j=1,NY do k=0,NZP-1 do i=0,NXM - epsilon(j)=epsilon(j)+0.5*(F1(i,k,j)**2.0) -! Cross term dvdz*dwdy - epsilon(j)=epsilon(j)+S1(i,k,j)*F1(i,k,j) + epsilon(j)=epsilon(j)+(F1(i,k,j)**2.0) end do end do end do ! Store dw/dz in CS1 - do j=2,NYM + do j=1,NY do k=0,TNKZ do i=0,NXP-1 CS1(i,k,j)=CIKZ(k)*CR3(i,k,j) @@ -1091,15 +1096,16 @@ subroutine tkebudget_chan end do ! 
Convert to physical space call fft_xz_to_physical(CS1,S1,0,NY+1) - do j=2,NYM + do j=1,NY do k=0,NZP-1 do i=0,NXM - epsilon(j)=epsilon(j)+(S1(i,k,j)**2.0) + epsilon(j)=epsilon(j)+(DYF(j-1)*S1(i,k,j)**2.d0 + & +DYF(j)*S1(i,k,j-1)**2.d0)/(2.d0*DY(j)) end do end do end do - do j=2,NYM - epsilon(j)=epsilon(j)/float(NX*NZ) + do j=1,NY + epsilon(j)=NU*epsilon(j)/dble(NX*NZ) end do call mpi_allreduce(mpi_in_place,epsilon,NY+2,MPI_DOUBLE_PRECISION, & MPI_SUM,MPI_COMM_Z,ierror) From 57340ea2a58a9cb5578bad09edf90f9dcfc75be8 Mon Sep 17 00:00:00 2001 From: johnryantaylor Date: Tue, 10 Jul 2018 13:40:42 +0100 Subject: [PATCH 45/66] minor changes to channel.f --- diablo_2.0/for/channel.f | 611 +++++++++++++++++++++++---------------- 1 file changed, 355 insertions(+), 256 deletions(-) diff --git a/diablo_2.0/for/channel.f b/diablo_2.0/for/channel.f index 0791a88..0d0e6ed 100644 --- a/diablo_2.0/for/channel.f +++ b/diablo_2.0/for/channel.f @@ -69,7 +69,7 @@ SUBROUTINE RK_CHAN_1 C----*|--.---------.---------.---------.---------.---------.---------.-|-------| INCLUDE 'header' - INTEGER I,J,K,N,ISTART + INTEGER I,J,K,N,ISTART REAL*8 TEMP1, TEMP2, TEMP3, TEMP4, TEMP5, UBULK C Define the constants that are used in the time-stepping @@ -93,7 +93,7 @@ SUBROUTINE RK_CHAN_1 END DO END DO END DO - DO J=2,NY + DO J=2,NY DO K=0,TNKZ DO I=0,NXP-1 CR2(I,K,J)=CU2(I,K,J) @@ -101,7 +101,7 @@ SUBROUTINE RK_CHAN_1 END DO END DO -C Add the R-K term from the rk-1 step +C Add the R-K term from the rk-1 step IF (RK_STEP .GT. 
1) THEN DO J=JSTART,JEND DO K=0,TNKZ @@ -119,7 +119,7 @@ SUBROUTINE RK_CHAN_1 END DO END DO END IF - + C Take the y-derivative of the pressure at GY points in Fourier space DO J=2,NY DO K=0,TNKZ @@ -152,7 +152,7 @@ SUBROUTINE RK_CHAN_1 C F_TYPE=1 -> Constant pressure gradient in the x-direction C F_TYPE=2 -> Oscillatory pressure gradient in the x-direction C OTHER VALUES -> No forcing added - IF (F_TYPE.EQ.1) THEN + IF (F_TYPE.EQ.1) THEN C Add forcing for a constant pressure gradient DO J=JSTART,JEND IF (RANKZ.eq.0) CR1(0,0,J)=CR1(0,0,J)-TEMP4*PX0 @@ -175,20 +175,20 @@ SUBROUTINE RK_CHAN_1 DO J=JSTART,JEND DO K=0,TNKZ DO I=0,NXP-1 - CF1(I,K,J)=-NU * (KX2(I)+KZ2(K))**BETA * CU1(I,K,J) - CF3(I,K,J)=-NU * (KX2(I)+KZ2(K))**BETA * CU3(I,K,J) + CF1(I,K,J)=-NU * (KX2(I)+KZ2(K))**BETA * CU1(I,K,J) + CF3(I,K,J)=-NU * (KX2(I)+KZ2(K))**BETA * CU3(I,K,J) END DO END DO END DO DO J=2,NY DO K=0,TNKZ DO I=0,NXP-1 - CF2(I,K,J)=-NU * (KX2(I)+KZ2(K))**BETA * CU2(I,K,J) - END DO + CF2(I,K,J)=-NU * (KX2(I)+KZ2(K))**BETA * CU2(I,K,J) + END DO END DO END DO -! Add the terms owing to the system rotation (Coriolis terms) +! Add the terms owing to the system rotation (Coriolis terms) ! Assume that the flow is on an f-plane DO K=0,TNKZ DO I=0,NXP-1 @@ -203,9 +203,9 @@ SUBROUTINE RK_CHAN_1 DO N=1,N_TH ! If a scalar contributes to the denisty, RI is not equal to zero and -! add the buoyancy term as explicit R-K. Don't add the 0,0 mode in the +! add the buoyancy term as explicit R-K. Don't add the 0,0 mode in the ! y-direction, which corresponds to the plane-average. -! The plane averaged density balances the hydrostatic pressure +! 
The plane averaged density balances the hydrostatic pressure DO J=2,NY DO K=1,TNKZ DO I=0,NXP-1 @@ -306,85 +306,17 @@ SUBROUTINE RK_CHAN_1 C In the process, Ui is converted to physical space call les_chan -C Re-apply the boundary conditions for velocity -C In the case of Neumann (applied stress) BCs, these were changed in the LES - call APPLY_BC_VEL_PHYS_MPI - -C APPLY constant SGS Prandlt number - DO N=1,N_TH - do j=1,NY+1 - do k=0,NZP-1 - do i=0,NXM - KAPPA_T(I,K,J,N)=1.d0*NU_T(I,K,J) - end do - end do - end do - end do - -C Add the horizontal diffusive terms using explicit timestepping -C This is already done for the viscous terms inside les_chan.f + CALL FFT_XZ_TO_FOURIER(U1,CU1,0,NY+1) + CALL FFT_XZ_TO_FOURIER(U2,CU2,0,NY+1) + CALL FFT_XZ_TO_FOURIER(U3,CU3,0,NY+1) - DO N=1,N_TH - - DO J=1,NY+1 - DO K=0,TNKZ - DO I=0,NXP-1 - CS1(I,K,J)=CIKX(I)*CTH(I,K,J,N) - END DO - END DO - END DO - CALL FFT_XZ_TO_PHYSICAL(CS1,S1,0,NY+1) - do j=1,NY+1 - do k=0,NZP-1 - do i=0,NXM - S1(I,K,J)=KAPPA_T(I,K,J,N)*S1(I,K,J) - end do - end do - end do - CALL FFT_XZ_TO_FOURIER(S1,CS1,0,NY+1) - DO J=JSTART_TH(N),JEND_TH(N) - DO K=0,TNKZ - DO I=0,NXP-1 - CFTH(I,K,J,N)=CFTH(I,K,J,N)+CIKX(I)*CS1(I,K,J) - END DO - END DO - END DO - - DO J=1,NY+1 - DO K=0,TNKZ - DO I=0,NXP-1 - CS1(I,K,J)=CIKZ(K)*CTH(I,K,J,N) - END DO - END DO - END DO - CALL FFT_XZ_TO_PHYSICAL(CS1,S1,0,NY+1) - do j=1,NY+1 - do k=0,NZP-1 - do i=0,NXM - S1(I,K,J)=KAPPA_T(I,K,J,N)*S1(I,K,J) - end do - end do - end do - CALL FFT_XZ_TO_FOURIER(S1,CS1,0,NY+1) - DO J=JSTART_TH(N),JEND_TH(N) - DO K=0,TNKZ - DO I=0,NXP-1 - CFTH(I,K,J,N)=CFTH(I,K,J,N)+CIKZ(K)*CS1(I,K,J) - END DO - END DO - END DO - END DO ! end do n - - -! 
Now, convert TH to physical space for calculation of nonlinear terms - DO N=1,N_TH - CS1(:,:,:)=CTH(:,:,:,N) - CALL FFT_XZ_TO_PHYSICAL(CS1,S1,0,NY+1) - TH(:,:,:,N)=S1(:,:,:) - END DO + call les_chan_th +C Re-apply the boundary conditions for the scalars +C In the case of Neumann (applied stress) BCs, these were changed in the LES + call APPLY_BC_TH_PHYS_MPI - ELSE -C If the subgrid model hasn't been called, then it is necessary to + ELSE +C If the subgrid model hasn't been called, then it is necessary to C convert to physical space. CALL FFT_XZ_TO_PHYSICAL(CU1,U1,0,NY+1) CALL FFT_XZ_TO_PHYSICAL(CU2,U2,0,NY+1) @@ -396,7 +328,7 @@ SUBROUTINE RK_CHAN_1 CS1(:,:,:)=CTH(:,:,:,N) CALL FFT_XZ_TO_PHYSICAL(CS1,S1,0,NY+1) TH(:,:,:,N)=S1(:,:,:) - END DO + END DO END IF @@ -404,7 +336,7 @@ SUBROUTINE RK_CHAN_1 C Compute the nonlinear products in physical space, then transform C back to Fourier space to compute the derivative. C Here, we compute the horizontal derivatives of the nonlinear terms -C which will be treated with RKW3. +C which will be treated with RKW3. 
C Do terms one at a time to save on memory C U1*U3 DO J=JSTART,JEND @@ -414,14 +346,14 @@ SUBROUTINE RK_CHAN_1 END DO END DO END DO - + CALL FFT_XZ_TO_FOURIER(S1,CS1,0,NY+1) - + DO J=JSTART,JEND DO K=0,TNKZ DO I=0,NXP-1 - CF1(I,K,J)=CF1(I,K,J) - CIKZ(K) * CS1(I,K,J) - CF3(I,K,J)=CF3(I,K,J) - CIKX(I) * CS1(I,K,J) + CF1(I,K,J)=CF1(I,K,J) - CIKZ(K) * CS1(I,K,J) + CF3(I,K,J)=CF3(I,K,J) - CIKX(I) * CS1(I,K,J) END DO END DO END DO @@ -434,13 +366,13 @@ SUBROUTINE RK_CHAN_1 END DO END DO END DO - + CALL FFT_XZ_TO_FOURIER(S1,CS1,0,NY+1) - + DO J=JSTART,JEND DO K=0,TNKZ DO I=0,NXP-1 - CF1(I,K,J)=CF1(I,K,J) - CIKX(I) * CS1(I,K,J) + CF1(I,K,J)=CF1(I,K,J) - CIKX(I) * CS1(I,K,J) END DO END DO END DO @@ -453,13 +385,13 @@ SUBROUTINE RK_CHAN_1 END DO END DO END DO - + CALL FFT_XZ_TO_FOURIER(S1,CS1,0,NY+1) - + DO J=JSTART,JEND DO K=0,TNKZ DO I=0,NXP-1 - CF3(I,K,J)=CF3(I,K,J) - CIKZ(K) * CS1(I,K,J) + CF3(I,K,J)=CF3(I,K,J) - CIKZ(K) * CS1(I,K,J) END DO END DO END DO @@ -470,18 +402,18 @@ SUBROUTINE RK_CHAN_1 DO K=0,NZP-1 DO I=0,NXM S1(I,K,J)=((DYF(J)*U1(I,K,J) - & +DYF(J-1)*U1(I,K,J-1))/(2.*DY(J))) + & +DYF(J-1)*U1(I,K,J-1))/(2.*DY(J))) & *U2(I,K,J) END DO END DO END DO - + CALL FFT_XZ_TO_FOURIER(S1,CS1,0,NY+1) - + DO J=2,NY DO K=0,TNKZ DO I=0,NXP-1 - CF2(I,K,J)=CF2(I,K,J) - CIKX(I) * CS1(I,K,J) + CF2(I,K,J)=CF2(I,K,J) - CIKX(I) * CS1(I,K,J) END DO END DO END DO @@ -491,14 +423,14 @@ SUBROUTINE RK_CHAN_1 DO K=0,NZP-1 DO I=0,NXM S1(I,K,J)=((DYF(J)*U3(I,K,J) - & +DYF(J-1)*U3(I,K,J-1))/(2.*DY(J))) + & +DYF(J-1)*U3(I,K,J-1))/(2.*DY(J))) & *U2(I,K,J) END DO END DO END DO - + CALL FFT_XZ_TO_FOURIER(S1,CS1,0,NY+1) - + DO J=2,NY DO K=0,TNKZ DO I=0,NXP-1 @@ -523,7 +455,7 @@ SUBROUTINE RK_CHAN_1 DO J=JSTART,JEND DO K=0,TNKZ DO I=0,NXP-1 - CF1(I,K,J)=CF1(I,K,J) - CS1(I,K,J) + CF1(I,K,J)=CF1(I,K,J) - CS1(I,K,J) END DO END DO END DO @@ -544,7 +476,7 @@ SUBROUTINE RK_CHAN_1 DO J=JSTART,JEND DO K=0,TNKZ DO I=0,NXP-1 - CF3(I,K,J)=CF3(I,K,J) - CS1(I,K,J) + CF3(I,K,J)=CF3(I,K,J) - CS1(I,K,J) END DO 
END DO END DO @@ -595,9 +527,9 @@ SUBROUTINE RK_CHAN_1 END DO C Convert RHS terms to physical space - CALL FFT_XZ_TO_PHYSICAL(CR1,R1,0,NY+1) - CALL FFT_XZ_TO_PHYSICAL(CR2,R2,2,NY) - CALL FFT_XZ_TO_PHYSICAL(CR3,R3,0,NY+1) + CALL FFT_XZ_TO_PHYSICAL(CR1,R1,0,NY+1) + CALL FFT_XZ_TO_PHYSICAL(CR2,R2,2,NY) + CALL FFT_XZ_TO_PHYSICAL(CR3,R3,0,NY+1) C Compute the vertical viscous term in physical space and add to RHS C This is the explicit part of the Crank-Nicolson term @@ -607,26 +539,26 @@ SUBROUTINE RK_CHAN_1 DO K=0,NZP-1 DO I=0,NXM R1(I,K,J)=R1(I,K,J)+TEMP1*NU_V_SCALE* - & ( ((U1(I,K,J+1) - U1(I,K,J)) / DY(J+1) + & ( ((U1(I,K,J+1) - U1(I,K,J)) / DY(J+1) & -(U1(I,K,J) - U1(I,K,J-1)) / DY(J)) /DYF(J) ) R3(I,K,J)=R3(I,K,J)+TEMP1*NU_V_SCALE* - & ( ((U3(I,K,J+1) - U3(I,K,J)) / DY(J+1) + & ( ((U3(I,K,J+1) - U3(I,K,J)) / DY(J+1) & -(U3(I,K,J) - U3(I,K,J-1)) / DY(J)) /DYF(J) ) END DO END DO END DO - DO J=2,NY + DO J=2,NY DO K=0,NZP-1 DO I=0,NXM R2(I,K,J)=R2(I,K,J)+TEMP1*NU_V_SCALE* - & ( ((U2(I,K,J+1) - U2(I,K,J)) / DYF(J) + & ( ((U2(I,K,J+1) - U2(I,K,J)) / DYF(J) & -(U2(I,K,J) - U2(I,K,J-1))/ DYF(J-1))/DY(J) ) END DO END DO END DO C If we are using a subgrid model, add the eddy viscosity term -C This is an added viscosity that will be treated just like the +C This is an added viscosity that will be treated just like the C molecular viscosity with Crank-Nicolson for the vertical derivatives IF (LES) then C Note, NU_T is defined at GY points @@ -634,23 +566,23 @@ SUBROUTINE RK_CHAN_1 DO K=0,NZP-1 DO I=0,NXM R1(I,K,J)=R1(I,K,J)+TEMP2* - & ( (NU_T(I,K,J+1) * (U1(I,K,J+1) - U1(I,K,J)) / DY(J+1) + & ( (NU_T(I,K,J+1) * (U1(I,K,J+1) - U1(I,K,J)) / DY(J+1) & - NU_T(I,K,J) * (U1(I,K,J) - U1(I,K,J-1)) / DY(J)) & /DYF(J) ) R3(I,K,J)=R3(I,K,J)+TEMP2* - & ( (NU_T(I,K,J+1) * (U3(I,K,J+1) - U3(I,K,J)) / DY(J+1) - & - NU_T(I,K,J) * (U3(I,K,J) - U3(I,K,J-1)) / DY(J)) + & ( (NU_T(I,K,J+1) * (U3(I,K,J+1) - U3(I,K,J)) / DY(J+1) + & - NU_T(I,K,J) * (U3(I,K,J) - U3(I,K,J-1)) / DY(J)) & /DYF(J) ) END 
DO END DO END DO ! Here, interpolate NU_T to GYF points - DO J=2,NY + DO J=2,NY DO K=0,NZP-1 DO I=0,NXM R2(I,K,J)=R2(I,K,J)+TEMP2* & ((0.5d0*(NU_T(I,K,J)+NU_T(I,K,J+1))*(U2(I,K,J+1)-U2(I,K,J)) - & / DYF(J) + & / DYF(J) & -0.5d0*(NU_T(I,K,J)+NU_T(I,K,J-1))*(U2(I,K,J)-U2(I,K,J-1)) & / DYF(J-1)) /DY(J) ) END DO @@ -682,7 +614,7 @@ SUBROUTINE RK_CHAN_1 END DO END DO END DO -! U3*TH +! U3*TH DO J=JSTART_TH(N),JEND_TH(N) DO K=0,NZP-1 DO I=0,NXM @@ -756,14 +688,14 @@ SUBROUTINE RK_CHAN_1 & -KAPPA_T(I,K,J,N)*(TH(I,K,J,N)-TH(I,K,J-1,N))/DY(J))/DYF(J)) END DO END DO - END DO + END DO END IF C -- Now, timestep the passive scalar equation -- C We solve the the passive scalar before the velocity so that C it is advected with the velocity from the previous R-K step -C which we have already made divergence free - +C which we have already made divergence free + ! Solve the implicit equation for THETA ! Note that the system size is NY+1, but only 1..NY are used @@ -775,8 +707,8 @@ SUBROUTINE RK_CHAN_1 MATU(I,J)=0. VEC(I,J)=0. END DO - END DO - + END DO + ! Build implicit matrix ! Use quasi-second order interpolation for TH on GY points DO K=0,NZP-1 @@ -793,8 +725,8 @@ SUBROUTINE RK_CHAN_1 ! IF using a subgrid model (LES) then add the eddy diffusivity part implicitly IF (LES) THEN DO J=JSTART_TH(N),JEND_TH(N) - DO I=0,NXM - MATL(I,J) = MATL(I,J) - TEMP2 * KAPPA_T(I,K,J,N) + DO I=0,NXM + MATL(I,J) = MATL(I,J) - TEMP2 * KAPPA_T(I,K,J,N) & / (DY(J)*DYF(J)) MATD(I,J) = MATD(I,J)+ TEMP2 * KAPPA_T(I,K,J+1,N) & / (DY(J+1)*DYF(J)) @@ -830,11 +762,11 @@ SUBROUTINE RK_CHAN_1 END DO ! END do k - END DO + END DO ! End do number of passive scalars END DO - + C Initialize the matrix to zeros to be used for implicit solves C Note that the system size is NY+1, but only 1..NY are used @@ -846,29 +778,29 @@ SUBROUTINE RK_CHAN_1 MATU(I,J)=0. VEC(I,J)=0. 
END DO - END DO + END DO C Build implicit matrix for U2 DO K=0,NZP-1 DO J=2,NY DO I=0,NXM MATL(I,J)= -TEMP1*NU_V_SCALE/(DYF(J-1)*DY(J)) - MATD(I,J)=1.+TEMP1*NU_V_SCALE/(DYF(J)*DY(J)) - & + TEMP1*NU_V_SCALE/(DYF(J-1)*DY(J)) + MATD(I,J)=1.+TEMP1*NU_V_SCALE/(DYF(J)*DY(J)) + & + TEMP1*NU_V_SCALE/(DYF(J-1)*DY(J)) MATU(I,J)= -TEMP1*NU_V_SCALE/(DYF(J)*DY(J)) VEC(I,J)=R2(I,K,J) - END DO + END DO END DO IF (LES) THEN ! IF using a subgrid model (LES) then add the eddy viscosity part implicitly DO J=2,NY DO I=0,NXM - MATL(I,J) = MATL(I,J) + MATL(I,J) = MATL(I,J) & - TEMP2 * 0.5d0*(NU_T(I,K,J)+NU_T(I,K,J-1))/(DYF(J-1)*DY(J)) - MATD(I,J) = MATD(I,J) + MATD(I,J) = MATD(I,J) & + TEMP2 * 0.5d0*(NU_T(I,K,J)+NU_T(I,K,J+1))/(DYF(J)*DY(J)) & + TEMP2 * 0.5d0*(NU_T(I,K,J)+NU_T(I,K,J-1))/(DYF(J-1)*DY(J)) - MATU(I,J) = MATU(I,J) + MATU(I,J) = MATU(I,J) & - TEMP2 * 0.5d0*(NU_T(I,K,J)+NU_T(I,K,J+1))/(DYF(J)*DY(J)) END DO END DO @@ -898,7 +830,7 @@ SUBROUTINE RK_CHAN_1 END DO END DO ! End do k - END DO + END DO C Solve for U1 C Note, here the matrix will be indexed from 1...NY+1 corresponding to U1(0:NY) @@ -911,15 +843,15 @@ SUBROUTINE RK_CHAN_1 MATU(I,J)=0. VEC(I,J)=0. END DO - END DO + END DO -C Build the implicit system of equations for U1 +C Build the implicit system of equations for U1 DO K=0,NZP-1 DO J=JSTART,JEND DO I=0,NXM MATL(I,J)=-TEMP1*NU_V_SCALE/(DY(J)*DYF(J)) MATD(I,J)=1.-TEMP1*NU_V_SCALE*(-1./(DY(J+1)*DYF(J)) - & -1./(DY(J)*DYF(J))) + & -1./(DY(J)*DYF(J))) MATU(I,J)=-TEMP1*NU_V_SCALE/(DY(J+1)*DYF(J)) VEC(I,J)=R1(I,K,J) END DO @@ -928,7 +860,7 @@ SUBROUTINE RK_CHAN_1 IF (LES) THEN DO J=JSTART,JEND DO I=0,NXM - MATL(I,J) = MATL(I,J) - TEMP2 * NU_T(I,K,J) + MATL(I,J) = MATL(I,J) - TEMP2 * NU_T(I,K,J) & / (DY(J)*DYF(J)) MATD(I,J) = MATD(I,J) + TEMP2 * NU_T(I,K,J+1) & / (DY(J+1)*DYF(J)) @@ -973,7 +905,7 @@ SUBROUTINE RK_CHAN_1 MATU(I,J)=0. VEC(I,J)=0. 
END DO - END DO + END DO C Solve for U3 C Note, here the matrix will be indexed from 1...NY+1 corresponding to U1(0:NY) @@ -1039,7 +971,7 @@ SUBROUTINE RK_CHAN_1 CALL COURANT END IF -! Transform TH and U to Fourier Space +! Transform TH and U to Fourier Space CALL FFT_XZ_TO_FOURIER(U1,CU1,0,NY+1) CALL FFT_XZ_TO_FOURIER(U2,CU2,0,NY+1) CALL FFT_XZ_TO_FOURIER(U3,CU3,0,NY+1) @@ -1076,23 +1008,23 @@ SUBROUTINE RK_CHAN_1 SUBROUTINE RK_CHAN_2 C----*|--.---------.---------.---------.---------.---------.---------.-|-------| C Alternative time-stepping algorithm for the channel-flow case. -C This algorithm uses Crank-Nicolson for all viscous terms and +C This algorithm uses Crank-Nicolson for all viscous terms and C third order Runge-Kutta for all nonlinear terms C INPUTS (in Fourier space): CUi, P, and (if k>1) CFi at (k-1) (for i=1,2,3) C OUTPUTS (in Fourier space): CUi, P, and (if k<3) CFi at (k) -C Each RK step, there are 11 FFT calls. 11 storage variables are used. +C Each RK step, there are 11 FFT calls. 11 storage variables are used. C----*|--.---------.---------.---------.---------.---------.---------.-|-------| INCLUDE 'header' - INTEGER I,J,K,N + INTEGER I,J,K,N REAL*8 TEMP1, TEMP2, TEMP3, TEMP4, TEMP5, UBULK ! STOP ----------------------------------- - IF (RANK.EQ.0) + IF (RANK.EQ.0) & write(*,*) ' RK_CHAN_2 not supported yet ' call mpi_finalize(ierror) - stop + stop ! ---------------------------------------- @@ -1102,14 +1034,14 @@ SUBROUTINE RK_CHAN_2 C----*|--.---------.---------.---------.---------.---------.---------.-|-------| SUBROUTINE REM_DIV_CHAN C----*|--.---------.---------.---------.---------.---------.---------.-|-------| - + C Compute varphi, store in variable CR1. 
C Solves for phi in computational space C H_BAR has been absorbed into PHI, so we are solving for H_BAR*PHI INCLUDE 'header' INTEGER I,J,K - + C First, Initialize the matrix components DO J=0,NY+1 DO I=0,NXP-1 @@ -1134,10 +1066,10 @@ SUBROUTINE REM_DIV_CHAN END DO C Now, create the RHS vector - DO J=1,NY + DO J=1,NY DO I=0,NXP-1 - VEC_C(I,J)=(CIKX(I)*CU1(I,K,J) - & + (CU2(I,K,J+1)-CU2(I,K,J))/DYF(J) + VEC_C(I,J)=(CIKX(I)*CU1(I,K,J) + & + (CU2(I,K,J+1)-CU2(I,K,J))/DYF(J) & + CIKZ(K)*CU3(I,K,J)) END DO END DO @@ -1157,7 +1089,7 @@ SUBROUTINE REM_DIV_CHAN IF ((K.EQ.0).AND.(I.EQ.0)) THEN C Use homogeneous dirichlet BCS for kx=kz=0 component at bottom wall C Otherwise the matrix will be singular - MATL_C(I,1)=0. + MATL_C(I,1)=0. MATD_C(I,1)=1. MATU_C(I,1)=0. VEC_C(I,1)=(0.,0.) @@ -1197,7 +1129,7 @@ SUBROUTINE REM_DIV_CHAN DO K=0,TNKZ DO I=0,NXP-1 CU1(I,K,J)=CU1(I,K,J)-CIKX(I)*CR1(I,K,J) - CU3(I,K,J)=CU3(I,K,J)-CIKZ(K)*CR1(I,K,J) + CU3(I,K,J)=CU3(I,K,J)-CIKZ(K)*CR1(I,K,J) END DO END DO END DO @@ -1221,14 +1153,14 @@ SUBROUTINE POISSON_P_CHAN INCLUDE 'header' INTEGER I,J,K,N - + if (flavor.eq.'Basic') then - IF (RANK.EQ.0) + IF (RANK.EQ.0) & WRITE(*,*) 'COMPUTING CP FROM CUI' end if -C First, construct the RHS vector, (dui/dxj)(duj/dxi) +C First, construct the RHS vector, (dui/dxj)(duj/dxi) DO J=2,NY DO K=0,TNKZ DO I=0,NXP-1 ! NKX @@ -1242,7 +1174,7 @@ SUBROUTINE POISSON_P_CHAN CALL FFT_XZ_TO_PHYSICAL(CF1,F1,0,NY+1) CALL FFT_XZ_TO_PHYSICAL(CF2,F2,0,NY+1) CALL FFT_XZ_TO_PHYSICAL(CF3,F3,0,NY+1) - + DO J=2,NY DO K=0,NZP-1 DO I=0,NXM @@ -1287,13 +1219,13 @@ SUBROUTINE POISSON_P_CHAN END DO END DO END DO - + CALL FFT_XZ_TO_FOURIER(F1,CF1,0,NY+1) C Add to RHS term DO J=2,NY DO K=0,TNKZ - DO I=0,NXP-1 ! NKX + DO I=0,NXP-1 ! 
NKX CS1(I,K,J)=CS1(I,K,J)+CF1(I,K,J) END DO END DO @@ -1345,7 +1277,7 @@ SUBROUTINE POISSON_P_CHAN CALL FFT_XZ_TO_PHYSICAL(CF1,F1,0,NY+1) CALL FFT_XZ_TO_PHYSICAL(CF2,F2,0,NY+1) - + C Compute product DO J=2,NY DO K=0,NZP-1 @@ -1364,8 +1296,8 @@ SUBROUTINE POISSON_P_CHAN CS1(I,K,J)=CS1(I,K,J)+CF1(I,K,J) END DO END DO - END DO - + END DO + C Finally, if the buoyancy force is active, then we need to add C the contribution of the density to the pressure. Note that the C plane averaged density and the corresponding hydrostatic part of the @@ -1373,7 +1305,7 @@ SUBROUTINE POISSON_P_CHAN DO N=1,N_TH DO J=2,NY DO K=0,TNKZ - DO I=0,NXP-1 + DO I=0,NXP-1 IF ((RANKZ.NE.0).OR.(I.NE.0).or.(K.NE.0)) THEN CS1(I,K,J)=CS1(I,K,J)+RI(N)* & (CTH(I,K,J+1,N)-CTH(I,K,J-1,N))/(GYF(J+1)-GYF(J-1)) @@ -1383,7 +1315,7 @@ SUBROUTINE POISSON_P_CHAN END DO END DO -C Now, the RHS term should be stored in CS1 +C Now, the RHS term should be stored in CS1 C Construct the tridiagonal system in Fourier space to solve for CP C First, zero the vectors @@ -1402,7 +1334,7 @@ SUBROUTINE POISSON_P_CHAN MATL_C(I,J)=1./(DY(J)*DYF(J)) MATD_C(I,J)=-KX2(I)-KZ2(K)-1./(DY(J+1)*DYF(J)) & -1./(DY(J)*DYF(J)) - MATU_C(I,J)=1./(DY(J+1)*DYF(J)) + MATU_C(I,J)=1./(DY(J+1)*DYF(J)) VEC_C(I,J)=-1.*CS1(I,K,J) END DO END DO @@ -1460,7 +1392,7 @@ SUBROUTINE INPUT_CHAN INTEGER I,J,K,N ! Read in input parameters specific for channel flow case - OPEN (11,file='input_chan.dat',form='formatted',status='old') + OPEN (11,file='input_chan.dat',form='formatted',status='old') C Read input file. CURRENT_VERSION=2.0 @@ -1469,23 +1401,23 @@ SUBROUTINE INPUT_CHAN READ(11,*) READ(11,*) READ(11,*) VERSION - IF (VERSION .NE. CURRENT_VERSION) + IF (VERSION .NE. 
CURRENT_VERSION) & STOP 'Wrong input data format input_chan' READ(11,*) READ(11,*) TIME_AD_METH - READ(11,*) + READ(11,*) READ(11,*) LES_MODEL_TYPE READ(11,*) READ(11,*) IC_TYPE, KICK READ(11,*) READ(11,*) I_RO - READ(11,*) + READ(11,*) READ(11,*) GRAV_X, GRAV_Y, GRAV_Z READ(11,*) READ(11,*) F_TYPE, UBULK0, PX0, OMEGA0, AMP_OMEGA0 READ(11,*) READ(11,*) U_BC_YMIN, U_BC_YMIN_C1, U_BC_YMIN_C2, U_BC_YMIN_C3 - READ(11,*) + READ(11,*) READ(11,*) V_BC_YMIN, V_BC_YMIN_C1, V_BC_YMIN_C2, V_BC_YMIN_C3 READ(11,*) READ(11,*) W_BC_YMIN, W_BC_YMIN_C1, W_BC_YMIN_C2, W_BC_YMIN_C3 @@ -1509,6 +1441,7 @@ SUBROUTINE INPUT_CHAN END DO if (RANK.eq.0) write(*,*) 'I_RO: ',I_RO + if (RANK.eq.0) write(*,*) 'DRHODX: ',DRHODX(1) RETURN END @@ -1558,7 +1491,7 @@ SUBROUTINE INIT_CHAN_MOVIE & ' (NzMovie: ', RankZMovie*NZP+NzMovie, ')' END IF - + RETURN END @@ -1569,26 +1502,26 @@ SUBROUTINE CREATE_GRID_CHAN CHARACTER*55 FNAME INTEGER I,J,K - IF (RANK.EQ.0) + IF (RANK.EQ.0) & WRITE (6,*) 'Fourier in X' DO I=0,NX GX(I)=(I*LX)/NX DX(I)=LX/NX - IF (VERBOSITY .GT. 3 .AND. RANK.EQ.0) + IF (VERBOSITY .GT. 3 .AND. RANK.EQ.0) & WRITE(6,*) 'GX(',I,') = ',GX(I) END DO - IF (RANK.EQ.0) + IF (RANK.EQ.0) & WRITE (6,*) 'Fourier in Z' DO K=0,NZ GZ(K)=(K*LZ)/NZ DZ(K)=LZ/NZ - IF (RANK.EQ.0 .AND. VERBOSITY .GT. 3) + IF (RANK.EQ.0 .AND. VERBOSITY .GT. 3) & WRITE(6,*) 'GZ(',K,') = ',GZ(K) END DO - IF (RANK.EQ.0) + IF (RANK.EQ.0) & WRITE (6,*) 'Finite-difference in Y' - IF (RANK.EQ.0) + IF (RANK.EQ.0) & write(*,*) 'USE_MPI: ',USE_MPI FNAME='grid.h5' @@ -1603,9 +1536,9 @@ SUBROUTINE CREATE_GRID_CHAN write(*,*) ' **** ERROR ******************************' write(*,*) ' Program not compiled with HDF5 libraries.' 
END IF - stop + stop #endif - else + else IF (USE_MPI) THEN FNAME='./ygrid'//trim(MPI_IO_NUM)//'.txt' IF (RANK.EQ.0) THEN @@ -1618,13 +1551,13 @@ SUBROUTINE CREATE_GRID_CHAN READ (30,*) NY_T C Check to make sure that grid file is the correct dimensions IF (NY_T.ne.NY) THEN - IF (RANK.EQ.0) + IF (RANK.EQ.0) & WRITE(6,*) 'NY, NY_T',NY,NY_T STOP 'Error: ygrid.txt wrong dimensions' END IF DO J=1,NY+1 READ(30,*) GY(j) - IF (VERBOSITY .GT. 3 .AND. RANK.EQ.0) + IF (VERBOSITY .GT. 3 .AND. RANK.EQ.0) & WRITE(6,*) 'GY(',J,') = ',GY(J) END DO DO J=1,NY @@ -1653,11 +1586,11 @@ SUBROUTINE CREATE_GRID_CHAN END DO DYF(NY+1)=DYF(NY) - RETURN + RETURN END - + C----*|--.---------.---------.---------.---------.---------.---------.-|------ SUBROUTINE APPLY_BC_1_LOWER(MATL,MATD,MATU,VEC) C----*|--.---------.---------.---------.---------.---------.---------.-|----- @@ -1668,15 +1601,15 @@ SUBROUTINE APPLY_BC_1_LOWER(MATL,MATD,MATU,VEC) IF (U_BC_YMIN.EQ.0) THEN C Dirichlet DO I=0,NXM - MATL(I,0)=0. + MATL(I,0)=0. MATD(I,0)=1. - MATU(I,0)=0. + MATU(I,0)=0. VEC(I,0)=0. - MATL(I,1)=0. + MATL(I,1)=0. MATD(I,1)=1. - MATU(I,1)=0. - VEC(I,1)=U_BC_YMIN_C1 + MATU(I,1)=0. + VEC(I,1)=U_BC_YMIN_C1 END DO ELSE C Neumann @@ -1695,7 +1628,7 @@ SUBROUTINE APPLY_BC_1_LOWER(MATL,MATD,MATU,VEC) END IF - RETURN + RETURN END C----*|--.---------.---------.---------.---------.---------.---------.-|------ @@ -1708,15 +1641,15 @@ SUBROUTINE APPLY_BC_1_LOWER_C(MATL_C,MATD_C,MATU_C,VEC_C) IF (U_BC_YMIN.EQ.0) THEN C Dirichlet DO I=0,NKX - MATL_C(I,0)=0. + MATL_C(I,0)=0. MATD_C(I,0)=1. - MATU_C(I,0)=0. + MATU_C(I,0)=0. VEC_C(I,0)=0. - MATL_C(I,1)=0. + MATL_C(I,1)=0. MATD_C(I,1)=1. - MATU_C(I,1)=0. - VEC_C(I,1)=U_BC_YMIN_C1 + MATU_C(I,1)=0. 
+ VEC_C(I,1)=U_BC_YMIN_C1 END DO ELSE C Neumann @@ -1735,7 +1668,7 @@ SUBROUTINE APPLY_BC_1_LOWER_C(MATL_C,MATD_C,MATU_C,VEC_C) END IF - RETURN + RETURN END @@ -1830,15 +1763,15 @@ SUBROUTINE APPLY_BC_2_LOWER(MATL,MATD,MATU,VEC) IF (V_BC_YMIN.EQ.0) THEN C Dirichlet DO I=0,NXM - MATL(I,1)=0.d0 + MATL(I,1)=0.d0 MATD(I,1)=1.d0 - MATU(I,1)=0.d0 - VEC(I,1)=V_BC_YMIN_C1 + MATU(I,1)=0.d0 + VEC(I,1)=V_BC_YMIN_C1 - MATL(I,2)=0.d0 + MATL(I,2)=0.d0 MATD(I,2)=1.d0 - MATU(I,2)=0.d0 - VEC(I,2)=V_BC_YMIN_C1 + MATU(I,2)=0.d0 + VEC(I,2)=V_BC_YMIN_C1 END DO ELSE IF (V_BC_YMIN.EQ.1) THEN C Neumann @@ -1871,15 +1804,15 @@ SUBROUTINE APPLY_BC_2_LOWER_C(MATL_C,MATD_C,MATU_C,VEC_C) IF (V_BC_YMIN.EQ.0) THEN C Dirichlet DO I=0,NKX - MATL_C(I,1)=0.d0 + MATL_C(I,1)=0.d0 MATD_C(I,1)=1.d0 - MATU_C(I,1)=0.d0 - VEC_C(I,1)=V_BC_YMIN_C1 + MATU_C(I,1)=0.d0 + VEC_C(I,1)=V_BC_YMIN_C1 - MATL_C(I,2)=0.d0 + MATL_C(I,2)=0.d0 MATD_C(I,2)=1.d0 - MATU_C(I,2)=0.d0 - VEC_C(I,2)=V_BC_YMIN_C1 + MATU_C(I,2)=0.d0 + VEC_C(I,2)=V_BC_YMIN_C1 END DO ELSE IF (V_BC_YMIN.EQ.1) THEN C Neumann @@ -1902,7 +1835,7 @@ SUBROUTINE APPLY_BC_2_LOWER_C(MATL_C,MATD_C,MATU_C,VEC_C) RETURN END - + C----*|--.---------.---------.---------.---------.---------.---------.-|-- SUBROUTINE APPLY_BC_2_UPPER(MATL,MATD,MATU,VEC) C----*|--.---------.---------.---------.---------.---------.---------.-|-- @@ -1916,7 +1849,7 @@ SUBROUTINE APPLY_BC_2_UPPER(MATL,MATD,MATU,VEC) MATD(I,NY+1)=1. MATU(I,NY+1)=0. VEC(I,NY+1)=V_BC_YMAX_C1 - + MATL(I,NY)=0. MATD(I,NY)=1. MATU(I,NY)=0. @@ -1930,7 +1863,7 @@ SUBROUTINE APPLY_BC_2_UPPER(MATL,MATD,MATU,VEC) MATU(I,NY+1)=0. VEC(I,NY+1)=DYF(NY)*V_BC_YMAX_C1 END DO - END IF + END IF RETURN END @@ -1947,7 +1880,7 @@ SUBROUTINE APPLY_BC_2_UPPER_C(MATL_C,MATD_C,MATU_C,VEC_C) MATD_C(I,NY+1)=1. MATU_C(I,NY+1)=0. VEC_C(I,NY+1)=V_BC_YMAX_C1 - + MATL_C(I,NY)=0. MATD_C(I,NY)=1. MATU_C(I,NY)=0. @@ -1960,7 +1893,7 @@ SUBROUTINE APPLY_BC_2_UPPER_C(MATL_C,MATD_C,MATU_C,VEC_C) MATD_C(I,NY+1)=1. MATU_C(I,NY+1)=0. 
VEC_C(I,NY+1)=DYF(NY)*V_BC_YMAX_C1 - END DO + END DO END IF RETURN END @@ -1975,14 +1908,14 @@ SUBROUTINE APPLY_BC_3_LOWER(MATL,MATD,MATU,VEC) IF (W_BC_YMIN.EQ.0) THEN C Dirichlet DO I=0,NXM - MATL(I,0)=0. + MATL(I,0)=0. MATD(I,0)=1. - MATU(I,0)=0. + MATU(I,0)=0. VEC(I,0)=0. - MATL(I,1)=0. + MATL(I,1)=0. MATD(I,1)=1. - MATU(I,1)=0. + MATU(I,1)=0. VEC(I,1)=W_BC_YMIN_C1 END DO ELSE @@ -2015,14 +1948,14 @@ SUBROUTINE APPLY_BC_3_LOWER_C(MATL_C,MATD_C,MATU_C,VEC_C) IF (W_BC_YMIN.EQ.0) THEN C Dirichlet DO I=0,NKX - MATL_C(I,0)=0. + MATL_C(I,0)=0. MATD_C(I,0)=1. - MATU_C(I,0)=0. + MATU_C(I,0)=0. VEC_C(I,0)=0. - MATL_C(I,1)=0. + MATL_C(I,1)=0. MATD_C(I,1)=1. - MATU_C(I,1)=0. + MATU_C(I,1)=0. VEC_C(I,1)=W_BC_YMIN_C1 END DO ELSE @@ -2082,7 +2015,7 @@ SUBROUTINE APPLY_BC_3_UPPER(MATL,MATD,MATU,VEC) END IF - RETURN + RETURN END @@ -2135,14 +2068,14 @@ subroutine APPLY_BC_TH_LOWER(MATL,MATD,MATU,VEC,N) if (TH_BC_YMIN(N).eq.0) then ! Dirichlet do i=0,NXM - MATL(i,0)=0. + MATL(i,0)=0. MATD(i,0)=1. - MATU(i,0)=0. + MATU(i,0)=0. VEC(i,0)=0. - MATL(i,1)=0. + MATL(i,1)=0. MATD(i,1)=1. - MATU(i,1)=0. + MATU(i,1)=0. VEC(i,1)=TH_BC_YMIN_C1(N) end do else @@ -2173,14 +2106,14 @@ subroutine APPLY_BC_TH_LOWER_C(MATL_C,MATD_C,MATU_C,VEC_C,N) if (TH_BC_YMIN(N).eq.0) then ! Dirichlet do i=0,NKX - MATL_C(i,0)=0. + MATL_C(i,0)=0. MATD_C(i,0)=1. - MATU_C(i,0)=0. + MATU_C(i,0)=0. VEC_C(i,0)=0. - MATL_C(i,1)=0. + MATL_C(i,1)=0. MATD_C(i,1)=1. - MATU_C(i,1)=0. + MATU_C(i,1)=0. 
VEC_C(i,1)=TH_BC_YMIN_C1(N) end do else @@ -2290,11 +2223,11 @@ SUBROUTINE APPLY_BC_VEL_LOWER C It sets the appropriate boundary conditions including ghost cell values C on the velocity field in Fourier space INCLUDE 'header' - INTEGER I,K + INTEGER I,K -C Now, apply the boundary conditions depending on the type specified +C Now, apply the boundary conditions depending on the type specified IF (U_BC_YMIN.EQ.0) THEN -C Dirichlet +C Dirichlet C Start with zero DO K=0,TNKZ DO I=0,NXP-1 @@ -2352,7 +2285,7 @@ SUBROUTINE APPLY_BC_VEL_LOWER CU3(0,0,0)=CU3(0,0,1)-DY(1)*W_BC_YMIN_C1 END IF ELSE - STOP 'Error: W_BC_YMIN must be 0, or 1' + STOP 'Error: W_BC_YMIN must be 0, or 1' END IF IF (V_BC_YMIN.EQ.0) THEN @@ -2360,8 +2293,8 @@ SUBROUTINE APPLY_BC_VEL_LOWER C Set the vertical velocity at GYF(1) (halfway between GY(2) and GY(1)) DO K=0,TNKZ DO I=0,NXP-1 - CU2(I,K,1)=2.d0*V_BC_YMIN_C1-CU2(I,K,2) - CU2(I,K,0)=CU2(I,K,1) + CU2(I,K,1)=2.d0*V_BC_YMIN_C1-CU2(I,K,2) + CU2(I,K,0)=CU2(I,K,1) END DO END DO ELSE IF (V_BC_YMIN.EQ.1) THEN @@ -2373,8 +2306,8 @@ SUBROUTINE APPLY_BC_VEL_LOWER END DO END DO IF (RANKZ.EQ.0) THEN - CU2(0,0,1)=CU2(0,0,2)-DYF(1)*V_BC_YMIN_C1 - CU2(0,0,0)=CU2(0,0,1)-DYF(1)*V_BC_YMIN_C1 + CU2(0,0,1)=CU2(0,0,2)-DYF(1)*V_BC_YMIN_C1 + CU2(0,0,0)=CU2(0,0,1)-DYF(1)*V_BC_YMIN_C1 END IF ELSE IF (V_BC_YMIN.EQ.2) THEN @@ -2395,11 +2328,11 @@ SUBROUTINE APPLY_BC_VEL_UPPER C It sets the appropriate boundary conditions including ghost cell values C on the velocity field in Fourier space INCLUDE 'header' - INTEGER I,K + INTEGER I,K ! 
Now, apply boundary conditions to the top of the domain IF (U_BC_YMAX.EQ.0) THEN -C Dirichlet +C Dirichlet C Start with zero DO K=0,TNKZ DO I=0,NXP-1 @@ -2493,6 +2426,82 @@ SUBROUTINE APPLY_BC_VEL_UPPER RETURN END +C----*|--.---------.---------.---------.---------.---------.---------.-|-- + SUBROUTINE APPLY_BC_TH_PHYS_LOWER +C----*|--.---------.---------.---------.---------.---------.---------.-|-- +C This subroutine is called after initializing the flow +C It sets the appropriate boundary conditions including ghost cell values +C on the velocity field in Physical space + INCLUDE 'header' + INTEGER I,K,N + + DO N=1,N_TH + +C Now, apply the boundary conditions depending on the type specified + IF (TH_BC_YMIN(N).EQ.0) THEN +C Dirichlet +C Start with zero + DO K=0,NZP-1 + DO I=0,NXM + TH(I,K,0,N)=TH_BC_YMIN_C1(N) + TH(I,K,1,N)=TH_BC_YMIN_C1(N) + END DO + END DO + ELSE IF (TH_BC_YMIN(1).EQ.1) THEN +C Neumann + DO K=0,NZP-1 + DO I=0,NXM + TH(I,K,1,N)=TH(I,K,2,N)-DY(2)*TH_BC_YMIN_C1(N) + TH(I,K,0,N)=TH(I,K,1,N)-DY(1)*TH_BC_YMIN_C1(N) + END DO + END DO + ELSE + STOP 'Error: TH_BC_YMIN must be 0, or 1' + END IF + + END DO + + RETURN + END + +C----*|--.---------.---------.---------.---------.---------.---------.-|-- + SUBROUTINE APPLY_BC_TH_PHYS_UPPER +C----*|--.---------.---------.---------.---------.---------.---------.-|-- +C This subroutine is called after initializing the flow +C It sets the appropriate boundary conditions including ghost cell values +C on the velocity field in Fourier space + INCLUDE 'header' + INTEGER I,K,N + + DO N=1,N_TH + +! 
Now, apply boundary conditions to the top of the domain + IF (TH_BC_YMAX(N).EQ.0) THEN +C Dirichlet +C Start with zero + DO K=0,NZP-1 + DO I=0,NXM + TH(I,K,NY,N)=TH_BC_YMAX_C1(N) + TH(I,K,NY+1,N)=TH_BC_YMAX_C1(N) + END DO + END DO + ELSE IF (TH_BC_YMAX(N).EQ.1) THEN +C Neumann + DO K=0,NZP-1 + DO I=0,NXM + TH(I,K,NY,N)=TH(I,K,NY-1,N)+DY(NY)*TH_BC_YMAX_C1(N) + TH(I,K,NY+1,N)=TH(I,K,NY,N)+DY(NY)*TH_BC_YMAX_C1(N) + END DO + END DO + ELSE + STOP 'Error: TH_BC_YMAX must be 0, or 1' + END IF + + END DO + + RETURN + END + C----*|--.---------.---------.---------.---------.---------.---------.-|-- SUBROUTINE APPLY_BC_VEL_PHYS_LOWER C----*|--.---------.---------.---------.---------.---------.---------.-|-- @@ -2500,11 +2509,11 @@ SUBROUTINE APPLY_BC_VEL_PHYS_LOWER C It sets the appropriate boundary conditions including ghost cell values C on the velocity field in Physical space INCLUDE 'header' - INTEGER I,K + INTEGER I,K -C Now, apply the boundary conditions depending on the type specified +C Now, apply the boundary conditions depending on the type specified IF (U_BC_YMIN.EQ.0) THEN -C Dirichlet +C Dirichlet C Start with zero DO K=0,NZP-1 DO I=0,NXM @@ -2542,7 +2551,7 @@ SUBROUTINE APPLY_BC_VEL_PHYS_LOWER END DO END DO ELSE - STOP 'Error: W_BC_YMIN must be 0, or 1' + STOP 'Error: W_BC_YMIN must be 0, or 1' END IF IF (V_BC_YMIN.EQ.0) THEN @@ -2550,8 +2559,8 @@ SUBROUTINE APPLY_BC_VEL_PHYS_LOWER C Set the vertical velocity at GYF(1) (halfway between GY(2) and GY(1)) DO K=0,NZP-1 DO I=0,NXM - U2(I,K,1)=2.d0*V_BC_YMIN_C1-U2(I,K,2) - U2(I,K,0)=U2(I,K,1) + U2(I,K,1)=2.d0*V_BC_YMIN_C1-U2(I,K,2) + U2(I,K,0)=U2(I,K,1) END DO END DO ELSE IF (V_BC_YMIN.EQ.1) THEN @@ -2580,11 +2589,11 @@ SUBROUTINE APPLY_BC_VEL_PHYS_UPPER C It sets the appropriate boundary conditions including ghost cell values C on the velocity field in Fourier space INCLUDE 'header' - INTEGER I,K + INTEGER I,K ! 
Now, apply boundary conditions to the top of the domain IF (U_BC_YMAX.EQ.0) THEN -C Dirichlet +C Dirichlet C Start with zero DO K=0,NZP-1 DO I=0,NXM @@ -2627,10 +2636,10 @@ SUBROUTINE APPLY_BC_VEL_PHYS_UPPER IF (V_BC_YMAX.EQ.0) THEN C Dirichlet -C Set the vertical velocity at GYF(NY) (halfway between GY(NY) and GY(NY+1)) DO K=0,NZP-1 DO I=0,NXM - U2(I,K,NY+1)=2.d0*V_BC_YMAX_C1-U2(I,K,NY) + U2(I,K,NY+1)=V_BC_YMAX_C1 + U2(I,K,NY)=V_BC_YMAX_C1 END DO END DO ELSE IF (V_BC_YMAX.EQ.1) THEN @@ -2685,7 +2694,7 @@ SUBROUTINE THOMAS_REAL(A,B,C,G,NY,NX) RETURN END -C----*|--.---------.---------.---------.---------.---------.---------.-|-------| +C----*|--.---------.---------.---------.---------.---------.---------.-|-------| SUBROUTINE THOMAS_COMPLEX(A,B,C,G,NY,NX) C----*|--.---------.---------.---------.---------.---------.---------.-|-------| @@ -2721,8 +2730,98 @@ SUBROUTINE THOMAS_COMPLEX(A,B,C,G,NY,NX) RETURN END +C----*|--.---------.---------.---------.---------.---------.---------.-|-- + SUBROUTINE APPLY_BC_SCALAR_LOWER +C----*|--.---------.---------.---------.---------.---------.---------.-|-- +C This subroutine is called after initializing the flow +C It sets the appropriate boundary conditions including ghost cell values +C on the scalar fields in Fourier space + INCLUDE 'header' + INTEGER I,K,N + + DO N=1,N_TH +C Now, apply the boundary conditions depending on the type specified + IF (TH_BC_YMIN(N).EQ.0) THEN +C Dirichlet +C Start with zero + DO K=0,TNKZ + DO I=0,NXP-1 + CTH(I,K,0,N)=0.d0 + CTH(I,K,1,N)=0.d0 + END DO + END DO +C Now, set only the mean + IF (RANKZ.EQ.0) THEN + CTH(0,0,1,N)=TH_BC_YMIN_C1(N) + CTH(0,0,0,N)=TH_BC_YMIN_C1(N) + END IF + ELSE IF (TH_BC_YMIN(N).EQ.1) THEN +C Neumann + DO K=0,TNKZ + DO I=0,NXP-1 + CTH(I,K,1,N)=CTH(I,K,2,N) + CTH(I,K,0,N)=CTH(I,K,1,N) + END DO + END DO +C Now, Apply BC to mean + IF (RANKZ.EQ.0) THEN + CTH(0,0,1,N)=CTH(0,0,2,N)-DY(2)*TH_BC_YMIN_C1(N) + CTH(0,0,0,N)=CTH(0,0,1,N)-DY(1)*TH_BC_YMIN_C1(N) + END IF + ELSE + STOP 
'Error: TH_BC_YMIN must be 0, or 1' + END IF + END DO + RETURN + END +C----*|--.---------.---------.---------.---------.---------.---------.-|-- + SUBROUTINE APPLY_BC_SCALAR_UPPER +C----*|--.---------.---------.---------.---------.---------.---------.-|-- +C This subroutine is called after initializing the flow +C It sets the appropriate boundary conditions including ghost cell values +C on the scalar fields in Fourier space + INCLUDE 'header' + INTEGER I,K,N + + DO N=1,N_TH + +! Now, apply boundary conditions to the top of the domain + IF (TH_BC_YMAX(N).EQ.0) THEN +C Dirichlet +C Start with zero + DO K=0,TNKZ + DO I=0,NXP-1 + CTH(I,K,NY,N)=0.d0 + CTH(I,K,NY+1,N)=0.d0 + END DO + END DO +C Now, set only the mean + IF (RANKZ.EQ.0) THEN + CTH(0,0,NY,N)=TH_BC_YMAX_C1(N) + CTH(0,0,NY+1,N)=TH_BC_YMAX_C1(N) + END IF + ELSE IF (TH_BC_YMAX(N).EQ.1) THEN +C Neumann + DO K=0,TNKZ + DO I=0,NXP-1 + CTH(I,K,NY,N)=CTH(I,K,NY-1,N) + CTH(I,K,NY+1,N)=CTH(I,K,NY,N) + END DO + END DO +C Now, Apply BC to mean + IF (RANKZ.EQ.0) THEN + CTH(0,0,NY,N)=CTH(0,0,NY-1,N)+DY(NY)*TH_BC_YMAX_C1(N) + CTH(0,0,NY+1,N)=CTH(0,0,NY,N)+DY(NY)*TH_BC_YMAX_C1(N) + END IF + ELSE + STOP 'Error: TH_BC_YMAX must be 0, or 1' + END IF + END DO + + RETURN + END From 900e0e51fd0486af7fb257231e9a71cfdc3f9535 Mon Sep 17 00:00:00 2001 From: johnryantaylor Date: Thu, 19 Jul 2018 11:42:58 +0100 Subject: [PATCH 46/66] Fixed bug in tkebudget --- diablo_2.0/for/header | 2 +- diablo_2.0/for/save_stats.f | 191 +++++++++++++++++++++++++++--------- 2 files changed, 144 insertions(+), 49 deletions(-) diff --git a/diablo_2.0/for/header b/diablo_2.0/for/header index 87cc94a..803d2a6 100644 --- a/diablo_2.0/for/header +++ b/diablo_2.0/for/header @@ -284,7 +284,7 @@ INTEGER NSAMPLES COMMON /STAT_VARS/ THBAR, THRMS, UBAR, VBAR, WBAR, URMS, URMS_B, - & VRMS_B,WRMS_B,TKE_B,WRMS, VRMS,UV,WV,UW, NSAMPLES + & UME,VME,WME,VRMS_B,WRMS_B,TKE_B,WRMS, VRMS,UV,WV,UW, NSAMPLES 
!----*|--.---------.---------.---------.---------.---------.---------.-|-------| ! Variable viscosity parameters diff --git a/diablo_2.0/for/save_stats.f b/diablo_2.0/for/save_stats.f index bc4c0f4..7d186be 100644 --- a/diablo_2.0/for/save_stats.f +++ b/diablo_2.0/for/save_stats.f @@ -9,8 +9,7 @@ SUBROUTINE SAVE_STATS_CHAN(FINAL) CHARACTER*20 GNAME LOGICAL FINAL integer i,j,k,n - real*8 uc, ubulk,zmean - real*8 zvar(0:NZP) + real*8 uc, ubulk ! This variable is used to add up scalar diagnostics real*8 thsum(0:NY+1) @@ -166,6 +165,10 @@ SUBROUTINE SAVE_STATS_CHAN(FINAL) call fft_xz_to_physical(CU2,U2,0,NY+1) call fft_xz_to_physical(CU3,U3,0,NY+1) +! Calculate the dissipation rate + call tkebudget_chan + IF (LES) call tkebudget_chan_les + ! Get the turbulent kinetic energy at each level do j=1,NY urms(j)=0. @@ -200,18 +203,22 @@ SUBROUTINE SAVE_STATS_CHAN(FINAL) CALL INTEGRATE_Y_VAR(wrms,wrms_b,MPI_COMM_Y) ! Compute the Reynolds stress and mean velocity gradient +! Here, uv and wv are defined on the GY grid +! uv is defined on the GYF grid do j=1,NY uv(j)=0. uw(j)=0. wv(j)=0. do k=0,NZP-1 do i=0,NXM - uv(j)=uv(j)+(U1(i,k,j)-ume(j)) - + *(0.5*(U2(i,k,j)+U2(i,k,j+1)) - & -0.5*(vme(j)+vme(j+1))) - wv(j)=wv(j)+(U3(i,k,j)-wme(j)) - + *(0.5*(U2(i,k,j)+U2(i,k,j+1)) - & -0.5*(vme(j)+vme(j+1))) + uv(j)=uv(j)+ + & (DYF(j-1)*(U1(i,k,j)-ume(j))+DYF(j)*(U1(i,k,j-1)-ume(j-1))) + & /(2.d0*DY(J)) + & *(U2(i,k,j)-vme(j)) + wv(j)=wv(j)+ + & (DYF(j-1)*(U3(i,k,j)-wme(j))+DYF(j)*(U3(i,k,j-1)-wme(j-1))) + & /(2.d0*DY(J)) + & *(U2(i,k,j)-vme(j)) uw(j)=uw(j)+(U1(i,k,j)-ume(j)) + *(U3(i,k,j)-wme(j)) end do @@ -347,14 +354,9 @@ SUBROUTINE SAVE_STATS_CHAN(FINAL) IF (RANKZ.eq.0) then - - write(*,*) RANK,'Writing GYF' gname='gyf' Diag=gyf(1:NY) call WriteStatH5(FNAME,gname,Diag) - write(*,*) RANK,'Done writing GYF' - - pause gname='ume' Diag=ume(1:NY) @@ -445,9 +447,6 @@ SUBROUTINE SAVE_STATS_CHAN(FINAL) 401 format(I3,' ',17(F30.20,' ')) #endif -! 
Calculate the dissipation rate - call tkebudget_chan - IF (LES) call tkebudget_chan_les ! Do over the number of passive scalars do n=1,N_TH @@ -539,17 +538,17 @@ SUBROUTINE SAVE_STATS_CHAN(FINAL) #ifdef HDF5 if (MOVIE) then FNAME='movie.h5' + if (n.eq.1) then if (USE_MPI) then call mpi_barrier(MPI_COMM_WORLD,ierror) end if -! CALL INTEGRATE_Z_VAR(TH(:,:,:,n),varxy,MPI_COMM_Z) IF (RANKZ.EQ.RANKZMOVIE) THEN do I=0,NXM do J=1,NY varxy(i,j)=TH(i,NzMovie,j,n) end do end do - GNAME='th'//CHAR(n+48)//'_xy' + GNAME='th1_xy' call writeHDF5_xyplane(FNAME,GNAME,varxy) END IF if (USE_MPI) then @@ -561,7 +560,7 @@ SUBROUTINE SAVE_STATS_CHAN(FINAL) varxz(i,j)=TH(i,j,NyMovie,n) end do end do - GNAME='th'//CHAR(n+48)//'_xz' + GNAME='th1_xz' call writeHDF5_xzplane(FNAME,GNAME,varxz) END IF if (USE_MPI) then @@ -572,8 +571,44 @@ SUBROUTINE SAVE_STATS_CHAN(FINAL) varzy(i,j)=TH(NxMovie,i,j,n) end do end do - GNAME='th'//CHAR(n+48)//'_zy' + GNAME='th1_zy' call writeHDF5_zyplane(FNAME,GNAME,varzy) + else if (n.eq.2) then + if (USE_MPI) then + call mpi_barrier(MPI_COMM_WORLD,ierror) + end if + IF (RANKZ.EQ.RANKZMOVIE) THEN + do I=0,NXM + do J=1,NY + varxy(i,j)=TH(i,NzMovie,j,n) + end do + end do + GNAME='th2_xy' + call writeHDF5_xyplane(FNAME,GNAME,varxy) + END IF + if (USE_MPI) then + call mpi_barrier(MPI_COMM_WORLD,ierror) + end if + IF (RANKY.EQ.RANKYMOVIE) THEN + do I=0,NXM + do J=0,NZP-1 + varxz(i,j)=TH(i,j,NyMovie,n) + end do + end do + GNAME='th2_xz' + call writeHDF5_xzplane(FNAME,GNAME,varxz) + END IF + if (USE_MPI) then + call mpi_barrier(MPI_COMM_WORLD,ierror) + end if + do I=0,NZP-1 + do J=1,NY + varzy(i,j)=TH(NxMovie,i,j,n) + end do + end do + GNAME='th2_zy' + call writeHDF5_zyplane(FNAME,GNAME,varzy) + end if END IF #endif @@ -673,7 +708,6 @@ SUBROUTINE SAVE_STATS_CHAN(FINAL) if (USE_MPI) then call mpi_barrier(MPI_COMM_WORLD,ierror) end if -! 
CALL INTEGRATE_Z_VAR(U1(:,:,:),varxy,MPI_COMM_Z) IF (RANKZ.EQ.RANKZMOVIE) THEN do I=0,NXM do J=1,NY @@ -687,7 +721,6 @@ SUBROUTINE SAVE_STATS_CHAN(FINAL) if (USE_MPI) then call mpi_barrier(MPI_COMM_WORLD,ierror) end if -! CALL INTEGRATE_Z_VAR(U2(:,:,:),varxy,MPI_COMM_Z) IF (RANKZ.EQ.RANKZMOVIE) THEN do I=0,NXM do J=1,NY @@ -701,7 +734,6 @@ SUBROUTINE SAVE_STATS_CHAN(FINAL) if (USE_MPI) then call mpi_barrier(MPI_COMM_WORLD,ierror) end if -! CALL INTEGRATE_Z_VAR(U3(:,:,:),varxy,MPI_COMM_Z) IF (RANKZ.EQ.RANKZMOVIE) THEN do I=0,NXM do J=1,NY @@ -716,7 +748,6 @@ SUBROUTINE SAVE_STATS_CHAN(FINAL) if (USE_MPI) then call mpi_barrier(MPI_COMM_WORLD,ierror) end if -! CALL INTEGRATE_Z_VAR(NU_T(:,:,:),varxy,MPI_COMM_Z) IF (RANKZ.EQ.RANKZMOVIE) THEN do I=0,NXM do J=1,NY @@ -932,17 +963,22 @@ subroutine tkebudget_chan_les subroutine tkebudget_chan -! NOte, it is important to only run this routine after complete R-K -! time advancement since F1 is overwritten which is needed between R-K steps +! Calculate the turbulent dissipation rate, epsilon +! Note that this is actually the pseudo-dissipation (see Pope, Turb. Flows) +! for an explanation include 'header' character*35 FNAME character*20 GNAME real*8 Diag(1:NY) + real*8 varxy(0:NXM,1:NY),varzy(0:NZP-1,1:NY),varxz(0:NXM,0:NZP-1) integer i,j,k +! Store the 3D dissipation rate in F1 + F1(:,:,:)=0.d0 + ! Compute the turbulent dissipation rate, epsilon=nu* -! epsilon is calculated at GY points +! epsilon will be calculated on the GY grid do j=1,NY epsilon(j)=0. end do @@ -960,6 +996,8 @@ subroutine tkebudget_chan do k=0,NZP-1 do i=0,NXM epsilon(j)=epsilon(j)+(DYF(j-1)*S1(i,k,j)**2.d0 + & +DYF(j)*S1(i,k,j-1)**2.d0)/(2.d0*DY(j)) + F1(i,k,j)=F1(i,k,j)+(DYF(j-1)*S1(i,k,j)**2.d0 & +DYF(j)*S1(i,k,j-1)**2.d0)/(2.d0*DY(j)) end do end do @@ -978,15 +1016,16 @@ subroutine tkebudget_chan do k=0,NZP-1 do i=0,NXM epsilon(j)=epsilon(j)+(S1(i,k,j)**2.0) + F1(i,k,j)=F1(i,k,j)+(S1(i,k,j)**2.0) end do end do end do -! 
Compute du/dy at GYF gridpoints, note remove mean +! Compute du/dy at GY gridpoints, note remove mean do j=1,NY do k=0,NZP-1 do i=0,NXM - F1(i,k,j)=((U1(i,k,j)-dble(CR1(0,0,j))) - & -(U1(i,k,j-1)-dble(CR1(0,0,j-1)))) + S1(i,k,j)=((U1(i,k,j)-ume(j)) + & -(U1(i,k,j-1)-ume(j-1))) & /DY(j) end do end do @@ -994,7 +1033,8 @@ subroutine tkebudget_chan do j=1,NY do k=0,NZP-1 do i=0,NXM - epsilon(j)=epsilon(j)+(F1(i,k,j)**2.0) + epsilon(j)=epsilon(j)+(S1(i,k,j)**2.0) + F1(i,k,j)=F1(i,k,j)+(S1(i,k,j)**2.0) end do end do end do @@ -1012,35 +1052,39 @@ subroutine tkebudget_chan do k=0,NZP-1 do i=0,NXM epsilon(j)=epsilon(j)+(DYF(j-1)*S1(i,k,j)**2.d0 + & +DYF(j)*S1(i,k,j-1)**2.d0)/(2.d0*DY(j)) + F1(i,k,j)=F1(i,k,j)+(DYF(j-1)*S1(i,k,j)**2.d0 & +DYF(j)*S1(i,k,j-1)**2.d0)/(2.d0*DY(j)) end do end do end do -! Compute du/dz at GYF gridpoints, note remove mean +! Compute du/dz at GY gridpoints ! Store du/dz in CS1 do j=1,NY do k=0,TNKZ do i=0,NXP-1 - CF1(i,k,j)=CIKZ(k)*CR1(i,k,j) + CS1(i,k,j)=CIKZ(k)*CR1(i,k,j) end do end do end do ! Convert to physical space - call fft_xz_to_physical(CF1,F1,0,NY+1) + call fft_xz_to_physical(CS1,S1,0,NY+1) do j=1,NY do k=0,NZP-1 do i=0,NXM - epsilon(j)=epsilon(j)+(DYF(j-1)*F1(i,k,j)**2.d0 - & +DYF(j)*F1(i,k,j-1)**2.d0)/(2.d0*DY(j)) + epsilon(j)=epsilon(j)+(DYF(j-1)*S1(i,k,j)**2.d0 + & +DYF(j)*S1(i,k,j-1)**2.d0)/(2.d0*DY(j)) + F1(i,k,j)=F1(i,k,j)+(DYF(j-1)*S1(i,k,j)**2.d0 + & +DYF(j)*S1(i,k,j-1)**2.d0)/(2.d0*DY(j)) end do end do end do -! Compute dv/dy at GYF gridpoints, note remove mean +! Compute dv/dy at GY gridpoints, note remove mean do j=2,NYM do k=0,NZP-1 do i=0,NXM - S1(i,k,j)=((U2(i,k,j+1)-dble(CR2(0,0,j+1))) - & -(U2(i,k,j-1)-dble(CR2(0,0,j-1)))) + S1(i,k,j)=((U2(i,k,j+1)-vme(j)) + & -(U2(i,k,j-1)-vme(j-1))) & /(GY(j+1)-GY(j-1)) end do end do @@ -1049,15 +1093,16 @@ subroutine tkebudget_chan do k=0,NZP-1 do i=0,NXM epsilon(j)=epsilon(j)+(S1(i,k,j)**2.0) + F1(i,k,j)=F1(i,k,j)+(S1(i,k,j)**2.0) end do end do end do -! 
Compute dw/dy at GYF gridpoints, note remove mean +! Compute dw/dy at GY gridpoints, note remove mean do j=1,NY do k=0,NZP-1 do i=0,NXM - S1(i,k,j)=((U3(i,k,j)-dble(CR3(0,0,j))) - & -(U3(i,k,j-1)-dble(CR3(0,0,j-1)))) + S1(i,k,j)=((U3(i,k,j)-wme(j)) + & -(U3(i,k,j-1)-wme(j-1))) & /DY(j) end do end do @@ -1066,23 +1111,25 @@ subroutine tkebudget_chan do k=0,NZP-1 do i=0,NXM epsilon(j)=epsilon(j)+(S1(i,k,j)**2.0) + F1(i,k,j)=F1(i,k,j)+(S1(i,k,j)**2.0) end do end do end do -! Store dv/dz in CF1 +! Store dv/dz in CS1 do j=1,NY do k=0,TNKZ do i=0,NXP-1 - CF1(i,k,j)=CIKZ(k)*CR2(i,k,j) + CS1(i,k,j)=CIKZ(k)*CR2(i,k,j) end do end do end do ! Convert to physical space - call fft_xz_to_physical(CF1,F1,0,NY+1) + call fft_xz_to_physical(CS1,S1,0,NY+1) do j=1,NY do k=0,NZP-1 do i=0,NXM - epsilon(j)=epsilon(j)+(F1(i,k,j)**2.0) + epsilon(j)=epsilon(j)+(S1(i,k,j)**2.0) + F1(i,k,j)=F1(i,k,j)+(S1(i,k,j)**2.0) end do end do end do @@ -1100,6 +1147,8 @@ subroutine tkebudget_chan do k=0,NZP-1 do i=0,NXM epsilon(j)=epsilon(j)+(DYF(j-1)*S1(i,k,j)**2.d0 + & +DYF(j)*S1(i,k,j-1)**2.d0)/(2.d0*DY(j)) + F1(i,k,j)=F1(i,k,j)+(DYF(j-1)*S1(i,k,j)**2.d0 & +DYF(j)*S1(i,k,j-1)**2.d0)/(2.d0*DY(j)) end do end do @@ -1107,20 +1156,66 @@ subroutine tkebudget_chan do j=1,NY epsilon(j)=NU*epsilon(j)/dble(NX*NZ) end do + F1=NU*F1 call mpi_allreduce(mpi_in_place,epsilon,NY+2,MPI_DOUBLE_PRECISION, & MPI_SUM,MPI_COMM_Z,ierror) + #ifdef HDF5 FNAME='tke.h5' + gname='time' + call WriteHDF5_real(FNAME,gname,TIME) + IF (RANKZ.eq.0) THEN - epsilon(1)=0.d0 - epsilon(NY)=0.d0 + gname='gyf' + Diag=gyf(1:NY) + call WriteStatH5(FNAME,gname,Diag) + gname='epsilon' Diag=epsilon(1:NY) call WriteStatH5(FNAME,gname,Diag) END IF + if (MOVIE) then + FNAME='movie.h5' + if (USE_MPI) then + call mpi_barrier(MPI_COMM_WORLD,ierror) + end if + IF (RANKZ.EQ.RANKZMOVIE) THEN + do I=0,NXM + do J=1,NY + varxy(i,j)=F1(i,NzMovie,j) + end do + end do + GNAME='epsilon_xy' + call writeHDF5_xyplane(FNAME,GNAME,varxy) + END IF + if (USE_MPI) then 
+ call mpi_barrier(MPI_COMM_WORLD,ierror) + end if + IF (RANKY.EQ.RANKYMOVIE) THEN + do I=0,NXM + do J=0,NZP-1 + varxz(i,j)=F1(i,j,NyMovie) + end do + end do + GNAME='epsilon_xz' + call writeHDF5_xzplane(FNAME,GNAME,varxz) + END IF + if (USE_MPI) then + call mpi_barrier(MPI_COMM_WORLD,ierror) + end if + do I=0,NZP-1 + do J=1,NY + varzy(i,j)=F1(NxMovie,i,j) + end do + end do + GNAME='epsilon_zy' + call writeHDF5_zyplane(FNAME,GNAME,varzy) + + END IF + #else ! Here we aren't using HDF5, so write to a text file IF (RANKZ.EQ.0) THEN From 3e5467f7ce021ce9313d85c5560763e76a0d296f Mon Sep 17 00:00:00 2001 From: johnryantaylor Date: Fri, 2 Nov 2018 09:09:02 +0000 Subject: [PATCH 47/66] constant subgrid-scale Prandtl number removed call to les_chan_th (from AMD model branch in development) --- diablo_2.0/for/channel.f | 75 +++++++++++++++++++++++++++++++++++++--- 1 file changed, 71 insertions(+), 4 deletions(-) diff --git a/diablo_2.0/for/channel.f b/diablo_2.0/for/channel.f index 0d0e6ed..5ae6878 100644 --- a/diablo_2.0/for/channel.f +++ b/diablo_2.0/for/channel.f @@ -310,10 +310,77 @@ SUBROUTINE RK_CHAN_1 CALL FFT_XZ_TO_FOURIER(U2,CU2,0,NY+1) CALL FFT_XZ_TO_FOURIER(U3,CU3,0,NY+1) - call les_chan_th -C Re-apply the boundary conditions for the scalars -C In the case of Neumann (applied stress) BCs, these were changed in the LES - call APPLY_BC_TH_PHYS_MPI +C APPLY constant SGS Prandlt number + DO N=1,N_TH + do j=1,NY+1 + do k=0,NZP-1 + do i=0,NXM + KAPPA_T(I,K,J,N)=1.d0*NU_T(I,K,J) + end do + end do + end do + end do + +C Add the horizontal diffusive terms using explicit timestepping +C This is already done for the viscous terms inside les_chan.f + + DO N=1,N_TH + + DO J=1,NY+1 + DO K=0,TNKZ + DO I=0,NXP-1 + CS1(I,K,J)=CIKX(I)*CTH(I,K,J,N) + END DO + END DO + END DO + CALL FFT_XZ_TO_PHYSICAL(CS1,S1,0,NY+1) + do j=1,NY+1 + do k=0,NZP-1 + do i=0,NXM + S1(I,K,J)=KAPPA_T(I,K,J,N)*S1(I,K,J) + end do + end do + end do + CALL FFT_XZ_TO_FOURIER(S1,CS1,0,NY+1) + DO 
J=JSTART_TH(N),JEND_TH(N) + DO K=0,TNKZ + DO I=0,NXP-1 + CFTH(I,K,J,N)=CFTH(I,K,J,N)+CIKX(I)*CS1(I,K,J) + END DO + END DO + END DO + + DO J=1,NY+1 + DO K=0,TNKZ + DO I=0,NXP-1 + CS1(I,K,J)=CIKZ(K)*CTH(I,K,J,N) + END DO + END DO + END DO + CALL FFT_XZ_TO_PHYSICAL(CS1,S1,0,NY+1) + do j=1,NY+1 + do k=0,NZP-1 + do i=0,NXM + S1(I,K,J)=KAPPA_T(I,K,J,N)*S1(I,K,J) + end do + end do + end do + CALL FFT_XZ_TO_FOURIER(S1,CS1,0,NY+1) + DO J=JSTART_TH(N),JEND_TH(N) + DO K=0,TNKZ + DO I=0,NXP-1 + CFTH(I,K,J,N)=CFTH(I,K,J,N)+CIKZ(K)*CS1(I,K,J) + END DO + END DO + END DO + END DO ! end do n + +! Now, convert TH to physical space for calculation of nonlinear terms + DO N=1,N_TH + CS1(:,:,:)=CTH(:,:,:,N) + CALL FFT_XZ_TO_PHYSICAL(CS1,S1,0,NY+1) + TH(:,:,:,N)=S1(:,:,:) + END DO ELSE C If the subgrid model hasn't been called, then it is necessary to From 09a2bbe648c6a5d4b492d9a8546508c170e1659f Mon Sep 17 00:00:00 2001 From: johnryantaylor Date: Wed, 6 Mar 2019 15:38:47 +0000 Subject: [PATCH 48/66] removed spurious FFT call after les_chan --- diablo_2.0/for/channel.f | 4 ---- 1 file changed, 4 deletions(-) diff --git a/diablo_2.0/for/channel.f b/diablo_2.0/for/channel.f index 5ae6878..2c813b7 100644 --- a/diablo_2.0/for/channel.f +++ b/diablo_2.0/for/channel.f @@ -306,10 +306,6 @@ SUBROUTINE RK_CHAN_1 C In the process, Ui is converted to physical space call les_chan - CALL FFT_XZ_TO_FOURIER(U1,CU1,0,NY+1) - CALL FFT_XZ_TO_FOURIER(U2,CU2,0,NY+1) - CALL FFT_XZ_TO_FOURIER(U3,CU3,0,NY+1) - C APPLY constant SGS Prandlt number DO N=1,N_TH do j=1,NY+1 From 09fae5588c57ddb4afc30ce3b9a0539681c9f84e Mon Sep 17 00:00:00 2001 From: johnryantaylor Date: Wed, 6 Mar 2019 15:40:57 +0000 Subject: [PATCH 49/66] Removed comments used for de-bugging --- diablo_2.0/for/les.f | 22 +++++++++++----------- 1 file changed, 11 insertions(+), 11 deletions(-) diff --git a/diablo_2.0/for/les.f b/diablo_2.0/for/les.f index 8a23aad..6dca3d8 100644 --- a/diablo_2.0/for/les.f +++ b/diablo_2.0/for/les.f @@ -278,23 
+278,23 @@ subroutine les_chan DO I=0,NXP-1 CF1(I,K,J)=CF1(I,K,J) & -CIKX(I)*CSij1(I,K,J) -! & -(CSij4(I,K,J+1)-CSij4(I,K,J))/DYF(j) -! & -CIKZ(K)*CSij5(I,K,J) -! CF3(I,K,J)=CF3(I,K,J) -! & -CIKX(I)*CSij5(I,K,J) -! & -(CSij6(I,K,J+1)-CSij6(I,K,J))/DYF(J) -! & -CIKZ(K)*CSij3(I,K,J) + & -(CSij4(I,K,J+1)-CSij4(I,K,J))/DYF(j) + & -CIKZ(K)*CSij5(I,K,J) + CF3(I,K,J)=CF3(I,K,J) + & -CIKX(I)*CSij5(I,K,J) + & -(CSij6(I,K,J+1)-CSij6(I,K,J))/DYF(J) + & -CIKZ(K)*CSij3(I,K,J) END DO END DO END DO DO J=2,NY DO K=0,TNKZ DO I=0,NXP-1 -! CF2(I,K,J)=CF2(I,K,J) -! & -CIKX(I)*CSij4(I,K,J) + CF2(I,K,J)=CF2(I,K,J) + & -CIKX(I)*CSij4(I,K,J) ! Sij2 is added through an implict eddy viscosity ! & -(CSij2(I,K,J)-CSij2(I,K,J-1))/DY(j) -! & -CIKZ(K)*CSij6(I,K,J) + & -CIKZ(K)*CSij6(I,K,J) END DO END DO END DO @@ -311,7 +311,7 @@ subroutine les_chan DO K=0,TNKZ DO I=0,NXP-1 DO J=2,NY -! CF2(I,K,J)=CF2(I,K,J)+CIKX(I)*CTEMP(I,K,J) + CF2(I,K,J)=CF2(I,K,J)+CIKX(I)*CTEMP(I,K,J) END DO END DO END DO @@ -326,7 +326,7 @@ subroutine les_chan DO K=0,TNKZ DO I=0,NXP-1 DO J=2,NY -! CF2(I,K,J)=CF2(I,K,J)+CIKZ(K)*CTEMP(I,K,J) + CF2(I,K,J)=CF2(I,K,J)+CIKZ(K)*CTEMP(I,K,J) END DO END DO END DO From b8aa886a4ddfe750dfde2f53a93d4743a2e059f8 Mon Sep 17 00:00:00 2001 From: Aaron Wienkers Date: Wed, 17 Jul 2019 02:56:38 +0100 Subject: [PATCH 50/66] Fixes a number of issues contributing to mpi striping -- both diagnostic in saving stats, and with the subgrid model. 1. ume(j-1) mismatch when subtracting background strain. 2. dyf(Nyp+1) is now communicated down to be consistent across processes, rather than locally extrapolating. 3. CSij2 was not defined on Nyp+1 which is needed for computing temp in the sgs model, requiring some communicating down processors again. 
--- diablo_2.0/for/channel.f | 24 +++- diablo_2.0/for/les.f | 91 +++++++-------- diablo_2.0/for/mpi.f | 220 ++++++++++++++++++++++++++---------- diablo_2.0/for/save_stats.f | 77 ++++++------- diablo_2.0/for/user_rhs.f | 132 +++------------------- 5 files changed, 278 insertions(+), 266 deletions(-) diff --git a/diablo_2.0/for/channel.f b/diablo_2.0/for/channel.f index 2c813b7..102196b 100644 --- a/diablo_2.0/for/channel.f +++ b/diablo_2.0/for/channel.f @@ -341,7 +341,7 @@ SUBROUTINE RK_CHAN_1 DO J=JSTART_TH(N),JEND_TH(N) DO K=0,TNKZ DO I=0,NXP-1 - CFTH(I,K,J,N)=CFTH(I,K,J,N)+CIKX(I)*CS1(I,K,J) + CFTH(I,K,J,N)=CFTH(I,K,J,N)+CIKX(I)*CS1(I,K,J) END DO END DO END DO @@ -370,7 +370,7 @@ SUBROUTINE RK_CHAN_1 END DO END DO END DO ! end do n - + ! Now, convert TH to physical space for calculation of nonlinear terms DO N=1,N_TH CS1(:,:,:)=CTH(:,:,:,N) @@ -1647,7 +1647,25 @@ SUBROUTINE CREATE_GRID_CHAN DO J=1,NY DYF(J)=(GY(J+1)-GY(J)) END DO - DYF(NY+1)=DYF(NY) + !DYF(NY+1)=DYF(NY) + + ! Communicate dyf(0) and dyf(Ny + 1), rather than extrapolating + if (rankY == 0) then + dyf(0) = dyf(1) + else + call mpi_send(dyf(2), 1, mpi_double_precision, rankY - 1, + & 100 + rankY, mpi_comm_y, ierror) + call mpi_recv(dyf(0), 1, mpi_double_precision, rankY - 1, + & 110 + rankY - 1, mpi_comm_y, status, ierror) + end if + if (rankY == NprocY - 1) then + dyf(Ny + 1) = dyf(Ny) + else + call mpi_send(dyf(Ny - 1), 1, mpi_double_precision, rankY + 1, + & 110 + rankY, mpi_comm_y, ierror) + call mpi_recv(dyf(Ny + 1), 1, mpi_double_precision, rankY + 1, + & 100 + rankY + 1, mpi_comm_y, status, ierror) + end if RETURN END diff --git a/diablo_2.0/for/les.f b/diablo_2.0/for/les.f index 6dca3d8..2bb8caa 100644 --- a/diablo_2.0/for/les.f +++ b/diablo_2.0/for/les.f @@ -1,8 +1,8 @@ subroutine les_chan C This subroutine models the terms owing to the subgrid scale stress C if the computation is to be treated as an LES not a DNS -C This subroutine should be called when the velocity is in fourier space -C 
in the periodic directions, on output, the velocity will be +C This subroutine should be called when the velocity is in fourier space +C in the periodic directions, on output, the velocity will be C in physical space. C It is assumed that the test filter and the LES filter are performed C by the same operation @@ -19,10 +19,10 @@ subroutine les_chan real*8 EPS_SGS1_MEAN(0:NY+1) real*8 U3_bar(0:NY+1) real*8 U1_bar(0:NY+1) - + real*8 C_SMAG parameter (C_SMAG=0.17d0) - real*8 DELTA_Y(0:NY+1),DELTA_YF(0:NY+1) + real*8 DELTA_Y(0:NY+1),DELTA_YF(0:NY+1) real*8 alpha_sgs,beta_sgs real*8 denominator_sum @@ -88,7 +88,7 @@ subroutine les_chan U1_bar(J)=DBLE(CU1(0,0,J)) U3_bar(J)=DBLE(CU3(0,0,J)) END DO - END IF + END IF IF (USE_MPI) THEN CALL MPI_BCAST(U1_bar,NY+2,MPI_DOUBLE_PRECISION,0, & MPI_COMM_Z,ierror) @@ -104,9 +104,9 @@ subroutine les_chan ! Compute |S| at GYF points, store in S1 ! Interpolation to GYF points is easy since by definition ! GYF points are exactly midway between neighboring GY points -! Sij4 and Sij6 have the mean shear -! remove the zero value U1,3bar as in -! compute strain +! Sij4 and Sij6 have the mean shear +! remove the zero value U1,3bar as in +! compute strain DO J=JSTART,JEND DO K=0,NZP-1 DO I=0,NXM @@ -123,7 +123,7 @@ subroutine les_chan ! Optionally remove mean shear & -0.5d0*(U3_bar(J)-U3_bar(J-1))/DY(J) & -0.5d0*(U3_bar(J+1)-U3_bar(J))/DY(J+1) - & ))**2.d0 + & ))**2.d0 & +2.d0*Sij3(I,K,J)**2.d0 ) END DO END DO @@ -156,7 +156,7 @@ subroutine les_chan END DO ! Now, compute |S|*S_ij, storing in Sij -! First compute at GYF points +! First compute at GYF points DO J=JSTART,JEND DO K=0,NZP-1 DO I=0,NXM @@ -265,7 +265,7 @@ subroutine les_chan else if ((LES_MODEL_TYPE.EQ.2).or.(LES_MODEL_TYPE.eq.3)) then ! 
Here, use a dynamic smagorinsky model with or without scale similar part - stop 'ERROR: LES_MODEL_TYPE=2, 3 not supported yet in MPI' + stop 'ERROR: LES_MODEL_TYPE=2, 3 not supported yet in MPI' end if @@ -283,7 +283,7 @@ subroutine les_chan CF3(I,K,J)=CF3(I,K,J) & -CIKX(I)*CSij5(I,K,J) & -(CSij6(I,K,J+1)-CSij6(I,K,J))/DYF(J) - & -CIKZ(K)*CSij3(I,K,J) + & -CIKZ(K)*CSij3(I,K,J) END DO END DO END DO @@ -376,7 +376,7 @@ subroutine les_chan & -CIKZ(K)*CSij3(I,K,J) END DO END DO - END DO + END DO CALL FFT_XZ_TO_PHYSICAL(CTEMP,TEMP,0,NY+1) do J=0,NY+1 do I=0,NXM @@ -453,7 +453,7 @@ subroutine les_chan end do end do end do - + call mpi_allreduce(mpi_in_place,S1_mean,NY+2,MPI_DOUBLE_PRECISION, & MPI_SUM,MPI_COMM_Z,ierror) call mpi_allreduce(mpi_in_place,NU_T_mean,NY+2 @@ -482,16 +482,16 @@ subroutine les_chan Diag=gyf(1:NY) call WriteStatH5(FNAME,gname,Diag) - gname='nu_sgs' + gname='nu_sgs' Diag=NU_T_mean(1:NY) - call WriteStatH5(FNAME,gname,Diag) + call WriteStatH5(FNAME,gname,Diag) - gname='eps_sgs1' + gname='eps_sgs1' Diag=EPS_SGS1_MEAN(1:NY) - call WriteStatH5(FNAME,gname,Diag) - + call WriteStatH5(FNAME,gname,Diag) + END IF - + #else ! Here we aren't using HDF5, so write to text file IF (RANKZ.EQ.0) THEN @@ -528,15 +528,17 @@ subroutine compute_strain_chan include 'header_les' integer I,J,K,ij - - DO J=1,NY + + DO J=1,NY+1 DO K=0,TNKZ DO I=0,NXP-1 CSij1(I,K,J)=CIKX(I)*CU1(I,K,J) - CSij2(I,K,J)=(CU2(I,K,J+1)-CU2(I,K,J))/DYF(j) - CSij3(I,K,J)=CIKZ(K)*CU3(I,K,J) + CSij3(I,K,J)=CIKZ(K)*CU3(I,K,J) CSij5(I,K,J)=0.5d0*(CIKZ(K)*CU1(I,K,J) & +CIKX(I)*CU3(I,K,J)) + if (J /= Ny + 1) then + CSij2(I,K,J)=(CU2(I,K,J+1)-CU2(I,K,J))/DYF(j) + end if END DO END DO END DO @@ -544,12 +546,15 @@ subroutine compute_strain_chan DO K=0,TNKZ DO I=0,NXP-1 CSij4(I,K,J)=0.5d0*((CU1(I,K,J)-CU1(I,K,J-1))/DY(j) - & +CIKX(I)*CU2(I,K,J) ) + & +CIKX(I)*CU2(I,K,J) ) CSij6(I,K,J)=0.5d0*(CIKZ(K)*CU2(I,K,J) & +(CU3(I,K,J)-CU3(I,K,J-1))/DY(j) ) END DO END DO - END DO + END DO + + ! 
Need to communicate down CSij2 to each j = Ny + 1 + call ghost_CSij2_mpi CALL FFT_XZ_TO_PHYSICAL(CSij1,Sij1,0,NY+1) @@ -564,7 +569,7 @@ subroutine compute_strain_chan RETURN END - + subroutine les_filter_chan(A,jstart,jend) ! This subroutine applies the les filter to the input field ! The indices to the start and end of the array in the y-direction @@ -591,7 +596,7 @@ subroutine les_filter_chan(A,jstart,jend) ! W0=1.d0/2.d0 ! W1=1.d0/4.d0 ! W2=0.d0 - Wm1_j=1.d0/4.d0 + Wm1_j=1.d0/4.d0 W0_j=1.d0/2.d0 W1_j=1.d0/4.d0 ! The following is for the 5-point trapezoidal rule, alpha*beta=9 @@ -634,7 +639,7 @@ subroutine les_filter_chan(A,jstart,jend) end do ip1(NXM)=0 do i=0,NX-3 - ip2(i)=i+2 + ip2(i)=i+2 end do ip2(NX-2)=0 ip2(NXM)=1 @@ -645,9 +650,9 @@ subroutine les_filter_chan(A,jstart,jend) B(i,k,j)=Wm2*A(im2(i),k,j)+Wm1*A(im1(i),k,j)+W0*A(i,k,j) & +W1*A(ip1(i),k,j)+W2*A(ip2(i),k,j) end do - end do + end do end do - + ! Apply filter in the z-diretion ! B=Wm2*CSHIFT(B,-2,2)+Wm1*CSHIFT(B,-1,2)+W0*B+W1*CSHIFT(B,1,2) ! & +W2*CSHIFT(B,2,2) @@ -667,7 +672,7 @@ subroutine les_filter_chan(A,jstart,jend) end do kp1(NZM)=0 do k=0,NZ-3 - kp2(k)=k+2 + kp2(k)=k+2 end do kp2(NZ-2)=0 kp2(NZM)=1 @@ -678,7 +683,7 @@ subroutine les_filter_chan(A,jstart,jend) A(i,k,j)=Wm2*B(i,km2(k),j)+Wm1*B(i,km1(k),j)+W0*B(i,k,j) & +W1*B(i,kp1(k),j)+W2*B(i,kp2(k),j) end do - end do + end do end do ! Apply filter in the vertical direction at all physical cells @@ -691,7 +696,7 @@ subroutine les_filter_chan(A,jstart,jend) ! do j=jstart+1,jend-1 ! do k=0,NZM ! do i=0,NXM -! B(i,k,j)=Wm1_j*B(i,k,j-1)+W0_j*B(i,k,j)+W1_j*B(i,k,j+1) +! B(i,k,j)=Wm1_j*B(i,k,j-1)+W0_j*B(i,k,j)+W1_j*B(i,k,j+1) ! end do ! end do ! 
end do @@ -722,7 +727,7 @@ subroutine les_filter_chan_fourier(A,jstart,jend) real*8 PI, LX, LZ integer NKX,NKZ,TNKZ - real*8 KX(0:NX/3),KZ(0:2*(NZ/3)) + real*8 KX(0:NX/3),KZ(0:2*(NZ/3)) real*8 A(0:NX+1,0:NZ+1,0:NY+1) @@ -744,7 +749,7 @@ subroutine les_filter_chan_fourier(A,jstart,jend) LX=PI LZ=2.d0*PI - + ! Get the wavenumber vectors: NKX=NX/3 DO I=0,NKX @@ -766,7 +771,7 @@ subroutine les_filter_chan_fourier(A,jstart,jend) B(i,k,j)=A(i,k,j) end do enddo - end do + end do ! Convert to fourier space @@ -787,13 +792,13 @@ subroutine les_filter_chan_fourier(A,jstart,jend) ! Now, convert back to physical space call fft_xz_to_physical(CB,B,jstart,jend) - do j=jstart,jend + do j=jstart,jend do k=0,NZM do i=0,NXM A(i,k,j)=B(i,k,j) end do end do - end do + end do return end @@ -831,7 +836,7 @@ SUBROUTINE APPLY_BC_LES IF (U_BC_YMIN.eq.1) THEN IF ((RANKY.eq.0).or.(.NOT.USE_MPI)) THEN -! We are on a process at the bottom wall +! We are on a process at the bottom wall DO K=0,TNKZ DO I=0,NXP-1 CU1(I,K,1)=CU1(I,K,2) @@ -843,7 +848,7 @@ SUBROUTINE APPLY_BC_LES IF (W_BC_YMIN.eq.1) THEN IF ((RANKY.eq.0).or.(.NOT.USE_MPI)) THEN -! We are on a process at the bottom wall +! We are on a process at the bottom wall DO K=0,TNKZ DO I=0,NXP-1 CU3(I,K,1)=CU3(I,K,2) @@ -855,7 +860,3 @@ SUBROUTINE APPLY_BC_LES RETURN END - - - - diff --git a/diablo_2.0/for/mpi.f b/diablo_2.0/for/mpi.f index 3be2f10..c24581f 100644 --- a/diablo_2.0/for/mpi.f +++ b/diablo_2.0/for/mpi.f @@ -101,7 +101,7 @@ SUBROUTINE GHOST_CHAN_MPI END DO END DO END IF - + ! AT this point we have passed data up the chain IF (RANKY.eq.NPROCY-1) THEN ! If we are the higest ranked process, then we don't need to recieve @@ -264,7 +264,7 @@ SUBROUTINE GHOST_LES_MPI END DO END DO END IF - + ! Now, we have hit the top process. Set the BCs and pass data down IF (RANKY.eq.NPROCY-1) THEN @@ -459,6 +459,105 @@ SUBROUTINE GHOST_GRID_MPI RETURN END + + subroutine ghost_CSij2_mpi + +! 
This subroutine is part of the MPI package for the LES subroutine +! to compute the strain +! Share down the CSij2 Ny + 1 ghost cell between neighboring processors +! CSij2(Ny+1) is needed to compute temp at Ny + + include 'header' + include 'header_les' + + integer i, j, k, n + + ! Define the arrays that will be used for data packing. This makes the + ! communication between processes more efficient by only requiring one + ! send and recieve. + ! The communication will be done in Fourier space, so these arrays should + ! be complex arrays to match the velocity + ! The size of the buffer array is 0:Nxp-1,0:NZP-1 + COMPLEX*16 ocpack(0:Nxp-1, 0:Nzp - 1) + COMPLEX*16 icpack(0:Nxp-1, 0:Nzp - 1) + + ! If we are using more than one processor, then we need to pass data + + if (NprocY > 1) then + + ! Set the BCs and pass data down + + if (rankY == NprocY - 1) then + ! If we are the higest ranked process, then we don't need to recieve + ! data at the upper ghost cells. + + ! We don't need CSij2 at the boundaries (dictates CSij4' & 6' only) + do k = 0, tNkz + do i = 0, Nxp - 1 + CSij2(i, k, Ny + 1) = -10000.d0 + end do + end do + + ! Now, send data down the chain + do k = 0, tNkz + do i = 0, Nxp - 1 + ocpack(i, k) = CSij2(i, k, 2) + end do + end do + ! Now, we have packed the data into a compact array, pass the data up + call mpi_send(ocpack, (Nxp) * (Nzp) + & , mpi_double_complex + & , rankY - 1, 3, mpi_comm_y, ierror) + else if (rankY > 0) then + ! Here, we are one of the middle processes and we need to pass data + ! down and recieve data from above us + do k = 0, tNkz + do i = 0, Nxp - 1 + ocpack(i, k) = CSij2(i, k, 2) + end do + end do + + call mpi_send(ocpack, (Nxp) * (Nzp) + & , mpi_double_complex + & , rankY - 1, 3, mpi_comm_y, ierror) + + call mpi_recv(icpack, (Nxp) * (Nzp) + & , mpi_double_complex + & , rankY + 1, 3, mpi_comm_y, status, ierror) + ! 
Now, unpack the data that we have recieved + do k = 0, tNkz + do i = 0, Nxp - 1 + CSij2(i, k, Ny + 1) = icpack(i, k) + end do + end do + else + ! Here, we must be the lowest process (RANK=0) and we need to receive + ! data from above + call mpi_recv(icpack, (Nxp) * (Nzp) + & , mpi_double_complex + & , rankY + 1, 3, mpi_comm_y, status, ierror) + ! Unpack the data that we have recieved + do k = 0, tNkz + do i = 0, Nxp - 1 + CSij2(i, k, Ny + 1) = icpack(i, k) + end do + end do + end if + + else + ! Here, NprocY=1, so we just need to set the boundary values + ! Set CSij2 (cu2(i, k, Ny + 2) - cu2(i, k, Ny + 1)) / dyf(Ny + 1) + do k = 0, tNkz + do i = 0, Nxp - 1 + CSij2(i, k, Ny + 1) = -10000.d0 + end do + end do + + end if + + return + end + C----*|--.---------.---------.---------.---------.---------.---------.-|-------| SUBROUTINE THOMAS_FORWARD_REAL_MPI(A,B,C,G,INY,INX) C----*|--.---------.---------.---------.---------.---------.---------.-|-------| @@ -683,13 +782,13 @@ SUBROUTINE THOMAS_BACKWARD_COMPLEX_MPI(A,B,C,G,INY,INX) SUBROUTINE INIT_MPI C----*|--.---------.---------.---------.---------.---------.---------.-|-----| INCLUDE 'header' - + INTEGER bl(2),disp(2),types(2) INTEGER IPROCS,TYPE1,COMM_CART INTEGER DIMS(2),PERDIM(2) INTEGER MFLAG(2) - + INTEGER I,J,K,XI,ZI COMPLEX*16 TMP(0:NX/2,0:NZP+1,0:NY+1) @@ -720,14 +819,14 @@ SUBROUTINE INIT_MPI IF (RANK.EQ.0) WRITE(*,*) 'ERROR: compiled with ',NPROCS, & 'cores, running with ',IPROCS,' cores.' CALL MPI_FINALIZE(ierror) - stop + stop END IF IF (MOD(NPROCS,NPROCY).NE.0) THEN IF (RANK.EQ.0) WRITE(*,*) ' Error. NPROCS is not a ', & 'multiple of NPROCY' CALL MPI_FINALIZE(ierror) - stop + stop END IF DIMS(2)=NPROCY @@ -738,7 +837,7 @@ SUBROUTINE INIT_MPI call MPI_CART_CREATE(MPI_COMM_WORLD,2,DIMS,PERDIM,.FALSE., & COMM_CART,IERROR) - ! In PERDIM I put the information for the remain_dims + ! 
In PERDIM I put the information for the remain_dims PERDIM=(/0,1/) call MPI_CART_SUB(COMM_CART,PERDIM,MPI_COMM_Y,IERROR) PERDIM=(/1,0/) @@ -751,7 +850,7 @@ SUBROUTINE INIT_MPI & NPROCS call MPI_BARRIER(MPI_COMM_WORLD,IERROR) write(*,'(1A,4I8)') 'RANK,RANKY,RANKZ: ',RANK,RANKY,RANKZ - + !call fftw_mpi_init() !alloc_local=fftw_mpi_local_size_2d(NZ,NX, MPI_COMM_Z, !& NZP,iNZ) @@ -759,12 +858,12 @@ SUBROUTINE INIT_MPI !NZP=NZ/NPROCSZ !NXP=NX/(2*NPROCSZ) - + C$$$ PI=4.*atan(1.0) C$$$ do i=0,NX-1 C$$$ do j=1,1 C$$$ do k=0,NZP-1 -C$$$ xi= i +C$$$ xi= i C$$$ zi= k+NZP*RANKZ C$$$ V(i,k,j)=sin(2*PI/NX*xi+2*2*PI/NZ*zi) C$$$ end do @@ -774,12 +873,12 @@ SUBROUTINE INIT_MPI c$$$c ------------------------------ c$$$c Define FFT c$$$c ------------------------------ -c$$$ CALL RFFTWND_F77_CREATE_PLAN(FFTW_X_TO_F_PLAN, 1, NX, -c$$$ * FFTW_FORWARD, FFTW_MEASURE ) +c$$$ CALL RFFTWND_F77_CREATE_PLAN(FFTW_X_TO_F_PLAN, 1, NX, +c$$$ * FFTW_FORWARD, FFTW_MEASURE ) c$$$ CALL FFTWND_F77_CREATE_PLAN(FFTW_Z_TO_F_PLAN, 1, NZ, c$$$ * FFTW_FORWARD, FFTW_MEASURE + FFTW_IN_PLACE ) -c$$$ CALL RFFTWND_F77_CREATE_PLAN(FFTW_X_TO_P_PLAN, 1, NX, -c$$$ * FFTW_BACKWARD, FFTW_MEASURE ) +c$$$ CALL RFFTWND_F77_CREATE_PLAN(FFTW_X_TO_P_PLAN, 1, NX, +c$$$ * FFTW_BACKWARD, FFTW_MEASURE ) c$$$ CALL FFTWND_F77_CREATE_PLAN(FFTW_Z_TO_P_PLAN, 1, NZ, c$$$ * FFTW_BACKWARD, FFTW_MEASURE + FFTW_IN_PLACE ) @@ -790,11 +889,11 @@ SUBROUTINE INIT_MPI ! Box full x to z (1) call MPI_TYPE_VECTOR(NZP,NXP,NX/2+1, - & MPI_DOUBLE_COMPLEX,TYPE1,ierror) - call MPI_TYPE_COMMIT(TYPE1,ierror) + & MPI_DOUBLE_COMPLEX,TYPE1,ierror) + call MPI_TYPE_COMMIT(TYPE1,ierror) bl(1:2)=(/1, 1/) - disp(1:2)= (/0, NXP*16/) + disp(1:2)= (/0, NXP*16/) types=(/TYPE1, MPI_UB/) call MPI_TYPE_STRUCT(2,bl,disp,types,XY2ZY_1,ierror) @@ -803,11 +902,11 @@ SUBROUTINE INIT_MPI ! 
Box full x to z (2) call MPI_TYPE_VECTOR(NZP,NXP,NXP+1, - & MPI_DOUBLE_COMPLEX,TYPE1,ierror) - call MPI_TYPE_COMMIT(TYPE1,ierror) + & MPI_DOUBLE_COMPLEX,TYPE1,ierror) + call MPI_TYPE_COMMIT(TYPE1,ierror) bl(1:2)=(/1, 1/) - disp(1:2)= (/0, NZP*(NXP+1)*16/) + disp(1:2)= (/0, NZP*(NXP+1)*16/) types=(/TYPE1, MPI_UB/) call MPI_TYPE_STRUCT(2,bl,disp,types,XY2ZY_2,ierror) @@ -819,29 +918,29 @@ SUBROUTINE INIT_MPI c /////////////////////////////////// c$$$ ! Box full x to z (2) c$$$ call MPI_TYPE_VECTOR(NXP,1,NZ, -c$$$ & MPI_DOUBLE_COMPLEX,TYPE1,ierror) -c$$$ call MPI_TYPE_COMMIT(TYPE1,ierror) +c$$$ & MPI_DOUBLE_COMPLEX,TYPE1,ierror) +c$$$ call MPI_TYPE_COMMIT(TYPE1,ierror) c$$$ c$$$ bl(1:2)=(/1, 1/) -c$$$ disp(1:2)= (/0, 16/) +c$$$ disp(1:2)= (/0, 16/) c$$$ types=(/TYPE1, MPI_UB/) c$$$ c$$$ call MPI_TYPE_STRUCT(2,bl,disp,types,XY2ZY_2,ierror) c$$$ call MPI_TYPE_COMMIT(XY2ZY_2,ierror) c$$$ call MPI_TYPE_VECTOR(NZP,NXP,NXP+1, -c$$$ & MPI_DOUBLE_COMPLEX,XY2ZY_2,ierror) -c$$$ call MPI_TYPE_COMMIT(XY2ZY_2,ierror) +c$$$ & MPI_DOUBLE_COMPLEX,XY2ZY_2,ierror) +c$$$ call MPI_TYPE_COMMIT(XY2ZY_2,ierror) c$$$ ! Box full x to z c$$$ call MPI_TYPE_VECTOR(NZ,NXP,NX/2+1, -c$$$ & MPI_DOUBLE_COMPLEX,XY2ZY_1,ierr) -c$$$ call MPI_TYPE_COMMIT(XY2ZY_1,ierr) +c$$$ & MPI_DOUBLE_COMPLEX,XY2ZY_1,ierr) +c$$$ call MPI_TYPE_COMMIT(XY2ZY_1,ierr) c$$$ c$$$ ! Box full z to x c$$$ call MPI_TYPE_VECTOR(NX/2,NX/2,NX/2+1, -c$$$ & MPI_DOUBLE_COMPLEX,XY2ZY,ierr) -c$$$ call MPI_TYPE_COMMIT(XY2ZY,ierr) - +c$$$ & MPI_DOUBLE_COMPLEX,XY2ZY,ierr) +c$$$ call MPI_TYPE_COMMIT(XY2ZY,ierr) + c$$$c @@ -855,11 +954,11 @@ SUBROUTINE INIT_MPI c$$$! 
call WriteHDF5_var_real(FNAME) c$$$ c$$$ FNAME='out.h5' -c$$$ call writeHDF5(FNAME) +c$$$ call writeHDF5(FNAME) c$$$ c$$$ call mpi_finalize(ierror) -c$$$ stop -c$$$ +c$$$ stop +c$$$ C Set a string to determine which input/output files to use C When MPI is used, each process will read/write to files with the @@ -895,7 +994,7 @@ SUBROUTINE INIT_CHAN_MPI INTEGER J, N - IF (RANK.EQ.0) + IF (RANK.EQ.0) & write(*,*) '*******IN INIT_CHAN_MPI *********' PI=4.D0*ATAN(1.D0) @@ -1010,7 +1109,7 @@ SUBROUTINE APPLY_BC_U2_MPI(MATL,MATD,MATU,VEC) CALL APPLY_BC_2_LOWER(MATL,MATD,MATU,VEC) END IF IF (RANKY.eq.NPROCY-1) THEN -! If we have the highest plane, apply the boundary conditions +! If we have the highest plane, apply the boundary conditions CALL APPLY_BC_2_UPPER(MATL,MATD,MATU,VEC) END IF RETURN @@ -1029,7 +1128,7 @@ SUBROUTINE APPLY_BC_U2_MPI_C(MATL_C,MATD_C,MATU_C,VEC_C) CALL APPLY_BC_2_LOWER_C(MATL_C,MATD_C,MATU_C,VEC_C) END IF IF (RANK.eq.NPROCS-1) THEN -! If we have the highest plane, apply the boundary conditions +! If we have the highest plane, apply the boundary conditions CALL APPLY_BC_2_UPPER_C(MATL_C,MATD_C,MATU_C,VEC_C) END IF RETURN @@ -1046,10 +1145,10 @@ SUBROUTINE APPLY_BC_U1_MPI(MATL,MATD,MATU,VEC) ! the upper or lowermost process, then apply boundary conditions IF (RANKY.eq.0) THEN ! If we have the lowest plane, apply the boundary conditions - CALL APPLY_BC_1_LOWER(MATL,MATD,MATU,VEC) + CALL APPLY_BC_1_LOWER(MATL,MATD,MATU,VEC) END IF IF (RANKY.eq.NPROCY-1) THEN -! If we have the highest plane, apply the boundary conditions +! If we have the highest plane, apply the boundary conditions CALL APPLY_BC_1_UPPER(MATL,MATD,MATU,VEC) END IF RETURN @@ -1068,7 +1167,7 @@ SUBROUTINE APPLY_BC_U1_MPI_C(MATL_C,MATD_C,MATU_C,VEC_C) CALL APPLY_BC_1_LOWER_C(MATL_C,MATD_C,MATU_C,VEC_C) END IF IF (RANK.eq.NPROCS-1) THEN -! If we have the highest plane, apply the boundary conditions +! 
If we have the highest plane, apply the boundary conditions CALL APPLY_BC_1_UPPER_C(MATL_C,MATD_C,MATU_C,VEC_C) END IF RETURN @@ -1088,7 +1187,7 @@ SUBROUTINE APPLY_BC_U3_MPI(MATL,MATD,MATU,VEC) CALL APPLY_BC_3_LOWER(MATL,MATD,MATU,VEC) END IF IF (RANKY.eq.NPROCY-1) THEN -! If we have the highest plane, apply the boundary conditions +! If we have the highest plane, apply the boundary conditions CALL APPLY_BC_3_UPPER(MATL,MATD,MATU,VEC) END IF RETURN @@ -1107,7 +1206,7 @@ SUBROUTINE APPLY_BC_U3_MPI_C(MATL_C,MATD_C,MATU_C,VEC_C) CALL APPLY_BC_3_LOWER_C(MATL_C,MATD_C,MATU_C,VEC_C) END IF IF (RANK.eq.NPROCS-1) THEN -! If we have the highest plane, apply the boundary conditions +! If we have the highest plane, apply the boundary conditions CALL APPLY_BC_3_UPPER_C(MATL_C,MATD_C,MATU_C,VEC_C) END IF RETURN @@ -1204,7 +1303,7 @@ SUBROUTINE APPLY_BC_VEL_MPI IF (RANKY.EQ.NPROCY-1) THEN CALL APPLY_BC_VEL_UPPER END IF - + RETURN END @@ -1225,7 +1324,7 @@ SUBROUTINE APPLY_BC_VEL_PHYS_MPI RETURN END - + SUBROUTINE TRANSPOSE_MPI_XZ_TO_XY() ! This subroutine starts with all arrays decomposed in x-z slabs ! and transposes the data so that it is decomposed in x-y slabs @@ -1234,7 +1333,7 @@ SUBROUTINE TRANSPOSE_MPI_XZ_TO_XY() c$$$ c$$$ integer i,j,k,N c$$$ -c$$$ real*8 A(0:NX+1,0:NZ+1,0:NY+1) +c$$$ real*8 A(0:NX+1,0:NZ+1,0:NY+1) c$$$ real*8 buffer(0:NX+1,0:NY+1,0:NZ+1) c$$$ c$$$ real*8 test1(1:NX,1:NY,1:NZ) @@ -1264,16 +1363,16 @@ SUBROUTINE TRANSPOSE_MPI_XZ_TO_XY() c$$$ c$$$ n=1 c$$$ do k=0,NZ/NPROCS-1 -c$$$ do i=0,NX-1 +c$$$ do i=0,NX-1 c$$$ do j=1,NY c$$$ A(i,k,j)=test2(i+1,j,k+1) c$$$ end do c$$$ end do c$$$ end do -c$$$ +c$$$ c$$$ do n=2,NPROCS c$$$ do k=0,NZ/NPROCS-1 -c$$$ do i=0,NX-1 +c$$$ do i=0,NX-1 c$$$ do j=2,NY c$$$ A(i,k,NY+j-1)=test2(i+1,NY+j,k+1) c$$$ end do @@ -1284,7 +1383,7 @@ SUBROUTINE TRANSPOSE_MPI_XZ_TO_XY() c$$$ NX_t=NX c$$$ NZ_t=NZ/NPROCS c$$$ NY_t=NY*NPROCS-(NPROCS-1) -c$$$ +c$$$ c$$$! 
A should now be indexed from A(0:NX_t-1,0:NZ_t/NPROCS-1,1:NY*NPROCS-(NPROCS-1)) c$$$! (The new wall locations are 1 and NY-(NPROCS-1) ) c$$$ @@ -1295,27 +1394,27 @@ SUBROUTINE TRANSPOSE_MPI_XZ_TO_XY() c$$$ end do c$$$ - + RETURN END - + SUBROUTINE get_minimum_mpi(val) - + include 'header' REAL*8 val,vmin call MPI_ALLREDUCE(val,vmin,1,MPI_DOUBLE_PRECISION, & MPI_MIN,MPI_COMM_WORLD,ierror) - + val=vmin RETURN - END + END @@ -1328,7 +1427,7 @@ SUBROUTINE FFT_XZ_TO_FOURIER(V,VV,JMIN,JMAX) COMPLEX*16 TMP(0:NX/2,0:NZP+1,0:NY+1) INTEGER I,J,K - INTEGER JMIN,JMAX + INTEGER JMIN,JMAX !write(100+RANK,'(1E25.15)') V(0:NX-1,0:NZP-1,1) !write(100+RANK) V(0:NX-1,0:NZP-1,1) @@ -1357,7 +1456,7 @@ SUBROUTINE FFT_XZ_TO_FOURIER(V,VV,JMIN,JMAX) !write(120+RANK,'(2E25.15)') VV(0:NXP-1,0:NZ-1,1) !write(120+RANK) VV(0:NXP-1,0:NZ-1,1) - + ! FFT in Z DO J=JMIN,JMAX CALL FFTWND_F77(FFTW_Z_TO_F_PLAN,NXP, @@ -1391,7 +1490,7 @@ SUBROUTINE FFT_XZ_TO_FOURIER(V,VV,JMIN,JMAX) - SUBROUTINE FFT_XZ_TO_PHYSICAL(VV,V,JMIN,JMAX) + SUBROUTINE FFT_XZ_TO_PHYSICAL(VV,V,JMIN,JMAX) include 'header' @@ -1457,7 +1556,7 @@ SUBROUTINE INTEGRATE_Y_VAR(VAR,RES,COMM) RES=RES+0.5*(VAR(j)+VAR(j-1))*DY(j) end do END IF - RES=RES/LY + RES=RES/LY END SUBROUTINE @@ -1491,13 +1590,13 @@ SUBROUTINE INTEGRATE_Z_VAR(VAR,RES,COMM) end do end do END IF - RES=RES/LZ + RES=RES/LZ END SUBROUTINE SUBROUTINE END_RUN_MPI(FLAG) - + INCLUDE 'header' LOGICAL FLAG @@ -1506,6 +1605,5 @@ SUBROUTINE END_RUN_MPI(FLAG) CALL END_RUN(FLAG) END IF CALL MPI_BCAST(FLAG,1,MPI_LOGICAL,0,MPI_COMM_WORLD,IERROR) - - END + END diff --git a/diablo_2.0/for/save_stats.f b/diablo_2.0/for/save_stats.f index 7d186be..4de94f1 100644 --- a/diablo_2.0/for/save_stats.f +++ b/diablo_2.0/for/save_stats.f @@ -10,16 +10,16 @@ SUBROUTINE SAVE_STATS_CHAN(FINAL) LOGICAL FINAL integer i,j,k,n real*8 uc, ubulk - + ! This variable is used to add up scalar diagnostics real*8 thsum(0:NY+1) -! These variables are used to store and write 2D slices +! 
These variables are used to store and write 2D slices real*8 varxy(0:NXM,1:NY),varzy(0:NZP-1,1:NY),varxz(0:NXM,0:NZP-1) ! These variable are used for HDF5 writing real*8 Diag(1:NY) - IF (RANK.EQ.0) + IF (RANK.EQ.0) & WRITE(6,*) 'Saving flow statistics.' IF (USE_MPI) THEN @@ -94,12 +94,12 @@ SUBROUTINE SAVE_STATS_CHAN(FINAL) IF (NPROCY.EQ.1) THEN if (int(float(NY)/2.) .eq. float(NY)/2.) then ! IF NY is even - uc=dble(CU1(0,0,int(float(NY)/2.))) + uc=dble(CU1(0,0,int(float(NY)/2.))) else uc=0.5*(dble(CU1(0,0,int(float(NY)/2.)-1)) + +dble(CU1(0,0,int(float(NY)/2.)))) end if - write(*,*) 'Centerline velocity = ', uc + write(*,*) 'Centerline velocity = ', uc ! Compute and write out bulk velocity END IF @@ -146,7 +146,7 @@ SUBROUTINE SAVE_STATS_CHAN(FINAL) IF (RANKZ.EQ.0) THEN ume=dble(CU1(0,0,:)) vme=dble(CU2(0,0,:)) - wme=dble(CU3(0,0,:)) + wme=dble(CU3(0,0,:)) DO n=1,N_TH thme(:,n)=dble(CTH(0,0,:,n)) END DO @@ -169,13 +169,13 @@ SUBROUTINE SAVE_STATS_CHAN(FINAL) call tkebudget_chan IF (LES) call tkebudget_chan_les -! Get the turbulent kinetic energy at each level +! Get the turbulent kinetic energy at each level do j=1,NY urms(j)=0. vrms(j)=0. wrms(j)=0. do k=0,NZP-1 - do i=0,NXM + do i=0,NXM urms(j)=urms(j)+(U1(i,k,j)-ume(j))**2. vrms(j)=vrms(j)+0.5*((U2(i,k,j )-vme(j ))**2. + & (U2(i,k,j+1)-vme(j+1))**2. ) @@ -195,7 +195,7 @@ SUBROUTINE SAVE_STATS_CHAN(FINAL) urms(j)=sqrt(urms(j)/(float(NZ)*float(NX))) vrms(j)=sqrt(vrms(j)/(float(NZ)*float(NX))) wrms(j)=sqrt(wrms(j)/(float(NZ)*float(NX))) - end do + end do ! Get the bulk rms value CALL INTEGRATE_Y_VAR(urms,urms_b,MPI_COMM_Y) @@ -206,7 +206,7 @@ SUBROUTINE SAVE_STATS_CHAN(FINAL) ! Here, uv and wv are defined on the GY grid ! uv is defined on the GYF grid do j=1,NY - uv(j)=0. + uv(j)=0. uw(j)=0. wv(j)=0. 
do k=0,NZP-1 @@ -231,13 +231,13 @@ SUBROUTINE SAVE_STATS_CHAN(FINAL) & MPI_SUM,MPI_COMM_Z,ierror) call mpi_allreduce(mpi_in_place,wv,NY+2,MPI_DOUBLE_PRECISION, & MPI_SUM,MPI_COMM_Z,ierror) - + do j=1,NY uv(j)=uv(j)/(float(NZ)*float(NX)) uw(j)=uw(j)/(float(NZ)*float(NX)) wv(j)=wv(j)/(float(NZ)*float(NX)) end do - + ! Get the y-derivative of the mean velocity at GY points do j=1,NY dudy(j)=(ume(j)-ume(j-1))/(GYF(j)-GYF(j-1)) @@ -355,13 +355,13 @@ SUBROUTINE SAVE_STATS_CHAN(FINAL) IF (RANKZ.eq.0) then gname='gyf' - Diag=gyf(1:NY) + Diag=gyf(1:NY) call WriteStatH5(FNAME,gname,Diag) gname='ume' Diag=ume(1:NY) call WriteStatH5(FNAME,gname,Diag) - + gname='vme' Diag=vme(1:NY) call WriteStatH5(FNAME,gname,Diag) @@ -535,7 +535,7 @@ SUBROUTINE SAVE_STATS_CHAN(FINAL) pe_diss(j,n)=thsum(j)/dble(NX*NZ) end do -#ifdef HDF5 +#ifdef HDF5 if (MOVIE) then FNAME='movie.h5' if (n.eq.1) then @@ -623,13 +623,13 @@ SUBROUTINE SAVE_STATS_CHAN(FINAL) #ifdef HDF5 - - FNAME='mean.h5' - + + FNAME='mean.h5' + IF (RANKZ.eq.0) THEN - + do n=1,N_TH - + Diag=thme(1:NY,n) gname='thme' & //CHAR(MOD(N,100)/10+48) @@ -659,7 +659,7 @@ SUBROUTINE SAVE_STATS_CHAN(FINAL) & //CHAR(MOD(N,100)/10+48) & //CHAR(MOD(N,10)+48) call WriteStatH5(FNAME,gname,Diag) - + end do END IF @@ -676,7 +676,7 @@ SUBROUTINE SAVE_STATS_CHAN(FINAL) open(41,file=FNAME,form='formatted',status='unknown') write(41,*) TIME_STEP,TIME,DELTA_T write(41,*) UBULK - do n=1,N_TH + do n=1,N_TH do j=1,NY write(41,402) j,GYF(J),thme(j,n) + ,dthdy(j,n),thrms(j,n),thv(j,n),pe_diss(j,n) @@ -686,10 +686,10 @@ SUBROUTINE SAVE_STATS_CHAN(FINAL) 402 format(I3,' ',6(F30.20,' ')) #endif - IF (RANK.EQ.0) + IF (RANK.EQ.0) & write(*,*) 'VERBOSITY: ',VERBOSITY - if (VERBOSITY.gt.4) then - IF (RANK.EQ.0) + if (VERBOSITY.gt.4) then + IF (RANK.EQ.0) & write(*,*) 'Outputting info for gnuplot...' 
open (unit=10, file="solution") do i=2,NXM @@ -699,7 +699,7 @@ SUBROUTINE SAVE_STATS_CHAN(FINAL) write (10,*) "" end do close (10) - call system ('gnuplot Date: Thu, 12 Dec 2019 09:03:08 +0000 Subject: [PATCH 51/66] fixed a bug with two lines over the max length --- diablo_2.0/for/channel.f | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/diablo_2.0/for/channel.f b/diablo_2.0/for/channel.f index 102196b..e938792 100644 --- a/diablo_2.0/for/channel.f +++ b/diablo_2.0/for/channel.f @@ -1661,9 +1661,9 @@ SUBROUTINE CREATE_GRID_CHAN if (rankY == NprocY - 1) then dyf(Ny + 1) = dyf(Ny) else - call mpi_send(dyf(Ny - 1), 1, mpi_double_precision, rankY + 1, + call mpi_send(dyf(Ny-1),1,mpi_double_precision,rankY+1, & 110 + rankY, mpi_comm_y, ierror) - call mpi_recv(dyf(Ny + 1), 1, mpi_double_precision, rankY + 1, + call mpi_recv(dyf(Ny+1),1,mpi_double_precision,rankY+1, & 100 + rankY + 1, mpi_comm_y, status, ierror) end if From 816cc36110f5436edbed9c3e72186114ae9145b4 Mon Sep 17 00:00:00 2001 From: johnryantaylor Date: Thu, 12 Dec 2019 09:11:43 +0000 Subject: [PATCH 52/66] moved call to sponge subroutine into Front flavor --- diablo_2.0/for/user_rhs.f | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/diablo_2.0/for/user_rhs.f b/diablo_2.0/for/user_rhs.f index 0117c6f..ad949a1 100644 --- a/diablo_2.0/for/user_rhs.f +++ b/diablo_2.0/for/user_rhs.f @@ -83,13 +83,13 @@ SUBROUTINE USER_RHS_CHAN_FOURIER ! End do N_TH END DO - END IF - ! 
Add sponge layer forcing DO N=1,N_TH CALL SPONGE_TH(N) END DO CALL SPONGE_VEL + + END IF RETURN END From 11d7024f4d5e277549c9ee016f18f2b7cd147eb4 Mon Sep 17 00:00:00 2001 From: johnryantaylor Date: Thu, 12 Mar 2020 12:39:01 +0000 Subject: [PATCH 53/66] fixed indexing of array in ghost_CSij2_mpi --- diablo_2.0/for/mpi.f | 26 +++++++++++++------------- 1 file changed, 13 insertions(+), 13 deletions(-) diff --git a/diablo_2.0/for/mpi.f b/diablo_2.0/for/mpi.f index c24581f..1d046e1 100644 --- a/diablo_2.0/for/mpi.f +++ b/diablo_2.0/for/mpi.f @@ -460,7 +460,7 @@ SUBROUTINE GHOST_GRID_MPI END - subroutine ghost_CSij2_mpi + subroutine GHOST_CSij2_MPI ! This subroutine is part of the MPI package for the LES subroutine ! to compute the strain @@ -478,8 +478,8 @@ subroutine ghost_CSij2_mpi ! The communication will be done in Fourier space, so these arrays should ! be complex arrays to match the velocity ! The size of the buffer array is 0:Nxp-1,0:NZP-1 - COMPLEX*16 ocpack(0:Nxp-1, 0:Nzp - 1) - COMPLEX*16 icpack(0:Nxp-1, 0:Nzp - 1) + COMPLEX*16 ocpack(0:Nxp-1, 0:TNKZ) + COMPLEX*16 icpack(0:Nxp-1, 0:TNKZ) ! If we are using more than one processor, then we need to pass data @@ -492,40 +492,40 @@ subroutine ghost_CSij2_mpi ! data at the upper ghost cells. ! We don't need CSij2 at the boundaries (dictates CSij4' & 6' only) - do k = 0, tNkz + do k = 0, TNKZ do i = 0, Nxp - 1 CSij2(i, k, Ny + 1) = -10000.d0 end do end do ! Now, send data down the chain - do k = 0, tNkz + do k = 0, TNKZ do i = 0, Nxp - 1 ocpack(i, k) = CSij2(i, k, 2) end do end do ! Now, we have packed the data into a compact array, pass the data up - call mpi_send(ocpack, (Nxp) * (Nzp) + call mpi_send(ocpack, (Nxp) * (TNKZ+1) & , mpi_double_complex & , rankY - 1, 3, mpi_comm_y, ierror) else if (rankY > 0) then ! Here, we are one of the middle processes and we need to pass data ! 
down and recieve data from above us - do k = 0, tNkz + do k = 0, TNKZ do i = 0, Nxp - 1 ocpack(i, k) = CSij2(i, k, 2) end do end do - call mpi_send(ocpack, (Nxp) * (Nzp) + call mpi_send(ocpack, (Nxp) * (TNKZ+1) & , mpi_double_complex & , rankY - 1, 3, mpi_comm_y, ierror) - call mpi_recv(icpack, (Nxp) * (Nzp) + call mpi_recv(icpack, (Nxp) * (TNKZ+1) & , mpi_double_complex & , rankY + 1, 3, mpi_comm_y, status, ierror) ! Now, unpack the data that we have recieved - do k = 0, tNkz + do k = 0, TNKZ do i = 0, Nxp - 1 CSij2(i, k, Ny + 1) = icpack(i, k) end do @@ -533,11 +533,11 @@ subroutine ghost_CSij2_mpi else ! Here, we must be the lowest process (RANK=0) and we need to receive ! data from above - call mpi_recv(icpack, (Nxp) * (Nzp) + call mpi_recv(icpack, (Nxp) * (TNKZ+1) & , mpi_double_complex & , rankY + 1, 3, mpi_comm_y, status, ierror) ! Unpack the data that we have recieved - do k = 0, tNkz + do k = 0, TNKZ do i = 0, Nxp - 1 CSij2(i, k, Ny + 1) = icpack(i, k) end do @@ -547,7 +547,7 @@ subroutine ghost_CSij2_mpi else ! Here, NprocY=1, so we just need to set the boundary values ! Set CSij2 (cu2(i, k, Ny + 2) - cu2(i, k, Ny + 1)) / dyf(Ny + 1) - do k = 0, tNkz + do k = 0, TNKZ do i = 0, Nxp - 1 CSij2(i, k, Ny + 1) = -10000.d0 end do From 38a89fa13e4af4962d31e77658597e9d6c14d33e Mon Sep 17 00:00:00 2001 From: johnryantaylor Date: Thu, 9 Apr 2020 15:41:14 +0100 Subject: [PATCH 54/66] Fixed viscous timestep criterion in courant.f --- diablo_2.0/for/courant.f | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/diablo_2.0/for/courant.f b/diablo_2.0/for/courant.f index 968d7ec..5aca159 100644 --- a/diablo_2.0/for/courant.f +++ b/diablo_2.0/for/courant.f @@ -15,7 +15,7 @@ subroutine courant dt=50.d0 ! 
Set the timestep based on viscosity and diffusivity - dt=min(dt,0.5d0*min(dx(1),dy(1))/NU) + dt=min(dt,0.5d0*min(dx(1),dy(1))**2.d0/NU) do n=1,N_TH dt=min(dt,dt*NU/(NU/PR(n))) end do From d6a55a5e4262916927cc0912d0fbaff49c239389 Mon Sep 17 00:00:00 2001 From: johnryantaylor Date: Wed, 14 Oct 2020 14:49:10 +0100 Subject: [PATCH 55/66] Fixed bug causing 'striping' problems in large aspect ratio domains --- diablo_2.0/for/mpi.f | 452 ++++++++++++++++++++++++++++--------------- 1 file changed, 294 insertions(+), 158 deletions(-) diff --git a/diablo_2.0/for/mpi.f b/diablo_2.0/for/mpi.f index 1d046e1..e6fa6ff 100644 --- a/diablo_2.0/for/mpi.f +++ b/diablo_2.0/for/mpi.f @@ -181,6 +181,173 @@ SUBROUTINE GHOST_CHAN_MPI RETURN END + SUBROUTINE GHOST_LES_MPI_KAPPA_T +! This subroutine is part of the MPI package for the LES subroutine +! Here, after calculating the SGS viscosity, NU_T on each core, +! We need to share the ghost cells between neighboring processors + + include 'header' + + integer i,j,k,N + +! Define the arrays that will be used for data packing. This makes the +! communication between processes more efficient by only requiring one +! send and recieve. +! The communication will be done in Fourier space, so these arrays should +! be complex arrays to match the velocity +! The size of the buffer array is 0:NXM,0:NZP-1 + REAL*8 OCPACK(0:NXM,0:NZP-1) + REAL*8 ICPACK(0:NXM,0:NZP-1) + + DO N=1,N_TH + +! If we are using more than one processor, then we need to pass data + + IF (NPROCY.gt.1) THEN + +! First, Pass data up the chain to higher ranked processes + + IF (RANKY.eq.0) THEN +! If we are the lowest ranked process, then we don't need to recieve +! data at the lower ghost cells. Instead, set NU_T=0 at the lower wall + DO K=0,NZP-1 + DO I=0,NXM + KAPPA_T(I,K,1,N)=0.d0 + KAPPA_T(I,K,2,N)=0.d0 + END DO + END DO + +! Pass data up to the next process from GY(NY) + DO K=0,NZP-1 + DO I=0,NXM + OCPACK(I,K)=KAPPA_T(I,K,NY,N) + END DO + END DO +! 
Now, we have packed the data into a compact array, pass the data up + CALL MPI_SEND(OCPACK,(NXM+1)*(NZP) + & ,MPI_DOUBLE_PRECISION + & ,RANKY+1,1,MPI_COMM_Y,IERROR) + +! End if RANK=0 + ELSE IF (RANKY.lt.NPROCY-1) THEN +! Here, we are one of the middle processes and we need to pass data +! up and recieve data from below + DO K=0,NZP-1 + DO I=0,NXM + OCPACK(I,K)=KAPPA_T(I,K,NY,N) + END DO + END DO +! Use MPI_SENDRECV since we need to recieve and send data + CALL MPI_SEND(OCPACK,(NXM+1)*(NZP) + & ,MPI_DOUBLE_PRECISION + & ,RANKY+1,1,MPI_COMM_Y,IERROR) + + CALL MPI_RECV(ICPACK,(NXM+1)*(NZP) + & ,MPI_DOUBLE_PRECISION + & ,RANKY-1,1,MPI_COMM_Y,STATUS,IERROR) +! Now, unpack the data that we have recieved + DO K=0,NZP-1 + DO I=0,NXM + KAPPA_T(I,K,1,N)=ICPACK(I,K) + END DO + END DO + + ELSE +! Otherwise, we must be the uppermost process with RANK=NPROCS-1 +! Here, we need to recieve data from below, but don't need to send data up + CALL MPI_RECV(ICPACK,(NXM+1)*(NZP) + & ,MPI_DOUBLE_PRECISION + & ,RANKY-1,1,MPI_COMM_Y,STATUS,IERROR) +! Unpack the data that we have recieved + DO K=0,NZP-1 + DO I=0,NXM + KAPPA_T(I,K,1,N)=ICPACK(I,K) + END DO + END DO + END IF + +! Now, we have hit the top process. Set the BCs and pass data down + + IF (RANKY.eq.NPROCY-1) THEN +! If we are the higest ranked process, then we don't need to recieve +! data at the upper ghost cells. +! Set KAPPA_T=0 at the upper wall + DO K=0,NZP-1 + DO I=0,NXM + KAPPA_T(I,K,NY,N)=0.d0 + KAPPA_T(I,K,NY+1,N)=0.d0 + END DO + END DO + +! Now, send data down the chain + DO K=0,NZP-1 + DO I=0,NXM + OCPACK(I,K)=KAPPA_T(I,K,2,N) + END DO + END DO +! Now, we have packed the data into a compact array, pass the data up + CALL MPI_SEND(OCPACK,(NXM+1)*(NZP) + & ,MPI_DOUBLE_PRECISION + & ,RANKY-1,3,MPI_COMM_Y,IERROR) + ELSE IF (RANKY.GT.0) THEN +! Here, we are one of the middle processes and we need to pass data +! 
down and recieve data from above us + DO K=0,NZP-1 + DO I=0,NXM + OCPACK(I,K)=KAPPA_T(I,K,2,N) + END DO + END DO + + CALL MPI_SEND(OCPACK,(NXM+1)*(NZP) + & ,MPI_DOUBLE_PRECISION + & ,RANKY-1,3,MPI_COMM_Y,IERROR) + + CALL MPI_RECV(ICPACK,(NXM+1)*(NZP) + & ,MPI_DOUBLE_PRECISION + & ,RANKY+1,3,MPI_COMM_Y,STATUS,IERROR) +! Now, unpack the data that we have recieved + DO K=0,NZP-1 + DO I=0,NXM + KAPPA_T(I,K,NY+1,N)=ICPACK(I,K) + END DO + END DO + ELSE +! Here, we must be the lowest process (RANK=0) and we need to recieve +! data from above + CALL MPI_RECV(ICPACK,(NXM+1)*(NZP) + & ,MPI_DOUBLE_PRECISION + & ,RANKY+1,3,MPI_COMM_Y,STATUS,IERROR) +! Unpack the data that we have recieved + DO K=0,NZP-1 + DO I=0,NXM + KAPPA_T(I,K,NY+1,N)=ICPACK(I,K) + END DO + END DO + END IF + + ELSE +! Here, NPROCY=1, so we just need to set the boundary values +! Set NU_T=0 at the lower wall + DO K=0,NZP-1 + DO I=0,NXM + KAPPA_T(I,K,1,N)=0.d0 + KAPPA_T(I,K,2,N)=0.d0 + END DO + END DO +! Set NU_T=0 at the upper wall + DO K=0,NZP-1 + DO I=0,NXM + KAPPA_T(I,K,NY,N)=0.d0 + KAPPA_T(I,K,NY+1,N)=0.d0 + END DO + END DO + + END IF + + END DO + + RETURN + END SUBROUTINE GHOST_LES_MPI ! This subroutine is part of the MPI package for the LES subroutine @@ -459,105 +626,6 @@ SUBROUTINE GHOST_GRID_MPI RETURN END - - subroutine GHOST_CSij2_MPI - -! This subroutine is part of the MPI package for the LES subroutine -! to compute the strain -! Share down the CSij2 Ny + 1 ghost cell between neighboring processors -! CSij2(Ny+1) is needed to compute temp at Ny - - include 'header' - include 'header_les' - - integer i, j, k, n - - ! Define the arrays that will be used for data packing. This makes the - ! communication between processes more efficient by only requiring one - ! send and recieve. - ! The communication will be done in Fourier space, so these arrays should - ! be complex arrays to match the velocity - ! 
The size of the buffer array is 0:Nxp-1,0:NZP-1 - COMPLEX*16 ocpack(0:Nxp-1, 0:TNKZ) - COMPLEX*16 icpack(0:Nxp-1, 0:TNKZ) - - ! If we are using more than one processor, then we need to pass data - - if (NprocY > 1) then - - ! Set the BCs and pass data down - - if (rankY == NprocY - 1) then - ! If we are the higest ranked process, then we don't need to recieve - ! data at the upper ghost cells. - - ! We don't need CSij2 at the boundaries (dictates CSij4' & 6' only) - do k = 0, TNKZ - do i = 0, Nxp - 1 - CSij2(i, k, Ny + 1) = -10000.d0 - end do - end do - - ! Now, send data down the chain - do k = 0, TNKZ - do i = 0, Nxp - 1 - ocpack(i, k) = CSij2(i, k, 2) - end do - end do - ! Now, we have packed the data into a compact array, pass the data up - call mpi_send(ocpack, (Nxp) * (TNKZ+1) - & , mpi_double_complex - & , rankY - 1, 3, mpi_comm_y, ierror) - else if (rankY > 0) then - ! Here, we are one of the middle processes and we need to pass data - ! down and recieve data from above us - do k = 0, TNKZ - do i = 0, Nxp - 1 - ocpack(i, k) = CSij2(i, k, 2) - end do - end do - - call mpi_send(ocpack, (Nxp) * (TNKZ+1) - & , mpi_double_complex - & , rankY - 1, 3, mpi_comm_y, ierror) - - call mpi_recv(icpack, (Nxp) * (TNKZ+1) - & , mpi_double_complex - & , rankY + 1, 3, mpi_comm_y, status, ierror) - ! Now, unpack the data that we have recieved - do k = 0, TNKZ - do i = 0, Nxp - 1 - CSij2(i, k, Ny + 1) = icpack(i, k) - end do - end do - else - ! Here, we must be the lowest process (RANK=0) and we need to receive - ! data from above - call mpi_recv(icpack, (Nxp) * (TNKZ+1) - & , mpi_double_complex - & , rankY + 1, 3, mpi_comm_y, status, ierror) - ! Unpack the data that we have recieved - do k = 0, TNKZ - do i = 0, Nxp - 1 - CSij2(i, k, Ny + 1) = icpack(i, k) - end do - end do - end if - - else - ! Here, NprocY=1, so we just need to set the boundary values - ! 
Set CSij2 (cu2(i, k, Ny + 2) - cu2(i, k, Ny + 1)) / dyf(Ny + 1) - do k = 0, TNKZ - do i = 0, Nxp - 1 - CSij2(i, k, Ny + 1) = -10000.d0 - end do - end do - - end if - - return - end - C----*|--.---------.---------.---------.---------.---------.---------.-|-------| SUBROUTINE THOMAS_FORWARD_REAL_MPI(A,B,C,G,INY,INX) C----*|--.---------.---------.---------.---------.---------.---------.-|-------| @@ -671,7 +739,11 @@ SUBROUTINE THOMAS_FORWARD_COMPLEX_MPI(A,B,C,G,INY,INX) END IF DO J=2,INY - A(I,J)=-A(I,J)/B(I,J-1) + IF (B(I,J-1).ne.0.d0) THEN + A(I,J)=-A(I,J)/B(I,J-1) + ELSE + A(I,J)=0.d0 + END IF B(I,J)=B(I,J)+A(I,J)*C(I,J-1) G(I,J)=G(I,J)+A(I,J)*G(I,J-1) END DO @@ -721,7 +793,11 @@ SUBROUTINE THOMAS_BACKWARD_REAL_MPI(A,B,C,G,INY,INX) END IF ! All processes solve from INY..1 DO J=INY,0,-1 - G(I,J)=(G(I,J)-C(I,J)*G(I,J+1))/B(I,J) + IF (B(I,J).ne.0.d0) THEN + G(I,J)=(G(I,J)-C(I,J)*G(I,J+1))/B(I,J) + ELSE + G(I,J)=0.d0 + END IF END DO IF (RANKY.NE.0) THEN ICPACK(1)=G(I,2) @@ -758,14 +834,24 @@ SUBROUTINE THOMAS_BACKWARD_COMPLEX_MPI(A,B,C,G,INY,INX) CALL MPI_RECV(G(I,INY+1),1,MPI_DOUBLE_COMPLEX,RANKY+1,11 & ,MPI_COMM_Y,status,ierror) J=INY - G(I,J)=(G(I,J)-C(I,J)*G(I,J+1))/B(I,J) + if (B(I,J).ne.0.d0) THEN + G(I,J)=(G(I,J)-C(I,J)*G(I,J+1))/B(I,J) + END IF ELSE C Else, if we are the highest process, then compute the solution at j=INY - G(I,INY)=G(I,INY)/B(I,INY) + IF (B(I,INY).ne.0.d0) THEN + G(I,INY)=G(I,INY)/B(I,INY) + ELSE + G(I,INY)=0.d0 + END IF END IF DO J=INY-1,1,-1 - G(I,J)=(G(I,J)-C(I,J)*G(I,J+1))/B(I,J) + IF (B(I,J).ne.0.d0) THEN + G(I,J)=(G(I,J)-C(I,J)*G(I,J+1))/B(I,J) + ELSE + G(I,J)=0.d0 + END IF END DO IF (RANKY.NE.0) THEN @@ -803,6 +889,8 @@ SUBROUTINE INIT_MPI * FFTW_OUT_OF_PLACE=0, FFTW_IN_PLACE=8, * FFTW_USE_WISDOM=16, FFTW_THREADSAFE=128 ) + + ! 
This subroutine initializes all mpi variables c$$$ integer(C_INTPTR_T) :: L,M @@ -811,13 +899,15 @@ SUBROUTINE INIT_MPI c$$$ integer(C_INTPTR_T) :: i, j, alloc_local, local_M, local_j_offset c$$$ INTEGER(C_INTPTR_T) NZP,iNZ,alloc_local + CALL MPI_INIT(IERROR) CALL MPI_COMM_SIZE(MPI_COMM_WORLD,IPROCS,IERROR) CALL MPI_COMM_RANK(MPI_COMM_WORLD,RANK,IERROR) + ! NPROCY=2 IF (NPROCS.NE.IPROCS) THEN - IF (RANK.EQ.0) WRITE(*,*) 'ERROR: compiled with ',NPROCS, - & 'cores, running with ',IPROCS,' cores.' + IF (RANK.EQ.0) WRITE(*,*) ' NPROCS is not equal to', + & ' the number of processes which we run on. ' CALL MPI_FINALIZE(ierror) stop END IF @@ -828,6 +918,7 @@ SUBROUTINE INIT_MPI CALL MPI_FINALIZE(ierror) stop END IF + ! NPROCSZ=NPROCS/NPROCY DIMS(2)=NPROCY DIMS(1)=NPROCZ @@ -888,10 +979,9 @@ SUBROUTINE INIT_MPI c ------------------------------ ! Box full x to z (1) - call MPI_TYPE_VECTOR(NZP,NXP,NX/2+1, + call MPI_TYPE_VECTOR(NZP,NXP,NX/2, & MPI_DOUBLE_COMPLEX,TYPE1,ierror) call MPI_TYPE_COMMIT(TYPE1,ierror) - bl(1:2)=(/1, 1/) disp(1:2)= (/0, NXP*16/) types=(/TYPE1, MPI_UB/) @@ -901,18 +991,22 @@ SUBROUTINE INIT_MPI call MPI_TYPE_FREE(TYPE1,ierror) ! 
Box full x to z (2) - call MPI_TYPE_VECTOR(NZP,NXP,NXP+1, - & MPI_DOUBLE_COMPLEX,TYPE1,ierror) - call MPI_TYPE_COMMIT(TYPE1,ierror) +C$$$ call MPI_TYPE_VECTOR(NZP,NXP,NXP+1, +C$$$ & MPI_DOUBLE_COMPLEX,TYPE1,ierror) +C$$$ call MPI_TYPE_COMMIT(TYPE1,ierror) - bl(1:2)=(/1, 1/) - disp(1:2)= (/0, NZP*(NXP+1)*16/) - types=(/TYPE1, MPI_UB/) +C$$$ bl(1:2)=(/1, 1/) +C$$$ disp(1:2)= (/0, NZP*(NXP+1)*16/) +C$$$ types=(/TYPE1, MPI_UB/) - call MPI_TYPE_STRUCT(2,bl,disp,types,XY2ZY_2,ierror) - call MPI_TYPE_COMMIT(XY2ZY_2,ierror) - call MPI_TYPE_FREE(TYPE1,ierror) +C$$$ call MPI_TYPE_STRUCT(2,bl,disp,types,XY2ZY_2,ierror) +C$$$ call MPI_TYPE_COMMIT(XY2ZY_2,ierror) +C$$$ call MPI_TYPE_FREE(TYPE1,ierror) + call MPI_TYPE_VECTOR(NZP,NXP,NXP, + & MPI_DOUBLE_COMPLEX,XY2ZY_2,ierror) + call MPI_TYPE_COMMIT(XY2ZY_2,ierror) + c /////////////////////////////////// c OTHER POSSIBLE TRANSPOSES!!!! c /////////////////////////////////// @@ -943,37 +1037,20 @@ SUBROUTINE INIT_MPI -c$$$c -c$$$c CHECK POINTS -c$$$c -c$$$ FNAME='start.h5' -c$$$ call readHDF5(FNAME) -c$$$ -c$$$! tvar=U1 -c$$$! FNAME='test.h5' -c$$$! call WriteHDF5_var_real(FNAME) -c$$$ -c$$$ FNAME='out.h5' -c$$$ call writeHDF5(FNAME) -c$$$ -c$$$ call mpi_finalize(ierror) -c$$$ stop -c$$$ - C Set a string to determine which input/output files to use C When MPI is used, each process will read/write to files with the C number of their rank (+1) appended to the end of the file. 
C The string MPI_IO_NUM will be used to define the RANK+1 for each process - IF (NPROCY.lt.10) THEN + IF (NPROCY.le.10) THEN MPI_IO_NUM=CHAR(MOD(RANKY+1,10)+48) - ELSE IF (NPROCY.lt.100) THEN + ELSE IF (NPROCY.le.100) THEN MPI_IO_NUM=CHAR(MOD(RANKY+1,100)/10+48) & //CHAR(MOD(RANKY+1,10)+48) - ELSE IF (NPROCY.lt.1000) THEN + ELSE IF (NPROCY.le.1000) THEN MPI_IO_NUM=CHAR(MOD(RANKY+1,1000)/100+48) & //CHAR(MOD(RANKY+1,100)/10+48) & //CHAR(MOD(RANKY+1,10)+48) - ELSE IF (NPROCY.lt.10000) THEN + ELSE IF (NPROCY.le.10000) THEN MPI_IO_NUM=CHAR(MOD(RANKY+1,10000)/1000+48) & //CHAR(MOD(RANKY+1,1000)/100+48) & //CHAR(MOD(RANKY+1,100)/10+48) @@ -1006,7 +1083,7 @@ SUBROUTINE INIT_CHAN_MPI IF (U_BC_YMIN.EQ.0) THEN JSTART=2 ELSE IF (U_BC_YMIN.EQ.1) THEN - JSTART=1 + JSTART=2 ELSE JSTART=2 END IF @@ -1027,7 +1104,7 @@ SUBROUTINE INIT_CHAN_MPI IF (U_BC_YMAX.EQ.0) THEN JEND=NY-1 ELSE IF (U_BC_YMAX.EQ.1) THEN - JEND=NY + JEND=NY-1 ELSE JEND=NY-1 END IF @@ -1308,6 +1385,38 @@ SUBROUTINE APPLY_BC_VEL_MPI END + SUBROUTINE APPLY_BC_SCALAR_MPI +! This subroutine applies the boundary conditions for the Poisson Eq. +! Note, MATL, MATD, etc. are dimensioned in header + INCLUDE 'header' + +! Apply Boundary conditions to velocity field + IF (RANKY.EQ.0) THEN + CALL APPLY_BC_SCALAR_LOWER + END IF + IF (RANKY.EQ.NPROCY-1) THEN + CALL APPLY_BC_SCALAR_UPPER + END IF + + RETURN + END + + SUBROUTINE APPLY_BC_TH_PHYS_MPI +! This subroutine applies the boundary conditions for the Poisson Eq. +! Note, MATL, MATD, etc. are dimensioned in header + INCLUDE 'header' + +! Apply Boundary conditions to velocity field + IF (RANKY.EQ.0) THEN + CALL APPLY_BC_TH_PHYS_LOWER + END IF + IF (RANKY.EQ.NPROCY-1) THEN + CALL APPLY_BC_TH_PHYS_UPPER + END IF + + RETURN + END + SUBROUTINE APPLY_BC_VEL_PHYS_MPI ! This subroutine applies the boundary conditions for the Poisson Eq. ! Note, MATL, MATD, etc. 
are dimensioned in header @@ -1425,6 +1534,8 @@ SUBROUTINE FFT_XZ_TO_FOURIER(V,VV,JMIN,JMAX) REAL*8 V (0:NX+1,0:NZP+1,0:NY+1) COMPLEX*16 VV (0:NXP ,0:NZ +1,0:NY+1) COMPLEX*16 TMP(0:NX/2,0:NZP+1,0:NY+1) + COMPLEX*16 TMP2(0:NXP-1,0:NZ-1) + COMPLEX*16 TMP_1A(0:NX/2-1,0:NZP-1,0:NY+1) INTEGER I,J,K INTEGER JMIN,JMAX @@ -1439,10 +1550,14 @@ SUBROUTINE FFT_XZ_TO_FOURIER(V,VV,JMIN,JMAX) DO K=0,NZP-1 DO I=0,NKX TMP(I,K,J)=TMP(I,K,J)/NX + TMP_1A(I,K,J)=TMP(I,K,J) END DO DO I=NKX+1,NX/2 TMP(I,K,J)=cmplx(0.d0,0.d0) END DO + DO I=NKX+1,NX/2-1 + TMP_1A(I,K,J)=TMP(I,K,J) + END DO END DO END DO @@ -1450,13 +1565,21 @@ SUBROUTINE FFT_XZ_TO_FOURIER(V,VV,JMIN,JMAX) !write(110+RANK) TMP(0:NX/2,0:NZP-1,1) DO J=JMIN,JMAX - call mpi_alltoall(TMP(0,0,J),1,XY2ZY_1, - & VV(0,0,J),1,XY2ZY_2,MPI_COMM_Z,IERROR) + call mpi_alltoall(TMP_1A(0,0,J),1,XY2ZY_1,TMP2,1,XY2ZY_2, + & MPI_COMM_Z,IERROR) + DO K=0,NZ-1 + DO I=0,NXP-1 + VV(I,K,J)=TMP2(I,K) + END DO + VV(NXP,K,J)=cmplx(0.d0,0.d0) + END DO + DO K=NZ,NZ+1 + DO I=0,NXP + VV(I,K,J)=cmplx(0.d0,0.d0) + ENDDO + ENDDO END DO - !write(120+RANK,'(2E25.15)') VV(0:NXP-1,0:NZ-1,1) - !write(120+RANK) VV(0:NXP-1,0:NZ-1,1) - ! FFT in Z DO J=JMIN,JMAX CALL FFTWND_F77(FFTW_Z_TO_F_PLAN,NXP, @@ -1483,9 +1606,6 @@ SUBROUTINE FFT_XZ_TO_FOURIER(V,VV,JMIN,JMAX) END DO END DO -C$$$ ! write(130+RANK,'(2E25.15)') VV(0:NXP-1,0:NZ-1,1) - !write(130+RANK) VV(0:NXP-1,0:NZ-1,1) - END SUBROUTINE @@ -1497,6 +1617,8 @@ SUBROUTINE FFT_XZ_TO_PHYSICAL(VV,V,JMIN,JMAX) REAL*8 V (0:NX+1,0:NZP+1,0:NY+1) COMPLEX*16 VV (0:NXP ,0:NZ +1,0:NY+1) COMPLEX*16 TMP(0:NX/2,0:NZP+1,0:NY+1) + COMPLEX*16 TMP2(0:NXP-1,0:NZ-1) + COMPLEX*16 TMP_1A(0:NX/2-1,0:NZP-1,0:NY+1) INTEGER I,J,K INTEGER JMIN,JMAX @@ -1509,21 +1631,38 @@ SUBROUTINE FFT_XZ_TO_PHYSICAL(VV,V,JMIN,JMAX) VV(I,NZ-1+K-NKZ,J)=VV(I,NKZ+K,J) END DO END DO - DO I=0,NXP - DO K=NKZ+1,NZ-NKZ-1 + DO K=NKZ+1,NZ-NKZ-1 + DO I=0,NXP VV(I,K,J)=cmplx(0.d0,0.d0) END DO -! DO K=NZ,NZ+1 -! VV(I,K,J)=cmplx(0.d0,0.d0) -! 
END DO END DO CALL FFTWND_F77(FFTW_Z_TO_P_PLAN,NXP, * VV(0,0,J), NXP+1, 1, VV(0,0,J), NXP+1, 1) END DO DO J=JMIN,JMAX - call mpi_alltoall(VV(0,0,J),1,XY2ZY_2, - & TMP(0,0,J),1,XY2ZY_1,MPI_COMM_Z,IERROR) + DO K=0,NZ-1 + DO I=0,NXP-1 + TMP2(I,K)=VV(I,K,J) + END DO + END DO + call mpi_alltoall(TMP2,1,XY2ZY_2,TMP_1A(0,0,J),1,XY2ZY_1, + & MPI_COMM_Z,IERROR) + DO K=0,NZP-1 + DO I=0,NX/2-1 + TMP(I,K,J)=TMP_1A(I,K,J) + END DO + TMP(NX/2,K,J)=cmplx(0.d0,0.d0) + END DO + DO K=NZP,NZP+1 + DO I=0,NX/2 + TMP(I,K,J)=cmplx(0.d0,0.d0) + ENDDO + ENDDO + + +! call mpi_alltoall(VV(0,0,J),1,XY2ZY_2, +! & TMP(0,0,J),1,XY2ZY_1,MPI_COMM_Z,IERROR) END DO ! FFT in X @@ -1532,10 +1671,8 @@ SUBROUTINE FFT_XZ_TO_PHYSICAL(VV,V,JMIN,JMAX) * TMP(0,0,J), 1, NX/2+1, V(0,0,J), 1, NX+2) END DO - END SUBROUTINE - SUBROUTINE INTEGRATE_Y_VAR(VAR,RES,COMM) INCLUDE 'header' @@ -1560,7 +1697,6 @@ SUBROUTINE INTEGRATE_Y_VAR(VAR,RES,COMM) END SUBROUTINE - SUBROUTINE INTEGRATE_Z_VAR(VAR,RES,COMM) INCLUDE 'header' From 5a1ea84aa0db898e1cf57bf1af1dff3d1e1acb69 Mon Sep 17 00:00:00 2001 From: johnryantaylor Date: Wed, 11 Nov 2020 10:09:40 +0000 Subject: [PATCH 56/66] Fixed mpi treatment of sponge_th for damping mean --- diablo_2.0/for/user_rhs.f | 2 ++ 1 file changed, 2 insertions(+) diff --git a/diablo_2.0/for/user_rhs.f b/diablo_2.0/for/user_rhs.f index ad949a1..8f74e00 100644 --- a/diablo_2.0/for/user_rhs.f +++ b/diablo_2.0/for/user_rhs.f @@ -243,10 +243,12 @@ SUBROUTINE SPONGE_TH(N) end do end do ! 
Damp the mean gradient towards TH_0 + if (RANKZ.eq.0) then do j=JSTART_TH(N),JEND_TH(N) CFTH(0,0,j,n)=CFTH(0,0,j,n)-SPONGE_SIGMA(j) & *(CTH(0,0,j,n)-TH_0(J)) end do + end if return end From 323b9eb64290187cda7186f2aa6f37051ca6dfca Mon Sep 17 00:00:00 2001 From: johnryantaylor Date: Sat, 24 Apr 2021 09:29:59 +0100 Subject: [PATCH 57/66] Added AMD model to LES --- diablo_2.0/for/les_th.f | 586 ++++++++++++++++++++++++++++++++++++++++ 1 file changed, 586 insertions(+) create mode 100644 diablo_2.0/for/les_th.f diff --git a/diablo_2.0/for/les_th.f b/diablo_2.0/for/les_th.f new file mode 100644 index 0000000..caea15a --- /dev/null +++ b/diablo_2.0/for/les_th.f @@ -0,0 +1,586 @@ + subroutine les_chan_th +C This subroutine models the terms owing to the subgrid scale stress +C if the computation is to be treated as an LES not a DNS +C This subroutine should be called when the velocity is in fourier space +C in the periodic directions, on output, the velocity will be +C in physical space. +C It is assumed that the test filter and the LES filter are performed +C by the same operation +C On output S1 should contain |S| which may be used again in les_chan_th +C if for the subgrid scalar dissipation + + include 'header' + include 'header_les_th' + + integer i,j,k,l,m,ij,n + + real*8 S1_mean(0:NY+1) + real*8 NU_T_mean(0:NY+1) + real*8 KAPPA_T_mean(0:NY+1) + + real*8 C_SMAG +! parameter (C_SMAG=0.13d0) +! parameter (C_SMAG=0.01d0) +! parameter (C_SMAG=0.0992d0) + parameter (C_SMAG=0.0433d0) + real*8 C_AMD + parameter (C_AMD=0.2887d0) + real*8 DELTA_Y(0:NY+1),DELTA_YF(0:NY+1) + real*8 deltax,deltay,deltaz + real*8 alpha_sgs + real*8 denominator_sum + +! Array for writing HDF5 files + real*8 Diag(1:NY) + character*20 gname + + character*35 FNAME + +! Array to store the velocity index for each component of the strain rate tensor + integer U_index1(6) + integer U_index2(6) + +! Here, alpha is the test/LES filter width ratio + parameter (alpha_sgs=2.449d0) +! 
beta is the LES/grid filter width ratio + beta_sgs=3.d0 + +! First, for all models, apply boundary conditions to the velocity field +! (fill ghost cells) to ensure accurate calculation of gradients +C Apply Boundary conditions to velocity field + IF (USE_MPI) THEN + CALL APPLY_BC_VEL_MPI + CALL APPLY_BC_SCALAR_MPI + CALL GHOST_CHAN_MPI + ELSE + CALL APPLY_BC_VEL_LOWER + CALL APPLY_BC_VEL_UPPER + CALL APPLY_BC_SCALAR_LOWER + CALL APPLY_BC_SCALAR_UPPER + END IF + +! If we are using Neuman boundary conditions, over-write the values of the +! velocity at the ghost cells so that the LES model doesn't use the large +! velocity gradient +! Does not yet include Neuman boundary condition for scalar + CALL APPLY_BC_LES + CALL APPLY_BC_SCALAR_LES + +! First, for all models, apply boundary conditions to the velocity field +! (fill ghost cells) to ensure accurate calculation of gradients +C Apply Boundary conditions to velocity field +! IF (USE_MPI) THEN +! CALL APPLY_BC_VEL_MPI +! CALL GHOST_CHAN_MPI +! ELSE +! CALL APPLY_BC_VEL_LOWER +! CALL APPLY_BC_VEL_UPPER +! END IF + +! IF (USE_MPI) THEN +! CALL APPLY_BC_TH_MPI(MATL,MATD,MATU,VEC,N) +! CALL GHOST_CHAN_MPI +! END IF + +! If we are using Neuman boundary conditions, over-write the values of the +! velocity at the ghost cells so that the LES model doesn't use the large +! velocity gradient +! Does not yet include Neuman boundary condition for scalar +! CALL APPLY_BC_LES + + if (LES_MODEL_TYPE.EQ.1) then +! Constant Smagorinsky model + +! APPLY constant SGS Prandlt number + DO N=1,N_TH + do j=1,NY+1 + do k=0,NZP-1 + do i=0,NXM + KAPPA_T(I,K,J,N)=1.d0*NU_T(I,K,J) + end do + end do + end do + end do + +! Convert the velocity to physical space + call FFT_XZ_TO_PHYSICAL(CU1,U1,0,NY+1) + call FFT_XZ_TO_PHYSICAL(CU2,U2,0,NY+1) + call FFT_XZ_TO_PHYSICAL(CU3,U3,0,NY+1) + +! 
Convert the scalar to physical space + + DO N=1,N_TH + CS1(:,:,:)=CTH(:,:,:,N) + CALL FFT_XZ_TO_PHYSICAL(CS1,S1,0,NY+1) + TH(:,:,:,N)=S1(:,:,:) + END DO + + else if ((LES_MODEL_TYPE.EQ.2).or.(LES_MODEL_TYPE.eq.3)) then +! Here, use a dynamic smagorinsky model with or without scale similar part + + stop 'ERROR: LES_MODEL_TYPE=2, 3 not supported yet in MPI' + + else if (LES_MODEL_TYPE.EQ.4) then +! Anisotrophic minimum-dissipation model Rozema + +! APPLY constant SGS Prandlt number + DO N=1,N_TH + do j=1,NY+1 + do k=0,NZP-1 + do i=0,NXM + KAPPA_T(I,K,J,N)=1.d0*NU_T(I,K,J) + end do + end do + end do + end do + + + else if (LES_MODEL_TYPE.EQ.5) then +! Anisotrophic minimum-dissipation model Verstappen + +! Compute all the velocity and scalar gradients + + call compute_all_gradients_chan + +! Convert the velocity to physical space + call FFT_XZ_TO_PHYSICAL(CU1,U1,0,NY+1) + call FFT_XZ_TO_PHYSICAL(CU2,U2,0,NY+1) + call FFT_XZ_TO_PHYSICAL(CU3,U3,0,NY+1) + +! Convert the scalar to physical space + + DO N=1,N_TH + CS1(:,:,:)=CTH(:,:,:,N) + CALL FFT_XZ_TO_PHYSICAL(CS1,S1,0,NY+1) + TH(:,:,:,N)=S1(:,:,:) + END DO + + deltax = (DX(1)*beta_sgs) + deltaz = (DZ(1)*beta_sgs) + + DO N=1,N_TH + +! First compute at GYF points +! DO J=JSTART,JEND + DO J=JSTART_TH(N),JEND_TH(N) +!Set filter length (based on grid size) in y direction +!Based on constant Smag code + deltay=(DYF(J)*2.d0) + DO K=0,NZP-1 + DO I=0,NXM +!First calculate numerator and store it in S1_th. 
+ S1_th(I,K,J)=-( + & deltax**2*du1dx(I,K,J)*dthetadx(I,K,J,N)**2 + & +deltay**2*du2dy(I,K,J) + & *(0.5d0*(dthetady(I,K,J+1,N)+dthetady(I,K,J,N)))**2 + & +deltaz**2*du3dz(I,K,J)*dthetadz(I,K,J,N)**2 + & +(deltay**2*0.5d0*(du1dy(I,K,J+1)+du1dy(I,K,J)) + & +deltax**2*0.5d0*(du2dx(I,K,J+1)+du2dx(I,K,J))) + & *dthetadx(I,K,J,N) + & *0.5d0*(dthetady(I,K,J+1,N)+dthetady(I,K,J,N)) + & +(deltaz**2*du1dz(I,K,J)+deltax**2*du3dx(I,K,J)) + & *dthetadx(I,K,J,N) + & *dthetadz(I,K,J,N) + & +(deltaz**2*0.5d0*(du2dz(I,K,J+1)+du2dz(I,K,J)) + & +deltay**2*0.5d0*(du3dy(I,K,J+1)+du3dy(I,K,J))) + & *0.5d0*(dthetady(I,K,J+1,N)+dthetady(I,K,J,N)) + & *dthetadz(I,K,J,N) + & ) + + IF (S1_th(I,K,J) <= 0.0d0) THEN + S1_th(I,K,J)=0.0d0 + ELSE + + S1_th(I,K,J)=S1_th(I,K,J)/ + & ((deltax*dthetadx(I,K,J,N))**2 + & +(deltay*0.5d0*(dthetady(I,K,J+1,N)+dthetady(I,K,J,N)))**2 + & +(deltaz*dthetadz(I,K,J,N))**2) + + END IF + + + END DO + END DO + END DO + +! Compute kappa_e at GY points and store in TEMP_th + DO J=2,NY+1 +!Set filter length (based on grid size) in y direction +!Based on constant Smag code + deltay=(DY(J)*2.d0) + DO K=0,NZP-1 + DO I=0,NXM +!First calculate numerator and store it in TEMP_th. 
+ TEMP_th(I,K,J)=-( + & deltax**2*(du1dx(I,K,J)*DYF(j-1)+du1dx(I,K,J-1)*DYF(j)) + & /(2.d0*DY(j)) + & *((dthetadx(I,K,J,N)*DYF(j-1)+dthetadx(I,K,J-1,N)*DYF(j)) + & /(2.d0*DY(j)))**2 + & +deltay**2*(du2dy(I,K,J)*DYF(j-1)+du2dy(I,K,J-1)*DYF(j)) + & /(2.d0*DY(j)) + & *(dthetady(I,K,J,N))**2 + & +deltaz**2*(du3dz(I,K,J)*DYF(j-1)+du3dz(I,K,J-1)*DYF(j)) + & /(2.d0*DY(j)) + & *((dthetadz(I,K,J,N)*DYF(j-1)+dthetadz(I,K,J-1,N)*DYF(j)) + & /(2.d0*DY(j)))**2 + & +(deltay**2*du1dy(I,K,J) + & +deltax**2*du2dx(I,K,J)) + & *(dthetadx(I,K,J,N)*DYF(j-1)+dthetadx(I,K,J-1,N)*DYF(j)) + & /(2.d0*DY(j)) + & *(dthetady(I,K,J,N)) + & +(deltaz**2*(du1dz(I,K,J)*DYF(j-1)+du1dz(I,K,J-1)*DYF(j)) + & /(2.d0*DY(j)) + & +deltax**2*(du3dx(I,K,J)*DYF(j-1)+du3dx(I,K,J-1)*DYF(j)) + & /(2.d0*DY(j))) + & *(dthetadx(I,K,J,N)*DYF(j-1)+dthetadx(I,K,J-1,N)*DYF(j)) + & /(2.d0*DY(j)) + & *(dthetadz(I,K,J,N)*DYF(j-1)+dthetadz(I,K,J-1,N)*DYF(j)) + & /(2.d0*DY(j)) + & +(deltaz**2*du2dz(I,K,J) + & +deltay**2*du3dy(I,K,J)) + & *(dthetady(I,K,J,N)) + & *(dthetadz(I,K,J,N)*DYF(j-1)+dthetadz(I,K,J-1,N)*DYF(j)) + & /(2.d0*DY(j)) + & ) + + IF (TEMP_th(I,K,J) <= 0.0d0) THEN + TEMP_th(I,K,J)=0.0d0 + ELSE + + TEMP_th(I,K,J)=TEMP_th(I,K,J)/ + & ((deltax*(dthetadx(I,K,J,N)*DYF(j-1)+dthetadx(I,K,J-1,N)*DYF(j)) + & /(2.d0*DY(j)))**2 + & +(deltay*dthetady(I,K,J,N))**2 + & +(deltaz*(dthetadz(I,K,J,N)*DYF(j-1)+dthetadz(I,K,J-1,N)*DYF(j)) + & /(2.d0*DY(j)))**2) + + END IF + + + + END DO + END DO + END DO + + +! Now, compute S1_th*dthetadx_i, storing in dthetadx_i +! Need only to compute at GYF points + DO J=JSTART_TH(N),JEND_TH(N) + DO K=0,NZP-1 + DO I=0,NXM + dthetadx(I,K,J,N)=S1_th(I,K,J)*dthetadx(I,K,J,N) +! dthetady is added through an implicit eddy diffusivity + dthetadz(I,K,J,N)=S1_th(I,K,J)*dthetadz(I,K,J,N) + END DO + END DO + END DO + +! Compute the filter lengthscale +! Absorb -2.d0*C_AMD**2.d0 here for efficiency + DO J=1,NY+1 +! At GYF points: +! 
AMD (based off constant Smag) + DELTA_YF(J)=-2.d0*C_AMD**2.d0 + & *3/(1/(DX(1)*beta_sgs)**2.d0+1/(DYF(J)*2.d0)**2.d0 + & +1/(DZ(1)*beta_sgs)**2.d0) +! & *(DX(1)*beta_sgs*DYF(J)*2.d0*DZ(1)*beta_sgs)**(2.d0/3.d0) +! Wall Damping +! DELTA_YF(J)= +! & -2.d0*(0.1d0*(1.d0-exp((-GYF(J)/(NU*25.d0))**3.d0)))**2.d0 +! & *(DX(1)*beta_sgs*DYF(J)*2.d0*DZ(1)*beta_sgs)**(2.d0/3.d0) + + END DO + + DO J=1,NY+1 +! At GY points: +! AMD (based off Constant Smagorinsky) + DELTA_Y(J)=-2.d0*C_AMD**2.d0 + & *3/(1/(DX(1)*beta_sgs)**2.d0+1/(DY(J)*2.d0)**2.d0 + & +1/(DZ(1)*beta_sgs)**2.d0) +! & *(DX(1)*beta_sgs*DY(J)*2.d0*DZ(1)*beta_sgs)**(2.d0/3.d0) +! Wall Damping +! DELTA_Y(J)= +! & -2.d0*(0.1d0*(1.d0-exp((-GY(J)/(NU*25.d0))**3.d0)))**2.d0 +! & *(DX(1)*beta_sgs*DY(J)*2.d0*DZ(1)*beta_sgs)**(2.d0/3.d0) + END DO + + +! Get the eddy diffusivity at GY points + DO J=2,NY + DO K=0,NZP-1 + DO I=0,NXM + KAPPA_T(I,K,J,N)=-0.5d0*DELTA_Y(J)*TEMP_th(I,K,J) + END DO + END DO + END DO + +! Now that we have calculated KAPPA_T, set the value at the ghost cells +! by sharing with neighboring processes. This subroutine also sets +! the value of KAPPA_T to zero at both walls + CALL GHOST_LES_MPI_KAPPA_T + +! Convert the scalar flux tensor to Fourier space + +! DO N=1,N_TH + S1(:,:,:)=dthetadx(:,:,:,N) + CALL FFT_XZ_TO_FOURIER(S1,CS1,0,NY+1) + Cdthetadx(:,:,:,N)=CS1(:,:,:) +! dthetady is added through an implicit eddy diffusivity + S1(:,:,:)=dthetadz(:,:,:,N) + CALL FFT_XZ_TO_FOURIER(S1,CS1,0,NY+1) + Cdthetadz(:,:,:,N)=CS1(:,:,:) +! END DO + +! Now, compute TAU, store in the corresponging dthetadx_i + DO J=1,NY+1 + DO K=0,TNKZ + DO I=0,NXP-1 + Cdthetadx(I,K,J,N)=0.5d0*DELTA_YF(J)*Cdthetadx(I,K,J,N) +! Cthetady(:,:,:,N) is added through an implicit eddy diffusivity +! 
Cdthetady(I,K,J,N)=DELTA_YF(J)*Cdthetady(I,K,J,N) + Cdthetadz(I,K,J,N)=0.5d0*DELTA_YF(J)*Cdthetadz(I,K,J,N) + END DO + END DO + END DO + + END DO + + end if + + + DO N=1,N_TH + DO J=JSTART_TH(N),JEND_TH(N) + DO K=0,TNKZ + DO I=0,NXP-1 + CFTH(I,K,J,N)=CFTH(I,K,J,N) + & -CIKX(I)*Cdthetadx(I,K,J,N) +! dthetady is added through implicit eddy diffusivity + & -CIKZ(K)*Cdthetadz(I,K,J,N) + END DO + END DO + END DO + END DO + +! Periodically, output mean quantities + IF ((MOD(TIME_STEP,SAVE_STATS_INT).EQ.0).AND.(RK_STEP.EQ.1)) THEN +! Get plane averages + do j=0,NY+1 + S1_mean(j)=0.d0 + NU_T_mean(j)=0.d0 + KAPPA_T_mean(J)=0.d0 + do i=0,NXM + do k=0,NZP-1 + S1_mean(j)=S1_mean(j)+S1(I,K,J) + NU_T_mean(j)=NU_T_mean(j)+NU_T(I,K,J) +! Only set up for single scalar (N = 1) + KAPPA_T_mean(j)=KAPPA_T_mean(j)+KAPPA_T(I,K,J,1) + end do + end do + end do + +! do j=0,NY+1 +! do i=0,NXM +! KAPPA_T_mean(j)=KAPPA_T_mean(j)+KAPPA_T(I,5,J,1) +! end do +! end do + + call mpi_allreduce(mpi_in_place,S1_mean,NY+2,MPI_DOUBLE_PRECISION, + & MPI_SUM,MPI_COMM_Z,ierror) + call mpi_allreduce(mpi_in_place,NU_T_mean,NY+2 + & ,MPI_DOUBLE_PRECISION, + & MPI_SUM,MPI_COMM_Z,ierror) + call mpi_allreduce(mpi_in_place,KAPPA_T_mean,NY+2 + & ,MPI_DOUBLE_PRECISION, + & MPI_SUM,MPI_COMM_Z,ierror) + + do j=0,NY+1 + S1_mean(j)=S1_mean(j)/dble(NX*NZ) + NU_T_mean(j)=NU_T_mean(j)/dble(NX*NZ) + KAPPA_T_mean(j)=KAPPA_T_mean(j)/dble(NX*NZ) + end do + +#ifdef HDF5 + FNAME='mean_les.h5' + IF (RANKZ.eq.0) then + gname='kappa_sgs' + Diag=KAPPA_T_mean(1:NY) + call WriteStatH5(FNAME,gname,Diag) + END IF + +#else + + IF (RANKZ.EQ.0) THEN + IF (USE_MPI) THEN + FNAME='mean_les'//trim(MPI_IO_NUM)//'.txt' + ELSE + FNAME='mean_les.txt' + END IF + open(42,file=FNAME,form='formatted',status='unknown') + + write(42,*) TIME_STEP,TIME,DELTA_T + do j=1,NY + write(42,420) j,GYF(J), + & NU_T_mean(J), + & KAPPA_T_mean(J) + end do + END IF +420 format(I3,' ',2(F20.9,' '),2(F20.9,' ')) + END IF + +#endif + END IF + +999 continue + + RETURN + 
END
+
+
+      subroutine compute_all_gradients_chan
+C This subroutine computes all gradients for the filtered velocity field
+C and scalar field (testing with one scalar field only).
+C The input velocity + scalar field should be in fourier space in the periodic
+C directions.
+C For use in the LES model in channel flow (2 periodic directions)
+      include 'header'
+      include 'header_les_th'
+
+      integer I,J,K,ij,N
+
+      DO J=1,NY
+       DO K=0,TNKZ
+        DO I=0,NXP-1
+
+
+         Cdu1dx(I,K,J)=CIKX(I)*CU1(I,K,J)
+         Cdu1dz(I,K,J)=CIKZ(K)*CU1(I,K,J)
+
+         Cdu2dx(I,K,J)=CIKX(I)*CU2(I,K,J)
+         Cdu2dy(I,K,J)=(CU2(I,K,J+1)-CU2(I,K,J))/DYF(J)
+         Cdu2dz(I,K,J)=CIKZ(K)*CU2(I,K,J)
+
+         Cdu3dx(I,K,J)=CIKX(I)*CU3(I,K,J)
+         Cdu3dz(I,K,J)=CIKZ(K)*CU3(I,K,J)
+
+        END DO
+       END DO
+      END DO
+      DO J=1,NY+1
+       DO K=0,TNKZ
+        DO I=0,NXP-1
+
+         Cdu1dy(I,K,J)=(CU1(I,K,J)-CU1(I,K,J-1))/DY(J)
+
+         Cdu3dy(I,K,J)=(CU3(I,K,J)-CU3(I,K,J-1))/DY(J)
+
+        END DO
+       END DO
+      END DO
+
+
+
+      DO N=1,N_TH
+
+      DO J=1,NY
+       DO K=0,TNKZ
+        DO I=0,NXP-1
+
+         Cdthetadx(I,K,J,N)=CIKX(I)*CTH(I,K,J,N)
+         Cdthetadz(I,K,J,N)=CIKZ(K)*CTH(I,K,J,N)
+
+        END DO
+       END DO
+      END DO
+      DO J=1,NY+1
+       DO K=0,TNKZ
+        DO I=0,NXP-1
+
+         Cdthetady(I,K,J,N)=(CTH(I,K,J,N)-CTH(I,K,J-1,N))/DY(J)
+
+        END DO
+       END DO
+      END DO
+
+
+
+      END DO
+
+
+      CALL FFT_XZ_TO_PHYSICAL(Cdu1dx,du1dx,0,NY+1)
+      CALL FFT_XZ_TO_PHYSICAL(Cdu2dy,du2dy,0,NY+1)
+      CALL FFT_XZ_TO_PHYSICAL(Cdu3dz,du3dz,0,NY+1)
+
+      CALL FFT_XZ_TO_PHYSICAL(Cdu2dx,du2dx,0,NY+1)
+      CALL FFT_XZ_TO_PHYSICAL(Cdu3dx,du3dx,0,NY+1)
+
+      CALL FFT_XZ_TO_PHYSICAL(Cdu1dz,du1dz,0,NY+1)
+      CALL FFT_XZ_TO_PHYSICAL(Cdu2dz,du2dz,0,NY+1)
+
+      CALL FFT_XZ_TO_PHYSICAL(Cdu1dy,du1dy,0,NY+1)
+      CALL FFT_XZ_TO_PHYSICAL(Cdu3dy,du3dy,0,NY+1)
+
+      DO N=1,N_TH
+        CS1(:,:,:)=Cdthetadx(:,:,:,N)
+        CALL FFT_XZ_TO_PHYSICAL(CS1,S1,0,NY+1)
+        dthetadx(:,:,:,N)=S1(:,:,:)
+
+        CS1(:,:,:)=Cdthetady(:,:,:,N)
+        CALL FFT_XZ_TO_PHYSICAL(CS1,S1,0,NY+1)
+        dthetady(:,:,:,N)=S1(:,:,:)
+
+        CS1(:,:,:)=Cdthetadz(:,:,:,N)
+        CALL FFT_XZ_TO_PHYSICAL(CS1,S1,0,NY+1)
+
dthetadz(:,:,:,N)=S1(:,:,:) + + END DO + +!We now have all the gradients in physical space + + + RETURN + END + + + + + SUBROUTINE APPLY_BC_SCALAR_LES + include 'header' + integer i,j,k,N + + DO N=1,N_TH + +! If we are using Neuman boundary conditions, over-write the values of the +! scalar at the ghost cells so that the LES model doesn't use the large +! scalar gradient + IF (TH_BC_YMAX(N).eq.1) THEN + IF ((RANKY.eq.NPROCY-1).or.(.NOT.USE_MPI)) THEN +! We are on process at the upper wall + DO K=0,TNKZ + DO I=0,NXP-1 + CTH(I,K,NY,N)=CTH(I,K,NY-1,N) + CTH(I,K,NY+1,N)=CTH(I,K,NY,N) + END DO + END DO + END IF + END IF + + IF (TH_BC_YMIN(N).eq.1) THEN + IF ((RANKY.eq.0).or.(.NOT.USE_MPI)) THEN +! We are on a process at the bottom wall + DO K=0,TNKZ + DO I=0,NXP-1 + CTH(I,K,1,N)=CTH(I,K,2,N) + CTH(I,K,0,N)=CTH(I,K,1,N) + END DO + END DO + END IF + END IF + + END DO + + RETURN + END + + + + From 62144e0a11dfa6f5586247217fcb777529f2fb4f Mon Sep 17 00:00:00 2001 From: johnryantaylor Date: Sat, 24 Apr 2021 09:30:16 +0100 Subject: [PATCH 58/66] Added AMD model to LES --- diablo_2.0/for/les.f | 1048 ++++++++++++++++++++++++++++++++++++++---- 1 file changed, 949 insertions(+), 99 deletions(-) diff --git a/diablo_2.0/for/les.f b/diablo_2.0/for/les.f index 2bb8caa..34ff945 100644 --- a/diablo_2.0/for/les.f +++ b/diablo_2.0/for/les.f @@ -1,8 +1,8 @@ subroutine les_chan C This subroutine models the terms owing to the subgrid scale stress C if the computation is to be treated as an LES not a DNS -C This subroutine should be called when the velocity is in fourier space -C in the periodic directions, on output, the velocity will be +C This subroutine should be called when the velocity is in fourier space +C in the periodic directions, on output, the velocity will be C in physical space. 
C It is assumed that the test filter and the LES filter are performed C by the same operation @@ -12,6 +12,15 @@ subroutine les_chan include 'header' include 'header_les' + REAL*8 TH_real(0:NX+1,0:NZP+1,0:NY+1) + COMPLEX*16 CTMP(0:NXP,0:NZ+1,0:NY+1) + + integer N + REAL*8 DELTA_TMP_Y, DELTA_TMP_YF + REAL*8 C_lb + REAL*8 EPS_DELTA + parameter (EPS_DELTA=0.000000001d0) + parameter (C_lb=0.0625d0) !1/16 integer i,j,k,l,m,ij real*8 S1_mean(0:NY+1) @@ -19,11 +28,14 @@ subroutine les_chan real*8 EPS_SGS1_MEAN(0:NY+1) real*8 U3_bar(0:NY+1) real*8 U1_bar(0:NY+1) - + real*8 C_SMAG parameter (C_SMAG=0.17d0) - real*8 DELTA_Y(0:NY+1),DELTA_YF(0:NY+1) - real*8 alpha_sgs,beta_sgs + real*8 C_AMD + parameter (C_AMD=0.2887d0) + real*8 DELTA_Y(0:NY+1),DELTA_YF(0:NY+1) + real*8 deltax,deltay,deltaz + real*8 alpha_sgs real*8 denominator_sum ! Array for writing HDF5 files @@ -39,7 +51,7 @@ subroutine les_chan ! Here, alpha is the test/LES filter width ratio parameter (alpha_sgs=2.449d0) ! beta is the LES/grid filter width ratio - parameter (beta_sgs=1.5d0) + beta_sgs=3.d0 ! Set the indices that are used when adding the off-diagnoal SGS stress terms IF (RANKY.eq.NPROCY-1) then @@ -79,22 +91,6 @@ subroutine les_chan call compute_strain_chan -! to remove mean shear: first get the mean horizontal velocity -! as in save_stats -! Save the mean velocity -! maybe JSTART to JEND - IF (RANKZ.eq.0) THEN - DO J=0,NY+1 - U1_bar(J)=DBLE(CU1(0,0,J)) - U3_bar(J)=DBLE(CU3(0,0,J)) - END DO - END IF - IF (USE_MPI) THEN - CALL MPI_BCAST(U1_bar,NY+2,MPI_DOUBLE_PRECISION,0, - & MPI_COMM_Z,ierror) - CALL MPI_BCAST(U3_bar,NY+2,MPI_DOUBLE_PRECISION,0, - & MPI_COMM_Z,ierror) - END IF ! Convert the velocity to physical space call FFT_XZ_TO_PHYSICAL(CU1,U1,0,NY+1) @@ -104,60 +100,44 @@ subroutine les_chan ! Compute |S| at GYF points, store in S1 ! Interpolation to GYF points is easy since by definition ! GYF points are exactly midway between neighboring GY points -! Sij4 and Sij6 have the mean shear -! 
remove the zero value U1,3bar as in -! compute strain - DO J=JSTART,JEND + DO J=JSTART,JEND DO K=0,NZP-1 DO I=0,NXM S1(I,K,J)=SQRT( & 2.d0*Sij1(I,K,J)**2.d0 - & +4.d0*(0.5d0*(Sij4(I,K,J+1)+Sij4(I,K,J) -! Optionally remove mean shear - & -0.5d0*(U1_bar(J)-U1_bar(J-1))/DY(J) - & -0.5d0*(U1_bar(J+1)-U1_bar(J))/DY(J+1) - & ))**2.d0 + & +4.d0*(0.5d0*(Sij4(I,K,J+1)+Sij4(I,K,J)))**2.d0 & +4.d0*Sij5(I,K,J)**2.d0 & +2.d0*Sij2(I,K,J)**2.d0 - & +4.d0*(0.5d0*(Sij6(I,K,J+1)+Sij6(I,K,J) -! Optionally remove mean shear - & -0.5d0*(U3_bar(J)-U3_bar(J-1))/DY(J) - & -0.5d0*(U3_bar(J+1)-U3_bar(J))/DY(J+1) - & ))**2.d0 + & +4.d0*(0.5d0*(Sij6(I,K,J+1)+Sij6(I,K,J)))**2.d0 & +2.d0*Sij3(I,K,J)**2.d0 ) END DO END DO - END DO + END DO ! Compute |S| at GY points and store in TEMP - DO J=2,NY+1 + DO J=2,NY+1 DO K=0,NZP-1 DO I=0,NXM TEMP(I,K,J)=SQRT( - & 2.d0*((Sij1(I,K,J)*DYF(J-1)+Sij1(I,K,J-1)*DYF(J)) - & /(2.d0*DY(J)))**2.d0 - & +4.d0*(Sij4(I,K,J) -! optionally remove mean shear - & -0.5d0*(U1_bar(J)-U1_bar(J-1))/DY(J) - & )**2.d0 - & +4.d0*((Sij5(I,K,J)*DYF(J-1)+Sij5(I,K,J-1)*DYF(J)) - & /(2.d0*DY(J)))**2.d0 - & +2.d0*((Sij2(I,K,J)*DYF(J-1)+Sij2(I,K,J-1)*DYF(J)) - & /(2.d0*DY(J)))**2.d0 - & +4.d0*(Sij6(I,K,J) -! remove mean shear - & -0.5d0*(U3_bar(J)-U3_bar(J-1))/DY(J) - & )**2.d0 - & +2.d0*((Sij3(I,K,J)*DYF(J-1)+Sij3(I,K,J-1)*DYF(J)) - & /(2.d0*DY(J)))**2.d0 + & 2.d0*((Sij1(I,K,J)*DYF(j-1)+Sij1(I,K,J-1)*DYF(j)) + & /(2.d0*DY(j)))**2.d0 + & +4.d0*Sij4(I,K,J)**2.d0 + & +4.d0*((Sij5(I,K,J)*DYF(j-1)+Sij5(I,K,J-1)*DYF(j)) + & /(2.d0*DY(j)))**2.d0 + & +2.d0*((Sij2(I,K,J)*DYF(j-1)+Sij2(I,K,J-1)*DYF(j)) + & /(2.d0*DY(j)))**2.d0 + & +4.d0*Sij6(I,K,J)**2.d0 + & +2.d0*((Sij3(I,K,J)*DYF(j-1)+Sij3(I,K,J-1)*DYF(j)) + & /(2.d0*DY(j)))**2.d0 & ) END DO END DO - END DO + END DO + ! Now, compute |S|*S_ij, storing in Sij -! First compute at GYF points - DO J=JSTART,JEND +! 
First compute at GYF points + DO J=JSTART,JEND DO K=0,NZP-1 DO I=0,NXM Sij1(I,K,J)=S1(I,K,J)*Sij1(I,K,J) @@ -167,11 +147,11 @@ subroutine les_chan Sij3(I,K,J)=S1(I,K,J)*Sij3(I,K,J) END DO END DO - END DO + END DO ! Now, compute at |S|*S_ij at GY points - DO J=2,NY+1 + DO J=2,NY+1 DO K=0,NZP-1 DO I=0,NXM ! The terms dU1/dy and dU3/dy in CSij4(:,:,:) and CSij6(:,:,:) respectively @@ -183,25 +163,25 @@ subroutine les_chan & *(Sij6(I,K,J)-0.5*(U3(I,K,J)-U3(I,K,J-1))/DY(j)) END DO END DO - END DO + END DO ! We now have |S|*S_ij stored in Sij in Physical space ! Compute the filter lengthscale ! Absorb -2.d0*C_SMAG**2.d0 here for effienciency - DO J=1,NY+1 + DO J=1,NY+1 ! At GYF points: ! Constant Smagorinsky DELTA_YF(J)=-2.d0*C_SMAG**2.d0 - & *(DX(1)*beta_sgs*DYF(J)*1.d0*DZ(1)*beta_sgs)**(2.d0/3.d0) + & *(DX(1)*beta_sgs*DYF(J)*2.d0*DZ(1)*beta_sgs)**(2.d0/3.d0) ! Wall Damping ! DELTA_YF(J)= ! & -2.d0*(0.1d0*(1.d0-exp((-GYF(J)/(NU*25.d0))**3.d0)))**2.d0 ! & *(DX(1)*beta_sgs*DYF(J)*2.d0*DZ(1)*beta_sgs)**(2.d0/3.d0) - END DO + END DO - DO J=1,NY+1 + DO J=1,NY+1 ! At GY points: ! Constant Smagorinsky DELTA_Y(J)=-2.d0*C_SMAG**2.d0 @@ -210,17 +190,17 @@ subroutine les_chan ! DELTA_Y(J)= ! & -2.d0*(0.1d0*(1.d0-exp((-GY(J)/(NU*25.d0))**3.d0)))**2.d0 ! & *(DX(1)*beta_sgs*DY(J)*2.d0*DZ(1)*beta_sgs)**(2.d0/3.d0) - END DO + END DO ! Get the eddy viscosity at GY points ! NU_T = (C_S^2 * DELTA^2)*|S| - DO J=2,NY + DO J=2,NY DO K=0,NZP-1 DO I=0,NXM NU_T(I,K,J)=-0.5d0*DELTA_Y(J)*TEMP(I,K,J) END DO END DO - END DO + END DO ! Now that we have calculated NU_T, set the value at the ghost cells ! by sharing with neighboring processes. This subroutine also sets @@ -239,7 +219,7 @@ subroutine les_chan CALL FFT_XZ_TO_FOURIER(Sij6,CSij6,0,NY+1) ! 
Now, compute TAU, store in the corresponging Sij - DO J=1,NY+1 + DO J=1,NY+1 DO K=0,TNKZ DO I=0,NXP-1 CSij1(I,K,J)=DELTA_YF(J)*CSij1(I,K,J) @@ -249,15 +229,15 @@ subroutine les_chan CSij3(I,K,J)=DELTA_YF(J)*CSij3(I,K,J) END DO END DO - END DO - DO J=2,NY+1 + END DO + DO J=2,NY+1 DO K=0,TNKZ DO I=0,NXP-1 CSij4(I,K,J)=DELTA_Y(J)*CSij4(I,K,J) CSij6(I,K,J)=DELTA_Y(J)*CSij6(I,K,J) END DO END DO - END DO + END DO ! tau_ij is now contained in CSij in Fourier space @@ -265,7 +245,698 @@ subroutine les_chan else if ((LES_MODEL_TYPE.EQ.2).or.(LES_MODEL_TYPE.eq.3)) then ! Here, use a dynamic smagorinsky model with or without scale similar part - stop 'ERROR: LES_MODEL_TYPE=2, 3 not supported yet in MPI' + stop 'ERROR: LES_MODEL_TYPE=2, 3 not supported yet in MPI' + + else if (LES_MODEL_TYPE.EQ.4) then +! Anisotrophic minimum-dissipation model Rozema + +! First, compute the rate of strain tensor S_ij + + call compute_strain_chan + +! Second, compute the rate of rotation tensor omega_ij + + call compute_rotation_chan + +! Convert the velocity to physical space + call FFT_XZ_TO_PHYSICAL(CU1,U1,0,NY+1) + call FFT_XZ_TO_PHYSICAL(CU2,U2,0,NY+1) + call FFT_XZ_TO_PHYSICAL(CU3,U3,0,NY+1) + +!Set filter length (based on grid size) in x,z directions +!Based on constant Smag code + deltax = (DX(1)*beta_sgs)**2.d0 + deltaz = (DZ(1)*beta_sgs)**2.d0 + +! Compute max{0,-delta_k*(I3-I4)}/(I1-I2) at GYF points, store in S1 +! Interpolation to GYF points is easy since by definition +! GYF points are exactly midway between neighboring GY points + DO J=JSTART,JEND +!Set filter length (based on grid size) in y direction +!Based on constant Smag code + deltay=(DYF(J)*2.d0)**2.d0 + DO K=0,NZP-1 + DO I=0,NXM + +!First calculate delta_k*I3 and store it in S1. 
+ S1(I,K,J)= + & deltax*Sij1(I,K,J)**3.d0 + & +deltay*Sij2(I,K,J)**3.d0 + & +deltaz*Sij3(I,K,J)**3.d0 + & +(2.d0*deltax+deltay)*Sij1(I,K,J) + & *(0.5d0*(Sij4(I,K,J+1)+Sij4(I,K,J)))**2.d0 + & +(2.d0*deltax+deltaz)*Sij1(I,K,J)*Sij5(I,K,J)**2.d0 + & +(2.d0*deltay+deltax)*Sij2(I,K,J) + & *(0.5d0*(Sij4(I,K,J+1)+Sij4(I,K,J)))**2.d0 + & +(2.d0*deltay+deltaz)*Sij2(I,K,J) + & *(0.5d0*(Sij6(I,K,J+1)+Sij6(I,K,J)))**2.d0 + & +(2.d0*deltaz+deltax)*Sij3(I,K,J)*Sij5(I,K,J)**2.d0 + & +(2.d0*deltaz+deltay)*Sij3(I,K,J) + & *(0.5d0*(Sij6(I,K,J+1)+Sij6(I,K,J)))**2.d0 + & +2.d0*(deltax+deltay+deltaz) + & *(0.5d0*(Sij4(I,K,J+1)+Sij4(I,K,J))) + & *Sij5(I,K,J)*(0.5d0*(Sij6(I,K,J+1)+Sij6(I,K,J))) + +!Then calculate -delta_k*(I3-I4) = -S1+delta_k*I4 and store in S1. + + S1(I,K,J)=(-S1(I,K,J) + & -deltay*Sij1(I,K,J) + & *(0.5d0*(omgij4(I,K,J+1)+omgij4(I,K,J)))**2.d0 + & -deltaz*Sij1(I,K,J)*omgij5(I,K,J)**2.d0 + & -deltax*Sij2(I,K,J) + & *(0.5d0*(omgij4(I,K,J+1)+omgij4(I,K,J)))**2.d0 + & -deltaz*Sij2(I,K,J) + & *(0.5d0*(omgij6(I,K,J+1)+omgij6(I,K,J)))**2.d0 + & -deltax*Sij3(I,K,J)*omgij5(I,K,J)**2.d0 + & -deltay*Sij3(I,K,J) + & *(0.5d0*(omgij6(I,K,J+1)+omgij6(I,K,J)))**2.d0 + & -2.d0*deltaz*(0.5d0*(Sij4(I,K,J+1)+Sij4(I,K,J))) + & *omgij5(I,K,J)*(0.5d0*(omgij6(I,K,J+1)+omgij6(I,K,J))) + & +2.d0*deltay*Sij5(I,K,J) + & *(0.5d0*(omgij4(I,K,J+1)+omgij4(I,K,J))) + & *(0.5d0*(omgij6(I,K,J+1)+omgij6(I,K,J))) + & -2.d0*deltax*(0.5d0*(Sij6(I,K,J+1)+Sij6(I,K,J))) + & *(0.5d0*(omgij4(I,K,J+1)+omgij4(I,K,J)))*omgij5(I,K,J) + & ) + + IF (S1(I,K,J) <= 0.0d0) THEN + S1(I,K,J)=0.0d0 + ELSE + S1(I,K,J)=S1(I,K,J)/(Sij1(I,K,J)**2.d0 + & +Sij2(I,K,J)**2.d0 + & +Sij3(I,K,J)**2.d0 + & +2.d0*(0.5d0*(Sij4(I,K,J+1)+Sij4(I,K,J)))**2.d0 + & +2.d0*(0.5d0*(omgij4(I,K,J+1)+omgij4(I,K,J)))**2.d0 + & +2.d0*Sij5(I,K,J)**2.d0 + & +2.d0*omgij5(I,K,J)**2.d0 + & +2.d0*(0.5d0*(Sij6(I,K,J+1)+Sij6(I,K,J)))**2.d0 + & +2.d0*(0.5d0*(omgij6(I,K,J+1)+omgij6(I,K,J)))**2.d0) + END IF + + END DO + END DO + END DO + + +! 
Compute max{0,-delta_k*(I3-I4)}/(I1-I2) at GY points and store in TEMP
+      DO J=2,NY+1
+!Set filter length (based on grid size) in y direction
+!Based on constant Smag code
+      deltay=(DY(J)*2.d0)**2.d0
+      DO K=0,NZP-1
+      DO I=0,NXM
+
+!First calculate delta_k*I3 and store it in TEMP.
+      TEMP(I,K,J)=
+     &   deltax*((Sij1(I,K,J)*DYF(j-1)+Sij1(I,K,J-1)*DYF(j))
+     &   /(2.d0*DY(j)))**3.d0
+     &   +deltay*((Sij2(I,K,J)*DYF(j-1)+Sij2(I,K,J-1)*DYF(j))
+     &   /(2.d0*DY(j)))**3.d0
+     &   +deltaz*((Sij3(I,K,J)*DYF(j-1)+Sij3(I,K,J-1)*DYF(j))
+     &   /(2.d0*DY(j)))**3.d0
+     &   +(2.d0*deltax+deltay)
+     &   *((Sij1(I,K,J)*DYF(j-1)+Sij1(I,K,J-1)*DYF(j))/(2.d0*DY(j)))
+     &   *Sij4(I,K,J)**2.d0
+     &   +(2.d0*deltax+deltaz)
+     &   *((Sij1(I,K,J)*DYF(j-1)+Sij1(I,K,J-1)*DYF(j))/(2.d0*DY(j)))
+     &   *((Sij5(I,K,J)*DYF(j-1)+Sij5(I,K,J-1)*DYF(j))/(2.d0*DY(j)))**2.d0
+     &   +(2.d0*deltay+deltax)
+     &   *((Sij2(I,K,J)*DYF(j-1)+Sij2(I,K,J-1)*DYF(j))/(2.d0*DY(j)))
+     &   *Sij4(I,K,J)**2.d0
+     &   +(2.d0*deltay+deltaz)
+     &   *((Sij2(I,K,J)*DYF(j-1)+Sij2(I,K,J-1)*DYF(j))/(2.d0*DY(j)))
+     &   *Sij6(I,K,J)**2.d0
+     &   +(2.d0*deltaz+deltax)
+     &   *((Sij3(I,K,J)*DYF(j-1)+Sij3(I,K,J-1)*DYF(j))/(2.d0*DY(j)))
+     &   *((Sij5(I,K,J)*DYF(j-1)+Sij5(I,K,J-1)*DYF(j))/(2.d0*DY(j)))**2.d0
+     &   +(2.d0*deltaz+deltay)
+     &   *((Sij3(I,K,J)*DYF(j-1)+Sij3(I,K,J-1)*DYF(j))/(2.d0*DY(j)))
+     &   *Sij6(I,K,J)**2.d0
+     &   +2.d0*(deltax+deltay+deltaz)*Sij4(I,K,J)*Sij6(I,K,J)
+     &   *((Sij5(I,K,J)*DYF(j-1)+Sij5(I,K,J-1)*DYF(j))/(2.d0*DY(j))
+     &   )
+
+!Then calculate -(I3-I4) = -TEMP+delta_k*I4 and store in TEMP.
+ TEMP(I,K,J)=-TEMP(I,K,J) + & -deltay*((Sij1(I,K,J)*DYF(j-1)+Sij1(I,K,J-1)*DYF(j)) + & /(2.d0*DY(j))) + & *omgij4(I,K,J)**2.d0 + & -deltaz*((Sij1(I,K,J)*DYF(j-1)+Sij1(I,K,J-1)*DYF(j)) + & /(2.d0*DY(j))) + & *((omgij5(I,K,J)*DYF(j-1)+omgij5(I,K,J-1)*DYF(j)) + & /(2.d0*DY(j)))**2.d0 + & -deltax*((Sij2(I,K,J)*DYF(j-1)+Sij2(I,K,J-1)*DYF(j)) + & /(2.d0*DY(j))) + & *omgij4(I,K,J)**2.d0 + & -deltaz*((Sij2(I,K,J)*DYF(j-1)+Sij2(I,K,J-1)*DYF(j)) + & /(2.d0*DY(j))) + & *omgij6(I,K,J)**2.d0 + & -deltax*((Sij3(I,K,J)*DYF(j-1)+Sij3(I,K,J-1)*DYF(j)) + & /(2.d0*DY(j))) + & *((omgij5(I,K,J)*DYF(j-1)+omgij5(I,K,J-1)*DYF(j)) + & /(2.d0*DY(j)))**2.d0 + & -deltay*((Sij3(I,K,J)*DYF(j-1)+Sij3(I,K,J-1)*DYF(j)) + & /(2.d0*DY(j))) + & *omgij6(I,K,J)**2.d0 + & -2.d0*deltaz*Sij4(I,K,J) + & *((omgij5(I,K,J)*DYF(j-1)+omgij5(I,K,J-1)*DYF(j)) + & /(2.d0*DY(j))) + & *omgij6(I,K,J) + & +2.d0*deltay*((Sij5(I,K,J)*DYF(j-1)+Sij5(I,K,J-1)*DYF(j)) + & /(2.d0*DY(j))) + & *omgij4(I,K,J)*omgij6(I,K,J) + & -2.d0*deltax*Sij6(I,K,J)*omgij4(I,K,J) + & *((omgij5(I,K,J)*DYF(j-1)+omgij5(I,K,J-1)*DYF(j)) + & /(2.d0*DY(j))) + + + IF (TEMP(I,K,J) <= 0.0d0) THEN + TEMP(I,K,J)=0.0d0 + ELSE + TEMP(I,K,J)=TEMP(I,K,J)/ + & (((Sij1(I,K,J)*DYF(j-1)+Sij1(I,K,J-1)*DYF(j)) + & /(2.d0*DY(j)))**2.d0 + & +((Sij2(I,K,J)*DYF(j-1)+Sij2(I,K,J-1)*DYF(j)) + & /(2.d0*DY(j)))**2.d0 + & +((Sij3(I,K,J)*DYF(j-1)+Sij3(I,K,J-1)*DYF(j)) + & /(2.d0*DY(j)))**2.d0 + & +2.d0*Sij4(I,K,J)**2.d0 + & +2.d0*omgij4(I,K,J)**2.d0 + & +2.d0*((Sij5(I,K,J)*DYF(j-1)+Sij5(I,K,J-1)*DYF(j)) + & /(2.d0*DY(j)))**2.d0 + & +2.d0*((omgij5(I,K,J)*DYF(j-1)+omgij5(I,K,J-1)*DYF(j)) + & /(2.d0*DY(j)))**2.d0 + & +2.d0*Sij6(I,K,J)**2.d0 + & +2.d0*omgij6(I,K,J)**2.d0 ) + + END IF + + END DO + END DO + END DO + +! Now, compute max{0,-delta_k*(I3-I4)}/(I1-I2)*S_ij, storing in Sij +! First compute at GYF points + DO J=JSTART,JEND + DO K=0,NZP-1 + DO I=0,NXM + Sij1(I,K,J)=S1(I,K,J)*Sij1(I,K,J) + Sij5(I,K,J)=S1(I,K,J)*Sij5(I,K,J) +! 
Sij2 is added through an implicit eddy viscosity + Sij2(I,K,J)=0.d0 + Sij3(I,K,J)=S1(I,K,J)*Sij3(I,K,J) + END DO + END DO + END DO + + +! Now, compute at max{0,-delta_k*(I3-I4)}/(I1-I2)*S_ij at GY points + DO J=2,NY+1 + DO K=0,NZP-1 + DO I=0,NXM +! The terms dU1/dy and dU3/dy in CSij4(:,:,:) and CSij6(:,:,:) respectively +! are subtracted out from Sij here since they are treated implicitly +! in eddy viscosity terms + Sij4(I,K,J)=TEMP(I,K,J) + & *(Sij4(I,K,J)-0.5*(U1(I,K,J)-U1(I,K,J-1))/DY(j)) + Sij6(I,K,J)=TEMP(I,K,J) + & *(Sij6(I,K,J)-0.5*(U3(I,K,J)-U3(I,K,J-1))/DY(j)) + END DO + END DO + END DO + +! We now have max{0,-delta_k*(I3-I4)}/(I1-I2)*S_ij stored in Sij in Physical space + +! Compute -2.d0*C_AMD**2.d0 here for efficiency + DO J=1,NY+1 +! At GYF points: +! AMD (based off constant Smag) + DELTA_YF(J)=-2.d0*C_AMD**2.d0 +! & *(DX(1)*beta_sgs*DYF(J)*2.d0*DZ(1)*beta_sgs)**(2.d0/3.d0) +! Wall Damping +! DELTA_YF(J)= +! & -2.d0*(0.1d0*(1.d0-exp((-GYF(J)/(NU*25.d0))**3.d0)))**2.d0 +! & *(DX(1)*beta_sgs*DYF(J)*2.d0*DZ(1)*beta_sgs)**(2.d0/3.d0) + + END DO + + DO J=1,NY+1 +! At GY points: +! AMD (based off Constant Smagorinsky) + DELTA_Y(J)=-2.d0*C_AMD**2.d0 +! & *(DX(1)*beta_sgs*DY(J)*2.d0*DZ(1)*beta_sgs)**(2.d0/3.d0) +! Wall Damping +! DELTA_Y(J)= +! & -2.d0*(0.1d0*(1.d0-exp((-GY(J)/(NU*25.d0))**3.d0)))**2.d0 +! & *(DX(1)*beta_sgs*DY(J)*2.d0*DZ(1)*beta_sgs)**(2.d0/3.d0) + END DO + + +! Get the eddy viscosity at GY points +! NU_T = (C_S^2 * DELTA^2)*|S| + DO J=2,NY + DO K=0,NZP-1 + DO I=0,NXM + NU_T(I,K,J)=-0.5d0*DELTA_Y(J)*TEMP(I,K,J) + END DO + END DO + END DO + +! Now that we have calculated NU_T, set the value at the ghost cells +! by sharing with neighboring processes. This subroutine also sets +! the value of NU_T to zero at both walls + CALL GHOST_LES_MPI + +! Convert the stress tensor to Fourier space + + + CALL FFT_XZ_TO_FOURIER(Sij1,CSij1,0,NY+1) +! Sij2 is added through an implicit eddy viscosity +! 
CALL FFT_XZ_TO_FOURIER(Sij2,CSij2,0,NY+1) + CALL FFT_XZ_TO_FOURIER(Sij3,CSij3,0,NY+1) + CALL FFT_XZ_TO_FOURIER(Sij4,CSij4,0,NY+1) + CALL FFT_XZ_TO_FOURIER(Sij5,CSij5,0,NY+1) + CALL FFT_XZ_TO_FOURIER(Sij6,CSij6,0,NY+1) + +! Now, compute TAU, store in the corresponging Sij + DO J=1,NY+1 + DO K=0,TNKZ + DO I=0,NXP-1 + CSij1(I,K,J)=DELTA_YF(J)*CSij1(I,K,J) + CSij5(I,K,J)=DELTA_YF(J)*CSij5(I,K,J) +! CSij2(:,:,:) is added through an implicit eddy viscosity +! CSij2(I,K,J)=DELTA_YF(J)*CSij2(I,K,J) + CSij3(I,K,J)=DELTA_YF(J)*CSij3(I,K,J) + END DO + END DO + END DO + DO J=2,NY+1 + DO K=0,TNKZ + DO I=0,NXP-1 + CSij4(I,K,J)=DELTA_Y(J)*CSij4(I,K,J) + CSij6(I,K,J)=DELTA_Y(J)*CSij6(I,K,J) + END DO + END DO + END DO + + +! tau_ij is now contained in CSij in Fourier space + + + else if (LES_MODEL_TYPE.EQ.5) then +! Anisotrophic minimum-dissipation model Verstappen + +! First, compute the rate of strain tensor S_ij + + call compute_strain_chan + +! Then compute the rate of strain tensor invariant SI_ij + + call compute_strain_chan_invariant + +! Then compute the rate of rotation tensor invariant omega_ij + + call compute_rotation_chan_invariant + +!---------------------------------------------------------! +! Compute the mean velocity and TH +! Copied from Sr. SAVE_STATS_CHAN +! Get the mean value of the velocities + IF (RANKZ.EQ.0) THEN + ume=dble(CU1(0,0,:)) + vme=dble(CU2(0,0,:)) + wme=dble(CU3(0,0,:)) +c DO n=1,N_TH +c thme(:,n)=dble(CTH(0,0,:,n)) +c END DO + END IF + CALL MPI_BCAST(ume,NY+2,MPI_DOUBLE_PRECISION,0, + & MPI_COMM_Z,ierror) + CALL MPI_BCAST(vme,NY+2,MPI_DOUBLE_PRECISION,0, + & MPI_COMM_Z,ierror) + CALL MPI_BCAST(wme,NY+2,MPI_DOUBLE_PRECISION,0, + & MPI_COMM_Z,ierror) +c IF (N_TH.GT.0) CALL MPI_BCAST(thme,(NY+2)*N_TH, +c & MPI_DOUBLE_PRECISION,0,MPI_COMM_Z,ierror) +!!! We do not need thme here since local theta is needed + + + N=1 ! Only buoyancy/temperature field + CTMP(:,:,:)=CTH(:,:,:,N) + CALL FFT_XZ_TO_PHYSICAL(CTMP,TH_real,0,NY+1) + +! 
Compute dthetady in fourier space +c DO J=1,NY+1 +c DO K=0,TNKZ +c DO I=0,NXP-1 +! Cdthetady at GY points +c Cdthetady(I,K,J,N)=(CTH(I,K,J,N)-CTH(I,K,J-1,N))/DY(J) +! Cdthetady at GYF points +c Cdthetady_YF(I,K,J,N)=(CTH(I,K,J+1,N)-CTH(I,K,J-1,N)) +c & /(GYF(J+1)-GYF(J-1)) +c END DO +c END DO +c END DO + +! dthetady at GY points in physical space +c CS1(:,:,:)=Cdthetady(:,:,:,N) +c CALL FFT_XZ_TO_PHYSICAL(CS1,S1,0,NY+1) +c dthetady(:,:,:,N)=S1(:,:,:) +! dthetady at GYF points in physical space +c CS1(:,:,:)=Cdthetady_YF(:,:,:,N) +c CALL FFT_XZ_TO_PHYSICAL(CS1,S1,0,NY+1) +c dthetady_YF(:,:,:,N)=S1(:,:,:) + +!=========================================================! + +! Convert the velocity to physical space + call FFT_XZ_TO_PHYSICAL(CU1,U1,0,NY+1) + call FFT_XZ_TO_PHYSICAL(CU2,U2,0,NY+1) + call FFT_XZ_TO_PHYSICAL(CU3,U3,0,NY+1) + +! Compute max{0,-(I3-I4)}/(I1-I2) at GYF points, store in S1 +! Interpolation to GYF points is easy since by definition +! GYF points are exactly midway between neighboring GY points + DO J=JSTART,JEND + DO K=0,NZP-1 + DO I=0,NXM + +!First calculate I3 and store it in S1. + S1(I,K,J)= + & SIij1(I,K,J)**3.d0 + & +SIij2(I,K,J)**3.d0 + & +SIij3(I,K,J)**3.d0 + & +3.d0*SIij1(I,K,J)*(0.5d0*(SIij4(I,K,J+1)+SIij4(I,K,J)))**2.d0 + & +3.d0*SIij1(I,K,J)*SIij5(I,K,J)**2.d0 + & +3.d0*SIij2(I,K,J)*(0.5d0*(SIij4(I,K,J+1)+SIij4(I,K,J)))**2.d0 + & +3.d0*SIij2(I,K,J)*(0.5d0*(SIij6(I,K,J+1)+SIij6(I,K,J)))**2.d0 + & +3.d0*SIij3(I,K,J)*SIij5(I,K,J)**2.d0 + & +3.d0*SIij3(I,K,J)*(0.5d0*(SIij6(I,K,J+1)+SIij6(I,K,J)))**2.d0 + & +6.d0*(0.5d0*(SIij4(I,K,J+1)+SIij4(I,K,J)))*SIij5(I,K,J) + & *(0.5d0*(SIij6(I,K,J+1)+SIij6(I,K,J))) + +!Then calculate -(I3-I4) = -S1+I4 and store in S1. 
+ + S1(I,K,J)=(-S1(I,K,J) + & -SIij1(I,K,J)*(0.5d0*(omgij4(I,K,J+1)+omgij4(I,K,J)))**2.d0 + & -SIij1(I,K,J)*omgij5(I,K,J)**2.d0 + & -SIij2(I,K,J)*(0.5d0*(omgij4(I,K,J+1)+omgij4(I,K,J)))**2.d0 + & -SIij2(I,K,J)*(0.5d0*(omgij6(I,K,J+1)+omgij6(I,K,J)))**2.d0 + & -SIij3(I,K,J)*omgij5(I,K,J)**2.d0 + & -SIij3(I,K,J)*(0.5d0*(omgij6(I,K,J+1)+omgij6(I,K,J)))**2.d0 + & -2.d0*(0.5d0*(SIij4(I,K,J+1)+SIij4(I,K,J)))*omgij5(I,K,J) + & *(0.5d0*(omgij6(I,K,J+1)+omgij6(I,K,J)))! + & +2.d0*SIij5(I,K,J)*(0.5d0*(omgij4(I,K,J+1)+omgij4(I,K,J))) + & *(0.5d0*(omgij6(I,K,J+1)+omgij6(I,K,J))) + & -2.d0*(0.5d0*(SIij6(I,K,J+1)+SIij6(I,K,J))) + & *(0.5d0*(omgij4(I,K,J+1)+omgij4(I,K,J)))*omgij5(I,K,J) + & ) + + IF (S1(I,K,J) <= 0.0d0) THEN + S1(I,K,J)=0.0d0 + ELSE + S1(I,K,J)=S1(I,K,J)/(SIij1(I,K,J)**2.d0 + & +SIij2(I,K,J)**2.d0 + & +SIij3(I,K,J)**2.d0 + & +2.d0*(0.5d0*(SIij4(I,K,J+1)+SIij4(I,K,J)))**2.d0 + & +2.d0*(0.5d0*(omgij4(I,K,J+1)+omgij4(I,K,J)))**2.d0 + & +2.d0*SIij5(I,K,J)**2.d0 + & +2.d0*omgij5(I,K,J)**2.d0 + & +2.d0*(0.5d0*(SIij6(I,K,J+1)+SIij6(I,K,J)))**2.d0 + & +2.d0*(0.5d0*(omgij6(I,K,J+1)+omgij6(I,K,J)))**2.d0) + END IF + + END DO + END DO + END DO + + + +! Compute max{0,-(I3-I4)}/(I1-I2) at GY points and store in TEMP + DO J=2,NY+1 + DO K=0,NZP-1 + DO I=0,NXM + +!First calculate I3 and store it in TEMP. 
+ TEMP(I,K,J)= + & ((SIij1(I,K,J)*DYF(j-1)+SIij1(I,K,J-1)*DYF(j)) + & /(2.d0*DY(j)))**3.d0 + & +((SIij2(I,K,J)*DYF(j-1)+SIij2(I,K,J-1)*DYF(j)) + & /(2.d0*DY(j)))**3.d0 + & +((SIij3(I,K,J)*DYF(j-1)+SIij3(I,K,J-1)*DYF(j)) + & /(2.d0*DY(j)))**3.d0 + & +3.d0*((SIij1(I,K,J)*DYF(j-1)+SIij1(I,K,J-1)*DYF(j)) + & /(2.d0*DY(j))) + & *SIij4(I,K,J)**2.d0 + & +3.d0*((SIij1(I,K,J)*DYF(j-1)+SIij1(I,K,J-1)*DYF(j)) + & /(2.d0*DY(j))) + & *((SIij5(I,K,J)*DYF(j-1)+SIij5(I,K,J-1)*DYF(j)) + & /(2.d0*DY(j)))**2.d0 + & +3.d0*((SIij2(I,K,J)*DYF(j-1)+SIij2(I,K,J-1)*DYF(j)) + & /(2.d0*DY(j))) + & *SIij4(I,K,J)**2.d0 + & +3.d0*((SIij2(I,K,J)*DYF(j-1)+SIij2(I,K,J-1)*DYF(j)) + & /(2.d0*DY(j))) + & *SIij6(I,K,J)**2.d0 + & +3.d0*((SIij3(I,K,J)*DYF(j-1)+SIij3(I,K,J-1)*DYF(j)) + & /(2.d0*DY(j))) + & *((SIij5(I,K,J)*DYF(j-1)+SIij5(I,K,J-1)*DYF(j)) + & /(2.d0*DY(j)))**2.d0 + & +3.d0*((SIij3(I,K,J)*DYF(j-1)+SIij3(I,K,J-1)*DYF(j)) + & /(2.d0*DY(j))) + & *SIij6(I,K,J)**2.d0 + & +6.d0*SIij4(I,K,J)*SIij6(I,K,J) + & *((SIij5(I,K,J)*DYF(j-1)+SIij5(I,K,J-1)*DYF(j)) + & /(2.d0*DY(j))) + +!Then calculate -(I3-I4) = -TEMP+I4 and store in TEMP. 
+ TEMP(I,K,J)=-TEMP(I,K,J) + & -((SIij1(I,K,J)*DYF(j-1)+SIij1(I,K,J-1)*DYF(j)) + & /(2.d0*DY(j))) + & *omgij4(I,K,J)**2.d0 + & -((SIij1(I,K,J)*DYF(j-1)+SIij1(I,K,J-1)*DYF(j)) + & /(2.d0*DY(j))) + & *((omgij5(I,K,J)*DYF(j-1)+omgij5(I,K,J-1)*DYF(j)) + & /(2.d0*DY(j)))**2.d0 + & -((SIij2(I,K,J)*DYF(j-1)+SIij2(I,K,J-1)*DYF(j)) + & /(2.d0*DY(j))) + & *omgij4(I,K,J)**2.d0 + & -((SIij2(I,K,J)*DYF(j-1)+SIij2(I,K,J-1)*DYF(j)) + & /(2.d0*DY(j))) + & *omgij6(I,K,J)**2.d0 + & -((SIij3(I,K,J)*DYF(j-1)+SIij3(I,K,J-1)*DYF(j)) + & /(2.d0*DY(j))) + & *((omgij5(I,K,J)*DYF(j-1)+omgij5(I,K,J-1)*DYF(j)) + & /(2.d0*DY(j)))**2.d0 + & -((SIij3(I,K,J)*DYF(j-1)+SIij3(I,K,J-1)*DYF(j)) + & /(2.d0*DY(j))) + & *omgij6(I,K,J)**2.d0 + & -2.d0*SIij4(I,K,J) + & *((omgij5(I,K,J)*DYF(j-1)+omgij5(I,K,J-1)*DYF(j)) + & /(2.d0*DY(j))) + & *omgij6(I,K,J) + & +2.d0*((SIij5(I,K,J)*DYF(j-1)+SIij5(I,K,J-1)*DYF(j)) + & /(2.d0*DY(j))) + & *omgij4(I,K,J)*omgij6(I,K,J) + & -2.d0*SIij6(I,K,J)*omgij4(I,K,J) + & *((omgij5(I,K,J)*DYF(j-1)+omgij5(I,K,J-1)*DYF(j)) + & /(2.d0*DY(j))) + + IF (TEMP(I,K,J) <= 0.0d0) THEN + TEMP(I,K,J)=0.0d0 + ELSE + TEMP(I,K,J)=TEMP(I,K,J)/ + & (((SIij1(I,K,J)*DYF(j-1)+SIij1(I,K,J-1)*DYF(j)) + & /(2.d0*DY(j)))**2.d0 + & +((SIij2(I,K,J)*DYF(j-1)+SIij2(I,K,J-1)*DYF(j)) + & /(2.d0*DY(j)))**2.d0 + & +((SIij3(I,K,J)*DYF(j-1)+SIij3(I,K,J-1)*DYF(j)) + & /(2.d0*DY(j)))**2.d0 + & +2.d0*SIij4(I,K,J)**2.d0 + & +2.d0*omgij4(I,K,J)**2.d0 + & +2.d0*((SIij5(I,K,J)*DYF(j-1)+SIij5(I,K,J-1)*DYF(j)) + & /(2.d0*DY(j)))**2.d0 + & +2.d0*((omgij5(I,K,J)*DYF(j-1)+omgij5(I,K,J-1)*DYF(j)) + & /(2.d0*DY(j)))**2.d0 + & +2.d0*SIij6(I,K,J)**2.d0 + & +2.d0*omgij6(I,K,J)**2.d0 ) + + END IF + + END DO + END DO + END DO + + +! Now, compute max{0,-(I3-I4)}/(I1-I2)*S_ij, storing in Sij +! First compute at GYF points + DO J=JSTART,JEND + DO K=0,NZP-1 + DO I=0,NXM + Sij1(I,K,J)=S1(I,K,J)*Sij1(I,K,J) + Sij5(I,K,J)=S1(I,K,J)*Sij5(I,K,J) +! 
Sij2 is added through an implicit eddy viscosity + Sij2(I,K,J)=0.d0 + Sij3(I,K,J)=S1(I,K,J)*Sij3(I,K,J) + END DO + END DO + END DO + + +! Now, compute at max{0,-(I3-I4)}/(I1-I2)*S_ij at GY points + DO J=2,NY+1 + DO K=0,NZP-1 + DO I=0,NXM +! The terms dU1/dy and dU3/dy in CSij4(:,:,:) and CSij6(:,:,:) respectively +! are subtracted out from Sij here since they are treated implicitly +! in eddy viscosity terms + Sij4(I,K,J)=TEMP(I,K,J) + & *(Sij4(I,K,J)-0.5*(U1(I,K,J)-U1(I,K,J-1))/DY(j)) + Sij6(I,K,J)=TEMP(I,K,J) + & *(Sij6(I,K,J)-0.5*(U3(I,K,J)-U3(I,K,J-1))/DY(j)) + END DO + END DO + END DO + +! We now have max{0,-(I3-I4)}/(I1-I2)*S_ij stored in Sij in Physical space + + + + +! Compute the filter lengthscale +! Absorb -2.d0*C_AMD**2.d0 here for efficiency + DO J=1,NY+1 +! At GYF points: +! AMD (based off constant Smag) + DELTA_YF(J)=-2.d0*C_AMD**2.d0 + & *3/(1/(DX(1)*beta_sgs)**2.d0+1/(DYF(J)*2.d0)**2.d0 + & +1/(DZ(1)*beta_sgs)**2.d0) +! & *(DX(1)*beta_sgs*DYF(J)*2.d0*DZ(1)*beta_sgs)**(2.d0/3.d0) +! Wall Damping +! DELTA_YF(J)= +! & -2.d0*(0.1d0*(1.d0-exp((-GYF(J)/(NU*25.d0))**3.d0)))**2.d0 +! & *(DX(1)*beta_sgs*DYF(J)*2.d0*DZ(1)*beta_sgs)**(2.d0/3.d0) + + END DO + + DO J=1,NY+1 +! At GY points: +! AMD (based off Constant Smagorinsky) + DELTA_Y(J)=-2.d0*C_AMD**2.d0 + & *3/(1/(DX(1)*beta_sgs)**2.d0+1/(DY(J)*2.d0)**2.d0 + & +1/(DZ(1)*beta_sgs)**2.d0) +! & *(DX(1)*beta_sgs*DY(J)*2.d0*DZ(1)*beta_sgs)**(2.d0/3.d0) +! Wall Damping +! DELTA_Y(J)= +! & -2.d0*(0.1d0*(1.d0-exp((-GY(J)/(NU*25.d0))**3.d0)))**2.d0 +! & *(DX(1)*beta_sgs*DY(J)*2.d0*DZ(1)*beta_sgs)**(2.d0/3.d0) + END DO + +! Get the eddy viscosity at GY points +! 
NU_T = (C_S^2 * DELTA^2)*|S| + N=1 + DO J=2,NY + DO K=0,NZP-1 + DO I=0,NXM + + DELTA_TMP_Y = -2.d0*C_AMD**2.d0 + & *3/(1/(DX(1)*beta_sgs)**2.d0+1/(DY(J)*2.d0)**2.d0 + & +1/(DZ(1)*beta_sgs)**2.d0 + & + C_lb*MAX(0.0,(TH_real(I,K,J)-TH_real(I,K,J-1))/DY(J))/( + & ((U1(I,K,J)-ume(j))**2.d0*DYF(J-1) + & +(U1(I,K,J-1)-ume(J-1))**2.d0*DYF(J))/(2.d0*DY(J)) + & +((U3(I,K,J)-wme(j))**2.d0*DYF(J-1) + & +(U3(I,K,J-1)-wme(J-1))**2.d0*DYF(J))/(2.d0*DY(J)) + & +((U2(I,K,J)-vme(J))**2.d0)+EPS_DELTA ) + & ) + + NU_T(I,K,J)=-0.5d0*DELTA_TMP_Y*TEMP(I,K,J) +c NU_T(I,K,J)=-0.5d0*DELTA_Y(J)*TEMP(I,K,J) + + END DO + END DO + END DO + +! Now that we have calculated NU_T, set the value at the ghost cells +! by sharing with neighboring processes. This subroutine also sets +! the value of NU_T to zero at both walls + CALL GHOST_LES_MPI + +! Convert the stress tensor to Fourier space + + + CALL FFT_XZ_TO_FOURIER(Sij1,CSij1,0,NY+1) +! Sij2 is added through an implicit eddy viscosity +! CALL FFT_XZ_TO_FOURIER(Sij2,CSij2,0,NY+1) + CALL FFT_XZ_TO_FOURIER(Sij3,CSij3,0,NY+1) + CALL FFT_XZ_TO_FOURIER(Sij4,CSij4,0,NY+1) + CALL FFT_XZ_TO_FOURIER(Sij5,CSij5,0,NY+1) + CALL FFT_XZ_TO_FOURIER(Sij6,CSij6,0,NY+1) + +! 
Now, compute TAU, store in the corresponging Sij + DO J=1,NY+1 + DO K=0,TNKZ + DO I=0,NXP-1 + IF (J.EQ.NY+1) THEN + DELTA_TMP_YF = -2.d0*C_AMD**2.d0 + & *3/(1/(DX(1)*beta_sgs)**2.d0+1/(DYF(J)*2.d0)**2.d0 + & +1/(DZ(1)*beta_sgs)**2.d0 + & +C_lb*MAX(0.0,(TH_real(I,K,J)-TH_real(I,K,J-1))/ + & (GYF(J)-GYF(J-1)))/( + & (U1(I,K,J)-ume(J))**2.d0+(U3(I,K,J)-wme(J))**2.d0 + & + 0.5*((U2(I,K,J)-vme(J))**2.d0 + + & (U2(I,K,J)-vme(J))**2.d0 )+EPS_DELTA) + & ) + ELSE + DELTA_TMP_YF = -2.d0*C_AMD**2.d0 + & *3/(1/(DX(1)*beta_sgs)**2.d0+1/(DYF(J)*2.d0)**2.d0 + & +1/(DZ(1)*beta_sgs)**2.d0 + & +C_lb*MAX(0.0,(TH_real(I,K,J+1)-TH_real(I,K,J-1))/ + & (GYF(J+1)-GYF(J-1)))/( + & (U1(I,K,J)-ume(J))**2.d0+(U3(I,K,J)-wme(J))**2.d0 + & + 0.5*((U2(I,K,J)-vme(J))**2.d0 + + & (U2(I,K,J+1)-vme(J+1))**2.d0 )+EPS_DELTA) + & ) + ENDIF +c CSij1(I,K,J)=DELTA_YF(J)*CSij1(I,K,J) + CSij1(I,K,J)=DELTA_TMP_YF*CSij1(I,K,J) +c CSij5(I,K,J)=DELTA_YF(J)*CSij5(I,K,J) + CSij5(I,K,J)=DELTA_TMP_YF*CSij5(I,K,J) +! CSij2(:,:,:) is added through an implicit eddy viscosity +! CSij2(I,K,J)=DELTA_YF(J)*CSij2(I,K,J) +c CSij3(I,K,J)=DELTA_YF(J)*CSij3(I,K,J) + CSij3(I,K,J)=DELTA_TMP_YF*CSij3(I,K,J) + + END DO + END DO + END DO + DO J=2,NY+1 + DO K=0,TNKZ + DO I=0,NXP-1 + + DELTA_TMP_Y = -2.d0*C_AMD**2.d0 + & *3/(1/(DX(1)*beta_sgs)**2.d0+1/(DY(J)*2.d0)**2.d0 + & +1/(DZ(1)*beta_sgs)**2.d0 + & +C_lb*MAX(0.0,(TH_real(I,K,J)-TH_real(I,K,J-1))/DY(J))/( + & ((U1(I,K,J)-ume(j))**2.d0*DYF(J-1) + & +(U1(I,K,J-1)-ume(J-1))**2.d0*DYF(J))/(2.d0*DY(J)) + & +((U3(I,K,J)-wme(j))**2.d0*DYF(J-1) + & +(U3(I,K,J-1)-wme(J-1))**2.d0*DYF(J))/(2.d0*DY(J)) + & +((U2(I,K,J)-vme(J))**2.d0)+EPS_DELTA ) + & ) + + + +c CSij4(I,K,J)=DELTA_Y(J)*CSij4(I,K,J) + CSij4(I,K,J)=DELTA_TMP_Y*CSij4(I,K,J) +c CSij6(I,K,J)=DELTA_Y(J)*CSij6(I,K,J) + CSij6(I,K,J)=DELTA_TMP_Y*CSij6(I,K,J) + END DO + END DO + END DO + + +! 
tau_ij is now contained in CSij in Fourier space end if @@ -283,7 +954,7 @@ subroutine les_chan CF3(I,K,J)=CF3(I,K,J) & -CIKX(I)*CSij5(I,K,J) & -(CSij6(I,K,J+1)-CSij6(I,K,J))/DYF(J) - & -CIKZ(K)*CSij3(I,K,J) + & -CIKZ(K)*CSij3(I,K,J) END DO END DO END DO @@ -453,7 +1124,6 @@ subroutine les_chan end do end do end do - call mpi_allreduce(mpi_in_place,S1_mean,NY+2,MPI_DOUBLE_PRECISION, & MPI_SUM,MPI_COMM_Z,ierror) call mpi_allreduce(mpi_in_place,NU_T_mean,NY+2 @@ -469,7 +1139,6 @@ subroutine les_chan EPS_SGS1_MEAN(j)=EPS_SGS1_MEAN(j)/dble(NX*NZ) end do - #ifdef HDF5 FNAME='mean_les.h5' @@ -517,8 +1186,6 @@ subroutine les_chan RETURN END - - subroutine compute_strain_chan C This subroutine computes S_ij for the filtered velocity field C The input velocity field should be in fourier space in the periodic @@ -529,16 +1196,14 @@ subroutine compute_strain_chan integer I,J,K,ij - DO J=1,NY+1 + DO J=1,NY DO K=0,TNKZ DO I=0,NXP-1 CSij1(I,K,J)=CIKX(I)*CU1(I,K,J) + CSij2(I,K,J)=(CU2(I,K,J+1)-CU2(I,K,J))/DYF(j) CSij3(I,K,J)=CIKZ(K)*CU3(I,K,J) CSij5(I,K,J)=0.5d0*(CIKZ(K)*CU1(I,K,J) & +CIKX(I)*CU3(I,K,J)) - if (J /= Ny + 1) then - CSij2(I,K,J)=(CU2(I,K,J+1)-CU2(I,K,J))/DYF(j) - end if END DO END DO END DO @@ -553,9 +1218,6 @@ subroutine compute_strain_chan END DO END DO - ! Need to communicate down CSij2 to each j = Ny + 1 - call ghost_CSij2_mpi - CALL FFT_XZ_TO_PHYSICAL(CSij1,Sij1,0,NY+1) CALL FFT_XZ_TO_PHYSICAL(CSij2,Sij2,0,NY+1) @@ -570,6 +1232,188 @@ subroutine compute_strain_chan END + subroutine compute_rotation_chan +C This subroutine computes omgij (omega_ij) for the filtered velocity field +C The input velocity field should be in fourier space in the periodic +C directions. 
+C For use in the LES model in channel flow (2 periodic directions) + include 'header' + include 'header_les' + + integer I,J,K,ij + + + + DO J=1,NY + DO K=0,TNKZ + DO I=0,NXP-1 + CSij1(I,K,J)=CIKX(I)*CU1(I,K,J) + CSij2(I,K,J)=(CU2(I,K,J+1)-CU2(I,K,J))/DYF(j) + CSij3(I,K,J)=CIKZ(K)*CU3(I,K,J) + Comgij5(I,K,J)=0.5d0*(CIKZ(K)*CU1(I,K,J) + & -CIKX(I)*CU3(I,K,J)) +C ^ this is omega_ik, this is anti-cyclic permutation + END DO + END DO + END DO + DO J=1,NY+1 + DO K=0,TNKZ + DO I=0,NXP-1 + IF (DY(j).ne.0.d0) then + Comgij4(I,K,J)=0.5d0*((CU1(I,K,J)-CU1(I,K,J-1))/DY(j) + & -CIKX(I)*CU2(I,K,J) ) +C ^ this is omega_ij, cyclic permutation + Comgij6(I,K,J)=0.5d0*(CIKZ(K)*CU2(I,K,J) + & -(CU3(I,K,J)-CU3(I,K,J-1))/DY(j) ) +C ^ this is omega_jk, cyclic permutation + END IF + END DO + END DO + END DO + + +C CALL FFT_XZ_TO_PHYSICAL(CSij1,Sij1,0,NY+1) +C CALL FFT_XZ_TO_PHYSICAL(CSij2,Sij2,0,NY+1) +C CALL FFT_XZ_TO_PHYSICAL(CSij3,Sij3,0,NY+1) + + CALL FFT_XZ_TO_PHYSICAL(Comgij4,omgij4,0,NY+1) + CALL FFT_XZ_TO_PHYSICAL(Comgij5,omgij5,0,NY+1) + CALL FFT_XZ_TO_PHYSICAL(Comgij6,omgij6,0,NY+1) + +! We now have S_ij in Physical space + + RETURN + END + + subroutine compute_strain_chan_invariant +C This subroutine computes S_ij for the filtered velocity field +C The input velocity field should be in fourier space in the periodic +C directions. 
+C For use in the LES model in channel flow (2 periodic directions) + include 'header' + include 'header_les' + + integer I,J,K,ij + real*8 deltax,deltay,deltaz + +!Set filter length (based on grid size) in x,z directions +!Based on constant Smag code + deltax = (DX(1)*beta_sgs) + deltaz = (DZ(1)*beta_sgs) + + DO J=1,NY +!Set filter length (based on grid size) in y direction +!Based on constant Smag code + deltay=(DYF(J)*2.d0) + DO K=0,TNKZ + DO I=0,NXP-1 + CSIij1(I,K,J)=CIKX(I)*CU1(I,K,J) + CSIij2(I,K,J)=(CU2(I,K,J+1)-CU2(I,K,J))/DYF(j) + CSIij3(I,K,J)=CIKZ(K)*CU3(I,K,J) + CSIij5(I,K,J)=0.5d0*(CIKZ(K)*CU1(I,K,J) + & *(deltaz/deltax) + & +CIKX(I)*CU3(I,K,J) + & *(deltax/deltaz) ) + END DO + END DO + END DO + DO J=1,NY+1 + deltay=(DY(J)*2.d0) + DO K=0,TNKZ + DO I=0,NXP-1 + CSIij4(I,K,J)=0.5d0*((CU1(I,K,J)-CU1(I,K,J-1))/DY(j) + & *(deltay/deltax) + & +CIKX(I)*CU2(I,K,J) + & *(deltax/deltay) ) + CSIij6(I,K,J)=0.5d0*(CIKZ(K)*CU2(I,K,J) + & *(deltaz/deltay) + & +(CU3(I,K,J)-CU3(I,K,J-1))/DY(j) + & *(deltay/deltaz) ) + END DO + END DO + END DO + + + CALL FFT_XZ_TO_PHYSICAL(CSIij1,SIij1,0,NY+1) + CALL FFT_XZ_TO_PHYSICAL(CSIij2,SIij2,0,NY+1) + CALL FFT_XZ_TO_PHYSICAL(CSIij3,SIij3,0,NY+1) + CALL FFT_XZ_TO_PHYSICAL(CSIij4,SIij4,0,NY+1) + CALL FFT_XZ_TO_PHYSICAL(CSIij5,SIij5,0,NY+1) + CALL FFT_XZ_TO_PHYSICAL(CSIij6,SIij6,0,NY+1) + +! We now have S_ij in Physical space + + RETURN + END + + + + subroutine compute_rotation_chan_invariant +C This subroutine computes omgij (omega_ij) for the filtered velocity field +C The input velocity field should be in fourier space in the periodic +C directions. 
+C For use in the LES model in channel flow (2 periodic directions) + include 'header' + include 'header_les' + + integer I,J,K,ij + real*8 deltax,deltay,deltaz + +!Set filter length (based on grid size) in x,z directions +!Based on constant Smag code + deltax = (DX(1)*beta_sgs) + deltaz = (DZ(1)*beta_sgs) + + DO J=1,NY + deltay=(DYF(J)*2.d0) + DO K=0,TNKZ + DO I=0,NXP-1 +C CSij1(I,K,J)=CIKX(I)*CU1(I,K,J) +C CSij2(I,K,J)=(CU2(I,K,J+1)-CU2(I,K,J))/DYF(j) +C CSij3(I,K,J)=CIKZ(K)*CU3(I,K,J) + Comgij5(I,K,J)=0.5d0*(CIKZ(K)*CU1(I,K,J) + & *(deltaz/deltax) + & -CIKX(I)*CU3(I,K,J) + & *(deltax/deltaz) ) +C ^ this is omega_ik, this is anti-cyclic permutation + END DO + END DO + END DO + DO J=1,NY+1 + deltay=(DY(J)*2.d0) + if (DY(j).ne.0.d0) then + DO K=0,TNKZ + DO I=0,NXP-1 + Comgij4(I,K,J)=0.5d0*((CU1(I,K,J)-CU1(I,K,J-1))/DY(j) + & *(deltay/deltax) + & -CIKX(I)*CU2(I,K,J) + & *(deltax/deltay) ) +C ^ this is omega_ij, cyclic permutation + Comgij6(I,K,J)=0.5d0*(CIKZ(K)*CU2(I,K,J) + & *(deltaz/deltay) + & -(CU3(I,K,J)-CU3(I,K,J-1))/DY(j) + & *(deltay/deltaz) ) +C ^ this is omega_jk, cyclic permutation + END DO + END DO + end if + END DO + + +C CALL FFT_XZ_TO_PHYSICAL(CSij1,Sij1,0,NY+1) +C CALL FFT_XZ_TO_PHYSICAL(CSij2,Sij2,0,NY+1) +C CALL FFT_XZ_TO_PHYSICAL(CSij3,Sij3,0,NY+1) + CALL FFT_XZ_TO_PHYSICAL(Comgij4,omgij4,0,NY+1) + CALL FFT_XZ_TO_PHYSICAL(Comgij5,omgij5,0,NY+1) + CALL FFT_XZ_TO_PHYSICAL(Comgij6,omgij6,0,NY+1) + +! We now have S_ij in Physical space + + RETURN + END + + + subroutine les_filter_chan(A,jstart,jend) ! This subroutine applies the les filter to the input field ! The indices to the start and end of the array in the y-direction @@ -596,7 +1440,7 @@ subroutine les_filter_chan(A,jstart,jend) ! W0=1.d0/2.d0 ! W1=1.d0/4.d0 ! W2=0.d0 - Wm1_j=1.d0/4.d0 + Wm1_j=1.d0/4.d0 W0_j=1.d0/2.d0 W1_j=1.d0/4.d0 ! 
The following is for the 5-point trapezoidal rule, alpha*beta=9 @@ -639,7 +1483,7 @@ subroutine les_filter_chan(A,jstart,jend) end do ip1(NXM)=0 do i=0,NX-3 - ip2(i)=i+2 + ip2(i)=i+2 end do ip2(NX-2)=0 ip2(NXM)=1 @@ -650,9 +1494,9 @@ subroutine les_filter_chan(A,jstart,jend) B(i,k,j)=Wm2*A(im2(i),k,j)+Wm1*A(im1(i),k,j)+W0*A(i,k,j) & +W1*A(ip1(i),k,j)+W2*A(ip2(i),k,j) end do - end do + end do end do - + ! Apply filter in the z-diretion ! B=Wm2*CSHIFT(B,-2,2)+Wm1*CSHIFT(B,-1,2)+W0*B+W1*CSHIFT(B,1,2) ! & +W2*CSHIFT(B,2,2) @@ -672,7 +1516,7 @@ subroutine les_filter_chan(A,jstart,jend) end do kp1(NZM)=0 do k=0,NZ-3 - kp2(k)=k+2 + kp2(k)=k+2 end do kp2(NZ-2)=0 kp2(NZM)=1 @@ -683,7 +1527,7 @@ subroutine les_filter_chan(A,jstart,jend) A(i,k,j)=Wm2*B(i,km2(k),j)+Wm1*B(i,km1(k),j)+W0*B(i,k,j) & +W1*B(i,kp1(k),j)+W2*B(i,kp2(k),j) end do - end do + end do end do ! Apply filter in the vertical direction at all physical cells @@ -696,7 +1540,7 @@ subroutine les_filter_chan(A,jstart,jend) ! do j=jstart+1,jend-1 ! do k=0,NZM ! do i=0,NXM -! B(i,k,j)=Wm1_j*B(i,k,j-1)+W0_j*B(i,k,j)+W1_j*B(i,k,j+1) +! B(i,k,j)=Wm1_j*B(i,k,j-1)+W0_j*B(i,k,j)+W1_j*B(i,k,j+1) ! end do ! end do ! end do @@ -727,7 +1571,7 @@ subroutine les_filter_chan_fourier(A,jstart,jend) real*8 PI, LX, LZ integer NKX,NKZ,TNKZ - real*8 KX(0:NX/3),KZ(0:2*(NZ/3)) + real*8 KX(0:NX/3),KZ(0:2*(NZ/3)) real*8 A(0:NX+1,0:NZ+1,0:NY+1) @@ -737,7 +1581,7 @@ subroutine les_filter_chan_fourier(A,jstart,jend) complex*16 CB(0:NX/2,0:NZ+1,0:NY+1) - equivalence (B,CB) +ccc equivalence (B,CB) !by J.Liu, 18/Jan/2020 ! Set the ratio of filter scales parameter (alpha=2.d0) @@ -749,7 +1593,7 @@ subroutine les_filter_chan_fourier(A,jstart,jend) LX=PI LZ=2.d0*PI - + ! Get the wavenumber vectors: NKX=NX/3 DO I=0,NKX @@ -771,7 +1615,7 @@ subroutine les_filter_chan_fourier(A,jstart,jend) B(i,k,j)=A(i,k,j) end do enddo - end do + end do ! Convert to fourier space @@ -792,13 +1636,13 @@ subroutine les_filter_chan_fourier(A,jstart,jend) ! 
Now, convert back to physical space call fft_xz_to_physical(CB,B,jstart,jend) - do j=jstart,jend + do j=jstart,jend do k=0,NZM do i=0,NXM A(i,k,j)=B(i,k,j) end do end do - end do + end do return end @@ -836,7 +1680,7 @@ SUBROUTINE APPLY_BC_LES IF (U_BC_YMIN.eq.1) THEN IF ((RANKY.eq.0).or.(.NOT.USE_MPI)) THEN -! We are on a process at the bottom wall +! We are on a process at the bottom wall DO K=0,TNKZ DO I=0,NXP-1 CU1(I,K,1)=CU1(I,K,2) @@ -848,7 +1692,7 @@ SUBROUTINE APPLY_BC_LES IF (W_BC_YMIN.eq.1) THEN IF ((RANKY.eq.0).or.(.NOT.USE_MPI)) THEN -! We are on a process at the bottom wall +! We are on a process at the bottom wall DO K=0,TNKZ DO I=0,NXP-1 CU3(I,K,1)=CU3(I,K,2) @@ -860,3 +1704,9 @@ SUBROUTINE APPLY_BC_LES RETURN END + + + + + + From 522c53cfb4d81133fed04c7404dc46d2eb84299d Mon Sep 17 00:00:00 2001 From: johnryantaylor Date: Sat, 24 Apr 2021 09:30:37 +0100 Subject: [PATCH 59/66] Added AMD model to LES --- diablo_2.0/for/header_les | 52 +++++++++++++++++++++++++++++++++------ 1 file changed, 44 insertions(+), 8 deletions(-) diff --git a/diablo_2.0/for/header_les b/diablo_2.0/for/header_les index 0ee2d43..52f643f 100644 --- a/diablo_2.0/for/header_les +++ b/diablo_2.0/for/header_les @@ -11,6 +11,7 @@ real*8 NU_U3(0:NY+1) REAL*8 cross + REAL*8 beta_sgs REAL*8 Sij1(0:NX+1,0:NZP+1,0:NY+1) REAL*8 Sij2(0:NX+1,0:NZP+1,0:NY+1) @@ -26,15 +27,50 @@ COMPLEX*16 CSij5(0:NXP,0:NZ+1,0:NY+1) COMPLEX*16 CSij6(0:NXP,0:NZ+1,0:NY+1) - EQUIVALENCE (TEMP,CTEMP) - & ,(Sij1,CSij1) - & ,(Sij2,CSij2) - & ,(Sij3,CSij3) - & ,(Sij4,CSij4) - & ,(Sij5,CSij5) - & ,(Sij6,CSij6) + REAL*8 SIij1(0:NX+1,0:NZP+1,0:NY+1) + REAL*8 SIij2(0:NX+1,0:NZP+1,0:NY+1) + REAL*8 SIij3(0:NX+1,0:NZP+1,0:NY+1) + REAL*8 SIij4(0:NX+1,0:NZP+1,0:NY+1) + REAL*8 SIij5(0:NX+1,0:NZP+1,0:NY+1) + REAL*8 SIij6(0:NX+1,0:NZP+1,0:NY+1) - COMMON /LES_VARS_2/ Sij1, Sij2, Sij3, Sij4, Sij5, Sij6, TEMP + COMPLEX*16 CSIij1(0:NXP,0:NZ+1,0:NY+1) + COMPLEX*16 CSIij2(0:NXP,0:NZ+1,0:NY+1) + COMPLEX*16 CSIij3(0:NXP,0:NZ+1,0:NY+1) + 
COMPLEX*16 CSIij4(0:NXP,0:NZ+1,0:NY+1) + COMPLEX*16 CSIij5(0:NXP,0:NZ+1,0:NY+1) + COMPLEX*16 CSIij6(0:NXP,0:NZ+1,0:NY+1) + + REAL*8 omgij4(0:NX+1,0:NZP+1,0:NY+1) + REAL*8 omgij5(0:NX+1,0:NZP+1,0:NY+1) + REAL*8 omgij6(0:NX+1,0:NZP+1,0:NY+1) + COMPLEX*16 Comgij4(0:NXP,0:NZ+1,0:NY+1) + COMPLEX*16 Comgij5(0:NXP,0:NZ+1,0:NY+1) + COMPLEX*16 Comgij6(0:NXP,0:NZ+1,0:NY+1) +! EQUIVALENCE (TEMP,CTEMP) +! & ,(Sij1,CSij1) +! & ,(Sij2,CSij2) +! & ,(Sij3,CSij3) +! & ,(Sij4,CSij4) +! & ,(Sij5,CSij5) +! & ,(Sij6,CSij6) +! & ,(SIij1,CSIij1) +! & ,(SIij2,CSIij2) +! & ,(SIij3,CSIij3) +! & ,(SIij4,CSIij4) +! & ,(SIij5,CSIij5) +! & ,(SIij6,CSIij6) +! & ,(omgij4,Comgij4) +! & ,(omgij5,Comgij5) +! & ,(omgij6,Comgij6) + + COMMON /LES_VARS_2/ Sij1, Sij2, Sij3, Sij4, Sij5, Sij6, TEMP + COMMON /LES_VARS_2C/ CSij1,CSij2,CSij3,CSij4,CSij5,CSij6,CTEMP + COMMON /LES_VARS_3/ omgij4, omgij5, omgij6 + COMMON /LES_VARS_3C/ Comgij4, Comgij5, Comgij6 + COMMON /LES_VARS_4/ SIij1, SIij2, SIij3, SIij4, SIij5, SIij6 + COMMON /LES_VARS_4C/ CSIij1,CSIij2,CSIij3,CSIij4,CSIij5,CSIij6 + COMMON /LES_VARS_5/ beta_sgs From 59c40af66268da5e6f4e4695d733949b90f2eba1 Mon Sep 17 00:00:00 2001 From: johnryantaylor Date: Sat, 24 Apr 2021 09:30:52 +0100 Subject: [PATCH 60/66] Added AMD model to LES --- diablo_2.0/for/header_les_th | 68 ++++++++++++++++++++++++++++++++++++ 1 file changed, 68 insertions(+) create mode 100644 diablo_2.0/for/header_les_th diff --git a/diablo_2.0/for/header_les_th b/diablo_2.0/for/header_les_th new file mode 100644 index 0000000..41d650d --- /dev/null +++ b/diablo_2.0/for/header_les_th @@ -0,0 +1,68 @@ +!----*|--.---------.---------.---------.---------.---------.---------.-|-------| +! Parameters for Large Eddy Simulation +!----*|--.---------.---------.---------.---------.---------.---------.-|-------| + +! 
Variables for dynamic Smagrinsky + REAL*8 TEMP_th(0:NX+1,0:NZP+1,0:NY+1) + COMPLEX*16 CTEMP_th(0:NXP,0:NZ+1,0:NY+1) + REAL*8 S1_th(0:NX+1,0:NZP+1,0:NY+1) + COMPLEX*16 CS1_th(0:NXP,0:NZ+1,0:NY+1) + +! Variables for plane-averaged momentum budget + real*8 NU_U1(0:NY+1) + real*8 NU_U3(0:NY+1) + + REAL*8 cross + REAL*8 beta_sgs + + REAL*8 du1dx(0:NX+1,0:NZP+1,0:NY+1) + REAL*8 du2dx(0:NX+1,0:NZP+1,0:NY+1) + REAL*8 du3dx(0:NX+1,0:NZP+1,0:NY+1) + REAL*8 du1dy(0:NX+1,0:NZP+1,0:NY+1) + REAL*8 du2dy(0:NX+1,0:NZP+1,0:NY+1) + REAL*8 du3dy(0:NX+1,0:NZP+1,0:NY+1) + REAL*8 du1dz(0:NX+1,0:NZP+1,0:NY+1) + REAL*8 du2dz(0:NX+1,0:NZP+1,0:NY+1) + REAL*8 du3dz(0:NX+1,0:NZP+1,0:NY+1) + REAL*8 dthetadx(0:NX+1,0:NZP+1,0:NY+1,1:N_TH) + REAL*8 dthetady(0:NX+1,0:NZP+1,0:NY+1,1:N_TH) + REAL*8 dthetadz(0:NX+1,0:NZP+1,0:NY+1,1:N_TH) +!======dthetady_YF at GYF points in physical space=====! +! REAL*8 dthetady_YF(0:NX+1,0:NZP+1,0:NY+1,1:N_TH) + + COMPLEX*16 Cdu1dx(0:NXP,0:NZ+1,0:NY+1) + COMPLEX*16 Cdu2dx(0:NXP,0:NZ+1,0:NY+1) + COMPLEX*16 Cdu3dx(0:NXP,0:NZ+1,0:NY+1) + COMPLEX*16 Cdu1dy(0:NXP,0:NZ+1,0:NY+1) + COMPLEX*16 Cdu2dy(0:NXP,0:NZ+1,0:NY+1) + COMPLEX*16 Cdu3dy(0:NXP,0:NZ+1,0:NY+1) + COMPLEX*16 Cdu1dz(0:NXP,0:NZ+1,0:NY+1) + COMPLEX*16 Cdu2dz(0:NXP,0:NZ+1,0:NY+1) + COMPLEX*16 Cdu3dz(0:NXP,0:NZ+1,0:NY+1) + COMPLEX*16 Cdthetadx(0:NXP,0:NZ+1,0:NY+1,1:N_TH) + COMPLEX*16 Cdthetady(0:NXP,0:NZ+1,0:NY+1,1:N_TH) + COMPLEX*16 Cdthetadz(0:NXP,0:NZ+1,0:NY+1,1:N_TH) +!======Cdthetady_YF at GYF points in fourier space=====! +! 
COMPLEX*16 Cdthetady_YF(0:NXP,0:NZ+1,0:NY+1,1:N_TH) + +c EQUIVALENCE (TEMP_th,CTEMP_th) +c & ,(S1_th,CS1_th) +c & ,(du1dx,Cdu1dx) +c & ,(du2dx,Cdu2dx) +c & ,(du3dx,Cdu3dx) +c & ,(du1dy,Cdu1dy) +c & ,(du2dy,Cdu2dy) +c & ,(du3dy,Cdu3dy) +c & ,(du1dz,Cdu1dz) +c & ,(du2dz,Cdu2dz) +c & ,(du3dz,Cdu3dz) +c & ,(dthetadx,Cdthetadx) +c & ,(dthetady,Cdthetady) +c & ,(dthetadz,Cdthetadz) + + + COMMON /LES_VARS_3/ beta_sgs, TEMP_th, S1_th + COMMON /LES_VARS_3/ du1dx, du2dx, du3dx, du1dy, du2dy, du3dy + COMMON /LES_VARS_3/ du1dz, du2dz, du3dz + COMMON /LES_VARS_3/ dthetadx, dthetady, dthetadz + From a271b3cb614e36eb9d394f4c6c6f72e517b7b530 Mon Sep 17 00:00:00 2001 From: johnryantaylor Date: Sat, 24 Apr 2021 09:31:05 +0100 Subject: [PATCH 61/66] Added AMD model to LES --- diablo_2.0/for/Makefile | 6 ++++-- 1 file changed, 4 insertions(+), 2 deletions(-) diff --git a/diablo_2.0/for/Makefile b/diablo_2.0/for/Makefile index 8e8b3cf..95aeae8 100644 --- a/diablo_2.0/for/Makefile +++ b/diablo_2.0/for/Makefile @@ -38,9 +38,9 @@ USEROPTS = -O3 -cpp endif ifeq ($(LES),TRUE) -LES_o = les.o +LES_o = les.o les_th.o else -LES_o = no_les.o +LES_o = no_les.o endif ALL2ALL=1 @@ -112,6 +112,8 @@ courant.o: courant.f channel.o fft.o $(MPI) header grid_def ifeq ($(LES),TRUE) les.o: les.f fft.o header header_les grid_def $(COMPILER) $(COMPOPTS) -c les.f +les_th.o: les_th.f fft.o header header_les_th grid_def + $(COMPILER) $(COMPOPTS) -c les_th.f else no_les.o: dummy_code/no_les.f $(COMPILER) $(COMPOPTS) -c dummy_code/no_les.f From eac585c14addd627eeec97d009be7d5c316cbae7 Mon Sep 17 00:00:00 2001 From: johnryantaylor Date: Sat, 24 Apr 2021 09:36:31 +0100 Subject: [PATCH 62/66] Removed some debugging lines --- diablo_2.0/for/les_th.f | 31 ------------------------------- 1 file changed, 31 deletions(-) diff --git a/diablo_2.0/for/les_th.f b/diablo_2.0/for/les_th.f index caea15a..d4435ec 100644 --- a/diablo_2.0/for/les_th.f +++ b/diablo_2.0/for/les_th.f @@ -20,8 +20,6 @@ subroutine les_chan_th real*8 
C_SMAG ! parameter (C_SMAG=0.13d0) -! parameter (C_SMAG=0.01d0) -! parameter (C_SMAG=0.0992d0) parameter (C_SMAG=0.0433d0) real*8 C_AMD parameter (C_AMD=0.2887d0) @@ -62,32 +60,9 @@ subroutine les_chan_th ! If we are using Neuman boundary conditions, over-write the values of the ! velocity at the ghost cells so that the LES model doesn't use the large ! velocity gradient -! Does not yet include Neuman boundary condition for scalar CALL APPLY_BC_LES CALL APPLY_BC_SCALAR_LES -! First, for all models, apply boundary conditions to the velocity field -! (fill ghost cells) to ensure accurate calculation of gradients -C Apply Boundary conditions to velocity field -! IF (USE_MPI) THEN -! CALL APPLY_BC_VEL_MPI -! CALL GHOST_CHAN_MPI -! ELSE -! CALL APPLY_BC_VEL_LOWER -! CALL APPLY_BC_VEL_UPPER -! END IF - -! IF (USE_MPI) THEN -! CALL APPLY_BC_TH_MPI(MATL,MATD,MATU,VEC,N) -! CALL GHOST_CHAN_MPI -! END IF - -! If we are using Neuman boundary conditions, over-write the values of the -! velocity at the ghost cells so that the LES model doesn't use the large -! velocity gradient -! Does not yet include Neuman boundary condition for scalar -! CALL APPLY_BC_LES - if (LES_MODEL_TYPE.EQ.1) then ! Constant Smagorinsky model @@ -378,12 +353,6 @@ subroutine les_chan_th end do end do -! do j=0,NY+1 -! do i=0,NXM -! KAPPA_T_mean(j)=KAPPA_T_mean(j)+KAPPA_T(I,5,J,1) -! end do -! 
end do - call mpi_allreduce(mpi_in_place,S1_mean,NY+2,MPI_DOUBLE_PRECISION, & MPI_SUM,MPI_COMM_Z,ierror) call mpi_allreduce(mpi_in_place,NU_T_mean,NY+2 From 789031ce010fc1742f48fb4e9c9c0805c17a4b64 Mon Sep 17 00:00:00 2001 From: johnryantaylor Date: Sat, 24 Apr 2021 09:47:44 +0100 Subject: [PATCH 63/66] Updated the les_chan_th calling sequence --- diablo_2.0/for/channel.f | 79 ++++++---------------------------------- 1 file changed, 11 insertions(+), 68 deletions(-) diff --git a/diablo_2.0/for/channel.f b/diablo_2.0/for/channel.f index e938792..3656fdb 100644 --- a/diablo_2.0/for/channel.f +++ b/diablo_2.0/for/channel.f @@ -306,77 +306,20 @@ SUBROUTINE RK_CHAN_1 C In the process, Ui is converted to physical space call les_chan -C APPLY constant SGS Prandlt number - DO N=1,N_TH - do j=1,NY+1 - do k=0,NZP-1 - do i=0,NXM - KAPPA_T(I,K,J,N)=1.d0*NU_T(I,K,J) - end do - end do - end do - end do - -C Add the horizontal diffusive terms using explicit timestepping -C This is already done for the viscous terms inside les_chan.f +C Re-apply the boundary conditions for velocity +C In the case of Neumann (applied stress) BCs, these were changed in the LES + call APPLY_BC_VEL_PHYS_MPI - DO N=1,N_TH + CALL FFT_XZ_TO_FOURIER(U1,CU1,0,NY+1) + CALL FFT_XZ_TO_FOURIER(U2,CU2,0,NY+1) + CALL FFT_XZ_TO_FOURIER(U3,CU3,0,NY+1) - DO J=1,NY+1 - DO K=0,TNKZ - DO I=0,NXP-1 - CS1(I,K,J)=CIKX(I)*CTH(I,K,J,N) - END DO - END DO - END DO - CALL FFT_XZ_TO_PHYSICAL(CS1,S1,0,NY+1) - do j=1,NY+1 - do k=0,NZP-1 - do i=0,NXM - S1(I,K,J)=KAPPA_T(I,K,J,N)*S1(I,K,J) - end do - end do - end do - CALL FFT_XZ_TO_FOURIER(S1,CS1,0,NY+1) - DO J=JSTART_TH(N),JEND_TH(N) - DO K=0,TNKZ - DO I=0,NXP-1 - CFTH(I,K,J,N)=CFTH(I,K,J,N)+CIKX(I)*CS1(I,K,J) - END DO - END DO - END DO + call les_chan_th - DO J=1,NY+1 - DO K=0,TNKZ - DO I=0,NXP-1 - CS1(I,K,J)=CIKZ(K)*CTH(I,K,J,N) - END DO - END DO - END DO - CALL FFT_XZ_TO_PHYSICAL(CS1,S1,0,NY+1) - do j=1,NY+1 - do k=0,NZP-1 - do i=0,NXM - S1(I,K,J)=KAPPA_T(I,K,J,N)*S1(I,K,J) - 
end do - end do - end do - CALL FFT_XZ_TO_FOURIER(S1,CS1,0,NY+1) - DO J=JSTART_TH(N),JEND_TH(N) - DO K=0,TNKZ - DO I=0,NXP-1 - CFTH(I,K,J,N)=CFTH(I,K,J,N)+CIKZ(K)*CS1(I,K,J) - END DO - END DO - END DO - END DO ! end do n - -! Now, convert TH to physical space for calculation of nonlinear terms - DO N=1,N_TH - CS1(:,:,:)=CTH(:,:,:,N) - CALL FFT_XZ_TO_PHYSICAL(CS1,S1,0,NY+1) - TH(:,:,:,N)=S1(:,:,:) - END DO +C Re-apply the boundary conditions for velocity +C In the case of Neumann (applied stress) BCs, these were changed in the LES + call APPLY_BC_VEL_PHYS_MPI + call APPLY_BC_TH_PHYS_MPI ELSE C If the subgrid model hasn't been called, then it is necessary to From 86ad4dd3bade89501c83291df74936316e63bc50 Mon Sep 17 00:00:00 2001 From: johnryantaylor Date: Fri, 1 Jul 2022 09:37:39 +0100 Subject: [PATCH 64/66] updated file directory --- diablo_2.0/post_process/matlab/readmean_h5.m | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/diablo_2.0/post_process/matlab/readmean_h5.m b/diablo_2.0/post_process/matlab/readmean_h5.m index bd61c89..2fa4185 100644 --- a/diablo_2.0/post_process/matlab/readmean_h5.m +++ b/diablo_2.0/post_process/matlab/readmean_h5.m @@ -3,7 +3,7 @@ % User settings.... 
% Set the run directory -base_dir='/local/data/public/jrt51/testing'; +base_dir='../../KH_test'; NY=65; % Here, NY should match the value in grid_def.all N_TH=1; % The number of scalars Re = 1000; NU=1/Re; % Enter the Reynolds number or viscosity from input.dat From 8764a500c2ee15da69934e70999ab273d04f2393 Mon Sep 17 00:00:00 2001 From: johnryantaylor Date: Fri, 1 Jul 2022 09:38:17 +0100 Subject: [PATCH 65/66] updated to match KH_test parameters --- diablo_2.0/post_process/matlab/plot_movie_xy.m | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/diablo_2.0/post_process/matlab/plot_movie_xy.m b/diablo_2.0/post_process/matlab/plot_movie_xy.m index 9b36a42..bd1beb2 100644 --- a/diablo_2.0/post_process/matlab/plot_movie_xy.m +++ b/diablo_2.0/post_process/matlab/plot_movie_xy.m @@ -1,13 +1,13 @@ % This script shows how to load in 2D slices and make a movie of the simulation output % Run after readmean.m -LX=1000; -NX=512; +LX=30; +NX=128; x=linspace(0,LX,NX); filename=[base_dir '/movie.h5']; -drhodx=3e-8; +drhodx=0; for k=1:nk k @@ -40,7 +40,7 @@ axis tight shading interp -colormap(jet(256)); +colormap(parula(256)); colorbar M(k)=getframe(gcf); clf; From f0cac67c5a50f1564f916146aa1d4ab17d46fb5c Mon Sep 17 00:00:00 2001 From: johnryantaylor Date: Fri, 1 Jul 2022 09:38:43 +0100 Subject: [PATCH 66/66] Update input.dat --- diablo_2.0/KH_test/input.dat | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/diablo_2.0/KH_test/input.dat b/diablo_2.0/KH_test/input.dat index 3ae5853..d4b7198 100644 --- a/diablo_2.0/KH_test/input.dat +++ b/diablo_2.0/KH_test/input.dat @@ -21,7 +21,7 @@ C CREATE_FLOW_TH(1) Create new field or Read from DIABLO_TH.START C FILTER_TH(1) FILTER_INT(1) (If and how often to filter) .FALSE. 10 C RI_TAU(1) PR(1) - 0.15 1.0 + 0.05 1.0 C When including scalar advection, include 6 lines like the following for each scalar