WSL/SLF GitLab Repository

Commit 3873ce3f authored by Daniela Brito Melo's avatar Daniela Brito Melo
Browse files

Version 1.0

parents
#GEOMETRY
L_X= 6.40 #0.785407 #1.570814 # 12.56637 #3.141628 #6.283256
L_Y= 6.40 #6.40 #0.785407 #1.570817 #6.283256
L_Z= 6.40 #1.000000
c_factor= 0.10000
f_factor= 0.90000
noise_factor= F
#TIME_STEPPING
NSTEPS= 18000000 #2000000
DT= 0.00005 # time step (s)
TR_STEPS= 0
TR_DT_FACTOR= 1.0
#SIMULATION PARAMETERS
RE= 100000000
U_STAR= 1.0
RI_SC1= 0.0
PR_SC1= 0.71766988
PR_SGS_SC1= 0.71766988
PR_SC2= 0.631959376
PR_SGS_SC2= 0.631959376
#FLAGS
SGS= T
SC1= T
SC2= T
SLOPE= F # T for sloping surface
ALPHA= 0.0
FORCE_X= 0.06800000 #0.02500000 #15625 # CORRESPONDS TO USTAR=1 WITH LZ=0.5
FORCE_Y= 0.0 #-0.914
#GEOSTROPHIC
coriol= 0.00 #0.13942923 # actual coriolis parameter times 1000
ug= 0.00 #6.0000000
vg= 0.00 #0.0000000
#SGS
MODEL= 5 #1->STAT; 2->DYN; 3->SDEP 4->LAGR SCALE-SIM 5->LAGR SCALE-DEP
FILTER= 1 #1->CO 2->GSS 3->TH
CS_CNT= 10
I_DYN= 1000 # number of initial time steps in which the static Smagorinsky model is used
CO= 0.16
WALL_DMP= F
#################################################################
#MOMENTUM IC
VI1= F #CONSTANT
VI2= T #LOG: based on FORCE_X and FORCE_Y
VI3= F #POISEUILLE
VI4= F #LINEAR
VNF= 5.0
U0= 2.0
V0= 0.0
W0= 0.0
DUDZ0= 0.0
ZTURB= 100.0
#MOMENTUM BC
LLBC= STD
LUBC= STD
LLBC_SPECIAL= WALL_LAW #SM_CUBES,DTM_DDFA,WALL_LAW,STD,IBM_DFA
LUBC_SPECIAL= STD
LLBC_U= DRC #NEU,DRC,WLL
LLBC_V= DRC
LLBC_W= DRC
LUBC_U= NEU
LUBC_V= NEU
LUBC_W= DRC
DLBC_U= 0.0
DLBC_V= 0.0
DLBV_W= 0.0 # NOTE(review): key name looks like a typo of DLBC_W; kept as-is because the parser may match lines literally/positionally — confirm before renaming
DUBC_U= 0.0
DUBC_V= 0.0
DUBC_W= 0.0
#ROUGHNESS (FOR IC OR WALL_LAW)
LBC_DZ0= 0.0001 #0.00001
DSPL_H= 0.0
WALL_C= 0.5
#IBM DFA
TRIINTERFACE= '../surfaces/wavy_hill/wavy_hill.dat'
LPS= T
PHI_ROT= 0
IT_LPS= 3
NITER_F= 1
#DRS MODEL (check specific parameters)
DRSM= F
#################################################################
#OUTPUT
VERBOSE= F
PROFILE= T
#STD OUTPUT
WBASE= 1000 # frequency for average ustar, mean vertical profiles, etc.
C_CNT1= 100000000 #100000 # frequency for instantaneous field
C_CNT2= 100000000 #100000 # instantaneous slice frequency (3 xz-, 3 yz-, 1 xy-slices per process)
P_CNT3= 100000 #ta1 field
C_CNT3= 1000 # ta1 sampling frequency
HAVG_CNT3= F # 'T': save only horizontal averages of ta1 field / 'F': save whole 3D ta1 field
C_CNT4= 6000000 #1000000 # frequency for field for input for another simulation
inlet_count= 50000000
C_CNT5= 1000 # frequency for imitating eddy covariance sampling in the center of the domain
Z_EDDY_COV= 100 #1.9 # height (m) of eddy covariance sampling. If Z_EDDY_COV > L_Z, the eddy covariance output is omitted.
#SCALAR1
SC1_INIT= 1.0 # initial temperature (non-dimensional) at the second lowest grid point level, 1.0 corresponds to the value of T_SCALE in PARAM.f90
DSC1DZ_INIT= 0.001408 # slope for initial vertical profile (1/m)
NF_SC1= 0.01 # noise factor
Z_TURB_SC1= 100.0 # parameter controlling where noise is added
Z_ENT_SC1= 100.0 # initial height (m) of entrainment layer, only relevant if smaller than height of domain
ENT_SC1_INIT= 0.0 # temperature (non-dimensional) that is initially added at the height of the entrainment layer
ENT_DSC1DZ_INIT= 0.00001 # slope for initially adding a linear function within the entrainment layer
LLBC_SC1= 'DRC' # type of lower boundary condition
LUBC_SC1= 'NEU'
DLBC_SC1= 1.0 # value of lower boundary condition
DUBC_SC1= 0.1223 #0.0504
#SCALAR2
SC2_INIT= 0.00261312855682389 #0.0006421356723696423 # initial specific humidity (kg/kg)
DSC2DZ_INIT= -0.000057 #0.0 # slope for initial vertical profile
NF_SC2= 0.001
Z_TURB_SC2= 100.0
Z_ENT_SC2= 100.0
ENT_SC2_INIT= 0.0
ENT_DSC2DZ_INIT= -0.0000001
LLBC_SC2= 'DRC'
LUBC_SC2= 'NEU'
DLBC_SC2= 0.00261312855682389 #1605339180924
DUBC_SC2= -0.0036
################################################################
#LSM
LSM= T
NP_MAX= 20000 # Maximum number of particles
LSM_FREQ= 1.0 # Update frequency of the LSM model - time advancement
AVERAGE_FREQ= 1000 # equal or smaller than LSM_INIT - time averaging of the LES field needed for LSM SGS dissipation
PRINT1_FREQ= 25000 # output of particle data every LSM steps - i.e every PRINT1_FREQ*LSM_FREQ "LES" timesteps
LSM_INIT= 500000 # when the LSM algo is started - !!! NOTE - AVERAGE_FREQ < LSM_INIT !!!
P_RHO= 918.4 # Density of particles in [kg/m3]
D_M= 0.00026 #0.0002 #0.0002 # mean diameter [m]
D_S= 0.00013 #0.0001 #0.0001 # std. deviation of particles [m]
D_MIN= 0.00005 # bounds for min and max diameter
D_MAX= 0.002
PPP_MIN= 5000 # particles per parcel minimum
PPP_MAX= 250000 # particles per parcel maximum
B_ENE= 1.E-10 # cohesion energy ( NOT needed for hot particles)
R_DEP= F # read initial distribution pattern for particles - mostly not needed
FILE_DEP= '' # path for the file for the initial distribution of particles
INIT_DEP= 10000.0 # kg/m2
MODE= "saltation" # possible: 'snowfall' / 'saltation' / 'point_release' / 'line_release'
OUTLET= F # 'F': periodic bc for particles / 'T' : particles disappear at the boundary
PARTICLE_TYPE= "inertial" # possibility: 'passive' / 'inertial' : drag force + gravity / 'settling' : superimposed w velocity
SETTLING_VEL= 0.0 # set the settling velocity
molec_weight_water= 0.018015 # molecular weight of water [M mol^{-1}]
R_universal= 8.314 # universal gas constant [J mol{-1} K{-1}]
surface_tension= 0.0728 # surface tension between water and air interface
water_rho= 918.4 # density of water
c_p_a= 1005.4671 # specific heat at constant pressure for air
c_L= 2035.68566 # specific heat capacity for ice ( taken at -10 C )
c_p_v= 1854.2224 # specific heat capacity for vapor
sublimation= T
Schiller= T
Mason= F
thorpe_mason= F
lp_conc= T
lp_flux= T
lp_stress= T
lp_diss= F
lp_massbal= T
lp_restart= F
lp_mombal= T
#################################################################
# WIND FARM
num_of_rows= 4
num_of_columns= 4
spacing_x= 24
spacing_y= 16
wind_farm= F
Ct= 1.3333333
Cp= 0.80000
local_tip_speed_ratio= 8.00000
wt_averaging_time= 1.00000
staggered= F
start_disk= 3000
wind_farm_debug= F
------------------------------------------------------------
User guide for LES-LSM
------------------------------------------------------------
Main developer (LES): Marco Giometto (mgiometto@gmail.com )
Main developers (LSM): Francesco Comola (francesco.comola@gmail.com)
Varun Sharma (varun.sharma@epfl.ch)
Contributors : Armin Sigmund (armin.sigmund@epfl.ch)
Daniela Brito Melo (daniela.britomelo@epfl.ch)
------------------------------------------------------------
Ecole Polytechnique Fédérale de Lausanne, EPFL
Switzerland
August 2021
------------------------------------------------------------
Compiling and running the LES-LSM code in Linux environment
------------------------------------------------------------
------------------------------------------------------------
1. Install a Fortran Intel compiler + MPI libraries
For example the "Intel oneAPI Base Toolkit" + "Intel oneAPI HPC Toolkit".
You can download it from the following website and install it following the instructions provided:
https://software.intel.com/content/www/us/en/develop/tools/oneapi/all-toolkits.html#gs.19gr9f
------------------------------------------------------------
1.1 Prepare your system as explained in the Get Started Guide
In Ubuntu, run on your terminal:
sudo apt update
sudo apt -y install cmake pkg-config build-essential
You should also set the Environment variables. To do it permanently, you can add to the end of your bashrc file (~/.bashrc) the following line:
source /opt/intel/oneapi/setvars.sh
------------------------------------------------------------
------------------------------------------------------------
2. Compile the code
------------------------------------------------------------
2.1 Install makedepf90
------------------------------------------------------------
Run on your terminal:
sudo apt install makedepf90
------------------------------------------------------------
2.2. Edit the makefile according to your architecture
Choose the correct architecture (variable ARCH). If you are running it on your personal computer use 'PC_INTEL'.
------------------------------------------------------------
2.3 Define the number of MPI tasks you will use
Open the source file 'src/PARAM.f90'.
Define the value of 'nproc' equal to a power of 2: 2, 4, 8, 16, 32, ...
[Yes, it is hard coded. We are sorry. If you change your mind, do not forget to compile the code again.]
------------------------------------------------------------
2.4 Compile the code
Run on your terminal:
make clean
make
The executable 'program.exe' will be created.
------------------------------------------------------------
------------------------------------------------------------
3. Run the code
------------------------------------------------------------
3.1 Edit the input file 'PARAMETERS.py_0' according to your problem of interest
Do not add any space between the variable name and the equal sign.
Do not change the order of any line.
Do not add extra blank lines.
Do not delete any blank lines.
------------------------------------------------------------
3.2.a) Run the code directly in the terminal
Type the following commands:
ulimit -c unlimited
ulimit -s unlimited
export OMP_NUM_THREADS=2 [write the number of OMP threads per MPI task that you want]
nohup mpirun -np 8 ./program.exe & [write the number of MPI tasks that you want (= nproc !!!)]
Note: "nohup (...) &" will append the output to file 'nohup.out'
------------------------------------------------------------
OR
------------------------------------------------------------
3.2.b) Run the code via a bash script
Create a new file. For example, run the following command on terminal:
vim launch_les.sh
And write the following on it:
#!/bin/bash
ulimit -c unlimited
ulimit -s unlimited
export OMP_NUM_THREADS=2
nohup mpirun -np 8 ./program.exe &
Save and close the file.
Make your launch script an executable by running the following command:
chmod +x ./launch_les.sh
Run the code by typing the following command:
./launch_les.sh
------------------------------------------------------------
------------------------------------------------------------
Congratulations! The code is running!
Thank you for using it!
------------------------------------------------------------
------------------------------------------------------------
NOTE 1: There are other hard coded parameters that you can specify at 'src/PARAM.f90'.
For example:
- Grid size: variables NX, NY and NZ_TOT (NZ_TOT = number of grid cells in z + 1).
It is advisable to use a power of 2 for NX and NY and a power of 2 + 1 for NZ_TOT,
especially because the MPI parallelization divides the domain into subdomains of equal size along the vertical direction.
- Fluid kinematic viscosity: variable NU_MOLEC
- Fluid density: variable F_RHO
NOTE 2: While the roughness length for momentum is included in the 'PARAMETERS.py_0' file (variable LBC_DZ0),
the roughness lengths for temperature and humidity (LBC_SC_ZO) are specified in 'src/BC.f90' as a fraction of the roughness length for momentum.
------------------------------------------------------------
------------------------------------------------------------
#!/bin/bash -l
# SLURM batch script for running the LES-LSM executable (program.exe) on a
# Cray/Intel GPU cluster (CSCS Piz Daint style environment).
# Total MPI tasks = nodes * ntasks-per-node = 8 * 4 = 32; this must equal the
# hard-coded 'nproc' in src/PARAM.f90 (see the user guide) — confirm before submitting.
#SBATCH --job-name="snowshine_with_omp_2_all"
#SBATCH --output=snowshine.%j.o
#SBATCH --error=snowshine.%j.e
#SBATCH --nodes=8
#SBATCH --ntasks-per-node=4
#SBATCH --cpus-per-task=2
#SBATCH --constraint=gpu
#SBATCH --time=24:00:00
##SBATCH --partition=debug
#SBATCH --account=s569
#SBATCH --mail-type=ALL
#SBATCH --mail-user=varun.sharma@epfl.ch
# Switch from the default Cray programming environment to the Intel toolchain.
module load slurm
module load daint-gpu
module switch PrgEnv-cray PrgEnv-intel
# Optional one-time setup for wind-farm runs (kept disabled).
#mkdir turbine_data
#mkdir wind_farm_forces
#mkdir wind_farm_setup
#cp ../default_turbine.txt .
# Optional Score-P/Scalasca profiling setup (kept disabled).
#module load Score-P/3.0-CrayIntel-2016.11
#module load Scalasca/2.3.1-CrayIntel-2016.11
#module load cray-libsci
#export SCOREP_EXPERIMENT_DIRECTORY=scorep_sum_trace
#export SCOREP_ENABLE_TRACING=true
#export SCOREP_ENABLE_PROFILING=true
#export SCOREP_TOTAL_MEMORY=1024M
# Remove stack and core-file size limits (large Fortran automatic arrays
# can otherwise overflow the default stack).
ulimit -s unlimited
ulimit -c unlimited
# OpenMP threads per MPI task; should match --cpus-per-task above.
export OMP_NUM_THREADS=2
#export KMP_AFFINITY=scatter
#numactl --interleave=all
# Launch under 'time' for a wall-clock summary; alternative launch lines kept below.
#/usr/bin/time -p srun --distribution=block:block --exclusive --hint=nomultithread numactl --interleave=all ./program.exe
/usr/bin/time -p srun --hint=nomultithread ./program.exe
#srun ./program.exe
echo " ****** SIMULATION FINISHED ! ****** "
exit
#!/bin/bash
# Minimal workstation launch script for the LES-LSM executable.
# Remove core-file and stack size limits (large Fortran automatic arrays
# can otherwise overflow the default stack).
ulimit -c unlimited
ulimit -s unlimited
# Number of OpenMP threads per MPI task.
export OMP_NUM_THREADS=2
# -np must equal the hard-coded 'nproc' in src/PARAM.f90 (see the user guide);
# nohup detaches the run and appends output to nohup.out.
nohup mpirun -np 8 ./program.exe &
#!/bin/bash
#################################################################################
#
# MAKEFILE WITH AUTOMATIC DEPENDENCIES for program prova.f90
#
#################################################################################
#
# Date of last update: 08/05/2013
#
#
# Author: Marco Giometto
#
# Environment: Ubuntu 12.04
#
#
# Description:
#
# Some precompiler flags are there and used in the OUTPUT_SPC.f90, this is done
# so that anyone who uses the code can implement his output in specific standalone
# modules without affecting the memory allocation at compile time of the code.
#
#
#################################################################################
#preprocessor stuff --------------------- please modify
# BDG/MPI/SCALARS select which preprocessor flags and compilers are used below.
BDG = yes
MPI = yes
SCALARS = yes
#define shell
SHELL = /bin/sh
#architecture ---------------------------- please modify
# Uncomment exactly one ARCH; it selects the compiler/flags block further down.
#ARCH = PC_GNU
ARCH = PC_INTEL
#ARCH = BELLATRIX_INTEL
#ARCH = CSCS_INTEL
#ARCH = DENEB_INTEL
#ARCH = MAMMOUTH
#mpi library path
# MPIMOD is only referenced by the commented-out 'cp' in the .depend rule below.
MPIMOD = /usr/local/lib/mpi.mod
#executable name
EXE = program.exe
#mode
# NOTE(review): MODE appears unused in the visible rules — TODO confirm.
MODE = TRACEBACK
#MODE = AGGRESSIVE
#dependencies creator
MAKEDEP = ./src/makedepf90
#directory for the .o files
OBJDIR = ./src/.obj
MODDIR = ./src/.mod
#directories for sources
SRCS = ./src/*.f90
SRCS += ./src/src_sgs/*.f90
SRCS += ./src/src_sc/*.f90
SRCS += ./src/src_bdg/*.f90
SRCS += ./src/src_mpi/*.f90
# Preprocessor flags passed to the Fortran compiler via $(FPPF).
FPPF = -DLES #this does nothing at the moment
#FPPF += -DUCL
FPPF += -DMPI
FPPF += -DBDG
#################################################################################
# setting compilers and libraries depending on the pc
#################################################################################
#settings for PC with GNU compiler (f90)
ifeq ($(ARCH),PC_GNU)
ifeq ($(MPI),yes)
FC = mpif90
else
FC = gfortran
endif
FFLAGS = -O3 -cpp -m64 -g -fopenmp -fbounds-check -I$(OBJDIR) -J$(MODDIR)
# -Wall -fbounds-check -fbacktrace
LIBPATH = -L/home/mg/Phd/libraries/bin_gfortran
LIBS = $(LIBPATH) -lfftw3
endif
#settings for PC with INTEL compiler (ifort)
ifeq ($(ARCH),PC_INTEL)
ifeq ($(MPI),yes)
FC = mpiifort -DMPI
else
FC = ifort
endif
FFLAGS = -fpp -O3 -mkl -assume byterecl -qopenmp -no-wrap-margin $(FPPF) -module $(MODDIR) -I$(OBJDIR)
# Debug alternative (bounds checks, traceback, no optimization):
#FFLAGS = -fpp -g -O0 -traceback -check bounds -assume byterecl -fpe0 -debug all -no-wrap-margin -mkl -openmp -module $(MODDIR) -I$(OBJDIR)
LDFLAGS = #-mcmodel medium
LIBPATH = #-L/home/mg/Phd/libraries/bin_gfortran
LIBS = $(LIBPATH) #-mkl -lmkl_lapack95_lp64
endif
#settings for ROSA with INTEL compiler (ifort)
ifeq ($(ARCH),DENEB_INTEL)
ifeq ($(MPI),yes)
FC = mpiifort
else
FC = ifort
endif
FFLAGS = -fpp $(FPPF) -mkl -assume byterecl -O3 -qopenmp -module $(MODDIR) -I$(OBJDIR) -axSSE4.2
#LDFLAGS = -mcmodel=medium
LIBPATH = -L/ssoft/fftw/3.3.4/RH6/intel-15.0.0/x86_E5v2/intelmpi/lib
LIBS = $(LIBPATH) -lfftw3
endif
#settings for ROSA with INTEL compiler (ifort)
ifeq ($(ARCH),BELLATRIX_INTEL)
ifeq ($(MPI),yes)
FC = mpiifort
else
FC = ifort
endif
FFLAGS = -fpp $(FPPF) -mkl -check bounds -traceback -assume byterecl -O3 -openmp -module $(MODDIR) -I$(OBJDIR) -axSSE4.2
#LDFLAGS = -mcmodel=medium
LIBPATH = -L/ssoft/fftw/3.3.4/RH6/intel-15.0.0/x86_E5v2/intelmpi/lib
LIBS = $(LIBPATH) -lfftw3
endif
#settings for DORA with INTEL compiler (ifort)
ifeq ($(ARCH),CSCS_INTEL)
FC = ftn
#FC = scorep --user --static ftn
FFLAGS = -fpp -O3 -assume byterecl -shared-intel -dynamic -mkl -qopenmp -no-wrap-margin $(FPPF) -module $(MODDIR) -I$(OBJDIR)
#FFLAGS = -fpp -g -O0 -traceback -assume byterecl -dynamic -check bounds -fpe0 -no-wrap-margin -mkl -qopenmp $(FPPF) -module $(MODDIR) -I$(OBJDIR)
#FFLAGS = -fpp $(FPPF) -g -debug all -mkl -traceback -check bounds -assume byterecl -no-wrap-margin -qopenmp -dynamic -shared-intel -mcmodel=large -qopt-streaming-stores always -module $(MODDIR) -I$(OBJDIR)
LDFLAGS =
#LIBPATH = -L/opt/software/fftw/3.3.3/intel-intelmpi/13.0.1-4.1.0/lib
LIBS = $(LIBPATH) #-lfftw3
endif
#settings for ROSA with INTEL compiler (ifort)
ifeq ($(ARCH),MAMMOUTH)
FC = mpif90
FFLAGS = -fpp $(FPPF) -mkl -traceback -check bounds -assume byterecl -O3 -openmp -module $(MODDIR) -I$(OBJDIR)
#LDFLAGS = -mcmodel=medium
#LIBPATH = -L/opt/software/fftw/3.3.3/intel-intelmpi/13.0.1-4.1.0/lib
LIBS = $(LIBPATH) -lfftw3
endif
#################################################################################
# various checks and launch the makefile for compilation
#################################################################################
# preprocessor flags settings
# COMPSTR is the per-file compile command template handed to makedepf90 (-r);
# the doubled $$ survive make's expansion so makedepf90 sees $(FC) etc.
COMPSTR = '$$(FC) -c -o $$@ $$(FFLAGS) $$<'
#include the dependency list created by makedepf90 below
# Including .depend triggers the rule below to (re)generate it first.
include .depend
.depend: $(SRC)
#cp $(MPIMOD) ./src/.mod/
	$(MAKEDEP) -r $(COMPSTR) -b $(OBJDIR) -o $(EXE) $(SRCS) > .depend
#################################################################################
# cleaning and extra useful commands
#################################################################################
.PHONY : clean
#cleaning out everything
# NOTE(review): recipe lines must be tab-indented; tabs may have been lost in
# this copy — verify before use.
clean:
	rm .depend
	rm ./src/.obj/*
	rm ./src/.mod/*
	rm program.exe
	clear
	@echo "All unnecessary files wiped out!"
File added
MODULE BC
USE TYPES
USE PARAM
USE SERVICE
USE SIM_PARAM
use stretch
IMPLICIT NONE
!---------------------------------------------------------------------------------
CONTAINS
!!--------------------------------------------------------------------------------
!! OBJECT: SUBROUTINE SET_ZO()
!!--------------------------------------------------------------------------------
!!
!! LAST CHECKED/MODIFIED: Marco Giometto ( mgiometto@gmail.com ) on 16/10/2012
!!
!! ITS DONE LIKE THIS SO THAT CAN BE NICELY CONNECTED WITH REMOTE OR DIFF VALUES
!!
!!--------------------------------------------------------------------------------
SUBROUTINE SET_LBC_ZO(LBC_ZO)
!! Fill the 2D lower-boundary roughness-length field with the uniform value
!! LBC_DZ0 read from the input file (PARAMETERS key LBC_DZ0).
!! Kept as a subroutine so spatially varying roughness maps can be plugged
!! in later (see the commented example below).
IMPLICIT NONE
REAL(RPREC),DIMENSION(:,:),INTENT(INOUT) :: LBC_ZO
LBC_ZO=LBC_DZ0
!LBC_ZO(16:48,:) = LBC_DZ0 * 10.0_rprec ! example: rougher strip in x
END SUBROUTINE SET_LBC_ZO
!!--------------------------------------------------------------------------------
!! OBJECT: SUBROUTINE SET_LBC_SC_ZO()
!!--------------------------------------------------------------------------------
!!
!! LAST CHECKED/MODIFIED: Marco Giometto ( mgiometto@gmail.com ) on 16/10/2012
!!
!! ITS DONE LIKE THIS SO THAT CAN BE NICELY CONNECTED WITH REMOTE OR DIFF VALUES
!!
!!--------------------------------------------------------------------------------
SUBROUTINE SET_LBC_SC_ZO(LBC_SC_ZO,SCALAR)
!! Set the lower-boundary roughness length for scalar number SCALAR
!! (1 = temperature, 2 = humidity; see user guide NOTE 2) as a fraction
!! of the momentum roughness length LBC_DZ0. For any other SCALAR value
!! the field is left untouched, exactly as in the original IF chain.
IMPLICIT NONE
REAL(RPREC),DIMENSION(:,:),INTENT(INOUT) :: LBC_SC_ZO
INTEGER,INTENT(IN) :: SCALAR
SELECT CASE (SCALAR)
CASE (1, 2)
   ! both scalars currently use one tenth of the momentum roughness length
   LBC_SC_ZO = LBC_DZ0*0.1_rprec
END SELECT
END SUBROUTINE SET_LBC_SC_ZO
!!--------------------------------------------------------------------------------
!! OBJECT: SUBROUTINE SET_INLET(U,V,W,BC_INLET,U_REF,Z_REF,ALPHA)
!!--------------------------------------------------------------------------------
!!
!! LAST CHECKED/MODIFIED: Marco Giometto ( mgiometto@gmail.com ) on 16/10/2012
!!
!! U/V/W(LD,NY,0:NZ)
!!
!!--------------------------------------------------------------------------------
SUBROUTINE SET_INLET()
use SP_SC1,only:SC1
use SP_SC2,only:SC2
!use SP_SC3,only:SC3
IMPLICIT NONE
character(256) :: opath,filename
IF(VERBOSE) WRITE (*,*) 'IN SET_INLET'
write(opath,'(a)') trim(out_path)//'inlet_slices/'
write(filename,'(a,i0)') trim(opath)//'u_YZ.c',coord
open(111,file=trim(filename),status='unknown',access='direct',recl=8*ny*(nz-1))
read(111,rec=jt) u((nx/2),1:ny,1:nz-1)
close(111)
write(filename,'(a,i0)') trim(opath)//'v_YZ.c',coord
open(111,file=trim(filename),status='unknown',access='direct',recl=8*ny*(nz-1))
read(111,rec=jt) v((nx/2),1:ny,1:nz-1)
close(111)
write(filename,'(a,i0)') trim(opath)//'w_YZ.c',coord