From d2fab3604d00064a25e4587646efe6aae6f93e95 Mon Sep 17 00:00:00 2001 From: GeorgeGayno-NOAA <52789452+GeorgeGayno-NOAA@users.noreply.github.com> Date: Mon, 17 Aug 2020 13:14:25 -0400 Subject: [PATCH] Port repository to Orion Updates to compile repo on Orion. New scripts to run regression tests on Orion. New script to run the grid creation step on Orion. Update link_fixdirs.sh to link to Orion fixed directories. Remove some references to Tide/Gyre. See #96 and #134 for more details. --- driver_scripts/driver_grid.orion.sh | 133 ++++++++++++++++++++++ fix/link_fixdirs.sh | 10 +- modulefiles/build.orion | 15 +-- modulefiles/module-setup.sh.inc | 6 - reg_tests/chgres_cube/c192.fv3.history.sh | 9 ++ reg_tests/chgres_cube/c192.gfs.grib2.sh | 9 ++ reg_tests/chgres_cube/c96.fv3.nemsio.sh | 9 ++ reg_tests/chgres_cube/c96.fv3.netcdf.sh | 9 ++ reg_tests/chgres_cube/c96.fv3.restart.sh | 9 ++ reg_tests/chgres_cube/c96.gfs.nemsio.sh | 9 ++ reg_tests/chgres_cube/c96.gfs.sigio.sh | 14 +++ reg_tests/chgres_cube/c96.regional.sh | 9 ++ reg_tests/chgres_cube/driver.orion.sh | 133 ++++++++++++++++++++++ reg_tests/global_cycle/C768.fv3gfs.sh | 9 ++ reg_tests/global_cycle/driver.orion.sh | 61 ++++++++++ reg_tests/grid_gen/c96.uniform.sh | 8 ++ reg_tests/grid_gen/driver.orion.sh | 72 ++++++++++++ reg_tests/grid_gen/gfdl.regional.sh | 8 ++ reg_tests/ice_blend/driver.orion.sh | 54 +++++++++ reg_tests/snow2mdl/driver.orion.sh | 50 ++++++++ sorc/machine-setup.sh | 9 -- ush/load_fv3gfs_modules.sh | 3 - 22 files changed, 617 insertions(+), 31 deletions(-) create mode 100755 driver_scripts/driver_grid.orion.sh create mode 100755 reg_tests/chgres_cube/driver.orion.sh create mode 100755 reg_tests/global_cycle/driver.orion.sh create mode 100755 reg_tests/grid_gen/driver.orion.sh create mode 100755 reg_tests/ice_blend/driver.orion.sh create mode 100755 reg_tests/snow2mdl/driver.orion.sh diff --git a/driver_scripts/driver_grid.orion.sh b/driver_scripts/driver_grid.orion.sh new file mode 100755 index 000000000..66a2955d5 --- /dev/null +++ b/driver_scripts/driver_grid.orion.sh @@ -0,0 +1,133 @@ +#!/bin/bash + +#SBATCH -J fv3_grid_driver +#SBATCH -A fv3-cpu +#SBATCH --open-mode=truncate +#SBATCH -o log.fv3_grid_driver +#SBATCH -e log.fv3_grid_driver +#SBATCH --nodes=1 --ntasks-per-node=24 +#SBATCH -q debug +#SBATCH -t 00:30:00 + +#----------------------------------------------------------------------- +# Driver script to create a cubic-sphere based model grid on Orion. +# +# Produces the following files (netcdf, each tile in separate file): +# 1) 'mosaic' and 'grid' files containing lat/lon and other +# records that describe the model grid. +# 2) 'oro' files containing land mask, terrain and gravity +# wave drag fields. +# 3) Surface climo fields, such as soil type, vegetation +# greenness and albedo. +# +# Note: The sfc_climo_gen program only runs with an +# mpi task count that is a multiple of six. This is +# an ESMF library requirement. Large grids may require +# tasks spread across multiple nodes. The orography code +# benefits from threads. +# +# To run, do the following: +# +# 1) Set "C" resolution, "res" - Example: res=96. +# 2) Set grid type ("gtype"). 
Valid choices are +# "uniform" - global uniform grid +# "stretch" - global stretched grid +# "nest" - global stretched grid with nest +# "regional_gfdl" - stand-alone gfdl regional grid +# "regional_esg" - stand-alone extended Schmidt +# gnomonic (esg) regional grid +# 3) For "stretch" and "nest" grids, set the stretching factor - +# "stretch_fac", and center lat/lon of highest resolution +# tile - "target_lat" and "target_lon". +# 4) For "nest" grids, set the refinement ratio - "refine_ratio", +# the starting/ending i/j index location within the parent +# tile - "istart_nest", "jstart_nest", "iend_nest", "jend_nest" +# 5) For "regional_gfdl" grids, set the "halo". Default is three +# rows/columns. +# 6) For "regional_esg" grids, set center lat/lon of grid, +# - "target_lat/lon" - the i/j dimensions - "i/jdim", the +# x/y grid spacing - "delx/y", and halo. +# 7) Set working directory - TMPDIR - and path to the repository +# clone - home_dir. +# 8) Submit script: "sbatch $script". +# 9) All files will be placed in "out_dir". +# +#----------------------------------------------------------------------- + +set -x + +source ../sorc/machine-setup.sh > /dev/null 2>&1 +source ../modulefiles/build.$target +module list + +#----------------------------------------------------------------------- +# Set grid specs here. +#----------------------------------------------------------------------- + +export gtype=uniform # 'uniform', 'stretch', 'nest', + # 'regional_gfdl', 'regional_esg' + +if [ $gtype = uniform ]; then + export res=96 +elif [ $gtype = stretch ]; then + export res=96 + export stretch_fac=1.5 # Stretching factor for the grid + export target_lon=-97.5 # Center longitude of the highest resolution tile + export target_lat=35.5 # Center latitude of the highest resolution tile +elif [ $gtype = nest ] || [ $gtype = regional_gfdl ]; then + export res=96 + export stretch_fac=1.5 # Stretching factor for the grid + export target_lon=-97.5 # Center longitude of the highest resolution tile + export target_lat=35.5 # Center latitude of the highest resolution tile + export refine_ratio=3 # The refinement ratio + export istart_nest=27 # Starting i-direction index of nest grid in parent tile supergrid + export jstart_nest=37 # Starting j-direction index of nest grid in parent tile supergrid + export iend_nest=166 # Ending i-direction index of nest grid in parent tile supergrid + export jend_nest=164 # Ending j-direction index of nest grid in parent tile supergrid + export halo=3 # Lateral boundary halo +elif [ $gtype = regional_esg ] ; then + export res=-999 # equivalent res is computed. + export target_lon=-97.5 # Center longitude of grid + export target_lat=35.5 # Center latitude of grid + export idim=301 # Dimension of grid in 'i' direction + export jdim=200 # Dimension of grid in 'j' direction + export delx=0.0585 # Grid spacing (in degrees) in the 'i' direction + # on the SUPERGRID (which has twice the resolution of + # the model grid). The physical grid spacing in the 'i' + # direction is related to delx as follows: + # distance = 2*delx*(circumf_Earth/360 deg) + export dely=0.0585 # Grid spacing (in degrees) in the 'j' direction. + export halo=3 # number of row/cols for halo +fi + +#----------------------------------------------------------------------- +# Check paths. +# home_dir - location of repository. +# TMPDIR - working directory. +# out_dir - where files will be placed upon completion. 
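+#
+#   out_dir will hold the mosaic/grid, orography and surface climo
+#   files listed in the header above; TMPDIR is scratch space and can
+#   be removed once the job completes.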
+#----------------------------------------------------------------------- + +export home_dir=$SLURM_SUBMIT_DIR/.. +export TMPDIR=/work/noaa/stmp/$LOGNAME/fv3_grid.$gtype +export out_dir=/work/noaa/stmp/$LOGNAME/my_grids + +#----------------------------------------------------------------------- +# Should not need to change anything below here. +#----------------------------------------------------------------------- + +export APRUN=time +export APRUN_SFC=srun +export OMP_NUM_THREADS=24 +export OMP_STACKSIZE=2048m +export machine=ORION + +ulimit -a +ulimit -s 199000000 + +#----------------------------------------------------------------------- +# Start script. +#----------------------------------------------------------------------- + +$home_dir/ush/fv3gfs_driver_grid.sh + +exit diff --git a/fix/link_fixdirs.sh b/fix/link_fixdirs.sh index 718692317..2f7017a7f 100755 --- a/fix/link_fixdirs.sh +++ b/fix/link_fixdirs.sh @@ -9,20 +9,20 @@ machine=${2} if [ $# -lt 2 ]; then set +x echo '***ERROR*** must specify two arguements: (1) RUN_ENVIR, (2) machine' - echo ' Syntax: link_fv3gfs.sh ( nco | emc ) ( cray | dell | hera | jet )' + echo ' Syntax: link_fv3gfs.sh ( nco | emc ) ( cray | dell | hera | jet | orion )' exit 1 fi if [ $RUN_ENVIR != emc -a $RUN_ENVIR != nco ]; then set +x echo '***ERROR*** unsupported run environment' - echo 'Syntax: link_fv3gfs.sh ( nco | emc ) ( cray | dell | hera | jet )' + echo 'Syntax: link_fv3gfs.sh ( nco | emc ) ( cray | dell | hera | jet | orion )' exit 1 fi -if [ $machine != cray -a $machine != hera -a $machine != dell -a $machine != jet ]; then +if [ $machine != cray -a $machine != hera -a $machine != dell -a $machine != jet -a $machine != orion ]; then set +x echo '***ERROR*** unsupported machine' - echo 'Syntax: link_fv3gfs.sh ( nco | emc ) ( cray | dell | hera | jet )' + echo 'Syntax: link_fv3gfs.sh ( nco | emc ) ( cray | dell | hera | jet | orion )' exit 1 fi @@ -43,6 +43,8 @@ elif [ $machine = "hera" ]; then FIX_DIR="/scratch1/NCEPDEV/global/glopara/fix" elif [ $machine = "jet" ]; then FIX_DIR="/lfs4/HFIP/hfv3gfs/glopara/git/fv3gfs/fix" +elif [ $machine = "orion" ]; then + FIX_DIR="/work/noaa/global/glopara/fix" fi for dir in fix_am fix_fv3 fix_orog fix_fv3_gmted2010 fix_sfc_climo; do [[ -d $dir ]] && rm -rf $dir diff --git a/modulefiles/build.orion b/modulefiles/build.orion index 0adf7b990..145ec66c8 100644 --- a/modulefiles/build.orion +++ b/modulefiles/build.orion @@ -10,22 +10,19 @@ module use -a /apps/contrib/NCEPLIBS/orion/modulefiles module load w3nco/2.1.0 module load nemsio/2.3.0 module load bacio/2.2.0 -module load sp/2.1.0 module load sfcio/1.2.0 module load sigio/2.2.0 module load gfsio/1.2.0 -module load w3emc/2.5.0 +module load w3emc/2.4.0 module load ip/3.1.0 module load nemsiogfs/2.3.0 module load landsfcutil/2.2.0 - -# George V's version. 
-module use -a /apps/contrib/NCEPLIBS/lib/modulefiles -module load g2-intel-sandybridge/2.5.0 - +module load g2/3.1.1 +module load sp/2.0.3 export Jasper_ROOT="/apps/jasper-1.900.1" -module load netcdf/4.7.2-parallel -module load esmf/8.0.0_ParallelNetCDF +module use -a /apps/contrib/NCEPLIBS/lib/modulefiles +module load netcdfp/4.7.4.release +module load esmflocal/8_0_0.release export WGRIB2_ROOT="/work/noaa/da/ggayno/save/wgrib2" diff --git a/modulefiles/module-setup.sh.inc b/modulefiles/module-setup.sh.inc index 48d2cd723..551381537 100644 --- a/modulefiles/module-setup.sh.inc +++ b/modulefiles/module-setup.sh.inc @@ -45,12 +45,6 @@ elif [[ -d /gpfs/hps && -e /etc/SuSE-release ]] ; then module use /gpfs/hps/nco/ops/nwprod/modulefiles module use /gpfs/hps/nco/ops/nwprod/lib/modulefiles module use /usrx/local/prod/modulefiles -elif [[ -d /dcom && -d /hwrf ]] ; then - # We are on NOAA Tide or Gyre - if ( ! eval module help > /dev/null 2>&1 ) ; then - source /usrx/local/Modules/default/init/$__ms_shell - fi - module purge elif [[ -L /usrx && "$( readlink /usrx 2> /dev/null )" =~ dell ]] ; then # We are on NOAA Mars or Venus if ( ! eval module help > /dev/null 2>&1 ) ; then diff --git a/reg_tests/chgres_cube/c192.fv3.history.sh b/reg_tests/chgres_cube/c192.fv3.history.sh index b29c66303..b76aae083 100755 --- a/reg_tests/chgres_cube/c192.fv3.history.sh +++ b/reg_tests/chgres_cube/c192.fv3.history.sh @@ -50,8 +50,17 @@ echo "Ending at: " `date` #----------------------------------------------------------------------------- # Compare output from chgres to baseline set of data. +# +# orion's nccmp utility does not work with the netcdf +# required to run ufs_utils. So swap it. #----------------------------------------------------------------------------- +machine=${machine:-NULL} +if [ $machine == 'orion' ]; then + module unload netcdfp/4.7.4.release + module load netcdf/4.7.2 +fi + cd $DATA test_failed=0 diff --git a/reg_tests/chgres_cube/c192.gfs.grib2.sh b/reg_tests/chgres_cube/c192.gfs.grib2.sh index 1ec76820f..0615f6832 100755 --- a/reg_tests/chgres_cube/c192.gfs.grib2.sh +++ b/reg_tests/chgres_cube/c192.gfs.grib2.sh @@ -45,8 +45,17 @@ echo "Ending at: " `date` #----------------------------------------------------------------------------- # Compare output from chgres to baseline set of data. +# +# orion's nccmp utility does not work with the netcdf +# required to run ufs_utils. So swap it. #----------------------------------------------------------------------------- +machine=${machine:-NULL} +if [ $machine == 'orion' ]; then + module unload netcdfp/4.7.4.release + module load netcdf/4.7.2 +fi + cd $DATA test_failed=0 diff --git a/reg_tests/chgres_cube/c96.fv3.nemsio.sh b/reg_tests/chgres_cube/c96.fv3.nemsio.sh index 43da0dc6d..5cb4fe7cf 100755 --- a/reg_tests/chgres_cube/c96.fv3.nemsio.sh +++ b/reg_tests/chgres_cube/c96.fv3.nemsio.sh @@ -41,8 +41,17 @@ echo "Ending at: " `date` #----------------------------------------------------------------------------- # Compare output from chgres to baseline set of data. +# +# orion's nccmp utility does not work with the netcdf +# required to run ufs_utils. So swap it. 
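+#
+# (The checks below compare each output file against its counterpart
+# under $HOMEreg using the nccmp utility pointed to by $NCCMP; any
+# difference marks the test as failed.)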
#----------------------------------------------------------------------------- +machine=${machine:-NULL} +if [ $machine == 'orion' ]; then + module unload netcdfp/4.7.4.release + module load netcdf/4.7.2 +fi + cd $DATA test_failed=0 diff --git a/reg_tests/chgres_cube/c96.fv3.netcdf.sh b/reg_tests/chgres_cube/c96.fv3.netcdf.sh index 8000f592c..615e39475 100755 --- a/reg_tests/chgres_cube/c96.fv3.netcdf.sh +++ b/reg_tests/chgres_cube/c96.fv3.netcdf.sh @@ -42,8 +42,17 @@ echo "Ending at: " `date` #----------------------------------------------------------------------------- # Compare output from chgres to baseline set of data. +# +# orion's nccmp utility does not work with the netcdf +# required to run ufs_utils. So swap it. #----------------------------------------------------------------------------- +machine=${machine:-NULL} +if [ $machine == 'orion' ]; then + module unload netcdfp/4.7.4.release + module load netcdf/4.7.2 +fi + cd $DATA test_failed=0 diff --git a/reg_tests/chgres_cube/c96.fv3.restart.sh b/reg_tests/chgres_cube/c96.fv3.restart.sh index 6e7107e5b..4e012a2e0 100755 --- a/reg_tests/chgres_cube/c96.fv3.restart.sh +++ b/reg_tests/chgres_cube/c96.fv3.restart.sh @@ -51,8 +51,17 @@ echo "Ending at: " `date` #----------------------------------------------------------------------------- # Compare output from chgres to baseline set of data. +# +# orion's nccmp utility does not work with the netcdf +# required to run ufs_utils. So swap it. #----------------------------------------------------------------------------- +machine=${machine:-NULL} +if [ $machine == 'orion' ]; then + module unload netcdfp/4.7.4.release + module load netcdf/4.7.2 +fi + cd $DATA test_failed=0 diff --git a/reg_tests/chgres_cube/c96.gfs.nemsio.sh b/reg_tests/chgres_cube/c96.gfs.nemsio.sh index a6d51a0b5..0f06d71d1 100755 --- a/reg_tests/chgres_cube/c96.gfs.nemsio.sh +++ b/reg_tests/chgres_cube/c96.gfs.nemsio.sh @@ -45,8 +45,17 @@ echo "Ending at: " `date` #----------------------------------------------------------------------------- # Compare output from chgres to baseline set of data. +# +# orion's nccmp utility does not work with the netcdf +# required to run ufs_utils. So swap it. #----------------------------------------------------------------------------- +machine=${machine:-NULL} +if [ $machine == 'orion' ]; then + module unload netcdfp/4.7.4.release + module load netcdf/4.7.2 +fi + cd $DATA test_failed=0 diff --git a/reg_tests/chgres_cube/c96.gfs.sigio.sh b/reg_tests/chgres_cube/c96.gfs.sigio.sh index 840dbf9eb..3a1e00fa3 100755 --- a/reg_tests/chgres_cube/c96.gfs.sigio.sh +++ b/reg_tests/chgres_cube/c96.gfs.sigio.sh @@ -9,6 +9,12 @@ set -x +# Orion won't let me set the ulimit in the driver script. Set it here. +machine=${machine:-NULL} +if [ $machine == 'orion' ]; then + ulimit -s 199000000 +fi + export DATA=$OUTDIR/c96_gfs_sigio rm -fr $DATA @@ -41,8 +47,16 @@ echo "Ending at: " `date` #----------------------------------------------------------------------------- # Compare output from chgres to baseline set of data. +# +# orion's nccmp utility does not work with the netcdf +# required to run ufs_utils. So swap it. 
#----------------------------------------------------------------------------- +if [ $machine == 'orion' ]; then + module unload netcdfp/4.7.4.release + module load netcdf/4.7.2 +fi + cd $DATA test_failed=0 diff --git a/reg_tests/chgres_cube/c96.regional.sh b/reg_tests/chgres_cube/c96.regional.sh index 9d15e9533..cecb199cc 100755 --- a/reg_tests/chgres_cube/c96.regional.sh +++ b/reg_tests/chgres_cube/c96.regional.sh @@ -47,8 +47,17 @@ echo "Ending at: " `date` #----------------------------------------------------------------------------- # Compare output from chgres to baseline set of data. +# +# orion's nccmp utility does not work with the netcdf +# required to run ufs_utils. So swap it. #----------------------------------------------------------------------------- +machine=${machine:-NULL} +if [ $machine == 'orion' ]; then + module unload netcdfp/4.7.4.release + module load netcdf/4.7.2 +fi + cd $DATA test_failed=0 diff --git a/reg_tests/chgres_cube/driver.orion.sh b/reg_tests/chgres_cube/driver.orion.sh new file mode 100755 index 000000000..3026082e1 --- /dev/null +++ b/reg_tests/chgres_cube/driver.orion.sh @@ -0,0 +1,133 @@ +#!/bin/bash + +#----------------------------------------------------------------------------- +# +# Run the chgres_cube regression tests on Orion. +# +# Set OUTDIR to your working directory. Set the PROJECT_CODE and QUEUE +# as appropriate. To see which projects you are authorized to use, +# type "saccount_params". +# +# Invoke the script with no arguments. A series of daily-chained +# regression tests will be submitted. To check the queue, type: +# "squeue -u $LOGNAME". +# +# The run output will be stored in OUTDIR. Log output from the suite +# will be in LOG_FILE. Once the suite has completed, a summary is +# placed in SUM_FILE. +# +# A test fails when its output does not match the baseline files as +# determined by the "nccmp" utility. The baseline files are stored in +# HOMEreg. +# +#----------------------------------------------------------------------------- + +set -x + +source ../../sorc/machine-setup.sh > /dev/null 2>&1 +source ../../modulefiles/build.$target +module list + +export OUTDIR=/work/noaa/stmp/$LOGNAME/chgres_reg_tests +PROJECT_CODE="fv3-cpu" +QUEUE="debug" + +#----------------------------------------------------------------------------- +# Should not have to change anything below here. HOMEufs is the root +# directory of your UFS_UTILS clone. HOMEreg contains the input data +# and baseline data for each test. +#----------------------------------------------------------------------------- + +export HOMEufs=$PWD/../.. + +export HOMEreg=/work/noaa/da/ggayno/save/ufs_utils.git/reg_tests/chgres_cube + +export NCCMP=/apps/nccmp-1.8.5/bin/nccmp + +LOG_FILE=regression.log +SUM_FILE=summary.log +rm -f $LOG_FILE $SUM_FILE + +export OMP_STACKSIZE=1024M + +export APRUN=srun + +export machine=orion + +rm -fr $OUTDIR + +#----------------------------------------------------------------------------- +# Initialize C96 using FV3 warm restart files. +#----------------------------------------------------------------------------- + +export OMP_NUM_THREADS=1 # needs to match cpus-per-task +TEST1=$(sbatch --parsable --ntasks-per-node=6 --nodes=1 -t 0:15:00 -A $PROJECT_CODE -q $QUEUE -J c96.fv3.restart \ + -o $LOG_FILE -e $LOG_FILE ./c96.fv3.restart.sh) + +#----------------------------------------------------------------------------- +# Initialize C192 using FV3 tiled history files. 
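+#
+# (This job and every later one is held with "-d afterok" on the
+# previous job id, so the tests run one at a time; if a test fails,
+# the jobs queued behind it remain pending and can be cleared with
+# "scancel".)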
+#----------------------------------------------------------------------------- + +export OMP_NUM_THREADS=1 # needs to match cpus-per-task +TEST2=$(sbatch --parsable --ntasks-per-node=6 --nodes=1 -t 0:15:00 -A $PROJECT_CODE -q $QUEUE -J c192.fv3.history \ + --open-mode=append -o $LOG_FILE -e $LOG_FILE -d afterok:$TEST1 ./c192.fv3.history.sh) + +#----------------------------------------------------------------------------- +# Initialize C96 using FV3 gaussian nemsio files. +#----------------------------------------------------------------------------- + +export OMP_NUM_THREADS=1 # needs to match cpus-per-task +TEST3=$(sbatch --parsable --ntasks-per-node=6 --nodes=1 -t 0:15:00 -A $PROJECT_CODE -q $QUEUE -J c96.fv3.nemsio \ + --open-mode=append -o $LOG_FILE -e $LOG_FILE -d afterok:$TEST2 ./c96.fv3.nemsio.sh) + +#----------------------------------------------------------------------------- +# Initialize C96 using spectral GFS sigio/sfcio files. +#----------------------------------------------------------------------------- + +export OMP_NUM_THREADS=6 # needs to match cpus-per-task +TEST4=$(sbatch --parsable --ntasks-per-node=3 --cpus-per-task=6 --nodes=2 -t 0:15:00 -A $PROJECT_CODE -q $QUEUE -J c96.gfs.sigio \ + --open-mode=append -o $LOG_FILE -e $LOG_FILE -d afterok:$TEST3 ./c96.gfs.sigio.sh) + +#----------------------------------------------------------------------------- +# Initialize C96 using spectral GFS gaussian nemsio files. +#----------------------------------------------------------------------------- + +export OMP_NUM_THREADS=1 # needs to match cpus-per-task +TEST5=$(sbatch --parsable --ntasks-per-node=6 --nodes=1 -t 0:15:00 -A $PROJECT_CODE -q $QUEUE -J c96.gfs.nemsio \ + --open-mode=append -o $LOG_FILE -e $LOG_FILE -d afterok:$TEST4 ./c96.gfs.nemsio.sh) + +#----------------------------------------------------------------------------- +# Initialize regional C96 using FV3 gaussian nemsio files. +#----------------------------------------------------------------------------- + +export OMP_NUM_THREADS=1 # needs to match cpus-per-task +TEST6=$(sbatch --parsable --ntasks-per-node=6 --nodes=1 -t 0:15:00 -A $PROJECT_CODE -q $QUEUE -J c96.regional \ + --open-mode=append -o $LOG_FILE -e $LOG_FILE -d afterok:$TEST5 ./c96.regional.sh) + +#----------------------------------------------------------------------------- +# Initialize global C192 using GFS GRIB2 files. +#----------------------------------------------------------------------------- + +export OMP_NUM_THREADS=1 # needs to match cpus-per-task +TEST7=$(sbatch --parsable --ntasks-per-node=6 --nodes=1 -t 0:05:00 -A $PROJECT_CODE -q $QUEUE -J c192.gfs.grib2 \ + --open-mode=append -o $LOG_FILE -e $LOG_FILE -d afterok:$TEST6 ./c192.gfs.grib2.sh) + +#----------------------------------------------------------------------------- +# Initialize global C96 using FV3 gaussian netcdf files. +#----------------------------------------------------------------------------- + +export OMP_NUM_THREADS=1 # needs to match cpus-per-task +TEST8=$(sbatch --parsable --ntasks-per-node=6 --nodes=2 -t 0:10:00 -A $PROJECT_CODE -q $QUEUE -J c96.fv3.netcdf \ + --open-mode=append -o $LOG_FILE -e $LOG_FILE -d afterok:$TEST7 ./c96.fv3.netcdf.sh) + +#----------------------------------------------------------------------------- +# Create summary log. 
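+#
+# (Each test writes a one-line '<<<' result to the shared $LOG_FILE;
+# the short job below collects those lines into $SUM_FILE.)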
+#----------------------------------------------------------------------------- + +sbatch --nodes=1 -t 0:01:00 -A $PROJECT_CODE -J chgres_summary -o $LOG_FILE -e $LOG_FILE \ + --open-mode=append -q $QUEUE -d afterok:$TEST8 << EOF +#!/bin/sh +grep -a '<<<' $LOG_FILE > $SUM_FILE +EOF + +exit 0 diff --git a/reg_tests/global_cycle/C768.fv3gfs.sh b/reg_tests/global_cycle/C768.fv3gfs.sh index 20909b6f3..b35743496 100755 --- a/reg_tests/global_cycle/C768.fv3gfs.sh +++ b/reg_tests/global_cycle/C768.fv3gfs.sh @@ -49,6 +49,15 @@ if [ $iret -ne 0 ]; then exit $iret fi +# orion's nccmp utility does not work with the netcdf +# required to run global_cycle. So swap it. + +machine=${machine:-NULL} +if [[ "$machine" = 'orion' ]]; then + module unload netcdfp/4.7.4.release + module load netcdf/4.7.2 +fi + test_failed=0 cd $DATA diff --git a/reg_tests/global_cycle/driver.orion.sh b/reg_tests/global_cycle/driver.orion.sh new file mode 100755 index 000000000..63c51f9f7 --- /dev/null +++ b/reg_tests/global_cycle/driver.orion.sh @@ -0,0 +1,61 @@ +#!/bin/bash + +#----------------------------------------------------------------------------- +# +# Run global_cycle regression test on Orion. +# +# Set $DATA to your working directory. Set the project code (SBATCH -A) +# and queue (SBATCH -q) as appropriate. +# +# Invoke the script as follows: sbatch $script +# +# Log output is placed in regression.log. A summary is +# placed in summary.log +# +# The test fails when its output does not match the baseline files +# as determined by the 'nccmp' utility. This baseline files are +# stored in HOMEreg. +# +#----------------------------------------------------------------------------- + +#SBATCH -J cycle_reg_test +#SBATCH -A fv3-cpu +#SBATCH --open-mode=truncate +#SBATCH -o regression.log +#SBATCH -e regression.log +#SBATCH --nodes=1 --ntasks-per-node=6 +#SBATCH -q debug +#SBATCH -t 00:05:00 + +set -x + +source ../../sorc/machine-setup.sh > /dev/null 2>&1 +source ../../modulefiles/build.$target + +export DATA=/work/noaa/stmp/$LOGNAME/reg_tests.cycle + +#----------------------------------------------------------------------------- +# Should not have to change anything below. +#----------------------------------------------------------------------------- + +export HOMEreg=/work/noaa/da/ggayno/save/ufs_utils.git/reg_tests/global_cycle + +export OMP_NUM_THREADS_CY=2 + +export APRUNCY="srun" + +export NWPROD=$PWD/../.. + +export COMOUT=$DATA + +export NCCMP=/apps/nccmp-1.8.5/bin/nccmp + +export machine='orion' + +reg_dir=$PWD + +./C768.fv3gfs.sh + +cp $DATA/summary.log $reg_dir + +exit diff --git a/reg_tests/grid_gen/c96.uniform.sh b/reg_tests/grid_gen/c96.uniform.sh index d01f5f66d..73da0389d 100755 --- a/reg_tests/grid_gen/c96.uniform.sh +++ b/reg_tests/grid_gen/c96.uniform.sh @@ -33,8 +33,16 @@ echo "Ending at: " `date` #----------------------------------------------------------------------------- # Compare output to baseline set of data. +# +# Note: orion's nccmp utility does not work with the netcdf +# required to run ufs_utils. So swap it. 
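+#
+# (Note: the grid_gen driver exports machine=ORION in upper case, so
+# the test below matches "ORION" rather than the lower-case "orion"
+# used by the chgres_cube tests.)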
#----------------------------------------------------------------------------- +if [[ "$machine" = "ORION" ]] ;then + module unload netcdfp/4.7.4.release + module load netcdf/4.7.2 +fi + cd $out_dir/C96 test_failed=0 diff --git a/reg_tests/grid_gen/driver.orion.sh b/reg_tests/grid_gen/driver.orion.sh new file mode 100755 index 000000000..2ae73ff2d --- /dev/null +++ b/reg_tests/grid_gen/driver.orion.sh @@ -0,0 +1,72 @@ +#!/bin/bash + +#----------------------------------------------------------------------------- +# +# Run grid generation regression tests on Orion. +# +# Set WORK_DIR to your working directory. Set the PROJECT_CODE and QUEUE +# as appropriate. To see which projects you are authorized to use, +# type "saccount_params". +# +# Invoke the script with no arguments. A series of daily- +# chained jobs will be submitted. To check the queue, type: +# "squeue -u $LOGNAME". +# +# Log output from the suite will be in LOG_FILE. Once the suite +# has completed, a summary is placed in SUM_FILE. +# +# A test fails when its output does not match the baseline files as +# determined by the "nccmp" utility. The baseline files are stored in +# HOMEreg +# +#----------------------------------------------------------------------------- + +source ../../sorc/machine-setup.sh > /dev/null 2>&1 +source ../../modulefiles/build.$target + +set -x + +export WORK_DIR=/work/noaa/stmp/$LOGNAME/reg_tests.grid +QUEUE="batch" +PROJECT_CODE="fv3-cpu" + +#----------------------------------------------------------------------------- +# Should not have to change anything below here. +#----------------------------------------------------------------------------- + +LOG_FILE=regression.log +SUM_FILE=summary.log +export home_dir=$PWD/../.. +export APRUN=time +export APRUN_SFC=srun +export OMP_STACKSIZE=2048m +export OMP_NUM_THREADS=24 +export machine=ORION +export NCCMP=/apps/nccmp-1.8.5/bin/nccmp +export HOMEreg=/work/noaa/da/ggayno/save/ufs_utils.git/reg_tests/grid_gen/baseline_data + +rm -fr $WORK_DIR + +#----------------------------------------------------------------------------- +# C96 uniform grid +#----------------------------------------------------------------------------- + +TEST1=$(sbatch --parsable --ntasks-per-node=24 --nodes=1 -t 0:15:00 -A $PROJECT_CODE -q $QUEUE -J c96.uniform \ + -o $LOG_FILE -e $LOG_FILE ./c96.uniform.sh) + +#----------------------------------------------------------------------------- +# C96 regional grid +#----------------------------------------------------------------------------- + +TEST2=$(sbatch --parsable --ntasks-per-node=24 --nodes=1 -t 0:10:00 -A $PROJECT_CODE -q $QUEUE -J gfdl.regional \ + --open-mode=append -o $LOG_FILE -e $LOG_FILE -d afterok:$TEST1 ./gfdl.regional.sh) + +#----------------------------------------------------------------------------- +# Create summary log. +#----------------------------------------------------------------------------- + +sbatch --nodes=1 -t 0:01:00 -A $PROJECT_CODE -J grid_summary -o $LOG_FILE -e $LOG_FILE \ + --open-mode=append -q $QUEUE -d afterok:$TEST2 << EOF +#!/bin/sh +grep -a '<<<' $LOG_FILE > $SUM_FILE +EOF diff --git a/reg_tests/grid_gen/gfdl.regional.sh b/reg_tests/grid_gen/gfdl.regional.sh index 54fb934d8..05eb31e6c 100755 --- a/reg_tests/grid_gen/gfdl.regional.sh +++ b/reg_tests/grid_gen/gfdl.regional.sh @@ -42,8 +42,16 @@ echo "Ending at: " `date` #----------------------------------------------------------------------------- # Compare output to baseline set of data. 
+# +# Note: orion's nccmp utility does not work with the netcdf +# required to run ufs_utils. So swap it. #----------------------------------------------------------------------------- +if [[ "$machine" = "ORION" ]] ;then + module unload netcdfp/4.7.4.release + module load netcdf/4.7.2 +fi + cd $out_dir/C424 test_failed=0 diff --git a/reg_tests/ice_blend/driver.orion.sh b/reg_tests/ice_blend/driver.orion.sh new file mode 100755 index 000000000..188ede2bb --- /dev/null +++ b/reg_tests/ice_blend/driver.orion.sh @@ -0,0 +1,54 @@ +#!/bin/bash + +#----------------------------------------------------------------------------- +# +# Run ice_blend regression test on Orion. +# +# Set $DATA to your working directory. Set the project code (SBATCH -A) +# and queue (SBATCH -q) as appropriate. +# +# Invoke the script as follows: sbatch $script +# +# Log output is placed in regression.log. A summary is +# placed in summary.log +# +# The test fails when its output does not match the baseline file +# as determined by the 'cmp' command. The baseline file is +# stored in HOMEreg. +# +#----------------------------------------------------------------------------- + +#SBATCH -J ice_blend +#SBATCH -A fv3-cpu +#SBATCH --open-mode=truncate +#SBATCH -o regression.log +#SBATCH -e regression.log +#SBATCH --ntasks=1 +#SBATCH -q debug +#SBATCH -t 00:03:00 + +set -x + +source ../../sorc/machine-setup.sh > /dev/null 2>&1 +source ../../modulefiles/build.$target + +export DATA="/work/noaa/stmp/$LOGNAME/reg_test.ice_blend" + +#----------------------------------------------------------------------------- +# Should not have to change anything below. +#----------------------------------------------------------------------------- + +export WGRIB=/apps/contrib/NCEPLIBS/orion/utils/grib_util.v1.2.0/exec/wgrib +export WGRIB2=/apps/contrib/NCEPLIBS/orion/utils/grib_util.v1.2.0/exec/wgrib2 +export COPYGB=/apps/contrib/NCEPLIBS/lib/NCEPLIBS-grib_util/v1.1.1/exec/copygb +export COPYGB2=/apps/contrib/NCEPLIBS/orion/utils/grib_util.v1.2.0/exec/copygb2 +export CNVGRIB=/apps/contrib/NCEPLIBS/orion/utils/grib_util.v1.2.0/exec/cnvgrib + +export HOMEreg=/work/noaa/da/ggayno/save/ufs_utils.git/reg_tests/ice_blend +export HOMEgfs=$PWD/../.. + +rm -fr $DATA + +./ice_blend.sh + +exit 0 diff --git a/reg_tests/snow2mdl/driver.orion.sh b/reg_tests/snow2mdl/driver.orion.sh new file mode 100755 index 000000000..fbc331510 --- /dev/null +++ b/reg_tests/snow2mdl/driver.orion.sh @@ -0,0 +1,50 @@ +#!/bin/bash + +#----------------------------------------------------------------------------- +# +# Run snow2mdl regression test on Orion. +# +# Set $DATA to your working directory. Set the project code (SBATCH -A) +# and queue (SBATCH -q) as appropriate. +# +# Invoke the script as follows: sbatch $script +# +# Log output is placed in regression.log. A summary is +# placed in summary.log +# +# The test fails when its output does not match the baseline file +# as determined by the 'cmp' command. The baseline file is +# stored in HOMEreg. 
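+#
+# (Illustrative form of that check - the actual file names come from
+# snow2mdl.sh:
+#    cmp $DATA/snogrb_model $HOMEreg/snogrb_model )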
+# +#----------------------------------------------------------------------------- + +#SBATCH -J snow +#SBATCH -A fv3-cpu +#SBATCH --open-mode=truncate +#SBATCH -o regression.log +#SBATCH -e regression.log +#SBATCH --ntasks=1 +#SBATCH -q debug +#SBATCH -t 00:03:00 + +set -x + +source ../../sorc/machine-setup.sh > /dev/null 2>&1 +source ../../modulefiles/build.$target + +export DATA="/work/noaa/stmp/$LOGNAME/reg_tests.snow2mdl" + +#----------------------------------------------------------------------------- +# Should not have to change anything below. +#----------------------------------------------------------------------------- + +rm -fr $DATA + +export HOMEreg=/work/noaa/da/ggayno/save/ufs_utils.git/reg_tests/snow2mdl +export HOMEgfs=$PWD/../.. +export WGRIB=/apps/contrib/NCEPLIBS/orion/utils/grib_util.v1.2.0/exec/wgrib +export WGRIB2=/apps/contrib/NCEPLIBS/orion/utils/grib_util.v1.2.0/exec/wgrib2 + +./snow2mdl.sh + +exit 0 diff --git a/sorc/machine-setup.sh b/sorc/machine-setup.sh index 8389f2b0e..08e500537 100644 --- a/sorc/machine-setup.sh +++ b/sorc/machine-setup.sh @@ -77,15 +77,6 @@ elif [[ -L /usrx && "$( readlink /usrx 2> /dev/null )" =~ dell ]] ; then fi target=wcoss_dell_p3 module purge - -elif [[ -d /dcom && -d /hwrf ]] ; then - # We are on NOAA Tide or Gyre - if ( ! eval module help > /dev/null 2>&1 ) ; then - echo load the module command 1>&2 - source /usrx/local/Modules/default/init/$__ms_shell - fi - target=wcoss - module purge elif [[ -d /glade ]] ; then # We are on NCAR Yellowstone if ( ! eval module help > /dev/null 2>&1 ) ; then diff --git a/ush/load_fv3gfs_modules.sh b/ush/load_fv3gfs_modules.sh index 8972b6575..a8bb23662 100755 --- a/ush/load_fv3gfs_modules.sh +++ b/ush/load_fv3gfs_modules.sh @@ -24,9 +24,6 @@ elif [[ -d /gpfs/hps && -e /etc/SuSE-release ]] ; then elif [[ -L /usrx && "$( readlink /usrx 2> /dev/null )" =~ dell ]] ; then # We are on NOAA Mars or Venus module load module_base.wcoss_dell_p3 -elif [[ -d /dcom && -d /hwrf ]] ; then - # We are on NOAA Tide or Gyre - module load module_base.wcoss elif [[ -d /glade ]] ; then # We are on NCAR Yellowstone module load module_base.cheyenne
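A minimal usage sketch of the pieces this patch adds, assuming an 'emc' run
environment and an existing build on Orion (commands are illustrative, not
part of the change):

    # Link the Orion fixed directories.
    cd fix
    ./link_fixdirs.sh emc orion
    cd ..

    # Generate a grid with the new Orion driver (edit the grid specs
    # in the script first, then submit).
    cd driver_scripts
    sbatch driver_grid.orion.sh
    cd ..

    # Run a regression suite, e.g. chgres_cube, and watch the queue.
    cd reg_tests/chgres_cube
    ./driver.orion.sh
    squeue -u $LOGNAME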