diff --git a/.cicd/Jenkinsfile b/.cicd/Jenkinsfile index 64a45b1808..890e92b82b 100644 --- a/.cicd/Jenkinsfile +++ b/.cicd/Jenkinsfile @@ -13,7 +13,9 @@ pipeline { // choice(name: 'SRW_PLATFORM_FILTER', choices: ['all', 'cheyenne', 'gaea', 'hera', 'jet', 'orion', 'pclusternoaav2use1', 'azclusternoaav2eus1', 'gclusternoaav2usc1'], description: 'Specify the platform(s) to use') // Use the line below to enable the PW AWS cluster // choice(name: 'SRW_PLATFORM_FILTER', choices: ['all', 'cheyenne', 'gaea', 'hera', 'jet', 'orion', 'pclusternoaav2use1'], description: 'Specify the platform(s) to use') - choice(name: 'SRW_PLATFORM_FILTER', choices: ['all', 'cheyenne', 'gaea', 'hera', 'jet', 'orion'], description: 'Specify the platform(s) to use') + // Use the line below to enable hera + // choice(name: 'SRW_PLATFORM_FILTER', choices: ['all', 'cheyenne', 'gaea', 'hera', 'jet', 'orion'], description: 'Specify the platform(s) to use') + choice(name: 'SRW_PLATFORM_FILTER', choices: ['cheyenne', 'gaea', 'jet', 'orion'], description: 'Specify the platform(s) to use') // Allow job runner to filter based on compiler choice(name: 'SRW_COMPILER_FILTER', choices: ['all', 'gnu', 'intel'], description: 'Specify the compiler(s) to use to build') // Uncomment the following line to re-enable comprehensive tests diff --git a/.gitignore b/.gitignore index dafefc0695..6d4734c975 100644 --- a/.gitignore +++ b/.gitignore @@ -5,6 +5,7 @@ fix/ include/ lib/ share/ +modulefiles/extrn_comp_build/ sorc/*/ tests/WE2E/WE2E_test_info.csv tests/WE2E/*.txt diff --git a/Externals.cfg b/Externals.cfg index 04d60aa80c..8ecf73073a 100644 --- a/Externals.cfg +++ b/Externals.cfg @@ -12,7 +12,7 @@ protocol = git repo_url = https://github.com/ufs-community/ufs-weather-model # Specify either a branch name or a hash but not both. 
#branch = develop -hash = 84b28ec +hash = e051e0e local_path = sorc/ufs-weather-model required = True @@ -44,5 +44,23 @@ hash = 8d21f25 local_path = sorc/rrfs_utl required = True +[arl_nexus] +protocol = git +repo_url = https://github.com/noaa-oar-arl/NEXUS +# Specify either a branch name or a hash but not both. +#branch = develop +hash = 3842818 +local_path = sorc/arl_nexus +required = True + +[aqm-utils] +protocol = git +repo_url = https://github.com/NOAA-EMC/AQM-utils +# Specify either a branch name or a hash but not both. +#branch = develop +hash = 81a99ca +local_path = sorc/AQM-utils +required = True + [externals_description] schema_version = 1.0.0 diff --git a/devbuild.sh b/devbuild.sh index d31ed0eca7..f70797fb74 100755 --- a/devbuild.sh +++ b/devbuild.sh @@ -15,8 +15,8 @@ OPTIONS compiler to use; default depends on platform (e.g. intel | gnu | cray | gccgfortran) -a, --app=APPLICATION - weather model application to build - (e.g. ATM | ATMW | S2S | S2SW) + weather model application to build; for example, ATMAQ for Online-CMAQ + (e.g. ATM | ATMAQ | ATMW | S2S | S2SW) --ccpp="CCPP_SUITE1,CCPP_SUITE2..." CCPP suites (CCPP_SUITES) to include in build; delimited with ',' --enable-options="OPTION1,OPTION2,..." 
@@ -56,8 +56,7 @@ TARGETS all = builds all apps Or any combinations of (ufs, ufs_utils, upp, gsi, rrfs_utils) -NOTE: This script is for internal developer use only; -See User's Guide for detailed build instructions +NOTE: See User's Guide for detailed build instructions EOF_USAGE } @@ -87,6 +86,8 @@ Settings: BUILD_UPP=${BUILD_UPP} BUILD_GSI=${BUILD_GSI} BUILD_RRFS_UTILS=${BUILD_RRFS_UTILS} + BUILD_NEXUS=${BUILD_NEXUS} + BUILD_AQM_UTILS=${BUILD_AQM_UTILS} EOF_SETTINGS } @@ -123,6 +124,8 @@ BUILD_UFS_UTILS="off" BUILD_UPP="off" BUILD_GSI="off" BUILD_RRFS_UTILS="off" +BUILD_NEXUS="off" +BUILD_AQM_UTILS="off" # Make options CLEAN=false @@ -182,6 +185,8 @@ while :; do upp) DEFAULT_BUILD=false; BUILD_UPP="on" ;; gsi) DEFAULT_BUILD=false; BUILD_GSI="on" ;; rrfs_utils) DEFAULT_BUILD=false; BUILD_RRFS_UTILS="on" ;; + nexus) DEFAULT_BUILD=false; BUILD_NEXUS="on" ;; + aqm_utils) DEFAULT_BUILD=false; BUILD_AQM_UTILS="on" ;; # unknown -?*|?*) usage_error "Unknown option $1" ;; *) break @@ -189,17 +194,11 @@ while :; do shift done -# choose default apps to build -if [ "${DEFAULT_BUILD}" = true ]; then - BUILD_UFS="on" - BUILD_UFS_UTILS="on" - BUILD_UPP="on" -fi - # Ensure uppercase / lowercase ============================================ APPLICATION="${APPLICATION^^}" PLATFORM="${PLATFORM,,}" COMPILER="${COMPILER,,}" +EXTERNALS="${EXTERNALS^^}" # check if PLATFORM is set if [ -z $PLATFORM ] ; then @@ -207,11 +206,31 @@ if [ -z $PLATFORM ] ; then usage exit 0 fi - # set PLATFORM (MACHINE) MACHINE="${PLATFORM}" printf "PLATFORM(MACHINE)=${PLATFORM}\n" >&2 +# choose default apps to build +if [ "${DEFAULT_BUILD}" = true ]; then + BUILD_UFS="on" + BUILD_UFS_UTILS="on" + BUILD_UPP="on" +fi + +# Choose components to build for Online-CMAQ +if [ "${APPLICATION}" = "ATMAQ" ]; then + if [ "${DEFAULT_BUILD}" = true ]; then + BUILD_NEXUS="on" + BUILD_AQM_UTILS="on" + BUILD_UPP="off" + fi + if [ "${PLATFORM}" = "wcoss2" ]; then + BUILD_POST_STAT="on" + else + BUILD_POST_STAT="off" + fi +fi + 
set -eu # automatically determine compiler @@ -304,7 +323,8 @@ CMAKE_SETTINGS="\ -DBUILD_UFS_UTILS=${BUILD_UFS_UTILS}\ -DBUILD_UPP=${BUILD_UPP}\ -DBUILD_GSI=${BUILD_GSI}\ - -DBUILD_RRFS_UTILS=${BUILD_RRFS_UTILS}" + -DBUILD_NEXUS=${BUILD_NEXUS}\ + -DBUILD_AQM_UTILS=${BUILD_AQM_UTILS}" if [ ! -z "${APPLICATION}" ]; then CMAKE_SETTINGS="${CMAKE_SETTINGS} -DAPP=${APPLICATION}" @@ -318,6 +338,29 @@ fi if [ ! -z "${DISABLE_OPTIONS}" ]; then CMAKE_SETTINGS="${CMAKE_SETTINGS} -DDISABLE_OPTIONS=${DISABLE_OPTIONS}" fi +if [ "${APPLICATION}" = "ATMAQ" ]; then + CMAKE_SETTINGS="${CMAKE_SETTINGS} -DCPL_AQM=ON -DBUILD_POST_STAT=${BUILD_POST_STAT}" + + # Copy module files to designated directory + EXTRN_BUILD_MOD_DIR="${SRW_DIR}/modulefiles/extrn_comp_build" + mkdir -p ${EXTRN_BUILD_MOD_DIR} + if [ "${BUILD_UFS}" = "on" ]; then + cp "${SRW_DIR}/sorc/ufs-weather-model/modulefiles/ufs_${PLATFORM}.${COMPILER}.lua" "${EXTRN_BUILD_MOD_DIR}/mod_ufs-weather-model.lua" + cp "${SRW_DIR}/sorc/ufs-weather-model/modulefiles/ufs_common.lua" ${EXTRN_BUILD_MOD_DIR} + fi + if [ "${BUILD_UFS_UTILS}" = "on" ]; then + cp "${SRW_DIR}/sorc/UFS_UTILS/modulefiles/build.${PLATFORM}.${COMPILER}.lua" "${EXTRN_BUILD_MOD_DIR}/mod_ufs-utils.lua" + fi + if [ "${BUILD_UPP}" = "on" ]; then + cp "${SRW_DIR}/sorc/UPP/modulefiles/${PLATFORM}.lua" "${EXTRN_BUILD_MOD_DIR}/mod_upp.lua" + fi + if [ "${BUILD_NEXUS}" = "on" ]; then + cp "${SRW_DIR}/sorc/AQM-utils/parm/nexus_modulefiles/${PLATFORM}.${COMPILER}.lua" "${EXTRN_BUILD_MOD_DIR}/mod_nexus.lua" + fi + if [ "${BUILD_AQM_UTILS}" = "on" ]; then + cp "${SRW_DIR}/sorc/AQM-utils/modulefiles/build_${PLATFORM}.${COMPILER}.lua" "${EXTRN_BUILD_MOD_DIR}/mod_aqm-utils.lua" + fi +fi # make settings MAKE_SETTINGS="-j ${BUILD_JOBS}" @@ -393,6 +436,16 @@ if [ $USE_SUB_MODULES = true ]; then printf "... Loading RRFS_UTILS modules ...\n" load_module "" fi + if [ $BUILD_NEXUS = "on" ]; then + printf "... 
Loading NEXUS modules ...\n" + module use ${SRW_DIR}/sorc/arl_nexus/modulefiles + load_module "" + fi + if [ $BUILD_AQM_UTILS = "on" ]; then + printf "... Loading AQM-utils modules ...\n" + module use ${SRW_DIR}/sorc/AQM-utils/modulefiles + load_module "" + fi else module use ${SRW_DIR}/modulefiles module load ${MODULE_FILE} diff --git a/docs/UsersGuide/source/ConfigWorkflow.rst b/docs/UsersGuide/source/ConfigWorkflow.rst index 57d8151cbb..6d03dc0fdc 100644 --- a/docs/UsersGuide/source/ConfigWorkflow.rst +++ b/docs/UsersGuide/source/ConfigWorkflow.rst @@ -151,7 +151,7 @@ METplus Parameters * ``SS`` refers to the two-digit valid seconds of the hour ``CCPA_OBS_DIR``: (Default: "") - User-specified location of top-level directory where CCPA hourly precipitation files used by METplus are located. This parameter needs to be set for both user-provided observations and for observations that are retrieved from the NOAA :term:`HPSS` (if the user has access) via the ``get_obs_ccpa_tn`` task. (This task is activated in the workflow by setting ``RUN_TASK_GET_OBS_CCPA: true``). + User-specified location of top-level directory where CCPA hourly precipitation files used by METplus are located. This parameter needs to be set for both user-provided observations and for observations that are retrieved from the NOAA :term:`HPSS` (if the user has access) via the ``TN_GET_OBS_CCPA`` task. (This task is activated in the workflow by setting ``RUN_TASK_GET_OBS_CCPA: true``). METplus configuration files require the use of a predetermined directory structure and file names. If the CCPA files are user-provided, they need to follow the anticipated naming structure: ``{YYYYMMDD}/ccpa.t{HH}z.01h.hrap.conus.gb2``, where YYYYMMDD and HH are as described in the note :ref:`above `. When pulling observations from NOAA HPSS, the data retrieved will be placed in the ``CCPA_OBS_DIR`` directory. This path must be defind as ``//ccpa/proc``. 
METplus is configured to verify 01-, 03-, 06-, and 24-h accumulated precipitation using hourly CCPA files. @@ -159,7 +159,7 @@ METplus Parameters There is a problem with the valid time in the metadata for files valid from 19 - 00 UTC (i.e., files under the "00" directory). The script to pull the CCPA data from the NOAA HPSS (``scripts/exregional_get_obs_ccpa.sh``) has an example of how to account for this and organize the data into a more intuitive format. When a fix is provided, it will be accounted for in the ``exregional_get_obs_ccpa.sh`` script. ``MRMS_OBS_DIR``: (Default: "") - User-specified location of top-level directory where MRMS composite reflectivity files used by METplus are located. This parameter needs to be set for both user-provided observations and for observations that are retrieved from the NOAA :term:`HPSS` (if the user has access) via the ``get_obs_mrms_tn`` task (activated in the workflow by setting ``RUN_TASK_GET_OBS_MRMS: true``). When pulling observations directly from NOAA HPSS, the data retrieved will be placed in this directory. Please note, this path must be defind as ``//mrms/proc``. + User-specified location of top-level directory where MRMS composite reflectivity files used by METplus are located. This parameter needs to be set for both user-provided observations and for observations that are retrieved from the NOAA :term:`HPSS` (if the user has access) via the ``TN_GET_OBS_MRMS`` task (activated in the workflow by setting ``RUN_TASK_GET_OBS_MRMS: true``). When pulling observations directly from NOAA HPSS, the data retrieved will be placed in this directory. Please note, this path must be defined as ``//mrms/proc``. METplus configuration files require the use of a predetermined directory structure and file names.
Therefore, if the MRMS files are user-provided, they need to follow the anticipated naming structure: ``{YYYYMMDD}/MergedReflectivityQCComposite_00.50_{YYYYMMDD}-{HH}{mm}{SS}.grib2``, where YYYYMMDD and {HH}{mm}{SS} are as described in the note :ref:`above `. @@ -167,7 +167,7 @@ METplus Parameters METplus is configured to look for a MRMS composite reflectivity file for the valid time of the forecast being verified; since MRMS composite reflectivity files do not always exactly match the valid time, a script (within the main script that retrieves MRMS data from the NOAA HPSS) is used to identify and rename the MRMS composite reflectivity file to match the valid time of the forecast. The script to pull the MRMS data from the NOAA HPSS has an example of the expected file-naming structure: ``scripts/exregional_get_obs_mrms.sh``. This script calls the script used to identify the MRMS file closest to the valid time: ``ush/mrms_pull_topofhour.py``. ``NDAS_OBS_DIR``: (Default: "") - User-specified location of the top-level directory where NDAS prepbufr files used by METplus are located. This parameter needs to be set for both user-provided observations and for observations that are retrieved from the NOAA :term:`HPSS` (if the user has access) via the ``get_obs_ndas_tn`` task (activated in the workflow by setting ``RUN_TASK_GET_OBS_NDAS: true``). When pulling observations directly from NOAA HPSS, the data retrieved will be placed in this directory. Please note, this path must be defined as ``//ndas/proc``. METplus is configured to verify near-surface variables hourly and upper-air variables at 00 and 12 UTC with NDAS prepbufr files. + User-specified location of the top-level directory where NDAS prepbufr files used by METplus are located. 
This parameter needs to be set for both user-provided observations and for observations that are retrieved from the NOAA :term:`HPSS` (if the user has access) via the ``TN_GET_OBS_NDAS`` task (activated in the workflow by setting ``RUN_TASK_GET_OBS_NDAS: true``). When pulling observations directly from NOAA HPSS, the data retrieved will be placed in this directory. Please note, this path must be defined as ``//ndas/proc``. METplus is configured to verify near-surface variables hourly and upper-air variables at 00 and 12 UTC with NDAS prepbufr files. METplus configuration files require the use of predetermined file names. Therefore, if the NDAS files are user-provided, they need to follow the anticipated naming structure: ``prepbufr.ndas.{YYYYMMDDHH}``, where YYYYMMDDHH is as described in the note :ref:`above `. The script to pull the NDAS data from the NOAA HPSS (``scripts/exregional_get_obs_ndas.sh``) has an example of how to rename the NDAS data into a more intuitive format with the valid time listed in the file name. @@ -215,7 +215,7 @@ Directory Parameters ----------------------- ``EXPT_BASEDIR``: (Default: "") - The full path to the base directory in which the experiment directory (``EXPT_SUBDIR``) will be created. If this is not specified or if it is set to an empty string, it will default to ``${HOMEdir}/../expt_dirs``, where ``${HOMEdir}`` contains the full path to the ``ufs-srweather-app`` directory. + The full path to the base directory in which the experiment directory (``EXPT_SUBDIR``) will be created. If this is not specified or if it is set to an empty string, it will default to ``${HOMEdir}/../expt_dirs``, where ``${HOMEdir}`` contains the full path to the ``ufs-srweather-app`` directory. If set to a relative path, the provided path will be appended to the default value ``${HOMEdir}/../expt_dirs``. For example, if ``EXPT_BASEDIR=some/relative/path`` (i.e. 
a path that does not begin with ``/``), the value of ``EXPT_BASEDIR`` used by the workflow will be ``EXPT_BASEDIR=${HOMEdir}/../expt_dirs/some/relative/path``. ``EXPT_SUBDIR``: (Default: "") The user-designated name of the experiment directory (*not* its full path). The full path to the experiment directory, which will be contained in the variable ``EXPTDIR``, will be: @@ -285,7 +285,7 @@ Set File Name Parameters Name of the file (a shell script) containing definitions of the primary and secondary experiment variables (parameters). This file is sourced by many scripts (e.g., the J-job scripts corresponding to each workflow task) in order to make all the experiment variables available in those scripts. The primary variables are defined in the default configuration script (``config_defaults.yaml``) and in ``config.yaml``. The secondary experiment variables are generated by the experiment generation script. ``EXTRN_MDL_VAR_DEFNS_FN``: (Default: "extrn_mdl_var_defns") - Name of the file (a shell script) containing the definitions of variables associated with the external model from which :term:`ICs` or :term:`LBCs` are generated. This file is created by the ``GET_EXTRN_*_TN`` task because the values of the variables it contains are not known before this task runs. The file is then sourced by the ``MAKE_ICS_TN`` and ``MAKE_LBCS_TN`` tasks. + Name of the file (a shell script) containing the definitions of variables associated with the external model from which :term:`ICs` or :term:`LBCs` are generated. This file is created by the ``TN_GET_EXTRN_*`` task because the values of the variables it contains are not known before this task runs. The file is then sourced by the ``TN_MAKE_ICS`` and ``TN_MAKE_LBCS`` tasks. ``WFLOW_LAUNCH_SCRIPT_FN``: (Default: "launch_FV3LAM_wflow.sh") Name of the script that can be used to (re)launch the experiment's Rocoto workflow. 
@@ -386,17 +386,17 @@ Verification Parameters ``GET_OBS``: (Default: "get_obs") Set the name of the Rocoto workflow task used to load proper module files for ``GET_OBS_*`` tasks. Users typically do not need to change this value. -``VX_TN``: (Default: "run_vx") +``TN_VX``: (Default: "run_vx") Set the name of the Rocoto workflow task used to load proper module files for ``VX_*`` tasks. Users typically do not need to change this value. -``VX_ENSGRID_TN``: (Default: "run_ensgridvx") +``TN_VX_ENSGRID``: (Default: "run_ensgridvx") Set the name of the Rocoto workflow task that runs METplus grid-to-grid ensemble verification for 1-h accumulated precipitation. Users typically do not need to change this value. -``VX_ENSGRID_PROB_REFC_TN``: (Default: "run_ensgridvx_prob_refc") +``TN_VX_ENSGRID_PROB_REFC``: (Default: "run_ensgridvx_prob_refc") Set the name of the Rocoto workflow task that runs METplus grid-to-grid verification for ensemble probabilities for composite reflectivity. Users typically do not need to change this value. ``MAXTRIES_VX_ENSGRID_PROB_REFC``: (Default: 1) - Maximum number of times to attempt ``VX_ENSGRID_PROB_REFC_TN``. + Maximum number of times to attempt ``TN_VX_ENSGRID_PROB_REFC``. .. _NCOModeParms: @@ -410,7 +410,7 @@ A standard set of environment variables has been established for *nco* mode to s Only *community* mode is fully supported for this release. *nco* mode is used by those at the Environmental Modeling Center (EMC) and Global Systems Laboratory (GSL) who are working on pre-implementation operational testing. Other users should run the SRW App in *community* mode. ``envir, NET, model_ver, RUN``: - Standard environment variables defined in the NCEP Central Operations WCOSS Implementation Standards document. These variables are used in forming the path to various directories containing input, output, and workflow files. The variables are defined in the `WCOSS Implementation Standards `__ document (pp. 
4-5) as follows: + Standard environment variables defined in the NCEP Central Operations WCOSS Implementation Standards document. These variables are used in forming the path to various directories containing input, output, and workflow files. The variables are defined in the `WCOSS Implementation Standards `__ document (pp. 4-5) as follows: ``envir``: (Default: "para") Set to "test" during the initial testing phase, "para" when running in parallel (on a schedule), and "prod" in production. @@ -427,40 +427,42 @@ A standard set of environment variables has been established for *nco* mode to s ``OPSROOT``: (Default: "") The operations root directory in *nco* mode. +.. _workflow-switches: + WORKFLOW SWITCHES Configuration Parameters ============================================= -These parameters set flags that determine whether various workflow tasks should be run. When non-default parameters are selected for the variables in this section, they should be added to the ``workflow_switches:`` section of the ``config.yaml`` file. Note that the ``MAKE_GRID_TN``, ``MAKE_OROG_TN``, and ``MAKE_SFC_CLIMO_TN`` are all :term:`cycle-independent` tasks, i.e., if they are run, they only run once at the beginning of the workflow before any cycles are run. +These parameters set flags that determine whether various workflow tasks should be run. When non-default parameters are selected for the variables in this section, they should be added to the ``workflow_switches:`` section of the ``config.yaml`` file. Note that the ``TN_MAKE_GRID``, ``TN_MAKE_OROG``, and ``TN_MAKE_SFC_CLIMO`` are all :term:`cycle-independent` tasks, i.e., if they are run, they only run once at the beginning of the workflow before any cycles are run. Baseline Workflow Tasks -------------------------- ``RUN_TASK_MAKE_GRID``: (Default: true) - Flag that determines whether to run the grid file generation task (``MAKE_GRID_TN``). If this is set to true, the grid generation task is run and new grid files are generated. 
If it is set to false, then the scripts look for pre-generated grid files in the directory specified by ``GRID_DIR`` (see :numref:`Section %s ` below). Valid values: ``True`` | ``False`` + Flag that determines whether to run the grid file generation task (``TN_MAKE_GRID``). If this is set to true, the grid generation task is run and new grid files are generated. If it is set to false, then the scripts look for pre-generated grid files in the directory specified by ``GRID_DIR`` (see :numref:`Section %s ` below). Valid values: ``True`` | ``False`` ``RUN_TASK_MAKE_OROG``: (Default: true) - Same as ``RUN_TASK_MAKE_GRID`` but for the orography generation task (``MAKE_OROG_TN``). Flag that determines whether to run the orography file generation task (``MAKE_OROG_TN``). If this is set to true, the orography generation task is run and new orography files are generated. If it is set to false, then the scripts look for pre-generated orography files in the directory specified by ``OROG_DIR`` (see :numref:`Section %s ` below). Valid values: ``True`` | ``False`` + Same as ``RUN_TASK_MAKE_GRID`` but for the orography generation task (``TN_MAKE_OROG``). Flag that determines whether to run the orography file generation task (``TN_MAKE_OROG``). If this is set to true, the orography generation task is run and new orography files are generated. If it is set to false, then the scripts look for pre-generated orography files in the directory specified by ``OROG_DIR`` (see :numref:`Section %s ` below). Valid values: ``True`` | ``False`` ``RUN_TASK_MAKE_SFC_CLIMO``: (Default: true) - Same as ``RUN_TASK_MAKE_GRID`` but for the surface climatology generation task (``MAKE_SFC_CLIMO_TN``). Flag that determines whether to run the surface climatology file generation task (``MAKE_SFC_CLIMO_TN``). If this is set to true, the surface climatology generation task is run and new surface climatology files are generated. 
If it is set to false, then the scripts look for pre-generated surface climatology files in the directory specified by ``SFC_CLIMO_DIR`` (see :numref:`Section %s ` below). Valid values: ``True`` | ``False`` + Same as ``RUN_TASK_MAKE_GRID`` but for the surface climatology generation task (``TN_MAKE_SFC_CLIMO``). Flag that determines whether to run the surface climatology file generation task (``TN_MAKE_SFC_CLIMO``). If this is set to true, the surface climatology generation task is run and new surface climatology files are generated. If it is set to false, then the scripts look for pre-generated surface climatology files in the directory specified by ``SFC_CLIMO_DIR`` (see :numref:`Section %s ` below). Valid values: ``True`` | ``False`` ``RUN_TASK_GET_EXTRN_ICS``: (Default: true) - Flag that determines whether to run the ``GET_EXTRN_ICS_TN`` task. + Flag that determines whether to run the ``TN_GET_EXTRN_ICS`` task. ``RUN_TASK_GET_EXTRN_LBCS``: (Default: true) - Flag that determines whether to run the ``GET_EXTRN_LBCS_TN`` task. + Flag that determines whether to run the ``TN_GET_EXTRN_LBCS`` task. ``RUN_TASK_MAKE_ICS``: (Default: true) - Flag that determines whether to run the ``MAKE_ICS_TN`` task. + Flag that determines whether to run the ``TN_MAKE_ICS`` task. ``RUN_TASK_MAKE_LBCS``: (Default: true) - Flag that determines whether to run the ``MAKE_LBCS_TN`` task. + Flag that determines whether to run the ``TN_MAKE_LBCS`` task. ``RUN_TASK_RUN_FCST``: (Default: true) - Flag that determines whether to run the ``RUN_FCST_TN`` task. + Flag that determines whether to run the ``TN_RUN_FCST`` task. ``RUN_TASK_RUN_POST``: (Default: true) - Flag that determines whether to run the ``RUN_POST_TN`` task. Valid values: ``True`` | ``False`` + Flag that determines whether to run the ``TN_RUN_POST`` task. Valid values: ``True`` | ``False`` .. 
_VXTasks: @@ -468,13 +470,13 @@ Verification Tasks -------------------- ``RUN_TASK_GET_OBS_CCPA``: (Default: false) - Flag that determines whether to run the ``GET_OBS_CCPA_TN`` task, which retrieves the :term:`CCPA` hourly precipitation files used by METplus from NOAA :term:`HPSS`. See :numref:`Section %s ` for additional parameters related to this task. + Flag that determines whether to run the ``TN_GET_OBS_CCPA`` task, which retrieves the :term:`CCPA` hourly precipitation files used by METplus from NOAA :term:`HPSS`. See :numref:`Section %s ` for additional parameters related to this task. ``RUN_TASK_GET_OBS_MRMS``: (Default: false) - Flag that determines whether to run the ``GET_OBS_MRMS_TN`` task, which retrieves the :term:`MRMS` composite reflectivity files used by METplus from NOAA HPSS. See :numref:`Section %s ` for additional parameters related to this task. + Flag that determines whether to run the ``TN_GET_OBS_MRMS`` task, which retrieves the :term:`MRMS` composite reflectivity files used by METplus from NOAA HPSS. See :numref:`Section %s ` for additional parameters related to this task. ``RUN_TASK_GET_OBS_NDAS``: (Default: false) - Flag that determines whether to run the ``GET_OBS_NDAS_TN`` task, which retrieves the :term:`NDAS` PrepBufr files used by METplus from NOAA HPSS. See :numref:`Section %s ` for additional parameters related to this task. + Flag that determines whether to run the ``TN_GET_OBS_NDAS`` task, which retrieves the :term:`NDAS` PrepBufr files used by METplus from NOAA HPSS. See :numref:`Section %s ` for additional parameters related to this task. ``RUN_TASK_VX_GRIDSTAT``: (Default: false) Flag that determines whether to run the grid-stat verification task. The :ref:`MET Grid-Stat tool ` provides verification statistics for a matched forecast and observation grid. See :numref:`Section %s ` for additional parameters related to this task. 
Valid values: ``True`` | ``False`` @@ -508,7 +510,7 @@ Basic Task Parameters For each workflow task, certain parameter values must be passed to the job scheduler (e.g., Slurm), which submits a job for the task. Typically, users do not need to adjust the default values. - ``MAKE_GRID_TN``: (Default: "make_grid") + ``TN_MAKE_GRID``: (Default: "make_grid") Set the name of this :term:`cycle-independent` Rocoto workflow task. Users typically do not need to change this value. ``NNODES_MAKE_GRID``: (Default: 1) @@ -628,7 +630,7 @@ MAKE_OROG Configuration Parameters Non-default parameters for the ``make_orog`` task are set in the ``task_make_orog:`` section of the ``config.yaml`` file. -``MAKE_OROG_TN``: (Default: "make_orog") +``TN_MAKE_OROG``: (Default: "make_orog") Set the name of this :term:`cycle-independent` Rocoto workflow task. Users typically do not need to change this value. ``NNODES_MAKE_OROG``: (Default: 1) @@ -653,7 +655,7 @@ Non-default parameters for the ``make_orog`` task are set in the ``task_make_oro Controls the size of the stack for threads created by the OpenMP implementation. ``OROG_DIR``: (Default: "") - The directory containing pre-generated orography files to use when ``MAKE_OROG_TN`` is set to false. + The directory containing pre-generated orography files to use when ``TN_MAKE_OROG`` is set to false. .. _make-sfc-climo: @@ -662,7 +664,7 @@ MAKE_SFC_CLIMO Configuration Parameters Non-default parameters for the ``make_sfc_climo`` task are set in the ``task_make_sfc_climo:`` section of the ``config.yaml`` file. -``MAKE_SFC_CLIMO_TN``: "make_sfc_climo" +``TN_MAKE_SFC_CLIMO``: "make_sfc_climo" Set the name of this :term:`cycle-independent` Rocoto workflow task. Users typically do not need to change this value. ``NNODES_MAKE_SFC_CLIMO``: (Default: 2) @@ -687,7 +689,9 @@ Non-default parameters for the ``make_sfc_climo`` task are set in the ``task_mak Controls the size of the stack for threads created by the OpenMP implementation. 
``SFC_CLIMO_DIR``: (Default: "") - The directory containing pre-generated surface climatology files to use when ``MAKE_SFC_CLIMO_TN`` is set to false. + The directory containing pre-generated surface climatology files to use when ``TN_MAKE_SFC_CLIMO`` is set to false. + +.. _task_get_extrn_ics: GET_EXTRN_ICS Configuration Parameters ========================================= @@ -701,7 +705,7 @@ Basic Task Parameters For each workflow task, certain parameter values must be passed to the job scheduler (e.g., Slurm), which submits a job for the task. -``GET_EXTRN_ICS_TN``: (Default: "get_extrn_ics") +``TN_GET_EXTRN_ICS``: (Default: "get_extrn_ics") Set the name of this Rocoto workflow task. Users typically do not need to change this value. ``NNODES_GET_EXTRN_ICS``: (Default: 1) @@ -717,7 +721,7 @@ For each workflow task, certain parameter values must be passed to the job sched Maximum number of times to attempt the task. ``EXTRN_MDL_NAME_ICS``: (Default: "FV3GFS") - The name of the external model that will provide fields from which initial condition (IC) files, surface files, and 0-th hour boundary condition files will be generated for input into the forecast model. Valid values: ``"GSMGFS"`` | ``"FV3GFS"`` | ``"RAP"`` | ``"HRRR"`` | ``"NAM"`` + The name of the external model that will provide fields from which initial condition (IC) files, surface files, and 0-th hour boundary condition files will be generated for input into the forecast model. Valid values: ``"GSMGFS"`` | ``"FV3GFS"`` | ``"GEFS"`` | ``"GDAS"`` | ``"RAP"`` | ``"HRRR"`` | ``"NAM"`` ``EXTRN_MDL_ICS_OFFSET_HRS``: (Default: 0) Users may wish to start a forecast using forecast data from a previous cycle of an external model. This variable indicates how many hours earlier the external model started than the FV3 forecast configured here. For example, if the forecast should start from a 6-hour forecast of the GFS, then ``EXTRN_MDL_ICS_OFFSET_HRS: "6"``. 
@@ -770,6 +774,7 @@ Set parameters associated with NOMADS online data. ``NOMADS_file_type``: (Default: "nemsio") Flag controlling the format of the data. Valid values: ``"GRIB2"`` | ``"grib2"`` | ``"NEMSIO"`` | ``"nemsio"`` +.. _task_get_extrn_lbcs: GET_EXTRN_LBCS Configuration Parameters ========================================== @@ -783,7 +788,7 @@ Basic Task Parameters For each workflow task, certain parameter values must be passed to the job scheduler (e.g., Slurm), which submits a job for the task. -``GET_EXTRN_LBCS_TN``: (Default: "get_extrn_lbcs") +``TN_GET_EXTRN_LBCS``: (Default: "get_extrn_lbcs") Set the name of this Rocoto workflow task. Users typically do not need to change this value. ``NNODES_GET_EXTRN_LBCS``: (Default: 1) @@ -799,7 +804,7 @@ For each workflow task, certain parameter values must be passed to the job sched Maximum number of times to attempt the task. ``EXTRN_MDL_NAME_LBCS``: (Default: "FV3GFS") - The name of the external model that will provide fields from which lateral boundary condition (LBC) files (except for the 0-th hour LBC file) will be generated for input into the forecast model. Valid values: ``"GSMGFS"`` | ``"FV3GFS"`` | ``"RAP"`` | ``"HRRR"`` | ``"NAM"`` + The name of the external model that will provide fields from which lateral boundary condition (LBC) files (except for the 0-th hour LBC file) will be generated for input into the forecast model. Valid values: ``"GSMGFS"`` | ``"FV3GFS"`` | ``"GEFS"`` | ``"GDAS"`` | ``"RAP"`` | ``"HRRR"`` | ``"NAM"`` ``LBC_SPEC_INTVL_HRS``: (Default: "6") The interval (in integer hours) at which LBC files will be generated. This is also referred to as the *boundary update interval*. Note that the model selected in ``EXTRN_MDL_NAME_LBCS`` must have data available at a frequency greater than or equal to that implied by ``LBC_SPEC_INTVL_HRS``. For example, if ``LBC_SPEC_INTVL_HRS`` is set to "6", then the model must have data available at least every 6 hours. 
It is up to the user to ensure that this is the case. @@ -854,7 +859,7 @@ Basic Task Parameters For each workflow task, certain parameter values must be passed to the job scheduler (e.g., Slurm), which submits a job for the task. -``MAKE_ICS_TN``: (Default: "make_ics") +``TN_MAKE_ICS``: (Default: "make_ics") Set the name of this Rocoto workflow task. Users typically do not need to change this value. ``NNODES_MAKE_ICS``: (Default: 4) @@ -881,7 +886,7 @@ For each workflow task, certain parameter values must be passed to the job sched FVCOM Parameter ------------------- ``USE_FVCOM``: (Default: false) - Flag that specifies whether to update surface conditions in FV3-:term:`LAM` with fields generated from the Finite Volume Community Ocean Model (:term:`FVCOM`). If set to true, lake/sea surface temperatures, ice surface temperatures, and ice placement will be overwritten using data provided by FVCOM. Setting ``USE_FVCOM`` to true causes the executable ``process_FVCOM.exe`` in the ``MAKE_ICS_TN`` task to run. This, in turn, modifies the file ``sfc_data.nc`` generated by ``chgres_cube`` during the ``make_ics`` task. Note that the FVCOM data must already be interpolated to the desired FV3-LAM grid. Valid values: ``True`` | ``False`` + Flag that specifies whether to update surface conditions in FV3-:term:`LAM` with fields generated from the Finite Volume Community Ocean Model (:term:`FVCOM`). If set to true, lake/sea surface temperatures, ice surface temperatures, and ice placement will be overwritten using data provided by FVCOM. Setting ``USE_FVCOM`` to true causes the executable ``process_FVCOM.exe`` in the ``TN_MAKE_ICS`` task to run. This, in turn, modifies the file ``sfc_data.nc`` generated by ``chgres_cube`` during the ``make_ics`` task. Note that the FVCOM data must already be interpolated to the desired FV3-LAM grid. Valid values: ``True`` | ``False`` ``FVCOM_WCSTART``: (Default: "cold") Define if this is a "warm" start or a "cold" start. 
Setting this to "warm" will read in ``sfc_data.nc`` generated in a RESTART directory. Setting this to "cold" will read in the ``sfc_data.nc`` generated from ``chgres_cube`` in the ``make_ics`` portion of the workflow. Valid values: ``"cold"`` | ``"COLD"`` | ``"warm"`` | ``"WARM"`` @@ -898,7 +903,7 @@ MAKE_LBCS Configuration Parameters Non-default parameters for the ``make_lbcs`` task are set in the ``task_make_lbcs:`` section of the ``config.yaml`` file. -``MAKE_LBCS_TN``: (Default: "make_lbcs") +``TN_MAKE_LBCS``: (Default: "make_lbcs") Set the name of this Rocoto workflow task. Users typically do not need to change this value. ``NNODES_MAKE_LBCS``: (Default: 4) @@ -934,7 +939,7 @@ Basic Task Parameters For each workflow task, certain parameter values must be passed to the job scheduler (e.g., Slurm), which submits a job for the task. -``RUN_FCST_TN``: (Default: "run_fcst") +``TN_RUN_FCST``: (Default: "run_fcst") Set the name of this Rocoto workflow task. Users typically do not need to change this value. ``NNODES_RUN_FCST``: (Default: "") @@ -1008,14 +1013,14 @@ Write-Component (Quilting) Parameters ----------------------------------------- .. note:: - The :term:`UPP` (called by the ``RUN_POST_TN`` task) cannot process output on the native grid types ("GFDLgrid" and "ESGgrid"), so output fields are interpolated to a **write component grid** before writing them to an output file. The output files written by the UFS Weather Model use an Earth System Modeling Framework (:term:`ESMF`) component, referred to as the **write component**. This model component is configured with settings in the ``model_configure`` file, as described in `Section 4.2.3 `__ of the UFS Weather Model documentation. + The :term:`UPP` (called by the ``TN_RUN_POST`` task) cannot process output on the native grid types ("GFDLgrid" and "ESGgrid"), so output fields are interpolated to a **write component grid** before writing them to an output file. 
The output files written by the UFS Weather Model use an Earth System Modeling Framework (:term:`ESMF`) component, referred to as the **write component**. This model component is configured with settings in the ``model_configure`` file, as described in `Section 4.2.3 `__ of the UFS Weather Model documentation. ``QUILTING``: (Default: true) .. attention:: The regional grid requires the use of the write component, so users generally should not need to change the default value for ``QUILTING``. - Flag that determines whether to use the write component for writing forecast output files to disk. If set to true, the forecast model will output files named ``dynf$HHH.nc`` and ``phyf$HHH.nc`` (where ``HHH`` is the 3-digit forecast hour) containing dynamics and physics fields, respectively, on the write-component grid. For example, the output files for the 3rd hour of the forecast would be ``dynf$003.nc`` and ``phyf$003.nc``. (The regridding from the native FV3-LAM grid to the write-component grid is done by the forecast model.) If ``QUILTING`` is set to false, then the output file names are ``fv3_history.nc`` and ``fv3_history2d.nc``, and they contain fields on the native grid. Although the UFS Weather Model can run without quilting, the regional grid requires the use of the write component. Therefore, QUILTING should be set to true when running the SRW App. If ``QUILTING`` is set to false, the ``RUN_POST_TN`` (meta)task cannot run because the :term:`UPP` code called by this task cannot process fields on the native grid. In that case, the ``RUN_POST_TN`` (meta)task will be automatically removed from the Rocoto workflow XML. The :ref:`INLINE POST ` option also requires ``QUILTING`` to be set to true in the SRW App. Valid values: ``True`` | ``False`` + Flag that determines whether to use the write component for writing forecast output files to disk. 
If set to true, the forecast model will output files named ``dynf$HHH.nc`` and ``phyf$HHH.nc`` (where ``HHH`` is the 3-digit forecast hour) containing dynamics and physics fields, respectively, on the write-component grid. For example, the output files for the 3rd hour of the forecast would be ``dynf$003.nc`` and ``phyf$003.nc``. (The regridding from the native FV3-LAM grid to the write-component grid is done by the forecast model.) If ``QUILTING`` is set to false, then the output file names are ``fv3_history.nc`` and ``fv3_history2d.nc``, and they contain fields on the native grid. Although the UFS Weather Model can run without quilting, the regional grid requires the use of the write component. Therefore, QUILTING should be set to true when running the SRW App. If ``QUILTING`` is set to false, the ``TN_RUN_POST`` (meta)task cannot run because the :term:`UPP` code called by this task cannot process fields on the native grid. In that case, the ``TN_RUN_POST`` (meta)task will be automatically removed from the Rocoto workflow XML. The :ref:`INLINE POST ` option also requires ``QUILTING`` to be set to true in the SRW App. Valid values: ``True`` | ``False`` ``PRINT_ESMF``: (Default: false) Flag that determines whether to output extra (debugging) information from :term:`ESMF` routines. Note that the write component uses ESMF library routines to interpolate from the native forecast model grid to the user-specified output grid (which is defined in the model configuration file ``model_configure`` in the forecast run directory). Valid values: ``True`` | ``False`` @@ -1162,7 +1167,7 @@ Basic Task Parameters For each workflow task, certain parameter values must be passed to the job scheduler (e.g., Slurm), which submits a job for the task. -``RUN_POST_TN``: (Default: "run_post") +``TN_RUN_POST``: (Default: "run_post") Set the name of this Rocoto workflow task. Users typically do not need to change this value. 
``NNODES_RUN_POST``: (Default: 2) @@ -1224,7 +1229,7 @@ GET_OBS_CCPA Configuration Parameters Non-default parameters for the ``get_obs_ccpa`` task are set in the ``task_get_obs_ccpa:`` section of the ``config.yaml`` file. -``GET_OBS_CCPA_TN``: (Default: "get_obs_ccpa") +``TN_GET_OBS_CCPA``: (Default: "get_obs_ccpa") Set the name of this Rocoto workflow task. Users typically do not need to change this value. See :numref:`Section %s ` for more information about the verification tasks. ``NNODES_GET_OBS_CCPA``: (Default: 1) @@ -1246,7 +1251,7 @@ GET_OBS_MRMS Configuration Parameters Non-default parameters for the ``get_obs_mrms`` task are set in the ``task_get_obs_mrms:`` section of the ``config.yaml`` file. See :numref:`Section %s ` for more information about the verification tasks. -``GET_OBS_MRMS_TN``: (Default: "get_obs_mrms") +``TN_GET_OBS_MRMS``: (Default: "get_obs_mrms") Set the name of this Rocoto workflow task. Users typically do not need to change this value. ``NNODES_GET_OBS_MRMS``: (Default: 1) @@ -1268,7 +1273,7 @@ GET_OBS_NDAS Configuration Parameters Non-default parameters for the ``get_obs_ndas`` task are set in the ``task_get_obs_ndas:`` section of the ``config.yaml`` file. See :numref:`Section %s ` for more information about the verification tasks. -``GET_OBS_NDAS_TN``: (Default: "get_obs_ndas") +``TN_GET_OBS_NDAS``: (Default: "get_obs_ndas") Set the name of this Rocoto workflow task. Users typically do not need to change this value. ``NNODES_GET_OBS_NDAS``: (Default: 1) @@ -1291,7 +1296,7 @@ VX_GRIDSTAT Configuration Parameters Non-default parameters for the ``run_gridstatvx`` task are set in the ``task_run_vx_gridstat:`` section of the ``config.yaml`` file. -``VX_GRIDSTAT_TN``: (Default: "run_gridstatvx") +``TN_VX_GRIDSTAT``: (Default: "run_gridstatvx") Set the name of this Rocoto workflow task. Users typically do not need to change this value. 
``NNODES_VX_GRIDSTAT``: (Default: 1) @@ -1312,7 +1317,7 @@ VX_GRIDSTAT_REFC Configuration Parameters Non-default parameters for the ``run_gridstatvx_refc`` task are set in the ``task_run_vx_gridstat_refc:`` section of the ``config.yaml`` file. -``VX_GRIDSTAT_REFC_TN``: (Default: "run_gridstatvx_refc") +``TN_VX_GRIDSTAT_REFC``: (Default: "run_gridstatvx_refc") Set the name of this Rocoto workflow task. Users typically do not need to change this value. ``NNODES_VX_GRIDSTAT``: (Default: 1) @@ -1333,7 +1338,7 @@ VX_GRIDSTAT_RETOP Configuration Parameters Non-default parameters for the ``run_gridstatvx_retop`` task are set in the ``task_run_vx_gridstat_retop:`` section of the ``config.yaml`` file. -``VX_GRIDSTAT_RETOP_TN``: (Default: "run_gridstatvx_retop") +``TN_VX_GRIDSTAT_RETOP``: (Default: "run_gridstatvx_retop") Set the name of this Rocoto workflow task. Users typically do not need to change this value. ``NNODES_VX_GRIDSTAT``: (Default: 1) @@ -1354,7 +1359,7 @@ VX_GRIDSTAT_03h Configuration Parameters Non-default parameters for the ``run_gridstatvx_03h`` task are set in the ``task_run_vx_gridstat_03h:`` section of the ``config.yaml`` file. -``VX_GRIDSTAT_03h_TN``: (Default: "run_gridstatvx_03h") +``TN_VX_GRIDSTAT_03h``: (Default: "run_gridstatvx_03h") Set the name of this Rocoto workflow task. Users typically do not need to change this value. ``NNODES_VX_GRIDSTAT``: (Default: 1) @@ -1375,7 +1380,7 @@ VX_GRIDSTAT_06h Configuration Parameters Non-default parameters for the ``run_gridstatvx_06h`` task are set in the ``task_run_vx_gridstat_06h:`` section of the ``config.yaml`` file. -``VX_GRIDSTAT_06h_TN``: (Default: "run_gridstatvx_06h") +``TN_VX_GRIDSTAT_06h``: (Default: "run_gridstatvx_06h") Set the name of this Rocoto workflow task. Users typically do not need to change this value. 
``NNODES_VX_GRIDSTAT``: (Default: 1) @@ -1396,7 +1401,7 @@ VX_GRIDSTAT_24h Configuration Parameters Non-default parameters for the ``run_gridstatvx_24h`` task are set in the ``task_run_vx_gridstat_24h:`` section of the ``config.yaml`` file. -``VX_GRIDSTAT_24h_TN``: (Default: "run_gridstatvx_24h") +``TN_VX_GRIDSTAT_24h``: (Default: "run_gridstatvx_24h") Set the name of this Rocoto workflow task. Users typically do not need to change this value. ``NNODES_VX_GRIDSTAT``: (Default: 1) @@ -1418,7 +1423,7 @@ VX_POINTSTAT Configuration Parameters Non-default parameters for the ``run_pointstatvx`` task are set in the ``task_run_vx_pointstat:`` section of the ``config.yaml`` file. -``VX_POINTSTAT_TN``: (Default: "run_pointstatvx") +``TN_VX_POINTSTAT``: (Default: "run_pointstatvx") Set the name of this Rocoto workflow task. Users typically do not need to change this value. ``NNODES_VX_POINTSTAT``: (Default: 1) @@ -1440,31 +1445,31 @@ VX_ENSGRID Configuration Parameters Non-default parameters for the ``run_ensgridvx_*`` tasks are set in the ``task_run_vx_ensgrid:`` section of the ``config.yaml`` file. -``VX_ENSGRID_03h_TN``: (Default: "run_ensgridvx_03h") +``TN_VX_ENSGRID_03h``: (Default: "run_ensgridvx_03h") Set the name of this Rocoto workflow task. Users typically do not need to change this value. ``MAXTRIES_VX_ENSGRID_03h``: (Default: 1) Maximum number of times to attempt the task. -``VX_ENSGRID_06h_TN``: (Default: "run_ensgridvx_06h") +``TN_VX_ENSGRID_06h``: (Default: "run_ensgridvx_06h") Set the name of this Rocoto workflow task. Users typically do not need to change this value. ``MAXTRIES_VX_ENSGRID_06h``: (Default: 1) Maximum number of times to attempt the task. -``VX_ENSGRID_24h_TN``: (Default: "run_ensgridvx_24h") +``TN_VX_ENSGRID_24h``: (Default: "run_ensgridvx_24h") Set the name of this Rocoto workflow task. Users typically do not need to change this value. ``MAXTRIES_VX_ENSGRID_24h``: (Default: 1) Maximum number of times to attempt the task. 
-``VX_ENSGRID_RETOP_TN``: (Default: "run_ensgridvx_retop") +``TN_VX_ENSGRID_RETOP``: (Default: "run_ensgridvx_retop") Set the name of this Rocoto workflow task. Users typically do not need to change this value. ``MAXTRIES_VX_ENSGRID_RETOP``: (Default: 1) Maximum number of times to attempt the task. -``VX_ENSGRID_PROB_RETOP_TN``: (Default: "run_ensgridvx_prob_retop") +``TN_VX_ENSGRID_PROB_RETOP``: (Default: "run_ensgridvx_prob_retop") Set the name of this Rocoto workflow task. Users typically do not need to change this value. ``MAXTRIES_VX_ENSGRID_PROB_RETOP``: (Default: 1) @@ -1488,7 +1493,7 @@ VX_ENSGRID_REFC Configuration Parameters Non-default parameters for the ``run_ensgridvx_refc`` task are set in the ``task_run_vx_ensgrid_refc:`` section of the ``config.yaml`` file. -``VX_ENSGRID_REFC_TN``: (Default: "run_ensgridvx_refc") +``TN_VX_ENSGRID_REFC``: (Default: "run_ensgridvx_refc") Set the name of this Rocoto workflow task. Users typically do not need to change this value. ``NNODES_VX_ENSGRID``: (Default: 1) @@ -1509,7 +1514,7 @@ VX_ENSGRID_MEAN Configuration Parameters Non-default parameters for the ``run_ensgridvx_mean`` task are set in the ``task_run_vx_ensgrid_mean:`` section of the ``config.yaml`` file. -``VX_ENSGRID_MEAN_TN``: (Default: "run_ensgridvx_mean") +``TN_VX_ENSGRID_MEAN``: (Default: "run_ensgridvx_mean") Set the name of this Rocoto workflow task. Users typically do not need to change this value. ``NNODES_VX_ENSGRID_MEAN``: (Default: 1) @@ -1530,7 +1535,7 @@ VX_ENSGRID_MEAN_03h Configuration Parameters Non-default parameters for the ``run_ensgridvx_mean_03h`` task are set in the ``task_run_vx_ensgrid_mean_03h:`` section of the ``config.yaml`` file. -``VX_ENSGRID_MEAN_03h_TN``: (Default: "run_ensgridvx_mean_03h") +``TN_VX_ENSGRID_MEAN_03h``: (Default: "run_ensgridvx_mean_03h") Set the name of this Rocoto workflow task. Users typically do not need to change this value. 
``NNODES_VX_ENSGRID_MEAN``: (Default: 1) @@ -1551,7 +1556,7 @@ VX_ENSGRID_MEAN_06h Configuration Parameters Non-default parameters for the ``run_ensgridvx_mean_06h`` task are set in the ``task_run_vx_ensgrid_mean_06h:`` section of the ``config.yaml`` file. -``VX_ENSGRID_MEAN_06h_TN``: (Default: "run_ensgridvx_mean_06h") +``TN_VX_ENSGRID_MEAN_06h``: (Default: "run_ensgridvx_mean_06h") Set the name of this Rocoto workflow task. Users typically do not need to change this value. ``NNODES_VX_ENSGRID_MEAN``: (Default: 1) @@ -1572,7 +1577,7 @@ VX_ENSGRID_MEAN_24h Configuration Parameters Non-default parameters for the ``run_ensgridvx_mean_24h`` task are set in the ``task_run_vx_ensgrid_mean_24h:`` section of the ``config.yaml`` file. -``VX_ENSGRID_MEAN_24h_TN``: (Default: "run_ensgridvx_mean_24h") +``TN_VX_ENSGRID_MEAN_24h``: (Default: "run_ensgridvx_mean_24h") Set the name of this Rocoto workflow task. Users typically do not need to change this value. ``NNODES_VX_ENSGRID_MEAN``: (Default: 1) @@ -1593,7 +1598,7 @@ VX_ENSGRID_PROB Configuration Parameters Non-default parameters for the ``run_ensgridvx_prob`` task are set in the ``task_run_vx_ensgrid_prob:`` section of the ``config.yaml`` file. -``VX_ENSGRID_PROB_TN``: (Default: "run_ensgridvx_prob") +``TN_VX_ENSGRID_PROB``: (Default: "run_ensgridvx_prob") Set the name of this Rocoto workflow task. Users typically do not need to change this value. ``NNODES_VX_ENSGRID_PROB``: (Default: 1) @@ -1614,7 +1619,7 @@ VX_ENSGRID_PROB_03h Configuration Parameters Non-default parameters for the ``run_ensgridvx_prob_03h`` task are set in the ``task_run_vx_ensgrid_prob_03h:`` section of the ``config.yaml`` file. -``VX_ENSGRID_PROB_03h_TN``: (Default: "run_ensgridvx_prob_03h") +``TN_VX_ENSGRID_PROB_03h``: (Default: "run_ensgridvx_prob_03h") Set the name of this Rocoto workflow task. Users typically do not need to change this value. 
``NNODES_VX_ENSGRID_PROB``: (Default: 1) @@ -1635,7 +1640,7 @@ VX_ENSGRID_PROB_06h Configuration Parameters Non-default parameters for the ``run_ensgridvx_prob_06h`` task are set in the ``task_run_vx_ensgrid_prob_06h:`` section of the ``config.yaml`` file. -``VX_ENSGRID_PROB_06h_TN``: (Default: "run_ensgridvx_prob_06h") +``TN_VX_ENSGRID_PROB_06h``: (Default: "run_ensgridvx_prob_06h") Set the name of this Rocoto workflow task. Users typically do not need to change this value. ``NNODES_VX_ENSGRID_PROB``: (Default: 1) @@ -1656,7 +1661,7 @@ VX_ENSGRID_PROB_24h Configuration Parameters Non-default parameters for the ``run_ensgridvx_prob_24h`` task are set in the ``task_run_vx_ensgrid_prob_24h:`` section of the ``config.yaml`` file. -``VX_ENSGRID_PROB_24h_TN``: (Default: "run_ensgridvx_prob_24h") +``TN_VX_ENSGRID_PROB_24h``: (Default: "run_ensgridvx_prob_24h") Set the name of this Rocoto workflow task. Users typically do not need to change this value. ``NNODES_VX_ENSGRID_PROB``: (Default: 1) @@ -1678,7 +1683,7 @@ VX_ENSPOINT Configuration Parameters Non-default parameters for the ``run_enspointvx`` task are set in the ``task_run_vx_enspoint:`` section of the ``config.yaml`` file. -``VX_ENSPOINT_TN``: (Default: "run_enspointvx") +``TN_VX_ENSPOINT``: (Default: "run_enspointvx") Set the name of this Rocoto workflow task. Users typically do not need to change this value. ``NNODES_VX_ENSPOINT``: (Default: 1) @@ -1699,7 +1704,7 @@ VX_ENSPOINT_MEAN Configuration Parameters Non-default parameters for the ``run_enspointvx_mean`` task are set in the ``task_run_vx_enspoint_mean:`` section of the ``config.yaml`` file. -``VX_ENSPOINT_MEAN_TN``: (Default: "run_enspointvx_mean") +``TN_VX_ENSPOINT_MEAN``: (Default: "run_enspointvx_mean") Set the name of this Rocoto workflow task. Users typically do not need to change this value. 
``NNODES_VX_ENSPOINT_MEAN``: (Default: 1) @@ -1720,7 +1725,7 @@ VX_ENSPOINT_PROB Configuration Parameters Non-default parameters for the ``run_enspointvx_prob`` task are set in the ``task_run_vx_enspoint_prob:`` section of the ``config.yaml`` file. -``VX_ENSPOINT_PROB_TN``: (Default: "run_enspointvx_prob") +``TN_VX_ENSPOINT_PROB``: (Default: "run_enspointvx_prob") Set the name of this Rocoto workflow task. Users typically do not need to change this value. ``NNODES_VX_ENSPOINT_PROB``: (Default: 1) @@ -1747,7 +1752,7 @@ Basic Task Parameters For each workflow task, certain parameter values must be passed to the job scheduler (e.g., Slurm), which submits a job for the task. Typically, users do not need to adjust the default values. -``PLOT_ALLVARS_TN``: (Default: "plot_allvars") +``TN_PLOT_ALLVARS``: (Default: "plot_allvars") Set the name of this Rocoto workflow task. Users typically do not need to change this value. ``NNODES_PLOT_ALLVARS``: (Default: 1) @@ -1768,17 +1773,20 @@ Additional Parameters Typically, the following parameters must be set explicitly by the user in the configuration file (``config.yaml``) when executing the plotting tasks. ``COMOUT_REF``: (Default: "") - The directory where the GRIB2 files from post-processing are located. In *community* mode (i.e., when ``RUN_ENVIR: "community"``), this directory will correspond to the location in the experiment directory where the post-processed output can be found (e.g., ``$EXPTDIR/$DATE_FIRST_CYCL/postprd``). In *nco* mode, this directory should be set to the location of the COMOUT directory and end with ``$PDY/$cyc``. + The directory where the GRIB2 files from post-processing are located. In *community* mode (i.e., when ``RUN_ENVIR: "community"``), this directory will correspond to the location in the experiment directory where the post-processed output can be found (e.g., ``$EXPTDIR/$DATE_FIRST_CYCL/postprd``). 
In *nco* mode, this directory should be set to the location of the ``COMOUT`` directory and end with ``$PDY/$cyc``. For more detail on *nco* standards and directory naming conventions, see `WCOSS Implementation Standards `__ (particularly pp. 4-5). ``PLOT_FCST_START``: (Default: 0) The starting forecast hour for the plotting task. For example, if a forecast starts at 18h/18z, this is considered the 0th forecast hour, so "starting forecast hour" should be 0, not 18. If a forecast starts at 18h/18z, but the user only wants plots from the 6th forecast hour on, "starting forecast hour" should be 6. ``PLOT_FCST_INC``: (Default: 3) - Forecast hour increment for the plotting task. This may be the same as ``INCR_CYCL_FREQ``, or it may be a multiple of ``INCR_CYCL_FREQ``. For example, if ``INCR_CYCL_FREQ`` is set to 3, there will be forecast output every three hours for the duration of the forecast. If the user wants plots of all of this output, they should set ``PLOT_FCST_INC: 3``. If the user only wants plots for some of the output (e.g., every 6 hours), they should set ``PLOT_FCST_INC: 6``. However, there must be forecast output available at the designated increments to produce the plots. In this example, setting ``PLOT_FCST_INC: 7`` would produce an error because there is only forecast output available for hours 3, 6, 9, ..., etc. + Forecast hour increment for the plotting task. For example, if the user wants plots for each forecast hour, they should set ``PLOT_FCST_INC: 1``. If the user only wants plots for some of the output (e.g., every 6 hours), they should set ``PLOT_FCST_INC: 6``. ``PLOT_FCST_END``: (Default: "") The last forecast hour for the plotting task. For example, if a forecast run for 24 hours, and the user wants plots for each available hour of forecast output, they should set ``PLOT_FCST_END: 24``. If the user only wants plots from the first 12 hours of the forecast, the "last forecast hour" should be 12. 
+``PLOT_DOMAINS``: (Default: ["conus"]) + Domains to plot. Currently supported options are ["conus"], ["regional"], or both (i.e., ["conus", "regional"]). + Global Configuration Parameters =================================== diff --git a/docs/UsersGuide/source/Glossary.rst b/docs/UsersGuide/source/Glossary.rst index 2386d83177..731af20a31 100644 --- a/docs/UsersGuide/source/Glossary.rst +++ b/docs/UsersGuide/source/Glossary.rst @@ -67,6 +67,9 @@ Glossary cycle-independent Describes a workflow task that only needs to be run once per experiment, regardless of the number of cycles in the experiment. + data assimilation + Data assimilation is the process of combining observations, model data, and error statistics to achieve the best estimate of the state of a system. One of the major sources of error in weather and climate forecasts is uncertainty related to the initial conditions that are used to generate future predictions. Even the most precise instruments have a small range of unavoidable measurement error, which means that tiny measurement errors (e.g., related to atmospheric conditions and instrument location) can compound over time. These small differences result in very similar forecasts in the short term (i.e., minutes, hours), but they cause widely divergent forecasts in the long term. Errors in weather and climate forecasts can also arise because models are imperfect representations of reality. Data assimilation systems seek to mitigate these problems by combining the most timely observational data with a "first guess" of the atmospheric state (usually a previous forecast) and other sources of data to provide a "best guess" analysis of the atmospheric state to start a weather or climate simulation. When combined with an "ensemble" of model runs (many forecasts with slightly different conditions), data assimilation helps predict a range of possible atmospheric states, giving an overall measure of uncertainty in a given forecast. 
+ dycore dynamical core Global atmospheric model based on fluid dynamics principles, including Euler's equations of motion. @@ -188,6 +191,9 @@ Glossary NWP Numerical Weather Prediction (NWP) takes current observations of weather and processes them with computer models to forecast the future state of the weather. + NWS + The `National Weather Service `__ (NWS) is an agency of the United States government that is tasked with providing weather forecasts, warnings of hazardous weather, and other weather-related products to organizations and the public for the purposes of protection, safety, and general information. It is a part of the National Oceanic and Atmospheric Administration (NOAA) branch of the Department of Commerce. + Orography The branch of physical geography dealing with mountains. @@ -229,6 +235,9 @@ Glossary Umbrella repository A repository that houses external code, or "externals," from additional repositories. + Updraft helicity + Helicity measures the rotation in a storm's updraft (rising) air. Significant rotation increases the probability that the storm will produce severe weather, including tornadoes. See http://ww2010.atmos.uiuc.edu/(Gh)/guides/mtr/svr/modl/fcst/params/hel.rxml for more details on updraft helicity. + UPP The `Unified Post Processor `__ is software developed at :term:`NCEP` and used operationally to post-process raw output from a variety of :term:`NCEP`'s :term:`NWP` models, including the :term:`FV3`. 
diff --git a/docs/UsersGuide/source/InputOutputFiles.rst b/docs/UsersGuide/source/InputOutputFiles.rst index 8456049c93..0b6b364613 100644 --- a/docs/UsersGuide/source/InputOutputFiles.rst +++ b/docs/UsersGuide/source/InputOutputFiles.rst @@ -257,7 +257,7 @@ The environment variables ``FIXgsm``, ``TOPO_DIR``, and ``SFC_CLIMO_INPUT_DIR`` Initial Condition/Lateral Boundary Condition File Formats and Source ----------------------------------------------------------------------- -The SRW Application currently supports raw initial and lateral boundary conditions from numerous models (i.e., FV3GFS, NAM, RAP, HRRR). The data can be provided in three formats: :term:`NEMSIO`, :term:`netCDF`, or :term:`GRIB2`. +The SRW Application currently supports raw initial and lateral boundary conditions from numerous models (i.e., FV3GFS, GEFS, GDAS, NAM, RAP, HRRR). The data can be provided in three formats: :term:`NEMSIO`, :term:`netCDF`, or :term:`GRIB2`. To download the model input data for the 12-hour "out-of-the-box" experiment configuration in ``config.community.yaml`` file, run: @@ -286,7 +286,7 @@ The paths to ``EXTRN_MDL_SOURCE_BASEDIR_ICS`` and ``EXTRN_MDL_SOURCE_BASEDIR_LBC EXTRN_MDL_SOURCE_BASEDIR_LBCS: EXTRN_MDL_DATA_STORES: disk -The two ``EXTRN_MDL_SOURCE_BASEDIR_*CS`` variables describe where the :term:`IC ` and :term:`LBC ` file directories are located, respectively. For ease of reusing ``config.yaml`` across experiments, it is recommended that users set up the raw :term:`IC/LBC ` file paths to include the model name (e.g., FV3GFS, NAM, RAP, HRRR), data format (e.g., grib2, nemsio), and date (in ``YYYYMMDDHH`` format). For example: ``/path-to/input_model_data/FV3GFS/grib2/2019061518/``. While there is flexibility to modify these settings, this structure will provide the most reusability for multiple dates when using the SRW Application workflow. 
+The two ``EXTRN_MDL_SOURCE_BASEDIR_*CS`` variables describe where the :term:`IC ` and :term:`LBC ` file directories are located, respectively. For ease of reusing ``config.yaml`` across experiments, it is recommended that users set up the raw :term:`IC/LBC ` file paths to include the model name (e.g., FV3GFS, GEFS, GDAS, NAM, RAP, HRRR), data format (e.g., grib2, nemsio), and date (in ``YYYYMMDDHH`` format). For example: ``/path-to/input_model_data/FV3GFS/grib2/2019061518/``. While there is flexibility to modify these settings, this structure will provide the most reusability for multiple dates when using the SRW Application workflow. When files are pulled from NOAA :term:`HPSS` (rather than downloaded from the data bucket), the naming convention looks something like: @@ -296,17 +296,22 @@ When files are pulled from NOAA :term:`HPSS` (rather than downloaded from the da * ICs: ``gfs.t{cycle}z.atmanl.nemsio`` and ``gfs.t{cycle}z.sfcanl.nemsio``; * LBCs: ``gfs.t{cycle}z.atmf{fhr}.nemsio`` +* GDAS (NETCDF): + + * ICs: ``gdas.t{cycle}z.atmf{fhr}.nc`` and ``gdas.t{cycle}z.sfcf{fhr}.nc``; + * LBCs: ``gdas.t{cycle}z.atmf{fhr}.nc`` + * RAP (GRIB2): ``rap.t{cycle}z.wrfprsf{fhr}.grib2`` * HRRR (GRIB2): ``hrrr.t{cycle}z.wrfprsf{fhr}.grib2`` where: * ``{cycle}`` corresponds to the 2-digit hour of the day when the forecast cycle starts, and - * ``{fhr}`` corresponds to the 2- or 3-digit nth hour of the forecast (3-digits for FV3GFS data and 2 digits for RAP/HRRR data). + * ``{fhr}`` corresponds to the 2- or 3-digit nth hour of the forecast (3-digits for FV3GFS/GDAS data and 2 digits for RAP/HRRR data). For example, a forecast using FV3GFS GRIB2 data that starts at 18h00 UTC would have a {cycle} value of 18, which is the 000th forecast hour. The LBCS file for 21h00 UTC would be named ``gfs.t18z.pgrb2.0p25.f003``. -In some cases, it may be necessary to specify values for ``EXTRN_MDL_FILES_*CS``variables. This is often the case with HRRR and RAP data. 
An example ``config.yaml`` excerpt using HRRR and RAP data appears below: +In some cases, it may be necessary to specify values for ``EXTRN_MDL_FILES_*CS`` variables. This is often the case with HRRR and RAP data. An example ``config.yaml`` excerpt using HRRR and RAP data appears below: .. code-block:: console @@ -345,6 +350,8 @@ NOMADS: https://nomads.ncep.noaa.gov/pub/data/nccf/com/{model}/prod, where model * GFS (GRIB2 or NEMSIO) - available for the last 10 days https://nomads.ncep.noaa.gov/pub/data/nccf/com/gfs/prod/ +* GDAS (NETCDF) sfc files - available for the last 2 days + https://nomads.ncep.noaa.gov/pub/data/nccf/com/gfs/prod * NAM - available for the last 8 days https://nomads.ncep.noaa.gov/pub/data/nccf/com/nam/prod/ * RAP - available for the last 2 days @@ -355,6 +362,8 @@ NOMADS: https://nomads.ncep.noaa.gov/pub/data/nccf/com/{model}/prod, where model AWS S3 Data Buckets: * GFS: https://registry.opendata.aws/noaa-gfs-bdp-pds/ +* GEFS: https://registry.opendata.aws/noaa-gefs/ +* GDAS: https://registry.opendata.aws/noaa-gfs-bdp-pds/ * HRRR: https://registry.opendata.aws/noaa-hrrr-pds/ (necessary fields for initializing available for dates 2015 and newer) Google Cloud: diff --git a/docs/UsersGuide/source/RunSRW.rst b/docs/UsersGuide/source/RunSRW.rst index 1d74541837..f1aeb92427 100644 --- a/docs/UsersGuide/source/RunSRW.rst +++ b/docs/UsersGuide/source/RunSRW.rst @@ -181,8 +181,8 @@ Configuration parameters in the ``config_defaults.yaml`` file appear in :numref: | | FCST_MODEL, WFLOW_XML_FN, GLOBAL_VAR_DEFNS_FN, | | | EXTRN_MDL_VAR_DEFNS_FN, WFLOW_LAUNCH_SCRIPT_FN, WFLOW_LAUNCH_LOG_FN, | | | CCPP_PHYS_SUITE, GRID_GEN_METHOD, DATE_FIRST_CYCL, DATE_LAST_CYCL, | - | | INCR_CYCL_FREQ, FCST_LEN_HRS, GET_OBS, VX_TN, VX_ENSGRID_TN, | - | | VX_ENSGRID_PROB_REFC_TN, MAXTRIES_VX_ENSGRID_PROB_REFC, | + | | INCR_CYCL_FREQ, FCST_LEN_HRS, GET_OBS, TN_VX, TN_VX_ENSGRID, | + | | TN_VX_ENSGRID_PROB_REFC, MAXTRIES_VX_ENSGRID_PROB_REFC, | | | PREEXISTING_DIR_METHOD, 
VERBOSE, DEBUG, COMPILER | +-----------------------------+-----------------------------------------------------------------------+ | NCO | envir, NET, model_ver, RUN, OPSROOT | @@ -194,7 +194,7 @@ Configuration parameters in the ``config_defaults.yaml`` file appear in :numref: | | RUN_TASK_VX_GRIDSTAT, RUN_TASK_VX_POINTSTAT, RUN_TASK_VX_ENSGRID, | | | RUN_TASK_VX_ENSPOINT | +-----------------------------+-----------------------------------------------------------------------+ - | task_make_grid | MAKE_GRID_TN, NNODES_MAKE_GRID, PPN_MAKE_GRID, WTIME_MAKE_GRID, | + | task_make_grid | TN_MAKE_GRID, NNODES_MAKE_GRID, PPN_MAKE_GRID, WTIME_MAKE_GRID, | | | MAXTRIES_MAKE_GRID, GRID_DIR, ESGgrid_LON_CTR, ESGgrid_LAT_CTR, | | | ESGgrid_DELX, ESGgrid_DELY, ESGgrid_NX, ESGgrid_NY, ESGgrid_PAZI, | | | ESGgrid_WIDE_HALO_WIDTH, GFDLgrid_LON_T6_CTR, GFDLgrid_LAT_T6_CTR, | @@ -203,16 +203,16 @@ Configuration parameters in the ``config_defaults.yaml`` file appear in :numref: | | GFDLgrid_JSTART_OF_RGNL_DOM_ON_T6G, GFDLgrid_JEND_OF_RGNL_DOM_ON_T6G, | | | GFDLgrid_USE_NUM_CELLS_IN_FILENAMES | +-----------------------------+-----------------------------------------------------------------------+ - | task_make_orog | MAKE_OROG_TN, NNODES_MAKE_OROG, PPN_MAKE_OROG, WTIME_MAKE_OROG, | + | task_make_orog | TN_MAKE_OROG, NNODES_MAKE_OROG, PPN_MAKE_OROG, WTIME_MAKE_OROG, | | | MAXTRIES_MAKE_OROG, KMP_AFFINITY_MAKE_OROG, OMP_NUM_THREADS_MAKE_OROG | | | OMP_STACKSIZE_MAKE_OROG, OROG_DIR | +-----------------------------+-----------------------------------------------------------------------+ - | task_make_sfc_climo | MAKE_SFC_CLIMO_TN, NNODES_MAKE_SFC_CLIMO, PPN_MAKE_SFC_CLIMO, | + | task_make_sfc_climo | TN_MAKE_SFC_CLIMO, NNODES_MAKE_SFC_CLIMO, PPN_MAKE_SFC_CLIMO, | | | WTIME_MAKE_SFC_CLIMO, MAXTRIES_MAKE_SFC_CLIMO, | | | KMP_AFFINITY_MAKE_SFC_CLIMO, OMP_NUM_THREADS_MAKE_SFC_CLIMO, | | | OMP_STACKSIZE_MAKE_SFC_CLIMO, SFC_CLIMO_DIR | 
+-----------------------------+-----------------------------------------------------------------------+ - | task_get_extrn_ics | GET_EXTRN_ICS_TN, NNODES_GET_EXTRN_ICS, PPN_GET_EXTRN_ICS, | + | task_get_extrn_ics | TN_GET_EXTRN_ICS, NNODES_GET_EXTRN_ICS, PPN_GET_EXTRN_ICS, | | | WTIME_GET_EXTRN_ICS, MAXTRIES_GET_EXTRN_ICS, EXTRN_MDL_NAME_ICS, | | | EXTRN_MDL_ICS_OFFSET_HRS, FV3GFS_FILE_FMT_ICS, | | | EXTRN_MDL_SYSBASEDIR_ICS, USE_USER_STAGED_EXTRN_FILES, | @@ -220,23 +220,23 @@ Configuration parameters in the ``config_defaults.yaml`` file appear in :numref: | | EXTRN_MDL_FILES_ICS, EXTRN_MDL_FILES_ICS, EXTRN_MDL_DATA_STORES, | | | NOMADS, NOMADS_file_type | +-----------------------------+-----------------------------------------------------------------------+ - | task_get_extrn_lbcs | GET_EXTRN_LBCS_TN, NNODES_GET_EXTRN_LBCS, PPN_GET_EXTRN_LBCS, | + | task_get_extrn_lbcs | TN_GET_EXTRN_LBCS, NNODES_GET_EXTRN_LBCS, PPN_GET_EXTRN_LBCS, | | | WTIME_GET_EXTRN_LBCS, MAXTRIES_GET_EXTRN_LBCS, EXTRN_MDL_NAME_LBCS, | | | LBC_SPEC_INTVL_HRS, EXTRN_MDL_LBCS_OFFSET_HRS, FV3GFS_FILE_FMT_LBCS, | | | EXTRN_MDL_SYSBASEDIR_LBCS, USE_USER_STAGED_EXTRN_FILES, | | | EXTRN_MDL_SOURCE_BASEDIR_LBCS, EXTRN_MDL_FILES_LBCS, | | | EXTRN_MDL_DATA_STORE, NOMADS, NOMADS_file_type | +-----------------------------+-----------------------------------------------------------------------+ - | task_make_ics | MAKE_ICS_TN, NNODES_MAKE_ICS, PPN_MAKE_ICS, WTIME_MAKE_ICS, | + | task_make_ics | TN_MAKE_ICS, NNODES_MAKE_ICS, PPN_MAKE_ICS, WTIME_MAKE_ICS, | | | MAXTRIES_MAKE_ICS, KMP_AFFINITY_MAKE_ICS, OMP_NUM_THREADS_MAKE_ICS, | | | OMP_STACKSIZE_MAKE_ICS, USE_FVCOM, FVCOM_WCSTART, FVCOM_DIR, | | | FVCOM_FILE | +-----------------------------+-----------------------------------------------------------------------+ - | task_make_lbcs | MAKE_LBCS_TN, NNODES_MAKE_LBCS, PPN_MAKE_LBCS, WTIME_MAKE_LBCS, | + | task_make_lbcs | TN_MAKE_LBCS, NNODES_MAKE_LBCS, PPN_MAKE_LBCS, WTIME_MAKE_LBCS, | | | 
MAXTRIES_MAKE_LBCS, KMP_AFFINITY_MAKE_LBCS, OMP_NUM_THREADS_MAKE_LBCS,| | | OMP_STACKSIZE_MAKE_LBCS | +-----------------------------+-----------------------------------------------------------------------+ - | task_run_fcst | RUN_FCST_TN, NNODES_RUN_FCST, PPN_RUN_FCST, WTIME_RUN_FCST, | + | task_run_fcst | TN_RUN_FCST, NNODES_RUN_FCST, PPN_RUN_FCST, WTIME_RUN_FCST, | | | MAXTRIES_RUN_FCST, KMP_AFFINITY_RUN_FCST, OMP_NUM_THREADS_RUN_FCST, | | | OMP_STACKSIZE_RUN_FCST, DT_ATMOS, RESTART_INTERVAL, WRITE_DOPOST, | | | LAYOUT_X, LAYOUT_Y, BLOCKSIZE, QUILTING, PRINT_ESMF, | @@ -252,7 +252,7 @@ Configuration parameters in the ``config_defaults.yaml`` file appear in :numref: | | FV3_NML_VARNAME_TO_SFC_CLIMO_FIELD_MAPPING, | | | CYCLEDIR_LINKS_TO_FIXam_FILES_MAPPING | +-----------------------------+-----------------------------------------------------------------------+ - | task_run_post | RUN_POST_TN, NNODES_RUN_POST, PPN_RUN_POST, WTIME_RUN_POST, | + | task_run_post | TN_RUN_POST, NNODES_RUN_POST, PPN_RUN_POST, WTIME_RUN_POST, | | | MAXTRIES_RUN_POST, KMP_AFFINITY_RUN_POST, OMP_NUM_THREADS_RUN_POST, | | | OMP_STACKSIZE_RUN_POST, SUB_HOURLY_POST, DT_SUB_HOURLY_POST_MNTS, | | | USE_CUSTOM_POST_CONFIG_FILE, CUSTOM_POST_CONFIG_FP, | @@ -268,77 +268,77 @@ Configuration parameters in the ``config_defaults.yaml`` file appear in :numref: | | LSM_SPP_TSCALE, LSM_SPP_LSCALE, ISEED_LSM_SPP, LSM_SPP_VAR_LIST, | | | LSM_SPP_MAG_LIST, HALO_BLEND | +-----------------------------+-----------------------------------------------------------------------+ - | task_get_obs_ccpa | GET_OBS_CCPA_TN, NNODES_GET_OBS_CCPA, PPN_GET_OBS_CCPA, | + | task_get_obs_ccpa | TN_GET_OBS_CCPA, NNODES_GET_OBS_CCPA, PPN_GET_OBS_CCPA, | | | WTIME_GET_OBS_CCPA, MAXTRIES_GET_OBS_CCPA | +-----------------------------+-----------------------------------------------------------------------+ - | task_get_obs_mrms | GET_OBS_MRMS_TN, NNODES_GET_OBS_MRMS, PPN_GET_OBS_MRMS, | + | task_get_obs_mrms | TN_GET_OBS_MRMS, 
NNODES_GET_OBS_MRMS, PPN_GET_OBS_MRMS, | | | WTIME_GET_OBS_MRMS, MAXTRIES_GET_OBS_MRMS | +-----------------------------+-----------------------------------------------------------------------+ - | task_get_obs_ndas | GET_OBS_NDAS_TN, NNODES_GET_OBS_NDAS, PPN_GET_OBS_NDAS, | + | task_get_obs_ndas | TN_GET_OBS_NDAS, NNODES_GET_OBS_NDAS, PPN_GET_OBS_NDAS, | | | WTIME_GET_OBS_NDAS, MAXTRIES_GET_OBS_NDAS | +-----------------------------+-----------------------------------------------------------------------+ - | task_run_vx_gridstat | VX_GRIDSTAT_TN, NNODES_VX_GRIDSTAT, PPN_VX_GRIDSTAT, | + | task_run_vx_gridstat | TN_VX_GRIDSTAT, NNODES_VX_GRIDSTAT, PPN_VX_GRIDSTAT, | | | WTIME_VX_GRIDSTAT, MAXTRIES_VX_GRIDSTAT | +-----------------------------+-----------------------------------------------------------------------+ - | task_run_vx_gridstat_refc | VX_GRIDSTAT_REFC_TN, NNODES_VX_GRIDSTAT, PPN_VX_GRIDSTAT, | + | task_run_vx_gridstat_refc | TN_VX_GRIDSTAT_REFC, NNODES_VX_GRIDSTAT, PPN_VX_GRIDSTAT, | | | WTIME_VX_GRIDSTAT, MAXTRIES_VX_GRIDSTAT_REFC | +-----------------------------+-----------------------------------------------------------------------+ - | task_run_vx_gridstat_retop | VX_GRIDSTAT_RETOP_TN, NNODES_VX_GRIDSTAT, PPN_VX_GRIDSTAT, | + | task_run_vx_gridstat_retop | TN_VX_GRIDSTAT_RETOP, NNODES_VX_GRIDSTAT, PPN_VX_GRIDSTAT, | | | WTIME_VX_GRIDSTAT, MAXTRIES_VX_GRIDSTAT_RETOP | +-----------------------------+-----------------------------------------------------------------------+ - | task_run_vx_gridstat_03h | VX_GRIDSTAT_03h_TN, NNODES_VX_GRIDSTAT, PPN_VX_GRIDSTAT, | + | task_run_vx_gridstat_03h | TN_VX_GRIDSTAT_03h, NNODES_VX_GRIDSTAT, PPN_VX_GRIDSTAT, | | | WTIME_VX_GRIDSTAT, MAXTRIES_VX_GRIDSTAT_03h | +-----------------------------+-----------------------------------------------------------------------+ - | task_run_vx_gridstat_06h | VX_GRIDSTAT_06h_TN, NNODES_VX_GRIDSTAT, PPN_VX_GRIDSTAT, | + | task_run_vx_gridstat_06h | TN_VX_GRIDSTAT_06h, 
NNODES_VX_GRIDSTAT, PPN_VX_GRIDSTAT, | | | WTIME_VX_GRIDSTAT, MAXTRIES_VX_GRIDSTAT_06h | +-----------------------------+-----------------------------------------------------------------------+ - | task_run_vx_gridstat_24h | VX_GRIDSTAT_24h_TN, NNODES_VX_GRIDSTAT, PPN_VX_GRIDSTAT, | + | task_run_vx_gridstat_24h | TN_VX_GRIDSTAT_24h, NNODES_VX_GRIDSTAT, PPN_VX_GRIDSTAT, | | | WTIME_VX_GRIDSTAT, MAXTRIES_VX_GRIDSTAT_24h | +-----------------------------+-----------------------------------------------------------------------+ - | task_run_vx_pointstat | VX_POINTSTAT_TN, NNODES_VX_POINTSTAT, PPN_VX_POINTSTAT, | + | task_run_vx_pointstat | TN_VX_POINTSTAT, NNODES_VX_POINTSTAT, PPN_VX_POINTSTAT, | | | WTIME_VX_POINTSTAT, MAXTRIES_VX_POINTSTAT | +-----------------------------+-----------------------------------------------------------------------+ - | task_run_vx_ensgrid | VX_ENSGRID_03h_TN, MAXTRIES_VX_ENSGRID_03h, VX_ENSGRID_06h_TN, | - | | MAXTRIES_VX_ENSGRID_06h, VX_ENSGRID_24h_TN, MAXTRIES_VX_ENSGRID_24h, | - | | VX_ENSGRID_RETOP_TN, MAXTRIES_VX_ENSGRID_RETOP, | - | | VX_ENSGRID_PROB_RETOP_TN, MAXTRIES_VX_ENSGRID_PROB_RETOP, | + | task_run_vx_ensgrid | TN_VX_ENSGRID_03h, MAXTRIES_VX_ENSGRID_03h, TN_VX_ENSGRID_06h, | + | | MAXTRIES_VX_ENSGRID_06h, TN_VX_ENSGRID_24h, MAXTRIES_VX_ENSGRID_24h, | + | | TN_VX_ENSGRID_RETOP, MAXTRIES_VX_ENSGRID_RETOP, | + | | TN_VX_ENSGRID_PROB_RETOP, MAXTRIES_VX_ENSGRID_PROB_RETOP, | | | NNODES_VX_ENSGRID, PPN_VX_ENSGRID, WTIME_VX_ENSGRID, | | | MAXTRIES_VX_ENSGRID | +-----------------------------+-----------------------------------------------------------------------+ - | task_run_vx_ensgrid_refc | VX_ENSGRID_REFC_TN, NNODES_VX_ENSGRID, PPN_VX_ENSGRID, | + | task_run_vx_ensgrid_refc | TN_VX_ENSGRID_REFC, NNODES_VX_ENSGRID, PPN_VX_ENSGRID, | | | WTIME_VX_ENSGRID, MAXTRIES_VX_ENSGRID_REFC | +-----------------------------+-----------------------------------------------------------------------+ - | task_run_vx_ensgrid_mean | VX_ENSGRID_MEAN_TN, 
NNODES_VX_ENSGRID_MEAN, PPN_VX_ENSGRID_MEAN, | + | task_run_vx_ensgrid_mean | TN_VX_ENSGRID_MEAN, NNODES_VX_ENSGRID_MEAN, PPN_VX_ENSGRID_MEAN, | | | WTIME_VX_ENSGRID_MEAN, MAXTRIES_VX_ENSGRID_MEAN | +-----------------------------+-----------------------------------------------------------------------+ - | task_run_vx_ensgrid_mean_03h| VX_ENSGRID_MEAN_03h_TN, NNODES_VX_ENSGRID_MEAN, PPN_VX_ENSGRID_MEAN, | + | task_run_vx_ensgrid_mean_03h| TN_VX_ENSGRID_MEAN_03h, NNODES_VX_ENSGRID_MEAN, PPN_VX_ENSGRID_MEAN, | | | WTIME_VX_ENSGRID_MEAN, MAXTRIES_VX_ENSGRID_MEAN_03h | +-----------------------------+-----------------------------------------------------------------------+ - | task_run_vx_ensgrid_mean_06h| VX_ENSGRID_MEAN_06h_TN, NNODES_VX_ENSGRID_MEAN, PPN_VX_ENSGRID_MEAN, | + | task_run_vx_ensgrid_mean_06h| TN_VX_ENSGRID_MEAN_06h, NNODES_VX_ENSGRID_MEAN, PPN_VX_ENSGRID_MEAN, | | | WTIME_VX_ENSGRID_MEAN, MAXTRIES_VX_ENSGRID_MEAN_06h | +-----------------------------+-----------------------------------------------------------------------+ - | task_run_vx_ensgrid_mean_24h| VX_ENSGRID_MEAN_24h_TN, NNODES_VX_ENSGRID_MEAN, PPN_VX_ENSGRID_MEAN, | + | task_run_vx_ensgrid_mean_24h| TN_VX_ENSGRID_MEAN_24h, NNODES_VX_ENSGRID_MEAN, PPN_VX_ENSGRID_MEAN, | | | WTIME_VX_ENSGRID_MEAN, MAXTRIES_VX_ENSGRID_MEAN_24h | +-----------------------------+-----------------------------------------------------------------------+ - | task_run_vx_ensgrid_prob | VX_ENSGRID_PROB_TN, NNODES_VX_ENSGRID_PROB, PPN_VX_ENSGRID_PROB, | + | task_run_vx_ensgrid_prob | TN_VX_ENSGRID_PROB, NNODES_VX_ENSGRID_PROB, PPN_VX_ENSGRID_PROB, | | | WTIME_VX_ENSGRID_PROB, MAXTRIES_VX_ENSGRID_PROB | +-----------------------------+-----------------------------------------------------------------------+ - | task_run_vx_ensgrid_prob_03h| VX_ENSGRID_PROB_03h_TN, NNODES_VX_ENSGRID_PROB, PPN_VX_ENSGRID_PROB, | + | task_run_vx_ensgrid_prob_03h| TN_VX_ENSGRID_PROB_03h, NNODES_VX_ENSGRID_PROB, PPN_VX_ENSGRID_PROB, | | | 
WTIME_VX_ENSGRID_PROB, MAXTRIES_VX_ENSGRID_PROB_03h | +-----------------------------+-----------------------------------------------------------------------+ - | task_run_vx_ensgrid_prob_06h| VX_ENSGRID_PROB_06h_TN, NNODES_VX_ENSGRID_PROB, PPN_VX_ENSGRID_PROB, | + | task_run_vx_ensgrid_prob_06h| TN_VX_ENSGRID_PROB_06h, NNODES_VX_ENSGRID_PROB, PPN_VX_ENSGRID_PROB, | | | WTIME_VX_ENSGRID_PROB, MAXTRIES_VX_ENSGRID_PROB_06h | +-----------------------------+-----------------------------------------------------------------------+ - | task_run_vx_ensgrid_prob_24h| VX_ENSGRID_PROB_24h_TN, NNODES_VX_ENSGRID_PROB, PPN_VX_ENSGRID_PROB, | + | task_run_vx_ensgrid_prob_24h| TN_VX_ENSGRID_PROB_24h, NNODES_VX_ENSGRID_PROB, PPN_VX_ENSGRID_PROB, | | | WTIME_VX_ENSGRID_PROB, MAXTRIES_VX_ENSGRID_PROB_24h | +-----------------------------+-----------------------------------------------------------------------+ - | task_run_vx_enspoint | VX_ENSPOINT_TN, NNODES_VX_ENSPOINT, PPN_VX_ENSPOINT, | + | task_run_vx_enspoint | TN_VX_ENSPOINT, NNODES_VX_ENSPOINT, PPN_VX_ENSPOINT, | | | WTIME_VX_ENSPOINT, MAXTRIES_VX_ENSPOINT | +-----------------------------+-----------------------------------------------------------------------+ - | task_run_vx_enspoint_mean | VX_ENSPOINT_MEAN_TN, NNODES_VX_ENSPOINT_MEAN, PPN_VX_ENSPOINT_MEAN, | + | task_run_vx_enspoint_mean | TN_VX_ENSPOINT_MEAN, NNODES_VX_ENSPOINT_MEAN, PPN_VX_ENSPOINT_MEAN, | | | WTIME_VX_ENSPOINT_MEAN, MAXTRIES_VX_ENSPOINT_MEAN | +-----------------------------+-----------------------------------------------------------------------+ - | task_run_vx_enspoint_prob | VX_ENSPOINT_PROB_TN, NNODES_VX_ENSPOINT_PROB, PPN_VX_ENSPOINT_PROB, | + | task_run_vx_enspoint_prob | TN_VX_ENSPOINT_PROB, NNODES_VX_ENSPOINT_PROB, PPN_VX_ENSPOINT_PROB, | | | WTIME_VX_ENSPOINT_PROB, MAXTRIES_VX_ENSPOINT_PROB | +-----------------------------+-----------------------------------------------------------------------+ @@ -537,8 +537,8 @@ To configure an experiment and 
python environment for a general Linux or Mac sys .. _PlotOutput: -Plot the Output ------------------ +Plotting Configuration (optional) +^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ An optional Python plotting task (PLOT_ALLVARS) can be activated in the workflow to generate plots for the :term:`FV3`-:term:`LAM` post-processed :term:`GRIB2` output over the :term:`CONUS`. It generates graphics plots for a number of variables, including: @@ -561,12 +561,12 @@ the same cycle starting date/time and forecast hours. Other parameters may diffe .. _Cartopy: Cartopy Shapefiles -^^^^^^^^^^^^^^^^^^^^^ +````````````````````` The Python plotting tasks require a path to the directory where the Cartopy Natural Earth shapefiles are located. The medium scale (1:50m) cultural and physical shapefiles are used to create coastlines and other geopolitical borders on the map. On `Level 1 `__ systems, this path is already set in the system's machine file using the variable ``FIXshp``. Users on other systems will need to download the shapefiles and update the path of ``$FIXshp`` in the machine file they are using (e.g., ``$SRW/ush/machine/macos.yaml`` for a generic MacOS system, where ``$SRW`` is the path to the ``ufs-srweather-app`` directory). The subset of shapefiles required for the plotting task can be obtained from the `SRW Data Bucket `__. The full set of medium-scale (1:50m) Cartopy shapefiles can be downloaded `here `__. Task Configuration -^^^^^^^^^^^^^^^^^^^^^ +````````````````````` Users will need to add or modify certain variables in ``config.yaml`` to run the plotting task(s). At a minimum, users must set ``RUN_TASK_PLOT_ALLVARS`` to true in the ``workflow_switches:`` section: @@ -593,16 +593,16 @@ When plotting output from a single experiment, no further adjustments are necess corresponds to the cycle date and hour in YYYYMMDDHH format (e.g., ``2019061518``). 
Plotting the Difference Between Two Experiments -``````````````````````````````````````````````````` +"""""""""""""""""""""""""""""""""""""""""""""""""" -When plotting the difference between two experiments, users must set the baseline experiment directory using the ``COMOUT_REF`` template variable. For example, in *community* mode, users can set: +When plotting the difference between two experiments (``expt1`` and ``expt2``), users must set the ``COMOUT_REF`` template variable in ``expt2``'s ``config.yaml`` file to point at forecast output from the ``expt1`` directory. For example, in *community* mode, users can set ``COMOUT_REF`` as follows in the ``expt2`` configuration file: .. code-block:: console task_plot_allvars: - COMOUT_REF: '${EXPT_BASEDIR}/${EXPT_SUBDIR}/${PDY}${cyc}/postprd' + COMOUT_REF: '${EXPT_BASEDIR}/expt1/${PDY}${cyc}/postprd' -In *community* mode, using default directory names and settings, ``$COMOUT_REF`` will resemble ``/path/to/expt_dirs/test_community/2019061518/postprd``. Additional details on the plotting variables are provided in :numref:`Section %s `. +This will ensure that ``expt2`` can produce a difference plot comparing ``expt1`` and ``expt2``. In *community* mode, using default directory names and settings, ``$COMOUT_REF`` will resemble ``/path/to/expt_dirs/test_community/2019061518/postprd``. Additional details on the plotting variables are provided in :numref:`Section %s `. The output files (in ``.png`` format) will be located in the ``postprd`` directory for the experiment. diff --git a/docs/UsersGuide/source/SSHIntro.rst b/docs/UsersGuide/source/SSHIntro.rst new file mode 100644 index 0000000000..006232f225 --- /dev/null +++ b/docs/UsersGuide/source/SSHIntro.rst @@ -0,0 +1,172 @@ +:orphan: + +.. _SSHIntro: + +====================================== +Introduction to SSH & Data Transfer +====================================== + +.. 
attention:: + + Note that all port numbers, IP addresses, and SSH keys included in this chapter are placeholders and do not refer to known systems. They are used purely for illustrative purposes, and users should modify the commands to correspond to their actual systems. + +A Secure SHell (SSH) tunnel creates an encrypted connection between two computer systems. This secure connection allows users to access and use a remote system via the command line on their local machine. SSH connections can also be used to transfer data securely between two systems. Many HPC platforms, including NOAA `Level 1 systems `__, are accessed via SSH from the user's own computer. + +.. attention:: + + Note that the instructions on this page assume that users are working on a UNIX-like system (i.e., Linux or MacOS). They may not work as-is on Windows systems, but users can adapt them for Windows or use a tool such as Cygwin, which enables the use of UNIX-like commands on Windows. Users may also consider installing a virtual machine such as VirtualBox. + +.. _CreateSSH: + +Creating an SSH Tunnel +============================ + +Create an SSH Key +-------------------- + +To generate an SSH key, open a terminal window and run: + +.. code-block:: console + + ssh-keygen -t rsa + +Hit enter three times to accept defaults, or if customization is desired: + + * Enter the file in which to save the key (for example: ``~/.ssh/id_rsa``) + * Enter passphrase (empty for no passphrase) + * Enter same passphrase again + +To see the SSH public key contents, run: + +.. code-block:: console + + cat ~/.ssh/id_rsa.pub + +SSH Into a Remote Machine +---------------------------- + +This process differs somewhat from system to system. However, this section provides general guidance. + +Create/Edit an SSH Configuration File (``~/.ssh/config``) +^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + +If it exists, the SSH ``config`` file is located in the hidden ``.ssh`` directory. 
If it does not exist, opening it will create the file. In a terminal window, run: + +.. code-block:: console + + vi ~/.ssh/config + +Press ``i`` to edit the file, and add an entry in the following format: + +.. code-block:: console + + Host + Hostname + User + IdentityFile ~/.ssh/ + +When finished, hit the ``esc`` key and type ``:wq`` to write the data to the file and quit the file editor. + +.. note:: + + The ``IdentityFile`` line is not required unless the user has multiple SSH keys. However, there is no harm in adding it. + +Concretely, a user logging into an AWS cluster might enter something similar to the following. + +.. code-block:: console + + Host aws + Hostname 50.60.700.80 + User Jane.Doe + IdentityFile ~/.ssh/id_rsa + +Users attempting to authenticate via SSH on GitHub might create the following code block instead: + +.. code-block:: console + + Host github + Hostname github.com + User git + IdentityFile ~/.ssh/id_ed25519 + +SSH Into the Remote System +^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + +To SSH into the remote system, simply run: + +.. code-block:: console + + ssh + +where ```` is the "name_of_your_choice" that was added to the ``config`` file. For example, a user logging into the AWS cluster above would type: + +.. code-block:: console + + ssh aws + +This will create an SSH tunnel between the user's local system and the AWS cluster. The user will be able to work on the AWS cluster by running commands in the terminal window. + +In some cases, the user may be asked if they want to connect: + +.. code-block:: console + + The authenticity of host '50.60.700.80 (50.60.700.80)' can't be established. + ECDSA key fingerprint is SHA256:a0ABbC4cdeDEfFghi+j3kGHlO5mnIJKLMop7NOqPrQR. + Are you sure you want to continue connecting (yes/no/[fingerprint])? + +Enter ``yes`` to continue connecting. The user is responsible for verifying that they are connecting to the correct system. + +.. 
_SSHDataTransfer: + +Data Transfer via SSH +============================ + +Introduction +--------------- + +Users who are working on a remote cloud or HPC system may want to copy files (e.g., graphics plots) to or from their local system. Users can run the ``scp`` command in a new terminal/command prompt window to securely copy these files from their remote system to their local system or vice versa. The structure of the command is: + +.. code-block:: console + + scp [OPTION] [user@SRC_HOST:]file1 [user@DEST_HOST:]file2 + +Here, ``SRC_HOST`` refers to the system where the files are currently located. ``DEST_HOST`` refers to the system that the files will be copied to. ``file1`` is the path to the file or directory to copy, and ``file2`` is the location that the file or directory should be copied to on the ``DEST_HOST`` system. + +.. _SSHDownload: + +Download the Data from a Remote System to a Local System +----------------------------------------------------------- + +.. note:: + + Users should transfer data to or from non-`Level 1 `__ platforms using the recommended approach for that platform. This section outlines some basic guidance, but users may need to supplement with research of their own. On Level 1 systems, users may find it helpful to refer to the `RDHPCS CommonDocs Wiki `__. + +To download data using ``scp``, users can typically adjust one of the following commands for use on their system: + +.. code-block:: console + + scp username@your-IP-address:/path/to/file_or_directory_1 /path/to/file_or_directory_2 + # OR + scp -P 12345 username@localhost:/path/to/file_or_directory_1 path/to/file_or_directory_2 + +To copy an entire directory, use ``scp -r`` instead of ``scp``. + +Users who know the IP address of their remote system can use the first command. For example: + +.. 
code-block:: console + + scp Jane.Doe@10.20.300.40:/contrib/Jane.Doe/expt_dirs/test_community/2019061518/postprd/*.png /Users/janedoe/plots + +This command will copy all files ending in ``.png`` from the remote ``test_community/2019061518/postprd/`` experiment subdirectory into Jane Doe's local ``plots`` directory. + +Users who know their ``localhost`` port number should use the second command and, if requested, enter the password to the remote system. For example: + +.. code-block:: console + + scp -P 3355 Jane.Doe@localhost:/lustre/Jane.Doe/expt_dirs/test_community/2019061518/postprd/*.png . + +This command will copy all files ending in ``.png`` from the ``test_community/2019061518/postprd/`` experiment subdirectory on a remote HPC system into Jane Doe's present working directory (``.``). + +.. attention:: + + Note that all port numbers, IP addresses, and SSH keys included in this chapter are placeholders and do not refer to known systems. They are used purely for illustrative purposes, and users should modify the commands to correspond to their actual systems. \ No newline at end of file diff --git a/docs/UsersGuide/source/TemplateVars.rst b/docs/UsersGuide/source/TemplateVars.rst index 8950de80c8..2677888f5a 100644 --- a/docs/UsersGuide/source/TemplateVars.rst +++ b/docs/UsersGuide/source/TemplateVars.rst @@ -1,8 +1,8 @@ .. _TemplateVars: -=============================================================== -Using Template Variables in the Experiment Configuration Files -=============================================================== +====================== +Template Variables +====================== The SRW App's experiment configuration system supports the use of template variables in ``config_defaults.yaml`` and ``config.yaml``. A template variable --- or "template" --- is an experiment configuration variable that contains references to values of other variables. 
@@ -54,17 +54,22 @@ To generate difference plots, users must use the template variable ``COMOUT_REF` to indicate where the :term:`GRIB2` files from post-processing are located. In *community* mode (i.e., when ``RUN_ENVIR: "community"``), this directory will -take the form ``/path/to/exptdir/$PDY$cyc/postprd``, where ``$PDY`` refers to the -cycle date in YYYYMMDD format, and ``$cyc`` refers to the starting hour of the cycle. +take the form ``/path/to/expt_dirs/expt_name/$PDY$cyc/postprd``, where ``$PDY`` refers +to the cycle date in YYYYMMDD format, and ``$cyc`` refers to the starting hour of the cycle. (These variables are set in previous tasks based on the value of ``DATE_FIRST_CYCL``.) -Concretely, users can set ``COMOUT_REF`` as follows: +Given two experiments, ``expt1`` and ``expt2``, users can generate difference plots by +setting ``COMOUT_REF`` in the ``expt2`` configuration file (``config.yaml``) as follows: .. code-block:: console - COMOUT_REF: '${EXPT_BASEDIR}/${EXPT_SUBDIR}/${PDY}${cyc}/postprd' + COMOUT_REF: '${EXPT_BASEDIR}/expt1/${PDY}${cyc}/postprd' -In *nco* mode, this directory should be set to the location of the ``COMOUT`` directory -(``${COMOUT}`` in the example below) and end with ``${PDY}/${cyc}``. For example: +The ``expt2`` workflow already knows where to find its own post-processed output, so +``COMOUT_REF`` should point to post-processed output for the other experiment (``expt1``). + +In *nco* mode, this directory should be set to the location of the first experiment's +``COMOUT`` directory (``${COMOUT}`` in the example below) and end with ``${PDY}/${cyc}``. +For example: .. code-block:: console diff --git a/docs/UsersGuide/source/Tutorial.rst b/docs/UsersGuide/source/Tutorial.rst new file mode 100644 index 0000000000..bf3711ca87 --- /dev/null +++ b/docs/UsersGuide/source/Tutorial.rst @@ -0,0 +1,592 @@ +.. 
_Tutorial: + +============= +Tutorials +============= + +This chapter walks users through experiment configuration options for various severe weather events. It assumes that users have already (1) :ref:`built the SRW App ` successfully and (2) run the out-of-the-box case contained in ``config.community.yaml`` (and copied to ``config.yaml`` in :numref:`Step %s ` or :numref:`Step %s `) to completion. + +Users can run through the entire set of tutorials or jump to the one that interests them most. The five tutorials address different skills: + + #. :ref:`Severe Weather Over Indianapolis `: Change physics suites and compare graphics plots. + #. :ref:`Cold Air Damming `: Coming soon! + #. :ref:`Southern Plains Winter Weather Event `: Coming soon! + #. :ref:`Halloween Storm `: Coming soon! + #. :ref:`Hurricane Barry `: Coming soon! + +Each section provides a summary of the weather event and instructions for configuring an experiment. + +.. _fcst1: + +Sample Forecast #1: Severe Weather Over Indianapolis +======================================================= + +**Objective:** Modify physics options and compare forecast outputs for similar experiments using the graphics plotting task. + +Weather Summary +-------------------- + +A surface boundary associated with a vorticity maximum over the northern Great Plains moved into an unstable environment over Indianapolis, which led to the development of isolated severe thunderstorms before it congealed into a convective line. The moist air remained over the southern half of the area on the following day. The combination of moist air with daily surface heating resulted in isolated thunderstorms that produced small hail. + +**Weather Phenomena:** Numerous tornado and wind reports (6/15) and hail reports (6/16) + + * `Storm Prediction Center (SPC) Storm Report for 20190615 `__ + * `Storm Prediction Center (SPC) Storm Report for 20190616 `__ + +.. 
figure:: https://github.com/ufs-community/ufs-srweather-app/wiki/IndySevereWeather18z.gif + :alt: Radar animation of severe weather over Indianapolis on June 15, 2019 starting at 18z. The animation shows areas of heavy rain and tornado reports moving from west to east over Indianapolis. + + *Severe Weather Over Indianapolis Starting at 18z* + +Data +------- + +On `Level 1 `__ systems, users can find data for the Indianapolis Severe Weather Forecast in the usual input model data locations (see :numref:`Section %s ` for a list). The data can also be downloaded from the `UFS SRW Application Data Bucket `__. + + * FV3GFS data for the first forecast (``control``) is located at: + + * https://noaa-ufs-srw-pds.s3.amazonaws.com/index.html#input_model_data/FV3GFS/grib2/2019061518/ + + * HRRR and RAP data for the second forecast (``test_expt``) is located at: + + * https://noaa-ufs-srw-pds.s3.amazonaws.com/index.html#input_model_data/HRRR/2019061518/ + * https://noaa-ufs-srw-pds.s3.amazonaws.com/index.html#input_model_data/RAP/2019061518/ + +Load the Regional Workflow +------------------------------- + +To load the regional workflow environment, source the lmod-setup file. Then load the workflow conda environment. From the ``ufs-srweather-app`` directory, run: + +.. code-block:: console + + source etc/lmod-setup.sh # OR: source etc/lmod-setup.csh when running in a csh/tcsh shell + module use /path/to/ufs-srweather-app/modulefiles + module load wflow_ + +where ```` is a valid, lowercased machine name (see ``MACHINE`` in :numref:`Section %s ` for valid values). + +After loading the workflow, users should follow the instructions printed to the console. Usually, the instructions will tell the user to run ``conda activate regional_workflow``. For example, a user on Hera with permissions on the ``nems`` project may issue the following commands to load the regional workflow (replacing ``User.Name`` with their actual username): + +.. 
code-block:: console + + source /scratch1/NCEPDEV/nems/User.Name/ufs-srweather-app/etc/lmod-setup.sh hera + module use /scratch1/NCEPDEV/nems/User.Name/ufs-srweather-app/modulefiles + module load wflow_hera + conda activate regional_workflow + +Configuration +------------------------- + +Navigate to the ``ufs-srweather-app/ush`` directory. The default (or "control") configuration for this experiment is based on the ``config.community.yaml`` file in that directory. Users can copy this file into ``config.yaml`` if they have not already done so: + +.. code-block:: console + + cd /path/to/ufs-srweather-app/ush + cp config.community.yaml config.yaml + +Users can save the location of the ``ush`` directory in an environment variable (``$USH``). This makes it easier to navigate between directories later. For example: + +.. code-block:: console + + export USH=/path/to/ufs-srweather-app/ush + +Users should substitute ``/path/to/ufs-srweather-app/ush`` with the actual path on their system. As long as a user remains logged into their system, they can run ``cd $USH``, and it will take them to the ``ush`` directory. The variable will need to be reset for each login session. + +Experiment 1: Control +^^^^^^^^^^^^^^^^^^^^^^^^ + +Edit the configuration file (``config.yaml``) to include the variables and values in the sample configuration excerpts below. + +.. Hint:: + + To open the configuration file in the command line, users may run the command: + + .. code-block:: console + + vi config.yaml + + To modify the file, hit the ``i`` key and then make any changes required. To close and save, hit the ``esc`` key and type ``:wq`` to write the changes to the file and exit/quit the file. Users may opt to use their preferred code editor instead. + +Start in the ``user:`` section and change the ``MACHINE`` and ``ACCOUNT`` variables. For example, when running on a personal MacOS device, users might set: + +.. 
code-block:: console + + user: + RUN_ENVIR: community + MACHINE: macos + ACCOUNT: none + +For a detailed description of these variables, see :numref:`Section %s `. + +Users do not need to change the ``platform:`` section of the configuration file for this tutorial. The default parameters in the ``platform:`` section pertain to METplus verification, which is not addressed here. For more information on verification, see :numref:`Chapter %s `. + +In the ``workflow:`` section of ``config.yaml``, update ``EXPT_SUBDIR`` and ``PREDEF_GRID_NAME``. + +.. code-block:: console + + workflow: + USE_CRON_TO_RELAUNCH: false + EXPT_SUBDIR: control + CCPP_PHYS_SUITE: FV3_GFS_v16 + PREDEF_GRID_NAME: SUBCONUS_Ind_3km + DATE_FIRST_CYCL: '2019061518' + DATE_LAST_CYCL: '2019061518' + FCST_LEN_HRS: 12 + PREEXISTING_DIR_METHOD: rename + VERBOSE: true + COMPILER: intel + +.. _CronNote: + +.. note:: + + Users may also want to set ``USE_CRON_TO_RELAUNCH: true`` and add ``CRON_RELAUNCH_INTVL_MNTS: 3``. This will automate submission of workflow tasks when running the experiment. However, not all systems have :term:`cron`. + +``EXPT_SUBDIR:`` This variable can be changed to any name the user wants. This tutorial uses ``control`` to establish a baseline, or "control", forecast. Users can choose any name they want, from "gfsv16_physics_fcst" to "forecast1" to "a;skdfj". However, the best names will indicate useful information about the experiment. For example, this tutorial helps users to compare the output from two different forecasts: one that uses the FV3_GFS_v16 physics suite and one that uses the FV3_RRFS_v1beta physics suite. Therefore, "gfsv16_physics_fcst" could be a good alternative directory name. + +``PREDEF_GRID_NAME:`` This experiment uses the SUBCONUS_Ind_3km grid, rather than the default RRFS_CONUS_25km grid. The SUBCONUS_Ind_3km grid is a high-resolution grid (with grid cell size of approximately 3km) that covers a small area of the U.S. centered over Indianapolis, IN. 
For more information on this grid, see :numref:`Section %s `. + +For a detailed description of other ``workflow:`` variables, see :numref:`Section %s `. + +In the ``workflow_switches:`` section, turn on the plotting task by setting ``RUN_TASK_PLOT_ALLVARS`` to true. All other variables should remain as they are. + +.. code-block:: console + + workflow_switches: + RUN_TASK_MAKE_GRID: true + RUN_TASK_MAKE_OROG: true + RUN_TASK_MAKE_SFC_CLIMO: true + RUN_TASK_GET_OBS_CCPA: false + RUN_TASK_GET_OBS_MRMS: false + RUN_TASK_GET_OBS_NDAS: false + RUN_TASK_VX_GRIDSTAT: false + RUN_TASK_VX_POINTSTAT: false + RUN_TASK_VX_ENSGRID: false + RUN_TASK_VX_ENSPOINT: false + RUN_TASK_PLOT_ALLVARS: true + +For a detailed description of the ``workflow_switches:`` variables, see :numref:`Section %s `. + +In the ``task_get_extrn_ics:`` section, add ``USE_USER_STAGED_EXTRN_FILES`` and ``EXTRN_MDL_SOURCE_BASEDIR_ICS``. Users will need to adjust the file path to reflect the location of data on their system (see :numref:`Section %s ` for locations on `Level 1 `__ systems). + +.. code-block:: console + + task_get_extrn_ics: + EXTRN_MDL_NAME_ICS: FV3GFS + FV3GFS_FILE_FMT_ICS: grib2 + USE_USER_STAGED_EXTRN_FILES: true + EXTRN_MDL_SOURCE_BASEDIR_ICS: /path/to/UFS_SRW_App/develop/input_model_data/FV3GFS/grib2/${yyyymmddhh} + +For a detailed description of the ``task_get_extrn_ics:`` variables, see :numref:`Section %s `. + +Similarly, in the ``task_get_extrn_lbcs:`` section, add ``USE_USER_STAGED_EXTRN_FILES`` and ``EXTRN_MDL_SOURCE_BASEDIR_LBCS``. Users will need to adjust the file path to reflect the location of data on their system (see :numref:`Section %s ` for locations on Level 1 systems). + +..
code-block:: console + + task_get_extrn_lbcs: + EXTRN_MDL_NAME_LBCS: FV3GFS + LBC_SPEC_INTVL_HRS: 6 + FV3GFS_FILE_FMT_LBCS: grib2 + USE_USER_STAGED_EXTRN_FILES: true + EXTRN_MDL_SOURCE_BASEDIR_LBCS: /path/to/UFS_SRW_App/develop/input_model_data/FV3GFS/grib2/${yyyymmddhh} + +For a detailed description of the ``task_get_extrn_lbcs:`` variables, see :numref:`Section %s `. + +Users do not need to modify the ``task_run_fcst:`` section for this tutorial. + +Lastly, in the ``task_plot_allvars:`` section, add ``PLOT_FCST_INC: 6`` and ``PLOT_DOMAINS: ["regional"]``. Users may also want to add ``PLOT_FCST_START: 0`` and ``PLOT_FCST_END: 12`` explicitly, but these can be omitted since the default values are the same as the forecast start and end time respectively. + +.. code-block:: console + + task_plot_allvars: + COMOUT_REF: "" + PLOT_FCST_INC: 6 + PLOT_DOMAINS: ["regional"] + +``PLOT_FCST_INC:`` This variable indicates the forecast hour increment for the plotting task. By setting the value to ``6``, the task will generate a ``.png`` file for every 6th forecast hour starting from 18z on June 15, 2019 (the 0th forecast hour) through the 12th forecast hour (June 16, 2019 at 06z). + +``PLOT_DOMAINS:`` The plotting scripts are designed to generate plots over the entire CONUS by default, but by setting this variable to ["regional"], the experiment will generate plots for the smaller SUBCONUS_Ind_3km regional domain instead. + +After configuring the forecast, users can generate the forecast by running: + +.. code-block:: console + + ./generate_FV3LAM_wflow.py + +To see experiment progress, users should navigate to their experiment directory. Then, use the ``rocotorun`` command to launch new workflow tasks and ``rocotostat`` to check on experiment progress. + +.. 
code-block:: console + + cd /path/to/expt_dirs/control + rocotorun -w FV3LAM_wflow.xml -d FV3LAM_wflow.db -v 10 + rocotostat -w FV3LAM_wflow.xml -d FV3LAM_wflow.db -v 10 + +Users will need to rerun the ``rocotorun`` and ``rocotostat`` commands above regularly and repeatedly to continue submitting workflow tasks and receiving progress updates. + +.. note:: + + When using cron to automate the workflow submission (as described :ref:`above `), users can omit the ``rocotorun`` command and simply use ``rocotostat`` to check on progress periodically. + +Users can save the location of the ``control`` directory in an environment variable (``$CONTROL``). This makes it easier to navigate between directories later. For example: + +.. code-block:: console + + export CONTROL=/path/to/expt_dirs/control + +Users should substitute ``/path/to/expt_dirs/control`` with the actual path on their system. As long as a user remains logged into their system, they can run ``cd $CONTROL``, and it will take them to the ``control`` experiment directory. The variable will need to be reset for each login session. + +Experiment 2: Test +^^^^^^^^^^^^^^^^^^^^^^ + +Once the control case is running, users can return to the ``config.yaml`` file (in ``$USH``) and adjust the parameters for a new forecast. Most of the variables will remain the same. However, users will need to adjust ``EXPT_SUBDIR`` and ``CCPP_PHYS_SUITE`` in the ``workflow:`` section as follows: + +.. code-block:: console + + workflow: + EXPT_SUBDIR: test_expt + CCPP_PHYS_SUITE: FV3_RRFS_v1beta + +``EXPT_SUBDIR:`` This name must be different than the ``EXPT_SUBDIR`` name used in the previous forecast experiment. Otherwise, the first forecast experiment will be overwritten. ``test_expt`` is used here, but the user may select a different name if desired. 
+ +``CCPP_PHYS_SUITE:`` The FV3_RRFS_v1beta physics suite was specifically created for convection-allowing scales and is the precursor to the operational physics suite that will be used in the Rapid Refresh Forecast System (:term:`RRFS`). + +.. hint:: + + Later, users may want to conduct additional experiments using the FV3_HRRR and FV3_WoFS_v0 physics suites. Like FV3_RRFS_v1beta, these physics suites were designed for use with high-resolution grids for storm-scale predictions. + +Next, users will need to modify the data parameters in ``task_get_extrn_ics:`` and ``task_get_extrn_lbcs:`` to use HRRR and RAP data rather than FV3GFS data. Users will need to change the following lines in each section: + +.. code-block:: console + + task_get_extrn_ics: + EXTRN_MDL_NAME_ICS: HRRR + EXTRN_MDL_SOURCE_BASEDIR_ICS: /path/to/UFS_SRW_App/develop/input_model_data/HRRR/${yyyymmddhh} + task_get_extrn_lbcs: + EXTRN_MDL_NAME_LBCS: RAP + EXTRN_MDL_SOURCE_BASEDIR_LBCS: /path/to/UFS_SRW_App/develop/input_model_data/RAP/${yyyymmddhh} + EXTRN_MDL_LBCS_OFFSET_HRS: '-0' + +HRRR and RAP data are better than FV3GFS data for use with the FV3_RRFS_v1beta physics scheme because these datasets use the same physics :term:`parameterizations` that are in the FV3_RRFS_v1beta suite. They focus on small-scale weather phenomena involved in storm development, so forecasts tend to be more accurate when HRRR/RAP data are paired with FV3_RRFS_v1beta and a high-resolution (e.g., 3-km) grid. Using HRRR/RAP data with FV3_RRFS_v1beta also limits the "spin-up adjustment" that takes place when initializing with model data coming from different physics. + +``EXTRN_MDL_LBCS_OFFSET_HRS:`` This variable allows users to use lateral boundary conditions (LBCs) from a previous forecast run that was started earlier than the start time of the forecast being configured in this experiment. 
This variable is set to 0 by default except when using RAP data; with RAP data, the default value is 3, so the forecast will look for LBCs from a forecast started 3 hours earlier (i.e., at 2019061515 --- 15z --- instead of 2019061518). To avoid this, users must set ``EXTRN_MDL_LBCS_OFFSET_HRS`` explicitly. + +Add a section to ``config.yaml`` to increase the maximum wall time (``WTIME_RUN_POST``) for the postprocessing tasks. The wall time is the maximum length of time a task is allowed to run. On some systems, the default of 15 minutes may be enough, but on others (e.g., NOAA Cloud), the post-processing time exceeds 15 minutes, so the tasks fail. + +.. code-block:: console + + task_run_post: + WTIME_RUN_POST: 00:20:00 + +Lastly, users must set the ``COMOUT_REF`` variable in the ``task_plot_allvars:`` section to create difference plots that compare output from the two experiments. ``COMOUT_REF`` is a template variable, so it references other workflow variables within it (see :numref:`Section %s ` for details on template variables). ``COMOUT_REF`` should provide the path to the ``control`` experiment forecast output using single quotes as shown below: + +.. code-block:: console + + task_plot_allvars: + COMOUT_REF: '${EXPT_BASEDIR}/control/${PDY}${cyc}/postprd' + +Here, ``$EXPT_BASEDIR`` is the path to the main experiment directory (named ``expt_dirs`` by default). ``$PDY`` refers to the cycle date in YYYYMMDD format, and ``$cyc`` refers to the starting hour of the cycle. ``postprd`` contains the post-processed data from the experiment. Therefore, ``COMOUT_REF`` will refer to ``control/2019061518/postprd`` and compare those plots against the ones in ``test_expt/2019061518/postprd``. + +After configuring the forecast, users can generate the second forecast by running: + +.. code-block:: console + + ./generate_FV3LAM_wflow.py + +To see experiment progress, users should navigate to their experiment directory. 
As in the first forecast, the following commands allow users to launch new workflow tasks and check on experiment progress. + +.. code-block:: console + + cd /path/to/expt_dirs/test_expt + rocotorun -w FV3LAM_wflow.xml -d FV3LAM_wflow.db -v 10 + rocotostat -w FV3LAM_wflow.xml -d FV3LAM_wflow.db -v 10 + +.. note:: + + When using cron to automate the workflow submission (as described :ref:`above `), users can omit the ``rocotorun`` command and simply use ``rocotostat`` to check on progress periodically. + +.. note:: + + If users have not automated their workflow using cron, they will need to ensure that they continue issuing ``rocotorun`` commands to launch all of the tasks in each experiment. While switching between experiment directories to run ``rocotorun`` and ``rocotostat`` commands in both directories is possible, it may be easier to finish the ``control`` experiment's tasks before starting on ``test_expt``. + + +Compare and Analyze Results +----------------------------- + +Navigate to ``test_expt/2019061518/postprd``. This directory contains the post-processed data generated by the :term:`UPP` from the forecast. After the ``plot_allvars`` task completes, this directory will contain ``.png`` images for several forecast variables including 2-m temperature, 2-m dew point temperature, 10-m winds, accumulated precipitation, composite reflectivity, and surface-based CAPE/CIN. Plots with a ``_diff`` label in the file name are plots that compare the ``control`` forecast and the ``test_expt`` forecast. + +Copy ``.png`` Files onto Local System +^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + +Users who are working on the cloud or on an HPC cluster may want to copy the ``.png`` files onto their local system to view in their preferred image viewer. Detailed instructions are available in the :ref:`Introduction to SSH & Data Transfer `. 
+ +In summary, users can run the ``scp`` command in a new terminal/command prompt window to securely copy files from a remote system to their local system if an SSH tunnel is already established between the local system and the remote system. Users can adjust one of the following commands for their system: + +.. code-block:: console + + scp username@your-IP-address:/path/to/source_file_or_directory /path/to/destination_file_or_directory + # OR + scp -P 12345 username@localhost:/path/to/source_file_or_directory path/to/destination_file_or_directory + +Users would need to modify ``username``, ``your-IP-address``, ``-P 12345``, and the file paths to reflect their systems' information. See the :ref:`Introduction to SSH & Data Transfer ` for example commands. + +.. _ComparePlots: + +Compare Images +^^^^^^^^^^^^^^^^^^ + +The plots generated by the experiment cover a variety of variables. After downloading the ``.png`` plots, users can open and view the plots on their local system in their preferred image viewer. :numref:`Table %s ` lists the available plots (``hhh`` corresponds to the three-digit forecast hour): + +.. _DiffPlots: + +.. 
table:: Sample Indianapolis Forecast Plots + + +-----------------------------------------+-----------------------------------+ + | Field | File Name | + +=========================================+===================================+ + | 2-meter dew point temperature | 2mdew_diff_regional_fhhh.png | + +-----------------------------------------+-----------------------------------+ + | 2-meter temperature | 2mt_diff_regional_fhhh.png | + +-----------------------------------------+-----------------------------------+ + | 10-meter winds | 10mwind_diff_regional_fhhh.png | + +-----------------------------------------+-----------------------------------+ + | 250-hPa winds | 250wind_diff_regional_fhhh.png | + +-----------------------------------------+-----------------------------------+ + | Accumulated precipitation | qpf_diff_regional_fhhh.png | + +-----------------------------------------+-----------------------------------+ + | Composite reflectivity | refc_diff_regional_fhhh.png | + +-----------------------------------------+-----------------------------------+ + | Surface-based CAPE/CIN | sfcape_diff_regional_fhhh.png | + +-----------------------------------------+-----------------------------------+ + | Sea level pressure | slp_diff_regional_fhhh.png | + +-----------------------------------------+-----------------------------------+ + | Max/Min 2 - 5 km updraft helicity | uh25_diff_regional_fhhh.png | + +-----------------------------------------+-----------------------------------+ + +Each difference plotting ``.png`` file contains three subplots. The plot for the second experiment (``test_expt``) appears in the top left corner, and the plot for the first experiment (``control``) appears in the top right corner. The difference plot that compares both experiments appear at the bottom. Areas of white signify no difference between the plots. 
Therefore, if the forecast output from both experiments is exactly the same, the difference plot will show a white square (see :ref:`Sea Level Pressure ` for an example). If the forecast output from both experiments is extremely different, the plot will show lots of color. + +In general, it is expected that the results for ``test_expt`` (using FV3_RRFS_v1beta physics and HRRR/RAP data) will be more accurate than the results for ``control`` (using FV3_GFS_v16 physics and FV3GFS data) because the physics in ``test_expt`` is designed for high-resolution, storm-scale prediction over a short period of time. The ``control`` experiment physics is better for predicting the evolution of larger scale weather phenomena, like jet stream movement and cyclone development, since the cumulus physics in the FV3_GFS_v16 suite is not configured to run at 3-km resolution. + +Analysis +^^^^^^^^^^^ + +.. _fcst1_slp: + +Sea Level Pressure +````````````````````` +In the Sea Level Pressure (SLP) plots, the ``control`` and ``test_expt`` plots are nearly identical at forecast hour f000, so the difference plot is entirely white. + +.. figure:: https://github.com/ufs-community/ufs-srweather-app/wiki/fcst1_plots/slp_diff_regional_f000.png + :align: center + + *Difference Plot for Sea Level Pressure at f000* + +As the forecast continues, the results begin to diverge, as evidenced by the spattering of light blue dispersed across the f006 SLP difference plot. + +.. figure:: https://github.com/ufs-community/ufs-srweather-app/wiki/fcst1_plots/slp_diff_regional_f006.png + :align: center + + *Difference Plot for Sea Level Pressure at f006* + +The predictions diverge further by f012, where a solid section of light blue in the top left corner of the difference plot indicates that to the northwest of Indianapolis, the SLP predictions for the ``control`` forecast were slightly lower than the predictions for the ``test_expt`` forecast. + +.. 
figure:: https://github.com/ufs-community/ufs-srweather-app/wiki/fcst1_plots/slp_diff_regional_f012.png + :align: center + + *Difference Plot for Sea Level Pressure at f012* + +.. _fcst1_refc: + +Composite Reflectivity +`````````````````````````` + +Reflectivity images visually represent the weather based on the energy (measured in decibels [dBZ]) reflected back from radar. Composite reflectivity generates an image based on reflectivity scans at multiple elevation angles, or "tilts", of the antenna. See https://www.weather.gov/jetstream/refl for a more detailed explanation of composite reflectivity. + +At f000, the ``test_expt`` plot (top left) is showing more severe weather than the ``control`` plot (top right). The ``test_expt`` plot shows a vast swathe of the Indianapolis region covered in yellow with spots of orange, corresponding to composite reflectivity values of 35+ dBZ. The ``control`` plot radar image covers a smaller area of the grid, and with the exception of a few yellow spots, composite reflectivity values are <35 dBZ. The difference plot (bottom) shows areas where the ``test_expt`` plot (red) and the ``control`` plot (blue) have reflectivity values greater than 20 dBZ. The ``test_expt`` plot has significantly more areas with high composite reflectivity values. + +.. figure:: https://github.com/ufs-community/ufs-srweather-app/wiki/fcst1_plots/refc_diff_regional_f000.png + :align: center + + *Composite Reflectivity at f000* + +As the forecast progresses, the radar images resemble each other more (see :numref:`Figure %s `). Both the ``test_expt`` and ``control`` plots show the storm gaining energy (with more orange and red areas), rotating counterclockwise, and moving east. Thus, both forecasts do a good job of picking up on the convection. However, the ``test_expt`` forecast still indicates a higher-energy storm with more areas of *dark* red. 
It appears that the ``test_expt`` case was able to resolve more discrete storms over northwest Indiana and in the squall line. The ``control`` plot has less definition and depicts widespread storms concentrated together over the center of the state. + +.. _refc006: + +.. figure:: https://github.com/ufs-community/ufs-srweather-app/wiki/fcst1_plots/refc_diff_regional_f006.png + :align: center + + *Composite reflectivity at f006 shows storm gathering strength* + +At forecast hour 12, the plots for each forecast show a similar evolution of the storm with both resolving a squall line. The ``test_expt`` plot shows a more intense squall line with discrete cells (areas of high composite reflectivity in dark red), which could lead to severe weather. The ``control`` plot shows an overall decrease in composite reflectivity values compared to f006. It also orients the squall line more northward with less intensity, possibly due to convection from the previous forecast runs cooling the atmosphere. In short, ``test_expt`` suggests that the storm will still be going strong at 06z on June 16, 2019, whereas the ``control`` suggests that the storm will begin to let up. + +.. figure:: https://github.com/ufs-community/ufs-srweather-app/wiki/fcst1_plots/refc_diff_regional_f012.png + :align: center + + *Composite Reflectivity at f012* + +.. _fcst1_sfcape: + +Surface-Based CAPE/CIN + `````````````````````````` + +Background + """""""""""" + +The National Weather Service (:term:`NWS`) defines Surface-Based Convective Available Potential Energy (CAPE) as "the amount of fuel available to a developing thunderstorm." According to NWS, CAPE "describes the instability of the atmosphere and provides an approximation of updraft strength within a thunderstorm. A higher value of CAPE means the atmosphere is more unstable and would therefore produce a stronger updraft" (see `NWS, What is CAPE? `__ for further explanation).
+ +According to the NWS `Storm Prediction Center `__, Convective Inhibition (CIN) "represents the 'negative' area on a sounding that must be overcome for storm initiation." In effect, it measures negative buoyancy (-B) --- the opposite of CAPE, which measures positive buoyancy (B or B+) of an air parcel. + +.. + More CAPE/CIN info: https://www.e-education.psu.edu/files/meteo361/image/Section4/cape_primer0301.html + +Interpreting the Plots +"""""""""""""""""""""""" + +CAPE measures are represented on the plots using color. They range in value from 100-5000 Joules per kilogram (J/kg). Lower values are represented by cool colors and higher values are represented by warm colors. In general, values of approximately 1000+ J/kg can lead to severe thunderstorms, although this is also dependent on season and location. + +CIN measures are displayed on the plots using hatch marks: + + * ``*`` means CIN <= -500 J/kg + * ``+`` means -500 < CIN <= -250 J/kg + * ``/`` means -250 < CIN <= -100 J/kg + * ``.`` means -100 < CIN <= -25 J/kg + +In general, the higher the CIN values are (i.e., the closer they are to zero), the lower the convective inhibition and the greater the likelihood that a storm will develop. Low CIN values (corresponding to high convective inhibition) make it unlikely that a storm will develop even in the presence of high CAPE. + +At the 0th forecast hour, the ``test_expt`` plot (below, left) shows lower values of CAPE and higher values of CIN than in the ``control`` plot (below, right). This means that ``test_expt`` is projecting lower potential energy available for a storm but also lower inhibition, which means that less energy would be required for a storm to develop. The difference between the two plots is particularly evident in the southwest corner of the difference plot, which shows a 1000+ J/kg difference between the two plots. + +.. 
figure:: https://github.com/ufs-community/ufs-srweather-app/wiki/fcst1_plots/sfcape_diff_regional_f000.png + :width: 1200 + :align: center + + *CAPE/CIN Difference Plot at f000* + +At the 6th forecast hour, both ``test_expt`` and ``control`` plots are forecasting higher CAPE values overall. Both plots also predict higher CAPE values to the southwest of Indianapolis than to the northeast. This makes sense because the storm was passing from west to east. However, the difference plot shows that the ``control`` forecast is predicting higher CAPE values primarily to the southwest of Indianapolis, whereas ``test_expt`` is projecting a rise in CAPE values throughout the region. The blue region of the difference plot indicates where ``test_expt`` predictions are higher than the ``control`` predictions; the red/orange region shows places where ``control`` predicts significantly higher CAPE values than ``test_expt`` does. + +.. figure:: https://github.com/ufs-community/ufs-srweather-app/wiki/fcst1_plots/sfcape_diff_regional_f006.png + :width: 1200 + :align: center + + *CAPE/CIN Difference Plot at f006* + +At the 12th forecast hour, the ``control`` plot indicates that CAPE may be decreasing overall. ``test_expt``, however, shows that areas of high CAPE remain and continue to grow, particularly to the east. The blue areas of the difference plot indicate that ``test_expt`` is predicting higher CAPE than ``control`` everywhere but in the center of the plot. + +.. figure:: https://github.com/ufs-community/ufs-srweather-app/wiki/fcst1_plots/sfcape_diff_regional_f012.png + :width: 1200 + :align: center + + *CAPE/CIN Difference Plot at f012* + +Try It! +---------- + +Option 1: Adjust frequency of forecast plots. +^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + +For a simple extension of this tutorial, users can adjust ``PLOT_FCST_INC`` to output plots more frequently. For example, users can set ``PLOT_FCST_INC: 1`` to produce plots for every hour of the forecast. 
This would allow users to conduct a more fine-grained visual comparison of how each forecast evolved. + +Option 2: Compare output from additional physics suites. +^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + +Users are encouraged to conduct additional experiments using the FV3_HRRR and FV3_WoFS_v0 physics suites. Like FV3_RRFS_v1beta, these physics suites were designed for use with high-resolution grids for storm-scale predictions. Compare them to each other or to the control! + +Users may find the difference plots for :term:`updraft helicity` particularly informative. The FV3_GFS_v16 physics suite does not contain updraft helicity output in its ``diag_table`` files, so the difference plot generated in this tutorial is empty. However, high updraft helicity values increase the probability that a storm will become a supercell thunderstorm or a tornado. Comparing the results from two physics suites that measure this parameter can therefore prove insightful. + +.. _fcst2: + +Sample Forecast #2: Cold Air Damming +======================================== + +Weather Summary +----------------- + +Cold air damming occurs when cold dense air is topographically trapped along the leeward (downwind) side of a mountain. Starting on February 3, 2020, weather conditions leading to cold air damming began to develop east of the Appalachian mountains. By February 6-7, 2020, this cold air damming caused high winds, flash flood advisories, and wintery conditions. + +**Weather Phenomena:** Cold air damming + + * `Storm Prediction Center (SPC) Storm Report for 20200205 `__ + * `Storm Prediction Center (SPC) Storm Report for 20200206 `__ + * `Storm Prediction Center (SPC) Storm Report for 20200207 `__ + +.. figure:: https://github.com/ufs-community/ufs-srweather-app/wiki/ColdAirDamming.jpg + :alt: Radar animation of precipitation resulting from cold air damming in the southern Appalachian mountains. 
+ + *Precipitation Resulting from Cold Air Damming East of the Appalachian Mountains* + +Tutorial Content +------------------- + +Coming Soon! + +.. _fcst3: + +Sample Forecast #3: Southern Plains Winter Weather Event +=========================================================== + +Weather Summary +-------------------- + +A polar vortex brought arctic air to much of the U.S. and Mexico. A series of cold fronts and vorticity disturbances helped keep this cold air in place for an extended period of time resulting in record-breaking cold temperatures for many southern states and Mexico. This particular case captures two winter weather disturbances between February 14, 2021 at 06z and February 17, 2021 at 06z that brought several inches of snow to Oklahoma City. A lull on February 16, 2021 resulted in record daily low temperatures. + +**Weather Phenomena:** Snow and record-breaking cold temperatures + +.. figure:: https://github.com/ufs-community/ufs-srweather-app/wiki/SouthernPlainsWinterWeather.jpg + :alt: Radar animation of the Southern Plains Winter Weather Event centered over Oklahoma City. Animation starts on February 14, 2021 at 6h00 UTC and ends on February 17, 2021 at 6h00 UTC. + + *Southern Plains Winter Weather Event Over Oklahoma City* + +.. COMMENT: Upload a png to the SRW wiki and change the hyperlink to point to that. + +Tutorial Content +------------------- + +Coming Soon! + +.. _fcst4: + +Sample Forecast #4: Halloween Storm +======================================= + +Weather Summary +-------------------- + +A line of severe storms brought strong winds, flash flooding, and tornadoes to the eastern half of the US. + +**Weather Phenomena:** Flooding and high winds + + * `Storm Prediction Center (SPC) Storm Report for 20191031 `__ + +.. figure:: https://github.com/ufs-community/ufs-srweather-app/wiki/HalloweenStorm.jpg + :alt: Radar animation of the Halloween Storm that swept across the Eastern United States in 2019. 
+ + *Halloween Storm 2019* + +Tutorial Content +------------------- + +Coming Soon! + +.. _fcst5: + +Sample Forecast #5: Hurricane Barry +======================================= + +Weather Summary +-------------------- + +Hurricane Barry made landfall in Louisiana on July 11, 2019 as a Category 1 hurricane. It produced widespread flooding in the region and had a peak wind speed of 72 mph and a minimum pressure of 992 hPa. + +**Weather Phenomena:** Flooding, wind, and tornado reports + + * `Storm Prediction Center (SPC) Storm Report for 20190713 `__ + * `Storm Prediction Center (SPC) Storm Report for 20190714 `__ + +.. figure:: https://github.com/ufs-community/ufs-srweather-app/wiki/HurricaneBarry_Making_Landfall.jpg + :alt: Radar animation of Hurricane Barry making landfall. + + *Hurricane Barry Making Landfall* + +Tutorial Content +------------------- + +Coming Soon! diff --git a/docs/UsersGuide/source/VXCases.rst b/docs/UsersGuide/source/VXCases.rst index 09903fdaa4..2198ce10c4 100644 --- a/docs/UsersGuide/source/VXCases.rst +++ b/docs/UsersGuide/source/VXCases.rst @@ -201,7 +201,7 @@ Compare Once the experiment has completed (i.e., all tasks have "SUCCEEDED" and the end of the ``log.launch_FV3LAM_wflow`` file lists "Workflow status: SUCCESS"), users can compare their forecast results against the forecast results provided in the ``Indy-Severe-Weather`` directory downloaded in :numref:`Section %s `. This directory contains the forecast output and plots from NOAA developers under the ``postprd`` directory and METplus verification files under the ``metprd`` directory. -Qualitative Comparision of the Plots +Qualitative Comparison of the Plots ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ Comparing the plots is relatively straightforward since they are in ``.png`` format, and most computers can render them in their default image viewer. 
:numref:`Table %s ` lists plots that are available every 6 hours of the forecast (where ``hhh`` is replaced by the three-digit forecast hour): diff --git a/docs/UsersGuide/source/index.rst b/docs/UsersGuide/source/index.rst index 9fc9c9acbb..651881337e 100644 --- a/docs/UsersGuide/source/index.rst +++ b/docs/UsersGuide/source/index.rst @@ -16,6 +16,7 @@ UFS Short-Range Weather App Users Guide ContainerQuickstart BuildSRW RunSRW + Tutorial Components InputOutputFiles LAMGrids diff --git a/jobs/JREGIONAL_PLOT_ALLVARS b/jobs/JREGIONAL_PLOT_ALLVARS index 29f4371a4e..49eb422c3a 100755 --- a/jobs/JREGIONAL_PLOT_ALLVARS +++ b/jobs/JREGIONAL_PLOT_ALLVARS @@ -75,6 +75,7 @@ $SCRIPTSdir/exregional_plot_allvars.py \ --inc ${PLOT_FCST_INC:-1} \ --comout ${COMOUT} \ --cartopy-dir ${FIXshp} \ + --plot-domains "${PLOT_DOMAINS[@]}" \ --domain ${GRID_NAME} || \ print_err_msg_exit "\ Call to ex-script corresponding to J-job \"${scrfunc_fn}\" failed." @@ -89,6 +90,7 @@ if [ ! -z $COMOUT_REF ]; then --comout-1 ${COMOUT} \ --comout-2 ${COMOUT_REF} \ --cartopy-dir ${FIXshp} \ + --plot-domains "${PLOT_DOMAINS[@]}" \ --domain ${GRID_NAME} || \ print_err_msg_exit "\ Call to ex-script corresponding to J-job \"${scrfunc_fn}\" failed." diff --git a/jobs/JREGIONAL_POINT_SOURCE b/jobs/JREGIONAL_POINT_SOURCE index 67cdb087c0..65f3a739d3 100755 --- a/jobs/JREGIONAL_POINT_SOURCE +++ b/jobs/JREGIONAL_POINT_SOURCE @@ -8,7 +8,7 @@ #----------------------------------------------------------------------- # . $USHdir/source_util_funcs.sh -source_config_for_task "task_make_grid|task_run_fcst|cpl_aqm_parm" ${GLOBAL_VAR_DEFNS_FP} +source_config_for_task "task_run_fcst|cpl_aqm_parm" ${GLOBAL_VAR_DEFNS_FP} . $USHdir/job_preamble.sh # #----------------------------------------------------------------------- @@ -57,7 +57,7 @@ using NEXUS which will output for FV3 (in NetCDF format). 
if [ $RUN_ENVIR = "nco" ]; then export INPUT_DATA="${COMIN}" else - export INPUT_DATA="${COMIN}${SLASH_ENSMEM_SUBDIR}" + export INPUT_DATA="${COMIN}${SLASH_ENSMEM_SUBDIR}/INPUT" fi mkdir_vrfy -p "${INPUT_DATA}" # @@ -77,7 +77,7 @@ mkdir_vrfy -p "${DATA}" # #----------------------------------------------------------------------- # -time $SCRIPTSdir/exregional_point_source.sh || \ +$SCRIPTSdir/exregional_point_source.sh || \ print_err_msg_exit "\ Call to ex-script corresponding to J-job \"${scrfunc_fn}\" failed." # diff --git a/modulefiles/build_cheyenne_gnu.lua b/modulefiles/build_cheyenne_gnu.lua index 705eac625a..da6d887e6e 100644 --- a/modulefiles/build_cheyenne_gnu.lua +++ b/modulefiles/build_cheyenne_gnu.lua @@ -14,7 +14,7 @@ setenv("MKLROOT", "/glade/u/apps/opt/intel/2022.1/mkl/latest") load(pathJoin("ncarcompilers", os.getenv("ncarcompilers_ver") or "0.5.0")) unload("netcdf") -prepend_path("MODULEPATH","/glade/work/epicufsrt/GMTB/tools/gnu/11.2.0/hpc-stack-v1.2.0/modulefiles/stack") +prepend_path("MODULEPATH","/glade/work/epicufsrt/contrib/hpc-stack/gnu11.2.0/modulefiles/stack") load(pathJoin("hpc", os.getenv("hpc_ver") or "1.2.0")) load(pathJoin("hpc-gnu", os.getenv("hpc_gnu_ver") or "11.2.0")) load(pathJoin("hpc-mpt", os.getenv("hpc_mpt_ver") or "2.25")) @@ -25,8 +25,8 @@ load(pathJoin("g2", os.getenv("g2_ver") or "3.4.5")) load(pathJoin("esmf", os.getenv("esmf_ver") or "8.3.0b09")) load(pathJoin("netcdf", os.getenv("netcdf_ver") or "4.7.4")) load(pathJoin("libpng", os.getenv("libpng_ver") or "1.6.37")) -load(pathJoin("pio", os.getenv("pio_ver") or "2.5.3")) -load(pathJoin("fms", os.getenv("fms_ver") or "2022.01")) +load(pathJoin("pio", os.getenv("pio_ver") or "2.5.7")) +load(pathJoin("fms", os.getenv("fms_ver") or "2022.04")) setenv("CMAKE_C_COMPILER","mpicc") setenv("CMAKE_CXX_COMPILER","mpicxx") diff --git a/modulefiles/build_cheyenne_intel.lua b/modulefiles/build_cheyenne_intel.lua index 3f2e8a73b6..ebbaa1d069 100644 --- 
a/modulefiles/build_cheyenne_intel.lua +++ b/modulefiles/build_cheyenne_intel.lua @@ -14,7 +14,7 @@ load(pathJoin("python", os.getenv("python_ver") or "3.7.9")) load(pathJoin("ncarcompilers", os.getenv("ncarcompilers_ver") or "0.5.0")) unload("netcdf") -prepend_path("MODULEPATH","/glade/work/epicufsrt/GMTB/tools/intel/2022.1/hpc-stack-v1.2.0_6eb6/modulefiles/stack") +prepend_path("MODULEPATH","/glade/work/epicufsrt/contrib/hpc-stack/intel2022.1/modulefiles/stack") load(pathJoin("hpc", os.getenv("hpc_ver") or "1.2.0")) load(pathJoin("hpc-intel", os.getenv("hpc_intel_ver") or "2022.1")) load(pathJoin("hpc-mpt", os.getenv("hpc_mpt_ver") or "2.25")) @@ -25,8 +25,8 @@ load(pathJoin("g2", os.getenv("g2_ver") or "3.4.5")) load(pathJoin("esmf", os.getenv("esmf_ver") or "8.3.0b09")) load(pathJoin("netcdf", os.getenv("netcdf_ver") or "4.7.4")) load(pathJoin("libpng", os.getenv("libpng_ver") or "1.6.37")) -load(pathJoin("pio", os.getenv("pio_ver") or "2.5.3")) -load(pathJoin("fms", os.getenv("fms_ver") or "2022.01")) +load(pathJoin("pio", os.getenv("pio_ver") or "2.5.7")) +load(pathJoin("fms", os.getenv("fms_ver") or "2022.04")) setenv("CMAKE_C_COMPILER","mpicc") setenv("CMAKE_CXX_COMPILER","mpicpc") diff --git a/modulefiles/build_jet_intel.lua b/modulefiles/build_jet_intel.lua index a2fbeadf60..01c120daa3 100644 --- a/modulefiles/build_jet_intel.lua +++ b/modulefiles/build_jet_intel.lua @@ -10,7 +10,7 @@ load("sutils") load(pathJoin("cmake", os.getenv("cmake_ver") or "3.20.1")) -prepend_path("MODULEPATH","/lfs4/HFIP/hfv3gfs/nwprod/hpc-stack/libs/modulefiles/stack") +prepend_path("MODULEPATH","/mnt/lfs4/HFIP/hfv3gfs/role.epic/hpc-stack/libs/intel-2022.1.2/modulefiles/stack") load(pathJoin("hpc", os.getenv("hpc_ver") or "1.2.0")) load(pathJoin("hpc-intel", os.getenv("hpc_intel_ver") or "2022.1.2")) load(pathJoin("hpc-impi", os.getenv("hpc_impi_ver") or "2022.1.2")) diff --git a/modulefiles/build_orion_intel.lua b/modulefiles/build_orion_intel.lua index 92795c6f8c..3b150e6d48 
100644 --- a/modulefiles/build_orion_intel.lua +++ b/modulefiles/build_orion_intel.lua @@ -11,7 +11,7 @@ load("noaatools") load(pathJoin("cmake", os.getenv("cmake_ver") or "3.22.1")) load(pathJoin("python", os.getenv("python_ver") or "3.9.2")) -prepend_path("MODULEPATH","/apps/contrib/NCEP/libs/hpc-stack/modulefiles/stack") +prepend_path("MODULEPATH","/work/noaa/epic-ps/role-epic-ps/hpc-stack/libs/intel-2022.1.2/modulefiles/stack") load(pathJoin("hpc", os.getenv("hpc_ver") or "1.2.0")) load(pathJoin("hpc-intel", os.getenv("hpc_intel_ver") or "2022.1.2")) load(pathJoin("hpc-impi", os.getenv("hpc_impi_ver") or "2022.1.2")) diff --git a/modulefiles/build_wcoss2_intel.lua b/modulefiles/build_wcoss2_intel.lua index c8956142ee..cdcd4869b8 100644 --- a/modulefiles/build_wcoss2_intel.lua +++ b/modulefiles/build_wcoss2_intel.lua @@ -11,20 +11,12 @@ load(pathJoin("PrgEnv-intel", os.getenv("PrgEnv_intel_ver"))) load(pathJoin("intel", os.getenv("intel_ver"))) load(pathJoin("craype", os.getenv("craype_ver"))) load(pathJoin("cray-mpich", os.getenv("cray_mpich_ver"))) - load(pathJoin("cmake", os.getenv("cmake_ver"))) - -setenv("HPC_OPT","/apps/ops/para/libs") -prepend_path("MODULEPATH", pathJoin("/apps/ops/para/libs/modulefiles/compiler/intel", os.getenv("intel_ver"))) -prepend_path("MODULEPATH", pathJoin("/apps/ops/para/libs/modulefiles/mpi/intel", os.getenv("intel_ver"), "cray-mpich", os.getenv("cray_mpich_ver"))) - load(pathJoin("jasper", os.getenv("jasper_ver"))) load(pathJoin("zlib", os.getenv("zlib_ver"))) load(pathJoin("libpng", os.getenv("libpng_ver"))) load(pathJoin("hdf5", os.getenv("hdf5_ver"))) load(pathJoin("netcdf", os.getenv("netcdf_ver"))) -load(pathJoin("pio", os.getenv("pio_ver"))) -load(pathJoin("esmf", os.getenv("esmf_ver"))) load(pathJoin("fms", os.getenv("fms_ver"))) load(pathJoin("bacio", os.getenv("bacio_ver"))) load(pathJoin("crtm", os.getenv("crtm_ver"))) @@ -32,16 +24,33 @@ load(pathJoin("g2", os.getenv("g2_ver"))) load(pathJoin("g2tmpl", 
os.getenv("g2tmpl_ver"))) load(pathJoin("ip", os.getenv("ip_ver"))) load(pathJoin("sp", os.getenv("sp_ver"))) -load(pathJoin("w3nco", os.getenv("w3nco_ver"))) +load(pathJoin("w3emc", os.getenv("w3emc_ver"))) +load(pathJoin("w3nco", os.getenv("w3nco_ver"))) load(pathJoin("libjpeg", os.getenv("libjpeg_ver"))) load(pathJoin("cray-pals", os.getenv("cray_pals_ver"))) - -load(pathJoin("w3emc", os.getenv("w3emc_ver"))) load(pathJoin("nemsio", os.getenv("nemsio_ver"))) load(pathJoin("sigio", os.getenv("sigio_ver"))) load(pathJoin("sfcio", os.getenv("sfcio_ver"))) load(pathJoin("wrf_io", os.getenv("wrf_io_ver"))) +load(pathJoin("wgrib2", os.getenv("wgrib2_ver"))) +load(pathJoin("bufr", os.getenv("bufr_ver"))) +load(pathJoin("nemsiogfs", os.getenv("nemsiogfs_ver"))) + + +setenv("HPC_OPT","/apps/ops/para/libs") +prepend_path("MODULEPATH", pathJoin("/apps/ops/para/libs/modulefiles/compiler/intel", os.getenv("intel_para_ver"))) +prepend_path("MODULEPATH", pathJoin("/apps/ops/para/libs/modulefiles/mpi/intel", os.getenv("intel_para_ver"), "cray-mpich", os.getenv("cray_mpich_para_ver"))) + +load(pathJoin("pio", os.getenv("pio_ver"))) + +prepend_path("MODULEPATH", pathJoin("/apps/dev/lmodules/intel", os.getenv("intel_dev_ver"))) +prepend_path("MODULEPATH", pathJoin("/apps/dev/modulefiles/mpi/intel", os.getenv("intel_dev_ver"), "cray-mpich", os.getenv("cray_mpich_dev_ver"))) + +load(pathJoin("esmf", os.getenv("esmf_ver"))) +load(pathJoin("gftl_shared", os.getenv("gftl_shared_ver"))) +load(pathJoin("mapl", os.getenv("mapl_ver"))) + setenv("CMAKE_C_COMPILER","cc") setenv("CMAKE_CXX_COMPILER","CC") diff --git a/modulefiles/srw_common.lua b/modulefiles/srw_common.lua index 622f497342..9eb529294c 100644 --- a/modulefiles/srw_common.lua +++ b/modulefiles/srw_common.lua @@ -4,15 +4,15 @@ load_any("png/1.6.35", "libpng/1.6.37") load_any("netcdf/4.7.4", "netcdf-c/4.7.4") load_any("netcdf/4.7.4", "netcdf-fortran/4.5.4") -load_any("pio/2.5.3", "parallelio/2.5.2") +load_any("pio/2.5.7", 
"parallelio/2.5.2") load_any("esmf/8.3.0b09", "esmf/8.2.0") -load("fms/2022.01") +load("fms/2022.04") load("bufr/11.7.0") load("bacio/2.4.1") -load("crtm/2.3.0") +load("crtm/2.4.0") load("g2/3.4.5") -load("g2tmpl/1.10.0") +load("g2tmpl/1.10.2") load("ip/3.3.3") load("sp/2.3.3") load("w3emc/2.9.2") diff --git a/modulefiles/tasks/hera/aqm_ics.local.lua b/modulefiles/tasks/hera/aqm_ics.local.lua new file mode 100644 index 0000000000..836582f847 --- /dev/null +++ b/modulefiles/tasks/hera/aqm_ics.local.lua @@ -0,0 +1,2 @@ +load("miniconda_online-cmaq") +load(pathJoin("nco", os.getenv("nco_ver") or "4.9.3")) diff --git a/modulefiles/tasks/hera/aqm_lbcs.local.lua b/modulefiles/tasks/hera/aqm_lbcs.local.lua new file mode 100644 index 0000000000..23370a8d60 --- /dev/null +++ b/modulefiles/tasks/hera/aqm_lbcs.local.lua @@ -0,0 +1 @@ +load(pathJoin("nco", os.getenv("nco_ver") or "4.9.3")) diff --git a/modulefiles/tasks/hera/fire_emission.local.lua b/modulefiles/tasks/hera/fire_emission.local.lua new file mode 100644 index 0000000000..d1afe2451e --- /dev/null +++ b/modulefiles/tasks/hera/fire_emission.local.lua @@ -0,0 +1,2 @@ +load("hpss") +load("miniconda_regional_workflow") diff --git a/modulefiles/tasks/hera/miniconda_online-cmaq.lua b/modulefiles/tasks/hera/miniconda_online-cmaq.lua new file mode 100644 index 0000000000..9712c40628 --- /dev/null +++ b/modulefiles/tasks/hera/miniconda_online-cmaq.lua @@ -0,0 +1,5 @@ +prepend_path("MODULEPATH", "/contrib/miniconda3/modulefiles") +load(pathJoin("miniconda3", os.getenv("miniconda3_ver") or "4.12.0")) + +setenv("AQM_ENV_FP", "/scratch2/NCEPDEV/naqfc/RRFS_CMAQ/PY_VENV") +setenv("AQM_ENV", "online-cmaq") diff --git a/modulefiles/tasks/hera/nexus_emission.local.lua b/modulefiles/tasks/hera/nexus_emission.local.lua new file mode 100644 index 0000000000..027881a685 --- /dev/null +++ b/modulefiles/tasks/hera/nexus_emission.local.lua @@ -0,0 +1,2 @@ +load(pathJoin("nco", os.getenv("nco_ver") or "4.9.3")) 
+load("miniconda_online-cmaq") diff --git a/modulefiles/tasks/hera/nexus_gfs_sfc.local.lua b/modulefiles/tasks/hera/nexus_gfs_sfc.local.lua new file mode 100644 index 0000000000..d1afe2451e --- /dev/null +++ b/modulefiles/tasks/hera/nexus_gfs_sfc.local.lua @@ -0,0 +1,2 @@ +load("hpss") +load("miniconda_regional_workflow") diff --git a/modulefiles/tasks/hera/nexus_post_split.local.lua b/modulefiles/tasks/hera/nexus_post_split.local.lua new file mode 100644 index 0000000000..836582f847 --- /dev/null +++ b/modulefiles/tasks/hera/nexus_post_split.local.lua @@ -0,0 +1,2 @@ +load("miniconda_online-cmaq") +load(pathJoin("nco", os.getenv("nco_ver") or "4.9.3")) diff --git a/modulefiles/tasks/hera/point_source.local.lua b/modulefiles/tasks/hera/point_source.local.lua new file mode 100644 index 0000000000..0ef3de3b66 --- /dev/null +++ b/modulefiles/tasks/hera/point_source.local.lua @@ -0,0 +1 @@ +load("miniconda_online-cmaq") diff --git a/modulefiles/tasks/hera/pre_post_stat.local.lua b/modulefiles/tasks/hera/pre_post_stat.local.lua new file mode 100644 index 0000000000..23370a8d60 --- /dev/null +++ b/modulefiles/tasks/hera/pre_post_stat.local.lua @@ -0,0 +1 @@ +load(pathJoin("nco", os.getenv("nco_ver") or "4.9.3")) diff --git a/modulefiles/tasks/hera/run_vx.local.lua b/modulefiles/tasks/hera/run_vx.local.lua index ce3212dfe9..7f0710c9d1 100644 --- a/modulefiles/tasks/hera/run_vx.local.lua +++ b/modulefiles/tasks/hera/run_vx.local.lua @@ -1,3 +1,9 @@ -append_path("MODULEPATH", "/contrib/anaconda/modulefiles") +--[[ +Loading intel is really only necessary when running verification tasks +with the COMPILER experiment parameter set to "gnu" because in that case, +the intel libraries aren't loaded, but the MET/METplus vx software still +needs them because it's built using the intel compiler. This line can +be removed if/when there is a version of MET/METplus built using GNU. 
+--]] load(pathJoin("intel", os.getenv("intel_ver") or "18.0.5.274")) -load(pathJoin("anaconda", os.getenv("anaconda_ver") or "latest")) +load("miniconda_regional_workflow") diff --git a/modulefiles/tasks/orion/aqm_ics.local.lua b/modulefiles/tasks/orion/aqm_ics.local.lua new file mode 100644 index 0000000000..027881a685 --- /dev/null +++ b/modulefiles/tasks/orion/aqm_ics.local.lua @@ -0,0 +1,2 @@ +load(pathJoin("nco", os.getenv("nco_ver") or "4.9.3")) +load("miniconda_online-cmaq") diff --git a/modulefiles/tasks/orion/aqm_lbcs.local.lua b/modulefiles/tasks/orion/aqm_lbcs.local.lua new file mode 100644 index 0000000000..23370a8d60 --- /dev/null +++ b/modulefiles/tasks/orion/aqm_lbcs.local.lua @@ -0,0 +1 @@ +load(pathJoin("nco", os.getenv("nco_ver") or "4.9.3")) diff --git a/modulefiles/tasks/orion/miniconda_online-cmaq.lua b/modulefiles/tasks/orion/miniconda_online-cmaq.lua new file mode 100644 index 0000000000..3b01515fb6 --- /dev/null +++ b/modulefiles/tasks/orion/miniconda_online-cmaq.lua @@ -0,0 +1,4 @@ +load(pathJoin("miniconda", os.getenv("miniconda_ver") or "4.12.0")) + +setenv("SRW_ENV", "/work/noaa/fv3-cam/RRFS_CMAQ/PY_VENV") +setenv("SRW_ENV", "online-cmaq") diff --git a/modulefiles/tasks/orion/nexus_emission.local.lua b/modulefiles/tasks/orion/nexus_emission.local.lua new file mode 100644 index 0000000000..027881a685 --- /dev/null +++ b/modulefiles/tasks/orion/nexus_emission.local.lua @@ -0,0 +1,2 @@ +load(pathJoin("nco", os.getenv("nco_ver") or "4.9.3")) +load("miniconda_online-cmaq") diff --git a/modulefiles/tasks/orion/nexus_post_split.local.lua b/modulefiles/tasks/orion/nexus_post_split.local.lua new file mode 100644 index 0000000000..027881a685 --- /dev/null +++ b/modulefiles/tasks/orion/nexus_post_split.local.lua @@ -0,0 +1,2 @@ +load(pathJoin("nco", os.getenv("nco_ver") or "4.9.3")) +load("miniconda_online-cmaq") diff --git a/modulefiles/tasks/orion/point_source.local.lua b/modulefiles/tasks/orion/point_source.local.lua new file mode 100644 index 
0000000000..0ef3de3b66 --- /dev/null +++ b/modulefiles/tasks/orion/point_source.local.lua @@ -0,0 +1 @@ +load("miniconda_online-cmaq") diff --git a/modulefiles/tasks/wcoss2/aqm_ics.local.lua b/modulefiles/tasks/wcoss2/aqm_ics.local.lua new file mode 100644 index 0000000000..bbff3a76ba --- /dev/null +++ b/modulefiles/tasks/wcoss2/aqm_ics.local.lua @@ -0,0 +1,6 @@ +load("python_regional_workflow") + +load(pathJoin("udunits", os.getenv("udunits_ver"))) +load(pathJoin("gsl", os.getenv("gsl_ver"))) +load(pathJoin("netcdf", os.getenv("netcdf_ver"))) +load(pathJoin("nco", os.getenv("nco_ver"))) diff --git a/modulefiles/tasks/wcoss2/aqm_lbcs.local.lua b/modulefiles/tasks/wcoss2/aqm_lbcs.local.lua new file mode 100644 index 0000000000..156f2a917a --- /dev/null +++ b/modulefiles/tasks/wcoss2/aqm_lbcs.local.lua @@ -0,0 +1,5 @@ +load("python_regional_workflow") + +load(pathJoin("udunits", os.getenv("udunits_ver"))) +load(pathJoin("gsl", os.getenv("gsl_ver"))) +load(pathJoin("nco", os.getenv("nco_ver"))) diff --git a/modulefiles/tasks/wcoss2/bias_correction_o3.local.lua b/modulefiles/tasks/wcoss2/bias_correction_o3.local.lua new file mode 100644 index 0000000000..13d82a8da9 --- /dev/null +++ b/modulefiles/tasks/wcoss2/bias_correction_o3.local.lua @@ -0,0 +1,5 @@ +load("python_regional_workflow") + +load(pathJoin("wgrib2", os.getenv("wgrib2_ver"))) +load(pathJoin("libjpeg", os.getenv("libjpeg_ver"))) +load(pathJoin("grib_util", os.getenv("grib_util_ver"))) diff --git a/modulefiles/tasks/wcoss2/bias_correction_pm25.local.lua b/modulefiles/tasks/wcoss2/bias_correction_pm25.local.lua new file mode 100644 index 0000000000..13d82a8da9 --- /dev/null +++ b/modulefiles/tasks/wcoss2/bias_correction_pm25.local.lua @@ -0,0 +1,5 @@ +load("python_regional_workflow") + +load(pathJoin("wgrib2", os.getenv("wgrib2_ver"))) +load(pathJoin("libjpeg", os.getenv("libjpeg_ver"))) +load(pathJoin("grib_util", os.getenv("grib_util_ver"))) diff --git a/modulefiles/tasks/wcoss2/fire_emission.local.lua 
b/modulefiles/tasks/wcoss2/fire_emission.local.lua new file mode 100644 index 0000000000..3370fa018b --- /dev/null +++ b/modulefiles/tasks/wcoss2/fire_emission.local.lua @@ -0,0 +1 @@ +load("python_regional_workflow") diff --git a/modulefiles/tasks/wcoss2/make_grid.local.lua b/modulefiles/tasks/wcoss2/make_grid.local.lua index 3370fa018b..a898bfa85d 100644 --- a/modulefiles/tasks/wcoss2/make_grid.local.lua +++ b/modulefiles/tasks/wcoss2/make_grid.local.lua @@ -1 +1,4 @@ load("python_regional_workflow") + +load(pathJoin("envvar", os.getenv("envvar_ver"))) + diff --git a/modulefiles/tasks/wcoss2/make_ics.local.lua b/modulefiles/tasks/wcoss2/make_ics.local.lua index 3370fa018b..a898bfa85d 100644 --- a/modulefiles/tasks/wcoss2/make_ics.local.lua +++ b/modulefiles/tasks/wcoss2/make_ics.local.lua @@ -1 +1,4 @@ load("python_regional_workflow") + +load(pathJoin("envvar", os.getenv("envvar_ver"))) + diff --git a/modulefiles/tasks/wcoss2/make_lbcs.local.lua b/modulefiles/tasks/wcoss2/make_lbcs.local.lua index 3370fa018b..a898bfa85d 100644 --- a/modulefiles/tasks/wcoss2/make_lbcs.local.lua +++ b/modulefiles/tasks/wcoss2/make_lbcs.local.lua @@ -1 +1,4 @@ load("python_regional_workflow") + +load(pathJoin("envvar", os.getenv("envvar_ver"))) + diff --git a/modulefiles/tasks/wcoss2/make_orog.local.lua b/modulefiles/tasks/wcoss2/make_orog.local.lua index 3370fa018b..a898bfa85d 100644 --- a/modulefiles/tasks/wcoss2/make_orog.local.lua +++ b/modulefiles/tasks/wcoss2/make_orog.local.lua @@ -1 +1,4 @@ load("python_regional_workflow") + +load(pathJoin("envvar", os.getenv("envvar_ver"))) + diff --git a/modulefiles/tasks/wcoss2/make_sfc_climo.local.lua b/modulefiles/tasks/wcoss2/make_sfc_climo.local.lua index 3370fa018b..a898bfa85d 100644 --- a/modulefiles/tasks/wcoss2/make_sfc_climo.local.lua +++ b/modulefiles/tasks/wcoss2/make_sfc_climo.local.lua @@ -1 +1,4 @@ load("python_regional_workflow") + +load(pathJoin("envvar", os.getenv("envvar_ver"))) + diff --git 
a/modulefiles/tasks/wcoss2/nexus_emission.local.lua b/modulefiles/tasks/wcoss2/nexus_emission.local.lua new file mode 100644 index 0000000000..156f2a917a --- /dev/null +++ b/modulefiles/tasks/wcoss2/nexus_emission.local.lua @@ -0,0 +1,5 @@ +load("python_regional_workflow") + +load(pathJoin("udunits", os.getenv("udunits_ver"))) +load(pathJoin("gsl", os.getenv("gsl_ver"))) +load(pathJoin("nco", os.getenv("nco_ver"))) diff --git a/modulefiles/tasks/wcoss2/nexus_post_split.local.lua b/modulefiles/tasks/wcoss2/nexus_post_split.local.lua new file mode 100644 index 0000000000..156f2a917a --- /dev/null +++ b/modulefiles/tasks/wcoss2/nexus_post_split.local.lua @@ -0,0 +1,5 @@ +load("python_regional_workflow") + +load(pathJoin("udunits", os.getenv("udunits_ver"))) +load(pathJoin("gsl", os.getenv("gsl_ver"))) +load(pathJoin("nco", os.getenv("nco_ver"))) diff --git a/modulefiles/tasks/wcoss2/point_source.local.lua b/modulefiles/tasks/wcoss2/point_source.local.lua new file mode 100644 index 0000000000..3370fa018b --- /dev/null +++ b/modulefiles/tasks/wcoss2/point_source.local.lua @@ -0,0 +1 @@ +load("python_regional_workflow") diff --git a/modulefiles/tasks/wcoss2/post_stat_o3.local.lua b/modulefiles/tasks/wcoss2/post_stat_o3.local.lua new file mode 100644 index 0000000000..13d82a8da9 --- /dev/null +++ b/modulefiles/tasks/wcoss2/post_stat_o3.local.lua @@ -0,0 +1,5 @@ +load("python_regional_workflow") + +load(pathJoin("wgrib2", os.getenv("wgrib2_ver"))) +load(pathJoin("libjpeg", os.getenv("libjpeg_ver"))) +load(pathJoin("grib_util", os.getenv("grib_util_ver"))) diff --git a/modulefiles/tasks/wcoss2/post_stat_pm25.local.lua b/modulefiles/tasks/wcoss2/post_stat_pm25.local.lua new file mode 100644 index 0000000000..13d82a8da9 --- /dev/null +++ b/modulefiles/tasks/wcoss2/post_stat_pm25.local.lua @@ -0,0 +1,5 @@ +load("python_regional_workflow") + +load(pathJoin("wgrib2", os.getenv("wgrib2_ver"))) +load(pathJoin("libjpeg", os.getenv("libjpeg_ver"))) +load(pathJoin("grib_util", 
os.getenv("grib_util_ver"))) diff --git a/modulefiles/tasks/wcoss2/pre_post_stat.local.lua b/modulefiles/tasks/wcoss2/pre_post_stat.local.lua new file mode 100644 index 0000000000..bbff3a76ba --- /dev/null +++ b/modulefiles/tasks/wcoss2/pre_post_stat.local.lua @@ -0,0 +1,6 @@ +load("python_regional_workflow") + +load(pathJoin("udunits", os.getenv("udunits_ver"))) +load(pathJoin("gsl", os.getenv("gsl_ver"))) +load(pathJoin("netcdf", os.getenv("netcdf_ver"))) +load(pathJoin("nco", os.getenv("nco_ver"))) diff --git a/modulefiles/tasks/wcoss2/run_fcst.local.lua b/modulefiles/tasks/wcoss2/run_fcst.local.lua index 3370fa018b..3ba04f0af5 100644 --- a/modulefiles/tasks/wcoss2/run_fcst.local.lua +++ b/modulefiles/tasks/wcoss2/run_fcst.local.lua @@ -1 +1,10 @@ load("python_regional_workflow") + +unload("cray_mpich") +unload("netcdf") +load(pathJoin("cray-mpich", os.getenv("cray_mpich_ver"))) +load(pathJoin("netcdf", os.getenv("netcdf_ver"))) + +load(pathJoin("envvar", os.getenv("envvar_ver"))) +load(pathJoin("libjpeg", os.getenv("libjpeg_ver"))) +load(pathJoin("cray-pals", os.getenv("cray_pals_ver"))) diff --git a/modulefiles/tasks/wcoss2/run_post.local.lua b/modulefiles/tasks/wcoss2/run_post.local.lua index 3370fa018b..69325a8a4a 100644 --- a/modulefiles/tasks/wcoss2/run_post.local.lua +++ b/modulefiles/tasks/wcoss2/run_post.local.lua @@ -1 +1,3 @@ load("python_regional_workflow") + +load(pathJoin("cray-pals", os.getenv("cray_pals_ver"))) diff --git a/parm/FV3.input.yml b/parm/FV3.input.yml index 3ed36d8462..2f83576c5a 100644 --- a/parm/FV3.input.yml +++ b/parm/FV3.input.yml @@ -413,3 +413,127 @@ FV3_GFS_v16: ldebug: False surf_map_nml: !!python/none +FV3_GFS_v17_p8: + cires_ugwp_nml: + launch_level: 27 + fv_core_nml: + <<: *gfs_v15_fv_core + agrid_vel_rst: True + d2_bg_k1: 0.2 + d2_bg_k2: 0.04 + delt_max: 0.002 + dnats: 0 + do_sat_adj: False + do_vort_damp: !!python/none + full_zs_filter: !!python/none + fv_sg_adj: 450 + hord_dp: -5 + hord_mt: 5 + hord_tm: 5 + hord_vt: 5 + 
hord_tr: 8 + k_split: 6 + n_split: 6 + n_sponge: 10 + nord: 2 + nudge_dz: False + n_zs_filter: !!python/none + range_warn: True + res_latlon_dynamics: '' + rf_fast: !!python/none + tau: 10.0 + gfdl_cloud_microphysics_nml: + <<: *gfs_gfdl_cloud_mp + mp_time: 150.0 + reiflag: 2 + rthresh: 1.0e-06 + sedi_transport: True + tau_l2v: 225.0 + tau_v2l: 150.0 + gfs_physics_nml: + <<: *gfs_v15_gfs_physics + active_gases: h2o_co2_o3_n2o_ch4_o2 + bl_mynn_edmf: 1 + bl_mynn_edmf_mom: 1 + bl_mynn_tkeadvect: True + cdmbgwd: [4.0, 0.15, 1.0, 1.0] + cplchm: False + decfl: 10 + do_gsl_drag_ls_bl: False + do_gsl_drag_ss: True + do_gsl_drag_tofd: False + do_myjpbl: !!python/none + do_myjsfc: !!python/none + do_mynnedmf: False + do_mynnsfclay: False + do_RRTMGP: False + do_ugwp_v0: True + do_ugwp_v0_nst_only: False + do_ugwp_v0_orog_only: False + do_ugwp_v1: False + do_ugwp_v1_orog_only: False + do_ysu: !!python/none + doGP_cldoptics_LUT: False + doGP_lwscat: False + dt_inner: 150 + fhzero: 6 + frac_grid: True + gwd_opt: 2 + hybedmf: False + iaer: 1011 + ialb: 2 + iau_inc_files: !!python/none + icloud_bl: 1 + icliq_sw: 2 + iems: 2 + imp_physics: 8 + iopt_alb: 1 + iopt_crs: 2 + iopt_dveg: 4 + iopt_rad: 3 + iopt_sfc: 3 + iopt_stc: 3 + iovr: 3 + isatmedmf: 1 + ldiag_ugwp: !!python/none + lgfdlmprad: False + lradar: False + lseaspray: True + lsm: 2 + lsoil: 4 + lsoil_lsm: 4 + ltaerosol: False + lw_file_clouds: rrtmgp-cloud-optics-coeffs-lw.nc + lw_file_gas: rrtmgp-data-lw-g128-210809.nc + min_lakeice: 0.15 + min_seaice: 0.15 + nsradar_reset: !!python/none + nstf_name: [2, 0, 0, 0, 0] + prautco: [0.00015, 0.00015] + psautco: [0.0008, 0.0005] + qdiag3d: False + ras: False + rrtmgp_nBandsLW: 16 + rrtmgp_nBandsSW: 14 + rrtmgp_nGptsLW: 128 + rrtmgp_nGptsSW: 112 + satmedmf: True + sedi_semi: True + sfclay_compute_flux: !!python/none + shinhong: !!python/none + sw_file_clouds: rrtmgp-cloud-optics-coeffs-sw.nc + sw_file_gas: rrtmgp-data-sw-g112-210809.nc + ttendlim: -999 + xkzminv: !!python/none + 
xkzm_m: !!python/none + xkzm_h: !!python/none + mpp_io_nml: + deflate_level: 1 + shuffle: 1 + namsfc: + fsicl: 0 + fsics: 0 + landice: False + ldebug: False + surf_map_nml: !!python/none + diff --git a/parm/FV3LAM_wflow.xml b/parm/FV3LAM_wflow.xml index fda2b630c7..4c430f6c54 100644 --- a/parm/FV3LAM_wflow.xml +++ b/parm/FV3LAM_wflow.xml @@ -27,47 +27,47 @@ Workflow task names. {%- else %} {%- endif %} - - - - - - - - - + + + + + + + + + - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + {%- if partition_default is not none %} @@ -170,7 +170,7 @@ tasks; and the "FCST" type is used for the RUN_FCST_TN task. @@ -200,16 +200,16 @@ MODULES_RUN_TASK_FP script. ************************************************************************ ************************************************************************ --> - + &RSRV_DEFAULT; - &LOAD_MODULES_RUN_TASK_FP; "&MAKE_GRID_TN;" "&JOBSdir;/JREGIONAL_MAKE_GRID" + &LOAD_MODULES_RUN_TASK_FP; "&TN_MAKE_GRID;" "&JOBSdir;/JREGIONAL_MAKE_GRID" {{ nnodes_make_grid }}:ppn={{ ppn_make_grid }} {{ wtime_make_grid }} &NCORES_PER_NODE; &SCHED_NATIVE_CMD; - &MAKE_GRID_TN; - &LOGDIR;/&MAKE_GRID_TN;&LOGEXT; + &TN_MAKE_GRID; + &LOGDIR;/&TN_MAKE_GRID;&LOGEXT; GLOBAL_VAR_DEFNS_FP&GLOBAL_VAR_DEFNS_FP; USHdir&USHdir; @@ -225,16 +225,16 @@ MODULES_RUN_TASK_FP script. 
************************************************************************ ************************************************************************ --> - + &RSRV_DEFAULT; - &LOAD_MODULES_RUN_TASK_FP; "&MAKE_OROG_TN;" "&JOBSdir;/JREGIONAL_MAKE_OROG" + &LOAD_MODULES_RUN_TASK_FP; "&TN_MAKE_OROG;" "&JOBSdir;/JREGIONAL_MAKE_OROG" {{ nnodes_make_orog }}:ppn={{ ppn_make_orog }} {{ wtime_make_orog }} &NCORES_PER_NODE; &SCHED_NATIVE_CMD; - &MAKE_OROG_TN; - &LOGDIR;/&MAKE_OROG_TN;&LOGEXT; + &TN_MAKE_OROG; + &LOGDIR;/&TN_MAKE_OROG;&LOGEXT; GLOBAL_VAR_DEFNS_FP&GLOBAL_VAR_DEFNS_FP; USHdir&USHdir; @@ -246,7 +246,7 @@ MODULES_RUN_TASK_FP script. - &EXPTDIR;/grid/&MAKE_GRID_TN;&CMPEXT; + &EXPTDIR;/grid/&TN_MAKE_GRID;&CMPEXT; &RUN_TASK_MAKE_GRID;FALSE @@ -258,16 +258,16 @@ MODULES_RUN_TASK_FP script. ************************************************************************ ************************************************************************ --> - + &RSRV_DEFAULT; - &LOAD_MODULES_RUN_TASK_FP; "&MAKE_SFC_CLIMO_TN;" "&JOBSdir;/JREGIONAL_MAKE_SFC_CLIMO" + &LOAD_MODULES_RUN_TASK_FP; "&TN_MAKE_SFC_CLIMO;" "&JOBSdir;/JREGIONAL_MAKE_SFC_CLIMO" {{ nnodes_make_sfc_climo }}:ppn={{ ppn_make_sfc_climo }} {{ wtime_make_sfc_climo }} &NCORES_PER_NODE; &SCHED_NATIVE_CMD; - &MAKE_SFC_CLIMO_TN; - &LOGDIR;/&MAKE_SFC_CLIMO_TN;&LOGEXT; + &TN_MAKE_SFC_CLIMO; + &LOGDIR;/&TN_MAKE_SFC_CLIMO;&LOGEXT; GLOBAL_VAR_DEFNS_FP&GLOBAL_VAR_DEFNS_FP; USHdir&USHdir; @@ -279,13 +279,13 @@ MODULES_RUN_TASK_FP script. - - &EXPTDIR;/grid/&MAKE_GRID_TN;&CMPEXT; + + &EXPTDIR;/grid/&TN_MAKE_GRID;&CMPEXT; &RUN_TASK_MAKE_GRID;FALSE - - &EXPTDIR;/orog/&MAKE_OROG_TN;&CMPEXT; + + &EXPTDIR;/orog/&TN_MAKE_OROG;&CMPEXT; &RUN_TASK_MAKE_OROG;FALSE @@ -298,10 +298,10 @@ MODULES_RUN_TASK_FP script. 
************************************************************************ ************************************************************************ --> - + &RSRV_HPSS; - &LOAD_MODULES_RUN_TASK_FP; "&GET_EXTRN_ICS_TN;" "&JOBSdir;/JREGIONAL_GET_EXTRN_MDL_FILES" + &LOAD_MODULES_RUN_TASK_FP; "&TN_GET_EXTRN_ICS;" "&JOBSdir;/JREGIONAL_GET_EXTRN_MDL_FILES" {{ nnodes_get_extrn_ics }}:ppn={{ ppn_get_extrn_ics }} {%- if machine not in ["GAEA", "NOAACLOUD"] %} {{ mem_get_extrn_ics }} @@ -311,8 +311,8 @@ MODULES_RUN_TASK_FP script. {%- if machine not in ["WCOSS2"] %} &SCHED_NATIVE_CMD; {%- endif %} - &GET_EXTRN_ICS_TN; - &LOGDIR;/&GET_EXTRN_ICS_TN;_@Y@m@d@H&LOGEXT; + &TN_GET_EXTRN_ICS; + &LOGDIR;/&TN_GET_EXTRN_ICS;_@Y@m@d@H&LOGEXT; GLOBAL_VAR_DEFNS_FP&GLOBAL_VAR_DEFNS_FP; USHdir&USHdir; @@ -329,10 +329,10 @@ MODULES_RUN_TASK_FP script. ************************************************************************ ************************************************************************ --> - + &RSRV_HPSS; - &LOAD_MODULES_RUN_TASK_FP; "&GET_EXTRN_LBCS_TN;" "&JOBSdir;/JREGIONAL_GET_EXTRN_MDL_FILES" + &LOAD_MODULES_RUN_TASK_FP; "&TN_GET_EXTRN_LBCS;" "&JOBSdir;/JREGIONAL_GET_EXTRN_MDL_FILES" {{ nnodes_get_extrn_lbcs }}:ppn={{ ppn_get_extrn_lbcs }} {%- if machine not in ["GAEA", "NOAACLOUD"] %} {{ mem_get_extrn_lbcs }} @@ -342,8 +342,8 @@ MODULES_RUN_TASK_FP script. {%- if machine not in ["WCOSS2"] %} &SCHED_NATIVE_CMD; {%- endif %} - &GET_EXTRN_LBCS_TN; - &LOGDIR;/&GET_EXTRN_LBCS_TN;_@Y@m@d@H&LOGEXT; + &TN_GET_EXTRN_LBCS; + &LOGDIR;/&TN_GET_EXTRN_LBCS;_@Y@m@d@H&LOGEXT; GLOBAL_VAR_DEFNS_FP&GLOBAL_VAR_DEFNS_FP; USHdir&USHdir; @@ -367,16 +367,16 @@ MODULES_RUN_TASK_FP script. 
{%- endif %} {%- if run_task_make_ics %} - + &RSRV_DEFAULT; - &LOAD_MODULES_RUN_TASK_FP; "&MAKE_ICS_TN;" "&JOBSdir;/JREGIONAL_MAKE_ICS" + &LOAD_MODULES_RUN_TASK_FP; "&TN_MAKE_ICS;" "&JOBSdir;/JREGIONAL_MAKE_ICS" {{ nnodes_make_ics }}:ppn={{ ppn_make_ics }} {{ wtime_make_ics }} &NCORES_PER_NODE; &SCHED_NATIVE_CMD; - &MAKE_ICS_TN;{{ uscore_ensmem_name }} - &LOGDIR;/&MAKE_ICS_TN;{{ uscore_ensmem_name }}_@Y@m@d@H&LOGEXT; + &TN_MAKE_ICS;{{ uscore_ensmem_name }} + &LOGDIR;/&TN_MAKE_ICS;{{ uscore_ensmem_name }}_@Y@m@d@H&LOGEXT; GLOBAL_VAR_DEFNS_FP&GLOBAL_VAR_DEFNS_FP; USHdir&USHdir; @@ -389,20 +389,20 @@ MODULES_RUN_TASK_FP script. - + - - &EXPTDIR;/grid/&MAKE_GRID_TN;&CMPEXT; + + &EXPTDIR;/grid/&TN_MAKE_GRID;&CMPEXT; &RUN_TASK_MAKE_GRID;FALSE - - &EXPTDIR;/orog/&MAKE_OROG_TN;&CMPEXT; + + &EXPTDIR;/orog/&TN_MAKE_OROG;&CMPEXT; &RUN_TASK_MAKE_OROG;FALSE - - &EXPTDIR;/sfc_climo/&MAKE_SFC_CLIMO_TN;&CMPEXT; + + &EXPTDIR;/sfc_climo/&TN_MAKE_SFC_CLIMO;&CMPEXT; &RUN_TASK_MAKE_SFC_CLIMO;FALSE @@ -415,16 +415,16 @@ MODULES_RUN_TASK_FP script. ************************************************************************ ************************************************************************ --> - + &RSRV_DEFAULT; - &LOAD_MODULES_RUN_TASK_FP; "&MAKE_LBCS_TN;" "&JOBSdir;/JREGIONAL_MAKE_LBCS" + &LOAD_MODULES_RUN_TASK_FP; "&TN_MAKE_LBCS;" "&JOBSdir;/JREGIONAL_MAKE_LBCS" {{ nnodes_make_lbcs }}:ppn={{ ppn_make_lbcs }} {{ wtime_make_lbcs }} &NCORES_PER_NODE; &SCHED_NATIVE_CMD; - &MAKE_LBCS_TN;{{ uscore_ensmem_name }} - &LOGDIR;/&MAKE_LBCS_TN;{{ uscore_ensmem_name }}_@Y@m@d@H&LOGEXT; + &TN_MAKE_LBCS;{{ uscore_ensmem_name }} + &LOGDIR;/&TN_MAKE_LBCS;{{ uscore_ensmem_name }}_@Y@m@d@H&LOGEXT; GLOBAL_VAR_DEFNS_FP&GLOBAL_VAR_DEFNS_FP; USHdir&USHdir; @@ -437,20 +437,20 @@ MODULES_RUN_TASK_FP script. 
- + - - &EXPTDIR;/grid/&MAKE_GRID_TN;&CMPEXT; + + &EXPTDIR;/grid/&TN_MAKE_GRID;&CMPEXT; &RUN_TASK_MAKE_GRID;FALSE - - &EXPTDIR;/orog/&MAKE_OROG_TN;&CMPEXT; + + &EXPTDIR;/orog/&TN_MAKE_OROG;&CMPEXT; &RUN_TASK_MAKE_OROG;FALSE - - &EXPTDIR;/sfc_climo/&MAKE_SFC_CLIMO_TN;&CMPEXT; + + &EXPTDIR;/sfc_climo/&TN_MAKE_SFC_CLIMO;&CMPEXT; &RUN_TASK_MAKE_SFC_CLIMO;FALSE @@ -463,10 +463,10 @@ MODULES_RUN_TASK_FP script. ************************************************************************ ************************************************************************ --> - + &RSRV_FCST; - &LOAD_MODULES_RUN_TASK_FP; "&RUN_FCST_TN;" "&JOBSdir;/JREGIONAL_RUN_FCST" + &LOAD_MODULES_RUN_TASK_FP; "&TN_RUN_FCST;" "&JOBSdir;/JREGIONAL_RUN_FCST" {%- if machine in ["JET", "HERA", "LINUX"] %} {{ ncores_run_fcst }} {{ native_run_fcst }} @@ -480,8 +480,8 @@ MODULES_RUN_TASK_FP script. {%- endif %} &SCHED_NATIVE_CMD; {{ wtime_run_fcst }} - &RUN_FCST_TN;{{ uscore_ensmem_name }} - &LOGDIR;/&RUN_FCST_TN;{{ uscore_ensmem_name }}_@Y@m@d@H&LOGEXT; + &TN_RUN_FCST;{{ uscore_ensmem_name }} + &LOGDIR;/&TN_RUN_FCST;{{ uscore_ensmem_name }}_@Y@m@d@H&LOGEXT; GLOBAL_VAR_DEFNS_FP&GLOBAL_VAR_DEFNS_FP; USHdir&USHdir; @@ -494,8 +494,8 @@ MODULES_RUN_TASK_FP script. - - + + @@ -525,16 +525,16 @@ later below for other output times. 000 00 - + &RSRV_DEFAULT; - &LOAD_MODULES_RUN_TASK_FP; "&RUN_POST_TN;" "&JOBSdir;/JREGIONAL_RUN_POST" + &LOAD_MODULES_RUN_TASK_FP; "&TN_RUN_POST;" "&JOBSdir;/JREGIONAL_RUN_POST" {{ nnodes_run_post }}:ppn={{ ppn_run_post }} {{ wtime_run_post }} &NCORES_PER_NODE; &SCHED_NATIVE_CMD; - &RUN_POST_TN;{{ uscore_ensmem_name }}_f#fhr##fmn# - &LOGDIR;/&RUN_POST_TN;{{ uscore_ensmem_name }}_f#fhr##fmn#_@Y@m@d@H&LOGEXT; + &TN_RUN_POST;{{ uscore_ensmem_name }}_f#fhr##fmn# + &LOGDIR;/&TN_RUN_POST;{{ uscore_ensmem_name }}_f#fhr##fmn#_@Y@m@d@H&LOGEXT; GLOBAL_VAR_DEFNS_FP&GLOBAL_VAR_DEFNS_FP; USHdir&USHdir; @@ -555,7 +555,7 @@ names in the dependencies. 
--> - + &DYN_DIR;f{{ first_fv3_file_tstr }}.nc &PHY_DIR;f{{ first_fv3_file_tstr }}.nc @@ -576,22 +576,22 @@ variable (along with the fmn variable in the inner metatask) allows the block of code inside the tag to be identical to the ones later below for other output times. --> - + 000 {% for min in range(delta_min, 60, delta_min) %}{{ " %02d" % min }}{% endfor %} - + &RSRV_DEFAULT; - &LOAD_MODULES_RUN_TASK_FP; "&RUN_POST_TN;" "&JOBSdir;/JREGIONAL_RUN_POST" + &LOAD_MODULES_RUN_TASK_FP; "&TN_RUN_POST;" "&JOBSdir;/JREGIONAL_RUN_POST" {{ nnodes_run_post }}:ppn={{ ppn_run_post }} {{ wtime_run_post }} &NCORES_PER_NODE; &SCHED_NATIVE_CMD; - &RUN_POST_TN;{{ uscore_ensmem_name }}_f#fhr##fmn# - &LOGDIR;/&RUN_POST_TN;{{ uscore_ensmem_name }}_f#fhr##fmn#_@Y@m@d@H&LOGEXT; + &TN_RUN_POST;{{ uscore_ensmem_name }}_f#fhr##fmn# + &LOGDIR;/&TN_RUN_POST;{{ uscore_ensmem_name }}_f#fhr##fmn#_@Y@m@d@H&LOGEXT; GLOBAL_VAR_DEFNS_FP&GLOBAL_VAR_DEFNS_FP; USHdir&USHdir; @@ -606,7 +606,7 @@ for other output times. - + &DYN_DIR;f#fhr#:#fmn#:00.nc &PHY_DIR;f#fhr#:#fmn#:00.nc @@ -621,7 +621,7 @@ for other output times. 
{%- endif %} - + {%- if sub_hourly_post %} {% for h in range(0, fcst_len_hrs+1) %}{{ " %03d" % h }}{% endfor %} - + {%- endif %} &RSRV_DEFAULT; - &LOAD_MODULES_RUN_TASK_FP; "&RUN_POST_TN;" "&JOBSdir;/JREGIONAL_RUN_POST" + &LOAD_MODULES_RUN_TASK_FP; "&TN_RUN_POST;" "&JOBSdir;/JREGIONAL_RUN_POST" {{ nnodes_run_post }}:ppn={{ ppn_run_post }} {{ wtime_run_post }} &NCORES_PER_NODE; &SCHED_NATIVE_CMD; {%- if sub_hourly_post %} - &RUN_POST_TN;{{ uscore_ensmem_name }}_f#fhr##fmn# - &LOGDIR;/&RUN_POST_TN;{{ uscore_ensmem_name }}_f#fhr##fmn#_@Y@m@d@H&LOGEXT; + &TN_RUN_POST;{{ uscore_ensmem_name }}_f#fhr##fmn# + &LOGDIR;/&TN_RUN_POST;{{ uscore_ensmem_name }}_f#fhr##fmn#_@Y@m@d@H&LOGEXT; {%- else %} - &RUN_POST_TN;{{ uscore_ensmem_name }}_f#fhr# - &LOGDIR;/&RUN_POST_TN;{{ uscore_ensmem_name }}_f#fhr#_@Y@m@d@H&LOGEXT; + &TN_RUN_POST;{{ uscore_ensmem_name }}_f#fhr# + &LOGDIR;/&TN_RUN_POST;{{ uscore_ensmem_name }}_f#fhr#_@Y@m@d@H&LOGEXT; {%- endif %} GLOBAL_VAR_DEFNS_FP&GLOBAL_VAR_DEFNS_FP; @@ -677,7 +677,7 @@ always zero). - + {%- if sub_hourly_post %} &DYN_DIR;f#fhr#:#fmn#:00.nc @@ -715,16 +715,16 @@ the tag to be identical to the ones above for other output times. {{ "%03d" % fcst_len_hrs }} 00 - + &RSRV_DEFAULT; - &LOAD_MODULES_RUN_TASK_FP; "&RUN_POST_TN;" "&JOBSdir;/JREGIONAL_RUN_POST" + &LOAD_MODULES_RUN_TASK_FP; "&TN_RUN_POST;" "&JOBSdir;/JREGIONAL_RUN_POST" {{ nnodes_run_post }}:ppn={{ ppn_run_post }} {{ wtime_run_post }} &NCORES_PER_NODE; &SCHED_NATIVE_CMD; - &RUN_POST_TN;{{ uscore_ensmem_name }}_f#fhr##fmn# - &LOGDIR;/&RUN_POST_TN;{{ uscore_ensmem_name }}_f#fhr##fmn#_@Y@m@d@H&LOGEXT; + &TN_RUN_POST;{{ uscore_ensmem_name }}_f#fhr##fmn# + &LOGDIR;/&TN_RUN_POST;{{ uscore_ensmem_name }}_f#fhr##fmn#_@Y@m@d@H&LOGEXT; GLOBAL_VAR_DEFNS_FP&GLOBAL_VAR_DEFNS_FP; USHdir&USHdir; @@ -739,7 +739,7 @@ the tag to be identical to the ones above for other output times. 
- + &DYN_DIR;f#fhr#:#fmn#:00.nc &PHY_DIR;f#fhr#:#fmn#:00.nc @@ -758,18 +758,18 @@ the tag to be identical to the ones above for other output times. ************************************************************************ ************************************************************************ --> - + &RSRV_DEFAULT; - &LOAD_MODULES_RUN_TASK_FP; "&PLOT_ALLVARS_TN;" "&JOBSdir;/JREGIONAL_PLOT_ALLVARS" + &LOAD_MODULES_RUN_TASK_FP; "&TN_PLOT_ALLVARS;" "&JOBSdir;/JREGIONAL_PLOT_ALLVARS" {{ nnodes_plot_allvars }}:ppn={{ ppn_plot_allvars }} {{ wtime_plot_allvars }} &NCORES_PER_NODE; {%- if machine not in ["WCOSS2"] %} &SCHED_NATIVE_CMD; {%- endif %} - &PLOT_ALLVARS_TN; - &LOGDIR;/&PLOT_ALLVARS_TN;_@Y@m@d@H&LOGEXT; + &TN_PLOT_ALLVARS; + &LOGDIR;/&TN_PLOT_ALLVARS;_@Y@m@d@H&LOGEXT; GLOBAL_VAR_DEFNS_FP&GLOBAL_VAR_DEFNS_FP; USHdir&USHdir; @@ -781,11 +781,19 @@ the tag to be identical to the ones above for other output times. ENSMEM_INDX#{{ ensmem_indx_name }}# + +{#- Redundant dependency to simplify jinja code. +This dependency will always evaluate to true. It is included because +rocoto does not allow empty , , and other tags. Without +it, we'd have to include more jinja if-statements here. +#} + TRUETRUE {%- if write_dopost %} - - {%- else %} - + + {%- elif run_task_run_post %} + {%- endif %} + @@ -796,7 +804,7 @@ the tag to be identical to the ones above for other output times. ************************************************************************ ************************************************************************ --> - + &RSRV_HPSS; &LOAD_MODULES_RUN_TASK_FP; "&GET_OBS;" "&JOBSdir;/JREGIONAL_GET_OBS_CCPA" @@ -807,8 +815,8 @@ the tag to be identical to the ones above for other output times. 
{{ wtime_get_obs_ccpa }} &NCORES_PER_NODE; &SCHED_NATIVE_CMD; - &GET_OBS_CCPA_TN; - &LOGDIR;/&GET_OBS_CCPA_TN;_@Y@m@d@H&LOGEXT; + &TN_GET_OBS_CCPA; + &LOGDIR;/&TN_GET_OBS_CCPA;_@Y@m@d@H&LOGEXT; GLOBAL_VAR_DEFNS_FP&GLOBAL_VAR_DEFNS_FP; USHdir&USHdir; @@ -828,7 +836,7 @@ the tag to be identical to the ones above for other output times. ************************************************************************ ************************************************************************ --> - + &RSRV_HPSS; &LOAD_MODULES_RUN_TASK_FP; "&GET_OBS;" "&JOBSdir;/JREGIONAL_GET_OBS_MRMS" @@ -839,8 +847,8 @@ the tag to be identical to the ones above for other output times. {{ wtime_get_obs_mrms }} &NCORES_PER_NODE; &SCHED_NATIVE_CMD; - &GET_OBS_MRMS_TN; - &LOGDIR;/&GET_OBS_MRMS_TN;_@Y@m@d@H&LOGEXT; + &TN_GET_OBS_MRMS; + &LOGDIR;/&TN_GET_OBS_MRMS;_@Y@m@d@H&LOGEXT; GLOBAL_VAR_DEFNS_FP&GLOBAL_VAR_DEFNS_FP; USHdir&USHdir; @@ -861,7 +869,7 @@ the tag to be identical to the ones above for other output times. ************************************************************************ ************************************************************************ --> - + &RSRV_HPSS; &LOAD_MODULES_RUN_TASK_FP; "&GET_OBS;" "&JOBSdir;/JREGIONAL_GET_OBS_NDAS" @@ -872,8 +880,8 @@ the tag to be identical to the ones above for other output times. {{ wtime_get_obs_ndas }} &NCORES_PER_NODE; &SCHED_NATIVE_CMD; - &GET_OBS_NDAS_TN; - &LOGDIR;/&GET_OBS_NDAS_TN;_@Y@m@d@H&LOGEXT; + &TN_GET_OBS_NDAS; + &LOGDIR;/&TN_GET_OBS_NDAS;_@Y@m@d@H&LOGEXT; GLOBAL_VAR_DEFNS_FP&GLOBAL_VAR_DEFNS_FP; USHdir&USHdir; @@ -892,10 +900,10 @@ the tag to be identical to the ones above for other output times. 
************************************************************************ ************************************************************************ --> - + &RSRV_DEFAULT; - &LOAD_MODULES_RUN_TASK_FP; "&VX_TN;" "&JOBSdir;/JREGIONAL_RUN_VX_GRIDSTAT" + &LOAD_MODULES_RUN_TASK_FP; "&TN_VX;" "&JOBSdir;/JREGIONAL_RUN_VX_GRIDSTAT" {{ nnodes_vx_gridstat }}:ppn={{ ppn_vx_gridstat }} {%- if machine not in ["GAEA", "NOAACLOUD"] %} {{ mem_vx_gridstat }} @@ -903,8 +911,8 @@ the tag to be identical to the ones above for other output times. {{ wtime_vx_gridstat }} &NCORES_PER_NODE; &SCHED_NATIVE_CMD; - &VX_GRIDSTAT_TN; - &LOGDIR;/&VX_GRIDSTAT_TN;{{ uscore_ensmem_name }}_@Y@m@d@H&LOGEXT; + &TN_VX_GRIDSTAT; + &LOGDIR;/&TN_VX_GRIDSTAT;{{ uscore_ensmem_name }}_@Y@m@d@H&LOGEXT; GLOBAL_VAR_DEFNS_FP&GLOBAL_VAR_DEFNS_FP; USHdir&USHdir; @@ -922,22 +930,18 @@ the tag to be identical to the ones above for other output times. {%- endif %} - {%- if run_task_get_obs_ccpa %} - - {%- if write_dopost %} - - {%- else %} - - {%- endif %} - - {%- else %} - {%- if write_dopost %} - - {%- else %} - - {%- endif %} +{#- Redundant dependency to simplify jinja code. #} + TRUETRUE + {%- if run_task_get_obs_ccpa %} + {%- endif %} + {%- if write_dopost %} + + {%- elif run_task_run_post %} + + {%- endif %} + @@ -948,10 +952,10 @@ the tag to be identical to the ones above for other output times. ************************************************************************ ************************************************************************ --> - + &RSRV_DEFAULT; - &LOAD_MODULES_RUN_TASK_FP; "&VX_TN;" "&JOBSdir;/JREGIONAL_RUN_VX_GRIDSTAT" + &LOAD_MODULES_RUN_TASK_FP; "&TN_VX;" "&JOBSdir;/JREGIONAL_RUN_VX_GRIDSTAT" {{ nnodes_vx_gridstat }}:ppn={{ ppn_vx_gridstat }} {%- if machine not in ["GAEA", "NOAACLOUD"] %} {{ mem_vx_gridstat }} @@ -959,8 +963,8 @@ the tag to be identical to the ones above for other output times. 
{{ wtime_vx_gridstat }} &NCORES_PER_NODE; &SCHED_NATIVE_CMD; - &VX_GRIDSTAT_REFC_TN; - &LOGDIR;/&VX_GRIDSTAT_REFC_TN;{{ uscore_ensmem_name }}_@Y@m@d@H&LOGEXT; + &TN_VX_GRIDSTAT_REFC; + &LOGDIR;/&TN_VX_GRIDSTAT_REFC;{{ uscore_ensmem_name }}_@Y@m@d@H&LOGEXT; GLOBAL_VAR_DEFNS_FP&GLOBAL_VAR_DEFNS_FP; USHdir&USHdir; @@ -977,22 +981,18 @@ the tag to be identical to the ones above for other output times. {%- endif %} - {%- if run_task_get_obs_mrms %} - - {%- if write_dopost %} - - {%- else %} - - {%- endif %} - - {%- else %} - {%- if write_dopost %} - - {%- else %} - - {%- endif %} +{#- Redundant dependency to simplify jinja code. #} + TRUETRUE + {%- if run_task_get_obs_mrms %} + + {%- endif %} + {%- if write_dopost %} + + {%- elif run_task_run_post %} + {%- endif %} + @@ -1003,10 +1003,10 @@ the tag to be identical to the ones above for other output times. ************************************************************************ ************************************************************************ --> - + &RSRV_DEFAULT; - &LOAD_MODULES_RUN_TASK_FP; "&VX_TN;" "&JOBSdir;/JREGIONAL_RUN_VX_GRIDSTAT" + &LOAD_MODULES_RUN_TASK_FP; "&TN_VX;" "&JOBSdir;/JREGIONAL_RUN_VX_GRIDSTAT" {{ nnodes_vx_gridstat }}:ppn={{ ppn_vx_gridstat }} {%- if machine not in ["GAEA", "NOAACLOUD"] %} {{ mem_vx_gridstat }} @@ -1014,8 +1014,8 @@ the tag to be identical to the ones above for other output times. {{ wtime_vx_gridstat }} &NCORES_PER_NODE; &SCHED_NATIVE_CMD; - &VX_GRIDSTAT_RETOP_TN; - &LOGDIR;/&VX_GRIDSTAT_RETOP_TN;{{ uscore_ensmem_name }}_@Y@m@d@H&LOGEXT; + &TN_VX_GRIDSTAT_RETOP; + &LOGDIR;/&TN_VX_GRIDSTAT_RETOP;{{ uscore_ensmem_name }}_@Y@m@d@H&LOGEXT; GLOBAL_VAR_DEFNS_FP&GLOBAL_VAR_DEFNS_FP; USHdir&USHdir; @@ -1032,22 +1032,18 @@ the tag to be identical to the ones above for other output times. 
{%- endif %} - {%- if run_task_get_obs_mrms %} - - {%- if write_dopost %} - - {%- else %} - - {%- endif %} - - {%- else %} - {%- if write_dopost %} - - {%- else %} - - {%- endif %} +{#- Redundant dependency to simplify jinja code. #} + TRUETRUE + {%- if run_task_get_obs_mrms %} + + {%- endif %} + {%- if write_dopost %} + + {%- elif run_task_run_post %} + {%- endif %} + @@ -1058,10 +1054,10 @@ the tag to be identical to the ones above for other output times. ************************************************************************ ************************************************************************ --> - + &RSRV_DEFAULT; - &LOAD_MODULES_RUN_TASK_FP; "&VX_TN;" "&JOBSdir;/JREGIONAL_RUN_VX_GRIDSTAT" + &LOAD_MODULES_RUN_TASK_FP; "&TN_VX;" "&JOBSdir;/JREGIONAL_RUN_VX_GRIDSTAT" {{ nnodes_vx_gridstat }}:ppn={{ ppn_vx_gridstat }} {%- if machine not in ["GAEA", "NOAACLOUD"] %} {{ mem_vx_gridstat }} @@ -1069,8 +1065,8 @@ the tag to be identical to the ones above for other output times. {{ wtime_vx_gridstat }} &NCORES_PER_NODE; &SCHED_NATIVE_CMD; - &VX_GRIDSTAT_03h_TN; - &LOGDIR;/&VX_GRIDSTAT_03h_TN;{{ uscore_ensmem_name }}_@Y@m@d@H&LOGEXT; + &TN_VX_GRIDSTAT_03h; + &LOGDIR;/&TN_VX_GRIDSTAT_03h;{{ uscore_ensmem_name }}_@Y@m@d@H&LOGEXT; GLOBAL_VAR_DEFNS_FP&GLOBAL_VAR_DEFNS_FP; USHdir&USHdir; @@ -1088,7 +1084,7 @@ the tag to be identical to the ones above for other output times. {%- endif %} - + @@ -1099,10 +1095,10 @@ the tag to be identical to the ones above for other output times. 
************************************************************************ ************************************************************************ --> - + &RSRV_DEFAULT; - &LOAD_MODULES_RUN_TASK_FP; "&VX_TN;" "&JOBSdir;/JREGIONAL_RUN_VX_GRIDSTAT" + &LOAD_MODULES_RUN_TASK_FP; "&TN_VX;" "&JOBSdir;/JREGIONAL_RUN_VX_GRIDSTAT" {{ nnodes_vx_gridstat }}:ppn={{ ppn_vx_gridstat }} {%- if machine not in ["GAEA", "NOAACLOUD"] %} {{ mem_vx_gridstat }} @@ -1110,8 +1106,8 @@ the tag to be identical to the ones above for other output times. {{ wtime_vx_gridstat }} &NCORES_PER_NODE; &SCHED_NATIVE_CMD; - &VX_GRIDSTAT_06h_TN; - &LOGDIR;/&VX_GRIDSTAT_06h_TN;{{ uscore_ensmem_name }}_@Y@m@d@H&LOGEXT; + &TN_VX_GRIDSTAT_06h; + &LOGDIR;/&TN_VX_GRIDSTAT_06h;{{ uscore_ensmem_name }}_@Y@m@d@H&LOGEXT; GLOBAL_VAR_DEFNS_FP&GLOBAL_VAR_DEFNS_FP; USHdir&USHdir; @@ -1129,7 +1125,7 @@ the tag to be identical to the ones above for other output times. {%- endif %} - + @@ -1140,10 +1136,10 @@ the tag to be identical to the ones above for other output times. ************************************************************************ ************************************************************************ --> - + &RSRV_DEFAULT; - &LOAD_MODULES_RUN_TASK_FP; "&VX_TN;" "&JOBSdir;/JREGIONAL_RUN_VX_GRIDSTAT" + &LOAD_MODULES_RUN_TASK_FP; "&TN_VX;" "&JOBSdir;/JREGIONAL_RUN_VX_GRIDSTAT" {{ nnodes_vx_gridstat }}:ppn={{ ppn_vx_gridstat }} {%- if machine not in ["GAEA", "NOAACLOUD"] %} {{ mem_vx_gridstat }} @@ -1151,8 +1147,8 @@ the tag to be identical to the ones above for other output times. {{ wtime_vx_gridstat }} &NCORES_PER_NODE; &SCHED_NATIVE_CMD; - &VX_GRIDSTAT_24h_TN; - &LOGDIR;/&VX_GRIDSTAT_24h_TN;{{ uscore_ensmem_name }}_@Y@m@d@H&LOGEXT; + &TN_VX_GRIDSTAT_24h; + &LOGDIR;/&TN_VX_GRIDSTAT_24h;{{ uscore_ensmem_name }}_@Y@m@d@H&LOGEXT; GLOBAL_VAR_DEFNS_FP&GLOBAL_VAR_DEFNS_FP; USHdir&USHdir; @@ -1170,7 +1166,7 @@ the tag to be identical to the ones above for other output times. 
{%- endif %} - + @@ -1181,10 +1177,10 @@ the tag to be identical to the ones above for other output times. ************************************************************************ ************************************************************************ --> - + &RSRV_DEFAULT; - &LOAD_MODULES_RUN_TASK_FP; "&VX_TN;" "&JOBSdir;/JREGIONAL_RUN_VX_POINTSTAT" + &LOAD_MODULES_RUN_TASK_FP; "&TN_VX;" "&JOBSdir;/JREGIONAL_RUN_VX_POINTSTAT" {{ nnodes_vx_pointstat }}:ppn={{ ppn_vx_pointstat }} {%- if machine not in ["GAEA", "NOAACLOUD"] %} {{ mem_vx_pointstat }} @@ -1192,8 +1188,8 @@ the tag to be identical to the ones above for other output times. {{ wtime_vx_pointstat }} &NCORES_PER_NODE; &SCHED_NATIVE_CMD; - &VX_POINTSTAT_TN; - &LOGDIR;/&VX_POINTSTAT_TN;{{ uscore_ensmem_name }}_@Y@m@d@H&LOGEXT; + &TN_VX_POINTSTAT; + &LOGDIR;/&TN_VX_POINTSTAT;{{ uscore_ensmem_name }}_@Y@m@d@H&LOGEXT; GLOBAL_VAR_DEFNS_FP&GLOBAL_VAR_DEFNS_FP; USHdir&USHdir; @@ -1209,22 +1205,18 @@ the tag to be identical to the ones above for other output times. {%- endif %} - {%- if run_task_get_obs_ndas %} - - {%- if write_dopost %} - - {%- else %} - - {%- endif %} - - {%- else %} - {%- if write_dopost %} - - {%- else %} - - {%- endif %} +{#- Redundant dependency to simplify jinja code. #} + TRUETRUE + {%- if run_task_get_obs_ndas %} + {%- endif %} + {%- if write_dopost %} + + {%- elif run_task_run_post %} + + {%- endif %} + @@ -1239,10 +1231,10 @@ the tag to be identical to the ones above for other output times. 
************************************************************************ ************************************************************************ --> - + &RSRV_DEFAULT; - &LOAD_MODULES_RUN_TASK_FP; "&VX_TN;" "&JOBSdir;/JREGIONAL_RUN_VX_ENSGRID" + &LOAD_MODULES_RUN_TASK_FP; "&TN_VX;" "&JOBSdir;/JREGIONAL_RUN_VX_ENSGRID" {{ nnodes_vx_ensgrid }}:ppn={{ ppn_vx_ensgrid }} {%- if machine not in ["GAEA", "NOAACLOUD"] %} {{ mem_vx_ensgrid }} @@ -1250,8 +1242,8 @@ the tag to be identical to the ones above for other output times. {{ wtime_vx_ensgrid }} &NCORES_PER_NODE; &SCHED_NATIVE_CMD; - &VX_ENSGRID_TN; - &LOGDIR;/&VX_ENSGRID_TN;_@Y@m@d@H&LOGEXT; + &TN_VX_ENSGRID; + &LOGDIR;/&TN_VX_ENSGRID;_@Y@m@d@H&LOGEXT; GLOBAL_VAR_DEFNS_FP&GLOBAL_VAR_DEFNS_FP; USHdir&USHdir; @@ -1276,10 +1268,10 @@ the tag to be identical to the ones above for other output times. ************************************************************************ --> - + &RSRV_DEFAULT; - &LOAD_MODULES_RUN_TASK_FP; "&VX_TN;" "&JOBSdir;/JREGIONAL_RUN_VX_ENSGRID" + &LOAD_MODULES_RUN_TASK_FP; "&TN_VX;" "&JOBSdir;/JREGIONAL_RUN_VX_ENSGRID" {{ nnodes_vx_ensgrid }}:ppn={{ ppn_vx_ensgrid }} {%- if machine not in ["GAEA", "NOAACLOUD"] %} {{ mem_vx_ensgrid }} @@ -1287,8 +1279,8 @@ the tag to be identical to the ones above for other output times. {{ wtime_vx_ensgrid }} &NCORES_PER_NODE; &SCHED_NATIVE_CMD; - &VX_ENSGRID_03h_TN; - &LOGDIR;/&VX_ENSGRID_03h_TN;_@Y@m@d@H&LOGEXT; + &TN_VX_ENSGRID_03h; + &LOGDIR;/&TN_VX_ENSGRID_03h;_@Y@m@d@H&LOGEXT; GLOBAL_VAR_DEFNS_FP&GLOBAL_VAR_DEFNS_FP; USHdir&USHdir; @@ -1302,7 +1294,7 @@ the tag to be identical to the ones above for other output times. ACCUM03 - + @@ -1313,10 +1305,10 @@ the tag to be identical to the ones above for other output times. 
************************************************************************ --> - + &RSRV_DEFAULT; - &LOAD_MODULES_RUN_TASK_FP; "&VX_TN;" "&JOBSdir;/JREGIONAL_RUN_VX_ENSGRID" + &LOAD_MODULES_RUN_TASK_FP; "&TN_VX;" "&JOBSdir;/JREGIONAL_RUN_VX_ENSGRID" {{ nnodes_vx_ensgrid }}:ppn={{ ppn_vx_ensgrid }} {%- if machine not in ["GAEA", "NOAACLOUD"] %} {{ mem_vx_ensgrid }} @@ -1324,8 +1316,8 @@ the tag to be identical to the ones above for other output times. {{ wtime_vx_ensgrid }} &NCORES_PER_NODE; &SCHED_NATIVE_CMD; - &VX_ENSGRID_06h_TN; - &LOGDIR;/&VX_ENSGRID_06h_TN;_@Y@m@d@H&LOGEXT; + &TN_VX_ENSGRID_06h; + &LOGDIR;/&TN_VX_ENSGRID_06h;_@Y@m@d@H&LOGEXT; GLOBAL_VAR_DEFNS_FP&GLOBAL_VAR_DEFNS_FP; USHdir&USHdir; @@ -1339,7 +1331,7 @@ the tag to be identical to the ones above for other output times. ACCUM06 - + @@ -1350,10 +1342,10 @@ the tag to be identical to the ones above for other output times. ************************************************************************ --> - + &RSRV_DEFAULT; - &LOAD_MODULES_RUN_TASK_FP; "&VX_TN;" "&JOBSdir;/JREGIONAL_RUN_VX_ENSGRID" + &LOAD_MODULES_RUN_TASK_FP; "&TN_VX;" "&JOBSdir;/JREGIONAL_RUN_VX_ENSGRID" {{ nnodes_vx_ensgrid }}:ppn={{ ppn_vx_ensgrid }} {%- if machine not in ["GAEA", "NOAACLOUD"] %} {{ mem_vx_ensgrid }} @@ -1361,8 +1353,8 @@ the tag to be identical to the ones above for other output times. {{ wtime_vx_ensgrid }} &NCORES_PER_NODE; &SCHED_NATIVE_CMD; - &VX_ENSGRID_24h_TN; - &LOGDIR;/&VX_ENSGRID_24h_TN;_@Y@m@d@H&LOGEXT; + &TN_VX_ENSGRID_24h; + &LOGDIR;/&TN_VX_ENSGRID_24h;_@Y@m@d@H&LOGEXT; GLOBAL_VAR_DEFNS_FP&GLOBAL_VAR_DEFNS_FP; USHdir&USHdir; @@ -1376,7 +1368,7 @@ the tag to be identical to the ones above for other output times. ACCUM24 - + @@ -1386,10 +1378,10 @@ the tag to be identical to the ones above for other output times. 
************************************************************************ --> - + &RSRV_DEFAULT; - &LOAD_MODULES_RUN_TASK_FP; "&VX_TN;" "&JOBSdir;/JREGIONAL_RUN_VX_ENSGRID" + &LOAD_MODULES_RUN_TASK_FP; "&TN_VX;" "&JOBSdir;/JREGIONAL_RUN_VX_ENSGRID" {{ nnodes_vx_ensgrid }}:ppn={{ ppn_vx_ensgrid }} {%- if machine not in ["GAEA", "NOAACLOUD"] %} {{ mem_vx_ensgrid }} @@ -1397,8 +1389,8 @@ the tag to be identical to the ones above for other output times. {{ wtime_vx_ensgrid }} &NCORES_PER_NODE; &SCHED_NATIVE_CMD; - &VX_ENSGRID_REFC_TN; - &LOGDIR;/&VX_ENSGRID_REFC_TN;_@Y@m@d@H&LOGEXT; + &TN_VX_ENSGRID_REFC; + &LOGDIR;/&TN_VX_ENSGRID_REFC;_@Y@m@d@H&LOGEXT; GLOBAL_VAR_DEFNS_FP&GLOBAL_VAR_DEFNS_FP; USHdir&USHdir; @@ -1421,10 +1413,10 @@ the tag to be identical to the ones above for other output times. ************************************************************************ --> - + &RSRV_DEFAULT; - &LOAD_MODULES_RUN_TASK_FP; "&VX_TN;" "&JOBSdir;/JREGIONAL_RUN_VX_ENSGRID" + &LOAD_MODULES_RUN_TASK_FP; "&TN_VX;" "&JOBSdir;/JREGIONAL_RUN_VX_ENSGRID" {{ nnodes_vx_ensgrid }}:ppn={{ ppn_vx_ensgrid }} {%- if machine not in ["GAEA", "NOAACLOUD"] %} {{ mem_vx_ensgrid }} @@ -1432,8 +1424,8 @@ the tag to be identical to the ones above for other output times. {{ wtime_vx_ensgrid }} &NCORES_PER_NODE; &SCHED_NATIVE_CMD; - &VX_ENSGRID_RETOP_TN; - &LOGDIR;/&VX_ENSGRID_RETOP_TN;_@Y@m@d@H&LOGEXT; + &TN_VX_ENSGRID_RETOP; + &LOGDIR;/&TN_VX_ENSGRID_RETOP;_@Y@m@d@H&LOGEXT; GLOBAL_VAR_DEFNS_FP&GLOBAL_VAR_DEFNS_FP; USHdir&USHdir; @@ -1455,10 +1447,10 @@ the tag to be identical to the ones above for other output times. 
************************************************************************ ************************************************************************ --> - + &RSRV_DEFAULT; - &LOAD_MODULES_RUN_TASK_FP; "&VX_TN;" "&JOBSdir;/JREGIONAL_RUN_VX_ENSGRID_MEAN" + &LOAD_MODULES_RUN_TASK_FP; "&TN_VX;" "&JOBSdir;/JREGIONAL_RUN_VX_ENSGRID_MEAN" {{ nnodes_vx_ensgrid_mean }}:ppn={{ ppn_vx_ensgrid_mean }} {%- if machine not in ["GAEA", "NOAACLOUD"] %} {{ mem_vx_ensgrid_mean }} @@ -1466,8 +1458,8 @@ the tag to be identical to the ones above for other output times. {{ wtime_vx_ensgrid_mean }} &NCORES_PER_NODE; &SCHED_NATIVE_CMD; - &VX_ENSGRID_MEAN_TN; - &LOGDIR;/&VX_ENSGRID_MEAN_TN;_@Y@m@d@H&LOGEXT; + &TN_VX_ENSGRID_MEAN; + &LOGDIR;/&TN_VX_ENSGRID_MEAN;_@Y@m@d@H&LOGEXT; GLOBAL_VAR_DEFNS_FP&GLOBAL_VAR_DEFNS_FP; USHdir&USHdir; @@ -1481,7 +1473,7 @@ the tag to be identical to the ones above for other output times. ACCUM01 - + @@ -1491,10 +1483,10 @@ the tag to be identical to the ones above for other output times. ************************************************************************ --> - + &RSRV_DEFAULT; - &LOAD_MODULES_RUN_TASK_FP; "&VX_TN;" "&JOBSdir;/JREGIONAL_RUN_VX_ENSGRID_PROB" + &LOAD_MODULES_RUN_TASK_FP; "&TN_VX;" "&JOBSdir;/JREGIONAL_RUN_VX_ENSGRID_PROB" {{ nnodes_vx_ensgrid_prob }}:ppn={{ ppn_vx_ensgrid_prob }} {%- if machine not in ["GAEA", "NOAACLOUD"] %} {{ mem_vx_ensgrid_prob }} @@ -1502,8 +1494,8 @@ the tag to be identical to the ones above for other output times. {{ wtime_vx_ensgrid_prob }} &NCORES_PER_NODE; &SCHED_NATIVE_CMD; - &VX_ENSGRID_PROB_TN; - &LOGDIR;/&VX_ENSGRID_PROB_TN;_@Y@m@d@H&LOGEXT; + &TN_VX_ENSGRID_PROB; + &LOGDIR;/&TN_VX_ENSGRID_PROB;_@Y@m@d@H&LOGEXT; GLOBAL_VAR_DEFNS_FP&GLOBAL_VAR_DEFNS_FP; USHdir&USHdir; @@ -1517,7 +1509,7 @@ the tag to be identical to the ones above for other output times. ACCUM01 - + @@ -1527,10 +1519,10 @@ the tag to be identical to the ones above for other output times. 
************************************************************************ --> - + &RSRV_DEFAULT; - &LOAD_MODULES_RUN_TASK_FP; "&VX_TN;" "&JOBSdir;/JREGIONAL_RUN_VX_ENSGRID_MEAN" + &LOAD_MODULES_RUN_TASK_FP; "&TN_VX;" "&JOBSdir;/JREGIONAL_RUN_VX_ENSGRID_MEAN" {{ nnodes_vx_ensgrid_mean }}:ppn={{ ppn_vx_ensgrid_mean }} {%- if machine not in ["GAEA", "NOAACLOUD"] %} {{ mem_vx_ensgrid_mean }} @@ -1538,8 +1530,8 @@ the tag to be identical to the ones above for other output times. {{ wtime_vx_ensgrid_mean }} &NCORES_PER_NODE; &SCHED_NATIVE_CMD; - &VX_ENSGRID_MEAN_03h_TN; - &LOGDIR;/&VX_ENSGRID_MEAN_03h_TN;_@Y@m@d@H&LOGEXT; + &TN_VX_ENSGRID_MEAN_03h; + &LOGDIR;/&TN_VX_ENSGRID_MEAN_03h;_@Y@m@d@H&LOGEXT; GLOBAL_VAR_DEFNS_FP&GLOBAL_VAR_DEFNS_FP; USHdir&USHdir; @@ -1553,7 +1545,7 @@ the tag to be identical to the ones above for other output times. ACCUM03 - + @@ -1563,10 +1555,10 @@ the tag to be identical to the ones above for other output times. ************************************************************************ --> - + &RSRV_DEFAULT; - &LOAD_MODULES_RUN_TASK_FP; "&VX_TN;" "&JOBSdir;/JREGIONAL_RUN_VX_ENSGRID_PROB" + &LOAD_MODULES_RUN_TASK_FP; "&TN_VX;" "&JOBSdir;/JREGIONAL_RUN_VX_ENSGRID_PROB" {{ nnodes_vx_ensgrid_prob }}:ppn={{ ppn_vx_ensgrid_prob }} {%- if machine not in ["GAEA", "NOAACLOUD"] %} {{ mem_vx_ensgrid_prob }} @@ -1574,8 +1566,8 @@ the tag to be identical to the ones above for other output times. {{ wtime_vx_ensgrid_prob }} &NCORES_PER_NODE; &SCHED_NATIVE_CMD; - &VX_ENSGRID_PROB_03h_TN; - &LOGDIR;/&VX_ENSGRID_PROB_03h_TN;_@Y@m@d@H&LOGEXT; + &TN_VX_ENSGRID_PROB_03h; + &LOGDIR;/&TN_VX_ENSGRID_PROB_03h;_@Y@m@d@H&LOGEXT; GLOBAL_VAR_DEFNS_FP&GLOBAL_VAR_DEFNS_FP; USHdir&USHdir; @@ -1589,7 +1581,7 @@ the tag to be identical to the ones above for other output times. ACCUM03 - + @@ -1600,10 +1592,10 @@ the tag to be identical to the ones above for other output times. 
************************************************************************ --> - + &RSRV_DEFAULT; - &LOAD_MODULES_RUN_TASK_FP; "&VX_TN;" "&JOBSdir;/JREGIONAL_RUN_VX_ENSGRID_MEAN" + &LOAD_MODULES_RUN_TASK_FP; "&TN_VX;" "&JOBSdir;/JREGIONAL_RUN_VX_ENSGRID_MEAN" {{ nnodes_vx_ensgrid_mean }}:ppn={{ ppn_vx_ensgrid_mean }} {%- if machine not in ["GAEA", "NOAACLOUD"] %} {{ mem_vx_ensgrid_mean }} @@ -1611,8 +1603,8 @@ the tag to be identical to the ones above for other output times. {{ wtime_vx_ensgrid_mean }} &NCORES_PER_NODE; &SCHED_NATIVE_CMD; - &VX_ENSGRID_MEAN_06h_TN; - &LOGDIR;/&VX_ENSGRID_MEAN_06h_TN;_@Y@m@d@H&LOGEXT; + &TN_VX_ENSGRID_MEAN_06h; + &LOGDIR;/&TN_VX_ENSGRID_MEAN_06h;_@Y@m@d@H&LOGEXT; GLOBAL_VAR_DEFNS_FP&GLOBAL_VAR_DEFNS_FP; USHdir&USHdir; @@ -1626,7 +1618,7 @@ the tag to be identical to the ones above for other output times. ACCUM06 - + @@ -1636,10 +1628,10 @@ the tag to be identical to the ones above for other output times. ************************************************************************ --> - + &RSRV_DEFAULT; - &LOAD_MODULES_RUN_TASK_FP; "&VX_TN;" "&JOBSdir;/JREGIONAL_RUN_VX_ENSGRID_PROB" + &LOAD_MODULES_RUN_TASK_FP; "&TN_VX;" "&JOBSdir;/JREGIONAL_RUN_VX_ENSGRID_PROB" {{ nnodes_vx_ensgrid_prob }}:ppn={{ ppn_vx_ensgrid_prob }} {%- if machine not in ["GAEA", "NOAACLOUD"] %} {{ mem_vx_ensgrid_prob }} @@ -1647,8 +1639,8 @@ the tag to be identical to the ones above for other output times. {{ wtime_vx_ensgrid_prob }} &NCORES_PER_NODE; &SCHED_NATIVE_CMD; - &VX_ENSGRID_PROB_06h_TN; - &LOGDIR;/&VX_ENSGRID_PROB_06h_TN;_@Y@m@d@H&LOGEXT; + &TN_VX_ENSGRID_PROB_06h; + &LOGDIR;/&TN_VX_ENSGRID_PROB_06h;_@Y@m@d@H&LOGEXT; GLOBAL_VAR_DEFNS_FP&GLOBAL_VAR_DEFNS_FP; USHdir&USHdir; @@ -1662,7 +1654,7 @@ the tag to be identical to the ones above for other output times. ACCUM06 - + @@ -1673,10 +1665,10 @@ the tag to be identical to the ones above for other output times. 
************************************************************************ --> - + &RSRV_DEFAULT; - &LOAD_MODULES_RUN_TASK_FP; "&VX_TN;" "&JOBSdir;/JREGIONAL_RUN_VX_ENSGRID_MEAN" + &LOAD_MODULES_RUN_TASK_FP; "&TN_VX;" "&JOBSdir;/JREGIONAL_RUN_VX_ENSGRID_MEAN" {{ nnodes_vx_ensgrid_mean }}:ppn={{ ppn_vx_ensgrid_mean }} {%- if machine not in ["GAEA", "NOAACLOUD"] %} {{ mem_vx_ensgrid_mean }} @@ -1684,8 +1676,8 @@ the tag to be identical to the ones above for other output times. {{ wtime_vx_ensgrid_mean }} &NCORES_PER_NODE; &SCHED_NATIVE_CMD; - &VX_ENSGRID_MEAN_24h_TN; - &LOGDIR;/&VX_ENSGRID_MEAN_24h_TN;_@Y@m@d@H&LOGEXT; + &TN_VX_ENSGRID_MEAN_24h; + &LOGDIR;/&TN_VX_ENSGRID_MEAN_24h;_@Y@m@d@H&LOGEXT; GLOBAL_VAR_DEFNS_FP&GLOBAL_VAR_DEFNS_FP; USHdir&USHdir; @@ -1699,7 +1691,7 @@ the tag to be identical to the ones above for other output times. ACCUM24 - + @@ -1709,10 +1701,10 @@ the tag to be identical to the ones above for other output times. ************************************************************************ --> - + &RSRV_DEFAULT; - &LOAD_MODULES_RUN_TASK_FP; "&VX_TN;" "&JOBSdir;/JREGIONAL_RUN_VX_ENSGRID_PROB" + &LOAD_MODULES_RUN_TASK_FP; "&TN_VX;" "&JOBSdir;/JREGIONAL_RUN_VX_ENSGRID_PROB" {{ nnodes_vx_ensgrid_prob }}:ppn={{ ppn_vx_ensgrid_prob }} {%- if machine not in ["GAEA", "NOAACLOUD"] %} {{ mem_vx_ensgrid_prob }} @@ -1720,8 +1712,8 @@ the tag to be identical to the ones above for other output times. {{ wtime_vx_ensgrid_prob }} &NCORES_PER_NODE; &SCHED_NATIVE_CMD; - &VX_ENSGRID_PROB_24h_TN; - &LOGDIR;/&VX_ENSGRID_PROB_24h_TN;_@Y@m@d@H&LOGEXT; + &TN_VX_ENSGRID_PROB_24h; + &LOGDIR;/&TN_VX_ENSGRID_PROB_24h;_@Y@m@d@H&LOGEXT; GLOBAL_VAR_DEFNS_FP&GLOBAL_VAR_DEFNS_FP; USHdir&USHdir; @@ -1735,7 +1727,7 @@ the tag to be identical to the ones above for other output times. ACCUM24 - + @@ -1745,10 +1737,10 @@ the tag to be identical to the ones above for other output times. 
************************************************************************ --> - + &RSRV_DEFAULT; - &LOAD_MODULES_RUN_TASK_FP; "&VX_TN;" "&JOBSdir;/JREGIONAL_RUN_VX_ENSGRID_PROB" + &LOAD_MODULES_RUN_TASK_FP; "&TN_VX;" "&JOBSdir;/JREGIONAL_RUN_VX_ENSGRID_PROB" {{ nnodes_vx_ensgrid_prob }}:ppn={{ ppn_vx_ensgrid_prob }} {%- if machine not in ["GAEA", "NOAACLOUD"] %} {{ mem_vx_ensgrid_prob }} @@ -1756,8 +1748,8 @@ the tag to be identical to the ones above for other output times. {{ wtime_vx_ensgrid_prob }} &NCORES_PER_NODE; &SCHED_NATIVE_CMD; - &VX_ENSGRID_PROB_REFC_TN; - &LOGDIR;/&VX_ENSGRID_PROB_REFC_TN;_@Y@m@d@H&LOGEXT; + &TN_VX_ENSGRID_PROB_REFC; + &LOGDIR;/&TN_VX_ENSGRID_PROB_REFC;_@Y@m@d@H&LOGEXT; GLOBAL_VAR_DEFNS_FP&GLOBAL_VAR_DEFNS_FP; USHdir&USHdir; @@ -1770,7 +1762,7 @@ the tag to be identical to the ones above for other output times. VARREFC - + @@ -1780,10 +1772,10 @@ the tag to be identical to the ones above for other output times. ************************************************************************ --> - + &RSRV_DEFAULT; - &LOAD_MODULES_RUN_TASK_FP; "&VX_TN;" "&JOBSdir;/JREGIONAL_RUN_VX_ENSGRID_PROB" + &LOAD_MODULES_RUN_TASK_FP; "&TN_VX;" "&JOBSdir;/JREGIONAL_RUN_VX_ENSGRID_PROB" {{ nnodes_vx_ensgrid_prob }}:ppn={{ ppn_vx_ensgrid_prob }} {%- if machine not in ["GAEA", "NOAACLOUD"] %} {{ mem_vx_ensgrid_prob }} @@ -1791,8 +1783,8 @@ the tag to be identical to the ones above for other output times. {{ wtime_vx_ensgrid_prob }} &NCORES_PER_NODE; &SCHED_NATIVE_CMD; - &VX_ENSGRID_PROB_RETOP_TN; - &LOGDIR;/&VX_ENSGRID_PROB_RETOP_TN;_@Y@m@d@H&LOGEXT; + &TN_VX_ENSGRID_PROB_RETOP; + &LOGDIR;/&TN_VX_ENSGRID_PROB_RETOP;_@Y@m@d@H&LOGEXT; GLOBAL_VAR_DEFNS_FP&GLOBAL_VAR_DEFNS_FP; USHdir&USHdir; @@ -1805,7 +1797,7 @@ the tag to be identical to the ones above for other output times. VARRETOP - + @@ -1816,10 +1808,10 @@ the tag to be identical to the ones above for other output times. 
************************************************************************ ************************************************************************ --> - + &RSRV_DEFAULT; - &LOAD_MODULES_RUN_TASK_FP; "&VX_TN;" "&JOBSdir;/JREGIONAL_RUN_VX_ENSPOINT" + &LOAD_MODULES_RUN_TASK_FP; "&TN_VX;" "&JOBSdir;/JREGIONAL_RUN_VX_ENSPOINT" {{ nnodes_vx_enspoint }}:ppn={{ ppn_vx_enspoint }} {%- if machine not in ["GAEA", "NOAACLOUD"] %} {{ mem_vx_enspoint }} @@ -1827,8 +1819,8 @@ the tag to be identical to the ones above for other output times. {{ wtime_vx_enspoint }} &NCORES_PER_NODE; &SCHED_NATIVE_CMD; - &VX_ENSPOINT_TN; - &LOGDIR;/&VX_ENSPOINT_TN;_@Y@m@d@H&LOGEXT; + &TN_VX_ENSPOINT; + &LOGDIR;/&TN_VX_ENSPOINT;_@Y@m@d@H&LOGEXT; GLOBAL_VAR_DEFNS_FP&GLOBAL_VAR_DEFNS_FP; USHdir&USHdir; @@ -1849,10 +1841,10 @@ the tag to be identical to the ones above for other output times. ************************************************************************ ************************************************************************ --> - + &RSRV_DEFAULT; - &LOAD_MODULES_RUN_TASK_FP; "&VX_TN;" "&JOBSdir;/JREGIONAL_RUN_VX_ENSPOINT_MEAN" + &LOAD_MODULES_RUN_TASK_FP; "&TN_VX;" "&JOBSdir;/JREGIONAL_RUN_VX_ENSPOINT_MEAN" {{ nnodes_vx_enspoint_mean }}:ppn={{ ppn_vx_enspoint_mean }} {%- if machine not in ["GAEA", "NOAACLOUD"] %} {{ mem_vx_enspoint_mean }} @@ -1860,8 +1852,8 @@ the tag to be identical to the ones above for other output times. {{ wtime_vx_enspoint_mean }} &NCORES_PER_NODE; &SCHED_NATIVE_CMD; - &VX_ENSPOINT_MEAN_TN; - &LOGDIR;/&VX_ENSPOINT_MEAN_TN;_@Y@m@d@H&LOGEXT; + &TN_VX_ENSPOINT_MEAN; + &LOGDIR;/&TN_VX_ENSPOINT_MEAN;_@Y@m@d@H&LOGEXT; GLOBAL_VAR_DEFNS_FP&GLOBAL_VAR_DEFNS_FP; USHdir&USHdir; @@ -1873,7 +1865,7 @@ the tag to be identical to the ones above for other output times. FHR {% for h in range(0, fcst_len_hrs+1) %}{{ " %02d" % h }}{% endfor %} - + @@ -1882,10 +1874,10 @@ the tag to be identical to the ones above for other output times. 
************************************************************************ ************************************************************************ --> - + &RSRV_DEFAULT; - &LOAD_MODULES_RUN_TASK_FP; "&VX_TN;" "&JOBSdir;/JREGIONAL_RUN_VX_ENSPOINT_PROB" + &LOAD_MODULES_RUN_TASK_FP; "&TN_VX;" "&JOBSdir;/JREGIONAL_RUN_VX_ENSPOINT_PROB" {{ nnodes_vx_enspoint_prob }}:ppn={{ ppn_vx_enspoint_prob }} {%- if machine not in ["GAEA", "NOAACLOUD"] %} {{ mem_vx_enspoint_prob }} @@ -1893,8 +1885,8 @@ the tag to be identical to the ones above for other output times. {{ wtime_vx_enspoint_prob }} &NCORES_PER_NODE; &SCHED_NATIVE_CMD; - &VX_ENSPOINT_PROB_TN; - &LOGDIR;/&VX_ENSPOINT_PROB_TN;_@Y@m@d@H&LOGEXT; + &TN_VX_ENSPOINT_PROB; + &LOGDIR;/&TN_VX_ENSPOINT_PROB;_@Y@m@d@H&LOGEXT; GLOBAL_VAR_DEFNS_FP&GLOBAL_VAR_DEFNS_FP; USHdir&USHdir; @@ -1906,7 +1898,7 @@ the tag to be identical to the ones above for other output times. FHR {% for h in range(0, fcst_len_hrs+1) %}{{ " %02d" % h }}{% endfor %} - + diff --git a/parm/data_locations.yml b/parm/data_locations.yml index d20ca819aa..8d19240f00 100644 --- a/parm/data_locations.yml +++ b/parm/data_locations.yml @@ -155,6 +155,7 @@ GDAS: - gdas.t{hh}z.sfcf{fcst_hr:03d}.nc fcst: - gdas.t{hh}z.atmf{fcst_hr:03d}.nc + - gdas.t{hh}z.sfcf{fcst_hr:03d}.nc nomads: protocol: download url: https://nomads.ncep.noaa.gov/pub/data/nccf/com/gfs/prod/enkfgdas.{yyyymmdd}/{hh}/atmos/mem{mem:03d} @@ -271,3 +272,120 @@ NAM: fcst: - nam.t{hh}z.awphys{fcst_hr:02d}.tm00.grib2 +########################## +########################## +### Observation Data ### +########################## +########################## + +GFS_obs: + hpss: + protocol: htar + archive_format: zip + archive_path: + - /BMC/fdr/Permanent/{yyyy}/{mm}/{dd}/data/grids/gfs/prepbufr + archive_file_names: + prepbufr: + obs: + - "{yyyymmdd}0000.zip" + tcvitals: + obs: + - "{yyyymmdd}0000.zip" + file_names: + prepbufr: + obs: + - "{yy}{jjj}{hh}00.gfs.t{hh}z.prepbufr.nr" + tcvitals: + obs: + - 
"{yy}{jjj}{hh}00.gfs.t{hh}z.syndata.tcvitals.tm00" + +RAP_obs: + hpss: + protocol: htar + archive_format: zip + archive_path: + - /BMC/fdr/Permanent/{yyyy}/{mm}/{dd}/data/grids/rap/obs + archive_internal_dir: + - ./ + archive_file_names: + - "{yyyymmddhh}00.zip" + file_names: + obs: + - "{yyyymmddhh}.rap.t{hh}z.prepbufr.tm00" + - "{yyyymmddhh}.rap.t{hh}z.1bamua.tm00.bufr_d" + - "{yyyymmddhh}.rap.t{hh}z.1bhrs4.tm00.bufr_d" + - "{yyyymmddhh}.rap.t{hh}z.1bmhs.tm00.bufr_d" + - "{yyyymmddhh}.rap.t{hh}z.amsr2.tm00.bufr_d" + - "{yyyymmddhh}.rap.t{hh}z.ascatt.tm00.bufr_d" + - "{yyyymmddhh}.rap.t{hh}z.ascatw.tm00.bufr_d" + - "{yyyymmddhh}.rap.t{hh}z.atms.tm00.bufr_d" + - "{yyyymmddhh}.rap.t{hh}z.atmsdb.tm00.bufr_d" + - "{yyyymmddhh}.rap.t{hh}z.crisf4.tm00.bufr_d" + - "{yyyymmddhh}.rap.t{hh}z.crsfdb.tm00.bufr_d" + - "{yyyymmddhh}.rap.t{hh}z.esamua.tm00.bufr_d" + - "{yyyymmddhh}.rap.t{hh}z.esatms.tm00.bufr_d" + - "{yyyymmddhh}.rap.t{hh}z.eshrs3.tm00.bufr_d" + - "{yyyymmddhh}.rap.t{hh}z.esiasi.tm00.bufr_d" + - "{yyyymmddhh}.rap.t{hh}z.esmhs.tm00.bufr_d" + - "{yyyymmddhh}.rap.t{hh}z.gpsipw.tm00.bufr_d" + - "{yyyymmddhh}.rap.t{hh}z.gpsipw.tm00.bufr_d.nr" + - "{yyyymmddhh}.rap.t{hh}z.gsrasr.tm00.bufr_d" + - "{yyyymmddhh}.rap.t{hh}z.gsrcsr.tm00.bufr_d" + - "{yyyymmddhh}.rap.t{hh}z.iasidb.tm00.bufr_d" + - "{yyyymmddhh}.rap.t{hh}z.lghtng.tm00.bufr_d" + - "{yyyymmddhh}.rap.t{hh}z.lghtng.tm00.bufr_d.nr" + - "{yyyymmddhh}.rap.t{hh}z.lgycld.tm00.bufr_d" + - "{yyyymmddhh}.rap.t{hh}z.mtiasi.tm00.bufr_d" + - "{yyyymmddhh}.rap.t{hh}z.nexrad.tm00.bufr_d" + - "{yyyymmddhh}.rap.t{hh}z.rassda.tm00.bufr_d" + - "{yyyymmddhh}.rap.t{hh}z.satwnd.tm00.bufr_d" + - "{yyyymmddhh}.rap.t{hh}z.sevasr.tm00.bufr_d" + - "{yyyymmddhh}.rap.t{hh}z.ssmisu.tm00.bufr_d" + aws: + protocol: download + url: https://noaa-rap-pds.s3.amazonaws.com/rap.{yyyymmdd} + file_names: + obs: + - rap.t{hh}z.prepbufr.tm00.nr + - rap.t{hh}z.1bamua.tm00.bufr_d + - rap.t{hh}z.1bhrs4.tm00.bufr_d + - rap.t{hh}z.1bmhs.tm00.bufr_d + - 
rap.t{hh}z.amsr2.tm00.bufr_d + - rap.t{hh}z.ascatt.tm00.bufr_d + - rap.t{hh}z.ascatw.tm00.bufr_d + - rap.t{hh}z.atms.tm00.bufr_d + - rap.t{hh}z.atmsdb.tm00.bufr_d + - rap.t{hh}z.crisf4.tm00.bufr_d + - rap.t{hh}z.crsfdb.tm00.bufr_d + - rap.t{hh}z.esamua.tm00.bufr_d + - rap.t{hh}z.esatms.tm00.bufr_d + - rap.t{hh}z.eshrs3.tm00.bufr_d + - rap.t{hh}z.esiasi.tm00.bufr_d + - rap.t{hh}z.esmhs.tm00.bufr_d + - rap.t{hh}z.gpsipw.tm00.bufr_d + - rap.t{hh}z.gpsipw.tm00.bufr_d.nr + - rap.t{hh}z.gsrasr.tm00.bufr_d + - rap.t{hh}z.gsrcsr.tm00.bufr_d + - rap.t{hh}z.iasidb.tm00.bufr_d + - rap.t{hh}z.lghtng.tm00.bufr_d + - rap.t{hh}z.lghtng.tm00.bufr_d.nr + - rap.t{hh}z.lgycld.tm00.bufr_d + - rap.t{hh}z.mtiasi.tm00.bufr_d + - rap.t{hh}z.nexrad.tm00.bufr_d + - rap.t{hh}z.rassda.tm00.bufr_d + - rap.t{hh}z.satwnd.tm00.bufr_d + - rap.t{hh}z.sevasr.tm00.bufr_d + - rap.t{hh}z.ssmisu.tm00.bufr_d + +########################### +########################### +####### Fix Files ######### +########################### +########################### + +GSI-FIX: + remote: + protocol: download + url: https://epic-sandbox-srw.s3.amazonaws.com + file_names: + - gsi-fix.22.07.27.tar.gz diff --git a/parm/diag_table.FV3_GFS_v17_p8 b/parm/diag_table.FV3_GFS_v17_p8 new file mode 100644 index 0000000000..3c5b58508a --- /dev/null +++ b/parm/diag_table.FV3_GFS_v17_p8 @@ -0,0 +1,245 @@ +{{ starttime.strftime("%Y%m%d.%H") }}Z.{{ cres }}.32bit.non-hydro.regional +{{ starttime.strftime("%Y %m %d %H %M %S") }} + +#output files +"grid_spec", -1, "months", 1, "days", "time" +"atmos_static", -1, "hours", 1, "hours", "time" +"fv3_history", 3, "hours", 1, "hours", "time" +"fv3_history2d", 3, "hours", 1, "hours", "time" + +# +#======================= +# ATMOSPHERE DIAGNOSTICS +#======================= +### +# grid_spec +### +"dynamics", "grid_lon", "grid_lon", "grid_spec", "all", .false., "none", 2 +"dynamics", "grid_lat", "grid_lat", "grid_spec", "all", .false., "none", 2 +"dynamics", "grid_lont", "grid_lont", "grid_spec", 
"all", .false., "none", 2 +"dynamics", "grid_latt", "grid_latt", "grid_spec", "all", .false., "none", 2 +"dynamics", "area", "area", "grid_spec", "all", .false., "none", 2 +### +# gfs static data +### +"dynamics", "pk", "pk", "atmos_static", "all", .false., "none", 2 +"dynamics", "bk", "bk", "atmos_static", "all", .false., "none", 2 +"dynamics", "hyam", "hyam", "atmos_static", "all", .false., "none", 2 +"dynamics", "hybm", "hybm", "atmos_static", "all", .false., "none", 2 +"dynamics", "zsurf", "zsurf", "atmos_static", "all", .false., "none", 2 +### +# FV3 variabls needed for NGGPS evaluation +### +"gfs_dyn", "ucomp", "ugrd", "fv3_history", "all", .false., "none", 2 +"gfs_dyn", "vcomp", "vgrd", "fv3_history", "all", .false., "none", 2 +"gfs_dyn", "sphum", "spfh", "fv3_history", "all", .false., "none", 2 +"gfs_dyn", "temp", "tmp", "fv3_history", "all", .false., "none", 2 +"gfs_dyn", "liq_wat", "clwmr", "fv3_history", "all", .false., "none", 2 +"gfs_dyn", "o3mr", "o3mr", "fv3_history", "all", .false., "none", 2 +"gfs_dyn", "delp", "dpres", "fv3_history", "all", .false., "none", 2 +"gfs_dyn", "delz", "delz", "fv3_history", "all", .false., "none", 2 +"gfs_dyn", "w", "dzdt", "fv3_history", "all", .false., "none", 2 +"gfs_dyn", "ice_wat", "icmr", "fv3_history", "all", .false., "none", 2 +"gfs_dyn", "rainwat", "rwmr", "fv3_history", "all", .false., "none", 2 +"gfs_dyn", "snowwat", "snmr", "fv3_history", "all", .false., "none", 2 +"gfs_dyn", "graupel", "grle", "fv3_history", "all", .false., "none", 2 +"gfs_dyn", "ps", "pressfc", "fv3_history", "all", .false., "none", 2 +"gfs_dyn", "hs", "hgtsfc", "fv3_history", "all", .false., "none", 2 + +"gfs_phys", "cldfra", "cldfra", "fv3_history2d", "all", .false., "none", 2 +"gfs_phys", "ALBDO_ave", "albdo_ave", "fv3_history2d", "all", .false., "none", 2 +"gfs_phys", "cnvprcp_ave", "cprat_ave", "fv3_history2d", "all", .false., "none", 2 +"gfs_phys", "cnvprcpb_ave", "cpratb_ave", "fv3_history2d", "all", .false., "none", 2 +"gfs_phys", 
"totprcp_ave", "prate_ave", "fv3_history2d", "all", .false., "none", 2 +"gfs_phys", "totprcpb_ave", "prateb_ave", "fv3_history2d", "all", .false., "none", 2 +"gfs_phys", "DLWRF", "dlwrf_ave", "fv3_history2d", "all", .false., "none", 2 +"gfs_phys", "DLWRFI", "dlwrf", "fv3_history2d", "all", .false., "none", 2 +"gfs_phys", "ULWRF", "ulwrf_ave", "fv3_history2d", "all", .false., "none", 2 +"gfs_phys", "ULWRFI", "ulwrf", "fv3_history2d", "all", .false., "none", 2 +"gfs_phys", "DSWRF", "dswrf_ave", "fv3_history2d", "all", .false., "none", 2 +"gfs_phys", "DSWRFI", "dswrf", "fv3_history2d", "all", .false., "none", 2 +"gfs_phys", "USWRF", "uswrf_ave", "fv3_history2d", "all", .false., "none", 2 +"gfs_phys", "USWRFI", "uswrf", "fv3_history2d", "all", .false., "none", 2 +"gfs_phys", "DSWRFtoa", "dswrf_avetoa", "fv3_history2d", "all", .false., "none", 2 +"gfs_phys", "USWRFtoa", "uswrf_avetoa", "fv3_history2d", "all", .false., "none", 2 +"gfs_phys", "ULWRFtoa", "ulwrf_avetoa", "fv3_history2d", "all", .false., "none", 2 +"gfs_phys", "gflux_ave", "gflux_ave", "fv3_history2d", "all", .false., "none", 2 +"gfs_phys", "hpbl", "hpbl", "fv3_history2d", "all", .false., "none", 2 +"gfs_phys", "lhtfl_ave", "lhtfl_ave", "fv3_history2d", "all", .false., "none", 2 +"gfs_phys", "shtfl_ave", "shtfl_ave", "fv3_history2d", "all", .false., "none", 2 +"gfs_phys", "pwat", "pwat", "fv3_history2d", "all", .false., "none", 2 +"gfs_phys", "soilm", "soilm", "fv3_history2d", "all", .false., "none", 2 +"gfs_phys", "TCDC_aveclm", "tcdc_aveclm", "fv3_history2d", "all", .false., "none", 2 +"gfs_phys", "TCDC_avebndcl", "tcdc_avebndcl", "fv3_history2d", "all", .false., "none", 2 +"gfs_phys", "TCDC_avehcl", "tcdc_avehcl", "fv3_history2d", "all", .false., "none", 2 +"gfs_phys", "TCDC_avelcl", "tcdc_avelcl", "fv3_history2d", "all", .false., "none", 2 +"gfs_phys", "TCDC_avemcl", "tcdc_avemcl", "fv3_history2d", "all", .false., "none", 2 +"gfs_phys", "TCDCcnvcl", "tcdccnvcl", "fv3_history2d", "all", .false., "none", 
2 +"gfs_phys", "PREScnvclt", "prescnvclt", "fv3_history2d", "all", .false., "none", 2 +"gfs_phys", "PREScnvclb", "prescnvclb", "fv3_history2d", "all", .false., "none", 2 +"gfs_phys", "PRES_avehct", "pres_avehct", "fv3_history2d", "all", .false., "none", 2 +"gfs_phys", "PRES_avehcb", "pres_avehcb", "fv3_history2d", "all", .false., "none", 2 +"gfs_phys", "TEMP_avehct", "tmp_avehct", "fv3_history2d", "all", .false., "none", 2 +"gfs_phys", "PRES_avemct", "pres_avemct", "fv3_history2d", "all", .false., "none", 2 +"gfs_phys", "PRES_avemcb", "pres_avemcb", "fv3_history2d", "all", .false., "none", 2 +"gfs_phys", "TEMP_avemct", "tmp_avemct", "fv3_history2d", "all", .false., "none", 2 +"gfs_phys", "PRES_avelct", "pres_avelct", "fv3_history2d", "all", .false., "none", 2 +"gfs_phys", "PRES_avelcb", "pres_avelcb", "fv3_history2d", "all", .false., "none", 2 +"gfs_phys", "TEMP_avelct", "tmp_avelct", "fv3_history2d", "all", .false., "none", 2 +"gfs_phys", "u-gwd_ave", "u-gwd_ave", "fv3_history2d", "all", .false., "none", 2 +"gfs_phys", "v-gwd_ave", "v-gwd_ave", "fv3_history2d", "all", .false., "none", 2 +"gfs_phys", "dusfc", "uflx_ave", "fv3_history2d", "all", .false., "none", 2 +"gfs_phys", "dvsfc", "vflx_ave", "fv3_history2d", "all", .false., "none", 2 +"gfs_phys", "psurf", "pressfc", "fv3_history2d", "all", .false., "none", 2 +"gfs_phys", "u10m", "ugrd10m", "fv3_history2d", "all", .false., "none", 2 +"gfs_phys", "v10m", "vgrd10m", "fv3_history2d", "all", .false., "none", 2 +"gfs_phys", "acond", "acond", "fv3_history2d", "all", .false., "none", 2 +"gfs_phys", "cduvb_ave", "cduvb_ave", "fv3_history2d", "all", .false., "none", 2 +"gfs_phys", "cpofp", "cpofp", "fv3_history2d", "all", .false., "none", 2 +"gfs_phys", "duvb_ave", "duvb_ave", "fv3_history2d", "all", .false., "none", 2 +"gfs_phys", "csdlf_ave", "csdlf", "fv3_history2d", "all", .false., "none", 2 +"gfs_phys", "csusf_ave", "csusf", "fv3_history2d", "all", .false., "none", 2 +"gfs_phys", "csusf_avetoa", "csusftoa", 
"fv3_history2d", "all", .false., "none", 2 +"gfs_phys", "csdsf_ave", "csdsf", "fv3_history2d", "all", .false., "none", 2 +"gfs_phys", "csulf_ave", "csulf", "fv3_history2d", "all", .false., "none", 2 +"gfs_phys", "csulf_avetoa", "csulftoa", "fv3_history2d", "all", .false., "none", 2 +"gfs_phys", "cwork_ave", "cwork_aveclm", "fv3_history2d", "all", .false., "none", 2 +"gfs_phys", "evbs_ave", "evbs_ave", "fv3_history2d", "all", .false., "none", 2 +"gfs_phys", "evcw_ave", "evcw_ave", "fv3_history2d", "all", .false., "none", 2 +"gfs_phys", "fldcp", "fldcp", "fv3_history2d", "all", .false., "none", 2 +"gfs_phys", "hgt_hyblev1", "hgt_hyblev1", "fv3_history2d", "all", .false., "none", 2 +"gfs_phys", "spfh_hyblev1", "spfh_hyblev1", "fv3_history2d", "all", .false., "none", 2 +"gfs_phys", "ugrd_hyblev1", "ugrd_hyblev1", "fv3_history2d", "all", .false., "none", 2 +"gfs_phys", "vgrd_hyblev1", "vgrd_hyblev1", "fv3_history2d", "all", .false., "none", 2 +"gfs_phys", "tmp_hyblev1", "tmp_hyblev1", "fv3_history2d", "all", .false., "none", 2 +"gfs_phys", "gfluxi", "gflux", "fv3_history2d", "all", .false., "none", 2 +"gfs_phys", "lhtfl", "lhtfl", "fv3_history2d", "all", .false., "none", 2 +"gfs_phys", "shtfl", "shtfl", "fv3_history2d", "all", .false., "none", 2 +"gfs_phys", "pevpr", "pevpr", "fv3_history2d", "all", .false., "none", 2 +"gfs_phys", "pevpr_ave", "pevpr_ave", "fv3_history2d", "all", .false., "none", 2 +"gfs_phys", "sbsno_ave", "sbsno_ave", "fv3_history2d", "all", .false., "none", 2 +"gfs_phys", "sfexc", "sfexc", "fv3_history2d", "all", .false., "none", 2 +"gfs_phys", "snohf", "snohf", "fv3_history2d", "all", .false., "none", 2 +"gfs_phys", "snowc_ave", "snowc_ave", "fv3_history2d", "all", .false., "none", 2 +"gfs_phys", "spfhmax2m", "spfhmax_max2m", "fv3_history2d", "all", .false., "none", 2 +"gfs_phys", "spfhmin2m", "spfhmin_min2m", "fv3_history2d", "all", .false., "none", 2 +"gfs_phys", "tmpmax2m", "tmax_max2m", "fv3_history2d", "all", .false., "none", 2 +"gfs_phys", 
"tmpmin2m", "tmin_min2m", "fv3_history2d", "all", .false., "none", 2 +"gfs_phys", "ssrun_acc", "ssrun_acc", "fv3_history2d", "all", .false., "none", 2 +"gfs_phys", "sunsd_acc", "sunsd_acc", "fv3_history2d", "all", .false., "none", 2 +"gfs_phys", "watr_acc", "watr_acc", "fv3_history2d", "all", .false., "none", 2 +"gfs_phys", "wilt", "wilt", "fv3_history2d", "all", .false., "none", 2 +"gfs_phys", "vbdsf_ave", "vbdsf_ave", "fv3_history2d", "all", .false., "none", 2 +"gfs_phys", "vddsf_ave", "vddsf_ave", "fv3_history2d", "all", .false., "none", 2 +"gfs_phys", "nbdsf_ave", "nbdsf_ave", "fv3_history2d", "all", .false., "none", 2 +"gfs_phys", "nddsf_ave", "nddsf_ave", "fv3_history2d", "all", .false., "none", 2 +"gfs_phys", "trans_ave", "trans_ave", "fv3_history2d", "all", .false., "none", 2 +"gfs_phys", "pahi", "pahi", "fv3_history2d", "all", .false., "none", 2 +"gfs_phys", "pah_ave", "pah_ave", "fv3_history2d", "all", .false., "none", 2 +"gfs_phys", "ecan_acc", "ecan_acc", "fv3_history2d", "all", .false., "none", 2 +"gfs_phys", "etran_acc", "etran_acc", "fv3_history2d", "all", .false., "none", 2 +"gfs_phys", "edir_acc", "edir_acc", "fv3_history2d", "all", .false., "none", 2 +"gfs_phys", "rainc", "cnvprcp", "fv3_history2d", "all", .false., "none", 2 +"gfs_phys", "wa_acc", "wa_acc", "fv3_history2d", "all", .false., "none", 2 +"gfs_phys", "AOD_550", "aod550", "fv3_history2d", "all", .false., "none", 2 +"gfs_phys", "DU_AOD_550", "du_aod550", "fv3_history2d", "all", .false., "none", 2 +"gfs_phys", "SU_AOD_550", "su_aod550", "fv3_history2d", "all", .false., "none", 2 +"gfs_phys", "BC_AOD_550", "bc_aod550", "fv3_history2d", "all", .false., "none", 2 +"gfs_phys", "OC_AOD_550", "oc_aod550", "fv3_history2d", "all", .false., "none", 2 +"gfs_phys", "SS_AOD_550", "ss_aod550", "fv3_history2d", "all", .false., "none", 2 + +"gfs_sfc", "crain", "crain", "fv3_history2d", "all", .false., "none", 2 +"gfs_sfc", "tprcp", "tprcp", "fv3_history2d", "all", .false., "none", 2 +"gfs_sfc", 
"hgtsfc", "orog", "fv3_history2d", "all", .false., "none", 2 +"gfs_sfc", "lfrac", "lfrac", "fv3_history2d", "all", .false., "none", 2 +"gfs_sfc", "weasd", "weasd", "fv3_history2d", "all", .false., "none", 2 +"gfs_sfc", "f10m", "f10m", "fv3_history2d", "all", .false., "none", 2 +"gfs_sfc", "q2m", "spfh2m", "fv3_history2d", "all", .false., "none", 2 +"gfs_sfc", "t2m", "tmp2m", "fv3_history2d", "all", .false., "none", 2 +"gfs_sfc", "tsfc", "tmpsfc", "fv3_history2d", "all", .false., "none", 2 +"gfs_sfc", "vtype", "vtype", "fv3_history2d", "all", .false., "none", 2 +"gfs_sfc", "stype", "sotyp", "fv3_history2d", "all", .false., "none", 2 +"gfs_sfc", "slmsksfc", "land", "fv3_history2d", "all", .false., "none", 2 +"gfs_sfc", "vfracsfc", "veg", "fv3_history2d", "all", .false., "none", 2 +"gfs_sfc", "zorlsfc", "sfcr", "fv3_history2d", "all", .false., "none", 2 +"gfs_sfc", "uustar", "fricv", "fv3_history2d", "all", .false., "none", 2 +"gfs_sfc", "soilt1", "soilt1" "fv3_history2d", "all", .false., "none", 2 +"gfs_sfc", "soilt2", "soilt2" "fv3_history2d", "all", .false., "none", 2 +"gfs_sfc", "soilt3", "soilt3" "fv3_history2d", "all", .false., "none", 2 +"gfs_sfc", "soilt4", "soilt4" "fv3_history2d", "all", .false., "none", 2 +"gfs_sfc", "soilw1", "soilw1" "fv3_history2d", "all", .false., "none", 2 +"gfs_sfc", "soilw2", "soilw2" "fv3_history2d", "all", .false., "none", 2 +"gfs_sfc", "soilw3", "soilw3" "fv3_history2d", "all", .false., "none", 2 +"gfs_sfc", "soilw4", "soilw4" "fv3_history2d", "all", .false., "none", 2 +"gfs_sfc", "slc_1", "soill1", "fv3_history2d", "all", .false., "none", 2 +"gfs_sfc", "slc_2", "soill2", "fv3_history2d", "all", .false., "none", 2 +"gfs_sfc", "slc_3", "soill3", "fv3_history2d", "all", .false., "none", 2 +"gfs_sfc", "slc_4", "soill4", "fv3_history2d", "all", .false., "none", 2 +"gfs_sfc", "slope", "sltyp", "fv3_history2d", "all", .false., "none", 2 +"gfs_sfc", "alnsf", "alnsf", "fv3_history2d", "all", .false., "none", 2 +"gfs_sfc", "alnwf", 
"alnwf", "fv3_history2d", "all", .false., "none", 2 +"gfs_sfc", "alvsf", "alvsf", "fv3_history2d", "all", .false., "none", 2 +"gfs_sfc", "alvwf", "alvwf", "fv3_history2d", "all", .false., "none", 2 +"gfs_sfc", "canopy", "cnwat", "fv3_history2d", "all", .false., "none", 2 +"gfs_sfc", "facsf", "facsf", "fv3_history2d", "all", .false., "none", 2 +"gfs_sfc", "facwf", "facwf", "fv3_history2d", "all", .false., "none", 2 +"gfs_sfc", "ffhh", "ffhh", "fv3_history2d", "all", .false., "none", 2 +"gfs_sfc", "ffmm", "ffmm", "fv3_history2d", "all", .false., "none", 2 +"gfs_sfc", "fice", "icec", "fv3_history2d", "all", .false., "none", 2 +"gfs_sfc", "hice", "icetk", "fv3_history2d", "all", .false., "none", 2 +"gfs_sfc", "snoalb", "snoalb", "fv3_history2d", "all", .false., "none", 2 +"gfs_sfc", "shdmax", "shdmax", "fv3_history2d", "all", .false., "none", 2 +"gfs_sfc", "shdmin", "shdmin", "fv3_history2d", "all", .false., "none", 2 +"gfs_sfc", "snowd", "snod", "fv3_history2d", "all", .false., "none", 2 +"gfs_sfc", "tg3", "tg3", "fv3_history2d", "all", .false., "none", 2 +"gfs_sfc", "tisfc", "tisfc", "fv3_history2d", "all", .false., "none", 2 +"gfs_sfc", "tref", "tref", "fv3_history2d", "all", .false., "none", 2 +"gfs_sfc", "z_c", "zc", "fv3_history2d", "all", .false., "none", 2 +"gfs_sfc", "c_0", "c0", "fv3_history2d", "all", .false., "none", 2 +"gfs_sfc", "c_d", "cd", "fv3_history2d", "all", .false., "none", 2 +"gfs_sfc", "w_0", "w0", "fv3_history2d", "all", .false., "none", 2 +"gfs_sfc", "w_d", "wd", "fv3_history2d", "all", .false., "none", 2 +"gfs_sfc", "xt", "xt", "fv3_history2d", "all", .false., "none", 2 +"gfs_sfc", "xz", "xz", "fv3_history2d", "all", .false., "none", 2 +"gfs_sfc", "dt_cool", "dtcool", "fv3_history2d", "all", .false., "none", 2 +"gfs_sfc", "xs", "xs", "fv3_history2d", "all", .false., "none", 2 +"gfs_sfc", "xu", "xu", "fv3_history2d", "all", .false., "none", 2 +"gfs_sfc", "xv", "xv", "fv3_history2d", "all", .false., "none", 2 +"gfs_sfc", "xtts", "xtts", 
"fv3_history2d", "all", .false., "none", 2 +"gfs_sfc", "xzts", "xzts", "fv3_history2d", "all", .false., "none", 2 +"gfs_sfc", "d_conv", "dconv", "fv3_history2d", "all", .false., "none", 2 +"gfs_sfc", "qrain", "qrain", "fv3_history2d", "all", .false., "none", 2 + +#============================================================================================= +# +#====> This file can be used with diag_manager/v2.0a (or higher) <==== +# +# +# FORMATS FOR FILE ENTRIES (not all input values are used) +# ------------------------ +# +#"file_name", output_freq, "output_units", format, "time_units", "long_name", +# +# +#output_freq: > 0 output frequency in "output_units" +# = 0 output frequency every time step +# =-1 output frequency at end of run +# +#output_units = units used for output frequency +# (years, months, days, minutes, hours, seconds) +# +#time_units = units used to label the time axis +# (days, minutes, hours, seconds) +# +# +# FORMAT FOR FIELD ENTRIES (not all input values are used) +# ------------------------ +# +#"module_name", "field_name", "output_name", "file_name" "time_sampling", time_avg, "other_opts", packing +# +#time_avg = .true. or .false. +# +#packing = 1 double precision +# = 2 float +# = 4 packed 16-bit integers +# = 8 packed 1-byte (not tested?) 
diff --git a/parm/field_table.FV3_GFS_v17_p8 b/parm/field_table.FV3_GFS_v17_p8 new file mode 100644 index 0000000000..6fea174372 --- /dev/null +++ b/parm/field_table.FV3_GFS_v17_p8 @@ -0,0 +1,51 @@ +# added by FRE: sphum must be present in atmos +# specific humidity for moist runs + "TRACER", "atmos_mod", "sphum" + "longname", "specific humidity" + "units", "kg/kg" + "profile_type", "fixed", "surface_value=3.e-6" / +# prognostic cloud water mixing ratio + "TRACER", "atmos_mod", "liq_wat" + "longname", "cloud water mixing ratio" + "units", "kg/kg" + "profile_type", "fixed", "surface_value=1.e30" / +# prognostic ice water mixing ratio + "TRACER", "atmos_mod", "ice_wat" + "longname", "cloud ice mixing ratio" + "units", "kg/kg" + "profile_type", "fixed", "surface_value=1.e30" / +# prognostic rain water mixing ratio + "TRACER", "atmos_mod", "rainwat" + "longname", "rain water mixing ratio" + "units", "kg/kg" + "profile_type", "fixed", "surface_value=1.e30" / +# prognostic snow water mixing ratio + "TRACER", "atmos_mod", "snowwat" + "longname", "snow water mixing ratio" + "units", "kg/kg" + "profile_type", "fixed", "surface_value=1.e30" / +# prognostic Grau water mixing ratio + "TRACER", "atmos_mod", "graupel" + "longname", "graupel mixing ratio" + "units", "kg/kg" + "profile_type", "fixed", "surface_value=1.e30" / +# prognostic cloud ice number concentration + "TRACER", "atmos_mod", "ice_nc" + "longname", "cloud ice water number concentration" + "units", "/kg" + "profile_type", "fixed", "surface_value=0.0" / +# prognostic rain number concentration + "TRACER", "atmos_mod", "rain_nc" + "longname", "rain number concentration" + "units", "/kg" + "profile_type", "fixed", "surface_value=0.0" / +# prognostic ozone mixing ratio tracer + "TRACER", "atmos_mod", "o3mr" + "longname", "ozone mixing ratio" + "units", "kg/kg" + "profile_type", "fixed", "surface_value=1.e30" / +# prognostic subgrid scale turbulent kinetic energy + "TRACER", "atmos_mod", "sgs_tke" + "longname", "subgrid 
scale turbulent kinetic energy" + "units", "m2/s2" + "profile_type", "fixed", "surface_value=0.0" / diff --git a/parm/metplus/EnsembleStat_APCP01h.conf b/parm/metplus/EnsembleStat_APCP01h.conf index bdefb0d203..fc1e92566a 100644 --- a/parm/metplus/EnsembleStat_APCP01h.conf +++ b/parm/metplus/EnsembleStat_APCP01h.conf @@ -28,7 +28,7 @@ INIT_INCREMENT=3600 LEAD_SEQ = {ENV[fhr_list]} # Used in the MET config file for: model, output_prefix -MODEL = {ENV[MODEL]} +MODEL = {ENV[VX_FCST_MODEL_NAME]} ENSEMBLE_STAT_DESC = NA ;; not in other file diff --git a/parm/metplus/EnsembleStat_APCP03h.conf b/parm/metplus/EnsembleStat_APCP03h.conf index 17971d7b4f..a147672bbc 100644 --- a/parm/metplus/EnsembleStat_APCP03h.conf +++ b/parm/metplus/EnsembleStat_APCP03h.conf @@ -32,7 +32,7 @@ INIT_INCREMENT=3600 LEAD_SEQ = {ENV[fhr_list]} # Used in the MET config file for: model, output_prefix -MODEL = {ENV[MODEL]} +MODEL = {ENV[VX_FCST_MODEL_NAME]} FCST_NATIVE_DATA_TYPE = GRIB ENSEMBLE_STAT_DESC = NA ;; not in other file diff --git a/parm/metplus/EnsembleStat_APCP06h.conf b/parm/metplus/EnsembleStat_APCP06h.conf index b497b31000..6b7ba19bb6 100644 --- a/parm/metplus/EnsembleStat_APCP06h.conf +++ b/parm/metplus/EnsembleStat_APCP06h.conf @@ -32,7 +32,7 @@ INIT_INCREMENT=3600 LEAD_SEQ = {ENV[fhr_list]} # Used in the MET config file for: model, output_prefix -MODEL = {ENV[MODEL]} +MODEL = {ENV[VX_FCST_MODEL_NAME]} FCST_NATIVE_DATA_TYPE = GRIB ENSEMBLE_STAT_DESC = NA ;; not in other file diff --git a/parm/metplus/EnsembleStat_APCP24h.conf b/parm/metplus/EnsembleStat_APCP24h.conf index f0b7b5638d..d97cb27c76 100644 --- a/parm/metplus/EnsembleStat_APCP24h.conf +++ b/parm/metplus/EnsembleStat_APCP24h.conf @@ -32,7 +32,7 @@ INIT_INCREMENT=3600 LEAD_SEQ = {ENV[fhr_list]} # Used in the MET config file for: model, output_prefix -MODEL = {ENV[MODEL]} +MODEL = {ENV[VX_FCST_MODEL_NAME]} FCST_NATIVE_DATA_TYPE = GRIB ENSEMBLE_STAT_DESC = NA ;; not in other file diff --git 
a/parm/metplus/EnsembleStat_REFC.conf b/parm/metplus/EnsembleStat_REFC.conf index 180988656b..7bda1db6dd 100644 --- a/parm/metplus/EnsembleStat_REFC.conf +++ b/parm/metplus/EnsembleStat_REFC.conf @@ -28,7 +28,7 @@ INIT_INCREMENT=3600 LEAD_SEQ = {ENV[fhr_list]} # Used in the MET config file for: model, output_prefix -MODEL = {ENV[MODEL]} +MODEL = {ENV[VX_FCST_MODEL_NAME]} ENSEMBLE_STAT_DESC = NA ;; not in other file diff --git a/parm/metplus/EnsembleStat_RETOP.conf b/parm/metplus/EnsembleStat_RETOP.conf index 7f58de0de7..0f81607b8e 100644 --- a/parm/metplus/EnsembleStat_RETOP.conf +++ b/parm/metplus/EnsembleStat_RETOP.conf @@ -28,7 +28,7 @@ INIT_INCREMENT=3600 LEAD_SEQ = {ENV[fhr_list]} # Used in the MET config file for: model, output_prefix -MODEL = {ENV[MODEL]} +MODEL = {ENV[VX_FCST_MODEL_NAME]} ENSEMBLE_STAT_DESC = NA ;; not in other file diff --git a/parm/metplus/EnsembleStat_conus_sfc.conf b/parm/metplus/EnsembleStat_conus_sfc.conf index 7c429eda98..1cd9080aec 100644 --- a/parm/metplus/EnsembleStat_conus_sfc.conf +++ b/parm/metplus/EnsembleStat_conus_sfc.conf @@ -28,7 +28,7 @@ INIT_INCREMENT=3600 LEAD_SEQ = {ENV[fhr_list]} # Used in the MET config file for: model, output_prefix -MODEL = {ENV[MODEL]} +MODEL = {ENV[VX_FCST_MODEL_NAME]} ENSEMBLE_STAT_DESC = NA diff --git a/parm/metplus/EnsembleStat_upper_air.conf b/parm/metplus/EnsembleStat_upper_air.conf index 57aa33273e..7a708d8410 100644 --- a/parm/metplus/EnsembleStat_upper_air.conf +++ b/parm/metplus/EnsembleStat_upper_air.conf @@ -28,7 +28,7 @@ INIT_INCREMENT=3600 LEAD_SEQ = begin_end_incr(0,{ENV[fhr_last]},6) # Used in the MET config file for: model, output_prefix -MODEL = {ENV[MODEL]} +MODEL = {ENV[VX_FCST_MODEL_NAME]} ENSEMBLE_STAT_DESC = NA diff --git a/parm/metplus/GridStat_APCP01h.conf b/parm/metplus/GridStat_APCP01h.conf index aec4f10375..0fc402ed67 100644 --- a/parm/metplus/GridStat_APCP01h.conf +++ b/parm/metplus/GridStat_APCP01h.conf @@ -80,7 +80,7 @@ GRID_STAT_REGRID_SHAPE = SQUARE 
#GRID_STAT_GRID_WEIGHT_FLAG = # Name to identify model (forecast) data in output -MODEL = {ENV[MODEL]} +MODEL = {ENV[VX_FCST_MODEL_NAME]} FCST_NATIVE_DATA_TYPE = GRIB # Name to identify observation data in output diff --git a/parm/metplus/GridStat_APCP01h_mean.conf b/parm/metplus/GridStat_APCP01h_mean.conf index 9df901f396..0713c6bada 100644 --- a/parm/metplus/GridStat_APCP01h_mean.conf +++ b/parm/metplus/GridStat_APCP01h_mean.conf @@ -80,7 +80,7 @@ GRID_STAT_REGRID_SHAPE = SQUARE #GRID_STAT_GRID_WEIGHT_FLAG = # Name to identify model (forecast) data in output -MODEL = {ENV[MODEL]}_mean +MODEL = {ENV[VX_FCST_MODEL_NAME]}_mean FCST_NATIVE_DATA_TYPE = GRIB # Name to identify observation data in output @@ -157,7 +157,7 @@ FCST_IS_PROB = False # Only used if FCST_IS_PROB is true - sets probabilistic threshold FCST_GRID_STAT_PROB_THRESH = ==0.1 -GRID_STAT_OUTPUT_PREFIX = {ENV[MODEL]}_APCP_{ENV[acc]}_{OBTYPE}_mean +GRID_STAT_OUTPUT_PREFIX = {ENV[VX_FCST_MODEL_NAME]}_APCP_{ENV[acc]}_{OBTYPE}_mean # Climatology data #GRID_STAT_CLIMO_MEAN_FILE_NAME = @@ -253,7 +253,7 @@ STAGING_DIR = {OUTPUT_BASE}/stage/APCP_01h_mean [filename_templates] # Template to look for forecast input to GridStat relative to FCST_GRID_STAT_INPUT_DIR -FCST_GRID_STAT_INPUT_TEMPLATE = ensemble_stat_{ENV[MODEL]}_APCP_{ENV[acc]}_{OBTYPE}_{valid?fmt=%Y%m%d}_{valid?fmt=%H%M%S}V_ens.nc +FCST_GRID_STAT_INPUT_TEMPLATE = ensemble_stat_{ENV[VX_FCST_MODEL_NAME]}_APCP_{ENV[acc]}_{OBTYPE}_{valid?fmt=%Y%m%d}_{valid?fmt=%H%M%S}V_ens.nc # Template to look for observation input to GridStat relative to OBS_GRID_STAT_INPUT_DIR OBS_GRID_STAT_INPUT_TEMPLATE = {valid?fmt=%Y%m%d}/ccpa.t{valid?fmt=%H}z.01h.hrap.conus.gb2 diff --git a/parm/metplus/GridStat_APCP01h_prob.conf b/parm/metplus/GridStat_APCP01h_prob.conf index ec08a36644..087fc77123 100644 --- a/parm/metplus/GridStat_APCP01h_prob.conf +++ b/parm/metplus/GridStat_APCP01h_prob.conf @@ -80,7 +80,7 @@ GRID_STAT_REGRID_SHAPE = SQUARE #GRID_STAT_GRID_WEIGHT_FLAG = # Name 
to identify model (forecast) data in output -MODEL = {ENV[MODEL]}_prob +MODEL = {ENV[VX_FCST_MODEL_NAME]}_prob FCST_NATIVE_DATA_TYPE = GRIB # Name to identify observation data in output @@ -168,7 +168,7 @@ FCST_PROB_IN_GRIB_PDS = False # Only used if FCST_IS_PROB is true - sets probabilistic threshold FCST_GRID_STAT_PROB_THRESH = ==0.1 -GRID_STAT_OUTPUT_PREFIX = {ENV[MODEL]}_APCP_{ENV[acc]}_{OBTYPE}_prob +GRID_STAT_OUTPUT_PREFIX = {ENV[VX_FCST_MODEL_NAME]}_APCP_{ENV[acc]}_{OBTYPE}_prob # Climatology data #GRID_STAT_CLIMO_MEAN_FILE_NAME = @@ -264,7 +264,7 @@ STAGING_DIR = {OUTPUT_BASE}/stage/APCP_01h_prob [filename_templates] # Template to look for forecast input to GridStat relative to FCST_GRID_STAT_INPUT_DIR -FCST_GRID_STAT_INPUT_TEMPLATE = ensemble_stat_{ENV[MODEL]}_APCP_{ENV[acc]}_{OBTYPE}_{valid?fmt=%Y%m%d}_{valid?fmt=%H%M%S}V_ens.nc +FCST_GRID_STAT_INPUT_TEMPLATE = ensemble_stat_{ENV[VX_FCST_MODEL_NAME]}_APCP_{ENV[acc]}_{OBTYPE}_{valid?fmt=%Y%m%d}_{valid?fmt=%H%M%S}V_ens.nc # Template to look for observation input to GridStat relative to OBS_GRID_STAT_INPUT_DIR OBS_GRID_STAT_INPUT_TEMPLATE = {valid?fmt=%Y%m%d}/ccpa.t{valid?fmt=%H}z.01h.hrap.conus.gb2 diff --git a/parm/metplus/GridStat_APCP03h.conf b/parm/metplus/GridStat_APCP03h.conf index e1be9d0f46..a27fe9269f 100644 --- a/parm/metplus/GridStat_APCP03h.conf +++ b/parm/metplus/GridStat_APCP03h.conf @@ -105,7 +105,7 @@ GRID_STAT_REGRID_SHAPE = SQUARE #GRID_STAT_GRID_WEIGHT_FLAG = # Name to identify model (forecast) data in output -MODEL = {ENV[MODEL]} +MODEL = {ENV[VX_FCST_MODEL_NAME]} FCST_NATIVE_DATA_TYPE = GRIB FCST_PCP_COMBINE_INPUT_DATATYPE = GRIB diff --git a/parm/metplus/GridStat_APCP03h_mean.conf b/parm/metplus/GridStat_APCP03h_mean.conf index d8759e2734..2a53ee4751 100644 --- a/parm/metplus/GridStat_APCP03h_mean.conf +++ b/parm/metplus/GridStat_APCP03h_mean.conf @@ -80,7 +80,7 @@ GRID_STAT_REGRID_SHAPE = SQUARE #GRID_STAT_GRID_WEIGHT_FLAG = # Name to identify model (forecast) data in output -MODEL = 
{ENV[MODEL]}_mean +MODEL = {ENV[VX_FCST_MODEL_NAME]}_mean FCST_NATIVE_DATA_TYPE = GRIB # Name to identify observation data in output @@ -157,7 +157,7 @@ FCST_IS_PROB = False # Only used if FCST_IS_PROB is true - sets probabilistic threshold FCST_GRID_STAT_PROB_THRESH = ==0.1 -GRID_STAT_OUTPUT_PREFIX = {ENV[MODEL]}_APCP_{ENV[acc]}_{OBTYPE}_mean +GRID_STAT_OUTPUT_PREFIX = {ENV[VX_FCST_MODEL_NAME]}_APCP_{ENV[acc]}_{OBTYPE}_mean # Climatology data #GRID_STAT_CLIMO_MEAN_FILE_NAME = @@ -253,7 +253,7 @@ STAGING_DIR = {OUTPUT_BASE}/stage/APCP_03h_mean [filename_templates] # Template to look for forecast input to GridStat relative to FCST_GRID_STAT_INPUT_DIR -FCST_GRID_STAT_INPUT_TEMPLATE = ensemble_stat_{ENV[MODEL]}_APCP_{ENV[acc]}_{OBTYPE}_{valid?fmt=%Y%m%d}_{valid?fmt=%H%M%S}V_ens.nc +FCST_GRID_STAT_INPUT_TEMPLATE = ensemble_stat_{ENV[VX_FCST_MODEL_NAME]}_APCP_{ENV[acc]}_{OBTYPE}_{valid?fmt=%Y%m%d}_{valid?fmt=%H%M%S}V_ens.nc # Template to look for observation input to GridStat relative to OBS_GRID_STAT_INPUT_DIR OBS_GRID_STAT_INPUT_TEMPLATE = {OBS_GRID_STAT_INPUT_DIR}/{valid?fmt=%Y%m%d}/ccpa.t{valid?fmt=%H}z.hrap.conus.gb2_a03h diff --git a/parm/metplus/GridStat_APCP03h_prob.conf b/parm/metplus/GridStat_APCP03h_prob.conf index 357a6495d3..c7dc3b26ad 100644 --- a/parm/metplus/GridStat_APCP03h_prob.conf +++ b/parm/metplus/GridStat_APCP03h_prob.conf @@ -80,7 +80,7 @@ GRID_STAT_REGRID_SHAPE = SQUARE #GRID_STAT_GRID_WEIGHT_FLAG = # Name to identify model (forecast) data in output -MODEL = {ENV[MODEL]}_prob +MODEL = {ENV[VX_FCST_MODEL_NAME]}_prob FCST_NATIVE_DATA_TYPE = GRIB # Name to identify observation data in output @@ -167,7 +167,7 @@ FCST_PROB_IN_GRIB_PDS = False # Only used if FCST_IS_PROB is true - sets probabilistic threshold FCST_GRID_STAT_PROB_THRESH = ==0.1 -GRID_STAT_OUTPUT_PREFIX = {ENV[MODEL]}_APCP_{ENV[acc]}_{OBTYPE}_prob +GRID_STAT_OUTPUT_PREFIX = {ENV[VX_FCST_MODEL_NAME]}_APCP_{ENV[acc]}_{OBTYPE}_prob # Climatology data #GRID_STAT_CLIMO_MEAN_FILE_NAME = @@ 
-263,7 +263,7 @@ STAGING_DIR = {OUTPUT_BASE}/stage/APCP_03h_prob [filename_templates] # Template to look for forecast input to GridStat relative to FCST_GRID_STAT_INPUT_DIR -FCST_GRID_STAT_INPUT_TEMPLATE = ensemble_stat_{ENV[MODEL]}_APCP_{ENV[acc]}_{OBTYPE}_{valid?fmt=%Y%m%d}_{valid?fmt=%H%M%S}V_ens.nc +FCST_GRID_STAT_INPUT_TEMPLATE = ensemble_stat_{ENV[VX_FCST_MODEL_NAME]}_APCP_{ENV[acc]}_{OBTYPE}_{valid?fmt=%Y%m%d}_{valid?fmt=%H%M%S}V_ens.nc # Template to look for observation input to GridStat relative to OBS_GRID_STAT_INPUT_DIR OBS_GRID_STAT_INPUT_TEMPLATE = {OBS_GRID_STAT_INPUT_DIR}/{valid?fmt=%Y%m%d}/ccpa.t{valid?fmt=%H}z.hrap.conus.gb2_a03h diff --git a/parm/metplus/GridStat_APCP06h.conf b/parm/metplus/GridStat_APCP06h.conf index efcf52da92..22c85d1422 100644 --- a/parm/metplus/GridStat_APCP06h.conf +++ b/parm/metplus/GridStat_APCP06h.conf @@ -105,7 +105,7 @@ GRID_STAT_REGRID_SHAPE = SQUARE #GRID_STAT_GRID_WEIGHT_FLAG = # Name to identify model (forecast) data in output -MODEL = {ENV[MODEL]} +MODEL = {ENV[VX_FCST_MODEL_NAME]} FCST_NATIVE_DATA_TYPE = GRIB FCST_PCP_COMBINE_INPUT_DATATYPE = GRIB diff --git a/parm/metplus/GridStat_APCP06h_mean.conf b/parm/metplus/GridStat_APCP06h_mean.conf index c2049d41c2..5226bb4051 100644 --- a/parm/metplus/GridStat_APCP06h_mean.conf +++ b/parm/metplus/GridStat_APCP06h_mean.conf @@ -80,7 +80,7 @@ GRID_STAT_REGRID_SHAPE = SQUARE #GRID_STAT_GRID_WEIGHT_FLAG = # Name to identify model (forecast) data in output -MODEL = {ENV[MODEL]}_mean +MODEL = {ENV[VX_FCST_MODEL_NAME]}_mean FCST_NATIVE_DATA_TYPE = GRIB # Name to identify observation data in output @@ -157,7 +157,7 @@ FCST_IS_PROB = False # Only used if FCST_IS_PROB is true - sets probabilistic threshold FCST_GRID_STAT_PROB_THRESH = ==0.1 -GRID_STAT_OUTPUT_PREFIX = {ENV[MODEL]}_APCP_{ENV[acc]}_{OBTYPE}_mean +GRID_STAT_OUTPUT_PREFIX = {ENV[VX_FCST_MODEL_NAME]}_APCP_{ENV[acc]}_{OBTYPE}_mean # Climatology data #GRID_STAT_CLIMO_MEAN_FILE_NAME = @@ -253,7 +253,7 @@ STAGING_DIR = 
{OUTPUT_BASE}/stage/APCP_06h_mean [filename_templates] # Template to look for forecast input to GridStat relative to FCST_GRID_STAT_INPUT_DIR -FCST_GRID_STAT_INPUT_TEMPLATE = ensemble_stat_{ENV[MODEL]}_APCP_{ENV[acc]}_{OBTYPE}_{valid?fmt=%Y%m%d}_{valid?fmt=%H%M%S}V_ens.nc +FCST_GRID_STAT_INPUT_TEMPLATE = ensemble_stat_{ENV[VX_FCST_MODEL_NAME]}_APCP_{ENV[acc]}_{OBTYPE}_{valid?fmt=%Y%m%d}_{valid?fmt=%H%M%S}V_ens.nc # Template to look for observation input to GridStat relative to OBS_GRID_STAT_INPUT_DIR OBS_GRID_STAT_INPUT_TEMPLATE = {OBS_GRID_STAT_INPUT_DIR}/{valid?fmt=%Y%m%d}/ccpa.t{valid?fmt=%H}z.hrap.conus.gb2_a06h diff --git a/parm/metplus/GridStat_APCP06h_prob.conf b/parm/metplus/GridStat_APCP06h_prob.conf index 6f32e711d6..ce629c7e38 100644 --- a/parm/metplus/GridStat_APCP06h_prob.conf +++ b/parm/metplus/GridStat_APCP06h_prob.conf @@ -80,7 +80,7 @@ GRID_STAT_REGRID_SHAPE = SQUARE #GRID_STAT_GRID_WEIGHT_FLAG = # Name to identify model (forecast) data in output -MODEL = {ENV[MODEL]}_prob +MODEL = {ENV[VX_FCST_MODEL_NAME]}_prob FCST_NATIVE_DATA_TYPE = GRIB # Name to identify observation data in output @@ -167,7 +167,7 @@ FCST_PROB_IN_GRIB_PDS = False # Only used if FCST_IS_PROB is true - sets probabilistic threshold FCST_GRID_STAT_PROB_THRESH = ==0.1 -GRID_STAT_OUTPUT_PREFIX = {ENV[MODEL]}_APCP_{ENV[acc]}_{OBTYPE}_prob +GRID_STAT_OUTPUT_PREFIX = {ENV[VX_FCST_MODEL_NAME]}_APCP_{ENV[acc]}_{OBTYPE}_prob # Climatology data #GRID_STAT_CLIMO_MEAN_FILE_NAME = @@ -263,7 +263,7 @@ STAGING_DIR = {OUTPUT_BASE}/stage/APCP_06h_prob [filename_templates] # Template to look for forecast input to GridStat relative to FCST_GRID_STAT_INPUT_DIR -FCST_GRID_STAT_INPUT_TEMPLATE = ensemble_stat_{ENV[MODEL]}_APCP_{ENV[acc]}_{OBTYPE}_{valid?fmt=%Y%m%d}_{valid?fmt=%H%M%S}V_ens.nc +FCST_GRID_STAT_INPUT_TEMPLATE = ensemble_stat_{ENV[VX_FCST_MODEL_NAME]}_APCP_{ENV[acc]}_{OBTYPE}_{valid?fmt=%Y%m%d}_{valid?fmt=%H%M%S}V_ens.nc # Template to look for observation input to GridStat relative to 
OBS_GRID_STAT_INPUT_DIR OBS_GRID_STAT_INPUT_TEMPLATE = {OBS_GRID_STAT_INPUT_DIR}/{valid?fmt=%Y%m%d}/ccpa.t{valid?fmt=%H}z.hrap.conus.gb2_a06h diff --git a/parm/metplus/GridStat_APCP24h.conf b/parm/metplus/GridStat_APCP24h.conf index ebba69d1dc..2689680fbc 100644 --- a/parm/metplus/GridStat_APCP24h.conf +++ b/parm/metplus/GridStat_APCP24h.conf @@ -105,7 +105,7 @@ GRID_STAT_REGRID_SHAPE = SQUARE #GRID_STAT_GRID_WEIGHT_FLAG = # Name to identify model (forecast) data in output -MODEL = {ENV[MODEL]} +MODEL = {ENV[VX_FCST_MODEL_NAME]} FCST_NATIVE_DATA_TYPE = GRIB FCST_PCP_COMBINE_INPUT_DATATYPE = GRIB diff --git a/parm/metplus/GridStat_APCP24h_mean.conf b/parm/metplus/GridStat_APCP24h_mean.conf index d7b9083266..b43ee83404 100644 --- a/parm/metplus/GridStat_APCP24h_mean.conf +++ b/parm/metplus/GridStat_APCP24h_mean.conf @@ -80,7 +80,7 @@ GRID_STAT_REGRID_SHAPE = SQUARE #GRID_STAT_GRID_WEIGHT_FLAG = # Name to identify model (forecast) data in output -MODEL = {ENV[MODEL]}_mean +MODEL = {ENV[VX_FCST_MODEL_NAME]}_mean FCST_NATIVE_DATA_TYPE = GRIB # Name to identify observation data in output @@ -157,7 +157,7 @@ FCST_IS_PROB = False # Only used if FCST_IS_PROB is true - sets probabilistic threshold FCST_GRID_STAT_PROB_THRESH = ==0.1 -GRID_STAT_OUTPUT_PREFIX = {ENV[MODEL]}_APCP_{ENV[acc]}_{OBTYPE}_mean +GRID_STAT_OUTPUT_PREFIX = {ENV[VX_FCST_MODEL_NAME]}_APCP_{ENV[acc]}_{OBTYPE}_mean # Climatology data #GRID_STAT_CLIMO_MEAN_FILE_NAME = @@ -253,7 +253,7 @@ STAGING_DIR = {OUTPUT_BASE}/stage/APCP_24h_mean [filename_templates] # Template to look for forecast input to GridStat relative to FCST_GRID_STAT_INPUT_DIR -FCST_GRID_STAT_INPUT_TEMPLATE = ensemble_stat_{ENV[MODEL]}_APCP_{ENV[acc]}_{OBTYPE}_{valid?fmt=%Y%m%d}_{valid?fmt=%H%M%S}V_ens.nc +FCST_GRID_STAT_INPUT_TEMPLATE = ensemble_stat_{ENV[VX_FCST_MODEL_NAME]}_APCP_{ENV[acc]}_{OBTYPE}_{valid?fmt=%Y%m%d}_{valid?fmt=%H%M%S}V_ens.nc # Template to look for observation input to GridStat relative to OBS_GRID_STAT_INPUT_DIR 
OBS_GRID_STAT_INPUT_TEMPLATE = {OBS_GRID_STAT_INPUT_DIR}/{valid?fmt=%Y%m%d}/ccpa.t{valid?fmt=%H}z.hrap.conus.gb2_a24h diff --git a/parm/metplus/GridStat_APCP24h_prob.conf b/parm/metplus/GridStat_APCP24h_prob.conf index 61c08ac530..c80772c38b 100644 --- a/parm/metplus/GridStat_APCP24h_prob.conf +++ b/parm/metplus/GridStat_APCP24h_prob.conf @@ -80,7 +80,7 @@ GRID_STAT_REGRID_SHAPE = SQUARE #GRID_STAT_GRID_WEIGHT_FLAG = # Name to identify model (forecast) data in output -MODEL = {ENV[MODEL]}_prob +MODEL = {ENV[VX_FCST_MODEL_NAME]}_prob FCST_NATIVE_DATA_TYPE = GRIB # Name to identify observation data in output @@ -167,7 +167,7 @@ FCST_PROB_IN_GRIB_PDS = False # Only used if FCST_IS_PROB is true - sets probabilistic threshold FCST_GRID_STAT_PROB_THRESH = ==0.1 -GRID_STAT_OUTPUT_PREFIX = {ENV[MODEL]}_APCP_{ENV[acc]}_{OBTYPE}_prob +GRID_STAT_OUTPUT_PREFIX = {ENV[VX_FCST_MODEL_NAME]}_APCP_{ENV[acc]}_{OBTYPE}_prob # Climatology data #GRID_STAT_CLIMO_MEAN_FILE_NAME = @@ -263,7 +263,7 @@ STAGING_DIR = {OUTPUT_BASE}/stage/APCP_24h_prob [filename_templates] # Template to look for forecast input to GridStat relative to FCST_GRID_STAT_INPUT_DIR -FCST_GRID_STAT_INPUT_TEMPLATE = ensemble_stat_{ENV[MODEL]}_APCP_{ENV[acc]}_{OBTYPE}_{valid?fmt=%Y%m%d}_{valid?fmt=%H%M%S}V_ens.nc +FCST_GRID_STAT_INPUT_TEMPLATE = ensemble_stat_{ENV[VX_FCST_MODEL_NAME]}_APCP_{ENV[acc]}_{OBTYPE}_{valid?fmt=%Y%m%d}_{valid?fmt=%H%M%S}V_ens.nc # Template to look for observation input to GridStat relative to OBS_GRID_STAT_INPUT_DIR OBS_GRID_STAT_INPUT_TEMPLATE = {OBS_GRID_STAT_INPUT_DIR}/{valid?fmt=%Y%m%d}/ccpa.t{valid?fmt=%H}z.hrap.conus.gb2_a24h diff --git a/parm/metplus/GridStat_REFC.conf b/parm/metplus/GridStat_REFC.conf index 8c0b939740..fe49962bc2 100644 --- a/parm/metplus/GridStat_REFC.conf +++ b/parm/metplus/GridStat_REFC.conf @@ -80,7 +80,7 @@ GRID_STAT_INTERP_TYPE_WIDTH = 1 GRID_STAT_GRID_WEIGHT_FLAG = NONE # Name to identify model (forecast) data in output -MODEL = {ENV[MODEL]} +MODEL = 
{ENV[VX_FCST_MODEL_NAME]} FCST_NATIVE_DATA_TYPE = GRIB # Name to identify observation data in output diff --git a/parm/metplus/GridStat_REFC_mean.conf b/parm/metplus/GridStat_REFC_mean.conf index 11299bb848..d39d383e39 100644 --- a/parm/metplus/GridStat_REFC_mean.conf +++ b/parm/metplus/GridStat_REFC_mean.conf @@ -80,7 +80,7 @@ GRID_STAT_INTERP_TYPE_WIDTH = 1 GRID_STAT_GRID_WEIGHT_FLAG = NONE # Name to identify model (forecast) data in output -MODEL = {ENV[MODEL]}_mean +MODEL = {ENV[VX_FCST_MODEL_NAME]}_mean #FCST_NATIVE_DATA_TYPE = GRIB # Name to identify observation data in output @@ -167,7 +167,7 @@ OBS_IS_PROB = false # Only used if OBS_IS_PROB is true - sets probabilistic threshold OBS_GRID_STAT_PROB_THRESH = ==0.1 -GRID_STAT_OUTPUT_PREFIX = {ENV[MODEL]}_REFC_{OBTYPE}_mean +GRID_STAT_OUTPUT_PREFIX = {ENV[VX_FCST_MODEL_NAME]}_REFC_{OBTYPE}_mean # Climatology data #GRID_STAT_CLIMO_MEAN_FILE_NAME = @@ -263,7 +263,7 @@ STAGING_DIR = {OUTPUT_BASE}/stage/REFC_mean [filename_templates] # Template to look for forecast input to GridStat relative to FCST_GRID_STAT_INPUT_DIR -FCST_GRID_STAT_INPUT_TEMPLATE = ensemble_stat_{ENV[MODEL]}_REFC_{OBTYPE}_{valid?fmt=%Y%m%d}_{valid?fmt=%H%M%S}V_ens.nc +FCST_GRID_STAT_INPUT_TEMPLATE = ensemble_stat_{ENV[VX_FCST_MODEL_NAME]}_REFC_{OBTYPE}_{valid?fmt=%Y%m%d}_{valid?fmt=%H%M%S}V_ens.nc # Template to look for observation input to GridStat relative to OBS_GRID_STAT_INPUT_DIR OBS_GRID_STAT_INPUT_TEMPLATE = {valid?fmt=%Y%m%d}/MergedReflectivityQCComposite_00.50_{valid?fmt=%Y%m%d}-{valid?fmt=%H}0000.grib2 diff --git a/parm/metplus/GridStat_REFC_prob.conf b/parm/metplus/GridStat_REFC_prob.conf index 1abcc8803d..cec1ad5682 100644 --- a/parm/metplus/GridStat_REFC_prob.conf +++ b/parm/metplus/GridStat_REFC_prob.conf @@ -80,7 +80,7 @@ GRID_STAT_INTERP_TYPE_WIDTH = 1 GRID_STAT_GRID_WEIGHT_FLAG = NONE # Name to identify model (forecast) data in output -MODEL = {ENV[MODEL]}_prob +MODEL = {ENV[VX_FCST_MODEL_NAME]}_prob #FCST_NATIVE_DATA_TYPE = 
GRIB # Name to identify observation data in output @@ -194,7 +194,7 @@ OBS_IS_PROB = false # Only used if OBS_IS_PROB is true - sets probabilistic threshold OBS_GRID_STAT_PROB_THRESH = ==0.1 -GRID_STAT_OUTPUT_PREFIX = {ENV[MODEL]}_REFC_{OBTYPE}_prob +GRID_STAT_OUTPUT_PREFIX = {ENV[VX_FCST_MODEL_NAME]}_REFC_{OBTYPE}_prob # Climatology data #GRID_STAT_CLIMO_MEAN_FILE_NAME = @@ -288,7 +288,7 @@ STAGING_DIR = {OUTPUT_BASE}/stage/REFC_prob [filename_templates] # Template to look for forecast input to GridStat relative to FCST_GRID_STAT_INPUT_DIR -FCST_GRID_STAT_INPUT_TEMPLATE = ensemble_stat_{ENV[MODEL]}_REFC_{OBTYPE}_{valid?fmt=%Y%m%d}_{valid?fmt=%H%M%S}V_ens.nc +FCST_GRID_STAT_INPUT_TEMPLATE = ensemble_stat_{ENV[VX_FCST_MODEL_NAME]}_REFC_{OBTYPE}_{valid?fmt=%Y%m%d}_{valid?fmt=%H%M%S}V_ens.nc # Template to look for observation input to GridStat relative to OBS_GRID_STAT_INPUT_DIR OBS_GRID_STAT_INPUT_TEMPLATE = {valid?fmt=%Y%m%d}/MergedReflectivityQCComposite_00.50_{valid?fmt=%Y%m%d}-{valid?fmt=%H}0000.grib2 diff --git a/parm/metplus/GridStat_RETOP.conf b/parm/metplus/GridStat_RETOP.conf index c1d203106f..2f4630771d 100644 --- a/parm/metplus/GridStat_RETOP.conf +++ b/parm/metplus/GridStat_RETOP.conf @@ -80,7 +80,7 @@ GRID_STAT_INTERP_TYPE_WIDTH = 1 GRID_STAT_GRID_WEIGHT_FLAG = NONE # Name to identify model (forecast) data in output -MODEL = {ENV[MODEL]} +MODEL = {ENV[VX_FCST_MODEL_NAME]} FCST_NATIVE_DATA_TYPE = GRIB # Name to identify observation data in output diff --git a/parm/metplus/GridStat_RETOP_mean.conf b/parm/metplus/GridStat_RETOP_mean.conf index 3970c4b674..032c5a7d8a 100644 --- a/parm/metplus/GridStat_RETOP_mean.conf +++ b/parm/metplus/GridStat_RETOP_mean.conf @@ -80,7 +80,7 @@ GRID_STAT_INTERP_TYPE_WIDTH = 1 GRID_STAT_GRID_WEIGHT_FLAG = NONE # Name to identify model (forecast) data in output -MODEL = {ENV[MODEL]}_mean +MODEL = {ENV[VX_FCST_MODEL_NAME]}_mean #FCST_NATIVE_DATA_TYPE = GRIB # Name to identify observation data in output @@ -167,7 +167,7 @@ 
OBS_IS_PROB = false # Only used if OBS_IS_PROB is true - sets probabilistic threshold OBS_GRID_STAT_PROB_THRESH = ==0.1 -GRID_STAT_OUTPUT_PREFIX = {ENV[MODEL]}_RETOP_{OBTYPE}_mean +GRID_STAT_OUTPUT_PREFIX = {ENV[VX_FCST_MODEL_NAME]}_RETOP_{OBTYPE}_mean # Climatology data #GRID_STAT_CLIMO_MEAN_FILE_NAME = @@ -263,7 +263,7 @@ STAGING_DIR = {OUTPUT_BASE}/stage/RETOP_mean [filename_templates] # Template to look for forecast input to GridStat relative to FCST_GRID_STAT_INPUT_DIR -FCST_GRID_STAT_INPUT_TEMPLATE = ensemble_stat_{ENV[MODEL]}_RETOP_{OBTYPE}_{valid?fmt=%Y%m%d}_{valid?fmt=%H%M%S}V_ens.nc +FCST_GRID_STAT_INPUT_TEMPLATE = ensemble_stat_{ENV[VX_FCST_MODEL_NAME]}_RETOP_{OBTYPE}_{valid?fmt=%Y%m%d}_{valid?fmt=%H%M%S}V_ens.nc # Template to look for observation input to GridStat relative to OBS_GRID_STAT_INPUT_DIR OBS_GRID_STAT_INPUT_TEMPLATE = {valid?fmt=%Y%m%d}/EchoTop_18_00.50_{valid?fmt=%Y%m%d}-{valid?fmt=%H}0000.grib2 diff --git a/parm/metplus/GridStat_RETOP_prob.conf b/parm/metplus/GridStat_RETOP_prob.conf index 49a5f67138..9c757badcd 100644 --- a/parm/metplus/GridStat_RETOP_prob.conf +++ b/parm/metplus/GridStat_RETOP_prob.conf @@ -80,7 +80,7 @@ GRID_STAT_INTERP_TYPE_WIDTH = 1 GRID_STAT_GRID_WEIGHT_FLAG = NONE # Name to identify model (forecast) data in output -MODEL = {ENV[MODEL]}_prob +MODEL = {ENV[VX_FCST_MODEL_NAME]}_prob #FCST_NATIVE_DATA_TYPE = GRIB # Name to identify observation data in output @@ -177,7 +177,7 @@ OBS_IS_PROB = false # Only used if OBS_IS_PROB is true - sets probabilistic threshold OBS_GRID_STAT_PROB_THRESH = ==0.1 -GRID_STAT_OUTPUT_PREFIX = {ENV[MODEL]}_RETOP_{OBTYPE}_prob +GRID_STAT_OUTPUT_PREFIX = {ENV[VX_FCST_MODEL_NAME]}_RETOP_{OBTYPE}_prob # Climatology data #GRID_STAT_CLIMO_MEAN_FILE_NAME = @@ -271,7 +271,7 @@ STAGING_DIR = {OUTPUT_BASE}/stage/RETOP_prob [filename_templates] # Template to look for forecast input to GridStat relative to FCST_GRID_STAT_INPUT_DIR -FCST_GRID_STAT_INPUT_TEMPLATE = 
ensemble_stat_{ENV[MODEL]}_RETOP_{OBTYPE}_{valid?fmt=%Y%m%d}_{valid?fmt=%H%M%S}V_ens.nc +FCST_GRID_STAT_INPUT_TEMPLATE = ensemble_stat_{ENV[VX_FCST_MODEL_NAME]}_RETOP_{OBTYPE}_{valid?fmt=%Y%m%d}_{valid?fmt=%H%M%S}V_ens.nc # Template to look for observation input to GridStat relative to OBS_GRID_STAT_INPUT_DIR OBS_GRID_STAT_INPUT_TEMPLATE = {valid?fmt=%Y%m%d}/EchoTop_18_00.50_{valid?fmt=%Y%m%d}-{valid?fmt=%H}0000.grib2 diff --git a/parm/metplus/PointStat_conus_sfc.conf b/parm/metplus/PointStat_conus_sfc.conf index 1fc815e201..89e211c49a 100644 --- a/parm/metplus/PointStat_conus_sfc.conf +++ b/parm/metplus/PointStat_conus_sfc.conf @@ -137,7 +137,7 @@ OBS_POINT_STAT_WINDOW_END = {OBS_WINDOW_END} POINT_STAT_OFFSETS = 0 # Model/fcst and obs name, e.g. GFS, NAM, GDAS, etc. -MODEL = {ENV[MODEL]} +MODEL = {ENV[VX_FCST_MODEL_NAME]} POINT_STAT_DESC = NA OBTYPE = NDAS @@ -233,6 +233,10 @@ OBS_VAR11_LEVELS = L0 OBS_VAR11_OPTIONS = GRIB_lvl_typ = 215; interp = { type = [ { method = NEAREST; width = 1; } ]; } OBS_VAR11_THRESH = <152, <305, <914, <1520, <3040, >=914 +BOTH_VAR12_NAME = SPFH +BOTH_VAR12_LEVELS = Z2 + + # End of [config] section and start of [dir] section [dir] diff --git a/parm/metplus/PointStat_conus_sfc_mean.conf b/parm/metplus/PointStat_conus_sfc_mean.conf index 83af83c302..9bc8b30c57 100644 --- a/parm/metplus/PointStat_conus_sfc_mean.conf +++ b/parm/metplus/PointStat_conus_sfc_mean.conf @@ -137,7 +137,7 @@ OBS_POINT_STAT_WINDOW_END = {OBS_WINDOW_END} POINT_STAT_OFFSETS = 0 # Model/fcst and obs name, e.g. GFS, NAM, GDAS, etc. 
-MODEL = {ENV[MODEL]}_mean +MODEL = {ENV[VX_FCST_MODEL_NAME]}_mean POINT_STAT_DESC = NA OBTYPE = NDAS @@ -148,7 +148,7 @@ POINT_STAT_REGRID_TO_GRID = NONE POINT_STAT_REGRID_METHOD = BILIN POINT_STAT_REGRID_WIDTH = 2 -POINT_STAT_OUTPUT_PREFIX = {ENV[MODEL]}_ADPSFC_{OBTYPE}_mean +POINT_STAT_OUTPUT_PREFIX = {ENV[VX_FCST_MODEL_NAME]}_ADPSFC_{OBTYPE}_mean # sets the -obs_valid_beg command line argument (optional) # not used for this example @@ -246,7 +246,7 @@ PB2NC_INPUT_TEMPLATE = prepbufr.ndas.{valid?fmt=%Y%m%d%H} PB2NC_OUTPUT_TEMPLATE = prepbufr.ndas{ENV[DOT_ENSMEM]}.{valid?fmt=%Y%m%d%H}.nc # Template to look for forecast input to PointStat relative to FCST_POINT_STAT_INPUT_DIR -FCST_POINT_STAT_INPUT_TEMPLATE = ensemble_stat_{ENV[MODEL]}_ADPSFC_{OBTYPE}_{valid?fmt=%Y%m%d}_{valid?fmt=%H%M%S}V_ens.nc +FCST_POINT_STAT_INPUT_TEMPLATE = ensemble_stat_{ENV[VX_FCST_MODEL_NAME]}_ADPSFC_{OBTYPE}_{valid?fmt=%Y%m%d}_{valid?fmt=%H%M%S}V_ens.nc # Template to look for observation input to PointStat relative to OBS_POINT_STAT_INPUT_DIR OBS_POINT_STAT_INPUT_TEMPLATE = prepbufr.ndas{ENV[DOT_ENSMEM]}.{valid?fmt=%Y%m%d%H}.nc diff --git a/parm/metplus/PointStat_conus_sfc_prob.conf b/parm/metplus/PointStat_conus_sfc_prob.conf index 0c2e290782..0374d034be 100644 --- a/parm/metplus/PointStat_conus_sfc_prob.conf +++ b/parm/metplus/PointStat_conus_sfc_prob.conf @@ -137,7 +137,7 @@ OBS_POINT_STAT_WINDOW_END = {OBS_WINDOW_END} POINT_STAT_OFFSETS = 0 # Model/fcst and obs name, e.g. GFS, NAM, GDAS, etc. 
-MODEL = {ENV[MODEL]}_prob +MODEL = {ENV[VX_FCST_MODEL_NAME]}_prob POINT_STAT_DESC = NA OBTYPE = NDAS @@ -148,7 +148,7 @@ POINT_STAT_REGRID_TO_GRID = NONE POINT_STAT_REGRID_METHOD = BILIN POINT_STAT_REGRID_WIDTH = 2 -POINT_STAT_OUTPUT_PREFIX = {ENV[MODEL]}_ADPSFC_{OBTYPE}_prob +POINT_STAT_OUTPUT_PREFIX = {ENV[VX_FCST_MODEL_NAME]}_ADPSFC_{OBTYPE}_prob # sets the -obs_valid_beg command line argument (optional) # not used for this example @@ -422,7 +422,7 @@ PB2NC_INPUT_TEMPLATE = prepbufr.ndas.{valid?fmt=%Y%m%d%H} PB2NC_OUTPUT_TEMPLATE = prepbufr.ndas{ENV[DOT_ENSMEM]}.{valid?fmt=%Y%m%d%H}.nc # Template to look for forecast input to PointStat relative to FCST_POINT_STAT_INPUT_DIR -FCST_POINT_STAT_INPUT_TEMPLATE = ensemble_stat_{ENV[MODEL]}_ADPSFC_{OBTYPE}_{valid?fmt=%Y%m%d}_{valid?fmt=%H%M%S}V_ens.nc +FCST_POINT_STAT_INPUT_TEMPLATE = ensemble_stat_{ENV[VX_FCST_MODEL_NAME]}_ADPSFC_{OBTYPE}_{valid?fmt=%Y%m%d}_{valid?fmt=%H%M%S}V_ens.nc # Template to look for observation input to PointStat relative to OBS_POINT_STAT_INPUT_DIR OBS_POINT_STAT_INPUT_TEMPLATE = prepbufr.ndas{ENV[DOT_ENSMEM]}.{valid?fmt=%Y%m%d%H}.nc diff --git a/parm/metplus/PointStat_upper_air.conf b/parm/metplus/PointStat_upper_air.conf index 822f9bae1e..8406274fca 100644 --- a/parm/metplus/PointStat_upper_air.conf +++ b/parm/metplus/PointStat_upper_air.conf @@ -137,7 +137,7 @@ OBS_POINT_STAT_WINDOW_END = {OBS_WINDOW_END} POINT_STAT_OFFSETS = 0 # Model/fcst and obs name, e.g. GFS, NAM, GDAS, etc. -MODEL = {ENV[MODEL]} +MODEL = {ENV[VX_FCST_MODEL_NAME]} POINT_STAT_DESC = NA OBTYPE = NDAS diff --git a/parm/metplus/PointStat_upper_air_mean.conf b/parm/metplus/PointStat_upper_air_mean.conf index b2a9dd593e..c2f181c395 100644 --- a/parm/metplus/PointStat_upper_air_mean.conf +++ b/parm/metplus/PointStat_upper_air_mean.conf @@ -137,7 +137,7 @@ OBS_POINT_STAT_WINDOW_END = {OBS_WINDOW_END} POINT_STAT_OFFSETS = 0 # Model/fcst and obs name, e.g. GFS, NAM, GDAS, etc. 
-MODEL = {ENV[MODEL]}_mean +MODEL = {ENV[VX_FCST_MODEL_NAME]}_mean POINT_STAT_DESC = NA OBTYPE = NDAS @@ -148,7 +148,7 @@ POINT_STAT_REGRID_TO_GRID = NONE POINT_STAT_REGRID_METHOD = BILIN POINT_STAT_REGRID_WIDTH = 2 -POINT_STAT_OUTPUT_PREFIX = {ENV[MODEL]}_ADPUPA_{OBTYPE}_mean +POINT_STAT_OUTPUT_PREFIX = {ENV[VX_FCST_MODEL_NAME]}_ADPUPA_{OBTYPE}_mean # sets the -obs_valid_beg command line argument (optional) # not used for this example @@ -322,7 +322,7 @@ PB2NC_INPUT_TEMPLATE = prepbufr.ndas.{valid?fmt=%Y%m%d%H} PB2NC_OUTPUT_TEMPLATE = prepbufr.ndas{ENV[DOT_ENSMEM]}.{valid?fmt=%Y%m%d%H}.nc # Template to look for forecast input to PointStat relative to FCST_POINT_STAT_INPUT_DIR -FCST_POINT_STAT_INPUT_TEMPLATE = ensemble_stat_{ENV[MODEL]}_ADPUPA_{OBTYPE}_{valid?fmt=%Y%m%d}_{valid?fmt=%H%M%S}V_ens.nc +FCST_POINT_STAT_INPUT_TEMPLATE = ensemble_stat_{ENV[VX_FCST_MODEL_NAME]}_ADPUPA_{OBTYPE}_{valid?fmt=%Y%m%d}_{valid?fmt=%H%M%S}V_ens.nc # Template to look for observation input to PointStat relative to OBS_POINT_STAT_INPUT_DIR OBS_POINT_STAT_INPUT_TEMPLATE = prepbufr.ndas{ENV[DOT_ENSMEM]}.{valid?fmt=%Y%m%d%H}.nc diff --git a/parm/metplus/PointStat_upper_air_prob.conf b/parm/metplus/PointStat_upper_air_prob.conf index 092c13970e..0e65d35468 100644 --- a/parm/metplus/PointStat_upper_air_prob.conf +++ b/parm/metplus/PointStat_upper_air_prob.conf @@ -137,7 +137,7 @@ OBS_POINT_STAT_WINDOW_END = {OBS_WINDOW_END} POINT_STAT_OFFSETS = 0 # Model/fcst and obs name, e.g. GFS, NAM, GDAS, etc. 
-MODEL = {ENV[MODEL]}_prob +MODEL = {ENV[VX_FCST_MODEL_NAME]}_prob POINT_STAT_DESC = NA OBTYPE = NDAS @@ -148,7 +148,7 @@ POINT_STAT_REGRID_TO_GRID = NONE POINT_STAT_REGRID_METHOD = BILIN POINT_STAT_REGRID_WIDTH = 2 -POINT_STAT_OUTPUT_PREFIX = {ENV[MODEL]}_ADPUPA_{OBTYPE}_prob +POINT_STAT_OUTPUT_PREFIX = {ENV[VX_FCST_MODEL_NAME]}_ADPUPA_{OBTYPE}_prob # sets the -obs_valid_beg command line argument (optional) # not used for this example @@ -546,7 +546,7 @@ PB2NC_INPUT_TEMPLATE = prepbufr.ndas.{valid?fmt=%Y%m%d%H} PB2NC_OUTPUT_TEMPLATE = prepbufr.ndas{ENV[DOT_ENSMEM]}.{valid?fmt=%Y%m%d%H}.nc # Template to look for forecast input to PointStat relative to FCST_POINT_STAT_INPUT_DIR -FCST_POINT_STAT_INPUT_TEMPLATE = ensemble_stat_{ENV[MODEL]}_ADPUPA_{OBTYPE}_{valid?fmt=%Y%m%d}_{valid?fmt=%H%M%S}V_ens.nc +FCST_POINT_STAT_INPUT_TEMPLATE = ensemble_stat_{ENV[VX_FCST_MODEL_NAME]}_ADPUPA_{OBTYPE}_{valid?fmt=%Y%m%d}_{valid?fmt=%H%M%S}V_ens.nc # Template to look for observation input to PointStat relative to OBS_POINT_STAT_INPUT_DIR OBS_POINT_STAT_INPUT_TEMPLATE = prepbufr.ndas{ENV[DOT_ENSMEM]}.{valid?fmt=%Y%m%d%H}.nc diff --git a/scripts/exregional_aqm_lbcs.sh b/scripts/exregional_aqm_lbcs.sh index b27368bbcc..5864d34df9 100755 --- a/scripts/exregional_aqm_lbcs.sh +++ b/scripts/exregional_aqm_lbcs.sh @@ -60,6 +60,7 @@ export OMP_STACKSIZE=${OMP_STACKSIZE_MAKE_LBCS} # #----------------------------------------------------------------------- # +set -x eval ${PRE_TASK_CMDS} nprocs=$(( NNODES_AQM_LBCS*PPN_AQM_LBCS )) @@ -143,18 +144,18 @@ fi if [ ${DO_AQM_GEFS_LBCS} = "TRUE" ]; then RUN_CYC="${cyc}" + CDATE_MOD=$( $DATE_UTIL --utc --date "${PDY} ${cyc} UTC - ${EXTRN_MDL_LBCS_OFFSET_HRS} hours" "+%Y%m%d%H" ) + PDY_MOD=${CDATE_MOD:0:8} + AQM_GEFS_FILE_CYC=${AQM_GEFS_FILE_CYC:-"${CDATE_MOD:8:2}"} + AQM_GEFS_FILE_CYC=$( printf "%02d" "${AQM_GEFS_FILE_CYC}" ) if [ ${DO_REAL_TIME} = "TRUE" ]; then - CDATE_MOD=$( $DATE_UTIL --utc --date "${PDY} ${cyc} UTC - ${EXTRN_MDL_LBCS_OFFSET_HRS} 
hours" "+%Y%m%d%H" ) - PDY_MOD=${CDATE_MOD:0:8} - AQM_GEFS_CYC=$( printf "%02d" ${CDATE_MOD:8:2} ) - AQM_MOFILE_FN="${AQM_GEFS_DIR}/gefs.${PDY_MOD}/${AQM_GEFS_CYC}/chem/sfcsig/geaer.t${AQM_GEFS_CYC}z.atmf" + AQM_MOFILE_FN="${COMINgefs}/gefs.${PDY_MOD}/${AQM_GEFS_FILE_CYC}/chem/sfcsig/${AQM_GEFS_FILE_PREFIX}.t${AQM_GEFS_FILE_CYC}z.atmf" else - AQM_GEFS_CYC=$( printf "%02d" "${AQM_GEFS_CYC}" ) - AQM_MOFILE_FN="${AQM_GEFS_DIR}/${PDY}/${AQM_GEFS_CYC}/gfs.t00z.atmf" + AQM_MOFILE_FN="${AQM_GEFS_DIR}/${PDY}/${AQM_GEFS_FILE_CYC}/${AQM_GEFS_FILE_PREFIX}.t${AQM_GEFS_FILE_CYC}z.atmf" fi - GEFS_CYC_DIFF=$( printf "%02d" "$(( RUN_CYC - AQM_GEFS_CYC ))" ) + GEFS_CYC_DIFF=$( printf "%02d" "$(( RUN_CYC - AQM_GEFS_FILE_CYC ))" ) NUMTS="$(( FCST_LEN_HRS / LBC_SPEC_INTVL_HRS + 1 ))" cat > gefs2lbc-nemsio.ini < $base_path/${fn_array[2]} + merged_fn+=( "${fn_array[2]}" ) + fi + done + done + # If merge files exist, update the extrn_defn file + merged_fn_str="( ${merged_fn[@]} )" + printf "Merged files are: ${merged_fn_str} \nUpdating ${EXTRN_DEFNS}\n\n" + echo "$(awk -F= -v val="${merged_fn_str}" '/EXTRN_MDL_FNS/ {$2=val} {print}' OFS== $base_path/${EXTRN_DEFNS})" > $base_path/${EXTRN_DEFNS} + merged_fn=() + mod_fn_list=() + done +fi +# +#----------------------------------------------------------------------- +# # Restore the shell options saved at the beginning of this script/function. # #----------------------------------------------------------------------- diff --git a/scripts/exregional_make_grid.sh b/scripts/exregional_make_grid.sh index e19448e03d..22d06cfcfc 100755 --- a/scripts/exregional_make_grid.sh +++ b/scripts/exregional_make_grid.sh @@ -573,7 +573,7 @@ failed." # those variables in the forecast model's namelist file that specify the # paths to the surface climatology files. These files will either already # be avaialable in a user-specified directory (SFC_CLIMO_DIR) or will be -# generated by the MAKE_SFC_CLIMO_TN task. 
They (or symlinks to them) +# generated by the TN_MAKE_SFC_CLIMO task. They (or symlinks to them) # will be placed (or wll already exist) in the FIXlam directory. # #----------------------------------------------------------------------- diff --git a/scripts/exregional_make_ics.sh b/scripts/exregional_make_ics.sh index 181f705a4a..3fa42a8f93 100755 --- a/scripts/exregional_make_ics.sh +++ b/scripts/exregional_make_ics.sh @@ -86,10 +86,10 @@ fi #----------------------------------------------------------------------- # if [ $RUN_ENVIR = "nco" ]; then - extrn_mdl_staging_dir="${COMINext}" + extrn_mdl_staging_dir="${COMINext}${SLASH_ENSMEM_SUBDIR}" extrn_mdl_var_defns_fp="${extrn_mdl_staging_dir}/${NET}.${cycle}.${EXTRN_MDL_NAME_ICS}.ICS.${EXTRN_MDL_VAR_DEFNS_FN}.sh" else - extrn_mdl_staging_dir="${COMIN}/${EXTRN_MDL_NAME_ICS}/for_ICS" + extrn_mdl_staging_dir="${COMIN}/${EXTRN_MDL_NAME_ICS}/for_ICS${SLASH_ENSMEM_SUBDIR}" extrn_mdl_var_defns_fp="${extrn_mdl_staging_dir}/${EXTRN_MDL_VAR_DEFNS_FN}.sh" fi . 
${extrn_mdl_var_defns_fp} @@ -124,6 +124,7 @@ case "${CCPP_PHYS_SUITE}" in # "FV3_RRFS_v1beta" | \ "FV3_GFS_v15_thompson_mynn_lam3km" | \ + "FV3_GFS_v17_p8" | \ "FV3_WoFS_v0" | \ "FV3_HRRR" ) if [ "${EXTRN_MDL_NAME_ICS}" = "RAP" ] || \ @@ -131,6 +132,8 @@ case "${CCPP_PHYS_SUITE}" in varmap_file="GSDphys_var_map.txt" elif [ "${EXTRN_MDL_NAME_ICS}" = "NAM" ] || \ [ "${EXTRN_MDL_NAME_ICS}" = "FV3GFS" ] || \ + [ "${EXTRN_MDL_NAME_ICS}" = "GEFS" ] || \ + [ "${EXTRN_MDL_NAME_ICS}" = "GDAS" ] || \ [ "${EXTRN_MDL_NAME_ICS}" = "GSMGFS" ]; then varmap_file="GFSphys_var_map.txt" fi @@ -389,6 +392,35 @@ case "${EXTRN_MDL_NAME_ICS}" in tg3_from_soil=False ;; +"GDAS") + tracers_input="[\"spfh\",\"clwmr\",\"o3mr\",\"icmr\",\"rwmr\",\"snmr\",\"grle\"]" + tracers="[\"sphum\",\"liq_wat\",\"o3mr\",\"ice_wat\",\"rainwat\",\"snowwat\",\"graupel\"]" + external_model="GFS" + input_type="gaussian_netcdf" + convert_nst=False + fn_atm="${EXTRN_MDL_FNS[0]}" + fn_sfc="${EXTRN_MDL_FNS[1]}" + vgtyp_from_climo=True + sotyp_from_climo=True + vgfrc_from_climo=True + minmax_vgfrc_from_climo=True + lai_from_climo=True + tg3_from_soil=True + ;; + +"GEFS") + external_model="GFS" + fn_grib2="${EXTRN_MDL_FNS[0]}" + input_type="grib2" + convert_nst=False + vgtyp_from_climo=True + sotyp_from_climo=True + vgfrc_from_climo=True + minmax_vgfrc_from_climo=True + lai_from_climo=True + tg3_from_soil=False + ;; + "HRRR") external_model="HRRR" fn_grib2="${EXTRN_MDL_FNS[0]}" diff --git a/scripts/exregional_make_lbcs.sh b/scripts/exregional_make_lbcs.sh index 53fefc112d..30ad15f78f 100755 --- a/scripts/exregional_make_lbcs.sh +++ b/scripts/exregional_make_lbcs.sh @@ -84,10 +84,10 @@ fi #----------------------------------------------------------------------- # if [ $RUN_ENVIR = "nco" ]; then - extrn_mdl_staging_dir="${COMINext}" + extrn_mdl_staging_dir="${COMINext}${SLASH_ENSMEM_SUBDIR}" extrn_mdl_var_defns_fp="${extrn_mdl_staging_dir}/${NET}.${cycle}.${EXTRN_MDL_NAME_LBCS}.LBCS.${EXTRN_MDL_VAR_DEFNS_FN}.sh" else - 
extrn_mdl_staging_dir="${COMIN}/${EXTRN_MDL_NAME_LBCS}/for_LBCS" + extrn_mdl_staging_dir="${COMIN}/${EXTRN_MDL_NAME_LBCS}/for_LBCS${SLASH_ENSMEM_SUBDIR}" extrn_mdl_var_defns_fp="${extrn_mdl_staging_dir}/${EXTRN_MDL_VAR_DEFNS_FN}.sh" fi . ${extrn_mdl_var_defns_fp} @@ -122,6 +122,7 @@ case "${CCPP_PHYS_SUITE}" in # "FV3_RRFS_v1beta" | \ "FV3_GFS_v15_thompson_mynn_lam3km" | \ + "FV3_GFS_v17_p8" | \ "FV3_WoFS_v0" | \ "FV3_HRRR" ) if [ "${EXTRN_MDL_NAME_LBCS}" = "RAP" ] || \ @@ -129,6 +130,8 @@ case "${CCPP_PHYS_SUITE}" in varmap_file="GSDphys_var_map.txt" elif [ "${EXTRN_MDL_NAME_LBCS}" = "NAM" ] || \ [ "${EXTRN_MDL_NAME_LBCS}" = "FV3GFS" ] || \ + [ "${EXTRN_MDL_NAME_LBCS}" = "GEFS" ] || \ + [ "${EXTRN_MDL_NAME_LBCS}" = "GDAS" ] || \ [ "${EXTRN_MDL_NAME_LBCS}" = "GSMGFS" ]; then varmap_file="GFSphys_var_map.txt" fi @@ -291,6 +294,20 @@ case "${EXTRN_MDL_NAME_LBCS}" in fi ;; +"GDAS") + tracers_input="[\"spfh\",\"clwmr\",\"o3mr\",\"icmr\",\"rwmr\",\"snmr\",\"grle\"]" + tracers="[\"sphum\",\"liq_wat\",\"o3mr\",\"ice_wat\",\"rainwat\",\"snowwat\",\"graupel\"]" + external_model="GFS" + input_type="gaussian_netcdf" + fn_atm="${EXTRN_MDL_FNS[0]}" + ;; + +"GEFS") + external_model="GFS" + fn_grib2="${EXTRN_MDL_FNS[0]}" + input_type="grib2" + ;; + "RAP") external_model="RAP" input_type="grib2" @@ -364,6 +381,12 @@ for (( i=0; i<${num_fhrs}; i++ )); do fn_atm="${EXTRN_MDL_FNS[$i]}" fi ;; + "GDAS") + fn_atm="${EXTRN_MDL_FNS[0][$i]}" + ;; + "GEFS") + fn_grib2="${EXTRN_MDL_FNS[$i]}" + ;; "RAP") fn_grib2="${EXTRN_MDL_FNS[$i]}" ;; diff --git a/scripts/exregional_make_orog.sh b/scripts/exregional_make_orog.sh index 0a61eb3f03..99da64c4b6 100755 --- a/scripts/exregional_make_orog.sh +++ b/scripts/exregional_make_orog.sh @@ -244,7 +244,7 @@ mv_vrfy "${raw_orog_fp_orig}" "${raw_orog_fp}" # #----------------------------------------------------------------------- # -if [ "${CCPP_PHYS_SUITE}" = "FV3_HRRR" ]; then +if [ "${CCPP_PHYS_SUITE}" = "FV3_HRRR" ] || [ "${CCPP_PHYS_SUITE}" = 
"FV3_GFS_v17_p8" ]; then DATA="${DATA:-${OROG_DIR}/temp_orog_data}" mkdir_vrfy -p ${DATA} cd_vrfy ${DATA} diff --git a/scripts/exregional_nexus_gfs_sfc.sh b/scripts/exregional_nexus_gfs_sfc.sh index ae3b788381..9604d6b764 100755 --- a/scripts/exregional_nexus_gfs_sfc.sh +++ b/scripts/exregional_nexus_gfs_sfc.sh @@ -81,7 +81,12 @@ fi GFS_SFC_TAR_DIR="${NEXUS_GFS_SFC_ARCHV_DIR}/rh${yyyy}/${yyyymm}/${yyyymmdd}" GFS_SFC_TAR_SUB_DIR="gfs.${yyyymmdd}/${hh}/atmos" -GFS_SFC_LOCAL_DIR="${COMINgfs_BASEDIR}/${GFS_SFC_TAR_SUB_DIR}" +if [ "${DO_REAL_TIME}" = "TRUE" ]; then + GFS_SFC_LOCAL_DIR="${COMINgfs}/${GFS_SFC_TAR_SUB_DIR}" +else + GFS_SFC_LOCAL_DIR="${NEXUS_GFS_SFC_DIR}/${GFS_SFC_TAR_SUB_DIR}" +fi + GFS_SFC_DATA_INTVL="3" # copy files from local directory @@ -149,7 +154,7 @@ else htar -tvf ${gfs_sfc_tar_fp} PREP_STEP htar -xvf ${gfs_sfc_tar_fp} ${gfs_sfc_fps} ${REDIRECT_OUT_ERR} || \ - print_err_msg_exit "htar file reading operation (\"htar -xvf ...\") failed." + print_err_msg_exit "htar file reading operation (\"htar -xvf ...\") failed." POST_STEP fi # Move retrieved files to staging directory diff --git a/scripts/exregional_nexus_post_split.sh b/scripts/exregional_nexus_post_split.sh index 3282c8ee1a..f8e34be24b 100755 --- a/scripts/exregional_nexus_post_split.sh +++ b/scripts/exregional_nexus_post_split.sh @@ -47,16 +47,6 @@ This is the ex-script for the task that runs NEXUS. # #----------------------------------------------------------------------- # -# Set OpenMP variables. -# -#----------------------------------------------------------------------- -# -export KMP_AFFINITY=${KMP_AFFINITY_NEXUS_POST_SPLIT} -export OMP_NUM_THREADS=${OMP_NUM_THREADS_NEXUS_POST_SPLIT} -export OMP_STACKSIZE=${OMP_STACKSIZE_NEXUS_POST_SPLIT} -# -#----------------------------------------------------------------------- -# # Set run command. 
# #----------------------------------------------------------------------- diff --git a/scripts/exregional_plot_allvars.py b/scripts/exregional_plot_allvars.py index a81ed9252d..2dfc3ec068 100755 --- a/scripts/exregional_plot_allvars.py +++ b/scripts/exregional_plot_allvars.py @@ -307,6 +307,14 @@ def setup_logging(debug=False): help="Name of native domain used in forecast (and in constructing post file names).", required=True, ) + parser.add_argument( + "--plot-domains", + "-p", + nargs="+", + default=["conus"], + help="Name of domain to plot (either 'conus' or 'regional' or both).", + required=False, + ) parser.add_argument( "--debug", action="store_true", @@ -414,7 +422,7 @@ def setup_logging(debug=False): # Specify plotting domains # User can add domains here, just need to specify lat/lon information below # (if dom == 'conus' block) - domains = ["conus"] # Other option is 'regional' + domains = args.plot_domains # Other option is 'regional' ################################################### # Read in all variables and calculate differences # diff --git a/scripts/exregional_plot_allvars_diff.py b/scripts/exregional_plot_allvars_diff.py index e94ec7717f..2690f7b6a9 100755 --- a/scripts/exregional_plot_allvars_diff.py +++ b/scripts/exregional_plot_allvars_diff.py @@ -316,6 +316,14 @@ def setup_logging(debug=False): help="Name of native domain used in forecast (and in constructing post file names).", required=True, ) + parser.add_argument( + "--plot-domains", + "-p", + nargs="+", + default=["conus"], + help="Name of domains to plot (either 'conus' or 'regional' or both).", + required=False, + ) parser.add_argument( "--debug", action="store_true", @@ -438,7 +446,7 @@ def setup_logging(debug=False): # Specify plotting domains # User can add domains here, just need to specify lat/lon information below # (if dom == 'conus' block) - domains = ["conus"] # Other option is 'regional' + domains = args.plot_domains # Other option is 'regional' 
################################################### # Read in all variables and calculate differences # diff --git a/scripts/exregional_point_source.sh b/scripts/exregional_point_source.sh index 0a47f3f22f..198661a04b 100755 --- a/scripts/exregional_point_source.sh +++ b/scripts/exregional_point_source.sh @@ -8,7 +8,7 @@ #----------------------------------------------------------------------- # . $USHdir/source_util_funcs.sh -source_config_for_task "task_make_grid|task_run_fcst|cpl_aqm_parm" ${GLOBAL_VAR_DEFNS_FP} +source_config_for_task "task_run_fcst|cpl_aqm_parm" ${GLOBAL_VAR_DEFNS_FP} # #----------------------------------------------------------------------- # @@ -47,25 +47,12 @@ This is the ex-script for the task that runs PT_SOURCE. # #----------------------------------------------------------------------- # -# Set OpenMP variables. -# -#----------------------------------------------------------------------- -# -export KMP_AFFINITY=${KMP_AFFINITY_POINT_SOURCE} -export OMP_NUM_THREADS=${OMP_NUM_THREADS_POINT_SOURCE} -export OMP_STACKSIZE=${OMP_STACKSIZE_POINT_SOURCE} -# -#----------------------------------------------------------------------- -# # Set run command. # #----------------------------------------------------------------------- # eval ${PRE_TASK_CMDS} -nprocs=$(( LAYOUT_X*LAYOUT_Y )) -ppn_run_aqm="${PPN_POINT_SOURCE}" -omp_num_threads_run_aqm="${OMP_NUM_THREADS_POINT_SOURCE}" if [ "${FCST_LEN_HRS}" = "-1" ]; then for i_cdate in "${!ALL_CDATES[@]}"; do if [ "${ALL_CDATES[$i_cdate]}" = "${PDY}${cyc}" ]; then @@ -77,16 +64,6 @@ fi nstep=$(( FCST_LEN_HRS+1 )) yyyymmddhh="${PDY}${cyc}" -if [ -z "${RUN_CMD_AQM:-}" ] ; then - print_err_msg_exit "\ - Run command was not set in machine file. \ - Please set RUN_CMD_AQM for your platform" -else - RUN_CMD_AQM=$(eval echo ${RUN_CMD_AQM}) - print_info_msg "$VERBOSE" " - All executables will be submitted with command \'${RUN_CMD_AQM}\'." 
-fi - # #----------------------------------------------------------------------- # @@ -116,60 +93,10 @@ PT_SRC_AK="${PT_SRC_BASEDIR}/9AK1" # if [ ! -s "${DATA}/pt-${yyyymmddhh}.nc" ]; then python3 ${HOMEdir}/sorc/AQM-utils/python_utils/stack-pt-merge.py -s ${yyyymmddhh} -n ${nstep} -conus ${PT_SRC_CONUS} -hi ${PT_SRC_HI} -ak ${PT_SRC_AK} - - # bail if error - if [ ! -s "${DATA}/pt-${yyyymmddhh}.nc" ]; then - print_err_msg_exit "\ -The point source file \"pt-${yyyymmddhh}.nc\" was not generated." - else - print_info_msg "The intermediate file \"pt-${yyyymmddhh}.nc\" exists." - fi fi -# -#---------------------------------------------------------------------- -# -# Export input parameters of PT_SOURCE executable -# -#----------------------------------------------------------------------- -# -export NX=${ESGgrid_NX} -export NY=${ESGgrid_NY} -export LAYOUT_X -export LAYOUT_Y -export TOPO="${NEXUS_FIX_DIR}/${NEXUS_GRID_FN}" -export PT_IN="${DATA}/pt-${yyyymmddhh}.nc" - -# -#---------------------------------------------------------------------- -# -# Temporary output directory for PT_SOURCE executable -# -#----------------------------------------------------------------------- -# -mkdir_vrfy -p "${DATA}/PT" - -# -#---------------------------------------------------------------------- -# -# Execute PT_SOURCE -# -#----------------------------------------------------------------------- -# -PREP_STEP -eval ${RUN_CMD_AQM} ${EXECdir}/decomp-ptemis-mpi ${REDIRECT_OUT_ERR} || \ -print_err_msg_exit "\ -Call to execute PT_SOURCE for Online-CMAQ failed." -POST_STEP - -# -#----------------------------------------------------------------------- -# -# Move output to INPUT_DATA directory. 
-# -#----------------------------------------------------------------------- -# -mv_vrfy "${DATA}/PT" ${INPUT_DATA} +# Move to COMIN +mv_vrfy ${DATA}/pt-${yyyymmddhh}.nc ${INPUT_DATA}/${NET}.${cycle}${dot_ensmem}.PT.nc # #----------------------------------------------------------------------- diff --git a/scripts/exregional_pre_post_stat.sh b/scripts/exregional_pre_post_stat.sh index 6541dcb75b..1fc7bbb7a0 100755 --- a/scripts/exregional_pre_post_stat.sh +++ b/scripts/exregional_pre_post_stat.sh @@ -47,16 +47,6 @@ This is the ex-script for the task that runs POST-UPP-STAT. # #----------------------------------------------------------------------- # -# Set OpenMP variables. -# -#----------------------------------------------------------------------- -# -export KMP_AFFINITY=${KMP_AFFINITY_PRE_POST_STAT} -export OMP_NUM_THREADS=${OMP_NUM_THREADS_PRE_POST_STAT} -export OMP_STACKSIZE=${OMP_STACKSIZE_PRE_POST_STAT} -# -#----------------------------------------------------------------------- -# # Set run command. # #----------------------------------------------------------------------- diff --git a/scripts/exregional_run_fcst.sh b/scripts/exregional_run_fcst.sh index 1380275fde..d4d320f424 100755 --- a/scripts/exregional_run_fcst.sh +++ b/scripts/exregional_run_fcst.sh @@ -93,11 +93,11 @@ the grid and (filtered) orography files ..." cd_vrfy ${DATA}/INPUT # -# For experiments in which the MAKE_GRID_TN task is run, we make the +# For experiments in which the TN_MAKE_GRID task is run, we make the # symlinks to the grid files relative because those files wlll be located # within the experiment directory. This keeps the experiment directory # more portable and the symlinks more readable. 
However, for experiments -# in which the MAKE_GRID_TN task is not run, pregenerated grid files will +# in which the TN_MAKE_GRID task is not run, pregenerated grid files will # be used, and those will be located in an arbitrary directory (specified # by the user) that is somwehere outside the experiment directory. Thus, # in this case, there isn't really an advantage to using relative symlinks, @@ -155,7 +155,7 @@ create_symlink_to_file target="$target" symlink="$symlink" \ # # As with the symlinks grid files above, when creating the symlinks to -# the orography files, use relative paths if running the MAKE_OROG_TN +# the orography files, use relative paths if running the TN_MAKE_OROG # task and absolute paths otherwise. # if [ "${RUN_TASK_MAKE_OROG}" = "TRUE" ]; then @@ -194,7 +194,7 @@ create_symlink_to_file target="$target" symlink="$symlink" \ # that the FV3 model is hardcoded to recognize, and those are the names # we use below. # -if [ "${CCPP_PHYS_SUITE}" = "FV3_HRRR" ]; then +if [ "${CCPP_PHYS_SUITE}" = "FV3_HRRR" ] || [ "${CCPP_PHYS_SUITE}" = "FV3_GFS_v17_p8" ]; then fileids=( "ss" "ls" ) for fileid in "${fileids[@]}"; do diff --git a/scripts/exregional_run_post.sh b/scripts/exregional_run_post.sh index c3de8c3f28..54ec7f5dbf 100755 --- a/scripts/exregional_run_post.sh +++ b/scripts/exregional_run_post.sh @@ -240,14 +240,8 @@ if [ ${len_fhr} -eq 2 ]; then elif [ ${len_fhr} -eq 3 ]; then if [ "${fhr:0:1}" = "0" ]; then post_fhr="${fhr:1}" -# What should happen in the "else" case? Would just setting post_fhr to -# fhr work? Need to test. else - print_err_msg_exit "\ -The \${fhr} variable contains a 3-digit integer whose first digit is not -0. 
In this case, it is not clear how to set the variable post_fhr used -in constructing the grib2 file names generated by UPP: - fhr = \"$fhr\"" + post_fhr="${fhr}" fi else print_err_msg_exit "\ diff --git a/scripts/exregional_run_vx_ensgrid.sh b/scripts/exregional_run_vx_ensgrid.sh index 5177efee4a..75e301176e 100755 --- a/scripts/exregional_run_vx_ensgrid.sh +++ b/scripts/exregional_run_vx_ensgrid.sh @@ -134,7 +134,7 @@ export MET_BIN_EXEC export METPLUS_PATH export METPLUS_CONF export MET_CONFIG -export MODEL +export VX_FCST_MODEL_NAME export NET export POST_OUTPUT_DOMAIN_NAME export NUM_ENS_MEMBERS diff --git a/scripts/exregional_run_vx_ensgrid_mean.sh b/scripts/exregional_run_vx_ensgrid_mean.sh index 9d09e18028..2f0bde5118 100755 --- a/scripts/exregional_run_vx_ensgrid_mean.sh +++ b/scripts/exregional_run_vx_ensgrid_mean.sh @@ -143,7 +143,7 @@ export MET_BIN_EXEC export METPLUS_PATH export METPLUS_CONF export MET_CONFIG -export MODEL +export VX_FCST_MODEL_NAME export NET export POST_OUTPUT_DOMAIN_NAME export LOG_SUFFIX diff --git a/scripts/exregional_run_vx_ensgrid_prob.sh b/scripts/exregional_run_vx_ensgrid_prob.sh index ad6afcd26d..170b26ad64 100755 --- a/scripts/exregional_run_vx_ensgrid_prob.sh +++ b/scripts/exregional_run_vx_ensgrid_prob.sh @@ -143,7 +143,7 @@ export MET_BIN_EXEC export METPLUS_PATH export METPLUS_CONF export MET_CONFIG -export MODEL +export VX_FCST_MODEL_NAME export NET export POST_OUTPUT_DOMAIN_NAME export LOG_SUFFIX diff --git a/scripts/exregional_run_vx_enspoint.sh b/scripts/exregional_run_vx_enspoint.sh index 32793de7b8..524726c23e 100755 --- a/scripts/exregional_run_vx_enspoint.sh +++ b/scripts/exregional_run_vx_enspoint.sh @@ -138,7 +138,7 @@ export MET_BIN_EXEC export METPLUS_PATH export METPLUS_CONF export MET_CONFIG -export MODEL +export VX_FCST_MODEL_NAME export NET export POST_OUTPUT_DOMAIN_NAME export NUM_ENS_MEMBERS diff --git a/scripts/exregional_run_vx_enspoint_mean.sh b/scripts/exregional_run_vx_enspoint_mean.sh index 
58dbc3306c..c2272bb051 100755 --- a/scripts/exregional_run_vx_enspoint_mean.sh +++ b/scripts/exregional_run_vx_enspoint_mean.sh @@ -139,7 +139,7 @@ export MET_BIN_EXEC export METPLUS_PATH export METPLUS_CONF export MET_CONFIG -export MODEL +export VX_FCST_MODEL_NAME export NET export POST_OUTPUT_DOMAIN_NAME diff --git a/scripts/exregional_run_vx_enspoint_prob.sh b/scripts/exregional_run_vx_enspoint_prob.sh index b17eb4b39b..9707eb4875 100755 --- a/scripts/exregional_run_vx_enspoint_prob.sh +++ b/scripts/exregional_run_vx_enspoint_prob.sh @@ -139,7 +139,7 @@ export MET_BIN_EXEC export METPLUS_PATH export METPLUS_CONF export MET_CONFIG -export MODEL +export VX_FCST_MODEL_NAME export NET export POST_OUTPUT_DOMAIN_NAME diff --git a/scripts/exregional_run_vx_gridstat.sh b/scripts/exregional_run_vx_gridstat.sh index dcc8da93b8..f325a1de4a 100755 --- a/scripts/exregional_run_vx_gridstat.sh +++ b/scripts/exregional_run_vx_gridstat.sh @@ -85,10 +85,10 @@ if [ $RUN_ENVIR = "nco" ]; then export DOT_MEM_CUSTOM=".{custom?fmt=%s}" else if [[ ${DO_ENSEMBLE} == "FALSE" ]]; then - export INPUT_BASE=${EXPTDIR}/${CDATE}/postprd + export INPUT_BASE=${VX_FCST_INPUT_BASEDIR}/${CDATE}/postprd export OUTPUT_BASE=${EXPTDIR}/${CDATE} else - export INPUT_BASE=${EXPTDIR}/${CDATE}/${SLASH_ENSMEM_SUBDIR}/postprd + export INPUT_BASE=${VX_FCST_INPUT_BASEDIR}/${CDATE}/${SLASH_ENSMEM_SUBDIR}/postprd export OUTPUT_BASE=${EXPTDIR}/${CDATE}/${SLASH_ENSMEM_SUBDIR} fi export MEM_BASE=$EXPTDIR/$CDATE @@ -116,7 +116,7 @@ if [[ ${DO_ENSEMBLE} == "FALSE" ]]; then fi elif [[ ${DO_ENSEMBLE} == "TRUE" ]]; then ENSMEM=`echo ${SLASH_ENSMEM_SUBDIR} | cut -d"/" -f2` - MODEL=${MODEL}_${ENSMEM} + VX_FCST_MODEL_NAME=${VX_FCST_MODEL_NAME}_${ENSMEM} if [ ${VAR} == "APCP" ]; then LOG_SUFFIX=gridstat_${CDATE}_${ENSMEM}_${VAR}_${ACCUM}h else @@ -150,7 +150,7 @@ export MET_BIN_EXEC export METPLUS_PATH export METPLUS_CONF export MET_CONFIG -export MODEL +export VX_FCST_MODEL_NAME export NET export POST_OUTPUT_DOMAIN_NAME 
diff --git a/scripts/exregional_run_vx_pointstat.sh b/scripts/exregional_run_vx_pointstat.sh index 65df885e66..8c83309f1e 100755 --- a/scripts/exregional_run_vx_pointstat.sh +++ b/scripts/exregional_run_vx_pointstat.sh @@ -85,10 +85,10 @@ if [ $RUN_ENVIR = "nco" ]; then export DOT_MEM_CUSTOM=".{custom?fmt=%s}" else if [[ ${DO_ENSEMBLE} == "FALSE" ]]; then - export INPUT_BASE=${EXPTDIR}/${CDATE}/postprd + export INPUT_BASE=${VX_FCST_INPUT_BASEDIR}/${CDATE}/postprd export OUTPUT_BASE=${EXPTDIR}/${CDATE} else - export INPUT_BASE=${EXPTDIR}/${CDATE}/${SLASH_ENSMEM_SUBDIR}/postprd + export INPUT_BASE=${VX_FCST_INPUT_BASEDIR}/${CDATE}/${SLASH_ENSMEM_SUBDIR}/postprd export OUTPUT_BASE=${EXPTDIR}/${CDATE}/${SLASH_ENSMEM_SUBDIR} fi export MEM_BASE=$EXPTDIR/$CDATE @@ -112,7 +112,7 @@ if [[ ${DO_ENSEMBLE} == "FALSE" ]]; then LOG_SUFFIX=pointstat_${CDATE} elif [[ ${DO_ENSEMBLE} == "TRUE" ]]; then ENSMEM=`echo ${SLASH_ENSMEM_SUBDIR} | cut -d"/" -f2` - MODEL=${MODEL}_${ENSMEM} + VX_FCST_MODEL_NAME=${VX_FCST_MODEL_NAME}_${ENSMEM} LOG_SUFFIX=pointstat_${CDATE}_${ENSMEM} fi @@ -141,7 +141,7 @@ export MET_BIN_EXEC export METPLUS_PATH export METPLUS_CONF export MET_CONFIG -export MODEL +export VX_FCST_MODEL_NAME export NET export POST_OUTPUT_DOMAIN_NAME diff --git a/sorc/CMakeLists.txt b/sorc/CMakeLists.txt index 9cfeb4604e..4d2bb4d872 100644 --- a/sorc/CMakeLists.txt +++ b/sorc/CMakeLists.txt @@ -12,25 +12,18 @@ message(STATUS "BUILD_UFS_UTILS ......... ${BUILD_UFS_UTILS}") message(STATUS "BUILD_GSI ............... ${BUILD_GSI}") message(STATUS "BUILD_UPP ............... ${BUILD_UPP}") message(STATUS "BUILD_RRFS_UTILS ........ 
${BUILD_RRFS_UTILS}") - -# Build UFS utilities -if(BUILD_UFS_UTILS) - list(APPEND TARGET_LIST UFS_UTILS) - - list(APPEND UFS_UTILS_ARGS - "-DCMAKE_INSTALL_PREFIX=${CMAKE_INSTALL_PREFIX}" - "-DCMAKE_INSTALL_BINDIR=${CMAKE_INSTALL_BINDIR}" - "-DBUILD_TESTING=OFF" - ) - - ExternalProject_Add(UFS_UTILS - PREFIX ${CMAKE_CURRENT_BINARY_DIR}/UFS_UTILS - SOURCE_DIR ${CMAKE_CURRENT_SOURCE_DIR}/UFS_UTILS - INSTALL_DIR ${CMAKE_INSTALL_PREFIX} - CMAKE_ARGS ${UFS_UTILS_ARGS} - BUILD_ALWAYS TRUE - STEP_TARGETS build - ) +message(STATUS "BUILD_NEXUS ............. ${BUILD_NEXUS}") +message(STATUS "BUILD_AQM_UTILS ......... ${BUILD_AQM_UTILS}") + +# Set dependency of ufs weather model only for coupled model +if (NOT APP) + set(UFS_DEPEND "") +else() + if (BUILD_UFS) + set(UFS_DEPEND "ufs-weather-model") + else() + set(UFS_DEPEND "") + endif() endif() # Build UFS weather model @@ -38,7 +31,11 @@ if (BUILD_UFS) list(APPEND TARGET_LIST ufs-weather-model) if(NOT CCPP_SUITES) - set(CCPP_SUITES "FV3_GFS_2017_gfdlmp,FV3_GFS_2017_gfdlmp_regional,FV3_GFS_v15p2,FV3_GFS_v16,FV3_RRFS_v1beta,FV3_HRRR,FV3_GFS_v15_thompson_mynn_lam3km,FV3_WoFS_v0") + if(CPL_AQM) + set(CCPP_SUITES "FV3_GFS_v15p2,FV3_GFS_v16,FV3_GFS_v17_p8") + else() + set(CCPP_SUITES "FV3_GFS_2017_gfdlmp,FV3_GFS_2017_gfdlmp_regional,FV3_GFS_v15p2,FV3_GFS_v16,FV3_GFS_v17_p8,FV3_RRFS_v1beta,FV3_HRRR,FV3_GFS_v15_thompson_mynn_lam3km,FV3_WoFS_v0") + endif() endif() if(NOT APP) @@ -93,11 +90,33 @@ if (BUILD_UFS) ) endif() +# Build UFS utilities +if(BUILD_UFS_UTILS) + list(APPEND TARGET_LIST UFS_UTILS) + + list(APPEND UFS_UTILS_ARGS + "-DCMAKE_INSTALL_PREFIX=${CMAKE_INSTALL_PREFIX}" + "-DCMAKE_INSTALL_BINDIR=${CMAKE_INSTALL_BINDIR}" + "-DBUILD_TESTING=OFF" + ) + + ExternalProject_Add(UFS_UTILS + DEPENDS ${UFS_DEPEND} + PREFIX ${CMAKE_CURRENT_BINARY_DIR}/UFS_UTILS + SOURCE_DIR ${CMAKE_CURRENT_SOURCE_DIR}/UFS_UTILS + INSTALL_DIR ${CMAKE_INSTALL_PREFIX} + CMAKE_ARGS ${UFS_UTILS_ARGS} + BUILD_ALWAYS TRUE + STEP_TARGETS build + ) +endif() + 
# Build UPP if (BUILD_UPP) list(APPEND TARGET_LIST UPP) ExternalProject_Add(UPP + DEPENDS ${UFS_DEPEND} PREFIX ${CMAKE_CURRENT_BINARY_DIR}/UPP SOURCE_DIR ${CMAKE_CURRENT_SOURCE_DIR}/UPP INSTALL_DIR ${CMAKE_INSTALL_PREFIX} @@ -136,6 +155,34 @@ if (BUILD_RRFS_UTILS) ) endif() +if (CPL_AQM) + if (BUILD_NEXUS) + list(APPEND TARGET_LIST NEXUS) + + ExternalProject_Add(arl_nexus + DEPENDS ${UFS_DEPEND} + PREFIX ${CMAKE_CURRENT_BINARY_DIR}/arl_nexus + SOURCE_DIR ${CMAKE_CURRENT_SOURCE_DIR}/arl_nexus + INSTALL_DIR ${CMAKE_INSTALL_PREFIX} + CMAKE_ARGS -DCMAKE_INSTALL_PREFIX=${CMAKE_INSTALL_PREFIX} -DCMAKE_INSTALL_BINDIR=${CMAKE_INSTALL_BINDIR} + BUILD_ALWAYS TRUE + ) + endif() + + if (BUILD_AQM_UTILS) + list(APPEND TARGET_LIST AQM_UTILS) + + ExternalProject_Add(AQM-utils + DEPENDS ${UFS_DEPEND} + PREFIX ${CMAKE_CURRENT_BINARY_DIR}/AQM-utils + SOURCE_DIR ${CMAKE_CURRENT_SOURCE_DIR}/AQM-utils + INSTALL_DIR ${CMAKE_INSTALL_PREFIX} + CMAKE_ARGS -DCMAKE_INSTALL_PREFIX=${CMAKE_INSTALL_PREFIX} -DCMAKE_INSTALL_BINDIR=${CMAKE_INSTALL_BINDIR} -DBUILD_POST_STAT=${BUILD_POST_STAT} + BUILD_ALWAYS TRUE + ) + endif() +endif() + # add custom target for separate build and install foreach(i ${TARGET_LIST}) list(APPEND BUILD_LIST "${i}-build") diff --git a/tests/WE2E/get_WE2Etest_names_subdirs_descs.sh b/tests/WE2E/get_WE2Etest_names_subdirs_descs.sh index 7ce1a6dd8b..2e7c312701 100755 --- a/tests/WE2E/get_WE2Etest_names_subdirs_descs.sh +++ b/tests/WE2E/get_WE2Etest_names_subdirs_descs.sh @@ -497,9 +497,12 @@ information on all WE2E tests: "grids_extrn_mdls_suites_community" \ "grids_extrn_mdls_suites_nco" \ "release_SRW_v1" \ + "verification" \ "wflow_features" \ ) num_category_subdirs="${#category_subdirs[@]}" + + orig_dir=$(pwd) # #----------------------------------------------------------------------- # @@ -817,6 +820,10 @@ This is probably because it is a directory. Please correct and rerun." 
num_prim_tests="${#prim_test_names[@]}" num_alt_tests="${#alt_test_names[@]}" # +# Change location back to original directory. +# + cd_vrfy "${orig_dir}" +# #----------------------------------------------------------------------- # # Create the array test_names that contains both the primary and alternate diff --git a/tests/WE2E/machine_suites/fundamental.hera.gnu.com b/tests/WE2E/machine_suites/fundamental.hera.gnu.com index 7306903f1d..136600466c 100644 --- a/tests/WE2E/machine_suites/fundamental.hera.gnu.com +++ b/tests/WE2E/machine_suites/fundamental.hera.gnu.com @@ -1,4 +1,5 @@ MET_verification +MET_verification_only_vx community_ensemble_2mems grid_RRFS_CONUS_25km_ics_FV3GFS_lbcs_FV3GFS_suite_GFS_2017_gfdlmp grid_RRFS_CONUS_25km_ics_FV3GFS_lbcs_FV3GFS_suite_GFS_2017_gfdlmp_regional_plot diff --git a/tests/WE2E/monitor_jobs.py b/tests/WE2E/monitor_jobs.py new file mode 100755 index 0000000000..9e34a87264 --- /dev/null +++ b/tests/WE2E/monitor_jobs.py @@ -0,0 +1,274 @@ +#!/usr/bin/env python3 + +import sys +import argparse +import logging +import subprocess +import sqlite3 +import time +from textwrap import dedent +from datetime import datetime +from contextlib import closing + +sys.path.append("../../ush") + +from python_utils import ( + load_config_file, + cfg_to_yaml_str +) + +from check_python_version import check_python_version + + +def monitor_jobs(expt_dict: dict, monitor_file: str = '', debug: bool = False) -> str: + """Function to monitor and run jobs for the specified experiment using Rocoto + + Args: + expt_dict (dict): A dictionary containing the information needed to run + one or more experiments. 
See example file monitor_jobs.yaml + monitor_file (str): [optional] + debug (bool): [optional] Enable extra output for debugging + Returns: + str: The name of the file used for job monitoring (when script is finished, this + contains results/summary) + + """ + + starttime = datetime.now() + # Write monitor_file, which will contain information on each monitored experiment + if not monitor_file: + monitor_file = f'monitor_jobs_{starttime.strftime("%Y%m%d%H%M%S")}.yaml' + logging.info(f"Writing information for all experiments to {monitor_file}") + + write_monitor_file(monitor_file,expt_dict) + + # Perform initial setup for each experiment + logging.info("Checking tests available for monitoring...") + for expt in expt_dict: + logging.info(f"Starting experiment {expt} running") + expt_dict[expt] = update_expt_status(expt_dict[expt], expt) + + write_monitor_file(monitor_file,expt_dict) + + logging.info(f'Setup complete; monitoring {len(expt_dict)} experiments') + + #Make a copy of experiment dictionary; will use this copy to monitor active experiments + running_expts = expt_dict.copy() + + i = 0 + while running_expts: + i += 1 + for expt in running_expts.copy(): + expt_dict[expt] = update_expt_status(expt_dict[expt], expt) + running_expts[expt] = expt_dict[expt] + if running_expts[expt]["status"] in ['DEAD','ERROR','COMPLETE']: + logging.info(f'Experiment {expt} is {running_expts[expt]["status"]}; will no longer monitor.') + running_expts.pop(expt) + continue + logging.debug(f'Experiment {expt} status is {expt_dict[expt]["status"]}') + + + write_monitor_file(monitor_file,expt_dict) + endtime = datetime.now() + total_walltime = endtime - starttime + + logging.debug(f"Finished loop {i}\nWalltime so far is {str(total_walltime)}") + + #Slow things down just a tad between loops so experiments behave better + time.sleep(5) + + + endtime = datetime.now() + total_walltime = endtime - starttime + + logging.info(f'All {num_expts} experiments finished in {str(total_walltime)}') + + 
return monitor_file + +def update_expt_status(expt: dict, name: str) -> dict: + """ + This function reads the dictionary showing the location of a given experiment, runs a + `rocotorun` command to update the experiment (running new jobs and updating the status of + previously submitted ones), and reads the rocoto database file to update the status of + each job for that experiment in the experiment dictionary. + + The function then uses a simple set of rules to combine the statuses of every task + into a useful "status" for the whole experiment, and returns the updated experiment dictionary. + + Experiment "status" levels explained: + CREATED: The experiments have been created, but the monitor script has not yet processed them. + This is immediately overwritten at the beginning of the "monitor_jobs" function, so we + should never see this status in this function. Including just for completeness sake. + SUBMITTING: All jobs are in status SUBMITTING or SUCCEEDED. This is a normal state; we will + continue to monitor this experiment. + DYING: One or more tasks have died (status "DEAD"), so this experiment has had an error. + We will continue to monitor this experiment until all tasks are either status DEAD or + status SUCCEEDED (see next entry). + DEAD: One or more tasks are at status DEAD, and the rest are either DEAD or SUCCEEDED. We + will no longer monitor this experiment. + ERROR: One or more tasks are at status UNKNOWN, meaning that rocoto has failed to track the + job associated with that task. This will require manual intervention to solve, so we + will no longer monitor this experiment. + This status may also appear if we fail to read the rocoto database file. + RUNNING: One or more jobs are at status RUNNING, and the rest are either status QUEUED, SUBMITTED, + or SUCCEEDED. This is a normal state; we will continue to monitor this experiment. + QUEUED: One or more jobs are at status QUEUED, and some others may be at status SUBMITTED or + SUCCEEDED. 
+ This is a normal state; we will continue to monitor this experiment. + SUCCEEDED: All jobs are status SUCCEEDED; we will monitor for one more cycle in case there are + unsubmitted jobs remaining. + COMPLETE: All jobs are status SUCCEEDED, and we have monitored this job for an additional cycle + to ensure there are no un-submitted jobs. We will no longer monitor this experiment. + + Args: + expt (dict): A dictionary containing the information for an individual experiment, as + described in the main monitor_jobs() function. + name (str): [optional] + Returns: + dict: The updated experiment dictionary. + """ + + #If we are no longer tracking this experiment, return unchanged + if expt["status"] in ['DEAD','ERROR','COMPLETE']: + return expt + + # Update experiment, read rocoto database + rocoto_db = f"{expt['expt_dir']}/FV3LAM_wflow.db" + rocotorun_cmd = ["rocotorun", f"-w {expt['expt_dir']}/FV3LAM_wflow.xml", f"-d {rocoto_db}"] + subprocess.run(rocotorun_cmd) + + logging.debug(f"Reading database for experiment {name}, updating experiment dictionary") + try: + # This section of code queries the "job" table of the rocoto database, returning a list + # of tuples containing the taskname, cycle, and state of each job respectively + with closing(sqlite3.connect(rocoto_db)) as connection: + with closing(connection.cursor()) as cur: + db = cur.execute('SELECT taskname,cycle,state from jobs').fetchall() + except: + logging.warning(f"Unable to read database {rocoto_db}\nCan not track experiment {name}") + expt["status"] = "ERROR" + return expt + + for task in db: + # For each entry from rocoto database, store that under a dictionary key named TASKNAME_CYCLE + # Cycle comes from the database in Unix Time (seconds), so convert to human-readable + cycle = datetime.utcfromtimestamp(task[1]).strftime('%Y%m%d%H%M') + expt[f"{task[0]}_{cycle}"] = task[2] + + #Run rocotorun again to get around rocotobqserver proliferation issue + subprocess.run(rocotorun_cmd) + + statuses = list() 
for task in expt: + # Skip non-task entries + if task in ["expt_dir","status"]: + continue + statuses.append(expt[task]) + + if "DEAD" in statuses: + still_live = ["RUNNING", "SUBMITTING", "QUEUED"] + if any(status in still_live for status in statuses): + logging.debug(f'DEAD job in experiment {name}; continuing to track until all jobs are complete') + expt["status"] = "DYING" + else: + expt["status"] = "DEAD" + return expt + + if "UNKNOWN" in statuses: + expt["status"] = "ERROR" + + if "RUNNING" in statuses: + expt["status"] = "RUNNING" + elif "QUEUED" in statuses: + expt["status"] = "QUEUED" + elif "SUBMITTING" in statuses: + expt["status"] = "SUBMITTING" + elif "SUCCEEDED" in statuses: + if expt["status"] == "SUCCEEDED": + expt["status"] = "COMPLETE" + else: + expt["status"] = "SUCCEEDED" + else: + logging.fatal("Some kind of horrible thing has happened") + raise ValueError(dedent(f"""Some kind of horrible thing has happened to the experiment status + for experiment {name} + status is {expt["status"]} + all task statuses are {statuses}""")) + + return expt + + +def write_monitor_file(monitor_file: str, expt_dict: dict): + try: + with open(monitor_file,"w") as f: + f.write("### WARNING ###\n") + f.write("### THIS FILE IS AUTO_GENERATED AND REGULARLY OVER-WRITTEN BY monitor_jobs.py\n") + f.write("### EDITS MAY RESULT IN MISBEHAVIOR OF EXPERIMENTS RUNNING\n") + f.writelines(cfg_to_yaml_str(expt_dict)) + except: + logging.fatal("\n********************************\n") + logging.fatal(f"WARNING WARNING WARNING\nFailure occurred while writing monitor file {monitor_file}") + logging.fatal("File may be corrupt or invalid for re-run!!") + logging.fatal("\n********************************\n") + raise + + +def setup_logging(logfile: str = "log.run_WE2E_tests", debug: bool = False) -> None: + """ + Sets up logging, printing high-priority (INFO and higher) messages to screen, and printing all + messages with detailed timing and routine info in the specified text file. 
+ """ + logging.getLogger().setLevel(logging.DEBUG) + + formatter = logging.Formatter("%(name)-16s %(levelname)-8s %(message)s") + + fh = logging.FileHandler(logfile, mode='w') + fh.setLevel(logging.DEBUG) + fh.setFormatter(formatter) + logging.getLogger().addHandler(fh) + + logging.debug(f"Finished setting up debug file logging in {logfile}") + console = logging.StreamHandler() + if debug: + console.setLevel(logging.DEBUG) + else: + console.setLevel(logging.INFO) + logging.getLogger().addHandler(console) + logging.debug("Logging set up successfully") + + +if __name__ == "__main__": + + check_python_version() + + logfile='log.monitor_jobs' + + #Parse arguments + parser = argparse.ArgumentParser(description="Script for monitoring and running jobs in a specified experiment, as specified in a yaml configuration file\n") + + parser.add_argument('-y', '--yaml_file', type=str, help='YAML-format file specifying the information of jobs to be run; for an example file, see monitor_jobs.yaml', required=True) + parser.add_argument('-d', '--debug', action='store_true', help='Script will be run in debug mode with more verbose output') + + args = parser.parse_args() + + setup_logging(logfile,args.debug) + + expt_dict = load_config_file(args.yaml_file) + + #Call main function + + try: + monitor_jobs(expt_dict,args.yaml_file, args.debug) + except: + logging.exception( + dedent( + f""" + ********************************************************************* + FATAL ERROR: + An error occurred. See the error message(s) printed below. 
+ For more detailed information, check the log file from the workflow + generation script: {logfile} + *********************************************************************\n + """ + ) + ) diff --git a/tests/WE2E/monitor_jobs.yaml b/tests/WE2E/monitor_jobs.yaml new file mode 100644 index 0000000000..03d15c5d45 --- /dev/null +++ b/tests/WE2E/monitor_jobs.yaml @@ -0,0 +1,54 @@ +# This is an example yaml file showing the various entries that can be created for tracking jobs by monitor_jobs.py +# Any valid file created by monitor_jobs.py (unless corrupted) can be re-submitted for continued tracking if any +# experiments are yet to be completed. +# If an experiment with status: COMPLETE, DEAD, or ERROR is read by monitor_jobs.py, it will be ignored. +#First example: an experiment that has been created by generate_FV3LAM_wflow.py but has not yet started running +custom_ESGgrid: + expt_dir: /some_directory/expt_dirs/custom_ESGgrid + status: CREATED +#Second example: an experiment that has just been submitted +custom_ESGgrid: + expt_dir: /some_directory/expt_dirs/custom_ESGgrid + status: SUBMITTING + make_grid_201907010000: SUBMITTING + get_extrn_ics_201907010000: SUBMITTING + get_extrn_lbcs_201907010000: SUBMITTING +#Third example: an experiment with a mix of successful and running tasks +custom_ESGgrid: + expt_dir: /some_directory/expt_dirs/custom_ESGgrid + status: RUNNING + make_grid_201907010000: SUCCEEDED + get_extrn_ics_201907010000: SUCCEEDED + get_extrn_lbcs_201907010000: SUCCEEDED + make_orog_201907010000: SUCCEEDED + make_sfc_climo_201907010000: SUCCEEDED + make_ics_201907010000: RUNNING + make_lbcs_201907010000: RUNNING +#Fourth example: an experiment that has completed successfully +custom_ESGgrid: + expt_dir: /some_directory/expt_dirs/custom_ESGgrid + status: COMPLETE + make_grid_201907010000: SUCCEEDED + get_extrn_ics_201907010000: SUCCEEDED + get_extrn_lbcs_201907010000: SUCCEEDED + make_orog_201907010000: SUCCEEDED + make_sfc_climo_201907010000: 
SUCCEEDED + make_ics_201907010000: SUCCEEDED + make_lbcs_201907010000: SUCCEEDED + run_fcst_201907010000: SUCCEEDED + run_post_f000_201907010000: SUCCEEDED + run_post_f001_201907010000: SUCCEEDED + run_post_f002_201907010000: SUCCEEDED + run_post_f003_201907010000: SUCCEEDED + run_post_f004_201907010000: SUCCEEDED + run_post_f005_201907010000: SUCCEEDED + run_post_f006_201907010000: SUCCEEDED +#Fifth example: an experiment that has died due to a failed task. +custom_ESGgrid: + expt_dir: /some_directory/expt_dirs/custom_ESGgrid + status: DEAD + make_grid_201907010000: SUCCEEDED + get_extrn_ics_201907010000: SUCCEEDED + get_extrn_lbcs_201907010000: SUCCEEDED + make_orog_201907010000: DEAD + diff --git a/tests/WE2E/run_WE2E_tests.py b/tests/WE2E/run_WE2E_tests.py new file mode 100755 index 0000000000..dc472f8333 --- /dev/null +++ b/tests/WE2E/run_WE2E_tests.py @@ -0,0 +1,474 @@ +#!/usr/bin/env python3 + +import os +import sys +import glob +import argparse +import logging +from textwrap import dedent + +sys.path.append("../../ush") + +from generate_FV3LAM_wflow import generate_FV3LAM_wflow +from python_utils import ( + cfg_to_yaml_str, + load_config_file, +) + +from check_python_version import check_python_version + +from monitor_jobs import monitor_jobs + + +def run_we2e_tests(homedir, args) -> None: + """Function to run the WE2E tests selected by the user + + Args: + homedir (str): The full path of the top-level app directory + args : The argparse.Namespace object containing command-line arguments + Returns: + None + """ + + # Set up logging to write to screen and logfile + setup_logging(debug=args.debug) + + # Set some important directories + ushdir=os.path.join(homedir,'ush') + + # Set some variables based on input arguments + run_envir = args.run_envir + machine = args.machine.lower() + + # If args.tests is a list of length more than one, we assume it is a list of test names + if len(args.tests) > 1: + tests_to_check=args.tests + logging.debug(f"User specified a 
list of tests:\n{tests_to_check}") + else: + #First see if args.tests is a valid test name + user_spec_tests = args.tests + logging.debug(f'Checking if {user_spec_tests} is a valid test name') + match = check_test(user_spec_tests[0]) + if match: + tests_to_check = user_spec_tests + else: + # If not a valid test name, check if it is a test suite + logging.debug(f'Checking if {user_spec_tests} is a valid test suite') + if user_spec_tests[0] == 'all': + alltests = glob.glob('test_configs/**/config*.yaml', recursive=True) + tests_to_check = [] + for f in alltests: + filename = os.path.basename(f) + # We just want the test name in this list, so cut out the "config." prefix and ".yaml" extension + tests_to_check.append(filename[7:-5]) + logging.debug(f"Will check all tests:\n{tests_to_check}") + elif user_spec_tests[0] in ['fundamental', 'comprehensive']: + # I am writing this section of code under protest; we should use args.run_envir to check for run_envir-specific files! + prefix = f"machine_suites/{user_spec_tests[0]}" + testfilename = f"{prefix}.{machine}.{args.compiler}.nco" + if not os.path.isfile(testfilename): + testfilename = f"{prefix}.{machine}.{args.compiler}.com" + if not os.path.isfile(testfilename): + testfilename = f"{prefix}.{machine}.{args.compiler}" + if not os.path.isfile(testfilename): + testfilename = f"{prefix}.{machine}" + if not os.path.isfile(testfilename): + testfilename = f"machine_suites/{user_spec_tests[0]}" + else: + if not run_envir: + run_envir = 'community' + logging.debug(f'{testfilename} exists for this platform and run_envir has not been specified. '\ + f'Setting run_envir = {run_envir} for all tests') + else: + if not run_envir: + run_envir = 'nco' + logging.debug(f'{testfilename} exists for this platform and run_envir has not been specified. '\ + f'Setting run_envir = {run_envir} for all tests') + logging.debug(f"Reading test file: {testfilename}") + with open(testfilename) as f: + tests_to_check = [x.rstrip() for x in f] + 
logging.debug(f"Will check {user_spec_tests[0]} tests:\n{tests_to_check}") + else: + # If we have gotten this far then the only option left for user_spec_tests is a file containing test names + logging.debug(f'Checking if {user_spec_tests} is a file containing test names') + if os.path.isfile(user_spec_tests[0]): + with open(user_spec_tests[0]) as f: + tests_to_check = [x.rstrip() for x in f] + else: + raise FileNotFoundError(dedent(f""" + The specified 'tests' argument '{user_spec_tests}' + does not appear to be a valid test name, a valid test suite, or a file containing valid test names. + + Check your inputs and try again. + """)) + + + logging.info("Checking that all tests are valid") + + tests_to_run=check_tests(tests_to_check) + + pretty_list = "\n".join(str(x) for x in tests_to_run) + logging.info(f'Will run {len(tests_to_run)} tests:\n{pretty_list}') + + + config_default_file = os.path.join(ushdir,'config_defaults.yaml') + logging.debug(f"Loading config defaults file {config_default_file}") + config_defaults = load_config_file(config_default_file) + + machine_file = os.path.join(ushdir, 'machine', f'{machine}.yaml') + logging.debug(f"Loading machine defaults file {machine_file}") + machine_defaults = load_config_file(machine_file) + + # Set up dictionary for job monitoring yaml + if not args.use_cron_to_relaunch: + monitor_yaml = dict() + + for test in tests_to_run: + #Starting with test yaml template, fill in user-specified and machine- and + # test-specific options, then write resulting complete config.yaml + test_name = os.path.basename(test).split('.')[1] + logging.debug(f"For test {test_name}, constructing config.yaml") + test_cfg = load_config_file(test) + + test_cfg['user'].update({"MACHINE": machine}) + test_cfg['user'].update({"ACCOUNT": args.account}) + if run_envir: + test_cfg['user'].update({"RUN_ENVIR": run_envir}) + # if platform section was not in input config, initialize as empty dict + if 'platform' not in test_cfg: + test_cfg['platform'] = 
dict() + test_cfg['platform'].update({"BUILD_MOD_FN": args.modulefile}) + test_cfg['workflow'].update({"COMPILER": args.compiler}) + if args.expt_basedir: + test_cfg['workflow'].update({"EXPT_BASEDIR": args.expt_basedir}) + test_cfg['workflow'].update({"EXPT_SUBDIR": test_name}) + if args.exec_subdir: + test_cfg['workflow'].update({"EXEC_SUBDIR": args.exec_subdir}) + if args.use_cron_to_relaunch: + test_cfg['workflow'].update({"USE_CRON_TO_RELAUNCH": args.use_cron_to_relaunch}) + if args.cron_relaunch_intvl_mnts: + test_cfg['workflow'].update({"CRON_RELAUNCH_INTVL_MNTS": args.cron_relaunch_intvl_mnts}) + if args.debug_tests: + test_cfg['workflow'].update({"DEBUG": args.debug_tests}) + if args.verbose_tests: + test_cfg['workflow'].update({"VERBOSE": args.verbose_tests}) + + logging.debug(f"Overwriting WE2E-test-specific settings for test \n{test_name}\n") + + if 'task_get_extrn_ics' in test_cfg: + logging.debug(test_cfg['task_get_extrn_ics']) + test_cfg['task_get_extrn_ics'] = check_task_get_extrn_ics(test_cfg,machine_defaults,config_defaults) + logging.debug(test_cfg['task_get_extrn_ics']) + if 'task_get_extrn_lbcs' in test_cfg: + logging.debug(test_cfg['task_get_extrn_lbcs']) + test_cfg['task_get_extrn_lbcs'] = check_task_get_extrn_lbcs(test_cfg,machine_defaults,config_defaults) + logging.debug(test_cfg['task_get_extrn_lbcs']) + + + logging.debug(f"Writing updated config.yaml for test {test_name}\nbased on specified command-line arguments:\n") + logging.debug(cfg_to_yaml_str(test_cfg)) + with open(ushdir + "/config.yaml","w") as f: + f.writelines(cfg_to_yaml_str(test_cfg)) + + logging.debug(f"Calling workflow generation function for test {test_name}\n") + if args.quiet: + console_handler = logging.getLogger().handlers[1] + console_handler.setLevel(logging.WARNING) + expt_dir = generate_FV3LAM_wflow(ushdir,logfile=f"{ushdir}/log.generate_FV3LAM_wflow",debug=args.debug) + if args.quiet: + if args.debug: + console_handler.setLevel(logging.DEBUG) + else: + 
console_handler.setLevel(logging.INFO) + logging.info(f"Workflow for test {test_name} successfully generated in\n{expt_dir}\n") + # If this job is not using crontab, we need to add an entry to monitor.yaml + if 'USE_CRON_TO_RELAUNCH' not in test_cfg['workflow']: + test_cfg['workflow'].update({"USE_CRON_TO_RELAUNCH": False}) + if not test_cfg['workflow']['USE_CRON_TO_RELAUNCH']: + logging.debug(f'Creating entry for job {test_name} in job monitoring dict') + monitor_yaml[test_name] = dict() + monitor_yaml[test_name].update({"expt_dir": expt_dir}) + monitor_yaml[test_name].update({"status": "CREATED"}) + + if not args.use_cron_to_relaunch: + logging.info("calling function that monitors jobs, prints summary") + monitor_file = monitor_jobs(monitor_yaml, debug=args.debug) + + logging.info("All experiments are complete") + logging.info(f"Summary of results available in {monitor_file}") + + + + + +def check_tests(tests: list) -> list: + """ + Function for checking that all tests in a provided list of tests are valid + + Args: + tests : List of potentially valid test names + Returns: + tests_to_run : List of config files corresponding to test names + """ + + testfiles = glob.glob('test_configs/**/config*.yaml', recursive=True) + # Check that there are no duplicate test filenames + testfilenames=[] + for testfile in testfiles: + if os.path.basename(testfile) in testfilenames: + duplicates = glob.glob('test_configs/**/' + os.path.basename(testfile), recursive=True) + raise Exception(dedent(f""" + Found duplicate test file names: + {duplicates} + Ensure that each test file name under the test_configs/ directory + is unique. 
+ """)) + testfilenames.append(os.path.basename(testfile)) + tests_to_run=[] + for test in tests: + # Skip blank/empty testnames; this avoids failure if newlines or spaces are included + if not test or test.isspace(): + continue + match = check_test(test) + if not match: + raise Exception(f"Could not find test {test}") + tests_to_run.append(match) + # Because some test files are symlinks to other tests, check that we don't + # include the same test twice + for testfile in tests_to_run.copy(): + if os.path.islink(testfile): + if os.path.realpath(testfile) in tests_to_run: + logging.warning(dedent(f"""WARNING: test file {testfile} is a symbolic link to a + test file ({os.path.realpath(testfile)}) that is also included in the + test list. Only the latter test will be run.""")) + tests_to_run.remove(testfile) + if len(tests_to_run) != len(set(tests_to_run)): + logging.warning("\nWARNING: Duplicate test names were found in list. Removing duplicates and continuing.\n") + tests_to_run = list(set(tests_to_run)) + return tests_to_run + + + +def check_test(test: str) -> str: + """ + Function for checking that a string corresponds to a valid test name + + Args: + test (str) : String of potential test name + Returns: + str : File name of test config file (empty string if no test file found) + """ + # potential test files + testfiles = glob.glob('test_configs/**/config*.yaml', recursive=True) + # potential test file for input test name + test_config=f'config.{test.strip()}.yaml' + config = '' + for testfile in testfiles: + if test_config in testfile: + logging.debug(f"found test {test}, testfile {testfile}") + config = os.path.abspath(testfile) + return config + + +def check_task_get_extrn_ics(cfg: dict, mach: dict, dflt: dict) -> dict: + """ + Function for checking and updating various settings in task_get_extrn_ics section of test config yaml + + Args: + cfg : Dictionary loaded from test config file + mach : Dictionary loaded from machine settings file + dflt : Dictionary 
loaded from default config file + Returns: + cfg_ics : Updated dictionary for task_get_extrn_ics section of test config + """ + + #Make our lives easier by shortening some dictionary calls + cfg_ics = cfg['task_get_extrn_ics'] + + # If RUN_TASK_GET_EXTRN_ICS is explicitly set to false, do nothing and return + if 'workflow_switches' in cfg: + if 'RUN_TASK_GET_EXTRN_ICS' in cfg['workflow_switches']: + if cfg['workflow_switches']['RUN_TASK_GET_EXTRN_ICS'] is False: + return cfg_ics + + # If USE_USER_STAGED_EXTRN_FILES not specified or false, do nothing and return + if not cfg_ics.get('USE_USER_STAGED_EXTRN_FILES'): + logging.debug(f'USE_USER_STAGED_EXTRN_FILES not specified or False in task_get_extrn_ics section of config') + return cfg_ics + + # If EXTRN_MDL_SYSBASEDIR_ICS is "set_to_non_default_location_in_testing_script", replace with test value from machine file + if cfg_ics.get('EXTRN_MDL_SYSBASEDIR_ICS') == "set_to_non_default_location_in_testing_script": + if 'TEST_ALT_EXTRN_MDL_SYSBASEDIR_ICS' in mach['platform']: + if not os.path.isdir(mach['platform']['TEST_ALT_EXTRN_MDL_SYSBASEDIR_ICS']): + raise FileNotFoundError(f"Non-default input file location TEST_ALT_EXTRN_MDL_SYSBASEDIR_ICS from machine file does not exist or is not a directory") + cfg_ics['EXTRN_MDL_SYSBASEDIR_ICS'] = mach['platform']['TEST_ALT_EXTRN_MDL_SYSBASEDIR_ICS'] + else: + raise KeyError(f"Non-default input file location TEST_ALT_EXTRN_MDL_SYSBASEDIR_ICS not set in machine file") + return cfg_ics + + # Because USE_USER_STAGED_EXTRN_FILES is true, only look on disk, and ensure the staged data directory exists + cfg['platform']['EXTRN_MDL_DATA_STORES'] = "disk" + if 'TEST_EXTRN_MDL_SOURCE_BASEDIR' not in mach['platform']: + raise KeyError("TEST_EXTRN_MDL_SOURCE_BASEDIR, the directory for staged test data,"\ + "has not been specified in the machine file for this platform") + if not os.path.isdir(mach['platform']['TEST_EXTRN_MDL_SOURCE_BASEDIR']): + raise FileNotFoundError(dedent(f"""The directory 
for staged test data specified in this platform's machine file + TEST_EXTRN_MDL_SOURCE_BASEDIR = {mach['platform']['TEST_EXTRN_MDL_SOURCE_BASEDIR']} + does not exist.""")) + + # Different input data types have different directory structures, so set the data directory accordingly + if cfg_ics['EXTRN_MDL_NAME_ICS'] == 'FV3GFS': + if 'FV3GFS_FILE_FMT_ICS' not in cfg_ics: + cfg_ics['FV3GFS_FILE_FMT_ICS'] = dflt['task_get_extrn_ics']['FV3GFS_FILE_FMT_ICS'] + cfg_ics['EXTRN_MDL_SOURCE_BASEDIR_ICS'] = f"{mach['platform']['TEST_EXTRN_MDL_SOURCE_BASEDIR']}/"\ + f"{cfg_ics['EXTRN_MDL_NAME_ICS']}/{cfg_ics['FV3GFS_FILE_FMT_ICS']}/${{yyyymmddhh}}" + else: + cfg_ics['EXTRN_MDL_SOURCE_BASEDIR_ICS'] = f"{mach['platform']['TEST_EXTRN_MDL_SOURCE_BASEDIR']}/"\ + f"{cfg_ics['EXTRN_MDL_NAME_ICS']}/${{yyyymmddhh}}" + + return cfg_ics + +def check_task_get_extrn_lbcs(cfg: dict, mach: dict, dflt: dict) -> dict: + """ + Function for checking and updating various settings in task_get_extrn_lbcs section of test config yaml + + Args: + cfg : Dictionary loaded from test config file + mach : Dictionary loaded from machine settings file + dflt : Dictionary loaded from default config file + Returns: + cfg_lbcs : Updated dictionary for task_get_extrn_lbcs section of test config + """ + + #Make our lives easier by shortening some dictionary calls + cfg_lbcs = cfg['task_get_extrn_lbcs'] + + # If RUN_TASK_GET_EXTRN_LBCS is explicitly set to false, do nothing and return + if 'workflow_switches' in cfg: + if 'RUN_TASK_GET_EXTRN_LBCS' in cfg['workflow_switches']: + if cfg['workflow_switches']['RUN_TASK_GET_EXTRN_LBCS'] is False: + return cfg_lbcs + + # If USE_USER_STAGED_EXTRN_FILES not specified or false, do nothing and return + if not cfg_lbcs.get('USE_USER_STAGED_EXTRN_FILES'): + logging.debug(f'USE_USER_STAGED_EXTRN_FILES not specified or False in task_get_extrn_lbcs section of config') + return cfg_lbcs + + # If EXTRN_MDL_SYSBASEDIR_LBCS is "set_to_non_default_location_in_testing_script", replace 
with test value from machine file + if cfg_lbcs.get('EXTRN_MDL_SYSBASEDIR_LBCS') == "set_to_non_default_location_in_testing_script": + if 'TEST_ALT_EXTRN_MDL_SYSBASEDIR_LBCS' in mach['platform']: + if not os.path.isdir(mach['platform']['TEST_ALT_EXTRN_MDL_SYSBASEDIR_LBCS']): + raise FileNotFoundError(f"Non-default input file location TEST_ALT_EXTRN_MDL_SYSBASEDIR_LBCS from machine file does not exist or is not a directory") + cfg_lbcs['EXTRN_MDL_SYSBASEDIR_LBCS'] = mach['platform']['TEST_ALT_EXTRN_MDL_SYSBASEDIR_LBCS'] + else: + raise KeyError(f"Non-default input file location TEST_ALT_EXTRN_MDL_SYSBASEDIR_LBCS not set in machine file") + return cfg_lbcs + + # Because USE_USER_STAGED_EXTRN_FILES is true, only look on disk, and ensure the staged data directory exists + cfg['platform']['EXTRN_MDL_DATA_STORES'] = "disk" + if 'TEST_EXTRN_MDL_SOURCE_BASEDIR' not in mach['platform']: + raise KeyError("TEST_EXTRN_MDL_SOURCE_BASEDIR, the directory for staged test data,"\ + "has not been specified in the machine file for this platform") + if not os.path.isdir(mach['platform']['TEST_EXTRN_MDL_SOURCE_BASEDIR']): + raise FileNotFoundError(dedent(f"""The directory for staged test data specified in this platform's machine file + TEST_EXTRN_MDL_SOURCE_BASEDIR = {mach['platform']['TEST_EXTRN_MDL_SOURCE_BASEDIR']} + does not exist.""")) + + # Different input data types have different directory structures, so set the data directory accordingly + if cfg_lbcs['EXTRN_MDL_NAME_LBCS'] == 'FV3GFS': + if 'FV3GFS_FILE_FMT_LBCS' not in cfg_lbcs: + cfg_lbcs['FV3GFS_FILE_FMT_LBCS'] = dflt['task_get_extrn_lbcs']['FV3GFS_FILE_FMT_LBCS'] + cfg_lbcs['EXTRN_MDL_SOURCE_BASEDIR_LBCS'] = f"{mach['platform']['TEST_EXTRN_MDL_SOURCE_BASEDIR']}/"\ + f"{cfg_lbcs['EXTRN_MDL_NAME_LBCS']}/{cfg_lbcs['FV3GFS_FILE_FMT_LBCS']}/${{yyyymmddhh}}" + else: + cfg_lbcs['EXTRN_MDL_SOURCE_BASEDIR_LBCS'] = f"{mach['platform']['TEST_EXTRN_MDL_SOURCE_BASEDIR']}/"\ + f"{cfg_lbcs['EXTRN_MDL_NAME_LBCS']}/${{yyyymmddhh}}" + + return 
cfg_lbcs + +def setup_logging(logfile: str = "log.run_WE2E_tests", debug: bool = False) -> None: + """ + Sets up logging, printing high-priority (INFO and higher) messages to screen, and printing all + messages with detailed timing and routine info in the specified text file. + """ + logging.getLogger().setLevel(logging.DEBUG) + + formatter = logging.Formatter("%(name)-16s %(levelname)-8s %(message)s") + + fh = logging.FileHandler(logfile, mode='w') + fh.setLevel(logging.DEBUG) + fh.setFormatter(formatter) + logging.getLogger().addHandler(fh) + + logging.debug(f"Finished setting up debug file logging in {logfile}") + console = logging.StreamHandler() + if debug: + console.setLevel(logging.DEBUG) + else: + console.setLevel(logging.INFO) + logging.getLogger().addHandler(console) + logging.debug("Logging set up successfully") + + + +if __name__ == "__main__": + + # Check python version and presence of some non-standard packages + check_python_version() + + #Get the "Home" directory, two levels above this one + homedir=os.path.dirname(os.path.dirname(os.path.dirname(os.path.abspath(__file__)))) + logfile='log.run_WE2E_tests' + + #Parse arguments + parser = argparse.ArgumentParser(epilog="For more information about config arguments (denoted in CAPS), see ush/config_defaults.yaml\n") + optional = parser._action_groups.pop() # Create a group for optional arguments so they can be listed after required args + required = parser.add_argument_group('required arguments') + + required.add_argument('-m', '--machine', type=str, help='Machine name; see ush/machine/ for valid values', required=True) + required.add_argument('-a', '--account', type=str, help='Account name for running submitted jobs', required=True) + required.add_argument('-t', '--tests', type=str, nargs="*", help="""Can be one of three options (in order of priority): + 1. A test name or list of test names. + 2. A test suite name ("fundamental", "comprehensive", or "all") + 3. 
The name of a file (full or relative path) containing a list of test names. + """, required=True) + + parser.add_argument('-c', '--compiler', type=str, help='Compiler used for building the app', default='intel') + parser.add_argument('-d', '--debug', action='store_true', help='Script will be run in debug mode with more verbose output') + parser.add_argument('-q', '--quiet', action='store_true', help='Suppress console output from workflow generation; this will help keep the screen uncluttered') + + + parser.add_argument('--modulefile', type=str, help='Modulefile used for building the app') + parser.add_argument('--run_envir', type=str, help='Overrides RUN_ENVIR variable to a new value ( "nco" or "community" ) for all experiments', default='') + parser.add_argument('--expt_basedir', type=str, help='Explicitly set EXPT_BASEDIR for all experiments') + parser.add_argument('--exec_subdir', type=str, help='Explicitly set EXEC_SUBDIR for all experiments') + parser.add_argument('--use_cron_to_relaunch', action='store_true', help='Explicitly set USE_CRON_TO_RELAUNCH for all experiments; this option disables the "monitor" script functionality') + parser.add_argument('--cron_relaunch_intvl_mnts', type=str, help='Overrides CRON_RELAUNCH_INTVL_MNTS for all experiments') + parser.add_argument('--debug_tests', action='store_true', help='Explicitly set DEBUG=TRUE for all experiments') + parser.add_argument('--verbose_tests', action='store_true', help='Explicitly set VERBOSE=TRUE for all experiments') + + parser._action_groups.append(optional) + + args = parser.parse_args() + + #Set defaults that need other argument values + if args.modulefile is None: + args.modulefile = f'build_{args.machine.lower()}_{args.compiler}' + + #Call main function + + try: + run_we2e_tests(homedir,args) + except: + logging.exception( + dedent( + f""" + ********************************************************************* + FATAL ERROR: + Experiment generation failed. 
See the error message(s) printed below. + For more detailed information, check the log file from the workflow + generation script: {logfile} + *********************************************************************\n + """ + ) + ) diff --git a/tests/WE2E/run_WE2E_tests.sh b/tests/WE2E/run_WE2E_tests.sh index b11c237be7..d2319b6d87 100755 --- a/tests/WE2E/run_WE2E_tests.sh +++ b/tests/WE2E/run_WE2E_tests.sh @@ -396,9 +396,11 @@ The argument \"machine\" specifying the machine or platform on which to run the WE2E tests was not specified in the call to this script. \ ${help_msg}" fi +machine=${machine,,} + # Cheyenne-specific test limitation -if [ "${machine,,}" = "cheyenne" ]; then +if [ "${machine}" = "cheyenne" ]; then use_cron_to_relaunch=FALSE echo " Due to system limitations, the 'use_cron_to_relaunch' command can not be used on @@ -803,7 +805,7 @@ Please correct and rerun." save_USHdir=${USHdir} source_config ${USHdir}/config_defaults.yaml USHdir=${save_USHdir} - MACHINE_FILE=${machine_file:-"${USHdir}/machine/${machine,,}.yaml"} + MACHINE_FILE=${machine_file:-"${USHdir}/machine/${machine}.yaml"} source_config ${MACHINE_FILE} source_config ${test_config_fp} # diff --git a/tests/WE2E/test_configs/grids_extrn_mdls_suites_community/config.grid_RRFS_CONUS_25km_ics_FV3GFS_lbcs_FV3GFS_suite_GFS_v17_p8.yaml b/tests/WE2E/test_configs/grids_extrn_mdls_suites_community/config.grid_RRFS_CONUS_25km_ics_FV3GFS_lbcs_FV3GFS_suite_GFS_v17_p8.yaml new file mode 100644 index 0000000000..a95c4f04ec --- /dev/null +++ b/tests/WE2E/test_configs/grids_extrn_mdls_suites_community/config.grid_RRFS_CONUS_25km_ics_FV3GFS_lbcs_FV3GFS_suite_GFS_v17_p8.yaml @@ -0,0 +1,21 @@ +metadata: + description: |- + This test is to ensure that the workflow running in community mode + completes successfully on the RRFS_CONUS_25km grid using the GFS_v17_p8 + physics suite with ICs and LBCs derived from the FV3GFS. 
+user: + RUN_ENVIR: community +workflow: + CCPP_PHYS_SUITE: FV3_GFS_v17_p8 + PREDEF_GRID_NAME: RRFS_CONUS_25km + DATE_FIRST_CYCL: '2019070100' + DATE_LAST_CYCL: '2019070100' + FCST_LEN_HRS: 6 + PREEXISTING_DIR_METHOD: rename +task_get_extrn_ics: + EXTRN_MDL_NAME_ICS: FV3GFS + USE_USER_STAGED_EXTRN_FILES: true +task_get_extrn_lbcs: + EXTRN_MDL_NAME_LBCS: FV3GFS + LBC_SPEC_INTVL_HRS: 3 + USE_USER_STAGED_EXTRN_FILES: true diff --git a/tests/WE2E/test_configs/wflow_features/config.MET_ensemble_verification.yaml b/tests/WE2E/test_configs/verification/config.MET_ensemble_verification.yaml similarity index 86% rename from tests/WE2E/test_configs/wflow_features/config.MET_ensemble_verification.yaml rename to tests/WE2E/test_configs/verification/config.MET_ensemble_verification.yaml index 4299c2b795..d5b01f6816 100644 --- a/tests/WE2E/test_configs/wflow_features/config.MET_ensemble_verification.yaml +++ b/tests/WE2E/test_configs/verification/config.MET_ensemble_verification.yaml @@ -1,11 +1,9 @@ metadata: description: |- This test is to ensure that the workflow running in community mode - completes successfully with MET verification. + completes successfully with MET ensemble verification. 
user: RUN_ENVIR: community -platform: - MODEL: FV3_GFS_v15p2_CONUS_25km workflow: CCPP_PHYS_SUITE: FV3_GFS_v15p2 PREDEF_GRID_NAME: RRFS_CONUS_25km @@ -32,3 +30,5 @@ task_run_fcst: global: DO_ENSEMBLE: true NUM_ENS_MEMBERS: 2 +verification: + VX_FCST_MODEL_NAME: FV3_GFS_v15p2_CONUS_25km diff --git a/tests/WE2E/test_configs/wflow_features/config.MET_verification.yaml b/tests/WE2E/test_configs/verification/config.MET_verification.yaml similarity index 84% rename from tests/WE2E/test_configs/wflow_features/config.MET_verification.yaml rename to tests/WE2E/test_configs/verification/config.MET_verification.yaml index 06569a9195..c2a1398031 100644 --- a/tests/WE2E/test_configs/wflow_features/config.MET_verification.yaml +++ b/tests/WE2E/test_configs/verification/config.MET_verification.yaml @@ -1,11 +1,9 @@ metadata: description: |- This test is to ensure that the workflow running in community mode - completes successfully with MET verification. + completes successfully with MET deterministic verification. user: RUN_ENVIR: community -platform: - MODEL: FV3_GFS_v15p2_CONUS_25km workflow: CCPP_PHYS_SUITE: FV3_GFS_v15p2 PREDEF_GRID_NAME: RRFS_CONUS_25km @@ -27,3 +25,5 @@ task_get_extrn_lbcs: USE_USER_STAGED_EXTRN_FILES: true task_run_fcst: WTIME_RUN_FCST: 01:00:00 +verification: + VX_FCST_MODEL_NAME: FV3_GFS_v15p2_CONUS_25km diff --git a/tests/WE2E/test_configs/verification/config.MET_verification_only_vx.yaml b/tests/WE2E/test_configs/verification/config.MET_verification_only_vx.yaml new file mode 100644 index 0000000000..8421ee39dd --- /dev/null +++ b/tests/WE2E/test_configs/verification/config.MET_verification_only_vx.yaml @@ -0,0 +1,51 @@ +metadata: + description: |- + This test is to ensure that the workflow running in community mode + can successfully run the MET deterministic verification tasks but + using staged observation and forecast files (i.e. with other tasks + such as pre-processing, forecast, and post-processing deactivated). 
+user: + RUN_ENVIR: community +workflow: + PREDEF_GRID_NAME: RRFS_CONUS_25km + DATE_FIRST_CYCL: '2019061500' + DATE_LAST_CYCL: '2019061500' + FCST_LEN_HRS: 6 + PREEXISTING_DIR_METHOD: rename +workflow_switches: +# +# This test assumes that the post-processed forecast files are staged +# (i.e., not generated by running the forecast model). Thus, turn off +# pre-processing, forecast, post-processing, and other tasks ordinarily +# needed for generation of post-processed forecast files. +# + RUN_TASK_MAKE_GRID: false + RUN_TASK_MAKE_OROG: false + RUN_TASK_MAKE_SFC_CLIMO: false + RUN_TASK_GET_EXTRN_ICS: false + RUN_TASK_GET_EXTRN_LBCS: false + RUN_TASK_MAKE_ICS: false + RUN_TASK_MAKE_LBCS: false + RUN_TASK_RUN_FCST: false + RUN_TASK_RUN_POST: false +# +# This test assumes the observation files are staged. Thus, deactivate +# the GET_OBS_... tasks and instead specify the obs staging directories. +# + RUN_TASK_GET_OBS_CCPA: false + CCPA_OBS_DIR: '/scratch2/BMC/det/Gerard.Ketefian/UFS_CAM/DTC_ensemble_task/staged/obs/ccpa/proc' + RUN_TASK_GET_OBS_MRMS: false + MRMS_OBS_DIR: '/scratch2/BMC/det/Gerard.Ketefian/UFS_CAM/DTC_ensemble_task/staged/obs/mrms/proc' + RUN_TASK_GET_OBS_NDAS: false + NDAS_OBS_DIR: '/scratch2/BMC/det/Gerard.Ketefian/UFS_CAM/DTC_ensemble_task/staged/obs/ndas/proc' +# +# Turn on verification tasks. +# + RUN_TASK_VX_GRIDSTAT: true + RUN_TASK_VX_POINTSTAT: true +verification: + VX_FCST_MODEL_NAME: FV3_GFS_v15p2_CONUS_25km +# +# Since the forecast files are staged, specify the base staging directory. 
+# + VX_FCST_INPUT_BASEDIR: '/scratch2/BMC/det/Gerard.Ketefian/UFS_CAM/DTC_ensemble_task/staged/fcst_det' diff --git a/tests/WE2E/test_configs/wflow_features/config.deactivate_tasks.yaml b/tests/WE2E/test_configs/wflow_features/config.deactivate_tasks.yaml index 5defa0bf98..f17039df85 100644 --- a/tests/WE2E/test_configs/wflow_features/config.deactivate_tasks.yaml +++ b/tests/WE2E/test_configs/wflow_features/config.deactivate_tasks.yaml @@ -32,10 +32,3 @@ workflow_switches: RUN_TASK_MAKE_LBCS: false RUN_TASK_RUN_FCST: false RUN_TASK_RUN_POST: false -task_get_extrn_ics: - EXTRN_MDL_NAME_ICS: FV3GFS - USE_USER_STAGED_EXTRN_FILES: true -task_get_extrn_lbcs: - EXTRN_MDL_NAME_LBCS: FV3GFS - LBC_SPEC_INTVL_HRS: 3 - USE_USER_STAGED_EXTRN_FILES: true diff --git a/tests/WE2E/test_configs/wflow_features/config.get_from_AWS_ics_GEFS_lbcs_GEFS_fmt_grib2_2022040400_ensemble_2mems.yaml b/tests/WE2E/test_configs/wflow_features/config.get_from_AWS_ics_GEFS_lbcs_GEFS_fmt_grib2_2022040400_ensemble_2mems.yaml new file mode 100644 index 0000000000..1d73649615 --- /dev/null +++ b/tests/WE2E/test_configs/wflow_features/config.get_from_AWS_ics_GEFS_lbcs_GEFS_fmt_grib2_2022040400_ensemble_2mems.yaml @@ -0,0 +1,27 @@ +metadata: + description: |- + This test checks the capability of the workflow to retrieve from NOAA + AWS grib2-formatted output files generated by GEFS. 
+user: + RUN_ENVIR: community +platform: + EXTRN_MDL_DATA_STORES: aws +workflow: + CCPP_PHYS_SUITE: FV3_HRRR + PREDEF_GRID_NAME: RRFS_CONUS_3km + DATE_FIRST_CYCL: '2022040400' + DATE_LAST_CYCL: '2022040400' + FCST_LEN_HRS: 6 + PREEXISTING_DIR_METHOD: rename +task_get_extrn_ics: + EXTRN_MDL_NAME_ICS: GEFS + EXTRN_MDL_ICS_OFFSET_HRS: 6 + FV3GFS_FILE_FMT_ICS: grib2 +task_get_extrn_lbcs: + EXTRN_MDL_NAME_LBCS: GEFS + LBC_SPEC_INTVL_HRS: 6 + EXTRN_MDL_LBCS_OFFSET_HRS: 0 + FV3GFS_FILE_FMT_LBCS: grib2 +global: + DO_ENSEMBLE: true + NUM_ENS_MEMBERS: 2 diff --git a/tests/WE2E/test_configs/wflow_features/config.get_from_HPSS_ics_GDAS_lbcs_GDAS_fmt_netcdf_2022040400_ensemble_2mems.yaml b/tests/WE2E/test_configs/wflow_features/config.get_from_HPSS_ics_GDAS_lbcs_GDAS_fmt_netcdf_2022040400_ensemble_2mems.yaml new file mode 100644 index 0000000000..2a51b558e3 --- /dev/null +++ b/tests/WE2E/test_configs/wflow_features/config.get_from_HPSS_ics_GDAS_lbcs_GDAS_fmt_netcdf_2022040400_ensemble_2mems.yaml @@ -0,0 +1,27 @@ +metadata: + description: |- + This test checks the capability of the workflow to retrieve from NOAA + HPSS netcdf-formatted output files generated by GDAS. 
+user: + RUN_ENVIR: community +platform: + EXTRN_MDL_DATA_STORES: hpss +workflow: + CCPP_PHYS_SUITE: FV3_HRRR + PREDEF_GRID_NAME: RRFS_CONUS_3km + DATE_FIRST_CYCL: '2022040400' + DATE_LAST_CYCL: '2022040400' + FCST_LEN_HRS: 6 + PREEXISTING_DIR_METHOD: rename +task_get_extrn_ics: + EXTRN_MDL_NAME_ICS: GDAS + EXTRN_MDL_ICS_OFFSET_HRS: 6 + FV3GFS_FILE_FMT_ICS: netcdf +task_get_extrn_lbcs: + EXTRN_MDL_NAME_LBCS: GDAS + LBC_SPEC_INTVL_HRS: 6 + EXTRN_MDL_LBCS_OFFSET_HRS: 0 + FV3GFS_FILE_FMT_LBCS: netcdf +global: + DO_ENSEMBLE: true + NUM_ENS_MEMBERS: 2 diff --git a/ush/config.community.yaml b/ush/config.community.yaml index f211c74a1f..f06d617db6 100644 --- a/ush/config.community.yaml +++ b/ush/config.community.yaml @@ -6,7 +6,6 @@ user: MACHINE: hera ACCOUNT: an_account platform: - MODEL: FV3_GFS_v16_CONUS_25km MET_INSTALL_DIR: "" METPLUS_PATH: "" CCPA_OBS_DIR: "" @@ -50,3 +49,5 @@ task_plot_allvars: global: DO_ENSEMBLE: false NUM_ENS_MEMBERS: 2 +verification: + VX_FCST_MODEL_NAME: FV3_GFS_v16_CONUS_25km diff --git a/ush/config_defaults.yaml b/ush/config_defaults.yaml index 4974e79f50..18ee973f09 100644 --- a/ush/config_defaults.yaml +++ b/ush/config_defaults.yaml @@ -215,9 +215,6 @@ platform: # # Set METplus parameters. Definitions: # - # MODEL: - # String that specifies a descriptive name for the model being verified. - # # MET_INSTALL_DIR: # Location to top-level directory of MET installation. # @@ -235,7 +232,7 @@ platform: # precipitation files used by METplus are located. This parameter needs # to be set for both user-provided observations and for observations # that are retrieved from the NOAA HPSS (if the user has access) via - # the get_obs_ccpa_tn task (activated in workflow by setting + # the TN_GET_OBS_CCPA task (activated in workflow by setting # RUN_TASK_GET_OBS_CCPA=true). In the case of pulling observations # directly from NOAA HPSS, the data retrieved will be placed in this # directory. 
Please note, this path must be defind as @@ -260,7 +257,7 @@ platform: # reflectivity files used by METplus are located. This parameter needs # to be set for both user-provided observations and for observations # that are retrieved from the NOAA HPSS (if the user has access) via the - # get_obs_mrms_tn task (activated in workflow by setting + # TN_GET_OBS_MRMS task (activated in workflow by setting # RUN_TASK_GET_OBS_MRMS=true). In the case of pulling observations # directly from NOAA HPSS, the data retrieved will be placed in this # directory. Please note, this path must be defind as @@ -289,7 +286,7 @@ platform: # files used by METplus are located. This parameter needs to be set for # both user-provided observations and for observations that are # retrieved from the NOAA HPSS (if the user has access) via the - # get_obs_ndas_tn task (activated in workflow by settingĀ  + # TN_GET_OBS_NDAS task (activated in workflow by settingĀ  # RUN_TASK_GET_OBS_NDAS=true). In the case of pulling observations # directly from NOAA HPSS, the data retrieved will be placed in this # directory. Please note, this path must be defind as @@ -307,7 +304,6 @@ platform: # #----------------------------------------------------------------------- # - MODEL: "" MET_INSTALL_DIR: "" MET_BIN_EXEC: "" METPLUS_PATH: "" @@ -455,7 +451,8 @@ workflow: # EXPT_BASEDIR: # The base directory in which the experiment directory will be created. # If this is not specified or if it is set to an empty string, it will - # default to ${HOMEdir}/../expt_dirs. + # default to ${HOMEdir}/../expt_dirs. If set to a relative path, the + # path will be appended to the default value ${HOMEdir}/../expt_dirs # # EXPT_SUBDIR: # The name that the experiment directory (without the full path) will @@ -472,7 +469,7 @@ workflow: # installed. 
#----------------------------------------------------------------------- # - EXPT_BASEDIR: '{{ workflow.EXPT_BASEDIR }}' + EXPT_BASEDIR: '' # This will be set in setup.py prior to extend_yaml() being called EXPT_SUBDIR: '{{ EXPT_SUBDIR }}' EXEC_SUBDIR: "exec" EXPTDIR: '{{ [EXPT_BASEDIR, EXPT_SUBDIR]|path_join }}' @@ -568,9 +565,9 @@ workflow: # EXTRN_MDL_VAR_DEFNS_FN: # Name of file (a shell script) containing the defintions of variables # associated with the external model from which ICs or LBCs are generated. This - # file is created by the GET_EXTRN_*_TN task because the values of the variables + # file is created by the TN_GET_EXTRN_* task because the values of the variables # it contains are not known before this task runs. The file is then sourced by - # the MAKE_ICS_TN and MAKE_LBCS_TN tasks. + # the TN_MAKE_ICS and TN_MAKE_LBCS tasks. # # WFLOW_LAUNCH_SCRIPT_FN: # Name of the script that can be used to (re)launch the experiment's rocoto @@ -855,8 +852,9 @@ workflow: # GET_OBS: # Task modulefile name for all get_obs_* tasks # - # VX_TN: - # Task modulefile name for all verification tasks + # TN_VX: + # Task name to use in forming the modulefile name for all verification + # tasks. # #------------------------------------------------------------------------ # @@ -864,7 +862,7 @@ workflow: SYMLINK_FIX_FILES: true GET_OBS: "get_obs" - VX_TN: "run_vx" + TN_VX: "run_vx" #---------------------------- # NCO specific variables @@ -946,52 +944,52 @@ workflow_switches: #----------------------------------------------------------------------- # # Set flags (and related directories) that determine whether various - # workflow tasks should be run. Note that the MAKE_GRID_TN, MAKE_OROG_TN, - # and MAKE_SFC_CLIMO_TN are all cycle-independent tasks, i.e. if they + # workflow tasks should be run. Note that the TN_MAKE_GRID, TN_MAKE_OROG, + # and TN_MAKE_SFC_CLIMO are all cycle-independent tasks, i.e. 
if they # are to be run, they do so only once at the beginning of the workflow # before any cycles are run. Definitions: # # RUN_TASK_MAKE_GRID: - # Flag that determines whether the MAKE_GRID_TN task is to be run. If + # Flag that determines whether the TN_MAKE_GRID task is to be run. If # this is set to true, the grid generation task is run and new grid # files are generated. If it is set to false, then the scripts look # for pregenerated grid files in the directory specified by GRID_DIR # (see below). # # RUN_TASK_MAKE_OROG: - # Same as RUN_TASK_MAKE_GRID but for the MAKE_OROG_TN task. + # Same as RUN_TASK_MAKE_GRID but for the TN_MAKE_OROG task. # # RUN_TASK_MAKE_SFC_CLIMO: - # Same as RUN_TASK_MAKE_GRID but for the MAKE_SFC_CLIMO_TN task. + # Same as RUN_TASK_MAKE_GRID but for the TN_MAKE_SFC_CLIMO task. # # RUN_TASK_GET_EXTRN_ICS: - # Flag that determines whether the GET_EXTRN_ICS_TN task is to be run. + # Flag that determines whether the TN_GET_EXTRN_ICS task is to be run. # # RUN_TASK_GET_EXTRN_LBCS: - # Flag that determines whether the GET_EXTRN_LBCS_TN task is to be run. + # Flag that determines whether the TN_GET_EXTRN_LBCS task is to be run. # # RUN_TASK_MAKE_ICS: - # Flag that determines whether the MAKE_ICS_TN task is to be run. + # Flag that determines whether the TN_MAKE_ICS task is to be run. # # RUN_TASK_MAKE_LBCS: - # Flag that determines whether the MAKE_LBCS_TN task is to be run. + # Flag that determines whether the TN_MAKE_LBCS task is to be run. # # RUN_TASK_RUN_FCST: - # Flag that determines whether the RUN_FCST_TN task is to be run. + # Flag that determines whether the TN_RUN_FCST task is to be run. # # RUN_TASK_RUN_POST: - # Flag that determines whether the RUN_POST_TN task is to be run. + # Flag that determines whether the TN_RUN_POST task is to be run. 
# # RUN_TASK_GET_OBS_CCPA: - # Flag that determines whether to run the GET_OBS_CCPA_TN task, which + # Flag that determines whether to run the TN_GET_OBS_CCPA task, which # retrieves the CCPA hourly precipitation files used by METplus from NOAA HPSS. # # RUN_TASK_GET_OBS_MRMS: - # Flag that determines whether to run the GET_OBS_MRMS_TN task, which + # Flag that determines whether to run the TN_GET_OBS_MRMS task, which # retrieves the MRMS composite reflectivity files used by METplus from NOAA HPSS. # # RUN_TASK_GET_OBS_NDAS: - # Flag that determines whether to run the GET_OBS_NDAS_TN task, which + # Flag that determines whether to run the TN_GET_OBS_NDAS task, which # retrieves the NDAS PrepBufr files used by METplus from NOAA HPSS. # # RUN_TASK_VX_GRIDSTAT: @@ -1040,7 +1038,7 @@ workflow_switches: # MAKE GRID config parameters #----------------------------- task_make_grid: - MAKE_GRID_TN: "make_grid" + TN_MAKE_GRID: "make_grid" NNODES_MAKE_GRID: 1 PPN_MAKE_GRID: 24 WTIME_MAKE_GRID: 00:20:00 @@ -1327,7 +1325,7 @@ task_make_grid: # MAKE OROG config parameters #----------------------------- task_make_orog: - MAKE_OROG_TN: "make_orog" + TN_MAKE_OROG: "make_orog" NNODES_MAKE_OROG: 1 PPN_MAKE_OROG: 24 WTIME_MAKE_OROG: 00:20:00 @@ -1341,7 +1339,7 @@ task_make_orog: # MAKE SFC CLIMO config parameters #----------------------------- task_make_sfc_climo: - MAKE_SFC_CLIMO_TN: "make_sfc_climo" + TN_MAKE_SFC_CLIMO: "make_sfc_climo" NNODES_MAKE_SFC_CLIMO: 2 PPN_MAKE_SFC_CLIMO: 24 WTIME_MAKE_SFC_CLIMO: 00:20:00 @@ -1355,7 +1353,7 @@ task_make_sfc_climo: # EXTRN ICS config parameters #----------------------------- task_get_extrn_ics: - GET_EXTRN_ICS_TN: "get_extrn_ics" + TN_GET_EXTRN_ICS: "get_extrn_ics" NNODES_GET_EXTRN_ICS: 1 PPN_GET_EXTRN_ICS: 1 MEM_GET_EXTRN_ICS: 2G @@ -1454,7 +1452,7 @@ task_get_extrn_ics: # EXTRN LBCS config parameters #----------------------------- task_get_extrn_lbcs: - GET_EXTRN_LBCS_TN: "get_extrn_lbcs" + TN_GET_EXTRN_LBCS: "get_extrn_lbcs" 
NNODES_GET_EXTRN_LBCS: 1 PPN_GET_EXTRN_LBCS: 1 MEM_GET_EXTRN_LBCS: 2G @@ -1535,7 +1533,7 @@ task_get_extrn_lbcs: # MAKE ICS config parameters #----------------------------- task_make_ics: - MAKE_ICS_TN: "make_ics" + TN_MAKE_ICS: "make_ics" NNODES_MAKE_ICS: 4 PPN_MAKE_ICS: 12 WTIME_MAKE_ICS: 00:30:00 @@ -1580,7 +1578,7 @@ task_make_ics: # MAKE LBCS config parameters #----------------------------- task_make_lbcs: - MAKE_LBCS_TN: "make_lbcs" + TN_MAKE_LBCS: "make_lbcs" NNODES_MAKE_LBCS: 4 PPN_MAKE_LBCS: 12 WTIME_MAKE_LBCS: 00:30:00 @@ -1594,7 +1592,7 @@ task_make_lbcs: # FORECAST config parameters #----------------------------- task_run_fcst: - RUN_FCST_TN: "run_fcst" + TN_RUN_FCST: "run_fcst" NNODES_RUN_FCST: '{{ (PE_MEMBER01 + PPN_RUN_FCST - 1) // PPN_RUN_FCST }}' PPN_RUN_FCST: '{{ platform.NCORES_PER_NODE // OMP_NUM_THREADS_RUN_FCST }}' WTIME_RUN_FCST: 04:30:00 @@ -1658,7 +1656,7 @@ task_run_fcst: # WRITE_DOPOST: # Flag that determines whether or not to use the inline post feature # [i.e. calling the Unified Post Processor (UPP) from within the weather - # model]. If this is set to true, the RUN_POST_TN task is deactivated + # model]. If this is set to true, the TN_RUN_POST task is deactivated # (i.e. RUN_TASK_RUN_POST is set to false) to avoid unnecessary # computations. 
# @@ -1840,13 +1838,13 @@ task_run_fcst: # #----------------------------------------------------------------------- # - USE_MERRA_CLIMO: '{{ workflow.CCPP_PHYS_SUITE == "FV3_GFS_v15_thompson_mynn_lam3km" }}' + USE_MERRA_CLIMO: '{{ workflow.CCPP_PHYS_SUITE == "FV3_GFS_v15_thompson_mynn_lam3km" or workflow.CCPP_PHYS_SUITE == "FV3_GFS_v17_p8" }}' #---------------------------- # POST config parameters #----------------------------- task_run_post: - RUN_POST_TN: "run_post" + TN_RUN_POST: "run_post" NNODES_RUN_POST: 2 PPN_RUN_POST: 24 WTIME_RUN_POST: 00:15:00 @@ -1900,7 +1898,7 @@ task_run_post: # # POST_OUTPUT_DOMAIN_NAME: # Domain name (in lowercase) used in constructing the names of the output - # files generated by UPP [which is called either by running the RUN_POST_TN + # files generated by UPP [which is called either by running the TN_RUN_POST # task or by activating the inline post feature (WRITE_DOPOST set to true)]. # The post output files are named as follows: # @@ -1921,7 +1919,7 @@ task_run_post: # GET OBS CCPA config parameters #----------------------------- task_get_obs_ccpa: - GET_OBS_CCPA_TN: "get_obs_ccpa" + TN_GET_OBS_CCPA: "get_obs_ccpa" NNODES_GET_OBS_CCPA: 1 PPN_GET_OBS_CCPA: 1 MEM_GET_OBS_CCPA: 2G @@ -1932,7 +1930,7 @@ task_get_obs_ccpa: # GET OBS MRMS config parameters #----------------------------- task_get_obs_mrms: - GET_OBS_MRMS_TN: "get_obs_mrms" + TN_GET_OBS_MRMS: "get_obs_mrms" NNODES_GET_OBS_MRMS: 1 PPN_GET_OBS_MRMS: 1 MEM_GET_OBS_MRMS: 2G @@ -1943,7 +1941,7 @@ task_get_obs_mrms: # GET OBS NDAS config parameters #----------------------------- task_get_obs_ndas: - GET_OBS_NDAS_TN: "get_obs_ndas" + TN_GET_OBS_NDAS: "get_obs_ndas" NNODES_GET_OBS_NDAS: 1 PPN_GET_OBS_NDAS: 1 MEM_GET_OBS_NDAS: 2G @@ -1954,7 +1952,7 @@ task_get_obs_ndas: # VX_GRIDSTAT config parameters #----------------------------- task_run_vx_gridstat: - VX_GRIDSTAT_TN: "run_gridstatvx" + TN_VX_GRIDSTAT: "run_gridstatvx" NNODES_VX_GRIDSTAT: 1 PPN_VX_GRIDSTAT: 1 MEM_VX_GRIDSTAT: 
2G @@ -1965,7 +1963,7 @@ task_run_vx_gridstat: # VX_GRIDSTAT_REFC config parameters #----------------------------- task_run_vx_gridstat_refc: - VX_GRIDSTAT_REFC_TN: "run_gridstatvx_refc" + TN_VX_GRIDSTAT_REFC: "run_gridstatvx_refc" NNODES_VX_GRIDSTAT: 1 PPN_VX_GRIDSTAT: 1 MEM_VX_GRIDSTAT: 2G @@ -1976,7 +1974,7 @@ task_run_vx_gridstat_refc: # VX_GRIDSTAT_RETOP config parameters #----------------------------- task_run_vx_gridstat_retop: - VX_GRIDSTAT_RETOP_TN: "run_gridstatvx_retop" + TN_VX_GRIDSTAT_RETOP: "run_gridstatvx_retop" NNODES_VX_GRIDSTAT: 1 PPN_VX_GRIDSTAT: 1 MEM_VX_GRIDSTAT: 2G @@ -1987,7 +1985,7 @@ task_run_vx_gridstat_retop: # VX_GRIDSTAT_03h config parameters #----------------------------- task_run_vx_gridstat_03h: - VX_GRIDSTAT_03h_TN: "run_gridstatvx_03h" + TN_VX_GRIDSTAT_03h: "run_gridstatvx_03h" NNODES_VX_GRIDSTAT: 1 PPN_VX_GRIDSTAT: 1 MEM_VX_GRIDSTAT: 2G @@ -1998,7 +1996,7 @@ task_run_vx_gridstat_03h: # VX_GRIDSTAT_06h config parameters #----------------------------- task_run_vx_gridstat_06h: - VX_GRIDSTAT_06h_TN: "run_gridstatvx_06h" + TN_VX_GRIDSTAT_06h: "run_gridstatvx_06h" NNODES_VX_GRIDSTAT: 1 PPN_VX_GRIDSTAT: 1 MEM_VX_GRIDSTAT: 2G @@ -2009,7 +2007,7 @@ task_run_vx_gridstat_06h: # VX_GRIDSTAT_24h config parameters #----------------------------- task_run_vx_gridstat_24h: - VX_GRIDSTAT_24h_TN: "run_gridstatvx_24h" + TN_VX_GRIDSTAT_24h: "run_gridstatvx_24h" NNODES_VX_GRIDSTAT: 1 PPN_VX_GRIDSTAT: 1 MEM_VX_GRIDSTAT: 2G @@ -2020,7 +2018,7 @@ task_run_vx_gridstat_24h: # VX_POINTSTAT config parameters #----------------------------- task_run_vx_pointstat: - VX_POINTSTAT_TN: "run_pointstatvx" + TN_VX_POINTSTAT: "run_pointstatvx" NNODES_VX_POINTSTAT: 1 PPN_VX_POINTSTAT: 1 MEM_VX_POINTSTAT: 2G @@ -2031,15 +2029,15 @@ task_run_vx_pointstat: # VX_ENSGRID config parameters #----------------------------- task_run_vx_ensgrid: - VX_ENSGRID_TN: "run_ensgridvx" + TN_VX_ENSGRID: "run_ensgridvx" MAXTRIES_VX_ENSGRID: 2 - VX_ENSGRID_03h_TN: "run_ensgridvx_03h" + 
TN_VX_ENSGRID_03h: "run_ensgridvx_03h" MAXTRIES_VX_ENSGRID_03h: 2 - VX_ENSGRID_06h_TN: "run_ensgridvx_06h" + TN_VX_ENSGRID_06h: "run_ensgridvx_06h" MAXTRIES_VX_ENSGRID_06h: 2 - VX_ENSGRID_24h_TN: "run_ensgridvx_24h" + TN_VX_ENSGRID_24h: "run_ensgridvx_24h" MAXTRIES_VX_ENSGRID_24h: 2 - VX_ENSGRID_RETOP_TN: "run_ensgridvx_retop" + TN_VX_ENSGRID_RETOP: "run_ensgridvx_retop" MAXTRIES_VX_ENSGRID_RETOP: 2 NNODES_VX_ENSGRID: 1 PPN_VX_ENSGRID: 1 @@ -2051,7 +2049,7 @@ task_run_vx_ensgrid: # VX_ENSGRID_REFC config parameters #----------------------------- task_run_vx_ensgrid_refc: - VX_ENSGRID_REFC_TN: "run_ensgridvx_refc" + TN_VX_ENSGRID_REFC: "run_ensgridvx_refc" NNODES_VX_ENSGRID: 1 PPN_VX_ENSGRID: 1 MEM_VX_ENSGRID: 2G @@ -2062,7 +2060,7 @@ task_run_vx_ensgrid_refc: # VX_ENSGRID_MEAN config parameters #----------------------------- task_run_vx_ensgrid_mean: - VX_ENSGRID_MEAN_TN: "run_ensgridvx_mean" + TN_VX_ENSGRID_MEAN: "run_ensgridvx_mean" NNODES_VX_ENSGRID_MEAN: 1 PPN_VX_ENSGRID_MEAN: 1 MEM_VX_ENSGRID_MEAN: 2G @@ -2073,7 +2071,7 @@ task_run_vx_ensgrid_mean: # VX_ENSGRID_MEAN_03h config parameters #----------------------------- task_run_vx_ensgrid_mean_03h: - VX_ENSGRID_MEAN_03h_TN: "run_ensgridvx_mean_03h" + TN_VX_ENSGRID_MEAN_03h: "run_ensgridvx_mean_03h" NNODES_VX_ENSGRID_MEAN: 1 PPN_VX_ENSGRID_MEAN: 1 MEM_VX_ENSGRID_MEAN: 2G @@ -2084,7 +2082,7 @@ task_run_vx_ensgrid_mean_03h: # VX_ENSGRID_MEAN_06h config parameters #----------------------------- task_run_vx_ensgrid_mean_06h: - VX_ENSGRID_MEAN_06h_TN: "run_ensgridvx_mean_06h" + TN_VX_ENSGRID_MEAN_06h: "run_ensgridvx_mean_06h" NNODES_VX_ENSGRID_MEAN: 1 PPN_VX_ENSGRID_MEAN: 1 MEM_VX_ENSGRID_MEAN: 2G @@ -2095,7 +2093,7 @@ task_run_vx_ensgrid_mean_06h: # VX_ENSGRID_MEAN_24h config parameters #----------------------------- task_run_vx_ensgrid_mean_24h: - VX_ENSGRID_MEAN_24h_TN: "run_ensgridvx_mean_24h" + TN_VX_ENSGRID_MEAN_24h: "run_ensgridvx_mean_24h" NNODES_VX_ENSGRID_MEAN: 1 PPN_VX_ENSGRID_MEAN: 1 MEM_VX_ENSGRID_MEAN: 
2G @@ -2106,11 +2104,11 @@ task_run_vx_ensgrid_mean_24h: # VX_ENSGRID_PROB config parameters #----------------------------- task_run_vx_ensgrid_prob: - VX_ENSGRID_PROB_TN: "run_ensgridvx_prob" + TN_VX_ENSGRID_PROB: "run_ensgridvx_prob" NNODES_VX_ENSGRID_PROB: 1 - VX_ENSGRID_PROB_RETOP_TN: "run_ensgridvx_prob_retop" + TN_VX_ENSGRID_PROB_RETOP: "run_ensgridvx_prob_retop" MAXTRIES_VX_ENSGRID_PROB_RETOP: 2 - VX_ENSGRID_PROB_REFC_TN: "run_ensgridvx_prob_refc" + TN_VX_ENSGRID_PROB_REFC: "run_ensgridvx_prob_refc" MAXTRIES_VX_ENSGRID_PROB_REFC: 2 PPN_VX_ENSGRID_PROB: 1 MEM_VX_ENSGRID_PROB: 2G @@ -2121,7 +2119,7 @@ task_run_vx_ensgrid_prob: # VX_ENSGRID_PROB_03h config parameters #----------------------------- task_run_vx_ensgrid_prob_03h: - VX_ENSGRID_PROB_03h_TN: "run_ensgridvx_prob_03h" + TN_VX_ENSGRID_PROB_03h: "run_ensgridvx_prob_03h" NNODES_VX_ENSGRID_PROB: 1 PPN_VX_ENSGRID_PROB: 1 MEM_VX_ENSGRID_PROB: 2G @@ -2132,7 +2130,7 @@ task_run_vx_ensgrid_prob_03h: # VX_ENSGRID_PROB_06h config parameters #----------------------------- task_run_vx_ensgrid_prob_06h: - VX_ENSGRID_PROB_06h_TN: "run_ensgridvx_prob_06h" + TN_VX_ENSGRID_PROB_06h: "run_ensgridvx_prob_06h" NNODES_VX_ENSGRID_PROB: 1 PPN_VX_ENSGRID_PROB: 1 MEM_VX_ENSGRID_PROB: 2G @@ -2143,7 +2141,7 @@ task_run_vx_ensgrid_prob_06h: # VX_ENSGRID_PROB_24h config parameters #----------------------------- task_run_vx_ensgrid_prob_24h: - VX_ENSGRID_PROB_24h_TN: "run_ensgridvx_prob_24h" + TN_VX_ENSGRID_PROB_24h: "run_ensgridvx_prob_24h" NNODES_VX_ENSGRID_PROB: 1 PPN_VX_ENSGRID_PROB: 1 MEM_VX_ENSGRID_PROB: 2G @@ -2154,7 +2152,7 @@ task_run_vx_ensgrid_prob_24h: # VX_ENSPOINT config parameters #----------------------------- task_run_vx_enspoint: - VX_ENSPOINT_TN: "run_enspointvx" + TN_VX_ENSPOINT: "run_enspointvx" NNODES_VX_ENSPOINT: 1 PPN_VX_ENSPOINT: 1 MEM_VX_ENSPOINT: 2G @@ -2165,7 +2163,7 @@ task_run_vx_enspoint: # VX_ENSPOINT_MEAN config parameters #----------------------------- task_run_vx_enspoint_mean: - 
VX_ENSPOINT_MEAN_TN: "run_enspointvx_mean" + TN_VX_ENSPOINT_MEAN: "run_enspointvx_mean" NNODES_VX_ENSPOINT_MEAN: 1 PPN_VX_ENSPOINT_MEAN: 1 MEM_VX_ENSPOINT_MEAN: 2G @@ -2176,7 +2174,7 @@ task_run_vx_enspoint_mean: # VX_ENSPOINT_PROB config parameters #----------------------------- task_run_vx_enspoint_prob: - VX_ENSPOINT_PROB_TN: "run_enspointvx_prob" + TN_VX_ENSPOINT_PROB: "run_enspointvx_prob" NNODES_VX_ENSPOINT_PROB: 1 PPN_VX_ENSPOINT_PROB: 1 MEM_VX_ENSPOINT_PROB: 2G @@ -2187,7 +2185,7 @@ task_run_vx_enspoint_prob: # PLOT_ALLVARS config parameters #----------------------------- task_plot_allvars: - PLOT_ALLVARS_TN: "plot_allvars" + TN_PLOT_ALLVARS: "plot_allvars" NNODES_PLOT_ALLVARS: 1 PPN_PLOT_ALLVARS: 24 WTIME_PLOT_ALLVARS: 01:00:00 @@ -2210,6 +2208,10 @@ task_plot_allvars: # By default the end is FCST_LEN_HRS #----------------------------------- PLOT_FCST_END: "" + #------------------------------------------------------------------------------ + # Domains to plot. Currently supported are either "conus" or "regional" or both + #------------------------------------------------------------------------------- + PLOT_DOMAINS: ["conus"] #---------------------------- @@ -2370,3 +2372,21 @@ global: # #----------------------------------------------------------------------- # + +#---------------------------- +# verification parameters +# +# VX_FCST_MODEL_NAME: +# String that specifies a descriptive name for the model being verified. +# This is used in forming the names of the verification output files as +# well as in the contents of those files. +# +# VX_FCST_INPUT_BASEDIR: +# Location of top-level directory containing forecast (but not obs) files +# that will be used as input into METplus for verification. If not +# specified, this gets set to EXPTDIR. 
+# +#----------------------------- +verification: + VX_FCST_MODEL_NAME: '{{ nco.NET }}.{{ task_run_post.POST_OUTPUT_DOMAIN_NAME }}' + VX_FCST_INPUT_BASEDIR: '{{ workflow.EXPTDIR }}' diff --git a/ush/generate_FV3LAM_wflow.py b/ush/generate_FV3LAM_wflow.py index f8637e71e5..9b8cd32ae3 100755 --- a/ush/generate_FV3LAM_wflow.py +++ b/ush/generate_FV3LAM_wflow.py @@ -40,19 +40,20 @@ from check_python_version import check_python_version -def generate_FV3LAM_wflow(ushdir, logfile: str = "log.generate_FV3LAM_wflow") -> None: +def generate_FV3LAM_wflow(ushdir, logfile: str = "log.generate_FV3LAM_wflow", debug: bool = False) -> str: """Function to setup a forecast experiment and create a workflow (according to the parameters specified in the config file) Args: - ushdir (str): The full path of the ush/ directory where this script is located - logfile (str): The name of the file where logging is written + ushdir (str) : The full path of the ush/ directory where this script is located + logfile (str) : The name of the file where logging is written + debug (bool): Enable extra output for debugging Returns: - None + EXPTDIR (str) : The full path of the directory where this experiment has been generated """ # Set up logging to write to screen and logfile - setup_logging(logfile) + setup_logging(logfile, debug) # Check python version and presence of some non-standard packages check_python_version() @@ -67,7 +68,7 @@ def generate_FV3LAM_wflow(ushdir, logfile: str = "log.generate_FV3LAM_wflow") -> # The setup function reads the user configuration file and fills in # non-user-specified values from config_defaults.yaml - expt_config = setup(ushdir) + expt_config = setup(ushdir,debug=debug) verbose = expt_config["workflow"]["VERBOSE"] # @@ -655,12 +656,12 @@ def generate_FV3LAM_wflow(ushdir, logfile: str = "log.generate_FV3LAM_wflow") -> + settings_str ) # - # If not running the MAKE_GRID_TN task (which implies the workflow will + # If not running the TN_MAKE_GRID task (which implies 
the workflow will # use pregenerated grid files), set the namelist variables specifying # the paths to surface climatology files. These files are located in # (or have symlinks that point to them) in the FIXlam directory. # - # Note that if running the MAKE_GRID_TN task, this action usually cannot + # Note that if running the TN_MAKE_GRID task, this action usually cannot # be performed here but must be performed in that task because the names # of the surface climatology files depend on the CRES parameter (which is # the C-resolution of the grid), and this parameter is in most workflow @@ -681,20 +682,6 @@ def generate_FV3LAM_wflow(ushdir, logfile: str = "log.generate_FV3LAM_wflow") -> # cp_vrfy(os.path.join(ushdir, EXPT_CONFIG_FN), EXPTDIR) - # Note workflow generation completion - log_info( - f""" - ======================================================================== - ======================================================================== - - Experiment generation completed. The experiment directory is: - - EXPTDIR='{EXPTDIR}' - - ======================================================================== - ======================================================================== - """ - ) # # ----------------------------------------------------------------------- # @@ -744,21 +731,36 @@ def generate_FV3LAM_wflow(ushdir, logfile: str = "log.generate_FV3LAM_wflow") -> # If we got to this point everything was successful: move the log file to the experiment directory. mv_vrfy(logfile, EXPTDIR) + return EXPTDIR -def setup_logging(logfile: str = "log.generate_FV3LAM_wflow") -> None: + +def setup_logging(logfile: str = "log.generate_FV3LAM_wflow", debug: bool = False) -> None: """ Sets up logging, printing high-priority (INFO and higher) messages to screen, and printing all messages with detailed timing and routine info in the specified text file. + + If debug = True, print all messages to both screen and log file. 
""" - logging.basicConfig( - level=logging.DEBUG, - format="%(name)-22s %(levelname)-8s %(message)s", - filename=logfile, - filemode="w", - ) + logging.getLogger().setLevel(logging.DEBUG) + + formatter = logging.Formatter("%(name)-22s %(levelname)-8s %(message)s") + + fh = logging.FileHandler(logfile, mode='w') + fh.setLevel(logging.DEBUG) + fh.setFormatter(formatter) + logging.getLogger().addHandler(fh) logging.debug(f"Finished setting up debug file logging in {logfile}") + + # If there are already multiple handlers, that means generate_FV3LAM_workflow was called from another function. + # In that case, do not change the console (print-to-screen) logging. + if len(logging.getLogger().handlers) > 1: + return + console = logging.StreamHandler() - console.setLevel(logging.INFO) + if debug: + console.setLevel(logging.DEBUG) + else: + console.setLevel(logging.INFO) logging.getLogger().addHandler(console) logging.debug("Logging set up successfully") @@ -771,7 +773,7 @@ def setup_logging(logfile: str = "log.generate_FV3LAM_wflow") -> None: # Call the generate_FV3LAM_wflow function defined above to generate the # experiment/workflow. try: - generate_FV3LAM_wflow(USHdir, wflow_logfile) + expt_dir = generate_FV3LAM_wflow(USHdir, wflow_logfile) except: logging.exception( dedent( @@ -785,6 +787,21 @@ def setup_logging(logfile: str = "log.generate_FV3LAM_wflow") -> None: """ ) ) + + # Note workflow generation completion + log_info( + f""" + ======================================================================== + ======================================================================== + + Experiment generation completed. 
The experiment directory is: + + EXPTDIR='{EXPTDIR}' + + ======================================================================== + ======================================================================== + """ + ) class Testing(unittest.TestCase): diff --git a/ush/link_fix.py b/ush/link_fix.py index 0351c7a7d2..3aa0ffa5b7 100755 --- a/ush/link_fix.py +++ b/ush/link_fix.py @@ -208,7 +208,7 @@ def link_fix( f"C*{dot_or_uscore}oro_data.tile{tile_rgnl}.halo{nh0}.nc", f"C*{dot_or_uscore}oro_data.tile{tile_rgnl}.halo{nh4}.nc", ] - if ccpp_phys_suite == "FV3_HRRR": + if ccpp_phys_suite == "FV3_HRRR" or ccpp_phys_suite == "FV3_GFS_v17_p8": fns += [ f"C*{dot_or_uscore}oro_data_ss.tile{tile_rgnl}.halo{nh0}.nc", f"C*{dot_or_uscore}oro_data_ls.tile{tile_rgnl}.halo{nh0}.nc", @@ -310,7 +310,7 @@ def link_fix( # ----------------------------------------------------------------------- # # If the task in consideration (one of the pre-processing tasks - # MAKE_GRID_TN, MAKE_OROG_TN, and MAKE_SFC_CLIMO_TN) was run, then + # TN_MAKE_GRID, TN_MAKE_OROG, and TN_MAKE_SFC_CLIMO) was run, then # the source location of the fix files will be located under the # experiment directory. In this case, we use relative symlinks for # portability and readability. Make absolute links otherwise. 
diff --git a/ush/machine/hera.yaml b/ush/machine/hera.yaml index 3a7769a557..1038c3bd9d 100644 --- a/ush/machine/hera.yaml +++ b/ush/machine/hera.yaml @@ -32,3 +32,6 @@ platform: FIXsfc: /scratch2/BMC/det/UFS_SRW_App/develop/fix/fix_sfc_climo FIXshp: /scratch2/BMC/det/UFS_SRW_App/develop/NaturalEarth EXTRN_MDL_DATA_STORES: hpss aws nomads +data: + obs: + RAP_obs: /scratch2/BMC/public/data/grids/rap/obs diff --git a/ush/machine/jet.yaml b/ush/machine/jet.yaml index 22f945335d..ae6f8a8b6e 100644 --- a/ush/machine/jet.yaml +++ b/ush/machine/jet.yaml @@ -40,3 +40,8 @@ data: netcdf: /public/data/grids/gfs/anl/netcdf RAP: /public/data/grids/rap/full/wrfprs/grib2 HRRR: /public/data/grids/hrrr/conus/wrfprs/grib2 + obs: + RAP_obs: /public/data/grids/rap/obs + GFS_obs: + prepbufr: /public/data/grids/gfs/prepbufr + tcvitals: /public/data/grids/gfs/bufr diff --git a/ush/machine/noaacloud.yaml b/ush/machine/noaacloud.yaml index c1e96b362b..d0af150935 100644 --- a/ush/machine/noaacloud.yaml +++ b/ush/machine/noaacloud.yaml @@ -24,6 +24,7 @@ platform: FIXorg: /contrib/EPIC/UFS_SRW_App/develop/fix/fix_orog FIXsfc: /contrib/EPIC/UFS_SRW_App/develop/fix/fix_sfc_climo FIXshp: /contrib/EPIC/UFS_SRW_App/develop/NaturalEarth + FIXgsi: /contrib/EPIC/UFS_SRW_App/develop/fix/fix_gsi EXTRN_MDL_DATA_STORES: aws nomads data: ics_lbcs: diff --git a/ush/python_utils/config_parser.py b/ush/python_utils/config_parser.py index c09ff8c9c5..6510af62eb 100644 --- a/ush/python_utils/config_parser.py +++ b/ush/python_utils/config_parser.py @@ -465,25 +465,26 @@ def update_dict(dict_o, dict_t, provide_default=False): def check_structure_dict(dict_o, dict_t): """Check if a dictionary's structure follows a template. - The invalid entries are printed to the screen. + The invalid entries are returned as a list of lists. 
+ If all entries are valid, returns an empty dictionary Args: dict_o: target dictionary dict_t: template dictionary to compare structure to Returns: - Boolean + dict: Invalid key-value pairs. """ + inval = {} for k, v in dict_o.items(): if k in dict_t.keys(): v1 = dict_t[k] if isinstance(v, dict) and isinstance(v1, dict): r = check_structure_dict(v, v1) - if not r: - return False + if r: + inval.update(r) else: - print(f"INVALID ENTRY: {k}={v}") - return False - return True + inval[k] = v + return inval def filter_dict(dict_o, keys_regex): @@ -579,9 +580,11 @@ def cfg_main(): cfg_t = load_config_file(args.validate, 1) r = check_structure_dict(cfg, cfg_t) if r: - print("SUCCESS") - else: + for k in r: + print(f"INVALID ENTRY: {k}={r[k]}") print("FAILURE") + else: + print("SUCCESS") else: if args.template: cfg = flatten_dict(cfg) diff --git a/ush/retrieve_data.py b/ush/retrieve_data.py index e6ad879fbc..081bfb138c 100755 --- a/ush/retrieve_data.py +++ b/ush/retrieve_data.py @@ -32,6 +32,7 @@ import shutil import subprocess import sys +import glob from textwrap import dedent import time from copy import deepcopy @@ -81,7 +82,7 @@ def copy_file(source, destination, copy_cmd): """ if not os.path.exists(source): - logging.info(f"File does not exist on disk \n {source}") + logging.info(f"File does not exist on disk \n {source} \n try using: --input_file_path ") return False # Using subprocess here because system copy is much faster than @@ -183,7 +184,7 @@ def fill_template(template_str, cycle_date, templates_only=False, **kwargs): Return: filled template string """ - + # Parse keyword args ens_group = kwargs.get("ens_group") fcst_hr = kwargs.get("fcst_hr", 0) @@ -224,7 +225,7 @@ def fill_template(template_str, cycle_date, templates_only=False, **kwargs): if templates_only: return f'{",".join((format_values.keys()))}' return template_str.format(**format_values) - + def create_target_path(target_path): @@ -294,7 +295,7 @@ def get_file_templates(cla, known_data_info, 
data_store, use_cla_tmpl=False): # Remove sfc files from fcst in file_names of external models for LBCs # sfc files needed in fcst when time_offset is not zero. - if cla.ics_or_lbcs == "LBCS": + if cla.ics_or_lbcs == "LBCS" and isinstance(file_templates, dict): for format in ['netcdf', 'nemsio']: for i, tmpl in enumerate(file_templates.get(format, {}).get('fcst', [])): if "sfc" in tmpl: @@ -306,7 +307,7 @@ def get_file_templates(cla, known_data_info, data_store, use_cla_tmpl=False): if isinstance(file_templates, dict): if cla.file_type is not None: file_templates = file_templates[cla.file_type] - file_templates = file_templates[cla.anl_or_fcst] + file_templates = file_templates[cla.file_set] if not file_templates: msg = "No file naming convention found. They must be provided \ either on the command line or on in a config file." @@ -344,7 +345,7 @@ def get_requested_files(cla, file_templates, input_locs, method="disk", **kwargs """ members = kwargs.get("members", "") - members = members if isinstance(members, list) else [members] + members = cla.members if isinstance(cla.members, list) else [members] check_all = kwargs.get("check_all", False) @@ -478,7 +479,7 @@ def hpss_requested_files(cla, file_names, store_specs, members=-1, ens_group=-1) archive_file_names = archive_file_names[cla.file_type] if isinstance(archive_file_names, dict): - archive_file_names = archive_file_names[cla.anl_or_fcst] + archive_file_names = archive_file_names[cla.file_set] unavailable = {} existing_archives = {} @@ -505,7 +506,7 @@ def hpss_requested_files(cla, file_names, store_specs, members=-1, ens_group=-1) archive_internal_dirs = store_specs.get("archive_internal_dir", [""]) if isinstance(archive_internal_dirs, dict): - archive_internal_dirs = archive_internal_dirs.get(cla.anl_or_fcst, [""]) + archive_internal_dirs = archive_internal_dirs.get(cla.file_set, [""]) # which_archive matters for choosing the correct file names within, # but we can safely just try all options for the @@ 
-683,6 +684,7 @@ def setup_logging(debug=False): user-defined level for logging in the script.""" level = logging.WARNING + level = logging.INFO if debug: level = logging.DEBUG @@ -697,26 +699,30 @@ def write_summary_file(cla, data_store, file_templates): the data was retrieved, write a bash summary file that is needed by the workflow elements downstream.""" - files = [] - for tmpl in file_templates: - files.extend( - [fill_template(tmpl, cla.cycle_date, fcst_hr=fh) for fh in cla.fcst_hrs] + members = cla.members if isinstance(cla.members, list) else [-1] + for mem in members: + files = [] + for tmpl in file_templates: + tmpl = tmpl if isinstance(tmpl, list) else [tmpl] + for t in tmpl: + files.extend( + [fill_template(t, cla.cycle_date, fcst_hr=fh, mem=mem) for fh in cla.fcst_hrs] + ) + output_path = fill_template(cla.output_path, cla.cycle_date, mem=mem) + summary_fp = os.path.join(output_path, cla.summary_file) + logging.info(f"Writing a summary file to {summary_fp}") + file_contents = dedent( + f""" + DATA_SRC={data_store} + EXTRN_MDL_CDATE={cla.cycle_date.strftime('%Y%m%d%H')} + EXTRN_MDL_STAGING_DIR={output_path} + EXTRN_MDL_FNS=( {' '.join(files)} ) + EXTRN_MDL_FHRS=( {' '.join([str(i) for i in cla.fcst_hrs])} ) + """ ) - - summary_fp = os.path.join(cla.output_path, cla.summary_file) - logging.info(f"Writing a summary file to {summary_fp}") - file_contents = dedent( - f""" - DATA_SRC={data_store} - EXTRN_MDL_CDATE={cla.cycle_date.strftime('%Y%m%d%H')} - EXTRN_MDL_STAGING_DIR={cla.output_path} - EXTRN_MDL_FNS=( {' '.join(files)} ) - EXTRN_MDL_FHRS=( {' '.join([str(i) for i in cla.fcst_hrs])} ) - """ - ) - logging.info(f"Contents: {file_contents}") - with open(summary_fp, "w") as summary: - summary.write(file_contents) + logging.info(f"Contents: {file_contents}") + with open(summary_fp, "w") as summary: + summary.write(file_contents) def to_datetime(arg): @@ -743,7 +749,7 @@ def main(argv): cla.members = arg_list_to_range(cla.members) setup_logging(cla.debug) 
- print("Running script retrieve_data.py with args:\n", f"{('-' * 80)}\n{('-' * 80)}") + print("Running script retrieve_data.py with args:", f"\n{('-' * 80)}\n{('-' * 80)}") for name, val in cla.__dict__.items(): if name not in ["config"]: print(f"{name:>15s}: {val}") @@ -896,10 +902,10 @@ def parse_args(argv): # Required parser.add_argument( - "--anl_or_fcst", - choices=("anl", "fcst"), - help="Flag for whether analysis or forecast \ - files should be gathered", + "--file_set", + choices=("anl", "fcst", "obs", "fix"), + help="Flag for whether analysis, forecast, \ + fix, or observation files should be gathered", required=True, ) parser.add_argument( @@ -907,19 +913,22 @@ def parse_args(argv): help="Full path to a configuration file containing paths and \ naming conventions for known data streams. The default included \ in this repository is in parm/data_locations.yml", + required=True, type=config_exists, + ) parser.add_argument( "--cycle_date", help="Cycle date of the data to be retrieved in YYYYMMDDHH \ format.", - required=True, + required=False, # relaxed this arg option, and set a benign value when not used + default="1999123100", type=to_datetime, ) parser.add_argument( "--data_stores", help="List of priority data_stores. Tries first list item \ - first. Choices: hpss, nomads, aws, disk", + first. Choices: hpss, nomads, aws, disk, remote.", nargs="*", required=True, type=to_lower, @@ -928,6 +937,7 @@ def parse_args(argv): "--external_model", choices=( "FV3GFS", + "GFS_obs", "GDAS", "GEFS", "GSMGFS", @@ -935,7 +945,9 @@ def parse_args(argv): "NAM", "RAP", "RAPx", + "RAP_obs", "HRRRx", + "GSI-FIX", ), help="External model label. This input is case-sensitive", required=True, @@ -946,25 +958,30 @@ def parse_args(argv): one fhr will be processed. If 2 or 3 arguments, a sequence \ of forecast hours [start, stop, [increment]] will be \ processed. If more than 3 arguments, the list is processed \ - as-is.", + as-is. 
default=[0]", nargs="+", - required=True, + required=False, # relaxed this arg option, and set a default value when not used + default=[0], type=int, ) parser.add_argument( "--output_path", help="Path to a location on disk. Path is expected to exist.", - required=True, + required=True, type=os.path.abspath, ) parser.add_argument( "--ics_or_lbcs", choices=("ICS", "LBCS"), help="Flag for whether ICS or LBCS.", - required=True, + required=True ) # Optional + parser.add_argument( + "--version", # for file patterns that dont conform to cycle_date [TBD] + help="Version number of package to download, e.g. x.yy.zz", + ) parser.add_argument( "--symlink", action="store_true", @@ -984,7 +1001,7 @@ def parse_args(argv): ) parser.add_argument( "--file_type", - choices=("grib2", "nemsio", "netcdf"), + choices=("grib2", "nemsio", "netcdf", "prepbufr", "tcvitals"), help="External model file format", ) parser.add_argument( diff --git a/ush/set_FV3nml_ens_stoch_seeds.py b/ush/set_FV3nml_ens_stoch_seeds.py index 098aadc37f..08ed944f46 100644 --- a/ush/set_FV3nml_ens_stoch_seeds.py +++ b/ush/set_FV3nml_ens_stoch_seeds.py @@ -37,7 +37,7 @@ def set_FV3nml_ens_stoch_seeds(cdate): within each member directory housed within each cycle directory. Files of any two ensemble members differ only in their stochastic "seed" parameter values. These namelist files are generated when this file is - called as part of the RUN_FCST_TN task. + called as part of the TN_RUN_FCST task. Args: cdate diff --git a/ush/set_FV3nml_sfc_climo_filenames.py b/ush/set_FV3nml_sfc_climo_filenames.py index 4cad4dfff9..b2fb7a7deb 100644 --- a/ush/set_FV3nml_sfc_climo_filenames.py +++ b/ush/set_FV3nml_sfc_climo_filenames.py @@ -33,7 +33,7 @@ def set_FV3nml_sfc_climo_filenames(): This function sets the values of the variables in the forecast model's namelist file that specify the paths to the surface climatology files on the FV3LAM native grid (which are either pregenerated - or created by the MAKE_SFC_CLIMO_TN task). 
Note that the workflow + or created by the TN_MAKE_SFC_CLIMO task). Note that the workflow generation scripts create symlinks to these surface climatology files in the FIXlam directory, and the values in the namelist file that get set by this function are relative or full paths to these links. diff --git a/ush/setup.py b/ush/setup.py index c2fdc0affd..993a63d47b 100644 --- a/ush/setup.py +++ b/ush/setup.py @@ -4,8 +4,8 @@ import sys import datetime import traceback +import logging from textwrap import dedent -from logging import getLogger from python_utils import ( log_info, @@ -55,7 +55,10 @@ def load_config_for_setup(ushdir, default_config, user_config): """ # Load the default config. + logging.debug(f"Loading config defaults file {default_config}") cfg_d = load_config_file(default_config) + logging.debug(f"Read in the following values from config defaults file:\n") + logging.debug(cfg_d) # Load the user config file, then ensure all user-specified # variables correspond to a default value. @@ -69,6 +72,8 @@ def load_config_for_setup(ushdir, default_config, user_config): try: cfg_u = load_config_file(user_config) + logging.debug(f"Read in the following values from YAML config file {user_config}:\n") + logging.debug(cfg_u) except: errmsg = dedent( f"""\n @@ -80,15 +85,13 @@ def load_config_for_setup(ushdir, default_config, user_config): # Make sure the keys in user config match those in the default # config. 
- if not check_structure_dict(cfg_u, cfg_d): - raise Exception( - dedent( - f""" - User-specified variable "{key}" in {user_config} is not valid - Check {EXPT_DEFAULT_CONFIG_FN} for allowed user-specified variables\n - """ - ) - ) + invalid = check_structure_dict(cfg_u, cfg_d) + if invalid: + errmsg = f"Invalid key(s) specified in {user_config}:\n" + for entry in invalid: + errmsg = errmsg + f"{entry} = {invalid[entry]}\n" + errmsg = errmsg + f"\nCheck {default_config} for allowed user-specified variables\n" + raise Exception(errmsg) # Mandatory variables *must* be set in the user's config; the default value is invalid mandatory = ["user.MACHINE"] @@ -116,6 +119,7 @@ def load_config_for_setup(ushdir, default_config, user_config): ({machine}) in your config file {user_config}""" ) ) + logging.debug(f"Loading machine defaults file {machine_file}") machine_cfg = load_config_file(machine_file) # Load the fixed files configuration @@ -144,6 +148,20 @@ def load_config_for_setup(ushdir, default_config, user_config): # User settings (take precedence over all others) update_dict(cfg_u, cfg_d) + # Set "Home" directory, the top-level ufs-srweather-app directory + homedir = os.path.abspath(os.path.dirname(__file__) + os.sep + os.pardir) + cfg_d["user"]["HOMEdir"] = homedir + + # Special logic if EXPT_BASEDIR is a relative path; see config_defaults.yaml for explanation + expt_basedir = cfg_d["workflow"]["EXPT_BASEDIR"] + if (not expt_basedir) or (expt_basedir[0] != "/"): + expt_basedir = os.path.join(homedir, "..", "expt_dirs", expt_basedir) + try: + expt_basedir = os.path.realpath(expt_basedir) + except: + pass + cfg_d["workflow"]["EXPT_BASEDIR"] = os.path.abspath(expt_basedir) + extend_yaml(cfg_d) # Do any conversions of data types @@ -152,9 +170,6 @@ def load_config_for_setup(ushdir, default_config, user_config): if not (v is None or v == ""): cfg_d[sect][k] = str_to_list(v) - for k, v in cfg_d["task_run_fcst"].items(): - print(f"*** {k}: {v}") - # Mandatory variables *must* 
be set in the user's config or the machine file; the default value is invalid mandatory = [ "EXPT_SUBDIR", @@ -216,7 +231,7 @@ def set_srw_paths(ushdir, expt_config): """ # HOMEdir is the location of the SRW clone, one directory above ush/ - homedir = os.path.abspath(os.path.dirname(__file__) + os.sep + os.pardir) + homedir = expt_config.get("user", {}).get("HOMEdir") # Read Externals.cfg mng_extrns_cfg_fn = os.path.join(homedir, "Externals.cfg") @@ -255,13 +270,12 @@ def set_srw_paths(ushdir, expt_config): ) return dict( - HOMEdir=homedir, USHdir=ushdir, UFS_WTHR_MDL_DIR=ufs_wthr_mdl_dir, ) -def setup(USHdir, user_config_fn="config.yaml"): +def setup(USHdir, user_config_fn="config.yaml", debug: bool = False): """Function that validates user-provided configuration, and derives a secondary set of parameters needed to configure a Rocoto-based SRW workflow. The derived parameters use a set of required user-defined @@ -276,13 +290,13 @@ def setup(USHdir, user_config_fn="config.yaml"): USHdir (str): The full path of the ush/ directory where this script is located user_config_fn (str): The name of a user-provided config YAML + debug (bool): Enable extra output for debugging Returns: None """ - logger = getLogger(__name__) - cd_vrfy(USHdir) + logger = logging.getLogger(__name__) # print message log_info( @@ -341,38 +355,7 @@ def setup(USHdir, user_config_fn="config.yaml"): fcst_len_hrs_max = {fcst_len_hrs_max}""" ) - # - # ----------------------------------------------------------------------- - # - # If the base directory (EXPT_BASEDIR) in which the experiment subdirectory - # (EXPT_SUBDIR) will be located does not start with a "/", then it is - # either set to a null string or contains a relative directory. In both - # cases, prepend to it the absolute path of the default directory under - # which the experiment directories are placed. 
If EXPT_BASEDIR was set - # to a null string, it will get reset to this default experiment directory, - # and if it was set to a relative directory, it will get reset to an - # absolute directory that points to the relative directory under the - # default experiment directory. Then create EXPT_BASEDIR if it doesn't - # already exist. - # - # ----------------------------------------------------------------------- - # - expt_basedir = workflow_config.get("EXPT_BASEDIR") - homedir = expt_config["user"].get("HOMEdir") - if (not expt_basedir) or (expt_basedir[0] != "/"): - if not expt_basedir or "{{" in expt_basedir: - expt_basedir = "" - expt_basedir = os.path.join(homedir, "..", "expt_dirs", expt_basedir) - try: - expt_basedir = os.path.realpath(expt_basedir) - except: - pass - expt_basedir = os.path.abspath(expt_basedir) - workflow_config["EXPT_BASEDIR"] = expt_basedir - - # Update some paths that include EXPT_BASEDIR - extend_yaml(expt_config) # # ----------------------------------------------------------------------- # @@ -383,7 +366,10 @@ def setup(USHdir, user_config_fn="config.yaml"): # expt_subdir = workflow_config.get("EXPT_SUBDIR", "") - exptdir = workflow_config["EXPTDIR"] + exptdir = workflow_config.get("EXPTDIR") + + # Update some paths that include EXPTDIR and EXPT_BASEDIR + extend_yaml(expt_config) preexisting_dir_method = workflow_config.get("PREEXISTING_DIR_METHOD", "") try: check_for_preexist_dir_file(exptdir, preexisting_dir_method) @@ -1063,7 +1049,7 @@ def get_location(xcs, fmt, expt_cfg): # # ----------------------------------------------------------------------- # NOTE: currently this is executed no matter what, should it be dependent on the logic described below?? - # If not running the MAKE_GRID_TN, MAKE_OROG_TN, and/or MAKE_SFC_CLIMO + # If not running the TN_MAKE_GRID, TN_MAKE_OROG, and/or TN_MAKE_SFC_CLIMO # tasks, create symlinks under the FIXlam directory to pregenerated grid, # orography, and surface climatology files. 
# @@ -1285,7 +1271,7 @@ def get_location(xcs, fmt, expt_cfg): # # loop through the flattened expt_config and check validity of params - cfg_v = load_config_file("valid_param_vals.yaml") + cfg_v = load_config_file(os.path.join(USHdir, "valid_param_vals.yaml")) for k, v in flatten_dict(expt_config).items(): if v is None or v == "": continue diff --git a/ush/test_retrieve_data.py b/ush/test_retrieve_data.py index e35eac2dad..85e9b2860d 100644 --- a/ush/test_retrieve_data.py +++ b/ush/test_retrieve_data.py @@ -41,7 +41,7 @@ def test_fv3gfs_grib2_lbcs_from_hpss(self): # fmt: off args = [ - '--anl_or_fcst', 'fcst', + '--file_set', 'fcst', '--config', self.config, '--cycle_date', '2022062512', '--data_stores', 'hpss', @@ -74,7 +74,7 @@ def test_fv3gfs_netcdf_lbcs_from_hpss(self): # fmt: off args = [ - '--anl_or_fcst', 'fcst', + '--file_set', 'fcst', '--config', self.config, '--cycle_date', '2022060112', '--data_stores', 'hpss', @@ -103,11 +103,11 @@ def test_gdas_ics_from_aws(self): with tempfile.TemporaryDirectory(dir=".") as tmp_dir: os.chdir(tmp_dir) - out_path_tmpl = f"mem{{mem:03d}}" + out_path_tmpl = os.path.join(tmp_dir, f"mem{{mem:03d}}") # fmt: off args = [ - '--anl_or_fcst', 'anl', + '--file_set', 'anl', '--config', self.config, '--cycle_date', '2022052512', '--data_stores', 'aws', @@ -124,7 +124,6 @@ def test_gdas_ics_from_aws(self): retrieve_data.main(args) # Verify files exist in temp dir - for mem in [9, 10]: files_on_disk = glob.glob( os.path.join(out_path_tmpl.format(mem=mem), "*") @@ -132,6 +131,7 @@ def test_gdas_ics_from_aws(self): self.assertEqual(len(files_on_disk), 2) # GEFS Tests + @unittest.skipIf(os.environ.get("CI") == "true", "Skipping HPSS tests") def test_gefs_grib2_ics_from_aws(self): """Get GEFS grib2 a & b files for ICS offset by 6 hours.""" @@ -139,11 +139,11 @@ def test_gefs_grib2_ics_from_aws(self): with tempfile.TemporaryDirectory(dir=".") as tmp_dir: os.chdir(tmp_dir) - out_path_tmpl = f"mem{{mem:03d}}" + out_path_tmpl = 
os.path.join(tmp_dir, f"mem{{mem:03d}}") # fmt: off args = [ - '--anl_or_fcst', 'anl', + '--file_set', 'anl', '--config', self.config, '--cycle_date', '2022052512', '--data_stores', 'aws', @@ -177,7 +177,7 @@ def test_hrrr_ics_from_hpss(self): # fmt: off args = [ - '--anl_or_fcst', 'anl', + '--file_set', 'anl', '--config', self.config, '--cycle_date', '2022062512', '--data_stores', 'hpss', @@ -207,7 +207,7 @@ def test_hrrr_lbcs_from_hpss(self): # fmt: off args = [ - '--anl_or_fcst', 'fcst', + '--file_set', 'fcst', '--config', self.config, '--cycle_date', '2022062512', '--data_stores', 'hpss', @@ -236,7 +236,7 @@ def test_hrrr_ics_from_aws(self): # fmt: off args = [ - '--anl_or_fcst', 'anl', + '--file_set', 'anl', '--config', self.config, '--cycle_date', '2022062512', '--data_stores', 'aws', @@ -262,10 +262,10 @@ def test_hrrr_lbcs_from_aws(self): with tempfile.TemporaryDirectory(dir=".") as tmp_dir: os.chdir(tmp_dir) - + # fmt: off args = [ - '--anl_or_fcst', 'fcst', + '--file_set', 'fcst', '--config', self.config, '--cycle_date', '2022062512', '--data_stores', 'aws', @@ -295,7 +295,7 @@ def test_rap_ics_from_aws(self): # fmt: off args = [ - '--anl_or_fcst', 'anl', + '--file_set', 'anl', '--config', self.config, '--cycle_date', '2022062509', '--data_stores', 'aws', @@ -325,7 +325,7 @@ def test_rap_lbcs_from_aws(self): # fmt: off args = [ - '--anl_or_fcst', 'fcst', + '--file_set', 'fcst', '--config', self.config, '--cycle_date', '2022062509', '--data_stores', 'aws', diff --git a/ush/valid_param_vals.yaml b/ush/valid_param_vals.yaml index 5a8088ae7f..5b6a76796d 100644 --- a/ush/valid_param_vals.yaml +++ b/ush/valid_param_vals.yaml @@ -39,13 +39,14 @@ valid_vals_CCPP_PHYS_SUITE: [ "FV3_GFS_v15p2", "FV3_GFS_v15_thompson_mynn_lam3km", "FV3_GFS_v16", +"FV3_GFS_v17_p8", "FV3_RRFS_v1beta", "FV3_WoFS_v0", "FV3_HRRR" ] valid_vals_GFDLgrid_NUM_CELLS: [48, 96, 192, 384, 768, 1152, 3072] -valid_vals_EXTRN_MDL_NAME_ICS: ["GSMGFS", "FV3GFS", "RAP", "HRRR", "NAM"] 
-valid_vals_EXTRN_MDL_NAME_LBCS: ["GSMGFS", "FV3GFS", "RAP", "HRRR", "NAM"] +valid_vals_EXTRN_MDL_NAME_ICS: ["GSMGFS", "FV3GFS", "GEFS", "GDAS", "RAP", "HRRR", "NAM"] +valid_vals_EXTRN_MDL_NAME_LBCS: ["GSMGFS", "FV3GFS", "GEFS", "GDAS", "RAP", "HRRR", "NAM"] valid_vals_USE_USER_STAGED_EXTRN_FILES: [True, False] valid_vals_FV3GFS_FILE_FMT_ICS: ["nemsio", "grib2", "netcdf"] valid_vals_FV3GFS_FILE_FMT_LBCS: ["nemsio", "grib2", "netcdf"] diff --git a/versions/build.ver.wcoss2 b/versions/build.ver.wcoss2 index 5d9b42c82a..9204cb4c9f 100644 --- a/versions/build.ver.wcoss2 +++ b/versions/build.ver.wcoss2 @@ -5,25 +5,29 @@ export module_ver=8.5.2 export envvar_ver=1.0 export PrgEnv_intel_ver=8.1.0 export intel_ver=19.1.3.304 +export intel_para_ver=19.1.3.304 +export intel_dev_ver=19.1.3.304 export craype_ver=2.7.13 export cray_mpich_ver=8.1.7 +export cray_mpich_para_ver=8.1.7 +export cray_mpich_dev_ver=8.1.9 export cmake_ver=3.20.2 -export intel_ver=19.1.3.304 -export cray_mpich_ver=8.1.7 export jasper_ver=2.0.25 export zlib_ver=1.2.11 export libpng_ver=1.6.37 export hdf5_ver=1.10.6 export netcdf_ver=4.7.4 -export pio_ver=2.5.2 -export esmf_ver=8.3.0b09 -export fms_ver=2022.01 +export fms_ver=2022.04 export bacio_ver=2.4.1 -export crtm_ver=2.3.0 +export crtm_ver=2.4.0 export g2_ver=3.4.5 export g2tmpl_ver=1.10.0 export ip_ver=3.3.3 export sp_ver=2.3.3 +export pio_ver=2.5.2 +export gftl_shared_ver=1.5.0 +export esmf_ver=8.3.0b09 +export mapl_ver=2.23.1-esmf-8.3.0b09 export w3nco_ver=2.4.1 export libjpeg_ver=9c export cray_pals_ver=1.1.3 @@ -32,3 +36,6 @@ export nemsio_ver=2.5.2 export sigio_ver=2.3.2 export sfcio_ver=1.4.1 export wrf_io_ver=1.2.0 +export wgrib2_ver=2.0.8_wmo +export bufr_ver=11.7.0 +export nemsiogfs_ver=2.5.3 diff --git a/versions/run.ver.wcoss2 b/versions/run.ver.wcoss2 index 6a06331a85..6c7a9c1d52 100644 --- a/versions/run.ver.wcoss2 +++ b/versions/run.ver.wcoss2 @@ -4,9 +4,18 @@ export intel_ver=19.1.3.304 export python_ver=3.8.6 export rocoto_ver=1.3.5 
-export gfs_ver=v16.2 +export gfs_ver=v16.3 export nam_ver=v4.2 export rap_ver=v5.1 export gsmgfs_ver=v16.2 export hrrr_ver=v3 export prod_util_ver=2.0.14 + +export udunits_ver=2.2.28 +export gsl_ver=2.7 +export netcdf_ver=4.7.4 +export nco_ver=4.9.7 + +export wgrib2_ver=2.0.8_wmo +export libjpeg_ver=9c +export grib_util_ver=1.2.4