diff --git a/ChangeLog b/ChangeLog
index 4138740ccda..efaa4318a64 100644
--- a/ChangeLog
+++ b/ChangeLog
@@ -1,5 +1,178 @@
======================================================================
+Originator: Chris Fischer
+Date: 11-08-2017
+Tag: cime5.4.0-alpha.09
+Answer Changes: None
+Tests: IRT.T62_g17.C1850ECO.yellowstone_intel.pop-default
+ code_checker, seq by-hand and with re-run
+Dependencies:
+
+Brief Summary:
+ - Allow undated history files to be archived.
+ - Convert SEQ to use compare-two.
+
+User interface changes:
+
+PR summary: git log --oneline --first-parent [previous_tag]..master
+ce40669 Merge pull request #2037 from jedwards4b/fix_for_undated_history_file_archiving
+280e006 Merge pull request #2030 from ESMCI/jgfouca/seq_cmpr_two
+
+Modified files: git diff --name-status [previous_tag]
+M scripts/lib/CIME/SystemTests/seq.py
+M scripts/lib/CIME/SystemTests/system_tests_common.py
+M scripts/lib/CIME/case_st_archive.py
+
+======================================================================
+
+======================================================================
+
+Originator: Chris Fischer
+Date: 11-07-2017
+Tag: cime5.4.0-alpha.08
+Answer Changes: None
+Tests: Hand test B1850
+Dependencies:
+
+Brief Summary:
+ - Handle case when cmdargs is not a list
+
+User interface changes:
+
+PR summary: git log --oneline --first-parent [previous_tag]..master
+fc72203 Merge pull request #2032 from jedwards4b/cmdargs_fix
+
+Modified files: git diff --name-status [previous_tag]
+M scripts/lib/CIME/utils.py
+
+======================================================================
+
+======================================================================
+
+Originator: Chris Fischer
+Date: 11-06-2017
+Tag: cime5.4.0-alpha.07
+Answer Changes: None
+Tests: scripts_regression_tests.py
+ manual checks of mapping files generated with runoff_to_ocn tool and run of check_map.sh tool
+ Tested runoff_map tool with a source file
+ run_merge_mapping_files.sh
+ code_checker
+ tests with casename try+45
+Dependencies:
+
+Brief Summary:
+ - Improve interface to external scripts PRERUN_SCRIPT, POSTRUN_SCRIPT, DATA_ASSIMILATION_SCRIPT.
+ - Better error message for compsets.
+ - For runoff map: fix mask_b and frac_b in nearest neighbor map.
+ - If area units on source grid file are in sq deg, convert to sq radians.
+ - Handle the case where no points are mapped to marginal seas.
+ - Add function to rebuild dependencies.
+ - Make srt change backward compatible.
+ - Allow overrides in config batch.
+ - Fix machines option to srt and uninitialized var.
+ - Fix cprnc build on mac.
+ - Add NLCOMP warning if comparison fails and RUN phase is complete.
+ - Remove string formatting from code-path that checks python version.
+ - Bluewaters update.
+ - Fix for ACME MPAS builds.
+ - Add support for selecting different driver.
+ - Fix critical bug with py3 str handling.
+ - Only report mem and tput errors once.
+ - Fix slurm output fields.
+ - Fix critical problems found during ACME/ESMCI integration.
+ - Handle pattern match chars like + in casename.
+ - Add check for a batch system when testing user prerequisites.
+ - Fix batch prepend test.
+ - py3 fixes
+ - Fix COST_PES and TOTALPES for titan (aprun).
+ - Add support for arbitrary prerequisites to case.submit.
+ - Updates for NAS pleiades systems.
+
+User interface changes:
+ - Add --clean-depends to case.build (example below); this argument can take a
+   component list and will clean all components if one is not provided.
+ - Some additional output from the namelist comparison code in some cases.
+ - --driver added to create_newcase; new _V$driver test option.
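+
+   Example (hypothetical component choice):
+     ./case.build --clean-depends lnd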
+
+
+PR summary: git log --oneline --first-parent [previous_tag]..master
+0027c84 Merge pull request #2005 from jedwards4b/external_process_interface
+a84b875 Merge pull request #2027 from jedwards4b/better_errormsg_for_compsets
+184d7de Merge pull request #2013 from billsacks/runoff_nn_fix_mask
+28f74aa Merge pull request #2022 from billsacks/runoff_convert_sq_degrees
+4793fd8 Merge pull request #2007 from billsacks/runoff_map_no_marginal_ocean
+5e79775 Merge pull request #2026 from jedwards4b/clean_depends
+2b79440 make srt change backward compatible
+a5a6949 Merge pull request #2025 from jedwards4b/allow_overrides_in_config_batch
+cd181bb Merge pull request #2023 from jedwards4b/fix_unitialized_var
+2978605 Merge pull request #2021 from billsacks/fix_cprnc_make
+2d39e61 Merge pull request #2016 from ESMCI/jgfouca/add_nl_comp_warning
+32375b4 Merge pull request #2017 from ESMCI/jgfouca/fix_cime_for_old_python
+f3493fa Merge pull request #2015 from jedwards4b/bluewaters_update1
+33ddbe8 Merge pull request #2008 from ESMCI/jgfouca/fix_acme_mpas_builds
+f99c174 Merge pull request #1988 from ESMCI/jgfouca/add_driver_selection_support
+33597f4 Merge pull request #2003 from ESMCI/jgfouca/py3_str_unicode_confusion
+9202dbb Merge pull request #1998 from ESMCI/jgfouca/only_report_memory_tput_errors_once
+09b7b8e Merge pull request #2001 from ESMCI/jgfouca/fix_slurm_output_fields
+e67caa2 Merge pull request #1999 from ESMCI/jgfouca/misc_fixes
+abf1e91 Merge pull request #1997 from jedwards4b/handle_special_chars_in_casename
+952c5fd Merge pull request #1996 from ESMCI/mfdeakin-sandia/case_submit_prereq/fix_test
+ef08588 fix batch prepend test
+4226f42 py3 fixes
+e959bdc Merge pull request #1990 from ESMCI/jgfouca/titan_fixes_and_misc
+5f7359b Merge pull request #1753 from ESMCI/mfdeakin-sandia/case_submit/prereq
+b8190ea Merge pull request #1983 from ESMCI/jgfouca/merge_st_archive
+
+Modified files: git diff --name-status [previous_tag]
+M config/acme/config_files.xml
+M config/acme/machines/Makefile
+M config/acme/machines/config_batch.xml
+M config/cesm/machines/Makefile
+M config/cesm/machines/config_batch.xml
+M config/cesm/machines/config_machines.xml
+M config/xml_schemas/config_batch.xsd
+M doc/source/users_guide/running-a-case.rst
+M scripts/Tools/case.build
+M scripts/Tools/case.submit
+M scripts/create_newcase
+M scripts/lib/CIME/SystemTests/dae.py
+M scripts/lib/CIME/SystemTests/system_tests_common.py
+M scripts/lib/CIME/SystemTests/system_tests_compare_two.py
+M scripts/lib/CIME/XML/component.py
+M scripts/lib/CIME/XML/entry_id.py
+M scripts/lib/CIME/XML/env_batch.py
+M scripts/lib/CIME/XML/env_mach_pes.py
+M scripts/lib/CIME/XML/generic_xml.py
+M scripts/lib/CIME/XML/machines.py
+M scripts/lib/CIME/aprun.py
+M scripts/lib/CIME/build.py
+M scripts/lib/CIME/buildlib.py
+M scripts/lib/CIME/case.py
+M scripts/lib/CIME/case_cmpgen_namelists.py
+M scripts/lib/CIME/case_run.py
+M scripts/lib/CIME/case_setup.py
+M scripts/lib/CIME/case_st_archive.py
+M scripts/lib/CIME/case_submit.py
+M scripts/lib/CIME/compare_namelists.py
+M scripts/lib/CIME/get_timing.py
+M scripts/lib/CIME/test_scheduler.py
+M scripts/lib/CIME/utils.py
+M scripts/query_config
+M scripts/tests/scripts_regression_tests.py
+M src/drivers/mct/cime_config/config_component.xml
+M src/drivers/mct/main/prep_ocn_mod.F90
+M tools/cprnc/Makefile
+M tools/mapping/check_maps/src/ESMF_RegridWeightGenCheck.F90
+M tools/mapping/gen_mapping_files/runoff_to_ocn/ncl/merge_mapping_files.ncl
+M tools/mapping/gen_mapping_files/runoff_to_ocn/src/main.F90
+M tools/mapping/gen_mapping_files/runoff_to_ocn/src/map_mod.F90
+
+
+======================================================================
+
+======================================================================
+
Originator: Chris Fischer
Date: 10-25-2017
Tag: cime5.4.0-alpha.06
diff --git a/config/acme/machines/Makefile b/config/acme/machines/Makefile
index 4ebd0b889cb..a83421af7fa 100644
--- a/config/acme/machines/Makefile
+++ b/config/acme/machines/Makefile
@@ -868,6 +868,39 @@ $(COMPLIB): $(OBJS)
%.F90: %.F90.in
$(CIMEROOT)/src/externals/genf90/genf90.pl $< > $@
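+
+# The clean_depends* targets remove only the generated Srcfiles lists; this
+# forces the source file search (and the dependency information derived from
+# it) to be redone on the next build, while leaving compiled objects in place.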
+clean_dependsatm:
+ $(RM) -f $(EXEROOT)/atm/obj/Srcfiles
+
+clean_dependscpl:
+ $(RM) -f $(EXEROOT)/cpl/obj/Srcfiles
+
+clean_dependsocn:
+ $(RM) -f $(EXEROOT)/ocn/obj/Srcfiles
+
+clean_dependswav:
+ $(RM) -f $(EXEROOT)/wav/obj/Srcfiles
+
+clean_dependsglc:
+ $(RM) -f $(EXEROOT)/glc/obj/Srcfiles
+
+clean_dependsice:
+ $(RM) -f $(EXEROOT)/ice/obj/Srcfiles
+
+clean_dependsrof:
+ $(RM) -f $(EXEROOT)/rof/obj/Srcfiles
+
+clean_dependsesp:
+ $(RM) -f $(EXEROOT)/esp/obj/Srcfiles
+
+clean_dependslnd:
+ $(RM) -f $(LNDOBJDIR)/Srcfiles
+
+clean_dependscsmshare:
+ $(RM) -f $(SHAREDLIBROOT)/$(SHAREDPATH)/$(COMP_INTERFACE)/$(ESMFDIR)/$(NINST_VALUE)/csm_share/Srcfiles
+
+clean_depends: clean_dependsatm clean_dependscpl clean_dependswav clean_dependsglc clean_dependsice clean_dependsrof clean_dependslnd clean_dependscsmshare clean_dependsesp
+
+
cleanatm:
$(RM) -f $(LIBROOT)/libatm.a
$(RM) -fr $(EXEROOT)/atm/obj
diff --git a/config/acme/machines/config_pio.xml b/config/acme/machines/config_pio.xml
index 1ad2586c80b..e4a836b50b7 100644
--- a/config/acme/machines/config_pio.xml
+++ b/config/acme/machines/config_pio.xml
@@ -28,13 +28,11 @@
-
+ (\d+.bw)$
-l nodes={{ num_nodes }}:ppn={{ tasks_per_node }}:xe
-S {{ shell }}
- regular
- debug
+ normal
+ debug
diff --git a/config/cesm/machines/config_machines.xml b/config/cesm/machines/config_machines.xml
index d4515959f2a..cae0e21d676 100644
--- a/config/cesm/machines/config_machines.xml
+++ b/config/cesm/machines/config_machines.xml
@@ -56,12 +56,13 @@
CNL
pgi,cray,gnu
mpich
+ banu
/scratch/sciteam/$USER
$ENV{CESMDATAROOT}/inputdata
$ENV{CESMDATAROOT}/inputdata/atm/datm7
- /scratch/sciteam/$USER/archive/$CASE
+ $CIME_OUTPUT_ROOT/archive/$CASE
$ENV{CESMDATAROOT}/ccsm_baselines
- $ENV{CESMDATAROOT}/tools/ccsm_cprnc/cprnc
+ $ENV{CESMDATAROOT}/tools/cprnc
8
pbs
cseg
@@ -95,28 +96,28 @@
PrgEnv-pgi
- pgi pgi/16.3.0
+ pgi pgi/17.5.0
- PrgEnv-gnu/4.2.84
- gcc gcc/4.8.2
+ PrgEnv-gnu
+ gcc gcc/6.3.0
PrgEnv-cray
- cce cce/8.4.6
+ cce cce/8.5.8
- papi/5.3.2
- cray-mpich cray-mpich/7.3.3
- cray-libsci cray-libsci/16.03.1
- torque/6.0.2
+ papi/5.5.1.1
+ cray-mpich cray-mpich/7.5.3
+ cray-libsci cray-libsci/16.11.1
+ torque/6.0.4
- cray-netcdf-hdf5parallel/4.4.0
- cray-parallel-netcdf/1.7.0
+ cray-netcdf-hdf5parallel/4.4.1.1
+ cray-parallel-netcdf/1.8.0
- cray-netcdf/4.4.0
+ cray-netcdf/4.4.1.1
cmake/3.1.3
@@ -194,7 +195,7 @@
Example port to centos7 linux system with gcc, netcdf, pnetcdf and mpich
- using modules from http://www.admin-magazine.com/HPC/Articles/Environment-Modules
+ using modules from http://www.admin-magazine.com/HPC/Articles/Environment-Modules
regex.expression.matching.your.machine
LINUX
@@ -285,8 +286,8 @@
+ shared nodes. However, running mpi jobs on the shared nodes currently
+ requires some workarounds; these workarounds are implemented here -->
/opt/sgi/mpt/mpt-2.15/bin/mpirun $ENV{UNIT_TEST_HOST} -np 1
@@ -346,7 +347,7 @@
false
@@ -733,7 +734,7 @@
--label
-n $TOTALPES
- -c $ENV{OMP_NUM_THREADS}
+ -c $ENV{OMP_NUM_THREADS}
@@ -1082,7 +1083,7 @@
mpirun
- -np $TOTALPES
+ -np $TOTALPES
@@ -1096,8 +1097,8 @@
module
- sems-env
- sems-git
+ sems-env
+ sems-git
sems-python/2.7.9
sems-gcc/5.1.0
sems-openmpi/1.8.7
@@ -1247,12 +1248,12 @@
nas
pkgsrc
- comp-intel/2016.2.181
- mpi-sgi/mpt.2.15r20
- szip/2.1.1
- hdf4/4.2.12
- hdf5/1.8.18_mpt
- netcdf/4.4.1.1_mpt
+ comp-intel/2016.2.181
+ mpi-sgi/mpt.2.15r20
+ szip/2.1.1
+ hdf4/4.2.12
+ hdf5/1.8.18_mpt
+ netcdf/4.4.1.1_mpt
@@ -1302,11 +1303,11 @@
nas
pkgsrc
comp-intel/2016.2.181
- mpi-sgi/mpt.2.15r20
- szip/2.1.1
- hdf4/4.2.12
- hdf5/1.8.18_mpt
- netcdf/4.4.1.1_mpt
+ mpi-sgi/mpt.2.15r20
+ szip/2.1.1
+ hdf4/4.2.12
+ hdf5/1.8.18_mpt
+ netcdf/4.4.1.1_mpt
@@ -1357,10 +1358,10 @@
pkgsrc
comp-intel/2016.2.181
mpi-sgi/mpt.2.15r20
- szip/2.1.1
- hdf4/4.2.12
- hdf5/1.8.18_mpt
- netcdf/4.4.1.1_mpt
+ szip/2.1.1
+ hdf4/4.2.12
+ hdf5/1.8.18_mpt
+ netcdf/4.4.1.1_mpt
@@ -1411,10 +1412,10 @@
pkgsrc
comp-intel/2016.2.181
mpi-sgi/mpt.2.15r20
- szip/2.1.1
- hdf4/4.2.12
- hdf5/1.8.18_mpt
- netcdf/4.4.1.1_mpt
+ szip/2.1.1
+ hdf4/4.2.12
+ hdf5/1.8.18_mpt
+ netcdf/4.4.1.1_mpt
@@ -1482,7 +1483,7 @@
mpirun
- -np $TOTALPES
+ -np $TOTALPES
@@ -1496,8 +1497,8 @@
module
- sems-env
- sems-git
+ sems-env
+ sems-git
sems-python/2.7.9
sems-gcc/5.1.0
sems-openmpi/1.8.7
@@ -1550,21 +1551,21 @@
module
module
-
- sems-env
- sems-git
- sems-python/2.7.9
- gnu/4.9.2
- intel/intel-15.0.3.187
- libraries/intel-mkl-15.0.2.164
- libraries/intel-mkl-15.0.2.164
+
+ sems-env
+ sems-git
+ sems-python/2.7.9
+ gnu/4.9.2
+ intel/intel-15.0.3.187
+ libraries/intel-mkl-15.0.2.164
+ libraries/intel-mkl-15.0.2.164
- openmpi-intel/1.8
- sems-hdf5/1.8.12/parallel
- sems-netcdf/4.3.2/parallel
- sems-hdf5/1.8.12/base
- sems-netcdf/4.3.2/base
+ openmpi-intel/1.8
+ sems-hdf5/1.8.12/parallel
+ sems-netcdf/4.3.2/parallel
+ sems-hdf5/1.8.12/base
+ sems-netcdf/4.3.2/base
@@ -1845,14 +1846,14 @@
TRUE
+ after the mpirun command when launching cesm on yellowstone -->
mpirun.lsf
TARGET_PROCESSOR_LIST=AUTO_SELECT $ENV{CESMDATAROOT}/tools/bin/mpirun
-n $TOTALPES
- -tpn {{ tasks_per_node }}
+ -tpn {{ tasks_per_node }}
$ENV{CESMDATAROOT}/tools/bin/launch
@@ -1860,7 +1861,7 @@
unset MP_PE_AFFINITY; unset MP_TASK_AFFINITY; unset MP_CPU_BIND_LIST; $ENV{CESMDATAROOT}/tools/bin/mpirun
-n $TOTALPES
- -tpn {{ tasks_per_node }}
+ -tpn {{ tasks_per_node }}
$ENV{CESMDATAROOT}/tools/bin/hybrid_launch
@@ -1868,7 +1869,7 @@
$ENV{CESMDATAROOT}/tools/bin/mpirun
-n $TOTALPES
- -tpn {{ tasks_per_node }}
+ -tpn {{ tasks_per_node }}
diff --git a/config/xml_schemas/config_batch.xsd b/config/xml_schemas/config_batch.xsd
index 4a29fc5dd40..08c8453126c 100644
--- a/config/xml_schemas/config_batch.xsd
+++ b/config/xml_schemas/config_batch.xsd
@@ -21,6 +21,7 @@
+
@@ -81,6 +82,8 @@
+
+
diff --git a/config/xml_schemas/config_machines.xsd b/config/xml_schemas/config_machines.xsd
index 837edbd62c5..2a6f5ad1f0f 100644
--- a/config/xml_schemas/config_machines.xsd
+++ b/config/xml_schemas/config_machines.xsd
@@ -192,10 +192,7 @@
-
-
-
-
+
diff --git a/config/xml_schemas/env_mach_specific.xsd b/config/xml_schemas/env_mach_specific.xsd
index 1c79b6c8ae7..faa8fc01f6a 100644
--- a/config/xml_schemas/env_mach_specific.xsd
+++ b/config/xml_schemas/env_mach_specific.xsd
@@ -84,7 +84,13 @@
-
+
+
+
+
+
+
+
diff --git a/doc/source/users_guide/running-a-case.rst b/doc/source/users_guide/running-a-case.rst
index d43ce642a0e..280744650a6 100644
--- a/doc/source/users_guide/running-a-case.rst
+++ b/doc/source/users_guide/running-a-case.rst
@@ -12,16 +12,16 @@ Calling **case.submit**
Before you submit the case using **case.submit**, make sure
the batch queue variables are set correctly for your run
-Those variables are contained in the file **$CASEROOT/env_batch.xml**
-under the XML ```` and ````
-elements.
+Those variables are contained in the file **$CASEROOT/env_batch.xml**
+under the XML ```` and ````
+elements.
Make sure that you have appropriate account numbers (``PROJECT``), time limits
(``JOB_WALLCLOCK_TIME``), and queue (``JOB_QUEUE``) for those groups.
Also modify **$CASEROOT/env_run.xml** for your case using :ref:`xmlchange`.
-Once you have executed **case.setup** and **case.build**, run **case.submit**
+Once you have executed **case.setup** and **case.build**, run **case.submit**
to submit the run to your machine's batch queue system.
::
@@ -40,7 +40,7 @@ When called, the **case.submit** script will:
- Run **preview_namelist**, which in turn will run each component's **buildnml**.
-- Run :ref:`check_input_data` to verify that the required
+- Run :ref:`check_input_data` to verify that the required
data are present.
- Submit the job to the batch queue, which in turn will run the **case.run** script.
@@ -52,7 +52,7 @@ Upon successful completion of the run, **case.run** will:
- Copy log files back to ``$LOGDIR``.
-- Submit the short-term archiver script **case.st_archive**
+- Submit the short-term archiver script **case.st_archive**
to the batch queue if ``$DOUT_S`` is TRUE.
- Resubmit **case.run** if ``$RESUBMIT`` > 0.
@@ -95,21 +95,21 @@ messages:
.. note::
After a successful first run, set the **env_run.xml** variable
``$CONTINUE_RUN`` to ``TRUE`` before resubmitting or the job will not
- progress.
-
+ progress.
+
You may also need to modify the **env_run.xml** variables
``$STOP_OPTION``, ``$STOP_N`` and/or ``$STOP_DATE`` as well as
``$REST_OPTION``, ``$REST_N`` and/or ``$REST_DATE``, and ``$RESUBMIT``
before resubmitting.
-See :ref:`the basic example` for a complete example
+See :ref:`the basic example` for a complete example
of how to run a case.
---------------------------------
Troubleshooting a job that fails
---------------------------------
-There are several places to look for information if a job fails.
+There are several places to look for information if a job fails.
Start with the **STDOUT** and **STDERR** file(s) in **$CASEROOT**.
If you don't find an obvious error message there, the
**$RUNDIR/$model.log.$datestamp** files will probably give you a
@@ -126,14 +126,14 @@ problems` for more information.
Input data
====================================================
-The **check_input_data** script determines if the required data files
-for your case exist on local disk in the appropriate subdirectory of
+The **check_input_data** script determines if the required data files
+for your case exist on local disk in the appropriate subdirectory of
``$DIN_LOC_ROOT``. It automatically downloads missing data.
The required input data sets needed for each component are found in the
-**$CASEROOT/Buildconf** directory. These files are generated by a call
-to **preview_namlists** and are in turn created by each component's
-**buildnml** script. For example, for compsets consisting only of data
+**$CASEROOT/Buildconf** directory. These files are generated by a call
+to **preview_namlists** and are in turn created by each component's
+**buildnml** script. For example, for compsets consisting only of data
models (``A`` compsets), the following files are created:
::
@@ -163,12 +163,12 @@ Controlling starting, stopping and restarting a run
====================================================
The file **env_run.xml** contains variables that may be modified at
-initialization or any time during the course of a model run. Among
-other features, the variables comprise coupler namelist settings for
-the model stop time, restart frequency, coupler history frequency, and
+initialization or any time during the course of a model run. Among
+other features, the variables comprise coupler namelist settings for
+the model stop time, restart frequency, coupler history frequency, and
a flag to determine if the run should be flagged as a continuation run.
-At a minimum, you will need to set the variables ``$STOP_OPTION`` and
+At a minimum, you will need to set the variables ``$STOP_OPTION`` and
``$STOP_N``. Other driver namelist settings then will have consistent and
reasonable default values. The default settings guarantee that
restart files are produced at the end of the model run.
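+
+For example, to run the model for six model months (hypothetical values)::
+
+   ./xmlchange STOP_OPTION=nmonths,STOP_N=6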
@@ -203,10 +203,10 @@ The case initialization type is set using the ``$RUN_TYPE`` variable in
``startup``
In a startup run (the default), all components are initialized using
- baseline states. These states are set independently by each component
- and can include the use of restart files, initial files, external
+ baseline states. These states are set independently by each component
+ and can include the use of restart files, initial files, external
observed data files, or internal initialization (that is, a "cold start").
- In a startup run, the coupler sends the start date to the components
+ In a startup run, the coupler sends the start date to the components
at initialization. In addition, the coupler does not need an input data file.
In a startup initialization, the ocean model does not start until the second
ocean coupling step.
@@ -231,14 +231,14 @@ The case initialization type is set using the ``$RUN_TYPE`` variable in
type of run. ``$RUN_REFCASE`` and ``$RUN_REFDATE`` are required for
branch runs. To set up a branch run, locate the restart tar file or
restart directory for ``$RUN_REFCASE`` and ``$RUN_REFDATE`` from a
- previous run, then place those files in the ``$RUNDIR`` directory.
+ previous run, then place those files in the ``$RUNDIR`` directory.
See :ref:`setting up a branch
run`.
``hybrid``
A hybrid run is initialized like a startup but it uses
initialization data sets from a previous case. It is similar
- to a branch run with relaxed restart constraints.
+ to a branch run with relaxed restart constraints.
A hybrid run allows users to bring together
combinations of initial/restart files from a previous case
(specified by ``$RUN_REFCASE``) at a given model output date
@@ -259,10 +259,10 @@ run, the ``$CONTINUE_RUN`` variable is set to TRUE, and the model
restarts exactly using input files in a case, date, and bit-for-bit
continuous fashion.
-The variable ``$RUN_STARTDATE`` is the start date (in yyyy-mm-dd format)
-for either a startup run or a hybrid run. If the run is targeted to be
+The variable ``$RUN_STARTDATE`` is the start date (in yyyy-mm-dd format)
+for either a startup run or a hybrid run. If the run is targeted to be
a hybrid or branch run, you must specify values for ``$RUN_REFCASE`` and
-``$RUN_REFDATE``.
+``$RUN_REFDATE``.
.. _controlling-output-data:
@@ -303,13 +303,13 @@ Also:
- Users generally should turn off short-term archiving when developing new code.
-Standard output generated from each component is saved in ``$RUNDIR``
-in a *log file*. Each time the model is run, a single coordinated datestamp
+Standard output generated from each component is saved in ``$RUNDIR``
+in a *log file*. Each time the model is run, a single coordinated datestamp
is incorporated into the filename of each output log file.
The run script generates the datestamp in the form YYMMDD-hhmmss, indicating
the year, month, day, hour, minute and second that the run began
-(ocn.log.040526-082714, for example). Log files are copied to a user-specified
-directory using the variable ``$LOGDIR`` in **env_run.xml**. The default is a "logs"
+(ocn.log.040526-082714, for example). Log files are copied to a user-specified
+directory using the variable ``$LOGDIR`` in **env_run.xml**. The default is a "logs"
subdirectory in the **$CASEROOT** directory.
By default, each component also periodically writes history files
@@ -339,7 +339,7 @@ for a description of output data filenames.
Restarting a run
======================
-Active components (and some data components) write restart files
+Active components (and some data components) write restart files
at intervals that are dictated by the driver via the setting of the
``$REST_OPTION`` and ``$REST_N`` variables in **env_run.xml**. Restart
files allow the model to stop and then start again with bit-for-bit
@@ -347,15 +347,15 @@ exact capability; the model output is exactly the same as if the model
had not stopped. The driver coordinates the writing of restart
files as well as the time evolution of the model.
-Runs that are initialized as branch or hybrid runs require
-restart/initial files from previous model runs (as specified by the
+Runs that are initialized as branch or hybrid runs require
+restart/initial files from previous model runs (as specified by the
variables ``$RUN_REFCASE`` and ``$RUN_REFDATE``). Pre-stage these
-iles to the case ``$RUNDIR`` (normally ``$EXEROOT/run``)
-before the model run starts. Normally this is done by copying the contents
+files to the case ``$RUNDIR`` (normally ``$EXEROOT/run``)
+before the model run starts. Normally this is done by copying the contents
of the relevant **$RUN_REFCASE/rest/$RUN_REFDATE.00000** directory.
Whenever a component writes a restart file, it also writes a restart
-pointer file in the format **rpointer.$component**. Upon a restart, each
+pointer file in the format **rpointer.$component**. Upon a restart, each
component reads the pointer file to determine which file to read in
order to continue the run. These are examples of pointer files created
for a component set using full active model components.
@@ -382,17 +382,17 @@ Backing up to a previous restart
---------------------------------
If a run encounters problems and crashes, you will normally have to
-back up to a previous restart. If short-term archiving is enabled,
+back up to a previous restart. If short-term archiving is enabled,
find the latest **$DOUT_S_ROOT/rest/yyyy-mm-dd-ssss/** directory
and copy its contents into your run directory (``$RUNDIR``).
-Make sure that the new restart pointer files overwrite older files in
-in ``$RUNDIR`` or the job may not restart in the correct place. You can
+Make sure that the new restart pointer files overwrite older files
+in ``$RUNDIR`` or the job may not restart in the correct place. You can
then continue the run using the new restarts.
Occasionally, when a run has problems restarting, it is because the
-pointer and restart files are out of sync. The pointer files
-are text files that can be edited to match the correct dates
+pointer and restart files are out of sync. The pointer files
+are text files that can be edited to match the correct dates
of the restart and history files. All of the restart files should
have the same date.
@@ -400,9 +400,9 @@ have the same date.
Archiving model output data
============================
-When a job has run successfully, the component log files are copied
-to the directory specified by the **env_run.xml** variable ``$LOGDIR``,
-which is set to **$CASEROOT/logs** by default. If the job aborts, log
+When a job has run successfully, the component log files are copied
+to the directory specified by the **env_run.xml** variable ``$LOGDIR``,
+which is set to **$CASEROOT/logs** by default. If the job aborts, log
files are NOT copied out of the ``$RUNDIR`` directory.
The output data flow from a successful run depends on whether or not
@@ -421,7 +421,7 @@ Short-term archiving
If short-term archiving is enabled, component output files are moved
to the short-term archiving area on local disk, as specified by
-``$DOUT_S_ROOT``. The directory normally is **$EXEROOT/../archive/$CASE.**
+``$DOUT_S_ROOT``. The directory normally is **$EXEROOT/../archive/$CASE**
and has the following directory structure: ::
rest/yyyy-mm-dd-sssss/
@@ -444,7 +444,7 @@ The **rest/** subdirectory contains a subset of directories that each contains
a *consistent* set of restart files, initial files and rpointer
files. Each subdirectory has a unique name corresponding to the model
year, month, day and seconds into the day when the files were created.
-The contents of any restart directory can be used to create a branch run
+The contents of any restart directory can be used to create a branch run
or a hybrid run or to back up to a previous restart date.
---------------------
@@ -457,4 +457,56 @@ long-term archiver tool that supported mass tape storage and HPSS systems.
However, with the industry migration away from tape archives, it is no longer
feasible for CIME to support all the possible archival schemes available.
+===============================================
+Data Assimilation and other External Processing
+===============================================
+
+CIME provides a capability to run a task on the compute nodes either
+before or after the model run. CIME also provides a data assimilation
+capability which alternates running the model and a user-defined task
+for a user-determined number of cycles.
+
+
+-------------------------
+Pre- and post-run scripts
+-------------------------
+
+Variables ``PRERUN_SCRIPT`` and ``POSTRUN_SCRIPT`` can each be used to name
+a script which should be executed immediately before the start or
+immediately following the completion of the CESM executable within the
+batch environment. The script is expected to be found in the case directory
+and will receive one argument, the full path to that
+directory. If the script is written in python and contains a
+subroutine with the same name as the script, it will be called as a
+subroutine rather than as an external shell script.
+
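+For example, a pre-run script could be enabled with :ref:`xmlchange`
+(hypothetical script name; the script is assumed to live in the case
+directory):
+
+::
+
+   ./xmlchange PRERUN_SCRIPT=myprerun.py
+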
+-------------------------
+Data assimilation scripts
+-------------------------
+
+Variables ``DATA_ASSIMILATION``, ``DATA_ASSIMILATION_SCRIPT``, and
+``DATA_ASSIMILATION_CYCLES`` may also be used to externally control
+model evolution. If ``DATA_ASSIMILATION`` is true, then after the model
+completes, the ``DATA_ASSIMILATION_SCRIPT`` will be run and the model
+started again, repeating for ``DATA_ASSIMILATION_CYCLES`` cycles. The
+script is expected to be found in the case directory and will receive
+two arguments: the full path to that directory and the cycle number.
+If the script is written in python and contains a subroutine with the
+same name as the script, it will be called as a subroutine rather than
+as an external shell script.
+
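+As a hypothetical example, the following settings would alternate the model
+run with a script named da_script.py (in the case directory) for five cycles:
+
+::
+
+   ./xmlchange DATA_ASSIMILATION=TRUE
+   ./xmlchange DATA_ASSIMILATION_CYCLES=5
+   ./xmlchange DATA_ASSIMILATION_SCRIPT=da_script.py
+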
+A simple example pre-run script:
+
+::
+
+ #!/usr/bin/env python
+ import sys
+ from CIME.case import Case
+
+ def myprerun(caseroot):
+ with Case(caseroot) as case:
+ print ("rundir is ",case.get_value("RUNDIR"))
+ if __name__ == "__main__":
+ caseroot = sys.argv[1]
+ myprerun(caseroot)
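+
+Because the example above is written in python and defines a function named
+to match the script (here assumed to be saved as myprerun.py in the case
+directory), it would be called as a subroutine rather than run as an
+external shell script.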
diff --git a/scripts/Tools/case.build b/scripts/Tools/case.build
index 4ac984e3745..4bd106ef7de 100755
--- a/scripts/Tools/case.build
+++ b/scripts/Tools/case.build
@@ -61,12 +61,19 @@ OR
mutex_group.add_argument("--clean-all", action="store_true",
help="clean all objects including sharedlibobjects that may be used by other builds")
+ mutex_group.add_argument("--clean-depends", nargs="*", choices=comps+["csmshare"],
+ help="clean Depends and Srcfiles only - "
+ "this allows you to rebuild after adding new "
+ "files in the source tree or in SourceMods")
+
args = CIME.utils.parse_args_and_handle_standard_logging_options(args, parser)
+ clean_depends = args.clean_depends if args.clean_depends is None or len(args.clean_depends) else comps
+
cleanlist = args.clean if args.clean is None or len(args.clean) else comps
buildlist = None if args.build is None or len(args.build) == 0 else args.build
- return args.caseroot, args.sharedlib_only, args.model_only, cleanlist, args.clean_all, buildlist
+ return args.caseroot, args.sharedlib_only, args.model_only, cleanlist, args.clean_all, buildlist, clean_depends
###############################################################################
def _main_func(description):
@@ -75,14 +82,14 @@ def _main_func(description):
test_results = doctest.testmod(verbose=True)
sys.exit(1 if test_results.failed > 0 else 0)
- caseroot, sharedlib_only, model_only, cleanlist, clean_all, buildlist = parse_command_line(sys.argv, description)
+ caseroot, sharedlib_only, model_only, cleanlist, clean_all, buildlist, clean_depends = parse_command_line(sys.argv, description)
success = True
with Case(caseroot, read_only=False) as case:
testname = case.get_value('TESTCASE')
- if cleanlist is not None or clean_all:
- build.clean(case, cleanlist, clean_all)
+ if cleanlist is not None or clean_all or clean_depends is not None:
+ build.clean(case, cleanlist=cleanlist, clean_all=clean_all, clean_depends=clean_depends)
elif(testname is not None):
logging.warning("Building test for {} in directory {}".format(testname,
caseroot))
diff --git a/scripts/Tools/case.submit b/scripts/Tools/case.submit
index 1a8a42bffc4..82aa756377f 100755
--- a/scripts/Tools/case.submit
+++ b/scripts/Tools/case.submit
@@ -20,6 +20,9 @@ OR
\033[1mEXAMPLES:\033[0m
\033[1;32m# Setup case \033[0m
> {0}
+
+ \033[1;32m# Setup case, request mail at job begin and job end \033[0m
+ > {0} -m begin,end
""".format(os.path.basename(args[0])),
description=description,
formatter_class=argparse.ArgumentDefaultsHelpFormatter
@@ -50,17 +53,15 @@ OR
parser.add_argument("--skip-preview-namelist", action="store_true",
help="Skip calling preview-namelist during case.run")
- parser.add_argument("-M", "--mail-user", help="email to be used for batch notification.")
-
- parser.add_argument("-m", "--mail-type", default="never",
- choices=("never", "all", "begin", "end", "fail"),
- help="when to send user email.")
+ CIME.utils.add_mail_type_args(parser)
parser.add_argument("-a", "--batch-args",
help="Used to pass additional arguments to batch system. ")
args = CIME.utils.parse_args_and_handle_standard_logging_options(args, parser)
+ CIME.utils.resolve_mail_type_args(args)
+
return args.test, args.caseroot, args.job, args.no_batch, args.prereq, \
args.resubmit, args.skip_preview_namelist, args.mail_user, args.mail_type, \
args.batch_args
diff --git a/scripts/Tools/case_diff b/scripts/Tools/case_diff
index c9ef504f1a2..fc9d81feeb3 100755
--- a/scripts/Tools/case_diff
+++ b/scripts/Tools/case_diff
@@ -132,7 +132,7 @@ def _main_func(description):
for xml_normalize_field in xml_normalize_fields:
val1 = run_cmd_no_fail("./xmlquery --value {}".format(xml_normalize_field), from_dir=case1)
val2 = run_cmd_no_fail("./xmlquery --value {}".format(xml_normalize_field), from_dir=case2)
- if "/" in val1:
+ if os.sep in val1:
repls[os.path.normpath(val2)] = os.path.normpath(val1)
else:
repls[val2] = val1
diff --git a/scripts/Tools/xmlchange b/scripts/Tools/xmlchange
index 5ca3a5ea8e9..3901b43376b 100755
--- a/scripts/Tools/xmlchange
+++ b/scripts/Tools/xmlchange
@@ -135,10 +135,10 @@ def xmlchange(caseroot, listofsettings, xmlfile, xmlid, xmlval, subgroup,
# store these choices in USER_REQUESTED_WALLTIME and/or USER_REQUESTED_QUEUE so they
# are not lost if the user does a case.setup --reset.
if xmlid == "JOB_WALLCLOCK_TIME":
- case.set_value("USER_REQUESTED_WALLTIME", xmlval)
+ case.set_value("USER_REQUESTED_WALLTIME", xmlval, subgroup)
if xmlid == "JOB_QUEUE":
- case.set_value("USER_REQUESTED_QUEUE", xmlval)
+ case.set_value("USER_REQUESTED_QUEUE", xmlval, subgroup)
if not noecho:
argstr = ""
diff --git a/scripts/create_newcase b/scripts/create_newcase
index 87fa9225817..455ed30f8bf 100755
--- a/scripts/create_newcase
+++ b/scripts/create_newcase
@@ -128,6 +128,9 @@ OR
parser.add_argument("-i", "--input-dir",
help="Use a non-default location for input files")
+ parser.add_argument("--driver",
+ help="Select a driver. No selection implies mct driver")
+
args = CIME.utils.parse_args_and_handle_standard_logging_options(args, parser)
if args.srcroot is not None:
@@ -162,7 +165,7 @@ OR
args.user_mods_dir, args.pesfile, \
args.user_grid, args.gridfile, args.srcroot, args.test, args.multi_driver, \
args.ninst, args.walltime, args.queue, args.output_root, args.script_root, \
- run_unsupported, args.answer, args.input_dir
+ run_unsupported, args.answer, args.input_dir, args.driver
###############################################################################
def _main_func(description):
@@ -174,7 +177,7 @@ def _main_func(description):
user_mods_dir, pesfile, \
user_grid, gridfile, srcroot, test, multi_driver, ninst, walltime, \
queue, output_root, script_root, run_unsupported, \
- answer, input_dir = parse_command_line(sys.argv, cimeroot, description)
+ answer, input_dir, driver = parse_command_line(sys.argv, cimeroot, description)
if script_root is None:
caseroot = os.path.abspath(casename)
@@ -199,7 +202,7 @@ def _main_func(description):
multi_driver=multi_driver, ninst=ninst, test=test,
walltime=walltime, queue=queue, output_root=output_root,
run_unsupported=run_unsupported, answer=answer,
- input_dir=input_dir)
+ input_dir=input_dir, driver=driver)
###############################################################################
diff --git a/scripts/create_test b/scripts/create_test
index cb4e5919739..08f2475bfcb 100755
--- a/scripts/create_test
+++ b/scripts/create_test
@@ -12,7 +12,7 @@ from Tools.standard_script_setup import *
import update_acme_tests
from CIME.test_scheduler import TestScheduler, RUN_PHASE
-from CIME.utils import expect, convert_to_seconds, compute_total_time, convert_to_babylonian_time, run_cmd_no_fail
+from CIME.utils import expect, convert_to_seconds, compute_total_time, convert_to_babylonian_time, run_cmd_no_fail, get_cime_config
from CIME.XML.machines import Machines
from CIME.case import Case
@@ -74,6 +74,8 @@ OR
CIME.utils.setup_standard_logging_options(parser)
+ config = get_cime_config()
+
parser.add_argument("--no-run", action="store_true",
help="Do not run generated tests")
@@ -86,35 +88,56 @@ OR
parser.add_argument("-u", "--use-existing", action="store_true",
help="Use pre-existing case directories they will pick up at the latest PEND state or re-run the first failed state. Requires test-id")
+ default = get_default_setting(config, "SAVE_TIMING", False, check_main=False)
+
parser.add_argument("--save-timing", action="store_true",
+ default=default,
help="Enable archiving of performance data.")
parser.add_argument("--no-batch", action="store_true",
help="Do not submit jobs to batch system, run locally."
" If false, will default to machine setting.")
+ default = get_default_setting(config, "SINGLE_SUBMIT", False, check_main=False)
+
parser.add_argument("--single-submit", action="store_true",
+ default=default,
help="Use a single interactive allocation to run all the tests. "
"Can drastically reduce queue waiting. Only makes sense on batch machines.")
+ default = get_default_setting(config, "TEST_ROOT", None, check_main=False)
+
parser.add_argument("-r", "--test-root",
+ default=default,
help="Where test cases will be created."
" Will default to output root as defined in the config_machines file")
+ default = get_default_setting(config, "OUTPUT_ROOT", None, check_main=False)
+
parser.add_argument("--output-root",
+ default=default,
help="Where the case output is written.")
+ default = get_default_setting(config, "BASELINE_ROOT", None, check_main=False)
+
parser.add_argument("--baseline-root",
+ default=default,
help="Specifies an root directory for baseline"
"datasets used for Bit-for-bit generate/compare"
"testing.")
+ default = get_default_setting(config, "CLEAN", False, check_main=False)
+
parser.add_argument("--clean", action="store_true",
+ default=default,
help="Specifies if tests should be cleaned after run. If set, "
"all object executables, and data files will"
" be removed after tests are run")
+ default = get_default_setting(config, "MACHINE", None, check_main=True)
+
parser.add_argument("-m", "--machine",
+ default=default,
help="The machine for which to build tests, this machine must be defined"
" in the config_machines.xml file for the given model. "
"Default is to match the name of the machine in the test name or "
@@ -122,7 +145,10 @@ OR
"NODENAME_REGEX field in config_machines.xml. This option is highly "
"unsafe and should only be used if you know what you're doing.")
+ default = get_default_setting(config, "MPILIB", None, check_main=True)
+
parser.add_argument("--mpilib",
+ default=default,
help="Specify the mpilib. "
"To see list of supported mpilibs for each machine, use the utility query_config in this directory. "
"The default is the first listing in MPILIBS in config_machines.xml")
@@ -171,7 +197,10 @@ OR
help="While testing, generate baselines; "
"this can also be done after the fact with bless_test_results")
+ default = get_default_setting(config, "COMPILER", None, check_main=True)
+
parser.add_argument("--compiler",
+ default=default,
help="Compiler to use to build cime. Default will be the name in"
" the Testnames or the default defined for the machine.")
@@ -193,49 +222,83 @@ OR
"(All sorts of problems can occur if you use the same test-id twice "
"on the same file system, even if the test lists are completely different.)")
- parser.add_argument("-j", "--parallel-jobs", type=int, default=None,
+ default = get_default_setting(config, "PARALLEL_JOBS", None, check_main=False)
+
+ parser.add_argument("-j", "--parallel-jobs", type=int, default=default,
help="Number of tasks create_test should perform simultaneously. Default "
"will be min(num_cores, num_tests).")
- parser.add_argument("--proc-pool", type=int, default=None,
+ default = get_default_setting(config, "PROC_POOL", None, check_main=False)
+
+ parser.add_argument("--proc-pool", type=int, default=default,
help="The size of the processor pool that create_test can use. Default "
"is MAX_MPITASKS_PER_NODE + 25 percent.")
- parser.add_argument("--walltime", default=os.getenv("CIME_GLOBAL_WALLTIME"),
+ default = os.getenv("CIME_GLOBAL_WALLTIME")
+ if default is None:
+ default = get_default_setting(config, "WALLTIME", None, check_main=True)
+
+ parser.add_argument("--walltime", default=default,
help="Set the wallclock limit for all tests in the suite. "
"Can use env var CIME_GLOBAL_WALLTIME to set this for all test.")
- parser.add_argument("-q", "--queue", default=None,
+ default = get_default_setting(config, "JOB_QUEUE", None, check_main=True)
+
+ parser.add_argument("-q", "--queue", default=default,
help="Force batch system to use a certain queue")
parser.add_argument("-f", "--testfile",
help="A file containing an ascii list of tests to run")
+ default = get_default_setting(config, "ALLOW_BASELINE_OVERWRITE", False, check_main=False)
+
parser.add_argument("-o", "--allow-baseline-overwrite", action="store_true",
+ default=default,
help="If the --generate option is given, then by default "
"an attempt to overwrite an existing baseline directory "
"will raise an error. Specifying this option allows "
"existing baseline directories to be silently overwritten.")
+ default = get_default_setting(config, "WAIT", False, check_main=False)
+
parser.add_argument("--wait", action="store_true",
+ default=default,
help="On batch systems, wait for submitted jobs to complete")
- parser.add_argument("--force-procs", type=int, default=None,
+ default = get_default_setting(config, "FORCE_PROCS", None, check_main=False)
+
+ parser.add_argument("--force-procs", type=int, default=default,
help="For all tests to run with this number of processors")
- parser.add_argument("--force-threads", type=int, default=None,
+ default = get_default_setting(config, "FORCE_THREADS", None, check_main=False)
+
+ parser.add_argument("--force-threads", type=int, default=default,
help="For all tests to run with this number of threads")
+ default = get_default_setting(config, "INPUT_DIR", None, check_main=True)
+
parser.add_argument("-i", "--input-dir",
+ default=default,
help="Use a non-default location for input files")
- parser.add_argument("--pesfile",
+ default = get_default_setting(config, "PESFILE", None, check_main=True)
+
+ parser.add_argument("--pesfile",default=default,
help="Full pathname of an optional pes specification "
"file. The file can follow either the config_pes.xml or "
"the env_mach_pes.xml format.")
+ default = get_default_setting(config, "RETRY", 0, check_main=False)
+
+ parser.add_argument("--retry", type=int, default=default,
+ help="Automatically retry failed tests. >0 implies --wait")
+
+ CIME.utils.add_mail_type_args(parser)
+
args = CIME.utils.parse_args_and_handle_standard_logging_options(args, parser)
+ CIME.utils.resolve_mail_type_args(args)
+
# generate and compare flags may not point to the same directory
if model == "cesm":
if args.generate is not None:
@@ -257,6 +320,9 @@ OR
expect(not (args.namelists_only and not (args.generate or args.compare)),
"Must provide either --compare or --generate with --namelists-only")
+ if args.retry > 0:
+ args.wait = True
+
if args.parallel_jobs is not None:
expect(args.parallel_jobs > 0,
"Invalid value for parallel_jobs: %d" % args.parallel_jobs)
@@ -379,7 +445,21 @@ OR
args.namelists_only, args.project, \
args.test_id, args.parallel_jobs, args.walltime, \
args.single_submit, args.proc_pool, args.use_existing, args.save_timing, args.queue, \
- args.allow_baseline_overwrite, args.output_root, args.wait, args.force_procs, args.force_threads, args.mpilib, args.input_dir, args.pesfile
+ args.allow_baseline_overwrite, args.output_root, args.wait, args.force_procs, args.force_threads, args.mpilib, args.input_dir, args.pesfile, args.retry, args.mail_user, args.mail_type
+
+
+def get_default_setting(config, varname, default_if_not_found, check_main=False):
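+    """
+    Look up varname in the [create_test] section of the CIME config file,
+    falling back to the [main] section when check_main is True; return
+    default_if_not_found if the setting is not present in either place.
+    """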
+ if config.has_option("create_test",varname):
+ default = config.get("create_test",varname)
+ elif check_main and config.has_option("main", varname):
+ default = config.get("main",varname)
+ else:
+ default=default_if_not_found
+ return default
+
+
###############################################################################
def single_submit_impl(machine_name, test_id, proc_pool, _, args, job_cost_map, wall_time, test_root):
@@ -475,7 +555,7 @@ def single_submit_impl(machine_name, test_id, proc_pool, _, args, job_cost_map,
def create_test(test_names, test_data, compiler, machine_name, no_run, no_build, no_setup, no_batch, test_root,
baseline_root, clean, baseline_cmp_name, baseline_gen_name, namelists_only, project, test_id, parallel_jobs,
walltime, single_submit, proc_pool, use_existing, save_timing, queue, allow_baseline_overwrite, output_root, wait,
- force_procs, force_threads, mpilib, input_dir, pesfile):
+ force_procs, force_threads, mpilib, input_dir, pesfile, mail_user, mail_type):
###############################################################################
impl = TestScheduler(test_names, test_data=test_data,
no_run=no_run, no_build=no_build, no_setup=no_setup, no_batch=no_batch,
@@ -488,7 +568,7 @@ def create_test(test_names, test_data, compiler, machine_name, no_run, no_build,
proc_pool=proc_pool, use_existing=use_existing, save_timing=save_timing,
queue=queue, allow_baseline_overwrite=allow_baseline_overwrite,
output_root=output_root, force_procs=force_procs, force_threads=force_threads,
- mpilib=mpilib, input_dir=input_dir, pesfile=pesfile)
+ mpilib=mpilib, input_dir=input_dir, pesfile=pesfile, mail_user=mail_user, mail_type=mail_type)
success = impl.run_tests(wait=wait)
@@ -514,7 +594,7 @@ def create_test(test_names, test_data, compiler, machine_name, no_run, no_build,
# Create submit script
single_submit_impl(machine_name, test_id, proc_pool, project, sys.argv[1:], job_cost_map, walltime, test_root)
- return 0 if success else CIME.utils.TESTS_FAILED_ERR_CODE
+ return success
###############################################################################
def _main_func(description):
@@ -529,13 +609,26 @@ def _main_func(description):
test_names, test_data, compiler, machine_name, no_run, no_build, no_setup, no_batch, \
test_root, baseline_root, clean, baseline_cmp_name, baseline_gen_name, namelists_only, \
project, test_id, parallel_jobs, walltime, single_submit, proc_pool, use_existing, \
- save_timing, queue, allow_baseline_overwrite, output_root, wait, force_procs, force_threads, mpilib, input_dir, pesfile \
- = parse_command_line(sys.argv, description)
-
- sys.exit(create_test(test_names, test_data, compiler, machine_name, no_run, no_build, no_setup, no_batch, test_root,
- baseline_root, clean, baseline_cmp_name, baseline_gen_name, namelists_only,
- project, test_id, parallel_jobs, walltime, single_submit, proc_pool, use_existing, save_timing,
- queue, allow_baseline_overwrite, output_root, wait, force_procs, force_threads, mpilib, input_dir, pesfile))
+ save_timing, queue, allow_baseline_overwrite, output_root, wait, force_procs, force_threads, mpilib, input_dir, pesfile, \
+ retry, mail_user, mail_type = \
+ parse_command_line(sys.argv, description)
+
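+    # Retry failed runs up to --retry times; after the first attempt,
+    # re-runs reuse the existing case directories (use_existing=True).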
+ success = False
+ run_count = 0
+ while not success and run_count <= retry:
+ use_existing = use_existing if run_count == 0 else True
+ success = create_test(test_names, test_data, compiler, machine_name, no_run, no_build, no_setup, no_batch, test_root,
+ baseline_root, clean, baseline_cmp_name, baseline_gen_name, namelists_only,
+ project, test_id, parallel_jobs, walltime, single_submit, proc_pool, use_existing, save_timing,
+ queue, allow_baseline_overwrite, output_root, wait, force_procs, force_threads, mpilib, input_dir, pesfile,
+ mail_user, mail_type)
+ run_count += 1
+
+ # For testing only
+ os.environ["TESTBUILDFAIL_PASS"] = "True"
+ os.environ["TESTRUNFAIL_PASS"] = "True"
+
+ sys.exit(0 if success else CIME.utils.TESTS_FAILED_ERR_CODE)
###############################################################################
diff --git a/scripts/lib/CIME/SystemTests/dae.py b/scripts/lib/CIME/SystemTests/dae.py
index cdd062c9c04..ca825d84fbd 100644
--- a/scripts/lib/CIME/SystemTests/dae.py
+++ b/scripts/lib/CIME/SystemTests/dae.py
@@ -7,6 +7,7 @@
import os.path
import logging
import glob
+import gzip
import CIME.XML.standard_module_setup as sms
from CIME.SystemTests.system_tests_compare_two import SystemTestsCompareTwo
@@ -93,12 +94,13 @@ def run_phase(self): # pylint: disable=arguments-differ
for fname in da_files:
found_caseroot = False
found_cycle = False
- with open(fname) as dfile:
- for line in dfile:
- expect(line[0:5] != 'ERROR', "ERROR, error line found in {}".format(fname))
- if line[0:8] == 'caseroot':
+ with gzip.open(fname, "r") as dfile:
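+            # the log files are gzip-compressed; decode each line so the string
+            # checks below behave the same under python2 and python3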
+ for bline in dfile:
+ line = bline.decode("utf-8")
+                expect('ERROR' not in line, "ERROR, error line {} found in {}".format(line, fname))
+ if 'caseroot' in line[0:8]:
found_caseroot = True
- elif line[0:5] == 'cycle':
+ elif 'cycle' in line[0:5]:
found_cycle = True
expect(int(line[7:]) == cycle_num,
"ERROR: Wrong cycle ({:d}) found in {} (expected {:d})".format(int(line[7:]), fname, cycle_num))
diff --git a/scripts/lib/CIME/SystemTests/seq.py b/scripts/lib/CIME/SystemTests/seq.py
index f09bb9d03d5..357dfe7f3a2 100644
--- a/scripts/lib/CIME/SystemTests/seq.py
+++ b/scripts/lib/CIME/SystemTests/seq.py
@@ -1,44 +1,33 @@
"""
-CIME smoke test This class inherits from SystemTestsCommon
+sequencing bit-for-bit test (10 day seq, conc tests)
"""
from CIME.XML.standard_module_setup import *
-from CIME.SystemTests.system_tests_common import SystemTestsCommon
+from CIME.SystemTests.system_tests_compare_two import SystemTestsCompareTwo
from CIME.case_setup import case_setup
from CIME.check_lockedfiles import *
import shutil
logger = logging.getLogger(__name__)
-class SEQ(SystemTestsCommon):
+class SEQ(SystemTestsCompareTwo):
def __init__(self, case):
"""
initialize an object interface to file env_test.xml in the case directory
"""
- SystemTestsCommon.__init__(self, case, expected=["TEST"])
+ SystemTestsCompareTwo.__init__(self,
+ case,
+ separate_builds=True,
+ run_two_suffix="seq",
+ run_one_description = "base",
+ run_two_description = "sequence")
- def build_phase(self, sharedlib_only=False, model_only=False):
- """
- Build two cases.
- """
- # Build the default configuration
- self.build_indv(sharedlib_only=sharedlib_only, model_only=model_only)
- if sharedlib_only:
- return
-
- # Build the model with all components with different rootpes
- exeroot = self._case.get_value("EXEROOT")
- cime_model = self._case.get_value("MODEL")
- shutil.move("{}/{}.exe".format(exeroot,cime_model),
- "{}/{}.exe.SEQ1".format(exeroot,cime_model))
- any_changes = False
- machpes1 = "env_mach_pes.SEQ1.xml"
- if is_locked(machpes1):
- restore(machpes1, newname="env_mach_pes.xml")
- else:
- lock_file("env_mach_pes.xml", newname=machpes1)
+ def _case_one_setup(self):
+ pass
+ def _case_two_setup(self):
comp_classes = self._case.get_values("COMP_CLASSES")
+ any_changes = False
for comp in comp_classes:
any_changes |= self._case.get_value("ROOTPE_{}".format(comp)) != 0
if any_changes:
@@ -61,54 +50,3 @@ def build_phase(self, sharedlib_only=False, model_only=False):
self._case.flush()
case_setup(self._case, test_mode=True, reset=True)
- self.clean_build()
- self.build_indv(sharedlib_only=sharedlib_only, model_only=model_only)
- shutil.move("{}/{}.exe".format(exeroot,cime_model),
- "{}/{}.exe.SEQ2".format(exeroot,cime_model))
- lock_file("env_mach_pes.xml", newname="env_mach_pes.SEQ2.xml")
-
- def run_phase(self):
- # Move to config_tests.xml once that's ready.
- self._case.set_value("CONTINUE_RUN", False)
- self._case.set_value("REST_OPTION", "never")
- self._case.set_value("HIST_OPTION", "$STOP_OPTION")
- self._case.set_value("HIST_N", "$STOP_N")
- self._case.flush()
-
- stop_n = self._case.get_value("STOP_N")
- stop_option = self._case.get_value("STOP_OPTION")
- exeroot = self._case.get_value("EXEROOT")
- cime_model = self._case.get_value("MODEL")
-
- #
- # do an initial run test with default layout
- #
- logger.info("doing a {:d} {} initial test with default layout".format(stop_n, stop_option))
-
- shutil.copy("{}/{}.exe.SEQ1".format(exeroot,cime_model),
- "{}/{}.exe".format(exeroot,cime_model))
- restore("env_mach_pes.SEQ1.xml", newname="env_mach_pes.xml")
-
- # update the pelayout settings for this run
- self._case.read_xml()
-
- case_setup(self._case, test_mode=True, reset=True)
- self._case.set_value("BUILD_COMPLETE", True) # rootpe changes should not require a rebuild
-
- self.run_indv()
-
- restore("env_mach_pes.SEQ2.xml", newname="env_mach_pes.xml")
-
- os.remove("{}/{}.exe".format(exeroot,cime_model))
- shutil.copy("{}/{}.exe.SEQ1".format(exeroot,cime_model),
- "{}/{}.exe".format(exeroot,cime_model))
-
- logger.info("doing a second {:d} {} test with rootpes set to zero".format(stop_n, stop_option))
- # update the pelayout settings for this run
- self._case.read_xml()
-
- case_setup(self._case, test_mode=True, reset=True)
- self._case.set_value("BUILD_COMPLETE", True) # rootpe changes should not require a rebuild
-
- self.run_indv(suffix="seq")
- self._component_compare_test("base", "seq")
diff --git a/scripts/lib/CIME/SystemTests/system_tests_common.py b/scripts/lib/CIME/SystemTests/system_tests_common.py
index 06991d1fca2..f53698bafea 100644
--- a/scripts/lib/CIME/SystemTests/system_tests_common.py
+++ b/scripts/lib/CIME/SystemTests/system_tests_common.py
@@ -244,7 +244,7 @@ def _coupler_log_indicates_run_complete(self):
allgood = allgood - 1
except BaseException as e:
msg = e.__str__()
-
+
logger.info("{} is not compressed, assuming run failed {}".format(cpllog, msg))
return allgood==0
@@ -404,12 +404,13 @@ def _compare_baseline(self):
blmem = 0 if blmem == [] else blmem[-1][1]
curmem = memlist[-1][1]
diff = (curmem-blmem)/blmem
- if(diff < 0.1):
+ if diff < 0.1 and self._test_status.get_status(MEMCOMP_PHASE) is None:
self._test_status.set_status(MEMCOMP_PHASE, TEST_PASS_STATUS)
- else:
+ elif self._test_status.get_status(MEMCOMP_PHASE) != TEST_FAIL_STATUS:
comment = "Error: Memory usage increase > 10% from baseline"
self._test_status.set_status(MEMCOMP_PHASE, TEST_FAIL_STATUS, comments=comment)
append_testlog(comment)
+
# compare throughput to baseline
current = self._get_throughput(cpllog)
baseline = self._get_throughput(baselog)
@@ -420,10 +421,10 @@ def _compare_baseline(self):
if tolerance is None:
tolerance = 0.25
expect(tolerance > 0.0, "Bad value for throughput tolerance in test")
- if diff < tolerance:
+ if diff < tolerance and self._test_status.get_status(THROUGHPUT_PHASE) is None:
self._test_status.set_status(THROUGHPUT_PHASE, TEST_PASS_STATUS)
- else:
- comment = "Error: Computation time increase > %f pct from baseline" % tolerance*100
+ elif self._test_status.get_status(THROUGHPUT_PHASE) != TEST_FAIL_STATUS:
+ comment = "Error: Computation time increase > {:d} pct from baseline".format(int(tolerance*100))
self._test_status.set_status(THROUGHPUT_PHASE, TEST_FAIL_STATUS, comments=comment)
append_testlog(comment)
diff --git a/scripts/lib/CIME/SystemTests/system_tests_compare_two.py b/scripts/lib/CIME/SystemTests/system_tests_compare_two.py
index b5ed9d04c96..387d0bf76c3 100644
--- a/scripts/lib/CIME/SystemTests/system_tests_compare_two.py
+++ b/scripts/lib/CIME/SystemTests/system_tests_compare_two.py
@@ -186,8 +186,11 @@ def build_phase(self, sharedlib_only=False, model_only=False):
# to share the sharedlibroot area with case1 so we can reuse
# pieces of the build from there.
if get_model() != "acme":
+ # We need to turn off this change for ACME because it breaks
+ # the MPAS build system
self._case2.set_value("SHAREDLIBROOT",
self._case1.get_value("SHAREDLIBROOT"))
+
self.build_indv(sharedlib_only=sharedlib_only, model_only=model_only)
else:
self._activate_case1()
@@ -279,9 +282,13 @@ def _get_output_root2(self):
Assumes that self._case1 is already set to point to the case1 object
"""
- # Since case 2 has the same name as case1 its CIME_OUTPUT_ROOT must also be different
+ # Since case2 has the same name as case1, its CIME_OUTPUT_ROOT
+ # must also be different, so that anything put in
+ # $CIME_OUTPUT_ROOT/$CASE/ is not accidentally shared between
+ # case1 and case2. (Currently nothing is placed here, but this
+ # helps prevent future problems.)
output_root2 = os.path.join(self._case1.get_value("CIME_OUTPUT_ROOT"),
- self._case1.get_value("CASE"), "case2")
+ self._case1.get_value("CASE"), "case2_output_root")
return output_root2
def _get_case2_exeroot(self):
@@ -291,17 +298,13 @@ def _get_case2_exeroot(self):
Returns None if we should use the default value of exeroot.
"""
if self._separate_builds:
- # Put the case2 bld directory directly under the case2
- # CIME_OUTPUT_ROOT, rather than following the typical
- # practice of putting it under CIME_OUTPUT_ROOT/CASENAME,
- # because the latter leads to too-long paths that make some
- # compilers fail.
- #
- # This only works because case2's CIME_OUTPUT_ROOT is unique
- # to this case. (If case2's CIME_OUTPUT_ROOT were in some
- # more generic location, then this would result in its bld
- # directory being inadvertently shared with other tests.)
- case2_exeroot = os.path.join(self._get_output_root2(), "bld")
+ # case2's EXEROOT needs to be somewhere that (1) is unique
+ # to this case (considering that case1 and case2 have the
+ # same case name), and (2) does not have too long of a path
+ # name (because too-long paths can make some compilers
+ # fail).
+ case1_exeroot = self._case1.get_value("EXEROOT")
+ case2_exeroot = os.path.join(case1_exeroot, "case2bld")
else:
# Use default exeroot
case2_exeroot = None
@@ -311,9 +314,12 @@ def _get_case2_rundir(self):
"""
Gets rundir for case2.
"""
- # Put the case2 run directory alongside its bld directory for
- # consistency. (See notes about EXEROOT in _get_case2_exeroot.)
- case2_rundir = os.path.join(self._get_output_root2(), "run")
+ # case2's RUNDIR needs to be somewhere that is unique to this
+ # case (considering that case1 and case2 have the same case
+ # name). Note that the location below is symmetrical to the
+ # location of case2's EXEROOT set in _get_case2_exeroot.
+ case1_rundir = self._case1.get_value("RUNDIR")
+ case2_rundir = os.path.join(case1_rundir, "case2run")
return case2_rundir
def _setup_cases_if_not_yet_done(self):
@@ -351,6 +357,7 @@ def _setup_cases_if_not_yet_done(self):
cime_output_root = self._get_output_root2(),
exeroot = self._get_case2_exeroot(),
rundir = self._get_case2_rundir())
+ self._write_info_to_case2_output_root()
self._setup_cases()
except:
# If a problem occurred in setting up the test cases, it's
@@ -393,6 +400,38 @@ def _activate_case2(self):
os.chdir(self._caseroot2)
self._set_active_case(self._case2)
+ def _write_info_to_case2_output_root(self):
+ """
+ Writes a file with some helpful information to case2's
+ output_root.
+
+ The motivation here is two-fold:
+
+ (1) Currently, case2's output_root directory is empty. This
+ could be confusing.
+
+ (2) For users who don't know where to look, it could be hard to
+ find case2's bld and run directories. It is somewhat easier
+ to stumble upon case2's output_root, so we put a file there
+ pointing them to the right place.
+ """
+
+ readme_path = os.path.join(self._get_output_root2(), "README")
+ try:
+ with open(readme_path, "w") as fd:
+ fd.write("This directory is typically empty.\n\n")
+ fd.write("case2's run dir is here: {}\n\n".format(
+ self._case2.get_value("RUNDIR")))
+ fd.write("case2's bld dir is here: {}\n".format(
+ self._case2.get_value("EXEROOT")))
+ except IOError:
+ # It's not a big deal if we can't write the README file
+ # (e.g., because the directory doesn't exist or isn't
+ # writeable; note that the former may be the case in unit
+ # tests). So just continue merrily on our way if there was a
+ # problem.
+ pass
+
def _setup_cases(self):
"""
Does all test-specific set up for the two test cases.
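
With these changes, case2's build and run directories nest under case1's EXEROOT and RUNDIR instead of under the separate case2 output root, keeping paths short and unique. A sketch of the resulting layout (paths are illustrative only; the real values come from case1's XML files):

    import os

    case1_exeroot = "/scratch/SMS.f19_g16.A.testid/bld"
    case1_rundir = "/scratch/SMS.f19_g16.A.testid/run"

    case2_exeroot = os.path.join(case1_exeroot, "case2bld")
    case2_rundir = os.path.join(case1_rundir, "case2run")

    print(case2_exeroot)  # .../bld/case2bld
    print(case2_rundir)   # .../run/case2run
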
diff --git a/scripts/lib/CIME/XML/component.py b/scripts/lib/CIME/XML/component.py
index 1f4ae93162c..f3821ea8d52 100644
--- a/scripts/lib/CIME/XML/component.py
+++ b/scripts/lib/CIME/XML/component.py
@@ -162,7 +162,6 @@ def _get_description_v3(self, compsetname, comp_class):
modifier_mode = '*'
expect(modifier_mode in ('*','1','?','+'),
"Invalid modifier_mode {} in file {}".format(modifier_mode, self.filename))
-
optiondesc = {}
if comp_class == "forcing":
for node in desc_nodes:
@@ -176,30 +175,36 @@ def _get_description_v3(self, compsetname, comp_class):
desc = compsetname.split('_')[0]
return desc
+
# first pass just make a hash of the option descriptions
for node in desc_nodes:
option = node.get('option')
if option is not None:
optiondesc[option] = node.text
+
#second pass find a comp_class match
+ desc = ""
for node in desc_nodes:
compdesc = node.get(comp_class)
- if compdesc is None:
- continue
- opt_parts = [ x.rstrip("]") for x in compdesc.split("[%") ]
- parts = opt_parts.pop(0).split("%")
+ if compdesc is not None:
+ opt_parts = [ x.rstrip("]") for x in compdesc.split("[%") ]
+ parts = opt_parts.pop(0).split("%")
+ reqset = set(parts)
+ fullset = set(parts+opt_parts)
+ match, complist = self._get_description_match(compsetname, reqset, fullset, modifier_mode)
+ if match:
+ desc = node.text
+ for opt in complist:
+ if opt in optiondesc:
+ desc += optiondesc[opt]
+
- reqset = set(parts)
- fullset = set(parts+opt_parts)
- if self._get_description_match(compsetname, reqset, fullset, modifier_mode):
- desc = node.text
- break
# cpl and esp components may not have a description
if comp_class not in ['cpl','esp']:
expect(len(desc) > 0,
- "No description found for comp_class {} matching compsetname {} in file {}"\
- .format(comp_class,compsetname, self.filename))
+ "No description found for comp_class {} matching compsetname {} in file {}, expected match in {} % {}"\
+ .format(comp_class, compsetname, self.filename, list(reqset), list(opt_parts)))
return desc
def _get_description_match(self, compsetname, reqset, fullset, modifier_mode):
@@ -207,50 +212,53 @@ def _get_description_match(self, compsetname, reqset, fullset, modifier_mode):
>>> obj = Component('testingonly', 'ATM')
>>> obj._get_description_match("1850_DATM%CRU_FRED",set(["DATM"]), set(["DATM","CRU","HSI"]), "*")
- True
+ (True, ['DATM', 'CRU'])
>>> obj._get_description_match("1850_DATM%FRED_Barn",set(["DATM"]), set(["DATM","CRU","HSI"]), "*")
- False
+ (False, None)
>>> obj._get_description_match("1850_DATM_Barn",set(["DATM"]), set(["DATM","CRU","HSI"]), "?")
- True
+ (True, ['DATM'])
>>> obj._get_description_match("1850_DATM_Barn",set(["DATM"]), set(["DATM","CRU","HSI"]), "1")
Traceback (most recent call last):
...
- SystemExit: ERROR: Expected exactly one modifier found 0
+ SystemExit: ERROR: Expected exactly one modifier found 0 in ['DATM']
>>> obj._get_description_match("1850_DATM%CRU%HSI_Barn",set(["DATM"]), set(["DATM","CRU","HSI"]), "1")
Traceback (most recent call last):
...
- SystemExit: ERROR: Expected exactly one modifier found 2
+ SystemExit: ERROR: Expected exactly one modifier found 2 in ['DATM', 'CRU', 'HSI']
>>> obj._get_description_match("1850_CAM50%WCCM%RCO2_Barn",set(["CAM50", "WCCM"]), set(["CAM50","WCCM","RCO2"]), "*")
- True
+ (True, ['CAM50', 'WCCM', 'RCO2'])
# The following is not allowed because the required WCCM field is missing
>>> obj._get_description_match("1850_CAM50%RCO2_Barn",set(["CAM50", "WCCM"]), set(["CAM50","WCCM","RCO2"]), "*")
- False
+ (False, None)
>>> obj._get_description_match("1850_CAM50_Barn",set(["CAM50", "WCCM"]), set(["CAM50","WCCM","RCO2"]), "+")
- False
+ (False, None)
>>> obj._get_description_match("1850_CAM50%WCCM_Barn",set(["CAM50", "WCCM"]), set(["CAM50","WCCM","RCO2"]), "+")
- True
+ (True, ['CAM50', 'WCCM'])
"""
match = False
comparts = compsetname.split('_')
+ matchcomplist = None
+
for comp in comparts:
complist = comp.split('%')
cset = set(complist)
if cset == reqset or (cset > reqset and cset <= fullset):
if modifier_mode == '1':
expect(len(complist) == 2,
- "Expected exactly one modifer found {}".format(len(complist)-1))
+ "Expected exactly one modifer found {} in {}".format(len(complist)-1,complist))
elif modifier_mode == '+':
expect(len(complist) >= 2,
- "Expected one or more modifers found {}".format(len(complist)-1))
+ "Expected one or more modifers found {} in {}".format(len(complist)-1, list(reqset)))
elif modifier_mode == '?':
expect(len(complist) <= 2,
- "Expected 0 or one modifers found {}".format(len(complist)-1))
+ "Expected 0 or one modifers found {} in {}".format(len(complist)-1, complist))
expect(not match,"Found multiple matches in file {} for {}".format(self.filename,comp))
match = True
+ matchcomplist = complist
# found a match
- return match
+ return match, matchcomplist
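
The modifier_mode attribute acts like a regex quantifier on the %-separated modifiers of one compset element: '*' allows any number, '1' requires exactly one, '?' allows at most one, and '+' requires at least one. A standalone sketch of just that counting rule (not the Component class itself):

    def check_modifiers(comp_element, modifier_mode):
        # comp_element is one "_"-separated piece of a compset
        # name, e.g. "DATM%CRU"; count its "%" modifiers.
        nmod = len(comp_element.split("%")) - 1
        if modifier_mode == "1":
            return nmod == 1
        if modifier_mode == "?":
            return nmod <= 1
        if modifier_mode == "+":
            return nmod >= 1
        return True  # "*": any number of modifiers

    assert check_modifiers("DATM%CRU", "1")
    assert not check_modifiers("DATM%CRU%HSI", "1")
    assert check_modifiers("DATM", "?")
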
diff --git a/scripts/lib/CIME/XML/env_batch.py b/scripts/lib/CIME/XML/env_batch.py
index b3ba5178c6b..ad58a24cd08 100644
--- a/scripts/lib/CIME/XML/env_batch.py
+++ b/scripts/lib/CIME/XML/env_batch.py
@@ -3,9 +3,8 @@
"""
from CIME.XML.standard_module_setup import *
-from CIME.utils import format_time
from CIME.XML.env_base import EnvBase
-from CIME.utils import transform_vars, get_cime_root, convert_to_seconds
+from CIME.utils import transform_vars, get_cime_root, convert_to_seconds, format_time, get_cime_config
from copy import deepcopy
from collections import OrderedDict
@@ -71,13 +70,12 @@ def get_value(self, item, attribute=None, resolved=True, subgroup="case.run"):
value = None
if subgroup is None:
- nodes = self.get_nodes(item, attribute)
- if len(nodes) == 1:
- node = nodes[0]
+ node = self.get_optional_node(item, attribute)
+ if node is not None:
value = node.text
if resolved:
value = self.get_resolved_value(value)
- elif not nodes:
+ else:
value = EnvBase.get_value(self,item,attribute,resolved)
else:
value = EnvBase.get_value(self, item, attribute=attribute, resolved=resolved, subgroup=subgroup)
@@ -149,6 +147,14 @@ def cleanupnode(self, node):
def set_batch_system(self, batchobj, batch_system_type=None):
if batch_system_type is not None:
self.set_batch_system_type(batch_system_type)
+
+ if batchobj.batch_system_node is not None and batchobj.machine_node is not None:
+ for node in batchobj.get_nodes('any', root=batchobj.machine_node, xpath='*'):
+ oldnode = batchobj.get_optional_node(node.tag, root=batchobj.batch_system_node)
+ if oldnode is not None and oldnode.tag != "directives":
+ logger.debug( "Replacing {}".format(oldnode.tag))
+ batchobj.batch_system_node.remove(oldnode)
+
if batchobj.batch_system_node is not None:
self.root.append(deepcopy(batchobj.batch_system_node))
if batchobj.machine_node is not None:
@@ -177,9 +183,6 @@ def make_batch_script(self, input_template, job, case):
os.chmod(job, os.stat(job).st_mode | stat.S_IXUSR | stat.S_IXGRP | stat.S_IXOTH)
def set_job_defaults(self, batch_jobs, case):
- walltime = case.get_value("USER_REQUESTED_WALLTIME") if case.get_value("USER_REQUESTED_WALLTIME") else None
- force_queue = case.get_value("USER_REQUESTED_QUEUE") if case.get_value("USER_REQUESTED_QUEUE") else None
-
if self._batchtype is None:
self._batchtype = self.get_batch_system_type()
@@ -187,6 +190,9 @@ def set_job_defaults(self, batch_jobs, case):
return
for job, jsect in batch_jobs:
+ walltime = case.get_value("USER_REQUESTED_WALLTIME", subgroup=job) if case.get_value("USER_REQUESTED_WALLTIME", subgroup=job) else None
+ force_queue = case.get_value("USER_REQUESTED_QUEUE", subgroup=job) if case.get_value("USER_REQUESTED_QUEUE", subgroup=job) else None
+ logger.info("job is {} USER_REQUESTED_WALLTIME {} USER_REQUESTED_QUEUE {}".format(job, walltime, force_queue))
task_count = jsect["task_count"] if "task_count" in jsect else None
if task_count is None:
node_count = case.num_nodes
@@ -316,7 +322,7 @@ def get_submit_args(self, case, job):
return submitargs
def submit_jobs(self, case, no_batch=False, job=None, user_prereq=None,
- skip_pnl=False, mail_user=None, mail_type='never',
+ skip_pnl=False, mail_user=None, mail_type=None,
batch_args=None, dry_run=False):
alljobs = self.get_jobs()
startindex = 0
@@ -382,7 +388,7 @@ def submit_jobs(self, case, no_batch=False, job=None, user_prereq=None,
return depid
def _submit_single_job(self, case, job, dep_jobs=None, no_batch=False,
- skip_pnl=False, mail_user=None, mail_type='never',
+ skip_pnl=False, mail_user=None, mail_type=None,
batch_args=None, dry_run=False):
logger.warning("Submit job {}".format(job))
batch_system = self.get_value("BATCH_SYSTEM", subgroup=None)
@@ -409,6 +415,7 @@ def _submit_single_job(self, case, job, dep_jobs=None, no_batch=False,
logger.info("dependencies: {}".format(dep_jobs))
dep_string = self.get_value("depend_string", subgroup=None)
separator_string = self.get_value("depend_separator", subgroup=None)
+ expect(separator_string is not None,"depend_separator string not defined")
expect("jobid" in dep_string, "depend_string is missing jobid for prerequisite jobs")
dep_ids_str = str(dep_jobs[0])
for dep_id in dep_jobs[1:]:
@@ -419,14 +426,40 @@ def _submit_single_job(self, case, job, dep_jobs=None, no_batch=False,
if batch_args is not None:
submitargs += " " + batch_args
+ cime_config = get_cime_config()
+
+ if mail_user is None and cime_config.has_option("main", "MAIL_USER"):
+ mail_user = cime_config.get("main", "MAIL_USER")
+
if mail_user is not None:
mail_user_flag = self.get_value('batch_mail_flag', subgroup=None)
if mail_user_flag is not None:
submitargs += " " + mail_user_flag + " " + mail_user
- if 'never' not in mail_type:
- mail_type_flag, mail_type = self.get_batch_mail_type(mail_type)
+
+ if mail_type is None:
+ if job == "case.test" and cime_config.has_option("create_test", "MAIL_TYPE"):
+ mail_type = cime_config.get("create_test", "MAIL_TYPE")
+ elif cime_config.has_option("main", "MAIL_TYPE"):
+ mail_type = cime_config.get("main", "MAIL_TYPE")
+ else:
+ mail_type = self.get_value("batch_mail_default")
+
+ if mail_type:
+ mail_type = mail_type.split(",") # pylint: disable=no-member
+
+ if mail_type:
+ mail_type_flag = self.get_value("batch_mail_type_flag", subgroup=None)
if mail_type_flag is not None:
- submitargs += " " + mail_type_flag + " " + mail_type
+ mail_type_args = []
+ for indv_type in mail_type:
+ mail_type_arg = self.get_batch_mail_type(indv_type)
+ mail_type_args.append(mail_type_arg)
+
+ if mail_type_flag == "-m":
+ # hacky, PBS-type systems pass multiple mail-types differently
+ submitargs += " {} {}".format(mail_type_flag, "".join(mail_type_args))
+ else:
+ submitargs += " {} {}".format(mail_type_flag, " {} ".format(mail_type_flag).join(mail_type_args))
batchsubmit = self.get_value("batch_submit", subgroup=None)
expect(batchsubmit is not None,
@@ -453,13 +486,12 @@ def _submit_single_job(self, case, job, dep_jobs=None, no_batch=False,
logger.info("Submitted job id is {}".format(jobid))
return jobid
- def get_batch_mail_type(self, mail_type='never'):
- mail_type_flag = self.get_value("batch_mail_type_flag", subgroup=None)
+ def get_batch_mail_type(self, mail_type):
raw = self.get_value("batch_mail_type", subgroup=None)
mail_types = [item.strip() for item in raw.split(",")] # pylint: disable=no-member
idx = ["never", "all", "begin", "end", "fail"].index(mail_type)
- return mail_type_flag, mail_types[idx]
+ return mail_types[idx] if idx < len(mail_types) else None
def get_batch_system_type(self):
nodes = self.get_nodes("batch_system")
@@ -547,7 +579,7 @@ def get_all_queues(self):
return self.get_nodes("queue")
def get_nodes(self, nodename, attributes=None, root=None, xpath=None):
- if nodename in ("JOB_WALLCLOCK_TIME", "PROJECT", "CHARGE_ACCOUNT",
+ if nodename in ("JOB_WALLCLOCK_TIME", "PROJECT", "CHARGE_ACCOUNT",
"PROJECT_REQUIRED", "JOB_QUEUE", "BATCH_COMMAND_FLAGS"):
nodes = EnvBase.get_nodes(self, "entry", attributes={"id":nodename},
root=root, xpath=xpath)
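
The mail handling above joins multiple mail types differently per batch system: a PBS-style '-m' flag takes one concatenated argument, while other systems repeat the flag for each type. A minimal sketch of the two joining styles (flag strings are illustrative):

    def build_mail_args(mail_type_flag, mail_type_args):
        if mail_type_flag == "-m":
            # PBS-style: one concatenated argument, e.g. "-m ae"
            return "{} {}".format(mail_type_flag, "".join(mail_type_args))
        # Slurm-style: repeat the flag per type
        return "{} {}".format(
            mail_type_flag, " {} ".format(mail_type_flag).join(mail_type_args))

    print(build_mail_args("-m", ["a", "e"]))                 # -m ae
    print(build_mail_args("--mail-type", ["BEGIN", "END"]))  # --mail-type BEGIN --mail-type END
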
diff --git a/scripts/lib/CIME/aprun.py b/scripts/lib/CIME/aprun.py
index 576fdd9ae91..e5305b82b2b 100755
--- a/scripts/lib/CIME/aprun.py
+++ b/scripts/lib/CIME/aprun.py
@@ -72,7 +72,9 @@ def _get_aprun_cmd_for_case_impl(ntasks, nthreads, rootpes, pstrids,
# Compute task and thread settings for batch commands
tasks_per_node, min_tasks_per_node, task_count, thread_count, max_thread_count, total_node_count, total_task_count, aprun_args = \
0, max_mpitasks_per_node, 1, maxt[0], maxt[0], 0, 0, ""
- for c1 in (range(1, total_tasks) + [None]):
+ c1list = list(range(1, total_tasks))
+ c1list.append(None)
+ for c1 in c1list:
if c1 is None or maxt[c1] != thread_count:
tasks_per_node = min(max_mpitasks_per_node, int(max_tasks_per_node / thread_count))
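
This is a py3 fix: in Python 3 range() returns a lazy range object, so the old 'range(1, total_tasks) + [None]' raises TypeError; materializing the list first works under both interpreters:

    total_tasks = 4

    # Python 2 only: range() returned a list, so
    # "range(1, total_tasks) + [None]" worked; under Python 3
    # that concatenation raises TypeError.
    c1list = list(range(1, total_tasks))
    c1list.append(None)
    assert c1list == [1, 2, 3, None]
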
diff --git a/scripts/lib/CIME/build.py b/scripts/lib/CIME/build.py
index cf670697486..a2296ea3992 100644
--- a/scripts/lib/CIME/build.py
+++ b/scripts/lib/CIME/build.py
@@ -12,7 +12,7 @@
###############################################################################
def _build_model(build_threaded, exeroot, clm_config_opts, incroot, complist,
- lid, caseroot, cimeroot, compiler, buildlist):
+ lid, caseroot, cimeroot, compiler, buildlist, comp_interface):
###############################################################################
logs = []
@@ -79,7 +79,7 @@ def _build_model(build_threaded, exeroot, clm_config_opts, incroot, complist,
cime_model = get_model()
file_build = os.path.join(exeroot, "{}.bldlog.{}".format(cime_model, lid))
- config_dir = os.path.join(cimeroot, "src", "drivers", "mct", "cime_config")
+ config_dir = os.path.join(cimeroot, "src", "drivers", comp_interface, "cime_config")
f = open(file_build, "w")
bldroot = os.path.join(exeroot, "cpl", "obj")
if not os.path.isdir(bldroot):
@@ -206,7 +206,7 @@ def _build_checks(case, build_threaded, comp_interface, use_esmf_lib,
return sharedpath
###############################################################################
-def _build_libraries(case, exeroot, sharedpath, caseroot, cimeroot, libroot, lid, compiler, buildlist):
+def _build_libraries(case, exeroot, sharedpath, caseroot, cimeroot, libroot, lid, compiler, buildlist, comp_interface):
###############################################################################
shared_lib = os.path.join(exeroot, sharedpath, "lib")
@@ -215,7 +215,7 @@ def _build_libraries(case, exeroot, sharedpath, caseroot, cimeroot, libroot, lid
if (not os.path.exists(shared_item)):
os.makedirs(shared_item)
mpilib = case.get_value("MPILIB")
- libs = ["gptl", "mct", "pio", "csm_share"]
+ libs = ["gptl", comp_interface, "pio", "csm_share"]
if mpilib == "mpi-serial":
libs.insert(0, mpilib)
logs = []
@@ -228,7 +228,7 @@ def _build_libraries(case, exeroot, sharedpath, caseroot, cimeroot, libroot, lid
# csm_share adds its own dir name
full_lib_path = os.path.join(sharedlibroot, sharedpath)
elif lib == "mpi-serial":
- full_lib_path = os.path.join(sharedlibroot, sharedpath, "mct", lib)
+ full_lib_path = os.path.join(sharedlibroot, sharedpath, comp_interface, lib)
else:
full_lib_path = os.path.join(sharedlibroot, sharedpath, lib)
# pio build creates its own directory
@@ -261,9 +261,9 @@ def _build_libraries(case, exeroot, sharedpath, caseroot, cimeroot, libroot, lid
if comp_lnd == "clm" and "clm4_0" not in clm_config_opts:
logging.info(" - Building clm4_5/clm5_0 Library ")
esmfdir = "esmf" if case.get_value("USE_ESMF_LIB") else "noesmf"
- bldroot = os.path.join(sharedlibroot, sharedpath, case.get_value("COMP_INTERFACE"), esmfdir, "clm","obj" )
- libroot = os.path.join(exeroot, sharedpath, case.get_value("COMP_INTERFACE"), esmfdir, "lib")
- incroot = os.path.join(exeroot, sharedpath, case.get_value("COMP_INTERFACE"), esmfdir, "include")
+ bldroot = os.path.join(sharedlibroot, sharedpath, comp_interface, esmfdir, "clm","obj" )
+ libroot = os.path.join(exeroot, sharedpath, comp_interface, esmfdir, "lib")
+ incroot = os.path.join(exeroot, sharedpath, comp_interface, esmfdir, "include")
file_build = os.path.join(exeroot, "lnd.bldlog.{}".format( lid))
config_lnd_dir = os.path.dirname(case.get_value("CONFIG_LND_FILE"))
@@ -311,11 +311,11 @@ def _build_model_thread(config_dir, compclass, compname, caseroot, libroot, bldr
logger.info("{} built in {:f} seconds".format(compname, (t2 - t1)))
###############################################################################
-def _clean_impl(case, cleanlist, clean_all):
+def _clean_impl(case, cleanlist, clean_all, clean_depends):
###############################################################################
+ exeroot = os.path.abspath(case.get_value("EXEROOT"))
if clean_all:
# If cleanlist is empty just remove the bld directory
- exeroot = os.path.abspath(case.get_value("EXEROOT"))
expect(exeroot is not None,"No EXEROOT defined in case")
if os.path.isdir(exeroot):
logging.info("cleaning directory {}".format(exeroot))
@@ -327,7 +327,8 @@ def _clean_impl(case, cleanlist, clean_all):
logging.warning("cleaning directory {}".format(sharedlibroot))
shutil.rmtree(sharedlibroot)
else:
- expect(cleanlist is not None and len(cleanlist) > 0,"Empty cleanlist not expected")
+ expect((cleanlist is not None and len(cleanlist) > 0) or
+ (clean_depends is not None and len(clean_depends)),"Empty cleanlist not expected")
debug = case.get_value("DEBUG")
use_esmf_lib = case.get_value("USE_ESMF_LIB")
build_threaded = case.get_build_threaded()
@@ -345,10 +346,16 @@ def _clean_impl(case, cleanlist, clean_all):
os.environ["CLM_CONFIG_OPTS"] = clm_config_opts if clm_config_opts is not None else ""
cmd = gmake + " -f " + os.path.join(casetools, "Makefile")
- for item in cleanlist:
- tcmd = cmd + " clean" + item
- logger.info("calling {} ".format(tcmd))
- run_cmd_no_fail(tcmd)
+ if cleanlist is not None:
+ for item in cleanlist:
+ tcmd = cmd + " clean" + item
+ logger.info("calling {} ".format(tcmd))
+ run_cmd_no_fail(tcmd)
+ else:
+ for item in clean_depends:
+ tcmd = cmd + " clean_depends" + item
+ logger.info("calling {} ".format(tcmd))
+ run_cmd_no_fail(tcmd)
# unlink Locked files directory
unlock_file("env_build.xml")
@@ -513,12 +520,12 @@ def _case_build_impl(caseroot, case, sharedlib_only, model_only, buildlist):
if not model_only:
logs = _build_libraries(case, exeroot, sharedpath, caseroot,
- cimeroot, libroot, lid, compiler, buildlist)
+ cimeroot, libroot, lid, compiler, buildlist, comp_interface)
if not sharedlib_only:
os.environ["INSTALL_SHAREDPATH"] = os.path.join(exeroot, sharedpath) # for MPAS makefile generators
logs.extend(_build_model(build_threaded, exeroot, clm_config_opts, incroot, complist,
- lid, caseroot, cimeroot, compiler, buildlist))
+ lid, caseroot, cimeroot, compiler, buildlist, comp_interface))
if not buildlist:
# in case component build scripts updated the xml files, update the case object
@@ -574,7 +581,7 @@ def case_build(caseroot, case, sharedlib_only=False, model_only=False, buildlist
return run_and_log_case_status(functor, "case.build", caseroot=caseroot)
###############################################################################
-def clean(case, cleanlist=None, clean_all=False):
+def clean(case, cleanlist=None, clean_all=False, clean_depends=None):
###############################################################################
- functor = lambda: _clean_impl(case, cleanlist, clean_all)
+ functor = lambda: _clean_impl(case, cleanlist, clean_all, clean_depends)
return run_and_log_case_status(functor, "build.clean", caseroot=case.get_value("CASEROOT"))
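
The clean path now dispatches to either 'clean<component>' or 'clean_depends<component>' Makefile targets, depending on which list was passed. A sketch of the command strings this produces (path and component names are illustrative):

    import os

    casetools = "/path/to/caseroot/Tools"  # illustrative
    cmd = "gmake -f " + os.path.join(casetools, "Makefile")

    for item in ["atm", "ocn"]:
        print(cmd + " clean" + item)          # ordinary object cleanup
        print(cmd + " clean_depends" + item)  # also remove dependency files
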
diff --git a/scripts/lib/CIME/buildlib.py b/scripts/lib/CIME/buildlib.py
index 551f6325d75..bfeb687a1b4 100644
--- a/scripts/lib/CIME/buildlib.py
+++ b/scripts/lib/CIME/buildlib.py
@@ -43,13 +43,14 @@ def parse_input(argv):
def build_cime_component_lib(case, compname, libroot, bldroot):
cimeroot = case.get_value("CIMEROOT")
+ comp_interface = case.get_value("COMP_INTERFACE")
compclass = compname[1:]
with open(os.path.join(bldroot,'Filepath'), 'w') as out:
out.write(os.path.join(case.get_value('CASEROOT'), "SourceMods",
"src.{}\n".format(compname)) + "\n")
if compname.startswith('d'):
- out.write(os.path.join(cimeroot, "src", "components", "data_comps", compname, "mct") + "\n")
+ out.write(os.path.join(cimeroot, "src", "components", "data_comps", compname, comp_interface) + "\n")
out.write(os.path.join(cimeroot, "src", "components", "data_comps", compname) + "\n")
elif compname.startswith('x'):
out.write(os.path.join(cimeroot, "src", "components", "xcpl_comps", "xshare") + "\n")
diff --git a/scripts/lib/CIME/case.py b/scripts/lib/CIME/case.py
index f12872aafe6..fef28d798c5 100644
--- a/scripts/lib/CIME/case.py
+++ b/scripts/lib/CIME/case.py
@@ -391,8 +391,12 @@ def set_value(self, item, value, subgroup=None, ignore_type=False, allow_undefin
self._env_files_that_need_rewrite.add(env_file)
return result
- expect(allow_undefined or result is not None,
- "No variable {} found in case".format(item))
+ if len(self._files) == 1:
+ expect(allow_undefined or result is not None,
+ "No variable {} found in file {}".format(item, self._files[0].filename))
+ else:
+ expect(allow_undefined or result is not None,
+ "No variable {} found in case".format(item))
def set_valid_values(self, item, valid_values):
"""
@@ -766,9 +770,13 @@ def configure(self, compset_name, grid_name, machine_name=None,
multi_driver=False, ninst=1, test=False,
walltime=None, queue=None, output_root=None,
run_unsupported=False, answer=None,
- input_dir=None):
+ input_dir=None, driver=None):
expect(check_name(compset_name, additional_chars='.'), "Invalid compset name {}".format(compset_name))
+
+ if driver:
+ self.set_lookup_value("COMP_INTERFACE", driver)
+
#--------------------------------------------
# compset, pesfile, and compset components
#--------------------------------------------
@@ -1170,7 +1178,7 @@ def _get_comp_user_mods(self, component):
return comp_user_mods
def submit_jobs(self, no_batch=False, job=None, prereq=None, skip_pnl=False,
- mail_user=None, mail_type='never', batch_args=None,
+ mail_user=None, mail_type=None, batch_args=None,
dry_run=False):
env_batch = self.get_env('batch')
return env_batch.submit_jobs(self, no_batch=no_batch, job=job, user_prereq=prereq,
@@ -1235,7 +1243,7 @@ def get_mpirun_cmd(self, job="case.run"):
executable, mpi_arg_list = env_mach_specific.get_mpirun(self, mpi_attribs, job=job)
# special case for aprun
- if executable is not None and "aprun" in executable and "titan" in self.get_value("MACH"):
+ if executable is not None and "aprun" in executable and not "theta" in self.get_value("MACH"):
aprun_args, num_nodes = get_aprun_cmd_for_case(self, run_exe)[0:2]
expect( (num_nodes + self.spare_nodes) == self.num_nodes, "Not using optimized num nodes")
return executable + aprun_args + " " + run_misc_suffix
@@ -1407,7 +1415,7 @@ def create(self, casename, srcroot, compset_name, grid_name,
multi_driver=False, ninst=1, test=False,
walltime=None, queue=None, output_root=None,
run_unsupported=False, answer=None,
- input_dir=None):
+ input_dir=None, driver=None):
try:
# Set values for env_case.xml
self.set_lookup_value("CASE", os.path.basename(casename))
@@ -1423,7 +1431,7 @@ def create(self, casename, srcroot, compset_name, grid_name,
walltime=walltime, queue=queue,
output_root=output_root,
run_unsupported=run_unsupported, answer=answer,
- input_dir=input_dir)
+ input_dir=input_dir, driver=driver)
self.create_caseroot()
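
The new driver argument is stored as COMP_INTERFACE before the compset is resolved, and the build code above reads it instead of hard-coding "mct" for the shared-library list and the drivers config directory. Roughly:

    comp_interface = "nuopc"  # e.g. from create_newcase --driver nuopc

    # The driver choice replaces the previously hard-coded "mct":
    libs = ["gptl", comp_interface, "pio", "csm_share"]
    config_dir = "src/drivers/{}/cime_config".format(comp_interface)
    print(libs)
    print(config_dir)
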
diff --git a/scripts/lib/CIME/case_cmpgen_namelists.py b/scripts/lib/CIME/case_cmpgen_namelists.py
index 55faf46812b..35dd14224e8 100644
--- a/scripts/lib/CIME/case_cmpgen_namelists.py
+++ b/scripts/lib/CIME/case_cmpgen_namelists.py
@@ -119,6 +119,13 @@ def case_cmpgen_namelists(case, compare=False, generate=False, compare_name=None
output = ""
if compare:
success, output = _do_full_nl_comp(case, test_name, compare_name, baseline_root)
+ if not success and ts.get_status(RUN_PHASE) is not None:
+ run_warn = \
+"""NOTE: It is not necessarily safe to compare namelists after RUN
+phase has completed. Running a case can pollute namelists. The namelists
+kept in the baselines are pre-RUN namelists."""
+ output += run_warn
+ logging.info(run_warn)
if generate:
_do_full_nl_gen(case, test_name, generate_name, baseline_root)
except:
diff --git a/scripts/lib/CIME/case_run.py b/scripts/lib/CIME/case_run.py
index 6659387e2c5..74d91a45d3e 100644
--- a/scripts/lib/CIME/case_run.py
+++ b/scripts/lib/CIME/case_run.py
@@ -1,6 +1,6 @@
from CIME.XML.standard_module_setup import *
from CIME.case_submit import submit
-from CIME.utils import gzip_existing_file, new_lid, run_and_log_case_status, get_timestamp, get_model
+from CIME.utils import gzip_existing_file, new_lid, run_and_log_case_status, get_timestamp, run_sub_or_cmd
from CIME.check_lockedfiles import check_lockedfiles
from CIME.get_timing import get_timing
from CIME.provenance import save_prerun_provenance, save_postrun_provenance
@@ -241,20 +241,18 @@ def resubmit_check(case):
###############################################################################
def do_external(script_name, caseroot, rundir, lid, prefix):
###############################################################################
+ expect(os.path.isfile(script_name), "External script {} not found".format(script_name))
filename = "{}.external.log.{}".format(prefix, lid)
outfile = os.path.join(rundir, filename)
- cmd = script_name + " 1> {} {} 2>&1".format(outfile, caseroot)
- logger.info("running {}".format(script_name))
- run_cmd_no_fail(cmd)
+ run_sub_or_cmd(script_name, [caseroot], os.path.basename(script_name), [caseroot], logfile=outfile, combine_output=True)
###############################################################################
def do_data_assimilation(da_script, caseroot, cycle, lid, rundir):
###############################################################################
+ expect(os.path.isfile(da_script), "Data Assimilation script {} not found".format(da_script))
filename = "da.log.{}".format(lid)
outfile = os.path.join(rundir, filename)
- cmd = da_script + " 1> {} {} {:d} 2>&1".format(outfile, caseroot, cycle)
- logger.info("running {}".format(da_script))
- run_cmd_no_fail(cmd)
+ run_sub_or_cmd(da_script, [caseroot, cycle], os.path.basename(da_script), [caseroot, cycle], logfile=outfile, combine_output=True)
###############################################################################
def case_run(case, skip_pnl=False):
@@ -281,21 +279,20 @@ def case_run(case, skip_pnl=False):
# set up the LID
lid = new_lid()
+ if prerun_script:
+ case.flush()
+ do_external(prerun_script, case.get_value("CASEROOT"), case.get_value("RUNDIR"),
+ lid, prefix="prerun")
+ case.read_xml()
+
for cycle in range(data_assimilation_cycles):
# After the first DA cycle, runs are restart runs
if cycle > 0:
case.set_value("CONTINUE_RUN", "TRUE")
lid = new_lid()
- if prerun_script:
- case.flush()
- do_external(prerun_script, case.get_value("CASEROOT"), case.get_value("RUNDIR"),
- lid, prefix="prerun")
- case.read_xml()
-
lid = run_model(case, lid, skip_pnl, da_cycle=cycle)
- save_logs(case, lid) # Copy log files back to caseroot
if case.get_value("CHECK_TIMING") or case.get_value("SAVE_TIMING"):
get_timing(case, lid) # Run the getTiming script
@@ -305,14 +302,18 @@ def case_run(case, skip_pnl=False):
case.get_value("RUNDIR"))
case.read_xml()
- if postrun_script:
- case.flush()
- do_external(postrun_script, case.get_value("CASEROOT"), case.get_value("RUNDIR"),
- lid, prefix="postrun")
- case.read_xml()
+ save_logs(case, lid) # Copy log files back to caseroot
save_postrun_provenance(case)
+ if postrun_script:
+ case.flush()
+ do_external(postrun_script, case.get_value("CASEROOT"), case.get_value("RUNDIR"),
+ lid, prefix="postrun")
+ case.read_xml()
+
+ save_logs(case, lid) # Copy log files back to caseroot
+
logger.warning("check for resubmit")
resubmit_check(case)
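
do_external and do_data_assimilation now go through run_sub_or_cmd, which (roughly) calls a Python subroutine named after the script when the script can be imported, and otherwise falls back to running it as a shell command with the same arguments. A simplified sketch of that dispatch, not the actual CIME implementation:

    import importlib.util
    import subprocess

    def run_sub_or_cmd_sketch(script, cmdargs, subname, subargs, logfile):
        if script.endswith(".py"):
            spec = importlib.util.spec_from_file_location(subname, script)
            mod = importlib.util.module_from_spec(spec)
            spec.loader.exec_module(mod)
            if hasattr(mod, subname):
                return getattr(mod, subname)(*subargs)
        # Fallback: run as a shell command, capturing all output in logfile
        args = " ".join(str(a) for a in cmdargs)
        with open(logfile, "w") as log:
            subprocess.check_call("{} {}".format(script, args), shell=True,
                                  stdout=log, stderr=subprocess.STDOUT)
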
diff --git a/scripts/lib/CIME/case_st_archive.py b/scripts/lib/CIME/case_st_archive.py
index 87704c7d20a..0f8b5b47750 100644
--- a/scripts/lib/CIME/case_st_archive.py
+++ b/scripts/lib/CIME/case_st_archive.py
@@ -101,7 +101,8 @@ def _get_file_date(filename):
return datetime.datetime(year, month, day) + datetime.timedelta(seconds = second)
# Not a valid filename date format
- raise ValueError("{} is a filename without a supported date!".format(filename))
+ logger.debug("{} is a filename without a supported date!".format(filename))
+ return None
def _get_day_second(date):
"""
@@ -138,9 +139,9 @@ def _datetime_str_mpas(date):
to support abbreviations, so we can't use that here
>>> _datetime_str_mpas(datetime.datetime(5, 8, 22))
- '0005-08-22_00000'
+ '0005-08-22_00:00:00'
>>> _datetime_str_mpas(_get_file_date("0011-12-09-00435"))
- '0011-12-09_00435'
+ '0011-12-09_00:07:15'
"""
format_string = "{year:04d}-{month:02d}-{day:02d}_{hours:02d}:{minutes:02d}:{seconds:02d}"
@@ -277,7 +278,7 @@ def _archive_history_files(case, archive, archive_entry,
# determine history archive directory (create if it does not exist)
dout_s_root = case.get_value("DOUT_S_ROOT")
- casename = case.get_value("CASE")
+ casename = re.escape(case.get_value("CASE"))
archive_histdir = os.path.join(dout_s_root, compclass, 'hist')
if not os.path.exists(archive_histdir):
os.makedirs(archive_histdir)
@@ -313,7 +314,7 @@ def _archive_history_files(case, archive, archive_entry,
if histfiles:
for histfile in histfiles:
file_date = _get_file_date(os.path.basename(histfile))
- if last_date is None or file_date <= last_date:
+ if last_date is None or file_date is None or file_date <= last_date:
srcfile = join(rundir, histfile)
expect(os.path.isfile(srcfile),
"history file {} does not exist ".format(srcfile))
@@ -439,7 +440,7 @@ def _archive_restarts_date_comp(case, archive, archive_entry,
# the compname is drv but the files are named cpl
if compname == 'drv':
compname = 'cpl'
-
+ casename = re.escape(casename)
# get file_extension suffixes
for suffix in archive.get_rest_file_extensions(archive_entry):
for i in range(ninst):
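
Escaping the casename matters because it is interpolated into regular expressions when matching history and restart file names; a casename containing a regex metacharacter such as '+' would otherwise silently change the pattern. For example:

    import re

    casename = "try+45"  # "+" is a regex quantifier
    histfile = "try+45.cpl.hi.0001-01-01-00000.nc"

    # Unescaped, "try+45" means "tr" then one or more "y": no match.
    assert re.match(casename + r"\.cpl", histfile) is None

    # Escaped, the casename is matched literally.
    assert re.match(re.escape(casename) + r"\.cpl", histfile)
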
diff --git a/scripts/lib/CIME/case_submit.py b/scripts/lib/CIME/case_submit.py
index 8a4008e6f74..5e73d126bf7 100644
--- a/scripts/lib/CIME/case_submit.py
+++ b/scripts/lib/CIME/case_submit.py
@@ -16,13 +16,18 @@
logger = logging.getLogger(__name__)
def _submit(case, job=None, no_batch=False, prereq=None, resubmit=False,
- skip_pnl=False, mail_user=None, mail_type='never', batch_args=None):
+ skip_pnl=False, mail_user=None, mail_type=None, batch_args=None):
if job is None:
if case.get_value("TEST"):
job = "case.test"
else:
job = "case.run"
+ rundir = case.get_value("RUNDIR")
+ continue_run = case.get_value("CONTINUE_RUN")
+ expect(os.path.isdir(rundir) or not continue_run,
+ " CONTINUE_RUN is true but RUNDIR {} does not exist".format(rundir))
+
if resubmit:
resub = case.get_value("RESUBMIT")
logger.info("Submitting job '{}', resubmit={:d}".format(job, resub))
@@ -78,7 +83,7 @@ def _submit(case, job=None, no_batch=False, prereq=None, resubmit=False,
case.set_value("JOB_IDS", xml_jobid_text)
def submit(case, job=None, no_batch=False, prereq=None, resubmit=False,
- skip_pnl=False, mail_user=None, mail_type='never', batch_args=None):
+ skip_pnl=False, mail_user=None, mail_type=None, batch_args=None):
if case.get_value("TEST"):
caseroot = case.get_value("CASEROOT")
casebaseid = case.get_value("CASEBASEID")
diff --git a/scripts/lib/CIME/test_scheduler.py b/scripts/lib/CIME/test_scheduler.py
index 431b23a8ffd..08c2bc25011 100644
--- a/scripts/lib/CIME/test_scheduler.py
+++ b/scripts/lib/CIME/test_scheduler.py
@@ -88,7 +88,7 @@ def __init__(self, test_names, test_data=None,
use_existing=False, save_timing=False, queue=None,
allow_baseline_overwrite=False, output_root=None,
force_procs=None, force_threads=None, mpilib=None,
- input_dir=None, pesfile=None):
+ input_dir=None, pesfile=None, mail_user=None, mail_type=None):
###########################################################################
self._cime_root = CIME.utils.get_cime_root()
self._cime_model = get_model()
@@ -101,6 +101,9 @@ def __init__(self, test_names, test_data=None,
self._pesfile = pesfile
self._allow_baseline_overwrite = allow_baseline_overwrite
+ self._mail_user = mail_user
+ self._mail_type = mail_type
+
self._machobj = Machines(machine=machine_name)
self._model_build_cost = 4
@@ -218,21 +221,26 @@ def __init__(self, test_names, test_data=None,
if use_existing:
for test in self._tests:
- ts = TestStatus(self._get_test_dir(test))
- for phase, status in ts:
- if phase in CORE_PHASES:
- if status in [TEST_PEND_STATUS, TEST_FAIL_STATUS]:
- # We need to pick up here
- break
- else:
- if phase != SUBMIT_PHASE:
- # Somewhat subtle. Create_test considers submit/run to be the run phase,
- # so don't try to update test status for a passed submit phase
- self._update_test_status(test, phase, TEST_PEND_STATUS)
- self._update_test_status(test, phase, status)
-
- if phase == RUN_PHASE:
- logger.info("Test {} passed and will not be re-run".format(test))
+ with TestStatus(self._get_test_dir(test)) as ts:
+ for phase, status in ts:
+ if phase in CORE_PHASES:
+ if status in [TEST_PEND_STATUS, TEST_FAIL_STATUS]:
+ if status == TEST_FAIL_STATUS:
+ # Important for potential subsequent waits
+ ts.set_status(phase, TEST_PEND_STATUS)
+
+ # We need to pick up here
+ break
+
+ else:
+ if phase != SUBMIT_PHASE:
+ # Somewhat subtle. Create_test considers submit/run to be the run phase,
+ # so don't try to update test status for a passed submit phase
+ self._update_test_status(test, phase, TEST_PEND_STATUS)
+ self._update_test_status(test, phase, status)
+
+ if phase == RUN_PHASE:
+ logger.info("Test {} passed and will not be re-run".format(test))
logger.info("Using existing test directory {}".format(self._get_test_dir(test)))
else:
@@ -411,19 +419,22 @@ def _create_newcase_phase(self, test):
mpilib = case_opt[1:]
create_newcase_cmd += " --mpilib {}".format(mpilib)
logger.debug (" MPILIB set to {}".format(mpilib))
- if case_opt.startswith('N'):
+ elif case_opt.startswith('N'):
expect(ncpl == 1,"Cannot combine _C and _N options")
ninst = case_opt[1:]
create_newcase_cmd += " --ninst {}".format(ninst)
logger.debug (" NINST set to {}".format(ninst))
- if case_opt.startswith('C'):
+ elif case_opt.startswith('C'):
expect(ninst == 1,"Cannot combine _C and _N options")
ncpl = case_opt[1:]
create_newcase_cmd += " --ninst {} --multi-driver" .format(ncpl)
logger.debug (" NCPL set to {}" .format(ncpl))
- if case_opt.startswith('P'):
+ elif case_opt.startswith('P'):
pesize = case_opt[1:]
create_newcase_cmd += " --pecount {}".format(pesize)
+ elif case_opt.startswith('V'):
+ driver = case_opt[1:]
+ create_newcase_cmd += " --driver {}".format(driver)
# create_test mpilib option overrides default but not explicitly set case_opt mpilib
if mpilib is None and self._mpilib is not None:
@@ -540,24 +551,14 @@ def _xml_phase(self, test):
# For PTS_MODE, compile with mpi-serial
envtest.set_test_parameter("MPILIB", "mpi-serial")
- elif opt.startswith('I'):
- # Marker to distinguish tests with same name - ignored
- continue
-
- elif opt.startswith('M'):
- # M option handled by create newcase
- continue
-
- elif opt.startswith('P'):
- # P option handled by create newcase
- continue
+ elif (opt.startswith('I') or # Marker to distinguish tests with same name - ignored
+ opt.startswith('M') or # handled in create_newcase
+ opt.startswith('P') or # handled in create_newcase
+ opt.startswith('N') or # handled in create_newcase
+ opt.startswith('C') or # handled in create_newcase
+ opt.startswith('V')): # handled in create_newcase
+ pass
- elif opt.startswith('N'):
- # handled in create_newcase
- continue
- elif opt.startswith('C'):
- # handled in create_newcase
- continue
elif opt.startswith('IOP'):
logger.warning("IOP test option not yet implemented")
else:
@@ -614,10 +615,14 @@ def _model_build_phase(self, test):
def _run_phase(self, test):
###########################################################################
test_dir = self._get_test_dir(test)
+
+ cmd = "./case.submit --skip-preview-namelist"
if self._no_batch:
- cmd = "./case.submit --no-batch --skip-preview-namelist"
- else:
- cmd = "./case.submit --skip-preview-namelist"
+ cmd += " --no-batch"
+ if self._mail_user:
+ cmd += " --mail-user={}".format(self._mail_user)
+ if self._mail_type:
+ cmd += " -M={}".format(",".join(self._mail_type))
return self._shell_cmd_for_phase(test, cmd, RUN_PHASE, from_dir=test_dir)
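
Test options are single letters followed by a payload; the scheduler peels off the letter and forwards the payload to create_newcase, and the new _V option carries the driver name. A condensed sketch of that mapping (illustrative subset):

    def testopt_to_flag(case_opt):
        if case_opt.startswith("P"):
            return "--pecount {}".format(case_opt[1:])
        if case_opt.startswith("N"):
            return "--ninst {}".format(case_opt[1:])
        if case_opt.startswith("V"):
            return "--driver {}".format(case_opt[1:])
        return ""

    assert testopt_to_flag("P16x8") == "--pecount 16x8"
    assert testopt_to_flag("Vnuopc") == "--driver nuopc"
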
diff --git a/scripts/lib/CIME/tests/case_fake.py b/scripts/lib/CIME/tests/case_fake.py
index 1d5d3874c16..2ef48eaced5 100644
--- a/scripts/lib/CIME/tests/case_fake.py
+++ b/scripts/lib/CIME/tests/case_fake.py
@@ -29,6 +29,7 @@ def __init__(self, case_root, create_case_root=True):
self.set_value('CASE', casename)
self.set_value('CASEBASEID', casename)
self.set_value('RUN_TYPE', 'startup')
+ self.set_exeroot()
self.set_rundir()
def get_value(self, item):
@@ -65,6 +66,7 @@ def copy(self, newcasename, newcaseroot):
newcase.set_value('CASE', newcasename)
newcase.set_value('CASEBASEID', newcasename)
newcase.set_value('CASEROOT', newcaseroot)
+ newcase.set_exeroot()
newcase.set_rundir()
return newcase
@@ -86,8 +88,7 @@ def create_clone(self, newcase, keepexe=False, mach_dir=None, project=None,
mach_dir (str, optional): Ignored
project (str, optional): Ignored
cime_output_root (str, optional): New CIME_OUTPUT_ROOT for the clone
- exeroot (str, optional): Ignored (because exeroot isn't used
- in this fake case implementation)
+ exeroot (str, optional): New EXEROOT for the clone
rundir (str, optional): New RUNDIR for the clone
Returns the clone case object
@@ -97,9 +98,11 @@ def create_clone(self, newcase, keepexe=False, mach_dir=None, project=None,
os.makedirs(newcaseroot)
clone = self.copy(newcasename = newcasename, newcaseroot = newcaseroot)
if cime_output_root is not None:
- self.set_value('CIME_OUTPUT_ROOT', cime_output_root)
+ clone.set_value('CIME_OUTPUT_ROOT', cime_output_root)
+ if exeroot is not None:
+ clone.set_value('EXEROOT', exeroot)
if rundir is not None:
- self.set_value('RUNDIR', rundir)
+ clone.set_value('RUNDIR', rundir)
return clone
@@ -112,6 +115,13 @@ def make_rundir(self):
"""
os.makedirs(self.get_value('RUNDIR'))
+ def set_exeroot(self):
+ """
+ Assumes CASEROOT is already set; sets an appropriate EXEROOT
+ (nested inside CASEROOT)
+ """
+ self.set_value('EXEROOT', os.path.join(self.get_value('CASEROOT'), 'bld'))
+
def set_rundir(self):
"""
Assumes CASEROOT is already set; sets an appropriate RUNDIR (nested
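
The create_clone fix above is easy to miss: the overrides were previously applied to self (the source case) rather than to the returned clone. A minimal reproduction of the corrected shape:

    class MiniCase:
        def __init__(self, vals=None):
            self.vals = dict(vals or {})
        def set_value(self, key, val):
            self.vals[key] = val
        def create_clone(self, rundir=None):
            clone = MiniCase(self.vals)
            if rundir is not None:
                clone.set_value("RUNDIR", rundir)  # was: self.set_value(...)
            return clone

    src = MiniCase()
    dup = src.create_clone(rundir="/tmp/run2")
    assert "RUNDIR" not in src.vals and dup.vals["RUNDIR"] == "/tmp/run2"
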
diff --git a/scripts/lib/CIME/utils.py b/scripts/lib/CIME/utils.py
index fdd13e2c1ff..c5b5b905541 100644
--- a/scripts/lib/CIME/utils.py
+++ b/scripts/lib/CIME/utils.py
@@ -49,7 +49,7 @@ def expect(condition, error_msg, exc_type=SystemExit, error_prefix="ERROR:"):
if logger.isEnabledFor(logging.DEBUG):
import pdb
pdb.set_trace()
- raise exc_type("{} {}".format(error_prefix, error_msg))
+ raise exc_type(error_prefix + " " + error_msg)
def id_generator(size=6, chars=string.ascii_lowercase + string.digits):
return ''.join(random.choice(chars) for _ in range(size))
@@ -228,7 +228,14 @@ def run_sub_or_cmd(cmd, cmdargs, subname, subargs, logfile=None, case=None,
logger.info(" Running {} ".format(cmd))
if case is not None:
case.flush()
- output = run_cmd_no_fail("{} {}".format(cmd, cmdargs), combine_output=combine_output,
+ fullcmd = cmd
+ if isinstance(cmdargs, list):
+ for arg in cmdargs:
+ fullcmd += " " + str(arg)
+ else:
+ fullcmd += " " + cmdargs
+ output = run_cmd_no_fail("{} 1> {} 2>&1".format(fullcmd, logfile),
+ combine_output=combine_output,
from_dir=from_dir)
logger.info(output)
# refresh case xml object from file
@@ -342,9 +349,9 @@ def check_minimum_python_version(major, minor):
>>> check_minimum_python_version(sys.version_info[0], sys.version_info[1])
>>>
"""
+ msg = "Python " + str(major) + ", minor version " + str(minor) + " is required, you have " + str(sys.version_info[0]) + "." + str(sys.version_info[1])
expect(sys.version_info[0] > major or
- (sys.version_info[0] == major and sys.version_info[1] >= minor),
- "Python {:d}, minor version {:d}+ is required, you have {:d}.{:d}".format(major, minor, sys.version_info[0], sys.version_info[1]))
+ (sys.version_info[0] == major and sys.version_info[1] >= minor), msg)
def normalize_case_id(case_id):
"""
@@ -1401,6 +1408,25 @@ def _check_for_invalid_args(args):
if arg.startswith("-") and len(arg) > 2:
sys.stderr.write( "WARNING: The {} argument is depricated. Multi-character arguments should begin with \"--\" and single character with \"-\"\n Use --help for a complete list of available options\n".format(arg))
+def add_mail_type_args(parser):
+ parser.add_argument("--mail-user", help="email to be used for batch notification.")
+
+ parser.add_argument("-M", "--mail-type", action="append",
+ help="when to send user email. Options are: never, all, begin, end, fail."
+ "You can specify multiple types with either comma-separate args or multiple -M flags")
+
+def resolve_mail_type_args(args):
+ if args.mail_type is not None:
+ resolved_mail_types = []
+ for mail_type in args.mail_type:
+ resolved_mail_types.extend(mail_type.split(","))
+
+ for mail_type in resolved_mail_types:
+ expect(mail_type in ("never", "all", "begin", "end", "fail"),
+ "Unsupported mail-type '{}'".format(mail_type))
+
+ args.mail_type = resolved_mail_types
+
class SharedArea(object):
"""
Enable 0002 umask within this manager
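
add_mail_type_args and resolve_mail_type_args together accept both repeated -M flags and comma-separated values, flattening them into one validated list. For example, '-M begin -M end' and '-M begin,end' resolve identically:

    raw_mail_type = ["begin,end"]  # as collected by action="append"

    resolved = []
    for mail_type in raw_mail_type:
        resolved.extend(mail_type.split(","))

    assert resolved == ["begin", "end"]
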
diff --git a/scripts/tests/scripts_regression_tests.py b/scripts/tests/scripts_regression_tests.py
index 20ce9bad2dc..8ca2dacb8be 100755
--- a/scripts/tests/scripts_regression_tests.py
+++ b/scripts/tests/scripts_regression_tests.py
@@ -31,7 +31,7 @@
TEST_COMPILER = None
GLOBAL_TIMEOUT = None
TEST_MPILIB = None
-MACHINE = Machines()
+MACHINE = None
FAST_ONLY = False
NO_BATCH = False
NO_CMAKE = False
@@ -1037,6 +1037,38 @@ def test_c_use_existing(self):
assert_test_status(self, test_name, ts, SUBMIT_PHASE, TEST_PASS_STATUS)
assert_test_status(self, test_name, ts, RUN_PHASE, TEST_PASS_STATUS)
+ del os.environ["TESTBUILDFAIL_PASS"]
+ del os.environ["TESTRUNFAIL_PASS"]
+
+ # test that passed tests are not re-run
+
+ ct2 = TestScheduler(tests, test_id=test_id, no_batch=NO_BATCH, use_existing=True,
+ test_root=TEST_ROOT,output_root=TEST_ROOT,compiler=self._compiler,
+ mpilib=TEST_MPILIB)
+
+ log_lvl = logging.getLogger().getEffectiveLevel()
+ logging.disable(logging.CRITICAL)
+ try:
+ ct2.run_tests()
+ finally:
+ logging.getLogger().setLevel(log_lvl)
+
+ self._wait_for_tests(test_id)
+
+ for test_status in test_statuses:
+ ts = TestStatus(test_dir=os.path.dirname(test_status))
+ test_name = ts.get_name()
+ assert_test_status(self, test_name, ts, MODEL_BUILD_PHASE, TEST_PASS_STATUS)
+ assert_test_status(self, test_name, ts, SUBMIT_PHASE, TEST_PASS_STATUS)
+ assert_test_status(self, test_name, ts, RUN_PHASE, TEST_PASS_STATUS)
+
+ ###########################################################################
+ def test_d_retry(self):
+ ###########################################################################
+ args = ["TESTBUILDFAIL_P1.f19_g16_rx1.A", "TESTRUNFAIL_P1.f19_g16_rx1.A", "TESTRUNPASS_P1.f19_g16_rx1.A", "--retry=1"]
+
+ self._create_test(args)
+
###############################################################################
class P_TestJenkinsGenericJob(TestCreateTestCommon):
###############################################################################
@@ -1399,6 +1431,8 @@ def test_cime_case(self):
###########################################################################
def test_cime_case_prereq(self):
###########################################################################
+ if not MACHINE.has_batch_system() or NO_BATCH:
+ self.skipTest("Skipping testing user prerequisites without batch systems")
testcase_name = 'prereq_test'
testdir = os.path.join(TEST_ROOT, testcase_name)
if os.path.exists(testdir):
@@ -1420,7 +1454,7 @@ def test_cime_case_prereq(self):
for batch_cmd in batch_commands:
self.assertTrue(isinstance(batch_cmd, collections.Sequence), "case.submit_jobs did not return a sequence of sequences")
self.assertTrue(len(batch_cmd) > batch_cmd_index, "case.submit_jobs returned internal sequences with length <= {}".format(batch_cmd_index))
- self.assertTrue(isinstance(batch_cmd[1], str), "case.submit_jobs returned internal sequences without the batch command string as the second parameter: {}".format(batch_cmd[1]))
+ self.assertTrue(isinstance(batch_cmd[1], six.string_types), "case.submit_jobs returned internal sequences without the batch command string as the second parameter: {}".format(batch_cmd[1]))
batch_cmd_args = batch_cmd[1]
jobid_ident = 'jobid'
@@ -1477,10 +1511,10 @@ def test_cime_case_build_threaded_2(self):
###########################################################################
def test_cime_case_mpi_serial(self):
###########################################################################
- self._create_test(["--no-build", "TESTRUNPASS_Mmpi-serial.f19_g16_rx1.A"], test_id=self._baseline_name)
+ self._create_test(["--no-build", "TESTRUNPASS_Mmpi-serial_P10.f19_g16_rx1.A"], test_id=self._baseline_name)
casedir = os.path.join(self._testroot,
- "%s.%s" % (CIME.utils.get_full_test_name("TESTRUNPASS_Mmpi-serial.f19_g16_rx1.A", machine=self._machine, compiler=self._compiler), self._baseline_name))
+ "%s.%s" % (CIME.utils.get_full_test_name("TESTRUNPASS_Mmpi-serial_P10.f19_g16_rx1.A", machine=self._machine, compiler=self._compiler), self._baseline_name))
self.assertTrue(os.path.isdir(casedir), msg="Missing casedir '%s'" % casedir)
with Case(casedir, read_only=True) as case:
@@ -1491,13 +1525,15 @@ def test_cime_case_mpi_serial(self):
# Serial cases should be using 1 task
self.assertEqual(case.get_value("TOTALPES"), 1)
+ self.assertEqual(case.get_value("NTASKS_CPL"), 1)
+
###########################################################################
def test_cime_case_force_pecount(self):
###########################################################################
- self._create_test(["--no-build", "--force-procs=16", "--force-threads=8", "TESTRUNPASS_Mmpi-serial.f19_g16_rx1.A"], test_id=self._baseline_name)
+ self._create_test(["--no-build", "--force-procs=16", "--force-threads=8", "TESTRUNPASS.f19_g16_rx1.A"], test_id=self._baseline_name)
casedir = os.path.join(self._testroot,
- "%s.%s" % (CIME.utils.get_full_test_name("TESTRUNPASS_Mmpi-serial_P16x8.f19_g16_rx1.A", machine=self._machine, compiler=self._compiler), self._baseline_name))
+ "%s.%s" % (CIME.utils.get_full_test_name("TESTRUNPASS_P16x8.f19_g16_rx1.A", machine=self._machine, compiler=self._compiler), self._baseline_name))
self.assertTrue(os.path.isdir(casedir), msg="Missing casedir '%s'" % casedir)
with Case(casedir, read_only=True) as case:
@@ -2462,9 +2498,15 @@ def _main_func():
midx = sys.argv.index("--machine")
mach_name = sys.argv[midx + 1]
MACHINE = Machines(machine=mach_name)
- os.environ["CIME_MACHINE"] = mach_name
del sys.argv[midx + 1]
del sys.argv[midx]
+ os.environ["CIME_MACHINE"] = mach_name
+ elif "CIME_MACHINE" in os.environ:
+ mach_name = os.environ["CIME_MACHINE"]
+ MACHINE = Machines(machine=mach_name)
+ else:
+ MACHINE = Machines()
+
if "--compiler" in sys.argv:
global TEST_COMPILER
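
MACHINE is now resolved inside _main_func instead of at import time, with precedence: explicit --machine argument, then the CIME_MACHINE environment variable, then the default probe. A condensed sketch of that precedence (make_machines stands in for the Machines constructor):

    import os

    def resolve_machine(argv, make_machines):
        if "--machine" in argv:
            mach_name = argv[argv.index("--machine") + 1]
            os.environ["CIME_MACHINE"] = mach_name
            return make_machines(mach_name)
        if "CIME_MACHINE" in os.environ:
            return make_machines(os.environ["CIME_MACHINE"])
        return make_machines(None)  # probe the default machine

    print(resolve_machine(["--machine", "cheyenne"], lambda name: name))
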
diff --git a/src/drivers/mct/cime_config/config_component.xml b/src/drivers/mct/cime_config/config_component.xml
index f9f637c9079..90508ebd910 100644
--- a/src/drivers/mct/cime_config/config_component.xml
+++ b/src/drivers/mct/cime_config/config_component.xml
@@ -692,7 +692,7 @@
     <type>char</type>
-    <valid_values>mct</valid_values>
+    <valid_values>mct,nuopc</valid_values>
     <default_value>mct</default_value>
     <group>build_def</group>
     <file>env_build.xml</file>
@@ -2282,15 +2282,15 @@
     <file>env_run.xml</file>
     <desc>pio root processor relative to component root</desc>
-    <value>0</value>
-    <value>0</value>
-    <value>0</value>
-    <value>0</value>
-    <value>0</value>
-    <value>0</value>
-    <value>0</value>
-    <value>0</value>
-    <value>0</value>
+    <value>1</value>
+    <value>1</value>
+    <value>1</value>
+    <value>1</value>
+    <value>1</value>
+    <value>1</value>
+    <value>1</value>
+    <value>1</value>
+    <value>1</value>
@@ -2600,15 +2600,15 @@
     <type>char</type>
-    <group>run_desc</group>
-    <file>env_run.xml</file>
+    <group>job_submission</group>
+    <file>env_batch.xml</file>
     <desc>Store user override for queue</desc>

     <type>char</type>
-    <group>run_desc</group>
-    <file>env_run.xml</file>
+    <group>job_submission</group>
+    <file>env_batch.xml</file>
     <desc>Store user override for walltime</desc>
diff --git a/src/drivers/mct/main/prep_ocn_mod.F90 b/src/drivers/mct/main/prep_ocn_mod.F90
index 6f14a5aca9f..ac90d9a1518 100644
--- a/src/drivers/mct/main/prep_ocn_mod.F90
+++ b/src/drivers/mct/main/prep_ocn_mod.F90
@@ -160,6 +160,7 @@ subroutine prep_ocn_init(infodata, atm_c2_ocn, atm_c2_ice, ice_c2_ocn, rof_c2_oc
wav_gnam=wav_gnam , &
atm_nx=atm_nx , &
atm_ny=atm_ny , &
+ glc_gnam=glc_gnam , &
esmf_map_flag=esmf_map_flag )
allocate(mapper_Sa2o)
diff --git a/tools/cprnc/Makefile b/tools/cprnc/Makefile
index ba5940d1e26..e79bf8e1a40 100644
--- a/tools/cprnc/Makefile
+++ b/tools/cprnc/Makefile
@@ -45,7 +45,6 @@ GENF90 = ../../src/externals/genf90/genf90.pl
FC := $(SFC)
FFLAGS += -I$(INC_NETCDF) -I.
-LDFLAGS=$(SLIBS)
#------------------------------------------------------------------------
# Default rules and macros
#------------------------------------------------------------------------
@@ -72,7 +71,7 @@ endif
$(FC) -c $(FFLAGS) $<
$(EXEDIR)/$(EXENAME): $(OBJS)
- $(FC) -o $@ $(OBJS) $(LDFLAGS)
+ $(FC) -o $@ $(OBJS) $(LDFLAGS) $(SLIBS)
compare_vars_mod.F90 : compare_vars_mod.F90.in
perl $(GENF90) $< > $@
diff --git a/tools/mapping/check_maps/src/ESMF_RegridWeightGenCheck.F90 b/tools/mapping/check_maps/src/ESMF_RegridWeightGenCheck.F90
index 01c500ea1b5..6a70217b479 100644
--- a/tools/mapping/check_maps/src/ESMF_RegridWeightGenCheck.F90
+++ b/tools/mapping/check_maps/src/ESMF_RegridWeightGenCheck.F90
@@ -57,7 +57,7 @@ program OfflineTester
type(ESMF_VM) :: vm
- character(ESMF_MAXSTR) :: wgtfile, title
+ character(ESMF_MAXSTR) :: wgtfile
real(ESMF_KIND_R8), pointer :: factorList(:)
integer, pointer :: factorIndexList(:,:)
@@ -155,7 +155,7 @@ program OfflineTester
totCnt = 0
! read in the grid dimensions
- call NCFileInquire(wgtfile, title, src_dim, nxs, nys, &
+ call NCFileInquire(wgtfile, src_dim, nxs, nys, &
dst_dim, nxd, nyd, localrc=status)
if (ESMF_LogFoundError(rcToCheck=status, msg=ESMF_LOGERR_PASSTHRU, &
line=__LINE__, file=__FILE__, rcToReturn=rc)) &
@@ -774,10 +774,9 @@ program OfflineTester
! The weights file should have the source and destination grid information
! provided.
!***********************************************************************************
- subroutine NCFileInquire (wgtfile, title, src_dim, nxs, nys, dst_dim, nxd, nyd, localrc)
+ subroutine NCFileInquire (wgtfile, src_dim, nxs, nys, dst_dim, nxd, nyd, localrc)
character(ESMF_MAXSTR), intent(in) :: wgtfile
- character(ESMF_MAXSTR), intent(out) :: title
integer, intent(out) :: src_dim
integer, intent(out) :: nxs, nys
integer, intent(out) :: dst_dim
@@ -786,7 +785,6 @@ subroutine NCFileInquire (wgtfile, title, src_dim, nxs, nys, dst_dim, nxd, nyd,
integer :: ncstat, nc_file_id, nc_srcdim_id, nc_dstdim_id, srcdim, dstdim
integer :: gdims(2), dim_ids(1)
- integer :: titleLen
character(ESMF_MAXSTR) :: msg
@@ -806,29 +804,6 @@ subroutine NCFileInquire (wgtfile, title, src_dim, nxs, nys, dst_dim, nxd, nyd,
! source grid dimensions
!-----------------------------------------------------------------
- ncstat = nf90_inquire_attribute(nc_file_id, nf90_global, 'title', len=titleLen)
- if(ncstat /= 0) then
- write (msg, '(a,i4)') "- nf90_inquire_attribute error:", ncstat
- call ESMF_LogSetError(ESMF_RC_SYS, msg=msg, &
- line=__LINE__, file=__FILE__ , rcToReturn=rc)
- return
- endif
- if(len(title) < titleLen) then
- print *, "Not enough space to put title."
- return
- end if
- ncstat = nf90_get_att(nc_file_id, nf90_global, "title", title)
- if(ncstat /= 0) then
- write (msg, '(a,i4)') "- nf90_get_att error:", ncstat
- call ESMF_LogSetError(ESMF_RC_SYS, msg=msg, &
- line=__LINE__, file=__FILE__ , rcToReturn=rc)
- return
- endif
-
- !-----------------------------------------------------------------
- ! source grid dimensions
- !-----------------------------------------------------------------
-
ncstat = nf90_inq_dimid(nc_file_id, 'n_a', nc_srcdim_id)
if(ncstat /= 0) then
write (msg, '(a,i4)') "- nf90_inq_dimid error:", ncstat
diff --git a/tools/mapping/gen_mapping_files/runoff_to_ocn/ncl/merge_mapping_files.ncl b/tools/mapping/gen_mapping_files/runoff_to_ocn/ncl/merge_mapping_files.ncl
index 1e113bd1a25..85e35ea0c1f 100644
--- a/tools/mapping/gen_mapping_files/runoff_to_ocn/ncl/merge_mapping_files.ncl
+++ b/tools/mapping/gen_mapping_files/runoff_to_ocn/ncl/merge_mapping_files.ncl
@@ -147,7 +147,12 @@ begin
print("determining map_ms vals where REGION_MASK<0")
sign_REGION_MASK_ms = sign_matlab(REGION_MASK_1d(map_in_ms_row-1))
ind_vals_ms = ind(sign_REGION_MASK_ms .lt. 0)
- n_s_subset_ms = dimsizes(ind_vals_ms)
+ if (all(ismissing(ind_vals_ms))) then
+ print("No source points mapped to marginal seas: output will just contain points mapped to open ocean")
+ n_s_subset_ms = 0
+ else
+ n_s_subset_ms = dimsizes(ind_vals_ms)
+ end if
n_s_out = n_s_subset_oo + n_s_subset_ms
@@ -174,9 +179,11 @@ begin
map_out_col(0:n_s_subset_oo-1) = (/ map_in_oo_col(ind_vals_oo) /)
map_out_row(0:n_s_subset_oo-1) = (/ map_in_oo_row(ind_vals_oo) /)
- map_out_S(n_s_subset_oo:n_s_subset_oo+n_s_subset_ms-1) = (/ map_in_ms_S(ind_vals_ms) /)
- map_out_col(n_s_subset_oo:n_s_subset_oo+n_s_subset_ms-1) = (/ map_in_ms_col(ind_vals_ms) /)
- map_out_row(n_s_subset_oo:n_s_subset_oo+n_s_subset_ms-1) = (/ map_in_ms_row(ind_vals_ms) /)
+ if (n_s_subset_ms .gt. 0) then
+ map_out_S(n_s_subset_oo:n_s_subset_oo+n_s_subset_ms-1) = (/ map_in_ms_S(ind_vals_ms) /)
+ map_out_col(n_s_subset_oo:n_s_subset_oo+n_s_subset_ms-1) = (/ map_in_ms_col(ind_vals_ms) /)
+ map_out_row(n_s_subset_oo:n_s_subset_oo+n_s_subset_ms-1) = (/ map_in_ms_row(ind_vals_ms) /)
+ end if
print("writing merged map, " + MAP_OUT_FNAME)
system("rm -f " + MAP_OUT_FNAME)
diff --git a/tools/mapping/gen_mapping_files/runoff_to_ocn/src/main.F90 b/tools/mapping/gen_mapping_files/runoff_to_ocn/src/main.F90
index e0431901579..dbb3d44aa82 100644
--- a/tools/mapping/gen_mapping_files/runoff_to_ocn/src/main.F90
+++ b/tools/mapping/gen_mapping_files/runoff_to_ocn/src/main.F90
@@ -209,6 +209,14 @@ PROGRAM main
call map_dup(map_orig,map_new)
map_new%title = trim(title)
map_new%domain_b = trim(map_smooth%domain_b)
+
+ ! Need to use mask_b and frac_b from the smooth map rather than the nearest neighbor
+ ! map because, in the case where the nearest neighbor map uses a file_ocn_coastal_mask
+ ! that is a subset of file_ocn, the nearest neighbor map has a too-limited mask_b and
+ ! frac_b (containing only the coastal points).
+ map_new%mask_b = map_smooth%mask_b
+ map_new%frac_b = map_smooth%frac_b
+
call map_matMatMult(map_orig,map_new,map_smooth) ! mult(A,B,S): B=S*A
call mapsort_sort(map_new)
call map_check(map_new)
diff --git a/tools/mapping/gen_mapping_files/runoff_to_ocn/src/map_mod.F90 b/tools/mapping/gen_mapping_files/runoff_to_ocn/src/map_mod.F90
index 7cf87a7d846..1a7dc51ac99 100644
--- a/tools/mapping/gen_mapping_files/runoff_to_ocn/src/map_mod.F90
+++ b/tools/mapping/gen_mapping_files/runoff_to_ocn/src/map_mod.F90
@@ -644,13 +644,29 @@ SUBROUTINE map_gridRead(map, rfilename, ofilename, gridtype, lmake_rSCRIP)
rcode = nf_inq_varid (fid,'grid_imask',vid )
rcode = nf_get_var_int (fid,vid ,map%mask_a)
+
rcode = nf_inq_varid (fid,'grid_area',vid )
- if (rcode.eq.0) then
- rcode = nf_get_var_double(fid,vid ,map%area_a)
- else
+ if (rcode.ne.0) then
write(6,*) "ERROR: could not find variable grid_area in source grid input file!"
stop
end if
+ rcode = nf_get_var_double(fid,vid ,map%area_a)
+ units = "" ! units needs to be emptied before reading from netCDF file
+ rcode = nf_get_att_text(fid, vid, "units", units)
+ if (rcode.ne.0) then
+ write(6,*) "ERROR: No units attribute found for source grid_area variable"
+ write(6,*) "Please add a units attribute with value 'square radians' or 'square degrees'"
+ stop
+ end if
+ if (trim(units).eq."square radians") then
+ ! Nothing to do
+ else if (trim(units).eq."square degrees") then
+ map%area_a = map%area_a * DEGtoRAD * DEGtoRAD
+ else
+ write(6,*) "ERROR: Unrecognized units for source grid_area variable: ", trim(units)
+ write(6,*) "Recognized units are 'square radians' or 'square degrees'"
+ stop
+ end if
map%frac_a = map%mask_a * 1.0_r8
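
The unit conversion above multiplies areas given in square degrees by (pi/180)**2 to get square radians. A quick numeric check (Python for brevity; DEGtoRAD in the Fortran is the same pi/180 factor):

    import math

    DEG_TO_RAD = math.pi / 180.0

    area_sq_degrees = 1.0
    area_sq_radians = area_sq_degrees * DEG_TO_RAD * DEG_TO_RAD

    # One square degree is about 3.0462e-4 square radians.
    assert abs(area_sq_radians - 3.0462e-4) < 1e-7
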