diff --git a/.github/parm/pytest_groups.txt b/.github/parm/pytest_groups.txt index 374b99da8..48bf9bd41 100644 --- a/.github/parm/pytest_groups.txt +++ b/.github/parm/pytest_groups.txt @@ -3,4 +3,5 @@ wrapper wrapper_a wrapper_b wrapper_c +wrapper_d plotting_or_long diff --git a/docs/Users_Guide/glossary.rst b/docs/Users_Guide/glossary.rst index 65b5c0345..9de1696a4 100644 --- a/docs/Users_Guide/glossary.rst +++ b/docs/Users_Guide/glossary.rst @@ -20,9 +20,9 @@ METplus Configuration Glossary [dir] SERIES_ANALYSIS_OUTPUT_DIR = {OUTPUT_BASE}/{custom?fmt=%s} - With this configuration, SeriesAnalysis will be called twice. The first run will use SeriesAnalysisConfig_one and write output to {OUTPUT_BASE}/one. The second run will use SeriesAnalysisConfig_two and write output to {OUTPUT_BASE}/two. + With this configuration, SeriesAnalysis will be called twice. The first run will use SeriesAnalysisConfig_one and write output to {OUTPUT_BASE}/one. The second run will use SeriesAnalysisConfig_two and write output to {OUTPUT_BASE}/two. - If unset or left blank, the wrapper will run once per run time. There are also wrapper-specific configuration variables to define a custom string loop list for a single wrapper, i.e. :term:`SERIES_ANALYSIS_CUSTOM_LOOP_LIST` and :term:`PCP_COMBINE_CUSTOM_LOOP_LIST`. + If unset or left blank, the wrapper will run once per run time. There are also wrapper-specific configuration variables to define a custom string loop list for a single wrapper, i.e. :term:`SERIES_ANALYSIS_CUSTOM_LOOP_LIST` and :term:`PCP_COMBINE_CUSTOM_LOOP_LIST`. | *Used by:* Many @@ -114,37 +114,12 @@ METplus Configuration Glossary GROUP_LIST_ITEMS Names of the lists in the METplus .conf file to treat the items in those lists as a group. - | *Used by:* MakePlots, StatAnalysis + | *Used by:* StatAnalysis LOOP_LIST_ITEMS Names of the lists in the METplus .conf file to treat the items in those lists individually. - | *Used by:* MakePlots, StatAnalysis - - MAKE_PLOTS_AVERAGE_METHOD - The method to use to average the data. Valid options are MEAN, MEDIAN, and AGGREGATION. - - | *Used by:* MakePlots - - MAKE_PLOTS_SCRIPTS_DIR - Directory to find scripts used by MakePlots. - - | *Used by:* MakePlots - - MAKE_PLOTS_INPUT_DIR - Directory containing input files used by MakePlots. - - | *Used by:* MakePlots - - MAKE_PLOTS_OUTPUT_DIR - Directory to write files generated by MakePlots. - - | *Used by:* MakePlots - - MAKE_PLOTS_VERIF_CASE - Verification case used by MakePlots. Valid options for this include: grid2grid, grid2obs, precip. - - | *Used by:* MakePlots + | *Used by:* StatAnalysis CYCLONE_PLOTTER_OUTPUT_DIR Directory for saving files generated by CyclonePlotter. @@ -766,14 +741,6 @@ METplus Configuration Glossary BMODEL .. warning:: **DEPRECATED:** Please use :term:`TC_STAT_BMODEL`. - CI_METHOD - .. warning:: **DEPRECATED:** Please use :term:`MAKE_PLOTS_CI_METHOD`. - - MAKE_PLOTS_CI_METHOD - The method for creating confidence intervals. Valid options are EMC, or NONE. - - | *Used by:* MakePlots - CYCLONE_CIRCLE_MARKER_SIZE .. warning:: **DEPRECATED:** Please use :term:`CYCLONE_PLOTTER_CIRCLE_MARKER_SIZE`. @@ -811,7 +778,7 @@ METplus Configuration Glossary COV_THRESH_LIST Specify the values of the COV_THRESH column in the MET .stat file to use; - | *Used by:* MakePlots, StatAnalysis + | *Used by:* StatAnalysis CYCLONE_CROSS_MARKER_SIZE .. warning:: **DEPRECATED:** Please use :term:`CYCLONE_PLOTTER_CROSS_MARKER_SIZE`. 
@@ -908,14 +875,24 @@ METplus Configuration Glossary | *Used by:* TCMPRPlotter DESC_LIST - A single value or list of values used in the stat_analysis data stratification. Specifies the values of the DESC column in the MET .stat file to use. + A single value or list of values used in the stat_analysis data + stratification. + Specifies the values of the DESC column in the MET .stat file to use. - | *Used by:* MakePlots, StatAnalysis + Groups of values can be looped over by setting DESC_LIST<n> and + adding DESC_LIST to :term:`LOOP_LIST_ITEMS`. + See :ref:`stat-analysis-looping-groups` for more information. + + | *Used by:* StatAnalysis ALPHA_LIST A single value or list of values used in the stat_analysis data stratification. Specifies the values of the ALPHA column in the MET .stat file to use. - | *Used by:* MakePlots, StatAnalysis + Groups of values can be looped over by setting ALPHA_LIST<n> and + adding ALPHA_LIST to :term:`LOOP_LIST_ITEMS`. + See :ref:`stat-analysis-looping-groups` for more information. + + | *Used by:* StatAnalysis DLAND_FILE .. warning:: **DEPRECATED:** Please use :term:`TC_PAIRS_DLAND_FILE`. @@ -1124,14 +1101,6 @@ METplus Configuration Glossary | *Used by:* EnsembleStat - EVENT_EQUALIZATION - .. warning:: **DEPRECATED:** Please use :term:`MAKE_PLOTS_EVENT_EQUALIZATION`. - - MAKE_PLOTS_EVENT_EQUALIZATION - If event equalization is to be used (True) or not (False). If set to True, if any of the listed models are missing data for a particular time, data for all models will be masked out for this time. If set to False, there are no changes to the data. - - | *Used by:* MakePlots - EXTRACT_OUT_DIR .. warning:: **DEPRECATED:** Please use :term:`EXTRACT_TILES_OUTPUT_DIR`. @@ -1622,11 +1591,19 @@ METplus Configuration Glossary FCST_THRESH_LIST Specify the values of the FCST_THRESH column in the MET .stat file to use. + Groups of values can be looped over by setting FCST_THRESH_LIST<n> and + adding FCST_THRESH_LIST to :term:`LOOP_LIST_ITEMS`. + See :ref:`stat-analysis-looping-groups` for more information. + | *Used by:* StatAnalysis OBS_THRESH_LIST Specify the values of the OBS_THRESH column in the MET .stat file to use. + Groups of values can be looped over by setting OBS_THRESH_LIST<n> and + adding OBS_THRESH_LIST to :term:`LOOP_LIST_ITEMS`. + See :ref:`stat-analysis-looping-groups` for more information. + | *Used by:* StatAnalysis FCST_TILE_PREFIX @@ -1649,6 +1626,10 @@ METplus Configuration Glossary FCST_LEVEL_LIST Specify the values of the FCST_LEV column in the MET .stat file to use. + Groups of values can be looped over by setting FCST_LEVEL_LIST<n> and + adding FCST_LEVEL_LIST to :term:`LOOP_LIST_ITEMS`. + See :ref:`stat-analysis-looping-groups` for more information. + | *Used by:* StatAnalysis FCST_VAR_NAME @@ -1657,11 +1638,19 @@ METplus Configuration Glossary FCST_VAR_LIST Specify the values of the FCST_VAR column in the MET .stat file to use. + Groups of values can be looped over by setting FCST_VAR_LIST<n> and + adding FCST_VAR_LIST to :term:`LOOP_LIST_ITEMS`. + See :ref:`stat-analysis-looping-groups` for more information. + | *Used by:* StatAnalysis FCST_UNITS_LIST Specify the values of the FCST_UNITS column in the MET .stat file to use. + Groups of values can be looped over by setting FCST_UNITS_LIST<n> and + adding FCST_UNITS_LIST to :term:`LOOP_LIST_ITEMS`. + See :ref:`stat-analysis-looping-groups` for more information.
+ | *Used by:* StatAnalysis FCST_VAR_LEVELS @@ -2171,14 +2160,24 @@ METplus Configuration Glossary | *Used by:* TCPairs FCST_INIT_HOUR_LIST - Specify a list of hours for initialization times of forecast files for use in the analysis. + Specify a list of hours for initialization times of forecast files for + use in the analysis. + + Groups of values can be looped over by setting FCST_INIT_HOUR_LIST<n> and + adding FCST_INIT_HOUR_LIST to :term:`LOOP_LIST_ITEMS`. + See :ref:`stat-analysis-looping-groups` for more information. - | *Used by:* MakePlots, StatAnalysis + | *Used by:* StatAnalysis OBS_INIT_HOUR_LIST - Specify a list of hours for initialization times of observation files for use in the analysis. + Specify a list of hours for initialization times of observation files for + use in the analysis. - | *Used by:* MakePlots, StatAnalysis + Groups of values can be looped over by setting OBS_INIT_HOUR_LIST<n> and + adding OBS_INIT_HOUR_LIST to :term:`LOOP_LIST_ITEMS`. + See :ref:`stat-analysis-looping-groups` for more information. + + | *Used by:* StatAnalysis INIT_HOUR_BEG .. warning:: **DEPRECATED:** Please use :term:`FCST_INIT_HOUR_LIST` or :term:`OBS_INIT_HOUR_LIST` instead. @@ -2219,17 +2218,27 @@ METplus Configuration Glossary .. warning:: **DEPRECATED:** Please use :term:`INTERP_MTHD_LIST` instead. INTERP_MTHD_LIST - Specify the values of the INTERP_MTHD column in the MET .stat file to use; specify the interpolation used to create the MET .stat files. + Specify the values of the INTERP_MTHD column in the MET .stat file to use; + specify the interpolation used to create the MET .stat files. - | *Used by:* MakePlots, StatAnalysis + Groups of values can be looped over by setting INTERP_MTHD_LIST<n> and + adding INTERP_MTHD_LIST to :term:`LOOP_LIST_ITEMS`. + See :ref:`stat-analysis-looping-groups` for more information. + + | *Used by:* StatAnalysis INTERP_PTS .. warning:: **DEPRECATED:** Please use :term:`INTERP_PNTS_LIST` instead. INTERP_PNTS_LIST - Specify the values of the INTERP_PNTS column in the MET .stat file to use; corresponds to the interpolation in the MET .stat files. + Specify the values of the INTERP_PNTS column in the MET .stat file to use; + corresponds to the interpolation in the MET .stat files. - | *Used by:* MakePlots, StatAnalysis + Groups of values can be looped over by setting INTERP_PNTS_LIST<n> and + adding INTERP_PNTS_LIST to :term:`LOOP_LIST_ITEMS`. + See :ref:`stat-analysis-looping-groups` for more information. + + | *Used by:* StatAnalysis INTERVAL_TIME Define the interval time in hours (HH) to be used by the MET pb2nc tool. @@ -2239,18 +2248,23 @@ METplus Configuration Glossary JOB_ARGS .. warning:: **DEPRECATED:** Please use :term:`STAT_ANALYSIS_JOB_ARGS` instead. - STAT_ANALYSIS_JOB_ARGS - Specify stat_analysis job arguments to run. The job arguments that are to be run with the corresponding :term:`STAT_ANALYSIS_JOB_NAME`. If using -dump_row, use -dump_row [dump_row_filename]. If using -out_stat, -out_stat [out_stat_filename]. For more information on these job arguments, please see the `MET User's Guide `_. + STAT_ANALYSIS_JOB<n> + Specify StatAnalysis job arguments to run. Include the full set of job + arguments including the -job argument. Multiple jobs can be defined + with STAT_ANALYSIS_JOB1, STAT_ANALYSIS_JOB2, etc. + Filename template tags can be used to insert values from a given run into + the job arguments.
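+ For example, a hypothetical two-job setup (the job arguments shown are + illustrative, not required values):: + + [config] + STAT_ANALYSIS_JOB1 = -job filter -line_type SL1L2 -dump_row {OUTPUT_BASE}/filter.stat + STAT_ANALYSIS_JOB2 = -job summary -line_type SL1L2 -column FBAR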
The keywords [dump_row_file] and [out_stat_file] can + be used and will be substituted with values from + :term:`MODEL_STAT_ANALYSIS_DUMP_ROW_TEMPLATE` and + :term:`MODEL_STAT_ANALYSIS_OUT_STAT_TEMPLATE` respectively. | *Used by:* StatAnalysis - JOB_NAME - .. warning:: **DEPRECATED:** Please use :term:`STAT_ANALYSIS_JOB_NAME` instead. + STAT_ANALYSIS_JOB_ARGS + .. warning:: **DEPRECATED:** Please use :term:`STAT_ANALYSIS_JOB\<n>` instead. STAT_ANALYSIS_JOB_NAME - Specify stat_analysis job name to run. Valid options are filter, summary, aggregate, aggregate_stat, go_index, and ramp. For more information on these job names and what they do, please see the `MET User's Guide `_. - - | *Used by:* StatAnalysis + .. warning:: **DEPRECATED:** Please use :term:`STAT_ANALYSIS_JOB\<n>` instead. EXTRACT_TILES_LAT_ADJ Specify a latitude adjustment, in degrees to be used in the analysis. In the ExtractTiles wrapper, this corresponds to the 2m portion of the 2n x 2m subregion tile. @@ -2272,14 +2286,23 @@ METplus Configuration Glossary .. warning:: **DEPRECATED:** Please use :term:`FCST_LEAD_LIST` instead. FCST_LEAD_LIST - Specify the values of the FSCT_LEAD column in the MET .stat file to use. Comma separated list format, e.g.: 00, 24, 48, 72, 96, 120 + Specify the values of the FCST_LEAD column in the MET .stat file to use. + Comma separated list format, e.g.: 00, 24, 48, 72, 96, 120 - | *Used by:* MakePlots, StatAnalysis + Groups of values can be looped over by setting FCST_LEAD_LIST<n> and + adding FCST_LEAD_LIST to :term:`LOOP_LIST_ITEMS`. + See :ref:`stat-analysis-looping-groups` for more information. + + | *Used by:* StatAnalysis OBS_LEAD_LIST Specify the values of the OBS_LEAD column in the MET .stat file to use. Comma separated list format, e.g.: 00, 24, 48, 72, 96, 120 - | *Used by:* MakePlots, StatAnalysis + Groups of values can be looped over by setting OBS_LEAD_LIST<n> and + adding OBS_LEAD_LIST to :term:`LOOP_LIST_ITEMS`. + See :ref:`stat-analysis-looping-groups` for more information. + + | *Used by:* StatAnalysis LEAD_SEQ Specify the sequence of forecast lead times to include in the analysis. Comma separated list format, e.g.:0, 6, 12. See :ref:`looping_over_forecast_leads` for more information. Units are assumed to be hours unless specified with Y, m, d, H, M, or S. @@ -2321,7 +2344,11 @@ METplus Configuration Glossary LINE_TYPE_LIST Specify the MET STAT line types to be considered. - | *Used by:* MakePlots, StatAnalysis, TCMPRPlotter + Groups of values can be looped over by setting LINE_TYPE_LIST<n> and + adding LINE_TYPE_LIST to :term:`LOOP_LIST_ITEMS`. + See :ref:`stat-analysis-looping-groups` for more information. + + | *Used by:* StatAnalysis, TCMPRPlotter LOG_DIR Specify the directory where log files from MET and METplus should be written. @@ -2446,8 +2473,13 @@ METplus Configuration Glossary MODEL_LIST List of the specified the model names. + If this is left unset, then values from :term:`MODEL\<n>` will be used. - | *Used by:* MakePlots, StatAnalysis + Groups of values can be looped over by setting MODEL_LIST<n> and + adding MODEL_LIST to :term:`LOOP_LIST_ITEMS`. + See :ref:`stat-analysis-looping-groups` for more information. + + | *Used by:* StatAnalysis MODEL_NAME .. warning:: **DEPRECATED:** Please use :term:`MODEL\<n>`. @@ -2460,20 +2492,10 @@ METplus Configuration Glossary | ... | MODEL<n> - | *Used by:* MakePlots, StatAnalysis - - MODEL_NAME_ON_PLOT - .. warning:: **DEPRECATED:** Please use :term:`MODEL_REFERENCE_NAME` instead.
+ | *Used by:* StatAnalysis MODEL_REFERENCE_NAME - Define the name the first model will be listed as on the plots. There can be number of models defined in configuration files, simply increment the "MODEL1" string to match the total number of models being used, e.g.: - - | MODEL1_REFERENCE_NAME - | MODEL2_REFERENCE_NAME - | ... - | MODELN_REFERENCE_NAME - - | *Used by:* MakePlots, StatAnalysis + .. warning:: **DEPRECATED:** No longer used. MODEL_OBS_NAME .. warning:: **DEPRECATED:** Please use :term:`MODEL_OBTYPE` instead. @@ -2486,7 +2508,7 @@ METplus Configuration Glossary | ... | MODEL<n>_OBTYPE - | *Used by:* MakePlots, StatAnalysis + | *Used by:* StatAnalysis MODEL_STAT_DIR .. warning:: **DEPRECATED:** Please use :term:`MODEL_STAT_ANALYSIS_LOOKIN_DIR` instead. @@ -3209,6 +3231,10 @@ METplus Configuration Glossary OBS_LEVEL_LIST Specify the values of the OBS_LEV column in the MET .stat file to use. + Groups of values can be looped over by setting OBS_LEVEL_LIST<n> and + adding OBS_LEVEL_LIST to :term:`LOOP_LIST_ITEMS`. + See :ref:`stat-analysis-looping-groups` for more information. + | *Used by:* StatAnalysis OBS_VAR_NAME @@ -3217,11 +3243,19 @@ METplus Configuration Glossary OBS_VAR_LIST Specify the values of the OBS_VAR column in the MET .stat file to use. + Groups of values can be looped over by setting OBS_VAR_LIST<n> and + adding OBS_VAR_LIST to :term:`LOOP_LIST_ITEMS`. + See :ref:`stat-analysis-looping-groups` for more information. + | *Used by:* StatAnalysis OBS_UNITS_LIST Specify the values of the OBS_UNITS column in the MET .stat file to use. + Groups of values can be looped over by setting OBS_UNITS_LIST<n> and + adding OBS_UNITS_LIST to :term:`LOOP_LIST_ITEMS`. + See :ref:`stat-analysis-looping-groups` for more information. + | *Used by:* StatAnalysis OBS_VAR_LEVELS @@ -3445,12 +3479,6 @@ METplus Configuration Glossary | *Used by:* PCPCombine - PLOTTING_OUTPUT_DIR - .. warning:: **DEPRECATED:** Please use :term:`MAKE_PLOTS_OUTPUT_DIR` instead. - - PLOTTING_SCRIPTS_DIR - .. warning:: **DEPRECATED:** Please use :term:`MAKE_PLOTS_SCRIPTS_DIR` instead. - PLOT_CONFIG_OPTS .. warning:: **DEPRECATED:** Please use :term:`TCMPR_PLOTTER_PLOT_CONFIG_OPTS` instead. @@ -3459,25 +3487,13 @@ METplus Configuration Glossary | *Used by:* TCMPRPlotter - PLOT_STATS_LIST - .. warning:: **DEPRECATED:** Please use :term:`MAKE_PLOTS_STATS_LIST` instead. - - MAKE_PLOTS_STATS_LIST - This is a list of the statistics to calculate and create plots for. Specify the list in a comma-separated list, e.g.: - - acc, bias, rmse - - The list of valid options varies depending on line type that was used during the filtering of stat_analysis_wrapper. For SL1L2, VL1L2 valid options are bias, rms, msess, rsd, rmse_md, rmse_pv, pcor, fbar, and fbar_obar. For SAL1L2, VAL1L2, the valid options is acc. For VCNT, bias, fbar, fbar_obar, speed_err, dir_err, rmsve, vdiff_speed, vdiff_dir, rsd, fbar_speed, fbar_dir, fbar_obar_speed, and fbar_obar_dir. For CTC, rate, baser, frate, orate_frate, baser_frate, accuracy, bias, fbias, pod, hrate, pofd, farate, podn, faratio, csi, ts, gss, ets, hk, tss, pss, hs - - | *Used by:* MakePlots - PLOT_TIME .. warning:: **DEPRECATED:** Please use :term:`DATE_TYPE` instead. DATE_TYPE In StatAnalysis, this specifies the way to treat the date information, where valid options are VALID and INIT. - | *Used by:* MakePlots, StatAnalysis + | *Used by:* StatAnalysis PLOT_TYPES .. warning:: **DEPRECATED:** Please use :term:`TCMPR_PLOTTER_PLOT_TYPES` instead.
@@ -3588,9 +3604,14 @@ METplus Configuration Glossary .. warning:: **DEPRECATED:** Please use :term:`VX_MASK_LIST` instead. VX_MASK_LIST - Specify the values of the VX_MASK column in the MET .stat file to use; a list of the verification regions of interest. + Specify the values of the VX_MASK column in the MET .stat file to use; + a list of the verification regions of interest. - | *Used by:* MakePlots, StatAnalysis + Groups of values can be looped over by setting VX_MASK_LIST<n> and + adding VX_MASK_LIST to :term:`LOOP_LIST_ITEMS`. + See :ref:`stat-analysis-looping-groups` for more information. + + | *Used by:* StatAnalysis POINT2GRID_REGRID_METHOD Sets the gridding method used by point2grid. @@ -3865,9 +3886,6 @@ METplus Configuration Glossary | *Used by:* StatAnalysis - STAT_FILES_INPUT_DIR - .. warning:: **DEPRECATED:** Please use :term:`MAKE_PLOTS_INPUT_DIR` instead. - SERIES_ANALYSIS_STAT_LIST .. warning:: **DEPRECATED:** Please use :term:`SERIES_ANALYSIS_OUTPUT_STATS_CNT` instead. @@ -4329,14 +4347,24 @@ METplus Configuration Glossary | *Used by:* All FCST_VALID_HOUR_LIST - Specify a list of hours for valid times of forecast files for use in the analysis. + Specify a list of hours for valid times of forecast files for use + in the analysis. + + Groups of values can be looped over by setting FCST_VALID_HOUR_LIST<n> and + adding FCST_VALID_HOUR_LIST to :term:`LOOP_LIST_ITEMS`. + See :ref:`stat-analysis-looping-groups` for more information. - | *Used by:* MakePlots, StatAnalysis + | *Used by:* StatAnalysis OBS_VALID_HOUR_LIST - Specify a list of hours for valid times of observation files for use in the analysis. + Specify a list of hours for valid times of observation files for use + in the analysis. - | *Used by:* MakePlots, StatAnalysis + Groups of values can be looped over by setting OBS_VALID_HOUR_LIST<n> and + adding OBS_VALID_HOUR_LIST to :term:`LOOP_LIST_ITEMS`. + See :ref:`stat-analysis-looping-groups` for more information. + + | *Used by:* StatAnalysis VALID_HOUR_BEG .. warning:: **DEPRECATED:** Please use :term:`FCST_VALID_HOUR_LIST` or :term:`OBS_VALID_HOUR_LIST` instead. @@ -4369,35 +4397,16 @@ METplus Configuration Glossary VAR_FOURIER_DECOMP Specify if Fourier decomposition is to be considered (True) or not (False). If this is set to True, data stratification will be done for the Fourier decomposition of FCS_VAR_NAME. This should have been previously run in grid_stat_wrapper. The default value is set to False. - | *Used by:* MakePlots, StatAnalysis + | *Used by:* StatAnalysis VAR_WAVE_NUM_LIST Specify a comma separated list of wave numbers pairings of the Fourier decomposition. - | *Used by:* MakePlots, StatAnalysis + | *Used by:* StatAnalysis VERIFICATION_GRID .. warning:: **DEPRECATED:** Please use :term:`REGRID_DATA_PLANE_VERIF_GRID` instead. - VERIF_CASE - .. warning:: **DEPRECATED:** Please use :term:`MAKE_PLOTS_VERIF_CASE`. - - VERIF_GRID - .. warning:: **DEPRECATED:** Please use :term:`MAKE_PLOTS_VERIF_GRID`. - - MAKE_PLOTS_VERIF_GRID - Specify a string describing the grid the verification was performed on. This is the name of the grid upon which the verification was done on, ex. G002. - - | *Used by:* MakePlots - - VERIF_TYPE - .. warning:: **DEPRECATED:** Please use :term:`MAKE_PLOTS_VERIF_TYPE` instead. - - MAKE_PLOTS_VERIF_TYPE - Specify a string describing the type of verification being performed. For MAKE_PLOTS_VERIF_CASE = grid2grid, valid options are anom, pres, and sfc. For MAKE_PLOTS_VERIF_CASE = grid2obs, valid options are conus_sfc and upper_air.
For MAKE_PLOTS_VERIF_CASE = precip, any accumulation amount is valid, ex. A24. - - | *Used by:* MakePlots - VERTICAL_LOCATION .. warning:: **DEPRECATED:** Specify the vertical location desired when using the MET pb2nc tool. @@ -9947,3 +9956,135 @@ METplus Configuration Glossary Specify the value for 'nc_orank_flag.weight' in the MET configuration file for EnsembleStat. | *Used by:* EnsembleStat + + STAT_ANALYSIS_FCST_INIT_BEG + Specify the value for 'fcst_init_beg' in the MET configuration file for + StatAnalysis. This can refer to filename template tags that are set by + the wrapper. Example:: + + [config] + INIT_BEG = 20221014 + STAT_ANALYSIS_FCST_INIT_BEG = {fcst_init_beg?fmt=%Y%m%d_%H} + + will set fcst_init_beg = "20221014_00"; in the wrapped MET config file. + + | *Used by:* StatAnalysis + + STAT_ANALYSIS_FCST_INIT_END + Specify the value for 'fcst_init_end' in the MET configuration file for + StatAnalysis. This can refer to filename template tags that are set by + the wrapper. Example:: + + [config] + INIT_BEG = 20221014 + INIT_END = 20221015 + STAT_ANALYSIS_FCST_INIT_END = {fcst_init_beg?fmt=%Y%m%d}_12 + + will set fcst_init_end = "20221014_12"; in the wrapped MET config file. + + | *Used by:* StatAnalysis + + STAT_ANALYSIS_OBS_INIT_BEG + Specify the value for 'obs_init_beg' in the MET configuration file for + StatAnalysis. This can refer to filename template tags that are set by + the wrapper. Example:: + + [config] + INIT_BEG = 20221014 + STAT_ANALYSIS_OBS_INIT_BEG = {obs_init_beg?fmt=%Y%m%d_%H} + + will set obs_init_beg = "20221014_00"; in the wrapped MET config file. + + | *Used by:* StatAnalysis + + STAT_ANALYSIS_OBS_INIT_END + Specify the value for 'obs_init_end' in the MET configuration file for + StatAnalysis. This can refer to filename template tags that are set by + the wrapper. Example:: + + [config] + INIT_END = 20221015 + STAT_ANALYSIS_OBS_INIT_END = {obs_init_end?fmt=%Y%m%d}_12 + + will set obs_init_end = "20221015_12"; in the wrapped MET config file. + + | *Used by:* StatAnalysis + + STAT_ANALYSIS_FCST_VALID_BEG + Specify the value for 'fcst_valid_beg' in the MET configuration file for + StatAnalysis. This can refer to filename template tags that are set by + the wrapper. Example:: + + [config] + VALID_BEG = 20221014 + STAT_ANALYSIS_FCST_VALID_BEG = {fcst_valid_beg?fmt=%Y%m%d_%H} + + will set fcst_valid_beg = "20221014_00"; in the wrapped MET config file. + + | *Used by:* StatAnalysis + + STAT_ANALYSIS_FCST_VALID_END + Specify the value for 'fcst_valid_end' in the MET configuration file for + StatAnalysis. This can refer to filename template tags that are set by + the wrapper. Example:: + + [config] + VALID_BEG = 20221014 + VALID_END = 20221015 + STAT_ANALYSIS_FCST_VALID_END = {fcst_valid_beg?fmt=%Y%m%d}_12 + + will set fcst_valid_end = "20221014_12"; in the wrapped MET config file. + + | *Used by:* StatAnalysis + + STAT_ANALYSIS_OBS_VALID_BEG + Specify the value for 'obs_valid_beg' in the MET configuration file for + StatAnalysis. This can refer to filename template tags that are set by + the wrapper. Example:: + + [config] + VALID_BEG = 20221014 + STAT_ANALYSIS_OBS_VALID_BEG = {obs_valid_beg?fmt=%Y%m%d_%H} + + will set obs_valid_beg = "20221014_00"; in the wrapped MET config file. + + | *Used by:* StatAnalysis + + STAT_ANALYSIS_OBS_VALID_END + Specify the value for 'obs_valid_end' in the MET configuration file for + StatAnalysis. This can refer to filename template tags that are set by + the wrapper.
Example:: + + [config] + VALID_END = 20221015 + STAT_ANALYSIS_OBS_VALID_END = {obs_valid_end?fmt=%Y%m%d}_12 + + will set obs_valid_end = "20221015_12"; in the wrapped MET config file. + + | *Used by:* StatAnalysis + + STAT_ANALYSIS_INIT_BEG + Specify the value for both 'fcst_init_beg' and 'obs_init_beg' in the MET + configuration file for StatAnalysis. + See :term:`STAT_ANALYSIS_FCST_INIT_BEG`. + + | *Used by:* StatAnalysis + + STAT_ANALYSIS_INIT_END + Specify the value for both 'fcst_init_end' and 'obs_init_end' in the MET + configuration file for StatAnalysis. + See :term:`STAT_ANALYSIS_FCST_INIT_END`. + + | *Used by:* StatAnalysis + + STAT_ANALYSIS_VALID_BEG + Specify the value for both 'fcst_valid_beg' and 'obs_valid_beg' in the MET + configuration file for StatAnalysis. + See :term:`STAT_ANALYSIS_FCST_VALID_BEG`. + + | *Used by:* StatAnalysis + + STAT_ANALYSIS_VALID_END + Specify the value for both 'fcst_valid_end' and 'obs_valid_end' in the MET + configuration file for StatAnalysis. + See :term:`STAT_ANALYSIS_FCST_VALID_END`. + + | *Used by:* StatAnalysis diff --git a/docs/Users_Guide/installation.rst b/docs/Users_Guide/installation.rst index a935998ab..dcde8cefa 100644 --- a/docs/Users_Guide/installation.rst +++ b/docs/Users_Guide/installation.rst @@ -113,11 +113,6 @@ to run. - netCDF4 (1.5.4) -- MakePlots wrapper - - - cartopy (0.20.3) - - pandas (1.4.3) - - CyclonePlotter wrapper - cartopy (0.20.3) diff --git a/docs/Users_Guide/systemconfiguration.rst b/docs/Users_Guide/systemconfiguration.rst index c0ff3ef4d..a52b997d3 100644 --- a/docs/Users_Guide/systemconfiguration.rst +++ b/docs/Users_Guide/systemconfiguration.rst @@ -1116,7 +1116,7 @@ paths, and more. The value of each list item can be referenced in the METplus configuration variables by using {custom?fmt=%s}. The variable CUSTOM_LOOP_LIST will apply the values to each wrapper in the PROCESS_LIST unless the wrapper does not support this functionality. CyclonePlotter, -MakePlots, SeriesByInit, SeriesByLead, StatAnalysis, TCStat, and +StatAnalysis, TCStat, and TCMPRPlotter wrappers are not supported. If the variable is not set or set to an empty string, the wrapper will execute as normal without additional runs. The name of the wrapper-specific variables contain the name of the diff --git a/docs/Users_Guide/wrappers.rst b/docs/Users_Guide/wrappers.rst index 8850e47ab..e32b45234 100644 --- a/docs/Users_Guide/wrappers.rst +++ b/docs/Users_Guide/wrappers.rst @@ -3684,91 +3684,6 @@ see :ref:`How METplus controls MET config file settings`. * - :term:`IODA2NC_MET_CONFIG_OVERRIDES` - n/a -.. _make_plots_wrapper: - -MakePlots -========= - -Description ----------- - -The MakePlots wrapper creates various statistical plots using python -scripts for the various METplus Wrappers use cases. -This can only be run following StatAnalysis wrapper. -To run MakePlots wrapper, include MakePlots in PROCESS_LIST.
- -METplus Configuration --------------------- - -The following values **must** be defined in the METplus Wrappers -configuration file: - -| :term:`MAKE_PLOTS_SCRIPTS_DIR` -| :term:`MAKE_PLOTS_INPUT_DIR` -| :term:`MAKE_PLOTS_OUTPUT_DIR` -| :term:`MAKE_PLOTS_VERIF_CASE` -| :term:`MAKE_PLOTS_VERIF_TYPE` -| :term:`DATE_TYPE` -| :term:`MODEL\<n>` -| :term:`MODEL_OBTYPE` -| :term:`MODEL_REFERENCE_NAME` -| :term:`GROUP_LIST_ITEMS` -| :term:`LOOP_LIST_ITEMS` -| :term:`MODEL_LIST` -| :term:`FCST_LEAD_LIST` -| :term:`VX_MASK_LIST` -| :term:`LINE_TYPE_LIST` -| :term:`MAKE_PLOTS_AVERAGE_METHOD` -| :term:`MAKE_PLOTS_STATS_LIST` -| :term:`MAKE_PLOTS_CI_METHOD` -| :term:`MAKE_PLOTS_VERIF_GRID` -| :term:`MAKE_PLOTS_EVENT_EQUALIZATION` -| - -The following values are **optional** in the METplus Wrappers -configuration file: - -| :term:`VAR_FOURIER_DECOMP` -| :term:`VAR_WAVE_NUM_LIST` -| :term:`FCST_VALID_HOUR_LIST` -| :term:`OBS_VALID_HOUR_LIST` -| :term:`FCST_INIT_HOUR_LIST` -| :term:`OBS_INIT_HOUR_LIST` -| :term:`OBS_LEAD_LIST` -| :term:`DESC_LIST` -| :term:`INTERP_MTHD_LIST` -| :term:`INTERP_PNTS_LIST` -| :term:`COV_THRESH_LIST` -| :term:`ALPHA_LIST` -| - -.. warning:: **DEPRECATED:** - - | :term:`PLOTTING_SCRIPTS_DIR` - | :term:`STAT_FILES_INPUT_DIR` - | :term:`PLOTTING_OUTPUT_DIR` - | :term:`VERIF_CASE` - | :term:`VERIF_TYPE` - | :term:`PLOT_TIME` - | :term:`MODEL_NAME` - | :term:`MODEL_OBS_NAME` - | :term:`MODEL_NAME_ON_PLOT` - | :term:`VALID_HOUR_METHOD` - | :term:`VALID_HOUR_BEG` - | :term:`VALID_HOUR_END` - | :term:`VALID_HOUR_INCREMENT` - | :term:`INIT_HOUR_BEG` - | :term:`INIT_HOUR_END` - | :term:`INIT_HOUR_INCREMENT` - | :term:`REGION_LIST` - | :term:`LEAD_LIST` - | :term:`LINE_TYPE` - | :term:`INTERP` - | :term:`PLOT_STATS_LIST` - | :term:`CI_METHOD` - | :term:`VERIF_GRID` - | :term:`EVENT_EQUALIZATION` - | .. _met_db_load_wrapper: @@ -6831,48 +6746,266 @@ The StatAnalysis wrapper encapsulates the behavior of the MET stat_analysis tool. It provides the infrastructure to summarize and filter the MET .stat files. +Timing +^^^^^^ + +This wrapper is configured differently than many of the other wrappers that +loop over multiple run times. The StatAnalysis wrapper is designed to process +a range of run times at once using filtering to subset what is processed. +The VALID_BEG and VALID_END or INIT_BEG and INIT_END variables are used to +calculate filtering criteria. + +Prior to v5.0.0, only the year, month, and day (YYYYMMDD) of the init/valid +begin and end times were read by the wrapper. The hours, minutes, and seconds +were ignored; hours were instead filtered using FCST_HOUR_LIST and +OBS_HOUR_LIST. +Now the full time information is read, enabling users to process a more +specific range of times. To preserve the original behavior, end times that +do not include hours, minutes, or seconds will process up to 23:59:59 on that +day unless specific hours are defined with FCST_HOUR_LIST or OBS_HOUR_LIST. + +Note: The LEAD_SEQ variable that typically defines a list of forecast leads to +process is not used by the wrapper. Instead, the FCST_LEAD_LIST and +OBS_LEAD_LIST are used to filter the forecast leads found in the data. + +Optional MET Configuration File +^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + +The wrapped MET config file specified with :term:`STAT_ANALYSIS_CONFIG_FILE` is +optional in the StatAnalysis wrapper. Excluding this option will result in a +call to stat_analysis with the job arguments added via the command line. +Only 1 job can be defined if no wrapped MET configuration file is used.
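+For example, a minimal single-job setup that runs without a wrapped MET +configuration file might look like this sketch (paths and values are +illustrative):: + + [config] + PROCESS_LIST = StatAnalysis + VALID_BEG = 20221014 + VALID_END = 20221014 + MODEL1 = GFS + MODEL1_STAT_ANALYSIS_LOOKIN_DIR = {INPUT_BASE}/stat_files + STAT_ANALYSIS_JOB1 = -job summary -line_type SL1L2 -column FBAR +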
+To use a configuration file, set the following in the METplus config file:: + + STAT_ANALYSIS_CONFIG_FILE = {PARM_BASE}/met_config/STATAnalysisConfig_wrapped + +Jobs +^^^^ + +The job arguments can be defined by setting :term:`STAT_ANALYSIS_JOB\<n>` +variables, e.g. STAT_ANALYSIS_JOB1. All of the job commands including the -job +argument are set here. +Prior to v5.0.0, the config variables STAT_ANALYSIS_JOB_NAME and +STAT_ANALYSIS_JOB_ARGS were used to set the value following the -job argument +and any other job arguments respectively. + +Multiple jobs can be defined as of v5.0.0 using +STAT_ANALYSIS_JOB1, STAT_ANALYSIS_JOB2, etc. All jobs will be passed to each +call to stat_analysis. Only 1 job can be specified if no MET config file is +set with :term:`STAT_ANALYSIS_CONFIG_FILE`. + +Filtering with Lists +^^^^^^^^^^^^^^^^^^^^ + +There are many configuration variables that end with \_LIST that control +settings in the STATAnalysisConfig_wrapped file. +For example, MODEL_LIST controls the model variable in the MET config file and +FCST_LEAD_LIST controls the fcst_lead variable. The value for each of these +\_LIST variables can be a list of values separated by commas. +The value of GROUP_LIST_ITEMS is a comma-separated list of \_LIST variable +names that will be grouped together for each call to stat_analysis. +The value of LOOP_LIST_ITEMS is a comma-separated list of \_LIST variable +names that will be looped over to create multiple calls to stat_analysis. +The tool will be called with every combination of the LOOP_LIST_ITEMS +list values. List variables that are not included in either GROUP_LIST_ITEMS +or LOOP_LIST_ITEMS will be automatically added to GROUP_LIST_ITEMS. Lists +defined in LOOP_LIST_ITEMS that are empty lists will be automatically moved +to GROUP_LIST_ITEMS. + +.. _stat-analysis-looping-groups: + +Looping Over Groups of Lists +^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + +New in v5.0.0 is the ability to define groups of list items that can be looped +over. For example, a user may want to process forecast leads 1-3 in a +single run, then process forecast leads 4-6 in the next. To accomplish this, +define each group of items in a separate config variable ending with a number. +Then add the name of the list (without the numbers) to LOOP_LIST_ITEMS:: + + [config] + FCST_LEAD_LIST1 = 1,2,3 + FCST_LEAD_LIST2 = 4,5,6 + LOOP_LIST_ITEMS = FCST_LEAD_LIST + +If FCST_LEAD_LIST is added to GROUP_LIST_ITEMS instead, then all 6 items +defined in the 2 lists will be combined and passed to the tool at once. + +Filtering Begin and End Times +^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + +Starting in v5.0.0, the [fcst/obs]_[init/valid]_[beg/end] in the wrapped +MET config file can be set using the corresponding METplus config variables. +The values can include the filename template tags that are supported in the +wrapper (see :ref:`stat-analysis-filename-template`). For example, +to set the fcst_valid_beg value:: + + [config] + VALID_BEG = 20221014 + STAT_ANALYSIS_FCST_VALID_BEG = {fcst_valid_beg?fmt=%Y%m%d_%H%M%S} + +This will set fcst_valid_beg = "20221014_000000"; in the MET config file. + +Prior to v5.0.0, setting hour values in [FCST/OBS]_[INIT/VALID]_HOUR_LIST +would result in the corresponding _beg and _end values in the wrapped MET +config file being set based on the hours and the [INIT/VALID]_[BEG/END] values. + + +..
_stat-analysis-filename-template: +Additional Filename Template Tags +^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + +The StatAnalysis wrapper supports additional tags that can be substituted into +the input and output paths because the wrapper processes a range of times. + +The following filename template tags can be used: + +* model +* desc +* vx_mask +* interp_mthd +* interp_pnts +* cov_thresh +* alpha +* line_type +* fcst_var +* obs_var +* fcst_units +* obs_units +* fcst_thresh +* obs_thresh +* fcst_level +* obs_level +* fcst_valid_hour +* obs_valid_hour +* fcst_init_hour +* obs_init_hour +* fcst_lead +* obs_lead +* fcst_valid_hour_beg +* fcst_valid_hour_end +* obs_valid_hour_beg +* obs_valid_hour_end +* fcst_init_hour_beg +* fcst_init_hour_end +* obs_init_hour_beg +* obs_init_hour_end +* valid_hour +* valid_hour_beg +* valid_hour_end +* init_hour +* init_hour_beg +* init_hour_end +* fcst_valid +* fcst_valid_beg +* fcst_valid_end +* fcst_init +* fcst_init_beg +* fcst_init_end +* obs_valid +* obs_valid_beg +* obs_valid_end +* obs_init +* obs_init_beg +* obs_init_end +* valid +* valid_beg +* valid_end +* init +* init_beg +* init_end +* fcst_lead +* fcst_lead_hour +* fcst_lead_min +* fcst_lead_sec +* fcst_lead_totalsec +* obs_lead +* obs_lead_hour +* obs_lead_min +* obs_lead_sec +* obs_lead_totalsec +* lead +* lead_hour +* lead_min +* lead_sec +* lead_totalsec + +Please note that some of these items will be set to an empty string depending +on the configuration. For example, lead_hour, lead_min, lead_sec, and +lead_totalsec cannot be computed if there are multiple leads being processed +in a given run. As another example, if fcst_valid_beg has the same value as +fcst_valid_end, then fcst_valid will be set to the same value; otherwise it +will be left as an empty string. + +Outputs +^^^^^^^ + +This wrapper can be configured to write 3 types of output files. +Output files specified with the -out command line argument can be defined by +setting :term:`STAT_ANALYSIS_OUTPUT_TEMPLATE` and optionally +:term:`STAT_ANALYSIS_OUTPUT_DIR`. +Output files specified with the -dump_row or -out_stat arguments must be +defined in a job using :term:`STAT_ANALYSIS_JOB\<n>`. +The [dump_row_file] keyword can be added to a job after the -dump_row argument +only if a :term:`MODEL_STAT_ANALYSIS_DUMP_ROW_TEMPLATE` is set. Similarly, +the [out_stat_file] keyword can be added to a job after the -out_stat argument +only if a :term:`MODEL_STAT_ANALYSIS_OUT_STAT_TEMPLATE` is set.
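+For example, a hypothetical configuration that writes a file with the -out +argument and a dump_row file for each model (paths and job arguments are +illustrative):: + + [config] + STAT_ANALYSIS_OUTPUT_DIR = {OUTPUT_BASE}/stat_analysis + STAT_ANALYSIS_OUTPUT_TEMPLATE = {model}_summary.out + MODEL1_STAT_ANALYSIS_DUMP_ROW_TEMPLATE = {model}_filtered.stat + STAT_ANALYSIS_JOB1 = -job filter -line_type SL1L2 -dump_row [dump_row_file] +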
+ + METplus Configuration --------------------- The following values **must** be defined in the METplus configuration file: +| :term:`STAT_ANALYSIS_JOB\<n>` | :term:`STAT_ANALYSIS_OUTPUT_DIR` -| :term:`LOG_STAT_ANALYSIS_VERBOSITY` | :term:`MODEL\<n>` -| :term:`MODEL_OBTYPE` | :term:`MODEL_STAT_ANALYSIS_LOOKIN_DIR` -| :term:`MODEL_REFERENCE_NAME` | :term:`GROUP_LIST_ITEMS` | :term:`LOOP_LIST_ITEMS` -| :term:`MODEL_LIST` -| :term:`VX_MASK_LIST` -| :term:`FCST_LEAD_LIST` -| :term:`LINE_TYPE_LIST` -| :term:`STAT_ANALYSIS_JOB_NAME` -| :term:`STAT_ANALYSIS_JOB_ARGS` -| :term:`STAT_ANALYSIS_MET_CONFIG_OVERRIDES` -| The following values are optional in the METplus configuration file: | :term:`STAT_ANALYSIS_CONFIG_FILE` +| :term:`LOG_STAT_ANALYSIS_VERBOSITY` +| :term:`MODEL_OBTYPE` | :term:`VAR_FOURIER_DECOMP` | :term:`VAR_WAVE_NUM_LIST` +| :term:`MODEL_LIST` +| :term:`DESC_LIST` +| :term:`FCST_LEAD_LIST` +| :term:`OBS_LEAD_LIST` | :term:`FCST_VALID_HOUR_LIST` -| :term:`OBS_VALID_HOUR_LIST` | :term:`FCST_INIT_HOUR_LIST` +| :term:`OBS_VALID_HOUR_LIST` | :term:`OBS_INIT_HOUR_LIST` -| :term:`OBS_LEAD_LIST` -| :term:`DESC_LIST` +| :term:`FCST_VAR_LIST` +| :term:`OBS_VAR_LIST` +| :term:`FCST_UNITS_LIST` +| :term:`OBS_UNITS_LIST` +| :term:`FCST_LEVEL_LIST` +| :term:`OBS_LEVEL_LIST` +| :term:`VX_MASK_LIST` | :term:`INTERP_MTHD_LIST` | :term:`INTERP_PNTS_LIST` +| :term:`FCST_THRESH_LIST` +| :term:`OBS_THRESH_LIST` | :term:`COV_THRESH_LIST` | :term:`ALPHA_LIST` +| :term:`LINE_TYPE_LIST` | :term:`STAT_ANALYSIS_HSS_EC_VALUE` | :term:`STAT_ANALYSIS_OUTPUT_TEMPLATE` | :term:`MODEL_STAT_ANALYSIS_DUMP_ROW_TEMPLATE` | :term:`MODEL_STAT_ANALYSIS_OUT_STAT_TEMPLATE` -| +| :term:`STAT_ANALYSIS_FCST_INIT_BEG` +| :term:`STAT_ANALYSIS_FCST_INIT_END` +| :term:`STAT_ANALYSIS_FCST_VALID_BEG` +| :term:`STAT_ANALYSIS_FCST_VALID_END` +| :term:`STAT_ANALYSIS_OBS_INIT_BEG` +| :term:`STAT_ANALYSIS_OBS_INIT_END` +| :term:`STAT_ANALYSIS_OBS_VALID_BEG` +| :term:`STAT_ANALYSIS_OBS_VALID_END` +| :term:`STAT_ANALYSIS_MET_CONFIG_OVERRIDES` .. warning:: **DEPRECATED:** @@ -6888,7 +7021,7 @@ The following values are optional in the METplus configuration file: | :term:`INIT_HOUR_INCREMENT` | :term:`MODEL` | :term:`OBTYPE` - | :term:`JOB_NAME` + | JOB_NAME | :term:`JOB_ARGS` | :term:`FCST_LEAD` | :term:`FCST_VAR_NAME` @@ -6904,15 +7037,15 @@ The following values are optional in the METplus configuration file: | :term:`STAT_ANALYSIS_DUMP_ROW_TMPL` | :term:`STAT_ANALYSIS_OUT_STAT_TMPL` | :term:`PLOT_TIME` - | :term:`VERIF_CASE` - | :term:`VERIF_TYPE` | :term:`MODEL_NAME` | :term:`MODEL_OBS_NAME` - | :term:`MODEL_NAME_ON_PLOT` + | MODEL_NAME_ON_PLOT | :term:`MODEL_STAT_DIR` | :term:`REGION_LIST` | :term:`LEAD_LIST` - | + | :term:`STAT_ANALYSIS_JOB_NAME` + | :term:`STAT_ANALYSIS_JOB_ARGS` + .. _stat-analysis-met-conf: diff --git a/docs/build_docs.py b/docs/build_docs.py index 723d5c25b..444d9bff7 100755 --- a/docs/build_docs.py +++ b/docs/build_docs.py @@ -160,6 +160,10 @@ def main(): if os.stat(warning_file).st_size == 0: print(f"No warnings found, removing {warning_file}") os.remove(warning_file) + else: + print('ERROR: Doc build contains warnings or errors.
' + f'Please review {warning_file}') + sys.exit(1) print("Documentation build completed") diff --git a/internal/tests/plotting/examples/plot_emc_grid2grid_anom.conf b/internal/tests/plotting/examples/plot_emc_grid2grid_anom.conf deleted file mode 100644 index 76a68e834..000000000 --- a/internal/tests/plotting/examples/plot_emc_grid2grid_anom.conf +++ /dev/null @@ -1,133 +0,0 @@ -[dir] -# Dirs for StatAnalysis -STAT_ANALYSIS_OUTPUT_DIR = {OUTPUT_BASE}/gather_by_info/stat_analysis/grid2grid/anom -# Dirs for MakePlots -MAKE_PLOTS_SCRIPTS_DIR = {METPLUS_BASE}/ush/plotting_scripts -MAKE_PLOTS_INPUT_DIR = {STAT_ANALYSIS_OUTPUT_DIR} -MAKE_PLOTS_OUTPUT_DIR = {OUTPUT_BASE}/make_plots/grid2grid/anom -# Location of configuration files used by MET applications -CONFIG_DIR = {PARM_BASE}/use_cases/plotting/met_config - -[config] -# LOOP_METHOD must be set to processes for plotting -LOOP_ORDER = processes -PROCESS_LIST = StatAnalysis, MakePlots - -# Date treatment, either VALID or INIT -DATE_TYPE = VALID -# blank or YYYYmmDD format -VALID_BEG = 20170613 -VALID_END = 20170613 -# blank for HH format (two digit hour format, ex. 06) -FCST_VALID_HOUR_LIST = 00 -FCST_INIT_HOUR_LIST = 00 -OBS_VALID_HOUR_LIST = -OBS_INIT_HOUR_LIST = -GROUP_LIST_ITEMS = FCST_INIT_HOUR_LIST -LOOP_LIST_ITEMS = FCST_VALID_HOUR_LIST - -# Models to process -# EACH MODEL IS LOOPED OVER -# MODELn is the model name to filter for in -# stat files [required] -# MODELn_OBTYPE is the observation name -# to filter for the .stat files -# [required] -# MODELn_STAT_ANALYSIS_LOOKIN_DIR is the directory to search for -# the .stat files in, wildcards (*) -# are okay to search for multiple -# directories and templates like -# {valid?fmt=%H%M%S} [required] -# MODELn_REFERENCE_NAME is a reference name for MODELn, defaults to -# MODELn, it can be used in the file template names -# [optional] -MODEL1 = GFS -MODEL1_OBTYPE = ANLYS -MODEL1_STAT_ANALYSIS_LOOKIN_DIR = {OUTPUT_BASE}/gather_by_date/stat_analysis/grid2grid/anom/{fcst_valid_hour?fmt=%H}Z/{MODEL1} -MODEL1_REFERENCE_NAME = gfs - -# Variables and levels to process -# EACH VARIABLE IS LOOPED OVER FOR ITS -# LEVELS, THRESHOLDS, AND IF APPLICABLE -# FOURIER WAVE DECOMPOSITION -# FCST_VARn_NAME and FCST_VARn_LEVELS required -# optional: FCST_VARn_THRESH, FCST_VARn_OPTIONS, FCST_VARn_UNITS, -# OBS_VARn_NAME, OBS_VARn_LEVELS, -# OBS_VARn_THRESH, OBS_VARn_OPTIONS, OBS_VARn_UNITS, -# VARn_FOURIER_DECOMP, VARn_WAVE_NUM_LIST -# if OBS_VARn variables not listed they are filled with FCST_VARn values -FCST_VAR1_NAME = HGT -FCST_VAR1_LEVELS = P1000, P700, P500, P250 -VAR1_FOURIER_DECOMP = True -VAR1_WAVE_NUM_LIST = 0-3, 4-9, 10-20, 0-20 - -FCST_VAR2_NAME = HGT -FCST_VAR2_LEVELS = P1000, P700, P500, P250 - -FCST_VAR3_NAME = UGRD_VGRD -FCST_VAR3_LEVELS = P850, P500, P250 - -FCST_VAR4_NAME = UGRD -FCST_VAR4_LEVELS = P850, P500, P250 - -FCST_VAR5_NAME = VGRD -FCST_VAR5_LEVELS = P850, P500, P250 - -FCST_VAR6_NAME = TMP -FCST_VAR6_LEVELS = P850, P500, P250 - -FCST_VAR7_NAME = PRMSL -FCST_VAR7_LEVELS = Z0 - -STAT_ANALYSIS_CONFIG_FILE = {CONFIG_DIR}/STATAnalysisConfig - -# REQUIRED LISTS -MODEL_LIST = {MODEL1} -FCST_LEAD_LIST = 24, 48, 72, 96, 120, 144, 168, 192, 216, 240 -VX_MASK_LIST = G002, NHX, SHX, TRO, PNA -# OPTIONAL LISTS -DESC_LIST = -OBS_LEAD_LIST = -INTERP_MTHD_LIST = -INTERP_PNTS_LIST = -COV_THRESH_LIST = -ALPHA_LIST = - -# Plotting options -# MAKE_PLOTS_VERIF_CASE, MAKE_PLOTS_VERIF_TYPE - -# use to create plots for various verification -# use case and types. 
This produces plots like -# EMC uses for verification. -# MAKE_PLOTS_VERIF_CASE: grid2grid -# > MAKE_PLOTS_VERIF_TYPE: anom, pres, sfc -# MAKE_PLOTS_VERIF_CASE: grid2obs -# > MAKE_PLOTS_VERIF_TYPE: conus_sfc, upper_air -# MAKE_PLOTS_VERIF_CASE: precip -# > MAKE_PLOTS_VERIF_TYPE: [can be any string] -#-------------- OR USE -------------- -# MAKE_PLOTS_USER_SCRIPT_LIST - allows the user to -# give METplus user created scripts. Follow the -# plotting scripts in METplus as an example of -# how to create your own. The scripts should be -# located and wherever MAKE_PLOTS_SCRIPTS_DIR -# is set to -MAKE_PLOTS_VERIF_CASE = grid2grid -MAKE_PLOTS_VERIF_TYPE = anom -# LINE_TYPE_LIST = SL1L2, VL1L2 options: bias rms msess rsd rmse_md rmse_pv pcor, fbar, fbar_obar -# LINE_TYPE_LIST = SAL1L2, VAL1L2 options: acc -# LINE_TYPE_LIST = VCNT options: bias, fbar, fbar_obar, speed_err, dir_err, rmsve, vdiff_speed, vdiff_dir, -# rsd, fbar_speed, fbar_dir, fbar_obar_speed, fbar_obar_dir -# LINE_TYPE_LIST = CTC options: orate, baser, frate, orate_frate, baser_frate, accuracy, bias, fbias, pod, -# hrate, pofd, farate, podn, faratio, csi, ts, gss, ets, hk, tss, pss, hs -LINE_TYPE_LIST = SAL1L2, VAL1L2 -MAKE_PLOTS_STATS_LIST = acc -# Average Calculation Method -# options: MEAN, MEDIAN, AGGREGATION -MAKE_PLOTS_AVERAGE_METHOD = MEAN -# Confidence Interval Calculation Method -# options: EMC, EMC_MONTE_CARLO, NONE -MAKE_PLOTS_CI_METHOD = EMC -# Grid verification done on -MAKE_PLOTS_VERIF_GRID = G002 -# Do event equalization, True, don't do event equalization, False -MAKE_PLOTS_EVENT_EQUALIZATION = False diff --git a/internal/tests/plotting/examples/plot_emc_grid2grid_pres.conf b/internal/tests/plotting/examples/plot_emc_grid2grid_pres.conf deleted file mode 100644 index 6c88910e7..000000000 --- a/internal/tests/plotting/examples/plot_emc_grid2grid_pres.conf +++ /dev/null @@ -1,128 +0,0 @@ -[dir] -# Dirs for StatAnalysis -STAT_ANALYSIS_OUTPUT_DIR = {OUTPUT_BASE}/gather_by_info/stat_analysis/grid2grid/pres -# Dirs for MakePlots -MAKE_PLOTS_SCRIPTS_DIR = {METPLUS_BASE}/ush/plotting_scripts -MAKE_PLOTS_INPUT_DIR = {STAT_ANALYSIS_OUTPUT_DIR} -MAKE_PLOTS_OUTPUT_DIR = {OUTPUT_BASE}/make_plots/grid2grid/pres -# Location of configuration files used by MET applications -CONFIG_DIR = {PARM_BASE}/use_cases/plotting/met_config - -[config] -# LOOP_METHOD must be set to processes for plotting -LOOP_ORDER = processes -PROCESS_LIST = StatAnalysis, MakePlots - -# Date treatment, either VALID or INIT -DATE_TYPE = VALID -# blank or YYYYmmDD format -VALID_BEG = 20170613 -VALID_END = 20170613 -# blank for HH format (two digit hour format, ex. 
06) -FCST_VALID_HOUR_LIST = 00 -FCST_INIT_HOUR_LIST = 00 -OBS_VALID_HOUR_LIST = -OBS_INIT_HOUR_LIST = -GROUP_LIST_ITEMS = FCST_INIT_HOUR_LIST -LOOP_LIST_ITEMS = FCST_VALID_HOUR_LIST - -# Models to process -# EACH MODEL IS LOOPED OVER -# MODELn is the model name to filter for in -# stat files [required] -# MODELn_OBTYPE is the observation name -# to filter for the .stat files -# [required] -# MODELn_STAT_ANALYSIS_LOOKIN_DIR is the directory to search for -# the .stat files in, wildcards (*) -# are okay to search for multiple -# directories and templates like -# {valid?fmt=%H%M%S} [required] -# MODELn_REFERENCE_NAME is a reference name for MODELn, defaults to -# MODELn, it can be used in the file template names -# [optional] -MODEL1 = GFS -MODEL1_OBTYPE = ANLYS -MODEL1_STAT_ANALYSIS_LOOKIN_DIR = {OUTPUT_BASE}/gather_by_date/stat_analysis/grid2grid/pres/{fcst_valid_hour?fmt=%H}Z/{MODEL1} -MODEL1_REFERENCE_NAME = gfs - -# Variables and levels to process -# EACH VARIABLE IS LOOPED OVER FOR ITS -# LEVELS, THRESHOLDS, AND IF APPLICABLE -# FOURIER WAVE DECOMPOSITION -# FCST_VARn_NAME and FCST_VARn_LEVELS required -# optional: FCST_VARn_THRESH, FCST_VARn_OPTIONS, FCST_VARn_UNITS, -# OBS_VARn_NAME, OBS_VARn_LEVELS, -# OBS_VARn_THRESH, OBS_VARn_OPTIONS, OBS_VARn_UNITS, -# VARn_FOURIER_DECOMP, VARn_WAVE_NUM_LIST -# if OBS_VARn variables not listed they are filled with FCST_VARn values -FCST_VAR1_NAME = HGT -FCST_VAR1_LEVELS = P1000, P850, P700, P500, P200, P100, P50, P20, P10 - -FCST_VAR2_NAME = TMP -FCST_VAR2_LEVELS = P1000, P850, P700, P500, P200, P100, P50, P20, P10 - -FCST_VAR3_NAME = UGRD_VGRD -FCST_VAR3_LEVELS = P1000, P850, P700, P500, P200, P100, P50, P20, P10 - -FCST_VAR4_NAME = UGRD -FCST_VAR4_LEVELS = P1000, P850, P700, P500, P200, P100, P50, P20, P10 - -FCST_VAR5_NAME = VGRD -FCST_VAR5_LEVELS = P1000, P850, P700, P500, P200, P100, P50, P20, P10 - -FCST_VAR6_NAME = O3MR -FCST_VAR6_LEVELS = P100, P70, P50, P30, P20, P10 - -STAT_ANALYSIS_CONFIG_FILE = {CONFIG_DIR}/STATAnalysisConfig - -# REQUIRED LISTS -MODEL_LIST = {MODEL1} -FCST_LEAD_LIST = 24, 48, 72, 96, 120, 144, 168, 192, 216, 240 -VX_MASK_LIST = G002, NHX, SHX, TRO, PNA -# OPTIONAL LISTS -DESC_LIST = -OBS_LEAD_LIST = -INTERP_MTHD_LIST = -INTERP_PNTS_LIST = -COV_THRESH_LIST = -ALPHA_LIST = - -# Plotting options -# MAKE_PLOTS_VERIF_CASE, MAKE_PLOTS_VERIF_TYPE - -# use to create plots for various verification -# use case and types. This produces plots like -# EMC uses for verification. -# MAKE_PLOTS_VERIF_CASE: grid2grid -# > MAKE_PLOTS_VERIF_TYPE: anom, pres, sfc -# MAKE_PLOTS_VERIF_CASE: grid2obs -# > MAKE_PLOTS_VERIF_TYPE: conus_sfc, upper_air -# MAKE_PLOTS_VERIF_CASE: precip -# > MAKE_PLOTS_VERIF_TYPE: [can be any string] -#-------------- OR USE -------------- -# MAKE_PLOTS_USER_SCRIPT_LIST - allows the user to -# give METplus user created scripts. Follow the -# plotting scripts in METplus as an example of -# how to create your own. 
The scripts should be -# located and wherever MAKE_PLOTS_SCRIPTS_DIR -# is set to -MAKE_PLOTS_VERIF_CASE = grid2grid -MAKE_PLOTS_VERIF_TYPE = pres -# LINE_TYPE_LIST = SL1L2, VL1L2 options: bias rms msess rsd rmse_md rmse_pv pcor, fbar, fbar_obar -# LINE_TYPE_LIST = SAL1L2, VAL1L2 options: acc -# LINE_TYPE_LIST = VCNT options: bias, fbar, fbar_obar, speed_err, dir_err, rmsve, vdiff_speed, vdiff_dir, -# rsd, fbar_speed, fbar_dir, fbar_obar_speed, fbar_obar_dir -# LINE_TYPE_LIST = CTC options: orate, baser, frate, orate_frate, baser_frate, accuracy, bias, fbias, pod, -# hrate, pofd, farate, podn, faratio, csi, ts, gss, ets, hk, tss, pss, hs -LINE_TYPE_LIST = SL1L2, VL1L2 -MAKE_PLOTS_STATS_LIST = bias, rmse, msess, rsd, rmse_md, rmse_pv, pcor -# Average Calculation Method -# options: MEAN, MEDIAN, AGGREGATION -MAKE_PLOTS_AVERAGE_METHOD = MEAN -# Confidence Interval Calculation Method -# options: EMC, EMC_MONTE_CARLO, NONE -MAKE_PLOTS_CI_METHOD = EMC -# Grid verification done on -MAKE_PLOTS_VERIF_GRID = G002 -# Do event equalization, True, don't do event equalization, False -MAKE_PLOTS_EVENT_EQUALIZATION = False diff --git a/internal/tests/plotting/examples/plot_emc_grid2grid_sfc.conf b/internal/tests/plotting/examples/plot_emc_grid2grid_sfc.conf deleted file mode 100644 index 775102a5d..000000000 --- a/internal/tests/plotting/examples/plot_emc_grid2grid_sfc.conf +++ /dev/null @@ -1,172 +0,0 @@ -[dir] -# Dirs for StatAnalysis -STAT_ANALYSIS_OUTPUT_DIR = {OUTPUT_BASE}/gather_by_info/stat_analysis/grid2grid/sfc -# Dirs for MakePlots -MAKE_PLOTS_SCRIPTS_DIR = {METPLUS_BASE}/ush/plotting_scripts -MAKE_PLOTS_INPUT_DIR = {STAT_ANALYSIS_OUTPUT_DIR} -MAKE_PLOTS_OUTPUT_DIR = {OUTPUT_BASE}/make_plots/grid2grid/sfc -# Location of configuration files used by MET applications -CONFIG_DIR = {PARM_BASE}/use_cases/plotting/met_config - -[config] -# LOOP_METHOD must be set to processes for plotting -LOOP_ORDER = processes -PROCESS_LIST = StatAnalysis, MakePlots - -# Date treatment, either VALID or INIT -DATE_TYPE = VALID -# blank or YYYYmmDD format -VALID_BEG = 20170613 -VALID_END = 20170613 -# blank for HH format (two digit hour format, ex. 
06) -FCST_VALID_HOUR_LIST = 00 -FCST_INIT_HOUR_LIST = 00 -OBS_VALID_HOUR_LIST = -OBS_INIT_HOUR_LIST = -GROUP_LIST_ITEMS = FCST_INIT_HOUR_LIST -LOOP_LIST_ITEMS = FCST_VALID_HOUR_LIST - -# Models to process -# EACH MODEL IS LOOPED OVER -# MODELn is the model name to filter for in -# stat files [required] -# MODELn_OBTYPE is the observation name -# to filter for the .stat files -# [required] -# MODELn_STAT_ANALYSIS_LOOKIN_DIR is the directory to search for -# the .stat files in, wildcards (*) -# are okay to search for multiple -# directories and templates like -# {valid?fmt=%H%M%S} [required] -# MODELn_REFERENCE_NAME is a reference name for MODELn, defaults to -# MODELn, it can be used in the file template names -# [optional] -MODEL1 = GFS -MODEL1_OBTYPE = ANLYS -MODEL1_STAT_ANALYSIS_LOOKIN_DIR = {OUTPUT_BASE}/gather_by_date/stat_analysis/grid2grid/sfc/{fcst_valid_hour?fmt=%H}Z/{MODEL1} -MODEL1_REFERENCE_NAME = gfs - -# Variables and levels to process -# EACH VARIABLE IS LOOPED OVER FOR ITS -# LEVELS, THRESHOLDS, AND IF APPLICABLE -# FOURIER WAVE DECOMPOSITION -# FCST_VARn_NAME and FCST_VARn_LEVELS required -# optional: FCST_VARn_THRESH, FCST_VARn_OPTIONS, FCST_VARn_UNITS, -# OBS_VARn_NAME, OBS_VARn_LEVELS, -# OBS_VARn_THRESH, OBS_VARn_OPTIONS, OBS_VARn_UNITS, -# VARn_FOURIER_DECOMP, VARn_WAVE_NUM_LIST -# if OBS_VARn variables not listed they are filled with FCST_VARn values -FCST_VAR1_NAME = TMP -FCST_VAR1_LEVELS = Z2 - -FCST_VAR2_NAME = RH -FCST_VAR2_LEVELS = Z2 - -FCST_VAR3_NAME = SPFH -FCST_VAR3_LEVELS = Z2 - -FCST_VAR4_NAME = HPBL -FCST_VAR4_LEVELS = L0 - -FCST_VAR5_NAME = PRES -FCST_VAR5_LEVELS = Z0 - -FCST_VAR6_NAME = PRMSL -FCST_VAR6_OPTIONS = GRIB_lvl_typ = 102; -FCST_VAR6_LEVELS = L0 - -FCST_VAR7_NAME = TMP -FCST_VAR7_LEVELS = Z0 - -FCST_VAR8_NAME = UGRD -FCST_VAR8_LEVELS = Z10 - -FCST_VAR9_NAME = VGRD -FCST_VAR9_LEVELS = Z10 - -FCST_VAR10_NAME = TSOIL -FCST_VAR10_OPTIONS = GRIB_lvl_typ = 112; -FCST_VAR10_LEVELS = Z0-10 - -FCST_VAR11_NAME = SOILW -FCST_VAR11_OPTIONS = GRIB_lvl_typ = 112; -FCST_VAR11_LEVELS = Z0-10 - -FCST_VAR12_NAME = WEASD -FCST_VAR12_OPTIONS = GRIB_lvl_typ = 01; - -FCST_VAR13_NAME = CAPE -FCST_VAR13_LEVELS = Z0 - -FCST_VAR14_NAME = CWAT -FCST_VAR14_OPTIONS = GRIB_lvl_typ = 200; -FCST_VAR14_LEVELS = L0 - -FCST_VAR15_NAME = PWAT -FCST_VAR15_OPTIONS = GRIB_lvl_typ = 200; -FCST_VAR15_LEVELS = L0 - -FCST_VAR16_NAME = TMP -FCST_VAR16_OPTIONS = GRIB_lvl_typ = 07; -FCST_VAR16_LEVELS = L0 - -FCST_VAR17_NAME = HGT -FCST_VAR17_OPTIONS = GRIB_lvl_typ = 07; -FCST_VAR17_LEVELS = L0 - -FCST_VAR18_NAME = TOZNE -FCST_VAR18_OPTIONS = GRIB_lvl_typ = 200; -FCST_VAR18_LEVELS = L0 - -STAT_ANALYSIS_CONFIG_FILE = {CONFIG_DIR}/STATAnalysisConfig - -# REQUIRED LISTS -MODEL_LIST = {MODEL1} -FCST_LEAD_LIST = 24, 48, 72, 96, 120, 144, 168, 192, 216, 240 -VX_MASK_LIST = G002, NHX, SHX, TRO, N60, S60, NPO, SPO, NAO, SAO, CONUS, CAM, NSA -# OPTIONAL LISTS -DESC_LIST = -OBS_LEAD_LIST = -INTERP_MTHD_LIST = -INTERP_PNTS_LIST = -COV_THRESH_LIST = -ALPHA_LIST = - -# Plotting options -# MAKE_PLOTS_VERIF_CASE, MAKE_PLOTS_VERIF_TYPE - -# use to create plots for various verification -# use case and types. This produces plots like -# EMC uses for verification. 
-# MAKE_PLOTS_VERIF_CASE: grid2grid -# > MAKE_PLOTS_VERIF_TYPE: anom, pres, sfc -# MAKE_PLOTS_VERIF_CASE: grid2obs -# > MAKE_PLOTS_VERIF_TYPE: conus_sfc, upper_air -# MAKE_PLOTS_VERIF_CASE: precip -# > MAKE_PLOTS_VERIF_TYPE: [can be any string] -#-------------- OR USE -------------- -# MAKE_PLOTS_USER_SCRIPT_LIST - allows the user to -# give METplus user created scripts. Follow the -# plotting scripts in METplus as an example of -# how to create your own. The scripts should be -# located and wherever MAKE_PLOTS_SCRIPTS_DIR -# is set to -MAKE_PLOTS_VERIF_CASE = grid2grid -MAKE_PLOTS_VERIF_TYPE = sfc -# LINE_TYPE_LIST = SL1L2, VL1L2 options: bias rms msess rsd rmse_md rmse_pv pcor, fbar, fbar_obar -# LINE_TYPE_LIST = SAL1L2, VAL1L2 options: acc -# LINE_TYPE_LIST = VCNT options: bias, fbar, fbar_obar, speed_err, dir_err, rmsve, vdiff_speed, vdiff_dir, -# rsd, fbar_speed, fbar_dir, fbar_obar_speed, fbar_obar_dir -# LINE_TYPE_LIST = CTC options: orate, baser, frate, orate_frate, baser_frate, accuracy, bias, fbias, pod, -# hrate, pofd, farate, podn, faratio, csi, ts, gss, ets, hk, tss, pss, hs -LINE_TYPE_LIST = SL1L2, VL1L2 -MAKE_PLOTS_STATS_LIST = fbar -# Average Calculation Method -# options: MEAN, MEDIAN, AGGREGATION -MAKE_PLOTS_AVERAGE_METHOD = MEAN -# Confidence Interval Calculation Method -# options: EMC, EMC_MONTE_CARLO, NONE -MAKE_PLOTS_CI_METHOD = EMC -# Grid verification done on -MAKE_PLOTS_VERIF_GRID = G002 -# Do event equalization, True, don't do event equalization, False -MAKE_PLOTS_EVENT_EQUALIZATION = False diff --git a/internal/tests/plotting/examples/plot_emc_grid2obs_conus_sfc.conf b/internal/tests/plotting/examples/plot_emc_grid2obs_conus_sfc.conf deleted file mode 100644 index 511195097..000000000 --- a/internal/tests/plotting/examples/plot_emc_grid2obs_conus_sfc.conf +++ /dev/null @@ -1,129 +0,0 @@ -[dir] -# Dirs for StatAnalysis -STAT_ANALYSIS_OUTPUT_DIR = {OUTPUT_BASE}/gather_by_info/stat_analysis/grid2obs/conus_sfc -# Dirs for MakePlots -MAKE_PLOTS_SCRIPTS_DIR = {METPLUS_BASE}/ush/plotting_scripts -MAKE_PLOTS_INPUT_DIR = {STAT_ANALYSIS_OUTPUT_DIR} -MAKE_PLOTS_OUTPUT_DIR = {OUTPUT_BASE}/make_plots/grid2obs/conus_sfc -# Location of configuration files used by MET applications -CONFIG_DIR = {PARM_BASE}/use_cases/plotting/met_config - -[config] -# LOOP_METHOD must be set to processes for plotting -LOOP_ORDER = processes -PROCESS_LIST = StatAnalysis, MakePlots - -# Date treatment, either VALID or INIT -DATE_TYPE = INIT -# blank or YYYYmmDD format -INIT_BEG = 20170601 -INIT_END = 20170603 -# blank for HH format (two digit hour format, ex. 
06) -FCST_VALID_HOUR_LIST = 00, 06, 12, 18 -FCST_INIT_HOUR_LIST = 00 -OBS_VALID_HOUR_LIST = -OBS_INIT_HOUR_LIST = -GROUP_LIST_ITEMS = FCST_VALID_HOUR_LIST -LOOP_LIST_ITEMS = FCST_INIT_HOUR_LIST - -# Models to process -# EACH MODEL IS LOOPED OVER -# MODELn is the model name to filter for in -# stat files [required] -# MODELn_OBTYPE is the observation name -# to filter for the .stat files -# [required] -# MODELn_STAT_ANALYSIS_LOOKIN_DIR is the directory to search for -# the .stat files in, wildcards (*) -# are okay to search for multiple -# directories and templates like -# {valid?fmt=%H%M%S} [required] -# MODELn_REFERENCE_NAME is a reference name for MODELn, defaults to -# MODELn, it can be used in the file template names -# [optional] -MODEL1 = gfs -MODEL1_OBTYPE = ONLYSF -MODEL1_STAT_ANALYSIS_LOOKIN_DIR = {OUTPUT_BASE}/gather_by_date/stat_analysis/grid2obs/{init_hour?fmt=%H}Z/{MODEL1} -MODEL1_REFERENCE_NAME = ops_gfs - -# Variables and levels to process -# EACH VARIABLE IS LOOPED OVER FOR ITS -# LEVELS, THRESHOLDS, AND IF APPLICABLE -# FOURIER WAVE DECOMPOSITION -# FCST_VARn_NAME and FCST_VARn_LEVELS required -# optional: FCST_VARn_THRESH, FCST_VARn_OPTIONS, FCST_VARn_UNITS, -# OBS_VARn_NAME, OBS_VARn_LEVELS, -# OBS_VARn_THRESH, OBS_VARn_OPTIONS, OBS_VARn_UNITS, -# VARn_FOURIER_DECOMP, VARn_WAVE_NUM_LIST -# if OBS_VARn variables not listed they are filled with FCST_VARn values -FCST_VAR1_NAME = TMP -FCST_VAR1_LEVELS = Z2 - -FCST_VAR2_NAME = RH -FCST_VAR2_LEVELS = Z2 - -FCST_VAR3_NAME = DPT -FCST_VAR3_LEVELS = Z2 - -FCST_VAR4_NAME = UGRD_VGRD -FCST_VAR4_LEVELS = Z10 - -FCST_VAR5_NAME = TCDC -FCST_VAR5_LEVELS = L0 - -FCST_VAR6_NAME = PRMSL -FCST_VAR7_LEVELS = Z0 - - -STAT_ANALYSIS_CONFIG_FILE = {CONFIG_DIR}/STATAnalysisConfig - -# REQUIRED LISTS -MODEL_LIST = {MODEL1} -FCST_LEAD_LIST = 06, 12, 18, 24, 36, 48, 60, 72, 84, 96, 108, 120, 132, 144, 156 -VX_MASK_LIST = CONUS, EAST, WEST, MDW, NPL, SPL, NEC, SEC, NMT SMT, SWD, GRB, LMV, GMC, APL -# OPTIONAL LISTS -DESC_LIST = -OBS_LEAD_LIST = -INTERP_MTHD_LIST = -INTERP_PNTS_LIST = -COV_THRESH_LIST = -ALPHA_LIST = - -## Plotting options -# MAKE_PLOTS_VERIF_CASE, MAKE_PLOTS_VERIF_TYPE - -# use to create plots for various verification -# use case and types. This produces plots like -# EMC uses for verification. -# MAKE_PLOTS_VERIF_CASE: grid2grid -# > MAKE_PLOTS_VERIF_TYPE: anom, pres, sfc -# MAKE_PLOTS_VERIF_CASE: grid2obs -# > MAKE_PLOTS_VERIF_TYPE: conus_sfc, upper_air -# MAKE_PLOTS_VERIF_CASE: precip -# > MAKE_PLOTS_VERIF_TYPE: [can be any string] -#-------------- OR USE -------------- -# MAKE_PLOTS_USER_SCRIPT_LIST - allows the user to -# give METplus user created scripts. Follow the -# plotting scripts in METplus as an example of -# how to create your own. 
-# located wherever MAKE_PLOTS_SCRIPTS_DIR
-# is set to
-MAKE_PLOTS_VERIF_CASE = grid2obs
-MAKE_PLOTS_VERIF_TYPE = conus_sfc
-# LINE_TYPE_LIST = SL1L2, VL1L2 options: bias, rms, msess, rsd, rmse_md, rmse_pv, pcor, fbar, fbar_obar
-# LINE_TYPE_LIST = SAL1L2, VAL1L2 options: acc
-# LINE_TYPE_LIST = VCNT options: bias, fbar, fbar_obar, speed_err, dir_err, rmsve, vdiff_speed, vdiff_dir,
-# rsd, fbar_speed, fbar_dir, fbar_obar_speed, fbar_obar_dir
-# LINE_TYPE_LIST = CTC options: orate, baser, frate, orate_frate, baser_frate, accuracy, bias, fbias, pod,
-# hrate, pofd, farate, podn, faratio, csi, ts, gss, ets, hk, tss, pss, hs
-LINE_TYPE_LIST = SL1L2, VL1L2
-MAKE_PLOTS_STATS_LIST = bias, rmse, fbar_obar
-# Average Calculation Method
-# options: MEAN, MEDIAN, AGGREGATION
-MAKE_PLOTS_AVERAGE_METHOD = MEAN
-# Confidence Interval Calculation Method
-# options: EMC, EMC_MONTE_CARLO, NONE
-MAKE_PLOTS_CI_METHOD = EMC
-# Grid that verification is done on
-MAKE_PLOTS_VERIF_GRID = G104
-# Do event equalization (True) or not (False)
-MAKE_PLOTS_EVENT_EQUALIZATION = False
diff --git a/internal/tests/plotting/examples/plot_emc_grid2obs_upper_air.conf b/internal/tests/plotting/examples/plot_emc_grid2obs_upper_air.conf
deleted file mode 100644
index dbb865bd4..000000000
--- a/internal/tests/plotting/examples/plot_emc_grid2obs_upper_air.conf
+++ /dev/null
@@ -1,125 +0,0 @@
-[dir]
-# Dirs for StatAnalysis
-STAT_ANALYSIS_OUTPUT_DIR = {OUTPUT_BASE}/gather_by_info/stat_analysis/grid2obs/upper_air
-# Dirs for MakePlots
-MAKE_PLOTS_SCRIPTS_DIR = {METPLUS_BASE}/ush/plotting_scripts
-MAKE_PLOTS_INPUT_DIR = {STAT_ANALYSIS_OUTPUT_DIR}
-MAKE_PLOTS_OUTPUT_DIR = {OUTPUT_BASE}/make_plots/grid2obs/upper_air
-# Location of configuration files used by MET applications
-CONFIG_DIR = {PARM_BASE}/use_cases/plotting/met_config
-
-[config]
-# LOOP_ORDER must be set to processes for plotting
-LOOP_ORDER = processes
-PROCESS_LIST = StatAnalysis, MakePlots
-
-# Date treatment, either VALID or INIT
-DATE_TYPE = INIT
-# blank or YYYYmmDD format
-INIT_BEG = 20170601
-INIT_END = 20170603
-# blank or HH format (two digit hour format, ex. 06)
-FCST_VALID_HOUR_LIST = 00, 06, 12, 18
-FCST_INIT_HOUR_LIST = 00
-OBS_VALID_HOUR_LIST =
-OBS_INIT_HOUR_LIST =
-GROUP_LIST_ITEMS = FCST_VALID_HOUR_LIST
-LOOP_LIST_ITEMS = FCST_INIT_HOUR_LIST
-
-# Models to process
-# EACH MODEL IS LOOPED OVER
-# MODELn is the model name to filter for in
-# stat files [required]
-# MODELn_OBTYPE is the observation name
-# to filter for the .stat files
-# [required]
-# MODELn_STAT_ANALYSIS_LOOKIN_DIR is the directory to search for
-# the .stat files in, wildcards (*)
-# are okay to search for multiple
-# directories and templates like
-# {valid?fmt=%H%M%S} [required]
-# MODELn_REFERENCE_NAME is a reference name for MODELn, defaults to
-# MODELn, it can be used in the file template names
-# [optional]
-MODEL1 = gfs
-MODEL1_OBTYPE = ONLYSF
-MODEL1_STAT_ANALYSIS_LOOKIN_DIR = {OUTPUT_BASE}/gather_by_date/stat_analysis/grid2obs/{init_hour?fmt=%H}Z/{MODEL1}
-MODEL1_REFERENCE_NAME = gfs
-
-# Variables and levels to process
-# EACH VARIABLE IS LOOPED OVER FOR ITS
-# LEVELS, THRESHOLDS, AND IF APPLICABLE
-# FOURIER WAVE DECOMPOSITION
-# FCST_VARn_NAME and FCST_VARn_LEVELS required
-# optional: FCST_VARn_THRESH, FCST_VARn_OPTIONS, FCST_VARn_UNITS,
-# OBS_VARn_NAME, OBS_VARn_LEVELS,
-# OBS_VARn_THRESH, OBS_VARn_OPTIONS, OBS_VARn_UNITS,
-# VARn_FOURIER_DECOMP, VARn_WAVE_NUM_LIST
-# if OBS_VARn variables not listed they are filled with FCST_VARn values
-FCST_VAR1_NAME = TMP
-FCST_VAR1_LEVELS = P850, P500, P200, P50, P10
-
-FCST_VAR2_NAME = RH
-FCST_VAR2_LEVELS = P850, P500, P200, P50, P10
-
-FCST_VAR3_NAME = UGRD_VGRD
-FCST_VAR3_LEVELS = P850, P500, P200, P50, P10
-
-FCST_VAR4_NAME = UGRD
-FCST_VAR4_LEVELS = P850, P500, P200, P50, P10
-
-FCST_VAR5_NAME = VGRD
-FCST_VAR5_LEVELS = P850, P500, P200, P50, P10
-
-STAT_ANALYSIS_CONFIG_FILE = {CONFIG_DIR}/STATAnalysisConfig
-
-# REQUIRED LISTS
-MODEL_LIST = {MODEL1}
-FCST_LEAD_LIST = 06, 12, 18, 24, 36, 48, 60, 72, 84, 96, 108, 120, 132, 144, 156
-VX_MASK_LIST = G003, NH, SH, TRO, G236
-# OPTIONAL LISTS
-DESC_LIST =
-OBS_LEAD_LIST =
-INTERP_MTHD_LIST =
-INTERP_PNTS_LIST =
-COV_THRESH_LIST =
-ALPHA_LIST =
-
-## Plotting options
-# MAKE_PLOTS_VERIF_CASE, MAKE_PLOTS_VERIF_TYPE -
-# used to create plots for various verification
-# use cases and types. This produces plots like
-# EMC uses for verification.
-# MAKE_PLOTS_VERIF_CASE: grid2grid
-# > MAKE_PLOTS_VERIF_TYPE: anom, pres, sfc
-# MAKE_PLOTS_VERIF_CASE: grid2obs
-# > MAKE_PLOTS_VERIF_TYPE: conus_sfc, upper_air
-# MAKE_PLOTS_VERIF_CASE: precip
-# > MAKE_PLOTS_VERIF_TYPE: [can be any string]
-#-------------- OR USE --------------
-# MAKE_PLOTS_USER_SCRIPT_LIST - allows the user to
-# give METplus user-created scripts. Follow the
-# plotting scripts in METplus as an example of
-# how to create your own. The scripts should be
-# located wherever MAKE_PLOTS_SCRIPTS_DIR
-# is set to
-MAKE_PLOTS_VERIF_CASE = grid2obs
-MAKE_PLOTS_VERIF_TYPE = upper_air
-# LINE_TYPE_LIST = SL1L2, VL1L2 options: bias, rms, msess, rsd, rmse_md, rmse_pv, pcor, fbar, fbar_obar
-# LINE_TYPE_LIST = SAL1L2, VAL1L2 options: acc
-# LINE_TYPE_LIST = VCNT options: bias, fbar, fbar_obar, speed_err, dir_err, rmsve, vdiff_speed, vdiff_dir,
-# rsd, fbar_speed, fbar_dir, fbar_obar_speed, fbar_obar_dir
-# LINE_TYPE_LIST = CTC options: orate, baser, frate, orate_frate, baser_frate, accuracy, bias, fbias, pod,
-# hrate, pofd, farate, podn, faratio, csi, ts, gss, ets, hk, tss, pss, hs
-LINE_TYPE_LIST = SL1L2, VL1L2
-MAKE_PLOTS_STATS_LIST = bias, rmse
-# Average Calculation Method
-# options: MEAN, MEDIAN, AGGREGATION
-MAKE_PLOTS_AVERAGE_METHOD = MEAN
-# Confidence Interval Calculation Method
-# options: EMC, EMC_MONTE_CARLO, NONE
-MAKE_PLOTS_CI_METHOD = EMC
-# Grid that verification is done on
-MAKE_PLOTS_VERIF_GRID = G003
-# Do event equalization (True) or not (False)
-MAKE_PLOTS_EVENT_EQUALIZATION = False
diff --git a/internal/tests/plotting/examples/plot_emc_precip_ccpa.conf b/internal/tests/plotting/examples/plot_emc_precip_ccpa.conf
deleted file mode 100644
index 841a4b451..000000000
--- a/internal/tests/plotting/examples/plot_emc_precip_ccpa.conf
+++ /dev/null
@@ -1,114 +0,0 @@
-[dir]
-# Dirs for StatAnalysis
-STAT_ANALYSIS_OUTPUT_DIR = {OUTPUT_BASE}/gather_by_info/stat_analysis/precip/ccpa
-# Dirs for MakePlots
-MAKE_PLOTS_SCRIPTS_DIR = {METPLUS_BASE}/ush/plotting_scripts
-MAKE_PLOTS_INPUT_DIR = {STAT_ANALYSIS_OUTPUT_DIR}
-MAKE_PLOTS_OUTPUT_DIR = {OUTPUT_BASE}/make_plots/precip/ccpa
-# Location of configuration files used by MET applications
-CONFIG_DIR = {PARM_BASE}/use_cases/plotting/met_config
-
-[config]
-# LOOP_ORDER must be set to processes for plotting
-LOOP_ORDER = processes
-PROCESS_LIST = StatAnalysis, MakePlots
-
-# Date treatment, either VALID or INIT
-DATE_TYPE = VALID
-# blank or YYYYmmDD format
-VALID_BEG = 20170613
-VALID_END = 20170613
-# blank or HH format (two digit hour format, ex. 06)
-FCST_VALID_HOUR_LIST = 12
-FCST_INIT_HOUR_LIST = 00, 12
-OBS_VALID_HOUR_LIST =
-OBS_INIT_HOUR_LIST =
-GROUP_LIST_ITEMS = FCST_INIT_HOUR_LIST
-LOOP_LIST_ITEMS = FCST_VALID_HOUR_LIST
-
-# Models to process
-# EACH MODEL IS LOOPED OVER
-# MODELn is the model name to filter for in
-# stat files [required]
-# MODELn_OBTYPE is the observation name
-# to filter for the .stat files
-# [required]
-# MODELn_STAT_ANALYSIS_LOOKIN_DIR is the directory to search for
-# the .stat files in, wildcards (*)
-# are okay to search for multiple
-# directories and templates like
-# {valid?fmt=%H%M%S} [required]
-# MODELn_REFERENCE_NAME is a reference name for MODELn, defaults to
-# MODELn, it can be used in the file template names
-# [optional]
-MODEL1 = GFS
-MODEL1_OBTYPE = ANLYS
-MODEL1_STAT_ANALYSIS_LOOKIN_DIR = {OUTPUT_BASE}/gather_by_date/stat_analysis/precip/ccpa/*/{MODEL1}
-MODEL1_REFERENCE_NAME = gfs
-
-# Variables and levels to process
-# EACH VARIABLE IS LOOPED OVER FOR ITS
-# LEVELS, THRESHOLDS, AND IF APPLICABLE
-# FOURIER WAVE DECOMPOSITION
-# FCST_VARn_NAME and FCST_VARn_LEVELS required
-# optional: FCST_VARn_THRESH, FCST_VARn_OPTIONS, FCST_VARn_UNITS,
-# OBS_VARn_NAME, OBS_VARn_LEVELS,
-# OBS_VARn_THRESH, OBS_VARn_OPTIONS, OBS_VARn_UNITS,
-# VARn_FOURIER_DECOMP, VARn_WAVE_NUM_LIST
-# if OBS_VARn variables not listed they are filled with FCST_VARn values
-FCST_VAR1_NAME = APCP
-FCST_VAR1_LEVELS = A24
-FCST_VAR1_THRESH = >=0.2, >=2, >=5, >=10, >=15, >=25, >=35, >=50, >=75
-
-STAT_ANALYSIS_CONFIG_FILE = {CONFIG_DIR}/STATAnalysisConfig
-
-# REQUIRED LISTS
-MODEL_LIST = {MODEL1}
-FCST_LEAD_LIST = 24, 36, 48, 60, 72, 84, 96, 108, 120, 132, 144, 156, 168, 180
-VX_MASK_LIST = CONUS, EAST, WEST
-# OPTIONAL LISTS
-DESC_LIST =
-OBS_LEAD_LIST =
-INTERP_MTHD_LIST =
-INTERP_PNTS_LIST =
-COV_THRESH_LIST =
-ALPHA_LIST =
-
-# Plotting options
-# MAKE_PLOTS_VERIF_CASE, MAKE_PLOTS_VERIF_TYPE -
-# used to create plots for various verification
-# use cases and types. This produces plots like
-# EMC uses for verification.
-# MAKE_PLOTS_VERIF_CASE: grid2grid
-# > MAKE_PLOTS_VERIF_TYPE: anom, pres, sfc
-# MAKE_PLOTS_VERIF_CASE: grid2obs
-# > MAKE_PLOTS_VERIF_TYPE: conus_sfc, upper_air
-# MAKE_PLOTS_VERIF_CASE: precip
-# > MAKE_PLOTS_VERIF_TYPE: [can be any string]
-#-------------- OR USE --------------
-# MAKE_PLOTS_USER_SCRIPT_LIST - allows the user to
-# give METplus user-created scripts. Follow the
-# plotting scripts in METplus as an example of
-# how to create your own. The scripts should be
-# located wherever MAKE_PLOTS_SCRIPTS_DIR
-# is set to
-MAKE_PLOTS_VERIF_CASE = precip
-MAKE_PLOTS_VERIF_TYPE = ccpa
-# LINE_TYPE_LIST = SL1L2, VL1L2 options: bias, rms, msess, rsd, rmse_md, rmse_pv, pcor, fbar, fbar_obar
-# LINE_TYPE_LIST = SAL1L2, VAL1L2 options: acc
-# LINE_TYPE_LIST = VCNT options: bias, fbar, fbar_obar, speed_err, dir_err, rmsve, vdiff_speed, vdiff_dir,
-# rsd, fbar_speed, fbar_dir, fbar_obar_speed, fbar_obar_dir
-# LINE_TYPE_LIST = CTC options: orate, baser, frate, orate_frate, baser_frate, accuracy, bias, fbias, pod,
-# hrate, pofd, farate, podn, faratio, csi, ts, gss, ets, hk, tss, pss, hs
-LINE_TYPE_LIST = CTC
-MAKE_PLOTS_STATS_LIST = bias, ets
-# Average Calculation Method
-# options: MEAN, MEDIAN, AGGREGATION
-MAKE_PLOTS_AVERAGE_METHOD = AGGREGATION
-# Confidence Interval Calculation Method
-# options: EMC, EMC_MONTE_CARLO, NONE
-MAKE_PLOTS_CI_METHOD = EMC_MONTE_CARLO
-# Grid that verification is done on
-MAKE_PLOTS_VERIF_GRID = G211
-# Do event equalization (True) or not (False)
-MAKE_PLOTS_EVENT_EQUALIZATION = False
diff --git a/internal/tests/plotting/examples/plot_user_plotting_scripts.conf b/internal/tests/plotting/examples/plot_user_plotting_scripts.conf
deleted file mode 100644
index 87c82556d..000000000
--- a/internal/tests/plotting/examples/plot_user_plotting_scripts.conf
+++ /dev/null
@@ -1,114 +0,0 @@
-[dir]
-# Dirs for StatAnalysis
-STAT_ANALYSIS_OUTPUT_DIR = {OUTPUT_BASE}/gather_by_info/stat_analysis/grid2grid/anom_HGT
-# Dirs for MakePlots
-MAKE_PLOTS_SCRIPTS_DIR = {METPLUS_BASE}/ush/plotting_scripts
-MAKE_PLOTS_INPUT_DIR = {STAT_ANALYSIS_OUTPUT_DIR}
-MAKE_PLOTS_OUTPUT_DIR = {OUTPUT_BASE}/make_plots/grid2grid/anom_HGT
-# Location of configuration files used by MET applications
-CONFIG_DIR = {PARM_BASE}/use_cases/plotting/met_config
-
-[config]
-# LOOP_ORDER must be set to processes for plotting
-LOOP_ORDER = processes
-PROCESS_LIST = StatAnalysis, MakePlots
-
-# Date treatment, either VALID or INIT
-DATE_TYPE = VALID
-# blank or YYYYmmDD format
-VALID_BEG = 20170613
-VALID_END = 20170613
-# blank or HH format (two digit hour format, ex. 06)
-FCST_VALID_HOUR_LIST = 00
-FCST_INIT_HOUR_LIST = 00
-OBS_VALID_HOUR_LIST =
-OBS_INIT_HOUR_LIST =
-GROUP_LIST_ITEMS = FCST_INIT_HOUR_LIST
-LOOP_LIST_ITEMS = FCST_VALID_HOUR_LIST
-
-# Models to process
-# EACH MODEL IS LOOPED OVER
-# MODELn is the model name to filter for in
-# stat files [required]
-# MODELn_OBTYPE is the observation name
-# to filter for the .stat files
-# [required]
-# MODELn_STAT_ANALYSIS_LOOKIN_DIR is the directory to search for
-# the .stat files in, wildcards (*)
-# are okay to search for multiple
-# directories and templates like
-# {valid?fmt=%H%M%S} [required]
-# MODELn_REFERENCE_NAME is a reference name for MODELn, defaults to
-# MODELn, it can be used in the file template names
-# [optional]
-MODEL1 = GFS
-MODEL1_OBTYPE = ANLYS
-MODEL1_STAT_ANALYSIS_LOOKIN_DIR = {OUTPUT_BASE}/gather_by_date/stat_analysis/grid2grid/anom/{fcst_valid_hour?fmt=%H}Z/{MODEL1}
-MODEL1_REFERENCE_NAME = gfs
-
-# Variables and levels to process
-# EACH VARIABLE IS LOOPED OVER FOR ITS
-# LEVELS, THRESHOLDS, AND IF APPLICABLE
-# FOURIER WAVE DECOMPOSITION
-# FCST_VARn_NAME and FCST_VARn_LEVELS required
-# optional: FCST_VARn_THRESH, FCST_VARn_OPTIONS, FCST_VARn_UNITS,
-# OBS_VARn_NAME, OBS_VARn_LEVELS,
-# OBS_VARn_THRESH, OBS_VARn_OPTIONS, OBS_VARn_UNITS,
-# VARn_FOURIER_DECOMP, VARn_WAVE_NUM_LIST
-# if OBS_VARn variables not listed they are filled with FCST_VARn values
-FCST_VAR1_NAME = HGT
-FCST_VAR1_LEVELS = P1000, P700, P500, P250
-VAR1_FOURIER_DECOMP = True
-VAR1_WAVE_NUM_LIST = 0-3, 4-9, 10-20, 0-20
-
-STAT_ANALYSIS_CONFIG_FILE = {CONFIG_DIR}/STATAnalysisConfig
-
-# REQUIRED LISTS
-MODEL_LIST = {MODEL1}
-FCST_LEAD_LIST = 24, 48, 72, 96, 120, 144, 168, 192, 216, 240
-VX_MASK_LIST = NHX
-# OPTIONAL LISTS
-DESC_LIST =
-OBS_LEAD_LIST =
-INTERP_MTHD_LIST =
-INTERP_PNTS_LIST =
-COV_THRESH_LIST =
-ALPHA_LIST =
-
-## Plotting options
-# MAKE_PLOTS_VERIF_CASE, MAKE_PLOTS_VERIF_TYPE -
-# used to create plots for various verification
-# use cases and types. This produces plots like
-# EMC uses for verification.
-# MAKE_PLOTS_VERIF_CASE: grid2grid
-# > MAKE_PLOTS_VERIF_TYPE: anom, pres, sfc
-# MAKE_PLOTS_VERIF_CASE: grid2obs
-# > MAKE_PLOTS_VERIF_TYPE: conus_sfc, upper_air
-# MAKE_PLOTS_VERIF_CASE: precip
-# > MAKE_PLOTS_VERIF_TYPE: [can be any string]
-#-------------- OR USE --------------
-# MAKE_PLOTS_USER_SCRIPT_LIST - allows the user to
-# give METplus user-created scripts. Follow the
-# plotting scripts in METplus as an example of
-# how to create your own. The scripts should be
-# located wherever MAKE_PLOTS_SCRIPTS_DIR
-# is set to
-MAKE_PLOTS_USER_SCRIPT_LIST = plot_time_series.py
-# LINE_TYPE_LIST = SL1L2, VL1L2 options: bias, rms, msess, rsd, rmse_md, rmse_pv, pcor, fbar, fbar_obar
-# LINE_TYPE_LIST = SAL1L2, VAL1L2 options: acc
-# LINE_TYPE_LIST = VCNT options: bias, fbar, fbar_obar, speed_err, dir_err, rmsve, vdiff_speed, vdiff_dir,
-# rsd, fbar_speed, fbar_dir, fbar_obar_speed, fbar_obar_dir
-# LINE_TYPE_LIST = CTC options: orate, baser, frate, orate_frate, baser_frate, accuracy, bias, fbias, pod,
-# hrate, pofd, farate, podn, faratio, csi, ts, gss, ets, hk, tss, pss, hs
-LINE_TYPE_LIST = SAL1L2, VAL1L2
-MAKE_PLOTS_STATS_LIST = acc
-# Average Calculation Method
-# options: MEAN, MEDIAN, AGGREGATION
-MAKE_PLOTS_AVERAGE_METHOD = MEAN
-# Confidence Interval Calculation Method
-# options: EMC, EMC_MONTE_CARLO, NONE
-MAKE_PLOTS_CI_METHOD = EMC
-# Grid that verification is done on
-MAKE_PLOTS_VERIF_GRID = G002
-# Do event equalization (True) or not (False)
-MAKE_PLOTS_EVENT_EQUALIZATION = False
diff --git a/internal/tests/plotting/met_config/STATAnalysisConfig b/internal/tests/plotting/met_config/STATAnalysisConfig
deleted file mode 100644
index 74e002049..000000000
--- a/internal/tests/plotting/met_config/STATAnalysisConfig
+++ /dev/null
@@ -1,93 +0,0 @@
-////////////////////////////////////////////////////////////////////////////////
-//
-// STAT-Analysis configuration file.
-//
-// For additional information, see the MET_BASE/config/README file.
-//
-////////////////////////////////////////////////////////////////////////////////
-
-//
-// Filtering input STAT lines by the contents of each column
-//
-${METPLUS_MODEL}
-${METPLUS_DESC}
-
-fcst_lead = [${FCST_LEAD}];
-obs_lead = [${OBS_LEAD}];
-
-fcst_valid_beg = "${FCST_VALID_BEG}";
-fcst_valid_end = "${FCST_VALID_END}";
-fcst_valid_hour = [${FCST_VALID_HOUR}];
-
-obs_valid_beg = "${OBS_VALID_BEG}";
-obs_valid_end = "${OBS_VALID_END}";
-obs_valid_hour = [${OBS_VALID_HOUR}];
-
-fcst_init_beg = "${FCST_INIT_BEG}";
-fcst_init_end = "${FCST_INIT_END}";
-fcst_init_hour = [${FCST_INIT_HOUR}];
-
-obs_init_beg = "${OBS_INIT_BEG}";
-obs_init_end = "${OBS_INIT_END}";
-obs_init_hour = [${OBS_INIT_HOUR}];
-
-fcst_var = [${FCST_VAR}];
-obs_var = [${OBS_VAR}];
-
-fcst_units = [${FCST_UNITS}];
-obs_units = [${OBS_UNITS}];
-
-fcst_lev = [${FCST_LEVEL}];
-obs_lev = [${OBS_LEVEL}];
-
-${METPLUS_OBTYPE}
-
-vx_mask = [${VX_MASK}];
-
-interp_mthd = [${INTERP_MTHD}];
-
-interp_pnts = [${INTERP_PNTS}];
-
-fcst_thresh = [${FCST_THRESH}];
-obs_thresh = [${OBS_THRESH}];
-cov_thresh = [${COV_THRESH}];
-
-alpha = [${ALPHA}];
-
-line_type = [${LINE_TYPE}];
-
-column = [];
-
-weight = [];
-
-////////////////////////////////////////////////////////////////////////////////
-
-//
-// Array of STAT-Analysis jobs to be performed on the filtered data
-//
-jobs = [
-   "${JOB}"
-];
-
-////////////////////////////////////////////////////////////////////////////////
-
-//
-// Confidence interval settings
-//
-out_alpha = 0.05;
-
-boot = {
-   interval = PCTILE;
-   rep_prop = 1.0;
-   n_rep = 0;
-   rng = "mt19937";
-   seed = "";
-}
-
-////////////////////////////////////////////////////////////////////////////////
-
-rank_corr_flag = FALSE;
-vif_flag = FALSE;
-tmp_dir = "/tmp";
-
-////////////////////////////////////////////////////////////////////////////////
diff --git a/internal/tests/pytests/plotting/make_plots/test_make_plots.conf b/internal/tests/pytests/plotting/make_plots/test_make_plots.conf
deleted file mode 100644
index 4bad57f74..000000000
--- a/internal/tests/pytests/plotting/make_plots/test_make_plots.conf
+++ /dev/null
@@ -1,120 +0,0 @@
-[dir]
-# Dirs for StatAnalysis
-STAT_ANALYSIS_OUTPUT_DIR = {OUTPUT_BASE}/plotting/stat_analysis
-# Dirs for MakePlots
-MAKE_PLOTS_SCRIPTS_DIR = {METPLUS_BASE}/ush/plotting_scripts
-MAKE_PLOTS_INPUT_DIR = {STAT_ANALYSIS_OUTPUT_DIR}
-MAKE_PLOTS_OUTPUT_DIR = {OUTPUT_BASE}/plotting/make_plots
-# Location of configuration files used by MET applications
-CONFIG_DIR = {PARM_BASE}/use_cases/plotting/met_config
-
-[config]
-# LOOP_ORDER must be set to processes for plotting
-LOOP_ORDER = processes
-PROCESS_LIST = StatAnalysis, MakePlots
-
-# Date treatment, either VALID or INIT
-DATE_TYPE = VALID
-# blank or YYYYmmDD format
-VALID_BEG = 20190101
-VALID_END = 20190101
-# blank or HH format (two digit hour format, ex. 06)
-FCST_VALID_HOUR_LIST = 00, 06, 12, 18
-FCST_INIT_HOUR_LIST = 00, 06, 12, 18
-OBS_VALID_HOUR_LIST =
-OBS_INIT_HOUR_LIST =
-GROUP_LIST_ITEMS = FCST_INIT_HOUR_LIST
-LOOP_LIST_ITEMS = FCST_VALID_HOUR_LIST
-
-# Models to process
-# EACH MODEL IS LOOPED OVER
-# MODELn is the model name to filter for in
-# stat files [required]
-# MODELn_OBTYPE is the observation name
-# to filter for the .stat files
-# [required]
-# MODELn_STAT_ANALYSIS_LOOKIN_DIR is the directory to search for
-# the .stat files in, wildcards (*)
-# are okay to search for multiple
-# directories and templates like
-# {valid?fmt=%H%M%S} [required]
-# MODELn_REFERENCE_NAME is a reference name for MODELn, defaults to
-# MODELn, it can be used in the file template names
-# [optional]
-MODEL1 = MODEL_TEST1
-MODEL1_OBTYPE = MODEL_TEST1_ANL
-MODEL1_STAT_ANALYSIS_LOOKIN_DIR = {OUTPUT_BASE}/stat_analysis
-MODEL1_REFERENCE_NAME = MODEL_TEST1
-
-MODEL2 = MODEL_TEST2
-MODEL2_OBTYPE = ANLYS2
-MODEL2_STAT_ANALYSIS_LOOKIN_DIR = {OUTPUT_BASE}/stat_analysis
-MODEL2_REFERENCE_NAME = TEST2_MODEL
-
-# Variables and levels to process
-# EACH VARIABLE IS LOOPED OVER FOR ITS
-# LEVELS, THRESHOLDS, AND IF APPLICABLE
-# FOURIER WAVE DECOMPOSITION
-# FCST_VARn_NAME and FCST_VARn_LEVELS required
-# optional: FCST_VARn_THRESH, FCST_VARn_OPTIONS, FCST_VARn_UNITS,
-# OBS_VARn_NAME, OBS_VARn_LEVELS,
-# OBS_VARn_THRESH, OBS_VARn_OPTIONS, OBS_VARn_UNITS,
-# VARn_FOURIER_DECOMP, VARn_WAVE_NUM_LIST
-# if OBS_VARn variables not listed they are filled with FCST_VARn values
-FCST_VAR1_NAME = HGT
-FCST_VAR1_LEVELS = P1000, P850
-OBS_VAR1_NAME = HGT
-OBS_VAR1_LEVELS = P1000, P850
-
-STAT_ANALYSIS_CONFIG_FILE = {CONFIG_DIR}/STATAnalysisConfig
-
-# REQUIRED LISTS
-MODEL_LIST = {MODEL1}, {MODEL2}
-FCST_LEAD_LIST = 24, 48
-VX_MASK_LIST = NHX
-# OPTIONAL LISTS
-DESC_LIST =
-OBS_LEAD_LIST =
-INTERP_MTHD_LIST =
-INTERP_PNTS_LIST =
-COV_THRESH_LIST =
-ALPHA_LIST =
-
-# Plotting options
-# MAKE_PLOTS_VERIF_CASE, MAKE_PLOTS_VERIF_TYPE -
-# used to create plots for various verification
-# use cases and types. This produces plots like
-# EMC uses for verification.
-# MAKE_PLOTS_VERIF_CASE: grid2grid
-# > MAKE_PLOTS_VERIF_TYPE: anom, pres, sfc
-# MAKE_PLOTS_VERIF_CASE: grid2obs
-# > MAKE_PLOTS_VERIF_TYPE: conus_sfc, upper_air
-# MAKE_PLOTS_VERIF_CASE: precip
-# > MAKE_PLOTS_VERIF_TYPE: [can be any string]
-#-------------- OR USE --------------
-# MAKE_PLOTS_USER_SCRIPT_LIST - allows the user to
-# give METplus user-created scripts. Follow the
-# plotting scripts in METplus as an example of
-# how to create your own. The scripts should be
-# located wherever MAKE_PLOTS_SCRIPTS_DIR
-# is set to
-MAKE_PLOTS_VERIF_CASE = grid2grid
-MAKE_PLOTS_VERIF_TYPE = pres
-# LINE_TYPE_LIST = SL1L2, VL1L2 options: bias, rms, msess, rsd, rmse_md, rmse_pv, pcor, fbar, fbar_obar
-# LINE_TYPE_LIST = SAL1L2, VAL1L2 options: acc
-# LINE_TYPE_LIST = VCNT options: bias, fbar, fbar_obar, speed_err, dir_err, rmsve, vdiff_speed, vdiff_dir,
-# rsd, fbar_speed, fbar_dir, fbar_obar_speed, fbar_obar_dir
-# LINE_TYPE_LIST = CTC options: orate, baser, frate, orate_frate, baser_frate, accuracy, bias, fbias, pod,
-# hrate, pofd, farate, podn, faratio, csi, ts, gss, ets, hk, tss, pss, hs
-LINE_TYPE_LIST = SL1L2, VL1L2
-MAKE_PLOTS_STATS_LIST = bias, rmse, msess, rsd, rmse_md, rmse_pv, pcor
-# Average Calculation Method
-# options: MEAN, MEDIAN, AGGREGATION
-MAKE_PLOTS_AVERAGE_METHOD = MEAN
-# Confidence Interval Calculation Method
-# options: EMC, EMC_MONTE_CARLO, NONE
-MAKE_PLOTS_CI_METHOD = EMC
-# Grid that verification is done on
-MAKE_PLOTS_VERIF_GRID = G002
-# Do event equalization (True) or not (False)
-MAKE_PLOTS_EVENT_EQUALIZATION = False
diff --git a/internal/tests/pytests/plotting/make_plots/test_make_plots_wrapper.py b/internal/tests/pytests/plotting/make_plots/test_make_plots_wrapper.py
deleted file mode 100644
index 5c6b08c91..000000000
--- a/internal/tests/pytests/plotting/make_plots/test_make_plots_wrapper.py
+++ /dev/null
@@ -1,104 +0,0 @@
-#!/usr/bin/env python3
-
-import pytest
-
-import os
-
-from metplus.wrappers.make_plots_wrapper import MakePlotsWrapper
-
-METPLUS_BASE = os.getcwd().split('/internal')[0]
-
-
-def make_plots_wrapper(metplus_config):
-    """! Returns a default MakePlotsWrapper with /path/to entries in the
-    metplus_system.conf and metplus_runtime.conf configuration
-    files. Subsequent tests can customize the final METplus configuration
-    to over-ride these /path/to values."""
-
-    # Default, empty MakePlotsWrapper with some configuration values set
-    # to /path/to:
-    extra_configs = []
-    extra_configs.append(os.path.join(os.path.dirname(__file__), 'test_make_plots.conf'))
-    config = metplus_config(extra_configs)
-    return MakePlotsWrapper(config)
-
-
-@pytest.mark.plotting
-def test_get_command(metplus_config):
-    # Independently test that the make_plots python
-    # command is being put together correctly with
-    # python command followed by the full path
-    # to the plotting script
-    mp = make_plots_wrapper(metplus_config)
-    # Test 1
-    expected_command = (
-        'python plot_fake_script_name.py'
-    )
-    mp.plotting_script = 'plot_fake_script_name.py'
-    test_command = mp.get_command()
-    assert(expected_command == test_command)
-
-
-@pytest.mark.plotting
-def test_create_c_dict(metplus_config):
-    # Independently test that c_dict is being created
-    # and that the wrapper and config reader
-    # is setting the values as expected
-    mp = make_plots_wrapper(metplus_config)
-    # Test 1
-    c_dict = mp.create_c_dict()
-    # NOTE: MakePlots relies on output from StatAnalysis
-    # so its input resides in the output of StatAnalysis
-    assert(c_dict['INPUT_BASE_DIR'] == mp.config.getdir('OUTPUT_BASE')
-           +'/plotting/stat_analysis')
-    assert(c_dict['OUTPUT_BASE_DIR'] == mp.config.getdir('OUTPUT_BASE')
-           +'/plotting/make_plots')
-    assert(os.path.realpath(c_dict['SCRIPTS_BASE_DIR']) == METPLUS_BASE+'/ush/plotting_scripts')
-    assert(c_dict['DATE_TYPE'] == 'VALID')
-    assert(c_dict['VALID_BEG'] == '20190101')
-    assert(c_dict['VALID_END'] == '20190101')
-    assert(c_dict['INIT_BEG'] == '')
-    assert(c_dict['INIT_END'] == '')
-    assert(c_dict['GROUP_LIST_ITEMS'] == [ 'FCST_INIT_HOUR_LIST' ])
-    assert(c_dict['LOOP_LIST_ITEMS'] == [ 'FCST_VALID_HOUR_LIST' ])
-    assert(c_dict['VAR_LIST'] == [{'fcst_name': 'HGT',
-                                   'fcst_output_name': 'HGT',
-                                   'obs_name': 'HGT',
-                                   'obs_output_name': 'HGT',
-                                   'fcst_extra': '', 'obs_extra': '',
-                                   'fcst_thresh': [], 'obs_thresh': [],
-                                   'fcst_level': 'P1000',
-                                   'obs_level': 'P1000', 'index': '1'},
-                                  {'fcst_name': 'HGT',
-                                   'fcst_output_name': 'HGT',
-                                   'obs_name': 'HGT',
-                                   'obs_output_name': 'HGT',
-                                   'fcst_extra': '', 'obs_extra': '',
-                                   'fcst_thresh': [], 'obs_thresh': [],
-                                   'fcst_level': 'P850',
-                                   'obs_level': 'P850', 'index': '1'}])
-    assert(c_dict['MODEL_LIST'] == [ 'MODEL_TEST1', 'MODEL_TEST2'])
-    assert(c_dict['DESC_LIST'] == [])
-    assert(c_dict['FCST_LEAD_LIST'] == [ '24', '48' ])
-    assert(c_dict['OBS_LEAD_LIST'] == [])
-    assert(c_dict['FCST_VALID_HOUR_LIST'] == [ '00', '06', '12', '18' ])
-    assert(c_dict['FCST_INIT_HOUR_LIST'] == [ '00', '06', '12', '18' ])
-    assert(c_dict['OBS_VALID_HOUR_LIST'] == [])
-    assert(c_dict['OBS_INIT_HOUR_LIST'] == [])
-    assert(c_dict['VX_MASK_LIST'] == [ 'NHX' ])
-    assert(c_dict['INTERP_MTHD_LIST'] == [])
-    assert(c_dict['INTERP_PNTS_LIST'] == [])
-    assert(c_dict['COV_THRESH_LIST'] == [])
-    assert(c_dict['ALPHA_LIST'] == [])
-    assert(c_dict['LINE_TYPE_LIST'] == [ 'SL1L2', 'VL1L2' ])
-    assert(c_dict['USER_SCRIPT_LIST'] == [])
-    assert(c_dict['VERIF_CASE'] == 'grid2grid')
-    assert(c_dict['VERIF_TYPE'] == 'pres')
-    assert(c_dict['STATS_LIST'] == [ 'bias', 'rmse', 'msess', 'rsd',
-                                     'rmse_md', 'rmse_pv', 'pcor' ])
-    assert(c_dict['AVERAGE_METHOD'] == 'MEAN')
-    assert(c_dict['CI_METHOD'] == 'EMC')
-    assert(c_dict['VERIF_GRID'] == 'G002')
-    assert(c_dict['EVENT_EQUALIZATION'] == 'False')
-    assert(c_dict['LOG_METPLUS'] == mp.config.getdir('OUTPUT_BASE')
-           +'/logs/metplus.log')
diff --git a/internal/tests/pytests/plotting/plot_util/test_plot_util.py b/internal/tests/pytests/plotting/plot_util/test_plot_util.py
deleted file mode 100644
index 8fb49fad8..000000000
--- a/internal/tests/pytests/plotting/plot_util/test_plot_util.py
+++ /dev/null
@@ -1,804 +0,0 @@
-#!/usr/bin/env python3
-
-import pytest
-
-import os
-import sys
-import datetime
-import logging
-
-import numpy as np
-import pandas as pd
-
-
-METPLUS_BASE = os.getcwd().split('/internal')[0]
-sys.path.append(METPLUS_BASE+'/ush/plotting_scripts')
-import plot_util
-logger = logging.getLogger('~/metplus_pytest_plot_util.log')
-
-
-@pytest.mark.plotting
-def test_get_date_arrays():
-    # Independently test the creation of
-    # the date arrays, one used for plotting
-    # the other the expected dates in the
-    # MET .stat file format
-    # Test 1
-    date_type = 'VALID'
-    date_beg = '20190101'
-    date_end = '20190105'
-    fcst_valid_hour = '000000'
-    fcst_init_hour = '000000'
-    obs_valid_hour = ''
-    obs_init_hour = ''
-    lead = '240000'
-    date_base = datetime.datetime(2019, 1, 1)
-    date_array = np.array(
-        [date_base + datetime.timedelta(days=i) for i in range(5)]
-    )
-    expected_plot_time_dates = []
-    expected_expected_stat_file_dates = []
-    for date in date_array:
-        dt = date.time()
-        seconds = (dt.hour * 60 + dt.minute) * 60 + dt.second
-        expected_plot_time_dates.append(date.toordinal() + seconds/86400.)
-        expected_expected_stat_file_dates.append(
-            date.strftime('%Y%m%d_%H%M%S')
-        )
-    test_plot_time_dates, test_expected_stat_file_dates = (
-        plot_util.get_date_arrays(date_type, date_beg, date_end,
-                                  fcst_valid_hour, fcst_init_hour,
-                                  obs_valid_hour, obs_init_hour, lead)
-    )
-    assert(len(test_plot_time_dates) ==
-           len(expected_plot_time_dates))
-    for l in range(len(test_plot_time_dates)):
-        assert(test_plot_time_dates[l] ==
-               expected_plot_time_dates[l])
-    assert(len(test_expected_stat_file_dates) ==
-           len(expected_expected_stat_file_dates))
-    for l in range(len(test_expected_stat_file_dates)):
-        assert(test_expected_stat_file_dates[l] ==
-               expected_expected_stat_file_dates[l])
-    # Test 2
-    date_type = 'VALID'
-    date_beg = '20190101'
-    date_end = '20190105'
-    fcst_valid_hour = '000000, 060000, 120000, 180000'
-    fcst_init_hour = '000000, 060000, 120000, 180000'
-    obs_valid_hour = ''
-    obs_init_hour = ''
-    lead = '480000'
-    date_base = datetime.datetime(2019, 1, 1)
-    date_array = np.array(
-        [date_base + datetime.timedelta(hours=i) for i in range(0,120,6)]
-    )
-    expected_plot_time_dates = []
-    expected_expected_stat_file_dates = []
-    for date in date_array:
-        dt = date.time()
-        seconds = (dt.hour * 60 + dt.minute) * 60 + dt.second
-        expected_plot_time_dates.append(date.toordinal() + seconds/86400.)
-        expected_expected_stat_file_dates.append(
-            date.strftime('%Y%m%d_%H%M%S')
-        )
-    test_plot_time_dates, test_expected_stat_file_dates = (
-        plot_util.get_date_arrays(date_type, date_beg, date_end,
-                                  fcst_valid_hour, fcst_init_hour,
-                                  obs_valid_hour, obs_init_hour, lead)
-    )
-    assert(len(test_plot_time_dates) ==
-           len(expected_plot_time_dates))
-    for l in range(len(test_plot_time_dates)):
-        assert(test_plot_time_dates[l] ==
-               expected_plot_time_dates[l])
-    assert(len(test_expected_stat_file_dates) ==
-           len(expected_expected_stat_file_dates))
-    for l in range(len(test_expected_stat_file_dates)):
-        assert(test_expected_stat_file_dates[l] ==
-               expected_expected_stat_file_dates[l])
-    # Test 3
-    date_type = 'INIT'
-    date_beg = '20190101'
-    date_end = '20190105'
-    fcst_valid_hour = '000000'
-    fcst_init_hour = '000000'
-    obs_valid_hour = ''
-    obs_init_hour = ''
-    lead = '360000'
-    date_base = datetime.datetime(2019, 1, 1)
-    date_array = np.array(
-        [date_base + datetime.timedelta(days=i) for i in range(5)]
-    )
-    lead_hour_seconds = int(int(lead[:-4])) * 3600
-    lead_min_seconds = int(lead[-4:-2]) * 60
-    lead_seconds = int(lead[-2:])
-    lead_offset = datetime.timedelta(
-        seconds=lead_hour_seconds + lead_min_seconds + lead_seconds
-    )
-    expected_plot_time_dates = []
-    expected_expected_stat_file_dates = []
-    for date in date_array:
-        dt = date.time()
-        seconds = (dt.hour * 60 + dt.minute) * 60 + dt.second
-        expected_plot_time_dates.append(date.toordinal() + seconds/86400.)
-        expected_expected_stat_file_dates.append(
-            (date + lead_offset).strftime('%Y%m%d_%H%M%S')
-        )
-    test_plot_time_dates, test_expected_stat_file_dates = (
-        plot_util.get_date_arrays(date_type, date_beg, date_end,
-                                  fcst_valid_hour, fcst_init_hour,
-                                  obs_valid_hour, obs_init_hour, lead)
-    )
-    assert(len(test_plot_time_dates) ==
-           len(expected_plot_time_dates))
-    for l in range(len(test_plot_time_dates)):
-        assert(test_plot_time_dates[l] ==
-               expected_plot_time_dates[l])
-    assert(len(test_expected_stat_file_dates) ==
-           len(expected_expected_stat_file_dates))
-    for l in range(len(test_expected_stat_file_dates)):
-        assert(test_expected_stat_file_dates[l] ==
-               expected_expected_stat_file_dates[l])
-    # Test 4
-    date_type = 'INIT'
-    date_beg = '20190101'
-    date_end = '20190105'
-    fcst_valid_hour = '000000, 060000, 120000, 180000'
-    fcst_init_hour = '000000, 060000, 120000, 180000'
-    obs_valid_hour = ''
-    obs_init_hour = ''
-    lead = '120000'
-    date_base = datetime.datetime(2019, 1, 1)
-    date_array = np.array(
-        [date_base + datetime.timedelta(hours=i) for i in range(0,120,6)]
-    )
-    lead_hour_seconds = int(int(lead[:-4])) * 3600
-    lead_min_seconds = int(lead[-4:-2]) * 60
-    lead_seconds = int(lead[-2:])
-    lead_offset = datetime.timedelta(
-        seconds=lead_hour_seconds + lead_min_seconds + lead_seconds
-    )
-    expected_plot_time_dates = []
-    expected_expected_stat_file_dates = []
-    for date in date_array:
-        dt = date.time()
-        seconds = (dt.hour * 60 + dt.minute) * 60 + dt.second
-        expected_plot_time_dates.append(date.toordinal() + seconds/86400.)
-        expected_expected_stat_file_dates.append(
-            (date + lead_offset).strftime('%Y%m%d_%H%M%S')
-        )
-    test_plot_time_dates, test_expected_stat_file_dates = (
-        plot_util.get_date_arrays(date_type, date_beg, date_end,
-                                  fcst_valid_hour, fcst_init_hour,
-                                  obs_valid_hour, obs_init_hour, lead)
-    )
-    assert(len(test_plot_time_dates) ==
-           len(expected_plot_time_dates))
-    for l in range(len(test_plot_time_dates)):
-        assert(test_plot_time_dates[l] ==
-               expected_plot_time_dates[l])
-    assert(len(test_expected_stat_file_dates) ==
-           len(expected_expected_stat_file_dates))
-    for l in range(len(test_expected_stat_file_dates)):
-        assert(test_expected_stat_file_dates[l] ==
-               expected_expected_stat_file_dates[l])
-
-
-@pytest.mark.plotting
-def test_format_thresh():
-    # Independently test the formatting
-    # of thresholds
-    # Test 1
-    thresh = '>=5'
-    expected_thresh_symbol = '>=5'
-    expected_thresh_letter = 'ge5'
-    test_thresh_symbol, test_thresh_letter = plot_util.format_thresh(thresh)
-    assert(test_thresh_symbol == expected_thresh_symbol)
-    assert(test_thresh_letter == expected_thresh_letter)
-    # Test 2
-    thresh = 'ge5'
-    expected_thresh_symbol = '>=5'
-    expected_thresh_letter = 'ge5'
-    test_thresh_symbol, test_thresh_letter = plot_util.format_thresh(thresh)
-    assert(test_thresh_symbol == expected_thresh_symbol)
-    assert(test_thresh_letter == expected_thresh_letter)
-    # Test 3
-    thresh = '>15'
-    expected_thresh_symbol = '>15'
-    expected_thresh_letter = 'gt15'
-    test_thresh_symbol, test_thresh_letter = plot_util.format_thresh(thresh)
-    assert(test_thresh_symbol == expected_thresh_symbol)
-    assert(test_thresh_letter == expected_thresh_letter)
-    # Test 4
-    thresh = 'gt15'
-    expected_thresh_symbol = '>15'
-    expected_thresh_letter = 'gt15'
-    test_thresh_symbol, test_thresh_letter = plot_util.format_thresh(thresh)
-    assert(test_thresh_symbol == expected_thresh_symbol)
-    assert(test_thresh_letter == expected_thresh_letter)
-    # Test 5
-    thresh = '==1'
-    expected_thresh_symbol = '==1'
-    expected_thresh_letter = 'eq1'
-    test_thresh_symbol, test_thresh_letter = plot_util.format_thresh(thresh)
-    assert(test_thresh_symbol == expected_thresh_symbol)
-    assert(test_thresh_letter == expected_thresh_letter)
-    # Test 6
-    thresh = 'eq1'
-    expected_thresh_symbol = '==1'
-    expected_thresh_letter = 'eq1'
-    test_thresh_symbol, test_thresh_letter = plot_util.format_thresh(thresh)
-    assert(test_thresh_symbol == expected_thresh_symbol)
-    assert(test_thresh_letter == expected_thresh_letter)
-    # Test 7
-    thresh = '!=0.5'
-    expected_thresh_symbol = '!=0.5'
-    expected_thresh_letter = 'ne0.5'
-    test_thresh_symbol, test_thresh_letter = plot_util.format_thresh(thresh)
-    assert(test_thresh_symbol == expected_thresh_symbol)
-    assert(test_thresh_letter == expected_thresh_letter)
-    # Test 8
-    thresh = 'ne0.5'
-    expected_thresh_symbol = '!=0.5'
-    expected_thresh_letter = 'ne0.5'
-    test_thresh_symbol, test_thresh_letter = plot_util.format_thresh(thresh)
-    assert(test_thresh_symbol == expected_thresh_symbol)
-    assert(test_thresh_letter == expected_thresh_letter)
-    # Test 9
-    thresh = '<=1000'
-    expected_thresh_symbol = '<=1000'
-    expected_thresh_letter = 'le1000'
-    test_thresh_symbol, test_thresh_letter = plot_util.format_thresh(thresh)
-    assert(test_thresh_symbol == expected_thresh_symbol)
-    assert(test_thresh_letter == expected_thresh_letter)
-    # Test 10
-    thresh = 'le1000'
-    expected_thresh_symbol = '<=1000'
-    expected_thresh_letter = 'le1000'
-    test_thresh_symbol, test_thresh_letter = plot_util.format_thresh(thresh)
-    assert(test_thresh_symbol == expected_thresh_symbol)
-    assert(test_thresh_letter == expected_thresh_letter)
-    # Test 11
-    thresh = '<0.001'
-    expected_thresh_symbol = '<0.001'
-    expected_thresh_letter = 'lt0.001'
-    test_thresh_symbol, test_thresh_letter = plot_util.format_thresh(thresh)
-    assert(test_thresh_symbol == expected_thresh_symbol)
-    assert(test_thresh_letter == expected_thresh_letter)
-    # Test 12
-    thresh = 'lt0.001'
-    expected_thresh_symbol = '<0.001'
-    expected_thresh_letter = 'lt0.001'
-    test_thresh_symbol, test_thresh_letter = plot_util.format_thresh(thresh)
-    assert(test_thresh_symbol == expected_thresh_symbol)
-    assert(test_thresh_letter == expected_thresh_letter)
-
-
-@pytest.mark.plotting
-def test_get_stat_file_base_columns():
-    # Independently test getting list
-    # of the base MET version .stat file columns
-    # Test 1
-    met_version = '8.0'
-    expected_stat_file_base_columns = [ 'VERSION', 'MODEL', 'DESC',
-                                        'FCST_LEAD', 'FCST_VALID_BEG',
-                                        'FCST_VALID_END', 'OBS_LEAD',
-                                        'OBS_VALID_BEG', 'OBS_VALID_END',
-                                        'FCST_VAR', 'FCST_LEV', 'OBS_VAR',
-                                        'OBS_LEV', 'OBTYPE', 'VX_MASK',
-                                        'INTERP_MTHD', 'INTERP_PNTS',
-                                        'FCST_THRESH', 'OBS_THRESH',
-                                        'COV_THRESH', 'ALPHA', 'LINE_TYPE' ]
-    test_stat_file_base_columns = plot_util.get_stat_file_base_columns(
-        met_version
-    )
-    assert(test_stat_file_base_columns == expected_stat_file_base_columns)
-    # Test 2
-    met_version = '8.1'
-    expected_stat_file_base_columns = [ 'VERSION', 'MODEL', 'DESC',
-                                        'FCST_LEAD', 'FCST_VALID_BEG',
-                                        'FCST_VALID_END', 'OBS_LEAD',
-                                        'OBS_VALID_BEG', 'OBS_VALID_END',
-                                        'FCST_VAR', 'FCST_UNITS', 'FCST_LEV',
-                                        'OBS_VAR', 'OBS_UNITS', 'OBS_LEV',
-                                        'OBTYPE', 'VX_MASK', 'INTERP_MTHD',
-                                        'INTERP_PNTS', 'FCST_THRESH',
-                                        'OBS_THRESH', 'COV_THRESH', 'ALPHA',
-                                        'LINE_TYPE' ]
-    test_stat_file_base_columns = plot_util.get_stat_file_base_columns(
-        met_version
-    )
-    assert(test_stat_file_base_columns == expected_stat_file_base_columns)
-
-
-@pytest.mark.plotting
-def test_get_stat_file_line_type_columns():
-    # Independently test getting list
-    # of the line type MET version .stat file columns
-    # Test 1
-    met_version = '8.1'
-    line_type = 'SL1L2'
-    expected_stat_file_line_type_columns = [ 'TOTAL', 'FBAR', 'OBAR', 'FOBAR',
-                                             'FFBAR', 'OOBAR', 'MAE' ]
-    test_stat_file_line_type_columns = (
-        plot_util.get_stat_file_line_type_columns(logger, met_version,
-                                                  line_type)
-    )
-    assert(test_stat_file_line_type_columns ==
-           expected_stat_file_line_type_columns)
-    # Test 2
-    met_version = '8.1'
-    line_type = 'SAL1L2'
-    expected_stat_file_line_type_columns = [ 'TOTAL', 'FABAR', 'OABAR',
-                                             'FOABAR', 'FFABAR', 'OOABAR',
-                                             'MAE' ]
-    test_stat_file_line_type_columns = (
-        plot_util.get_stat_file_line_type_columns(logger, met_version,
-                                                  line_type)
-    )
-    assert(test_stat_file_line_type_columns ==
-           expected_stat_file_line_type_columns)
-    # Test 3
-    met_version = '6.1'
-    line_type = 'VL1L2'
-    expected_stat_file_line_type_columns = [ 'TOTAL', 'UFBAR', 'VFBAR',
-                                             'UOBAR', 'VOBAR', 'UVFOBAR',
-                                             'UVFFBAR', 'UVOOBAR' ]
-    test_stat_file_line_type_columns = (
-        plot_util.get_stat_file_line_type_columns(logger, met_version,
-                                                  line_type)
-    )
-    assert(test_stat_file_line_type_columns ==
-           expected_stat_file_line_type_columns)
-    # Test 4
-    met_version = '8.1'
-    line_type = 'VL1L2'
-    expected_stat_file_line_type_columns = [ 'TOTAL', 'UFBAR', 'VFBAR',
-                                             'UOBAR', 'VOBAR', 'UVFOBAR',
-                                             'UVFFBAR', 'UVOOBAR',
-                                             'F_SPEED_BAR', 'O_SPEED_BAR' ]
-    test_stat_file_line_type_columns = (
-        plot_util.get_stat_file_line_type_columns(logger, met_version,
-                                                  line_type)
-    )
-    assert(test_stat_file_line_type_columns ==
-           expected_stat_file_line_type_columns)
-    # Test 5
-    met_version = '8.1'
-    line_type = 'VAL1L2'
-    expected_stat_file_line_type_columns = [ 'TOTAL', 'UFABAR', 'VFABAR',
-                                             'UOABAR', 'VOABAR', 'UVFOABAR',
-                                             'UVFFABAR', 'UVOOABAR' ]
-    test_stat_file_line_type_columns = (
-        plot_util.get_stat_file_line_type_columns(logger, met_version,
-                                                  line_type)
-    )
-    # Test 6
-    met_version = '8.1'
-    line_type = 'VCNT'
-    expected_stat_file_line_type_columns = [ 'TOTAL', 'FBAR', 'FBAR_NCL',
-                                             'FBAR_NCU', 'OBAR', 'OBAR_NCL',
-                                             'OBAR_NCU', 'FS_RMS',
-                                             'FS_RMS_NCL', 'FS_RMS_NCU',
-                                             'OS_RMS', 'OS_RMS_NCL',
-                                             'OS_RMS_NCU', 'MSVE', 'MSVE_NCL',
-                                             'MSVE_NCU', 'RMSVE', 'RMSVE_NCL',
-                                             'RMSVE_NCU', 'FSTDEV',
-                                             'FSTDEV_NCL', 'FSTDEV_NCU',
-                                             'OSTDEV', 'OSTDEV_NCL',
-                                             'OSTDEV_NCU', 'FDIR', 'FDIR_NCL',
-                                             'FDIR_NCU', 'ODIR', 'ODIR_NCL',
-                                             'ODIR_NCU', 'FBAR_SPEED',
-                                             'FBAR_SPEED_NCL',
-                                             'FBAR_SPEED_NCU', 'OBAR_SPEED',
-                                             'OBAR_SPEED_NCL',
-                                             'OBAR_SPEED_NCU', 'VDIFF_SPEED',
-                                             'VDIFF_SPEED_NCL',
-                                             'VDIFF_SPEED_NCU', 'VDIFF_DIR',
-                                             'VDIFF_DIR_NCL', 'VDIFF_DIR_NCU',
-                                             'SPEED_ERR', 'SPEED_ERR_NCL',
-                                             'SPEED_ERR_NCU', 'SPEED_ABSERR',
-                                             'SPEED_ABSERR_NCL',
-                                             'SPEED_ABSERR_NCU', 'DIR_ERR',
-                                             'DIR_ERR_NCL', 'DIR_ERR_NCU',
-                                             'DIR_ABSERR', 'DIR_ABSERR_NCL',
-                                             'DIR_ABSERR_NCU' ]
-    test_stat_file_line_type_columns = (
-        plot_util.get_stat_file_line_type_columns(logger, met_version,
-                                                  line_type)
-    )
-    assert(test_stat_file_line_type_columns ==
-           expected_stat_file_line_type_columns)
-    # Test 7
-    met_version = '8.1'
-    line_type = 'CTC'
-    expected_stat_file_line_type_columns = [ 'TOTAL', 'FY_OY', 'FY_ON',
-                                             'FN_OY', 'FN_ON' ]
-    test_stat_file_line_type_columns = (
-        plot_util.get_stat_file_line_type_columns(logger, met_version,
-                                                  line_type)
-    )
-    assert(test_stat_file_line_type_columns ==
-           expected_stat_file_line_type_columns)
-
-
-@pytest.mark.plotting
-def get_clevels():
-    # Independently test creating an array
-    # of levels centered about 0 to plot
-    # Test 1
-    data = np.array([ 7.89643761, 2.98214969, 4.04690632, 1.1047872,
-                      -3.42288272, 1.0111309, 8.02330262, -8.03515159,
-                      -8.89454837, 2.45191295, 9.43015692, -0.53815455,
-                      4.34984478, 4.54528989, -1.35164646 ])
-    expected_clevels = np.array([-10, -8, -6, -4, -2, 0, 2, 4, 6, 8, 10])
-    test_clevels = plot_util.get_clevels(data)
-    assert(test_clevels == expected_clevels)
-
-
-@pytest.mark.plotting
-def test_calculate_average():
-    # Independently test getting the average
-    # of a data array based on method
-    date_base = datetime.datetime(2019, 1, 1)
-    date_array = np.array(
-        [date_base + datetime.timedelta(days=i) for i in range(5)]
-    )
-    expected_stat_file_dates = []
-    for date in date_array:
-        dt = date.time()
-        expected_stat_file_dates.append(
-            date.strftime('%Y%m%d_%H%M%S')
-        )
-    model_data_index = pd.MultiIndex.from_product(
-        [['MODEL_TEST'], expected_stat_file_dates],
-        names=['model_plot_name', 'dates']
-    )
-    model_data_array = np.array([
-        [3600, 5525.75062, 5525.66493, 30615218.26089, 30615764.49722,
-         30614724.90979, 5.06746],
-        [3600, 5519.11108, 5519.1014, 30549413.45946, 30549220.68868,
-         30549654.24048, 5.12344],
-        [3600, 5516.80228, 5516.79513, 30522742.16484, 30522884.89927,
-         30522660.30975, 5.61752],
-        [3600, 5516.93924, 5517.80544, 30525709.03932, 30520984.50965,
-         30530479.99675, 4.94325],
-        [3600, 5514.52274, 5514.68224, 30495695.82208, 30494633.24046,
-         30496805.48259, 5.20369]
-    ])
-    model_data = pd.DataFrame(model_data_array, index=model_data_index,
-                              columns=[ 'TOTAL', 'FBAR', 'OBAR', 'FOBAR',
-                                        'FFBAR', 'OOBAR', 'MAE' ])
-    stat_values_array = np.array([[[(5525.75062 - 5525.66493),
-                                    (5519.11108 - 5519.1014),
-                                    (5516.80228 - 5516.79513),
-                                    (5516.93924 - 5517.80544),
-                                    (5514.52274 - 5514.68224)
-                                    ]]])
-    # Test 1
-    average_method = 'MEAN'
-    stat = 'bias'
-    model_dataframe = model_data
-    model_stat_values = stat_values_array[:,0,:]
-    expected_average_array = np.array([-0.184636])
-    test_average_array = plot_util.calculate_average(logger, average_method,
                                                      stat, model_dataframe,
                                                      model_stat_values)
-    assert(len(test_average_array) == len(expected_average_array))
-    for l in range(len(test_average_array)):
-        assert(round(test_average_array[l],6) == expected_average_array[l])
-    # Test 2
-    average_method = 'MEDIAN'
-    stat = 'bias'
-    model_dataframe = model_data
-    model_stat_values = stat_values_array[:,0,:]
-    expected_average_array = np.array([0.00715])
-    test_average_array = plot_util.calculate_average(logger, average_method,
-                                                     stat, model_dataframe,
-                                                     model_stat_values)
-    assert(len(test_average_array) == len(expected_average_array))
-    for l in range(len(test_average_array)):
-        assert(round(test_average_array[l],6) == expected_average_array[l])
-    # Test 3
-    average_method = 'AGGREGATION'
-    stat = 'bias'
-    model_dataframe = model_data
-    model_stat_values = stat_values_array[:,0,:]
-    expected_average_array = np.array([-0.184636])
-    test_average_array = plot_util.calculate_average(logger, average_method,
-                                                     stat, model_dataframe,
-                                                     model_stat_values)
-    assert(len(test_average_array) == len(expected_average_array))
-    for l in range(len(test_average_array)):
-        assert(round(test_average_array[l],6) == expected_average_array[l])
-    # Test 4
-    stat_values_array = np.array([[[5525.75062, 5519.11108,
-                                    5516.80228, 5516.93924,
-                                    5514.52274]],
-                                  [[5525.66493, 5519.1014,
-                                    5516.79513, 5517.80544,
-                                    5514.68224
-                                    ]]])
-    average_method = 'MEAN'
-    stat = 'fbar_obar'
-    model_dataframe = model_data
-    model_stat_values = stat_values_array[:,0,:]
-    expected_average_array = np.array([5518.625192,5518.809828])
-    test_average_array = plot_util.calculate_average(logger, average_method,
-                                                     stat, model_dataframe,
-                                                     model_stat_values)
-    assert(len(test_average_array) == len(expected_average_array))
-    for l in range(len(test_average_array)):
-        assert(round(test_average_array[l],6) == expected_average_array[l])
-    # Test 5
-    average_method = 'MEDIAN'
-    stat = 'fbar_obar'
-    model_dataframe = model_data
-    model_stat_values = stat_values_array[:,0,:]
-    expected_average_array = np.array([5516.93924, 5517.80544])
-    test_average_array = plot_util.calculate_average(logger, average_method,
-                                                     stat, model_dataframe,
-                                                     model_stat_values)
-    assert(len(test_average_array) == len(expected_average_array))
-    for l in range(len(test_average_array)):
-        assert(round(test_average_array[l],6) == expected_average_array[l])
-
-
-@pytest.mark.long
-def test_calculate_ci():
-    pytest.skip("Takes far too long to run")
-    # Independently test getting the
-    # confidence interval between two data arrays
-    # based on method
-    randx_seed = np.random.seed(0)
-    # Test 1
-    ci_method = 'EMC'
-    modelB_values = np.array([0.4983181, 0.63076339, 0.73753565,
-                              0.97960614, 0.74599612, 0.18829818,
-                              0.29490815, 0.5063043, 0.15074971,
-                              0.89009979, 0.81246532, 0.45399668,
-                              0.98247594, 0.38211414, 0.26690678])
-    modelA_values = np.array([0.37520287, 0.89286092, 0.66785908,
-                              0.55742834, 0.60978346, 0.5760979,
-                              0.55055558, 0.00388764, 0.55821689,
-                              0.56042747, 0.30637593, 0.83325185,
-                              0.84098604, 0.04021844, 0.57214717])
-    total_days = 15
-    stat = 'bias'
-    average_method = 'MEAN'
-    randx = np.random.rand(10000, total_days)
-    expected_std = np.sqrt(
-        ((
-            (modelB_values - modelA_values) -
-            (modelB_values - modelA_values).mean()
-        )**2).mean()
-    )
-    expected_intvl = 2.228*expected_std/np.sqrt(total_days-1)
-    test_intvl = plot_util.calculate_ci(logger, ci_method, modelB_values,
-                                        modelA_values, total_days,
-                                        stat, average_method, randx)
-    assert(test_intvl == expected_intvl)
-    # Test 2
-    ci_method = 'EMC'
-    modelB_values = np.array([0.4983181, 0.63076339, 0.73753565,
-                              0.97960614, 0.74599612, 0.18829818,
-                              0.29490815, 0.5063043, 0.15074971,
-                              0.89009979, 0.81246532, 0.45399668,
-                              0.98247594, 0.38211414, 0.26690678,
-                              0.64162609, 0.01370935, 0.79477382,
-                              0.31573415, 0.35282921, 0.57511574,
-                              0.27815519, 0.49562973, 0.4859588,
-                              0.16461642, 0.75849444, 0.44332183,
-                              0.94935173, 0.62597888, 0.12819335])
-    modelA_values = np.array([0.37520287, 0.89286092, 0.66785908,
-                              0.55742834, 0.60978346, 0.5760979,
-                              0.55055558, 0.00388764, 0.55821689,
-                              0.56042747, 0.30637593, 0.83325185,
-                              0.84098604, 0.04021844, 0.57214717,
-                              0.75091023, 0.47321941, 0.12862311,
-                              0.8644722, 0.92040807, 0.61376225,
-                              0.24347848, 0.69990467, 0.69711331,
-                              0.91866337, 0.63945963, 0.59999792,
-                              0.2920741, 0.64972479, 0.25025121])
-    total_days = 30
-    stat = 'bias'
-    average_method = 'MEAN'
-    randx = np.random.rand(10000, total_days)
-    expected_std = np.sqrt(
-        ((
-            (modelB_values - modelA_values) -
-            (modelB_values - modelA_values).mean()
-        )**2).mean()
-    )
-    expected_intvl = 2.042*expected_std/np.sqrt(total_days-1)
-    test_intvl = plot_util.calculate_ci(logger, ci_method, modelB_values,
-                                        modelA_values, total_days,
-                                        stat, average_method, randx)
-    assert(test_intvl == expected_intvl)
-    # Test 3
-    date_base = datetime.datetime(2019, 1, 1)
-    date_array = np.array(
-        [date_base + datetime.timedelta(days=i) for i in range(5)]
-    )
-    expected_stat_file_dates = []
-    for date in date_array:
-        dt = date.time()
-        expected_stat_file_dates.append(
-            date.strftime('%Y%m%d_%H%M%S')
-        )
-    model_data_indexA = pd.MultiIndex.from_product(
-        [['MODEL_TESTA'], expected_stat_file_dates],
-        names=['model_plot_name', 'dates']
-    )
-    model_data_arrayA = np.array([
-        [3600, 5525.75062, 5525.66493, 30615218.26089, 30615764.49722,
-         30614724.90979, 5.06746],
-        [3600, 5519.11108, 5519.1014, 30549413.45946, 30549220.68868,
-         30549654.24048, 5.12344],
-        [3600, 5516.80228, 5516.79513, 30522742.16484, 30522884.89927,
-         30522660.30975, 5.61752],
-        [3600, 5516.93924, 5517.80544, 30525709.03932, 30520984.50965,
-         30530479.99675, 4.94325],
-        [3600, 5514.52274, 5514.68224, 30495695.82208, 30494633.24046,
-         30496805.48259, 5.20369]
-    ])
-    model_dataA = pd.DataFrame(model_data_arrayA, index=model_data_indexA,
-                               columns=[ 'TOTAL', 'FBAR', 'OBAR', 'FOBAR',
-                                         'FFBAR', 'OOBAR', 'MAE' ])
-    model_data_arrayB = np.array([
-        [3600, 5527.43726, 5527.79714, 30635385.37277, 30633128.08035,
-         30637667.9488, 3.74623],
-        [3600, 5520.22487, 5520.5867, 30562940.31742, 30560471.32084,
-         30565442.31244, 4.17792],
-        [3600, 5518.16049, 5518.53379, 30538694.69234, 30536683.66886,
-         30540732.11308, 3.86693],
-        [3600, 5519.20033, 5519.38443, 30545925.19732, 30544766.74602,
-         30547108.75357, 3.7534],
-        [3600, 5515.78776, 5516.17552, 30509811.84136, 30507573.43899,
-         30512077.12263, 4.02554]
-    ])
-    model_data_indexB = pd.MultiIndex.from_product(
-        [['MODEL_TESTB'], expected_stat_file_dates],
-        names=['model_plot_name', 'dates']
-    )
-    model_dataB = pd.DataFrame(model_data_arrayB, index=model_data_indexB,
-                               columns=[ 'TOTAL', 'FBAR', 'OBAR', 'FOBAR',
-                                         'FFBAR', 'OOBAR', 'MAE' ])
-    ci_method = 'EMC_MONTE_CARLO'
-    modelB_values = model_dataB
-    modelA_values = model_dataA
-    total_days = 5
-    stat = 'bias'
-    average_method = 'AGGREGATION'
-    randx = np.random.rand(10000, total_days)
-    expected_intvl = 0.3893656076904014
-    test_intvl = plot_util.calculate_ci(logger, ci_method, modelB_values,
-                                        modelA_values, total_days,
-                                        stat, average_method, randx)
-    assert(test_intvl == expected_intvl)
-
-
-@pytest.mark.plotting
-def test_get_stat_plot_name():
-    # Independently test getting the
-    # a more formalized statistic name
-    # Test 1
-    stat = 'bias'
-    expected_stat_plot_name = 'Bias'
-    test_stat_plot_name = plot_util.get_stat_plot_name(logger, stat)
-    assert(test_stat_plot_name == expected_stat_plot_name)
-    # Test 2
-    stat = 'rmse_md'
-    expected_stat_plot_name = 'Root Mean Square Error from Mean Error'
-    test_stat_plot_name = plot_util.get_stat_plot_name(logger, stat)
-    assert(test_stat_plot_name == expected_stat_plot_name)
-    # Test 3
-    stat = 'fbar_obar'
-    expected_stat_plot_name = 'Forecast and Observation Averages'
-    test_stat_plot_name = plot_util.get_stat_plot_name(logger, stat)
-    assert(test_stat_plot_name == expected_stat_plot_name)
-    # Test 4
-    stat = 'acc'
-    expected_stat_plot_name = 'Anomaly Correlation Coefficient'
-    test_stat_plot_name = plot_util.get_stat_plot_name(logger, stat)
-    assert(test_stat_plot_name == expected_stat_plot_name)
-    # Test 5
-    stat = 'vdiff_speed'
-    expected_stat_plot_name = 'Difference Vector Speed'
-    test_stat_plot_name = plot_util.get_stat_plot_name(logger, stat)
-    assert(test_stat_plot_name == expected_stat_plot_name)
-    # Test 6
-    stat = 'baser'
-    expected_stat_plot_name = 'Base Rate'
-    test_stat_plot_name = plot_util.get_stat_plot_name(logger, stat)
-    assert(test_stat_plot_name == expected_stat_plot_name)
-    # Test 7
-    stat = 'fbias'
-    expected_stat_plot_name = 'Frequency Bias'
-    test_stat_plot_name = plot_util.get_stat_plot_name(logger, stat)
-    assert(test_stat_plot_name == expected_stat_plot_name)
-
-
-@pytest.mark.plotting
-def test_calculate_stat():
-    # Independently test calculating
-    # statistic values
-    date_base = datetime.datetime(2019, 1, 1)
-    date_array = np.array(
-        [date_base + datetime.timedelta(days=i) for i in range(5)]
-    )
-    expected_stat_file_dates = []
-    for date in date_array:
-        dt = date.time()
-        expected_stat_file_dates.append(
-            date.strftime('%Y%m%d_%H%M%S')
-        )
-    model_data_index = pd.MultiIndex.from_product(
-        [['MODEL_TEST'], expected_stat_file_dates],
-        names=['model_plot_name', 'dates']
-    )
-    model_data_array = np.array([
-        [3600, 5525.75062, 5525.66493, 30615218.26089, 30615764.49722,
-         30614724.90979, 5.06746],
-        [3600, 5519.11108, 5519.1014, 30549413.45946, 30549220.68868,
-         30549654.24048, 5.12344],
-        [3600, 5516.80228, 5516.79513, 30522742.16484, 30522884.89927,
-         30522660.30975, 5.61752],
-        [3600, 5516.93924, 5517.80544, 30525709.03932, 30520984.50965,
-         30530479.99675, 4.94325],
-        [3600, 5514.52274, 5514.68224, 30495695.82208, 30494633.24046,
-         30496805.48259, 5.20369]
-    ])
-    model_data = pd.DataFrame(model_data_array, index=model_data_index,
-                              columns=[ 'TOTAL', 'FBAR', 'OBAR', 'FOBAR',
-                                        'FFBAR', 'OOBAR', 'MAE' ])
-    # Test 1
-    stat = 'bias'
-    expected_stat_values_array = np.array([[[(5525.75062 - 5525.66493),
-                                             (5519.11108 - 5519.1014),
-                                             (5516.80228 - 5516.79513),
-                                             (5516.93924 - 5517.80544),
-                                             (5514.52274 - 5514.68224)
-                                             ]]])
-    expected_stat_values = pd.Series(expected_stat_values_array[0,0,:],
-                                     index=model_data_index)
-    expected_stat_plot_name = 'Bias'
-    test_stat_values, test_stat_values_array, test_stat_plot_name = (
-        plot_util.calculate_stat(logger, model_data, stat)
-    )
-    assert(test_stat_values.equals(expected_stat_values))
-    assert(len(test_stat_values_array[0,0,:]) ==
-           len(expected_stat_values_array[0,0,:]))
-    for l in range(len(test_stat_values_array[0,0,:])):
-        assert(test_stat_values_array[0,0,l] ==
-               expected_stat_values_array[0,0,l])
-    assert(test_stat_plot_name == expected_stat_plot_name)
-    # Test 2
-    stat = 'fbar_obar'
-    expected_stat_values_array = np.array([[[5525.75062, 5519.11108,
-                                             5516.80228, 5516.93924,
-                                             5514.52274]],
-                                           [[5525.66493, 5519.1014,
-                                             5516.79513, 5517.80544,
-                                             5514.68224
-                                             ]]])
-    expected_stat_values = pd.DataFrame(expected_stat_values_array[:,0,:].T,
-                                        index=model_data_index,
-                                        columns=[ 'FBAR', 'OBAR' ])
-    expected_stat_plot_name = 'Forecast and Observation Averages'
-    test_stat_values, test_stat_values_array, test_stat_plot_name = (
-        plot_util.calculate_stat(logger, model_data, stat)
-    )
-    assert(test_stat_values.equals(expected_stat_values))
-    assert(len(test_stat_values_array[0,0,:]) ==
-           len(expected_stat_values_array[0,0,:]))
-    for l in range(len(test_stat_values_array[0,0,:])):
-        assert(test_stat_values_array[0,0,l] ==
-               expected_stat_values_array[0,0,l])
-    assert(len(test_stat_values_array[1,0,:]) ==
-           len(expected_stat_values_array[1,0,:]))
-    for l in range(len(test_stat_values_array[1,0,:])):
-        assert(test_stat_values_array[1,0,l] ==
-               expected_stat_values_array[1,0,l])
-    assert(test_stat_plot_name == expected_stat_plot_name)
diff --git a/internal/tests/pytests/pytest.ini b/internal/tests/pytests/pytest.ini
index 8630509ec..e9f3dd09e 100644
--- a/internal/tests/pytests/pytest.ini
+++ b/internal/tests/pytests/pytest.ini
@@ -4,6 +4,7 @@ markers =
     wrapper_a: custom marker for testing metplus/wrapper logic - A group
     wrapper_b: custom marker for testing metplus/wrapper logic - B group
    wrapper_c: custom marker for testing metplus/wrapper logic - C group
+    wrapper_d: custom marker for testing metplus/wrapper logic - D group
     wrapper: custom marker for testing metplus/wrapper logic - all others
     long: custom marker for tests that take a long time to run
     plotting: custom marker for tests that involve plotting
diff --git a/internal/tests/pytests/util/config_metplus/test_config_metplus.py b/internal/tests/pytests/util/config_metplus/test_config_metplus.py
index 07a32655f..8332aba14 100644
--- a/internal/tests/pytests/util/config_metplus/test_config_metplus.py
+++ b/internal/tests/pytests/util/config_metplus/test_config_metplus.py
@@ -995,8 +995,6 @@ def test_parse_var_list_py_embed_multi_levels(metplus_config, config_overrides,
                           'ASCII2NC',
                           'TCStat',
                           'TCPairs']),
-        # remove MakePlots from list
-        ('StatAnalysis, MakePlots', ['StatAnalysis']),
     ]
 )
 @pytest.mark.util
diff --git a/internal/tests/pytests/util/met_util/test_met_util.py b/internal/tests/pytests/util/met_util/test_met_util.py
index 7d0df8dd5..481d4f9d4 100644
--- a/internal/tests/pytests/util/met_util/test_met_util.py
+++ b/internal/tests/pytests/util/met_util/test_met_util.py
@@ -297,7 +297,6 @@ def test_get_lead_sequence_init_min_10(metplus_config):
         ('GempakToCFWrapper', 'gempak_to_cf_wrapper'),
         ('GenVxMaskWrapper', 'gen_vx_mask_wrapper'),
         ('GridStatWrapper', 'grid_stat_wrapper'),
-        ('MakePlotsWrapper', 'make_plots_wrapper'),
         ('MODEWrapper', 'mode_wrapper'),
         ('MTDWrapper', 'mtd_wrapper'),
         ('PB2NCWrapper', 'pb2nc_wrapper'),
@@ -331,21 +330,6 @@ def test_round_0p5(value, expected_result):
     assert util.round_0p5(value) == expected_result
 
 
-@pytest.mark.parametrize(
'expression, expected_result', [ - ('gt3', 'gt3'), - ('>3', 'gt3'), - ('le3.5', 'le3.5'), - ('<=3.5', 'le3.5'), - ('==4', 'eq4'), - ('!=3.5', 'ne3.5'), - ] -) -@pytest.mark.util -def test_comparison_to_letter_format(expression, expected_result): - assert util.comparison_to_letter_format(expression) == expected_result - - @pytest.mark.parametrize( 'skip_times_conf, expected_dict', [ ('"%d:30,31"', {'%d': ['30','31']}), diff --git a/internal/tests/pytests/util/string_manip/test_util_string_manip.py b/internal/tests/pytests/util/string_manip/test_util_string_manip.py index f21841583..fc78f5d46 100644 --- a/internal/tests/pytests/util/string_manip/test_util_string_manip.py +++ b/internal/tests/pytests/util/string_manip/test_util_string_manip.py @@ -121,25 +121,84 @@ def test_getlist_int(): ['2']), ('begin_end_incr(0,2,1), begin_end_incr(3,9,3)', - ['0','1','2','3','6','9']), + ['0', '1', '2', '3', '6', '9']), ('mem_begin_end_incr(0,2,1), mem_begin_end_incr(3,9,3)', - ['mem_0','mem_1','mem_2','mem_3','mem_6','mem_9']), + ['mem_0', 'mem_1', 'mem_2', 'mem_3', 'mem_6', 'mem_9']), ('mem_begin_end_incr(0,2,1,3), mem_begin_end_incr(3,12,3,3)', - ['mem_000', 'mem_001', 'mem_002', 'mem_003', 'mem_006', 'mem_009', 'mem_012']), + ['mem_000', 'mem_001', 'mem_002', 'mem_003', + 'mem_006', 'mem_009', 'mem_012']), - ('begin_end_incr(0,10,2)H, 12', [ '0H', '2H', '4H', '6H', '8H', '10H', '12']), + ('begin_end_incr(0,10,2)H, 12', + ['0H', '2H', '4H', '6H', '8H', '10H', '12']), - ('begin_end_incr(0,10800,3600)S, 4H', [ '0S', '3600S', '7200S', '10800S', '4H']), + ('begin_end_incr(0,10800,3600)S, 4H', + ['0S', '3600S', '7200S', '10800S', '4H']), ('data.{init?fmt=%Y%m%d%H?shift=begin_end_incr(0, 3, 3)H}.ext', ['data.{init?fmt=%Y%m%d%H?shift=0H}.ext', 'data.{init?fmt=%Y%m%d%H?shift=3H}.ext', ]), - + ('"%m:begin_end_incr(3,11,1)", "%m%d:0229"', + ['%m:3', '%m:4', '%m:5', '%m:6', '%m:7', '%m:8', '%m:9', '%m:10', + '%m:11', '%m%d:0229']) ] ) @pytest.mark.util def test_getlist_begin_end_incr(list_string, output_list): assert getlist(list_string) == output_list + + +@pytest.mark.parametrize( + 'input, add_quotes, expected_output', [ + (['a', 'b', 'c'], None, '"a", "b", "c"'), + (['0', '1', '2'], None, '"0", "1", "2"'), + (['a', 'b', 'c'], True, '"a", "b", "c"'), + (['0', '1', '2'], True, '"0", "1", "2"'), + (['a', 'b', 'c'], False, 'a, b, c'), + (['0', '1', '2'], False, '0, 1, 2'), + (['"a"', '"b"', '"c"'], True, '"a", "b", "c"'), + (['"0"', '"1"', '"2"'], True, '"0", "1", "2"'), + ] +) +@pytest.mark.util +def test_list_to_str(input, add_quotes, expected_output): + if add_quotes is None: + assert list_to_str(input) == expected_output + else: + assert list_to_str(input, add_quotes=add_quotes) == expected_output + + +@pytest.mark.parametrize( + 'expression, expected_result', [ + ('gt3', 'gt3'), + ('>3', 'gt3'), + ('le3.5', 'le3.5'), + ('<=3.5', 'le3.5'), + ('==4', 'eq4'), + ('!=3.5', 'ne3.5'), + ] +) +@pytest.mark.util +def test_comparison_to_letter_format(expression, expected_result): + assert comparison_to_letter_format(expression) == expected_result + + +@pytest.mark.parametrize( + 'expression, expected_result', [ + ('>1', 'gt1'), + ('>=0.2', 'ge0.2'), + ('<30', 'lt30'), + ('<=0.04', 'le0.04'), + ('==5', 'eq5'), + ('!=0.06', 'ne0.06'), + ('>0.05, gt0.05, >=1, ge1, <5, lt5, <=10, le10, ==15, eq15, !=20, ne20', + 'gt0.05,gt0.05,ge1,ge1,lt5,lt5,le10,le10,eq15,eq15,ne20,ne20'), + ('<805, <1609, <4828, <8045, >=8045, <16090', + 'lt805,lt1609,lt4828,lt8045,ge8045,lt16090'), + ] +) +@pytest.mark.util +def 
test_format_thresh(expression, expected_result): + assert format_thresh(expression) == expected_result diff --git a/internal/tests/pytests/util/time_util/test_time_util.py b/internal/tests/pytests/util/time_util/test_time_util.py index 86da6b314..6d133bd67 100644 --- a/internal/tests/pytests/util/time_util/test_time_util.py +++ b/internal/tests/pytests/util/time_util/test_time_util.py @@ -8,6 +8,25 @@ from metplus.util import time_util +@pytest.mark.parametrize( + 'input_str, expected_output', [ + ('', []), + ('0,1,2,3', ['000000', '010000', '020000', '030000']), + ('12, 24', ['120000', '240000']), + ('196', ['1960000']), + ('12H, 24H', ['120000', '240000']), + ('45M', ['004500']), + ('42S', ['000042']), + ('24, 48, 72, 96, 120, 144, 168, 192, 216, 240', + ['240000', '480000', '720000', '960000', '1200000', + '1440000', '1680000', '1920000', '2160000', '2400000']), + ] +) +@pytest.mark.wrapper_d +def test_get_met_time_list(input_str, expected_output): + assert time_util.get_met_time_list(input_str) == expected_output + + @pytest.mark.parametrize( 'rd, seconds, time_string, time_letter_only, hours', [ (relativedelta(seconds=1), 1, '1 second', '1S', 0), diff --git a/internal/tests/pytests/wrappers/stat_analysis/test.conf b/internal/tests/pytests/wrappers/stat_analysis/test.conf index de84a8897..6bdcc276d 100644 --- a/internal/tests/pytests/wrappers/stat_analysis/test.conf +++ b/internal/tests/pytests/wrappers/stat_analysis/test.conf @@ -45,7 +45,8 @@ MODEL1 = MODEL_TEST MODEL1_REFERENCE_NAME = MODELTEST MODEL1_OBTYPE = MODEL_TEST_ANL -STAT_ANALYSIS_CONFIG_FILE = {CONFIG_DIR}/STATAnalysisConfig +STAT_ANALYSIS_CONFIG_FILE = {PARM_BASE}/met_config/STATAnalysisConfig_wrapped + # stat_analysis job info STAT_ANALYSIS_JOB_NAME = filter # if using -dump_row, put in JOBS_ARGS "-dump_row [dump_row_file]" diff --git a/internal/tests/pytests/wrappers/stat_analysis/test_plotting.conf b/internal/tests/pytests/wrappers/stat_analysis/test_plotting.conf deleted file mode 100644 index d0b462f43..000000000 --- a/internal/tests/pytests/wrappers/stat_analysis/test_plotting.conf +++ /dev/null @@ -1,127 +0,0 @@ -[dir] -# Dirs for StatAnalysis -STAT_ANALYSIS_OUTPUT_DIR = {OUTPUT_BASE}/plotting/stat_analysis -# Dirs for MakePlots -MAKE_PLOTS_SCRIPTS_DIR = {METPLUS_BASE}/ush/plotting_scripts -MAKE_PLOTS_INPUT_DIR = {STAT_ANALYSIS_OUTPUT_DIR} -MAKE_PLOTS_OUTPUT_DIR = {OUTPUT_BASE}/plotting/make_plots -# Location of configuration files used by MET applications -CONFIG_DIR = {METPLUS_BASE}/internal/tests/plotting/met_config - -MODEL1_STAT_ANALYSIS_LOOKIN_DIR = {OUTPUT_BASE}/stat_analysis -MODEL2_STAT_ANALYSIS_LOOKIN_DIR = {OUTPUT_BASE}/stat_analysis - -[filename_templates] -STAT_ANALYSIS_DUMP_ROW_TEMPLATE = {model?fmt=%s}_{obtype?fmt=%s}_valid{valid_beg?fmt=%Y%m%d}to{valid_end?fmt=%Y%m%d}_valid{valid_hour_beg?fmt=%H%M}to{valid_hour_end?fmt=%H%M}Z_init{init_hour_beg?fmt=%H%M}to{init_hour_end?fmt=%H%M}Z_fcst_lead{fcst_lead?fmt=%s}_fcst{fcst_var?fmt=%s}{fcst_level?fmt=%s}{fcst_thresh?fmt=%s}{interp_mthd?fmt=%s}_obs{obs_var?fmt=%s}{obs_level?fmt=%s}{obs_thresh?fmt=%s}{interp_mthd?fmt=%s}_vxmask{vx_mask?fmt=%s}_dump_row.stat - -[config] -# LOOP_METHOD must be set to processes for plotting -LOOP_ORDER = processes -PROCESS_LIST = StatAnalysis, MakePlots - -# Date treatment, either VALID or INIT -DATE_TYPE = VALID -# blank or YYYYmmDD format -VALID_BEG = 20190101 -VALID_END = 20190101 -# blank for HH format (two digit hour format, ex. 
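Note: the test_get_met_time_list cases added in test_time_util.py above pin down MET's [H]HHMMSS time strings, where hours are zero-padded to at least two digits and may grow beyond them ('196' -> '1960000'). A hedged sketch of that formatting for a single token (hypothetical helper, not the time_util implementation; list splitting and empty input are handled upstream):

    # Sketch only: format one lead-time token as a MET [H]HHMMSS string.
    def to_met_hhmmss_sketch(token):
        number = int(token.rstrip('HMS'))
        if token.endswith('M'):
            return '00%02d00' % number   # '45M' -> '004500'
        if token.endswith('S'):
            return '0000%02d' % number   # '42S' -> '000042'
        return '%02d0000' % number       # '12H' or '12' -> '120000'

    assert to_met_hhmmss_sketch('196') == '1960000'
    assert to_met_hhmmss_sketch('45M') == '004500'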
06) -FCST_VALID_HOUR_LIST = 00, 06, 12, 18 -FCST_INIT_HOUR_LIST = 00, 06, 12, 18 -OBS_VALID_HOUR_LIST = -OBS_INIT_HOUR_LIST = -GROUP_LIST_ITEMS = FCST_INIT_HOUR_LIST -LOOP_LIST_ITEMS = FCST_VALID_HOUR_LIST - -# Models to process -# EACH MODEL IS LOOPED OVER -# MODELn is the model name to filter for in -# stat files [required] -# MODELn_OBTYPE is the observation name -# to filter for the .stat files -# [required] -# MODELn_STAT_ANALYSIS_LOOKIN_DIR is the directory to search for -# the .stat files in, wildcards (*) -# are okay to search for multiple -# directories and templates like -# {valid?fmt=%H%M%S} [required] -# MODELn_REFERENCE_NAME is a reference name for MODELn, defaults to -# MODELn, it can be used in the file template names -# [optional] -MODEL1 = MODEL_TEST1 -MODEL1_OBTYPE = MODEL_TEST1_ANL -MODEL1_REFERENCE_NAME = MODEL_TEST1 - -MODEL2 = TEST2_MODEL -MODEL2_OBTYPE = ANLYS2 -MODEL2_REFERENCE_NAME = TEST2_MODEL - -# Variables and levels to process -# EACH VARIABLE IS LOOPED OVER FOR ITS -# LEVELS, THRESHOLDS, AND IF APPLICABLE -# FOURIER WAVE DECOMPOSITION -# FCST_VARn_NAME and FCST_VARn_LEVELS required -# optional: FCST_VARn_THRESH, FCST_VARn_OPTIONS, FCST_VARn_UNITS, -# OBS_VARn_NAME, OBS_VARn_LEVELS, -# OBS_VARn_THRESH, OBS_VARn_OPTIONS, OBS_VARn_UNITS, -# VARn_FOURIER_DECOMP, VARn_WAVE_NUM_LIST -# if OBS_VARn variables not listed they are filled with FCST_VARn values -BOTH_VAR1_NAME = HGT -BOTH_VAR1_LEVELS = P1000, P850 -#FCST_VAR1_NAME = HGT -#FCST_VAR1_LEVELS = P1000, P850 - -STAT_ANALYSIS_CONFIG_FILE = {CONFIG_DIR}/STATAnalysisConfig - -STAT_ANALYSIS_JOB_NAME = filter -STAT_ANALYSIS_JOB_ARGS = -dump_row [dump_row_file] - -# REQUIRED LISTS -MODEL_LIST = {MODEL1}, {MODEL2} -FCST_LEAD_LIST = 24, 48 -VX_MASK_LIST = NHX -# OPTIONAL LISTS -DESC_LIST = -OBS_LEAD_LIST = -INTERP_MTHD_LIST = -INTERP_PNTS_LIST = -COV_THRESH_LIST = -ALPHA_LIST = - -# Plotting options -# MAKE_PLOTS_VERIF_CASE, MAKE_PLOTS_VERIF_TYPE - -# use to create plots for various verification -# use case and types. This produces plots like -# EMC uses for verification. -# MAKE_PLOTS_VERIF_CASE: grid2grid -# > MAKE_PLOTS_VERIF_TYPE: anom, pres, sfc -# MAKE_PLOTS_VERIF_CASE: grid2obs -# > MAKE_PLOTS_VERIF_TYPE: conus_sfc, upper_air -# MAKE_PLOTS_VERIF_CASE: precip -# > MAKE_PLOTS_VERIF_TYPE: [can be any string] -#-------------- OR USE -------------- -# MAKE_PLOTS_USER_SCRIPT_LIST - allows the user to -# give METplus user created scripts. Follow the -# plotting scripts in METplus as an example of -# how to create your own. 
The scripts should be -# located and wherever MAKE_PLOTS_SCRIPTS_DIR -# is set to -MAKE_PLOTS_VERIF_CASE = grid2grid -MAKE_PLOTS_VERIF_TYPE = pres -# LINE_TYPE_LIST = SL1L2, VL1L2 options: bias rms msess rsd rmse_md rmse_pv pcor, fbar, fbar_obar -# LINE_TYPE_LIST = SAL1L2, VAL1L2 options: acc -# LINE_TYPE_LIST = VCNT options: bias, fbar, fbar_obar, speed_err, dir_err, rmsve, vdiff_speed, vdiff_dir, -# rsd, fbar_speed, fbar_dir, fbar_obar_speed, fbar_obar_dir -# LINE_TYPE_LIST = CTC options: orate, baser, frate, orate_frate, baser_frate, accuracy, bias, fbias, pod, -# hrate, pofd, farate, podn, faratio, csi, ts, gss, ets, hk, tss, pss, hs -LINE_TYPE_LIST = SL1L2, VL1L2 -MAKE_PLOTS_STATS_LIST = bias, rmse, msess, rsd, rmse_md, rmse_pv, pcor -# Average Calculation Method -# options: MEAN, MEDIAN, AGGREGATION -MAKE_PLOTS_AVERAGE_METHOD = MEAN -# Confidence Interval Calculation Method -# options: EMC, EMC_MONTE_CARLO, NONE -MAKE_PLOTS_CI_METHOD = EMC -# Grid verification done on -MAKE_PLOTS_VERIF_GRID = G002 -# Do event equalization, True, don't do event equalization, False -MAKE_PLOTS_EVENT_EQUALIZATION = False diff --git a/internal/tests/pytests/wrappers/stat_analysis/test_stat_analysis.py b/internal/tests/pytests/wrappers/stat_analysis/test_stat_analysis.py index 8a5755d26..e57017b9c 100644 --- a/internal/tests/pytests/wrappers/stat_analysis/test_stat_analysis.py +++ b/internal/tests/pytests/wrappers/stat_analysis/test_stat_analysis.py @@ -4,12 +4,17 @@ import os import datetime +import pprint +from dateutil.relativedelta import relativedelta from metplus.wrappers.stat_analysis_wrapper import StatAnalysisWrapper from metplus.util import handle_tmp_dir METPLUS_BASE = os.getcwd().split('/internal')[0] +TEST_CONF = os.path.join(os.path.dirname(__file__), 'test.conf') + +pp = pprint.PrettyPrinter() def stat_analysis_wrapper(metplus_config): """! 
Returns a default StatAnalysisWrapper with /path/to entries in the @@ -19,14 +24,302 @@ def stat_analysis_wrapper(metplus_config): # Default, empty StatAnalysisWrapper with some configuration values set # to /path/to: - extra_configs = [] - extra_configs.append(os.path.join(os.path.dirname(__file__), 'test.conf')) + extra_configs = [TEST_CONF] config = metplus_config(extra_configs) handle_tmp_dir(config) return StatAnalysisWrapper(config) -@pytest.mark.plotting +def _set_config_dict_values(): + config_dict = {} + config_dict['FCST_VALID_HOUR'] = '' + config_dict['FCST_VAR'] = '' + config_dict['FCST_LEVEL'] = '' + config_dict['INTERP_MTHD'] = '' + config_dict['MODEL'] = '"MODEL_TEST"' + config_dict['VX_MASK'] = '' + config_dict['OBS_INIT_HOUR'] = '' + config_dict['COV_THRESH'] = '' + config_dict['OBS_UNITS'] = '' + config_dict['FCST_THRESH'] = '' + config_dict['OBS_VAR'] = '' + config_dict['FCST_INIT_HOUR'] = '' + config_dict['INTERP_PNTS'] = '' + config_dict['FCST_LEAD'] = '' + config_dict['LINE_TYPE'] = '' + config_dict['FCST_UNITS'] = '' + config_dict['DESC'] = '' + config_dict['OBS_LEAD'] = '' + config_dict['OBS_THRESH'] = '' + config_dict['OBTYPE'] = '"MODEL_TEST_ANL"' + config_dict['OBS_VALID_HOUR'] = '' + config_dict['ALPHA'] = '' + config_dict['OBS_LEVEL'] = '' + return config_dict + + +def set_minimum_config_settings(config): + # set config variables to prevent command from running and bypass check + # if input files actually exist + config.set('config', 'DO_NOT_RUN_EXE', True) + config.set('config', 'INPUT_MUST_EXIST', False) + + # set process and time config variables + config.set('config', 'PROCESS_LIST', 'StatAnalysis') + config.set('config', 'LOOP_BY', 'INIT') + config.set('config', 'INIT_TIME_FMT', '%Y%m%d') + config.set('config', 'INIT_BEG', '20221014') + config.set('config', 'INIT_END', '20221014') + config.set('config', 'STAT_ANALYSIS_OUTPUT_DIR', + '{OUTPUT_BASE}/StatAnalysis/output') + config.set('config', 'STAT_ANALYSIS_OUTPUT_TEMPLATE', + '{valid?fmt=%Y%m%d%H}') + config.set('config', 'GROUP_LIST_ITEMS', 'DESC_LIST') + config.set('config', 'LOOP_LIST_ITEMS', 'MODEL_LIST') + config.set('config', 'MODEL_LIST', 'MODEL_A') + config.set('config', 'STAT_ANALYSIS_JOB1', '-job filter') + config.set('config', 'MODEL1', 'MODEL_A') + config.set('config', 'MODEL1_STAT_ANALYSIS_LOOKIN_DIR', + '{METPLUS_BASE}/internal/tests/data/stat_data') + + # not required, can be unset for certain tests + config.set('config', 'STAT_ANALYSIS_CONFIG_FILE', + '{PARM_BASE}/met_config/STATAnalysisConfig_wrapped') + + +@pytest.mark.parametrize( + 'config_overrides, expected_env_vars', [ + # 0 + ({}, {}), + # 1 - fcst valid beg + ({'STAT_ANALYSIS_FCST_VALID_BEG': '{fcst_valid_beg?fmt=%Y%m%d_%H%M%S}'}, + {'METPLUS_FCST_VALID_BEG': 'fcst_valid_beg = "20221014_000000";'}), + # 2 - fcst valid end + ({'STAT_ANALYSIS_FCST_VALID_END': '{fcst_valid_end?fmt=%Y%m%d_%H%M%S}'}, + {'METPLUS_FCST_VALID_END': 'fcst_valid_end = "20221015_235959";'}), + # 3 - fcst valid end with shift + ({'STAT_ANALYSIS_FCST_VALID_END': '{fcst_valid_end?fmt=%Y%m%d?shift=1d}_000000'}, + {'METPLUS_FCST_VALID_END': 'fcst_valid_end = "20221016_000000";'}), + # 4 - obs valid beg + ({'STAT_ANALYSIS_OBS_VALID_BEG': '{obs_valid_beg?fmt=%Y%m%d_%H%M%S}'}, + {'METPLUS_OBS_VALID_BEG': 'obs_valid_beg = "20221014_000000";'}), + # 5 - obs valid end + ({'STAT_ANALYSIS_OBS_VALID_END': '{obs_valid_end?fmt=%Y%m%d_%H%M%S}'}, + {'METPLUS_OBS_VALID_END': 'obs_valid_end = "20221015_235959";'}), + # 6 fcst init beg + ({'STAT_ANALYSIS_FCST_INIT_BEG': 
'{fcst_init_beg?fmt=%Y%m%d_%H%M%S}'}, + {'METPLUS_FCST_INIT_BEG': 'fcst_init_beg = "20221014_000000";'}), + # 7 - fcst init end + ({'STAT_ANALYSIS_FCST_INIT_END': '{fcst_init_end?fmt=%Y%m%d_%H%M%S}'}, + {'METPLUS_FCST_INIT_END': 'fcst_init_end = "20221015_235959";'}), + # 8 - fcst valid hour single + ({'FCST_VALID_HOUR_LIST': '12'}, + {'METPLUS_FCST_VALID_HOUR': 'fcst_valid_hour = ["120000"];'}), + # 9 - fcst valid hour multiple + ({'FCST_VALID_HOUR_LIST': '12,108'}, + {'METPLUS_FCST_VALID_HOUR': 'fcst_valid_hour = ["120000", "1080000"];'}), + # 10 - obs init beg + ({'STAT_ANALYSIS_OBS_INIT_BEG': '{obs_init_beg?fmt=%Y%m%d_%H%M%S}'}, + {'METPLUS_OBS_INIT_BEG': 'obs_init_beg = "20221014_000000";'}), + # 11 - obs init end + ({'STAT_ANALYSIS_OBS_INIT_END': '{obs_init_end?fmt=%Y%m%d_%H%M%S}'}, + {'METPLUS_OBS_INIT_END': 'obs_init_end = "20221015_235959";'}), + # 12 - generic valid beg + ({'STAT_ANALYSIS_VALID_BEG': '{fcst_valid_beg?fmt=%Y%m%d}_12'}, + {'METPLUS_FCST_VALID_BEG': 'fcst_valid_beg = "20221014_12";', + 'METPLUS_OBS_VALID_BEG': 'obs_valid_beg = "20221014_12";'}), + # 13 - generic valid end + ({'STAT_ANALYSIS_VALID_END': '{fcst_valid_end?fmt=%Y%m%d}_12'}, + {'METPLUS_FCST_VALID_END': 'fcst_valid_end = "20221015_12";', + 'METPLUS_OBS_VALID_END': 'obs_valid_end = "20221015_12";'}), + # 14 - generic init beg + ({'STAT_ANALYSIS_INIT_BEG': '{fcst_init_beg?fmt=%Y%m%d}_12'}, + {'METPLUS_FCST_INIT_BEG': 'fcst_init_beg = "20221014_12";', + 'METPLUS_OBS_INIT_BEG': 'obs_init_beg = "20221014_12";'}), + # 15 - generic init end + ({'STAT_ANALYSIS_INIT_END': '{fcst_init_end?fmt=%Y%m%d}_12'}, + {'METPLUS_FCST_INIT_END': 'fcst_init_end = "20221015_12";', + 'METPLUS_OBS_INIT_END': 'obs_init_end = "20221015_12";'}), + ] +) +@pytest.mark.wrapper_d +def test_valid_init_env_vars(metplus_config, config_overrides, + expected_env_vars): + config = metplus_config() + set_minimum_config_settings(config) + config.set('config', 'INIT_END', '20221015') + for key, value in config_overrides.items(): + config.set('config', key, value) + + wrapper = StatAnalysisWrapper(config) + assert wrapper.isOK + + runtime_settings_dict_list = wrapper._get_all_runtime_settings() + assert runtime_settings_dict_list + + first_runtime_only = [runtime_settings_dict_list[0]] + wrapper._run_stat_analysis_job(first_runtime_only) + all_cmds = wrapper.all_commands + + print(f"ALL COMMANDS: {all_cmds}") + _, actual_env_vars = all_cmds[0] + + env_var_keys = [item for item in wrapper.WRAPPER_ENV_VAR_KEYS + if 'BEG' in item or 'END' in item] + for env_var_key in env_var_keys: + match = next((item for item in actual_env_vars if + item.startswith(env_var_key)), None) + assert match is not None + actual_value = match.split('=', 1)[1] + print(f"ENV VAR: {env_var_key}") + assert expected_env_vars.get(env_var_key, '') == actual_value + + +@pytest.mark.parametrize( + 'config_overrides, expected_result', [ + ({}, True), + ({'STAT_ANALYSIS_JOB1': '-job filter -dump_row [dump_row_file]'}, + False), + ({'STAT_ANALYSIS_JOB1': '-job filter -dump_row [dump_row_file]', + 'MODEL1_STAT_ANALYSIS_DUMP_ROW_TEMPLATE': 'some/template'}, + True), + ({'STAT_ANALYSIS_JOB1': '-job filter -out_stat [out_stat_file]'}, + False), + ({'STAT_ANALYSIS_JOB1': '-job filter -out_stat [out_stat_file]', + 'MODEL1_STAT_ANALYSIS_OUT_STAT_TEMPLATE': 'some/template'}, + True), + ({'STAT_ANALYSIS_JOB1': '-job filter -dump_row [dump_row_file]', + 'STAT_ANALYSIS_JOB2': '-job filter -out_stat [out_stat_file]', + 'MODEL1_STAT_ANALYSIS_DUMP_ROW_TEMPLATE': 'some/template'}, + False), + 
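Note: the expected METPLUS_* values above follow the MET config assignment syntax that the wrapped config file reads from the environment, i.e. name = "value"; . A one-line sketch of that rendering (hypothetical helper, not the wrapper code):

    # Sketch only: render a MET config assignment string.
    def met_config_assignment_sketch(name, value):
        return '%s = "%s";' % (name, value)

    assert (met_config_assignment_sketch('fcst_valid_beg', '20221014_000000')
            == 'fcst_valid_beg = "20221014_000000";')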
({'STAT_ANALYSIS_JOB1': '-job filter -dump_row [dump_row_file]', + 'STAT_ANALYSIS_JOB2': '-job filter -out_stat [out_stat_file]', + 'MODEL1_STAT_ANALYSIS_DUMP_ROW_TEMPLATE': 'some/template', + 'MODEL1_STAT_ANALYSIS_OUT_STAT_TEMPLATE': 'some/template'}, + True), + ] +) +@pytest.mark.wrapper_d +def test_check_required_job_template(metplus_config, config_overrides, + expected_result): + config = metplus_config() + set_minimum_config_settings(config) + for key, value in config_overrides.items(): + config.set('config', key, value) + + wrapper = StatAnalysisWrapper(config) + print(wrapper.c_dict['JOBS']) + print(wrapper.c_dict['MODEL_INFO_LIST']) + assert wrapper.isOK == expected_result + + +@pytest.mark.parametrize( + 'c_dict, expected_result', [ + # 0 + ({ + 'GROUP_LIST_ITEMS': ['MODEL_LIST', 'FCST_LEAD_LIST'], + 'LOOP_LIST_ITEMS': [], + 'MODEL_LIST': ['"MODEL1"', '"MODEL2"'], + 'FCST_LEAD_LIST': ['0', '24'], + }, + [ + {'MODEL': '"MODEL1", "MODEL2"', + 'FCST_LEAD': '0, 24' + } + ] + ), + # 1 + ({ + 'GROUP_LIST_ITEMS': ['FCST_LEAD_LIST'], + 'LOOP_LIST_ITEMS': ['MODEL_LIST'], + 'MODEL_LIST': ['"MODEL1"', '"MODEL2"'], + 'FCST_LEAD_LIST': ['0', '24'], + }, + [ + {'MODEL': '"MODEL1"', 'FCST_LEAD': '0, 24'}, + {'MODEL': '"MODEL2"', 'FCST_LEAD': '0, 24'}, + ] + ), + # 2 + ({ + 'GROUP_LIST_ITEMS': [], + 'LOOP_LIST_ITEMS': ['MODEL_LIST', 'FCST_LEAD_LIST'], + 'MODEL_LIST': ['"MODEL1"', '"MODEL2"'], + 'FCST_LEAD_LIST': ['0', '24'], + }, + [ + {'MODEL': '"MODEL1"', 'FCST_LEAD': '0'}, + {'MODEL': '"MODEL2"', 'FCST_LEAD': '0'}, + {'MODEL': '"MODEL1"', 'FCST_LEAD': '24'}, + {'MODEL': '"MODEL2"', 'FCST_LEAD': '24'}, + ] + ), + # 3 + ({ + 'GROUP_LIST_ITEMS': ['DESC_LIST'], + 'LOOP_LIST_ITEMS': ['MODEL_LIST', 'FCST_LEAD_LIST', + 'FCST_THRESH_LIST'], + 'MODEL_LIST': ['"MODEL1"', '"MODEL2"'], + 'FCST_LEAD_LIST': ['0', '24'], + 'FCST_THRESH_LIST': ['gt3', 'ge4'], + 'DESC_LIST': ['"ONE_DESC"'], + }, + [ + {'DESC': '"ONE_DESC"', + 'FCST_LEAD': '0', 'FCST_THRESH': 'gt3', 'MODEL': '"MODEL1"'}, + {'DESC': '"ONE_DESC"', + 'FCST_LEAD': '0', 'FCST_THRESH': 'gt3', 'MODEL': '"MODEL2"'}, + {'DESC': '"ONE_DESC"', + 'FCST_LEAD': '0', 'FCST_THRESH': 'ge4', 'MODEL': '"MODEL1"'}, + {'DESC': '"ONE_DESC"', + 'FCST_LEAD': '0', 'FCST_THRESH': 'ge4', 'MODEL': '"MODEL2"'}, + {'DESC': '"ONE_DESC"', + 'FCST_LEAD': '24', 'FCST_THRESH': 'gt3', 'MODEL': '"MODEL1"'}, + {'DESC': '"ONE_DESC"', + 'FCST_LEAD': '24', 'FCST_THRESH': 'gt3', 'MODEL': '"MODEL2"'}, + {'DESC': '"ONE_DESC"', + 'FCST_LEAD': '24', 'FCST_THRESH': 'ge4', 'MODEL': '"MODEL1"'}, + {'DESC': '"ONE_DESC"', + 'FCST_LEAD': '24', 'FCST_THRESH': 'ge4', 'MODEL': '"MODEL2"'}, + ] + ), + ] +) +@pytest.mark.wrapper_d +def test_get_runtime_settings(metplus_config, c_dict, expected_result): + config = metplus_config() + wrapper = StatAnalysisWrapper(config) + + runtime_settings = wrapper._get_runtime_settings(c_dict) + pp.pprint(runtime_settings) + assert runtime_settings == expected_result + +@pytest.mark.parametrize( + 'list_name, config_overrides, expected_value', [ + ('FCST_LEAD_LIST', {'FCST_LEAD_LIST': '12'}, ['12']), + ('FCST_LEAD_LIST', {'FCST_LEAD_LIST': '12,24'}, ['12', '24']), + ('FCST_LEAD_LIST', + {'FCST_LEAD_LIST1': '12,24', 'FCST_LEAD_LIST2': '48,96'}, + ['12,24', '48,96']), + ('FCST_LEAD_LIST', + {'FCST_LEAD_LIST1': 'begin_end_incr(12,24,12)', + 'FCST_LEAD_LIST2': 'begin_end_incr(48,96,48)'}, + ['12,24', '48,96']), + ] +) +@pytest.mark.wrapper_d +def test_format_conf_list(metplus_config, list_name, config_overrides, + expected_value): + config = metplus_config() + for key, 
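Note: the test_get_runtime_settings cases above exercise the GROUP/LOOP list split: grouped lists collapse into one comma-joined value shared by every run, while looped lists multiply the runs out to their full cross product. A stand-alone sketch of that expansion (hypothetical function, not the wrapper's implementation; the iteration order here is illustrative only):

    # Sketch only: expand grouped/looped lists into runtime settings dicts.
    def expand_runtime_settings_sketch(c_dict):
        # Grouped lists become one comma-joined value in every dict.
        base = {name.replace('_LIST', ''): ', '.join(c_dict[name])
                for name in c_dict['GROUP_LIST_ITEMS']}
        settings = [base]
        # Each looped list multiplies the dicts by its number of items.
        for name in c_dict['LOOP_LIST_ITEMS']:
            settings = [dict(entry, **{name.replace('_LIST', ''): value})
                        for value in c_dict[name] for entry in settings]
        return settings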
value in config_overrides.items(): + config.set('config', key, value) + + wrapper = StatAnalysisWrapper(config) + + assert wrapper._format_conf_list(list_name) == expected_value + + +@pytest.mark.wrapper_d def test_get_command(metplus_config): # Independently test that the stat_analysis command # is being put together correctly with @@ -36,17 +329,17 @@ def test_get_command(metplus_config): # Test 1 expected_command = ( st.config.getdir('MET_BIN_DIR', '') - +'/stat_analysis ' + +'/stat_analysis -v 2 ' +'-lookin /path/to/lookin_dir ' +'-config /path/to/STATAnalysisConfig' ) - st.lookindir = '/path/to/lookin_dir' - st.c_dict['CONFIG_FILE'] = '/path/to/STATAnalysisConfig' + st.c_dict['LOOKIN_DIR'] = '/path/to/lookin_dir' + st.args.append('-config /path/to/STATAnalysisConfig') test_command = st.get_command() assert expected_command == test_command -@pytest.mark.plotting +@pytest.mark.wrapper_d def test_create_c_dict(metplus_config): # Independently test that c_dict is being created # and that the wrapper and config reader @@ -54,20 +347,23 @@ def test_create_c_dict(metplus_config): st = stat_analysis_wrapper(metplus_config) # Test 1 c_dict = st.create_c_dict() - assert(os.path.realpath(c_dict['CONFIG_FILE']) == (METPLUS_BASE+'/internal/tests/' - +'config/STATAnalysisConfig')) - assert(c_dict['OUTPUT_DIR'] == (st.config.getdir('OUTPUT_BASE') - +'/stat_analysis')) + + actual_config = os.path.join(METPLUS_BASE, 'parm', 'met_config', + 'STATAnalysisConfig_wrapped') + actual_outdir = os.path.join(st.config.getdir('OUTPUT_BASE'), + 'stat_analysis') + assert os.path.realpath(c_dict['CONFIG_FILE']) == actual_config + assert c_dict['OUTPUT_DIR'] == actual_outdir assert 'FCST_INIT_HOUR_LIST' in c_dict['GROUP_LIST_ITEMS'] - assert('FCST_VALID_HOUR_LIST' in c_dict['LOOP_LIST_ITEMS'] and - 'MODEL_LIST' in c_dict['LOOP_LIST_ITEMS']) + assert 'FCST_VALID_HOUR_LIST' in c_dict['LOOP_LIST_ITEMS'] + assert 'MODEL_LIST' in c_dict['LOOP_LIST_ITEMS'] assert c_dict['VAR_LIST'] == [] - assert c_dict['MODEL_LIST'] == ['MODEL_TEST'] + assert c_dict['MODEL_LIST'] == ['"MODEL_TEST"'] assert c_dict['DESC_LIST'] == [] assert c_dict['FCST_LEAD_LIST'] == [] assert c_dict['OBS_LEAD_LIST'] == [] - assert c_dict['FCST_VALID_HOUR_LIST'] == ['000000'] - assert c_dict['FCST_INIT_HOUR_LIST'] == ['000000', '060000', '120000', '180000'] + assert c_dict['FCST_VALID_HOUR_LIST'] == ['00'] + assert c_dict['FCST_INIT_HOUR_LIST'] == ['00', '06', '12', '18'] assert c_dict['OBS_VALID_HOUR_LIST'] == [] assert c_dict['OBS_INIT_HOUR_LIST'] == [] assert c_dict['VX_MASK_LIST'] == [] @@ -78,23 +374,7 @@ def test_create_c_dict(metplus_config): assert c_dict['LINE_TYPE_LIST'] == [] -@pytest.mark.plotting -def test_list_to_str(metplus_config): - # Independently test that a list of strings - # are being converted to a one - # string list correctly - st = stat_analysis_wrapper(metplus_config) - # Test 1 - expected_list = '"a", "b", "c"' - test_list = st.list_to_str([ 'a', 'b', 'c' ]) - assert(expected_list == test_list) - # Test 2 - expected_list = '"0", "1", "2"' - test_list = st.list_to_str([ '0', '1', '2' ]) - assert(expected_list == test_list) - - -@pytest.mark.plotting +@pytest.mark.wrapper_d def test_set_lists_as_loop_or_group(metplus_config): # Independently test that the lists that are set # in the config file are being set @@ -103,18 +383,18 @@ def test_set_lists_as_loop_or_group(metplus_config): # and those not set are set to GROUP_LIST_ITEMS st = stat_analysis_wrapper(metplus_config) # Test 1 - expected_lists_to_group_items = [ 
'FCST_INIT_HOUR_LIST', 'DESC_LIST', - 'FCST_LEAD_LIST', 'OBS_LEAD_LIST', - 'OBS_VALID_HOUR_LIST', - 'OBS_INIT_HOUR_LIST', 'FCST_VAR_LIST', - 'OBS_VAR_LIST', 'FCST_UNITS_LIST', - 'OBS_UNITS_LIST', 'FCST_LEVEL_LIST', - 'OBS_LEVEL_LIST', 'VX_MASK_LIST', - 'INTERP_MTHD_LIST', 'INTERP_PNTS_LIST', - 'FCST_THRESH_LIST', 'OBS_THRESH_LIST', - 'COV_THRESH_LIST', 'ALPHA_LIST', - 'LINE_TYPE_LIST' ] - expected_lists_to_loop_items = [ 'FCST_VALID_HOUR_LIST', 'MODEL_LIST' ] + expected_lists_to_group_items = ['FCST_INIT_HOUR_LIST', 'DESC_LIST', + 'FCST_LEAD_LIST', 'OBS_LEAD_LIST', + 'OBS_VALID_HOUR_LIST', + 'OBS_INIT_HOUR_LIST', 'FCST_VAR_LIST', + 'OBS_VAR_LIST', 'FCST_UNITS_LIST', + 'OBS_UNITS_LIST', 'FCST_LEVEL_LIST', + 'OBS_LEVEL_LIST', 'VX_MASK_LIST', + 'INTERP_MTHD_LIST', 'INTERP_PNTS_LIST', + 'FCST_THRESH_LIST', 'OBS_THRESH_LIST', + 'COV_THRESH_LIST', 'ALPHA_LIST', + 'LINE_TYPE_LIST'] + expected_lists_to_loop_items = ['FCST_VALID_HOUR_LIST', 'MODEL_LIST'] config_dict = {} config_dict['LOOP_ORDER'] = 'times' config_dict['PROCESS_LIST'] = 'StatAnalysis' @@ -122,8 +402,8 @@ def test_set_lists_as_loop_or_group(metplus_config): 'PARM_BASE/grid_to_grid/met_config/STATAnalysisConfig' ) config_dict['OUTPUT_DIR'] = 'OUTPUT_BASE/stat_analysis' - config_dict['GROUP_LIST_ITEMS'] = [ 'FCST_INIT_HOUR_LIST' ] - config_dict['LOOP_LIST_ITEMS'] = [ 'FCST_VALID_HOUR_LIST', 'MODEL_LIST'] + config_dict['GROUP_LIST_ITEMS'] = ['FCST_INIT_HOUR_LIST'] + config_dict['LOOP_LIST_ITEMS'] = ['FCST_VALID_HOUR_LIST', 'MODEL_LIST'] config_dict['FCST_VAR_LIST'] = [] config_dict['OBS_VAR_LIST'] = [] config_dict['FCST_LEVEL_LIST'] = [] @@ -132,12 +412,12 @@ def test_set_lists_as_loop_or_group(metplus_config): config_dict['OBS_UNITS_LIST'] = [] config_dict['FCST_THRESH_LIST'] = [] config_dict['OBS_THRESH_LIST'] = [] - config_dict['MODEL_LIST'] = [ 'MODEL_TEST' ] + config_dict['MODEL_LIST'] = ['MODEL_TEST'] config_dict['DESC_LIST'] = [] config_dict['FCST_LEAD_LIST'] = [] config_dict['OBS_LEAD_LIST'] = [] - config_dict['FCST_VALID_HOUR_LIST'] = [ '00', '06', '12', '18'] - config_dict['FCST_INIT_HOUR_LIST'] = [ '00', '06', '12', '18'] + config_dict['FCST_VALID_HOUR_LIST'] = ['00', '06', '12', '18'] + config_dict['FCST_INIT_HOUR_LIST'] = ['00', '06', '12', '18'] config_dict['OBS_VALID_HOUR_LIST'] = [] config_dict['OBS_INIT_HOUR_LIST'] = [] config_dict['VX_MASK_LIST'] = [] @@ -146,7 +426,7 @@ def test_set_lists_as_loop_or_group(metplus_config): config_dict['COV_THRESH_LIST'] = [] config_dict['ALPHA_LIST'] = [] config_dict['LINE_TYPE_LIST'] = [] - config_dict = st.set_lists_loop_or_group(config_dict) + config_dict = st._set_lists_loop_or_group(config_dict) test_lists_to_loop_items = config_dict['LOOP_LIST_ITEMS'] test_lists_to_group_items = config_dict['GROUP_LIST_ITEMS'] @@ -157,351 +437,201 @@ def test_set_lists_as_loop_or_group(metplus_config): @pytest.mark.parametrize( - 'expression, expected_result', [ - ('>1', 'gt1'), - ('>=0.2', 'ge0.2'), - ('<30', 'lt30'), - ('<=0.04', 'le0.04'), - ('==5', 'eq5'), - ('!=0.06', 'ne0.06'), - ('>0.05, gt0.05, >=1, ge1, <5, lt5, <=10, le10, ==15, eq15, !=20, ne20', - 'gt0.05,gt0.05,ge1,ge1,lt5,lt5,le10,le10,eq15,eq15,ne20,ne20'), - ('<805, <1609, <4828, <8045, >=8045, <16090', - 'lt805,lt1609,lt4828,lt8045,ge8045,lt16090'), + 'lists_to_loop,c_dict_overrides,config_dict_overrides,expected_values', [ + # Test 0 + (['FCST_VALID_HOUR_LIST', 'MODEL_LIST'], + {'DATE_BEG': '20190101', 'DATE_END': '20190105', 'DATE_TYPE': 'VALID'}, + {'FCST_VALID_HOUR': '0', 'FCST_INIT_HOUR': '0, 6, 12, 18'}, + {'valid_beg': 
datetime.datetime(2019, 1, 1, 0, 0, 0), + 'valid_end': datetime.datetime(2019, 1, 5, 0, 0, 0), + 'fcst_valid_beg': datetime.datetime(2019, 1, 1, 0, 0, 0), + 'fcst_valid_end': datetime.datetime(2019, 1, 5, 0, 0, 0), + 'fcst_valid_hour': relativedelta(), + 'valid_hour': relativedelta(), + 'fcst_valid_hour_beg': relativedelta(), + 'fcst_valid_hour_end': relativedelta(), + 'valid_hour_beg': relativedelta(), + 'valid_hour_end': relativedelta(), + 'model': 'MODEL_TEST', + 'obtype': 'MODEL_TEST_ANL', + 'fcst_init_hour': '000000_060000_120000_180000', + 'fcst_init_hour_beg': relativedelta(), + 'fcst_init_hour_end': relativedelta(hours=18), + 'init_hour_beg': relativedelta(), + 'init_hour_end': relativedelta(hours=18), + 'fcst_var': '', + 'fcst_level': '', + 'fcst_units': '', + 'fcst_thresh': '', + 'desc': '', + }, + ), + # Test 1 + (['FCST_VALID_HOUR_LIST', 'MODEL_LIST', 'FCST_LEAD_LIST'], + {'DATE_BEG': '20190101', 'DATE_END': '20190101', 'DATE_TYPE': 'VALID'}, + {'FCST_VALID_HOUR': '0', 'FCST_INIT_HOUR': '0, 6, 12, 18', + 'FCST_LEAD': '24'}, + {'valid': datetime.datetime(2019, 1, 1, 0, 0, 0), + 'fcst_valid': datetime.datetime(2019, 1, 1, 0, 0, 0), + 'fcst_lead_totalsec': '86400', + 'fcst_lead_hour': '24', + 'fcst_lead_min': '00', + 'fcst_lead_sec': '00', + 'fcst_lead': '240000', + 'lead_totalsec': '86400', + 'lead_hour': '24', + 'lead_min': '00', + 'lead_sec': '00', + 'lead': '240000', + }, + ), + # Test 2 + (['FCST_VALID_HOUR_LIST', 'MODEL_LIST', 'FCST_LEAD_LIST'], + {'DATE_BEG': '20190101', 'DATE_END': '20190101', 'DATE_TYPE': 'VALID'}, + {'FCST_VALID_HOUR': '0', 'FCST_INIT_HOUR': '0, 6, 12, 18', + 'FCST_LEAD': '120'}, + {'valid': datetime.datetime(2019, 1, 1, 0, 0, 0), + 'fcst_valid': datetime.datetime(2019, 1, 1, 0, 0, 0), + 'fcst_lead_totalsec': '432000', + 'fcst_lead_hour': '120', + 'fcst_lead_min': '00', + 'fcst_lead_sec': '00', + 'fcst_lead': '1200000', + 'lead_totalsec': '432000', + 'lead_hour': '120', + 'lead_min': '00', + 'lead_sec': '00', + 'lead': '1200000', + }, + ), + # Test 3 + (['FCST_VALID_HOUR_LIST', 'MODEL_LIST'], + {'DATE_BEG': '20190101', 'DATE_END': '20190105', 'DATE_TYPE': 'INIT'}, + {'FCST_VALID_HOUR': '0', 'FCST_INIT_HOUR': '0, 6, 12, 18'}, + {'init_beg': datetime.datetime(2019, 1, 1, 0, 0, 0), + 'init_end': datetime.datetime(2019, 1, 5, 18, 0, 0), + 'fcst_init_beg': datetime.datetime(2019, 1, 1, 0, 0, 0), + 'fcst_init_end': datetime.datetime(2019, 1, 5, 18, 0, 0), + 'fcst_init_hour_beg': relativedelta(), + 'fcst_init_hour_end': relativedelta(hours=18), + 'init_hour_beg': relativedelta(), + 'init_hour_end': relativedelta(hours=18), + }, + ), + # Test 4 + (['FCST_VALID_HOUR_LIST', 'MODEL_LIST'], + {'DATE_BEG': '20190101', 'DATE_END': '20190101', 'DATE_TYPE': 'INIT'}, + {'FCST_VALID_HOUR': '0', 'FCST_INIT_HOUR': '', 'FCST_LEAD': ''}, + {'init_beg': datetime.datetime(2019, 1, 1, 0, 0, 0), + 'init_end': datetime.datetime(2019, 1, 1, 23, 59, 59), + 'fcst_init_beg': datetime.datetime(2019, 1, 1, 0, 0, 0), + 'fcst_init_end': datetime.datetime(2019, 1, 1, 23, 59, 59), + 'obs_init_beg': datetime.datetime(2019, 1, 1, 0, 0, 0), + 'obs_init_end': datetime.datetime(2019, 1, 1, 23, 59, 59), + 'fcst_init_hour_beg': relativedelta(), + 'fcst_init_hour_end': relativedelta(hours=23, minutes=59, seconds=59), + 'obs_init_hour_beg': relativedelta(), + 'obs_init_hour_end': relativedelta(hours=23, minutes=59, seconds=59), + }, + ), + # Test 5 - check computed init_beg/end + (['FCST_LEAD_LIST'], + {'DATE_BEG': '20190101', 'DATE_END': '20190105', + 'DATE_TYPE': 'VALID'}, + {'FCST_VALID_HOUR': 
'0', 'FCST_LEAD': '12,24'}, + {'valid_beg': datetime.datetime(2019, 1, 1, 0, 0, 0), + 'valid_end': datetime.datetime(2019, 1, 5, 0, 0, 0), + 'init_beg': datetime.datetime(2018, 12, 31, 0, 0, 0), + 'init_end': datetime.datetime(2019, 1, 4, 12, 0, 0), + }, + ), + # Test 6 - check computed valid_beg/end + (['FCST_LEAD_LIST'], + {'DATE_BEG': '20190101', 'DATE_END': '20190105', + 'DATE_TYPE': 'INIT'}, + {'FCST_INIT_HOUR': '0', 'FCST_LEAD': '12,24'}, + {'init_beg': datetime.datetime(2019, 1, 1, 0, 0, 0), + 'init_end': datetime.datetime(2019, 1, 5, 0, 0, 0), + 'valid_beg': datetime.datetime(2019, 1, 1, 12, 0, 0), + 'valid_end': datetime.datetime(2019, 1, 6, 0, 0, 0), + }, + ), ] ) -@pytest.mark.plotting -def test_format_thresh(metplus_config, expression, expected_result): - # Independently test the creation of - # string values for defining thresholds - st = stat_analysis_wrapper(metplus_config) - - assert st.format_thresh(expression) == expected_result - - -@pytest.mark.plotting -def test_build_stringsub_dict(metplus_config): +@pytest.mark.wrapper_d +def test_build_stringsub_dict(metplus_config, lists_to_loop, c_dict_overrides, + config_dict_overrides, expected_values): # Independently test the building of # the dictionary used in the stringtemplate # substitution and the values are being set # as expected st = stat_analysis_wrapper(metplus_config) - config_dict = {} - config_dict['FCST_VALID_HOUR'] = '000000' - config_dict['FCST_VAR'] = '' - config_dict['FCST_LEVEL'] = '' - config_dict['INTERP_MTHD'] = '' - config_dict['MODEL'] = '"MODEL_TEST"' - config_dict['VX_MASK'] = '' - config_dict['OBS_INIT_HOUR'] = '' - config_dict['COV_THRESH'] = '' - config_dict['OBS_UNITS'] = '' - config_dict['FCST_THRESH'] = '' - config_dict['OBS_VAR'] = '' - config_dict['FCST_INIT_HOUR'] = '"000000", "060000", "120000", "180000"' - config_dict['INTERP_PNTS'] = '' - config_dict['FCST_LEAD'] = '' - config_dict['LINE_TYPE'] = '' - config_dict['FCST_UNITS'] = '' - config_dict['DESC'] = '' - config_dict['OBS_LEAD'] = '' - config_dict['OBS_THRESH'] = '' - config_dict['OBTYPE'] = '"MODEL_TEST_ANL"' - config_dict['OBS_VALID_HOUR'] = '' - config_dict['ALPHA'] = '' - config_dict['OBS_LEVEL'] = '' + config_dict = _set_config_dict_values() + # Test 1 - st.c_dict['DATE_BEG'] = '20190101' - st.c_dict['DATE_END'] = '20190105' - st.c_dict['DATE_TYPE'] = 'VALID' - lists_to_group = [ 'FCST_INIT_HOUR_LIST', 'DESC_LIST', 'FCST_LEAD_LIST', - 'OBS_LEAD_LIST', 'OBS_VALID_HOUR_LIST', - 'OBS_INIT_HOUR_LIST', 'FCST_VAR_LIST', 'OBS_VAR_LIST', - 'FCST_UNITS_LIST', 'OBS_UNITS_LIST', 'FCST_LEVEL_LIST', - 'OBS_LEVEL_LIST', 'VX_MASK_LIST', 'INTERP_MTHD_LIST', - 'INTERP_PNTS_LIST', 'FCST_THRESH_LIST', - 'OBS_THRESH_LIST', 'COV_THRESH_LIST', 'ALPHA_LIST', - 'LINE_TYPE_LIST' ] - lists_to_loop = [ 'FCST_VALID_HOUR_LIST', 'MODEL_LIST' ] - test_stringsub_dict = st.build_stringsub_dict(lists_to_loop, - lists_to_group, config_dict) - assert(test_stringsub_dict['valid_beg'] == - datetime.datetime(2019, 1, 1, 0, 0, 0)) - assert(test_stringsub_dict['valid_end'] == - datetime.datetime(2019, 1, 5, 0, 0, 0)) - assert(test_stringsub_dict['fcst_valid_hour'] == - datetime.datetime(1900, 1, 1, 0, 0, 0)) - assert(test_stringsub_dict['fcst_valid_hour_beg'] == - datetime.datetime(1900, 1, 1, 0, 0, 0)) - assert(test_stringsub_dict['fcst_valid_hour_end'] == - datetime.datetime(1900, 1, 1, 0, 0, 0)) - assert(test_stringsub_dict['fcst_valid_beg'] == - datetime.datetime(2019, 1, 1, 0, 0, 0)) - assert(test_stringsub_dict['fcst_valid_end'] == - datetime.datetime(2019, 1, 5, 
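Note: Tests 5 and 6 in the parametrization above check the windows derived when only one side is configured: the tightest init window consistent with a valid window is [valid_beg - max(leads), valid_end - min(leads)], and symmetrically valid = init + lead. A sketch of that arithmetic (using datetime.timedelta for brevity where the wrapper works with relativedelta):

    import datetime

    valid_beg = datetime.datetime(2019, 1, 1)
    valid_end = datetime.datetime(2019, 1, 5)
    leads = [datetime.timedelta(hours=12), datetime.timedelta(hours=24)]

    # Earliest init that can still verify at valid_beg uses the longest
    # lead; the latest init uses the shortest lead.
    assert valid_beg - max(leads) == datetime.datetime(2018, 12, 31)
    assert valid_end - min(leads) == datetime.datetime(2019, 1, 4, 12)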
0, 0, 0)) - assert(test_stringsub_dict['valid_hour'] == - datetime.datetime(1900, 1, 1, 0, 0, 0)) - assert(test_stringsub_dict['valid_hour_beg'] == - datetime.datetime(1900, 1, 1, 0, 0, 0)) - assert(test_stringsub_dict['valid_hour_end'] == - datetime.datetime(1900, 1, 1, 0, 0, 0)) - assert(test_stringsub_dict['model'] == 'MODEL_TEST') - assert(test_stringsub_dict['obtype'] == 'MODEL_TEST_ANL') - assert(test_stringsub_dict['fcst_init_hour'] == - '000000_060000_120000_180000') - assert(test_stringsub_dict['fcst_init_hour_beg'] == - datetime.datetime(1900, 1, 1, 0, 0, 0)) - assert(test_stringsub_dict['fcst_init_hour_end'] == - datetime.datetime(1900, 1, 1, 18, 0, 0)) - assert(test_stringsub_dict['init_hour_beg'] == - datetime.datetime(1900, 1, 1, 0, 0, 0)) - assert(test_stringsub_dict['init_hour_end'] == - datetime.datetime(1900, 1, 1, 18, 0, 0)) - assert(test_stringsub_dict['fcst_var'] == '') - assert(test_stringsub_dict['fcst_level'] == '') - assert(test_stringsub_dict['fcst_units'] == '') - assert(test_stringsub_dict['fcst_thresh'] == '') - assert(test_stringsub_dict['desc'] == '') - # Test 2 - config_dict['FCST_LEAD'] = '240000' - st.c_dict['DATE_BEG'] = '20190101' - st.c_dict['DATE_END'] = '20190101' - st.c_dict['DATE_TYPE'] = 'VALID' - lists_to_group = [ 'FCST_INIT_HOUR_LIST', 'DESC_LIST', - 'OBS_LEAD_LIST', 'OBS_VALID_HOUR_LIST', - 'OBS_INIT_HOUR_LIST', 'FCST_VAR_LIST', 'OBS_VAR_LIST', - 'FCST_UNITS_LIST', 'OBS_UNITS_LIST', 'FCST_LEVEL_LIST', - 'OBS_LEVEL_LIST', 'VX_MASK_LIST', 'INTERP_MTHD_LIST', - 'INTERP_PNTS_LIST', 'FCST_THRESH_LIST', - 'OBS_THRESH_LIST', 'COV_THRESH_LIST', 'ALPHA_LIST', - 'LINE_TYPE_LIST' ] - lists_to_loop = [ 'FCST_VALID_HOUR_LIST', 'MODEL_LIST', 'FCST_LEAD_LIST' ] - test_stringsub_dict = st.build_stringsub_dict(lists_to_loop, - lists_to_group, config_dict) - assert(test_stringsub_dict['valid'] == - datetime.datetime(2019, 1, 1, 0, 0, 0)) - assert(test_stringsub_dict['fcst_valid'] == - datetime.datetime(2019, 1, 1, 0, 0, 0)) - assert(test_stringsub_dict['fcst_lead_totalsec'] == '86400') - assert(test_stringsub_dict['fcst_lead_hour'] == '24') - assert(test_stringsub_dict['fcst_lead_min'] == '00') - assert(test_stringsub_dict['fcst_lead_sec'] == '00') - assert(test_stringsub_dict['fcst_lead'] == '240000') - assert(test_stringsub_dict['lead_totalsec'] == '86400') - assert(test_stringsub_dict['lead_hour'] == '24') - assert(test_stringsub_dict['lead_min'] == '00') - assert(test_stringsub_dict['lead_sec'] == '00') - assert(test_stringsub_dict['lead'] == '240000') - # Test 3 - config_dict['FCST_LEAD'] = '1200000' - st.c_dict['DATE_BEG'] = '20190101' - st.c_dict['DATE_END'] = '20190101' - st.c_dict['DATE_TYPE'] = 'VALID' - lists_to_group = [ 'FCST_INIT_HOUR_LIST', 'DESC_LIST', - 'OBS_LEAD_LIST', 'OBS_VALID_HOUR_LIST', - 'OBS_INIT_HOUR_LIST', 'FCST_VAR_LIST', 'OBS_VAR_LIST', - 'FCST_UNITS_LIST', 'OBS_UNITS_LIST', 'FCST_LEVEL_LIST', - 'OBS_LEVEL_LIST', 'VX_MASK_LIST', 'INTERP_MTHD_LIST', - 'INTERP_PNTS_LIST', 'FCST_THRESH_LIST', - 'OBS_THRESH_LIST', 'COV_THRESH_LIST', 'ALPHA_LIST', - 'LINE_TYPE_LIST' ] - lists_to_loop = [ 'FCST_VALID_HOUR_LIST', 'MODEL_LIST', 'FCST_LEAD_LIST' ] - test_stringsub_dict = st.build_stringsub_dict(lists_to_loop, - lists_to_group, config_dict) - assert(test_stringsub_dict['valid'] == - datetime.datetime(2019, 1, 1, 0, 0, 0)) - assert(test_stringsub_dict['fcst_valid'] == - datetime.datetime(2019, 1, 1, 0, 0, 0)) - assert(test_stringsub_dict['fcst_lead_totalsec'] == '432000') - assert(test_stringsub_dict['fcst_lead_hour'] == '120') - 
assert(test_stringsub_dict['fcst_lead_min'] == '00') - assert(test_stringsub_dict['fcst_lead_sec'] == '00') - assert(test_stringsub_dict['fcst_lead'] == '1200000') - assert(test_stringsub_dict['lead_totalsec'] == '432000') - assert(test_stringsub_dict['lead_hour'] == '120') - assert(test_stringsub_dict['lead_min'] == '00') - assert(test_stringsub_dict['lead_sec'] == '00') - assert(test_stringsub_dict['lead'] == '1200000') - # Test 4 - st.c_dict['DATE_BEG'] = '20190101' - st.c_dict['DATE_END'] = '20190105' - st.c_dict['DATE_TYPE'] = 'INIT' - test_stringsub_dict = st.build_stringsub_dict(lists_to_loop, - lists_to_group, config_dict) - assert(test_stringsub_dict['fcst_init_hour_beg'] == - datetime.datetime(1900, 1, 1, 0, 0, 0)) - assert(test_stringsub_dict['fcst_init_hour_end'] == - datetime.datetime(1900, 1, 1, 18, 0, 0)) - assert(test_stringsub_dict['fcst_init_beg'] == - datetime.datetime(2019, 1, 1, 0, 0, 0)) - assert(test_stringsub_dict['fcst_init_end'] == - datetime.datetime(2019, 1, 5, 18, 0, 0)) - assert(test_stringsub_dict['init_hour_beg'] == - datetime.datetime(1900, 1, 1, 0, 0, 0)) - assert(test_stringsub_dict['init_hour_end'] == - datetime.datetime(1900, 1, 1, 18, 0, 0)) - assert(test_stringsub_dict['init_beg'] == - datetime.datetime(2019, 1, 1, 0, 0, 0)) - assert(test_stringsub_dict['init_end'] == - datetime.datetime(2019, 1, 5, 18, 0, 0)) - # Test 5 - config_dict['FCST_INIT_HOUR'] = '' - config_dict['FCST_LEAD'] = '' - st.c_dict['DATE_BEG'] = '20190101' - st.c_dict['DATE_END'] = '20190101' - st.c_dict['DATE_TYPE'] = 'INIT' - lists_to_group = [ 'FCST_INIT_HOUR_LIST', 'DESC_LIST', 'FCST_LEAD_LIST', - 'OBS_LEAD_LIST', 'OBS_VALID_HOUR_LIST', - 'OBS_INIT_HOUR_LIST', 'FCST_VAR_LIST', 'OBS_VAR_LIST', - 'FCST_UNITS_LIST', 'OBS_UNITS_LIST', 'FCST_LEVEL_LIST', - 'OBS_LEVEL_LIST', 'VX_MASK_LIST', 'INTERP_MTHD_LIST', - 'INTERP_PNTS_LIST', 'FCST_THRESH_LIST', - 'OBS_THRESH_LIST', 'COV_THRESH_LIST', 'ALPHA_LIST', - 'LINE_TYPE_LIST' ] - lists_to_loop = [ 'FCST_VALID_HOUR_LIST', 'MODEL_LIST' ] - test_stringsub_dict = st.build_stringsub_dict(lists_to_loop, - lists_to_group, config_dict) - assert(test_stringsub_dict['init_beg'] == - datetime.datetime(2019, 1, 1, 0, 0 ,0)) - assert(test_stringsub_dict['init_end'] == - datetime.datetime(2019, 1, 1, 23, 59 ,59)) - assert(test_stringsub_dict['fcst_init_beg'] == - datetime.datetime(2019, 1, 1, 0, 0 ,0)) - assert(test_stringsub_dict['fcst_init_end'] == - datetime.datetime(2019, 1, 1, 23, 59 ,59)) - assert(test_stringsub_dict['obs_init_beg'] == - datetime.datetime(2019, 1, 1, 0, 0 ,0)) - assert(test_stringsub_dict['obs_init_end'] == - datetime.datetime(2019, 1, 1, 23, 59 ,59)) - assert(test_stringsub_dict['fcst_init_hour_beg'] == - datetime.datetime(1900, 1, 1, 0, 0, 0)) - assert(test_stringsub_dict['fcst_init_hour_end'] == - datetime.datetime(1900, 1, 1, 23, 59 ,59)) - assert(test_stringsub_dict['obs_init_hour_beg'] == - datetime.datetime(1900, 1, 1, 0, 0, 0)) - assert(test_stringsub_dict['obs_init_hour_end'] == - datetime.datetime(1900, 1, 1, 23, 59 ,59)) - - -@pytest.mark.plotting -def test_get_output_filename(metplus_config): + for key, value in c_dict_overrides.items(): + if key in ('DATE_BEG', 'DATE_END'): + st.c_dict[key] = datetime.datetime.strptime(value, '%Y%m%d') + else: + st.c_dict[key] = value + + for key, value in config_dict_overrides.items(): + config_dict[key] = value + + lists_to_group = [item for item in st.EXPECTED_CONFIG_LISTS + if item not in lists_to_loop] + config_dict['LISTS_TO_GROUP'] = lists_to_group + 
config_dict['LISTS_TO_LOOP'] = lists_to_loop + test_stringsub_dict = st._build_stringsub_dict(config_dict) + + print(test_stringsub_dict) + for key, value in expected_values.items(): + print(f'key: {key}') + assert test_stringsub_dict[key] == value + + +@pytest.mark.parametrize( + 'filename_template, output_type, expected_output', [ + (('{fcst_valid_hour?fmt=%H}Z/{model?fmt=%s}/' + '{model?fmt=%s}_{valid?fmt=%Y%m%d}.stat'), + 'dump_row', '00Z/MODEL_TEST/MODEL_TEST_20190101.stat'), + (('{model?fmt=%s}_{obtype?fmt=%s}_valid{valid?fmt=%Y%m%d}_' + 'fcstvalidhour000000Z_dump_row.stat'), + 'dump_row', ('MODEL_TEST_MODEL_TEST_ANL_valid20190101_' + 'fcstvalidhour000000Z_dump_row.stat') + ), + (('{model?fmt=%s}_{obtype?fmt=%s}_valid{valid?fmt=%Y%m%d}' + '{valid_hour?fmt=%H}_init{fcst_init_hour?fmt=%s}.stat'), + 'out_stat', ('MODEL_TEST_MODEL_TEST_ANL_valid2019010100' + '_init000000_060000_120000_180000.stat') + ), + ] +) +@pytest.mark.wrapper_d +def test_get_output_filename(metplus_config, filename_template, output_type, + expected_output): # Independently test the building of # the output file name # using string template substitution # and test the values is # as expected st = stat_analysis_wrapper(metplus_config) - config_dict = {} - config_dict['FCST_VALID_HOUR'] = '000000' - config_dict['FCST_VAR'] = '' - config_dict['FCST_LEVEL'] = '' - config_dict['INTERP_MTHD'] = '' - config_dict['MODEL'] = '"MODEL_TEST"' - config_dict['VX_MASK'] = '' - config_dict['OBS_INIT_HOUR'] = '' - config_dict['COV_THRESH'] = '' - config_dict['OBS_UNITS'] = '' - config_dict['FCST_THRESH'] = '' - config_dict['OBS_VAR'] = '' - config_dict['FCST_INIT_HOUR'] = '"000000", "060000", "120000", "180000"' - config_dict['INTERP_PNTS'] = '' - config_dict['FCST_LEAD'] = '' - config_dict['LINE_TYPE'] = '' - config_dict['FCST_UNITS'] = '' - config_dict['DESC'] = '' - config_dict['OBS_LEAD'] = '' - config_dict['OBS_THRESH'] = '' - config_dict['OBTYPE'] = '"MODEL_TEST_ANL"' - config_dict['OBS_VALID_HOUR'] = '' - config_dict['ALPHA'] = '' - config_dict['OBS_LEVEL'] = '' - st.c_dict['DATE_BEG'] = '20190101' - st.c_dict['DATE_END'] = '20190101' + config_dict = _set_config_dict_values() + config_dict['FCST_VALID_HOUR'] = '0' + config_dict['FCST_INIT_HOUR'] = '0, 6, 12, 18' + + st.c_dict['DATE_BEG'] = datetime.datetime.strptime('20190101', '%Y%m%d') + st.c_dict['DATE_END'] = datetime.datetime.strptime('20190101', '%Y%m%d') st.c_dict['DATE_TYPE'] = 'VALID' - lists_to_group = [ 'FCST_INIT_HOUR_LIST', 'DESC_LIST', 'FCST_LEAD_LIST', - 'OBS_LEAD_LIST', 'OBS_VALID_HOUR_LIST', - 'OBS_INIT_HOUR_LIST', 'FCST_VAR_LIST', 'OBS_VAR_LIST', - 'FCST_UNITS_LIST', 'OBS_UNITS_LIST', 'FCST_LEVEL_LIST', - 'OBS_LEVEL_LIST', 'VX_MASK_LIST', 'INTERP_MTHD_LIST', - 'INTERP_PNTS_LIST', 'FCST_THRESH_LIST', - 'OBS_THRESH_LIST', 'COV_THRESH_LIST', 'ALPHA_LIST', - 'LINE_TYPE_LIST' ] - lists_to_loop = [ 'FCST_VALID_HOUR_LIST', 'MODEL_LIST' ] - # Test 1 - expected_output_filename = '00Z/MODEL_TEST/MODEL_TEST_20190101.stat' - output_type = 'dump_row' - filename_template = ( - '{fcst_valid_hour?fmt=%H}Z/{model?fmt=%s}/{model?fmt=%s}_{valid?fmt=%Y%m%d}.stat' - ) - filename_type = 'user' - test_output_filename = st.get_output_filename(output_type, - filename_template, - filename_type, - lists_to_loop, - lists_to_group, - config_dict) - assert expected_output_filename == test_output_filename - # Test 2 - expected_output_filename = ( - 'MODEL_TEST_MODEL_TEST_ANL_' - +'valid20190101_fcstvalidhour000000Z' - +'_dump_row.stat' - ) - output_type = 'dump_row' - filename_template = 
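Note: test_get_output_filename above fills METplus filename templates such as {model?fmt=%s}_{valid?fmt=%Y%m%d}.stat from the string-substitution dictionary built by _build_stringsub_dict. As a hedged illustration of the idea only, plain Python formatting behaves analogously (this is not the wrapper's string_template_substitution helper, which additionally supports the ?fmt/?shift tags):

    import datetime

    # Sketch only: fill a template from a substitution dictionary.
    stringsub = {'model': 'MODEL_TEST',
                 'valid': datetime.datetime(2019, 1, 1)}
    template = '{model}_{valid:%Y%m%d}.stat'
    assert template.format(**stringsub) == 'MODEL_TEST_20190101.stat'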
( - '{model?fmt=%s}_{obtype?fmt=%s}' - +'_valid{valid?fmt=%Y%m%d}_' - +'fcstvalidhour000000Z_dump_row.stat' - ) - filename_type = 'user' - test_output_filename = st.get_output_filename(output_type, - filename_template, - filename_type, - lists_to_loop, - lists_to_group, - config_dict) - assert expected_output_filename == test_output_filename - # Test 3 - expected_output_filename = ( - 'MODEL_TEST_MODEL_TEST_ANL' - +'_valid2019010100' - +'_init000000_060000_120000_180000.stat' - ) - output_type = 'out_stat' - filename_template = ( - '{model?fmt=%s}_{obtype?fmt=%s}' - +'_valid{valid?fmt=%Y%m%d}{valid_hour?fmt=%H}' - +'_init{fcst_init_hour?fmt=%s}.stat' - ) - filename_type = 'user' - test_output_filename = st.get_output_filename(output_type, - filename_template, - filename_type, - lists_to_loop, - lists_to_group, - config_dict) - assert expected_output_filename == test_output_filename - # Test 4 - expected_output_filename = ( - 'MODEL_TEST_MODEL_TEST_ANL' - +'valid20190101_fcstvalidhour000000Z' - +'_out_stat.stat' - ) - output_type = 'out_stat' - filename_template = ( - '{model?fmt=%s}_{obtype?fmt=%s}' - ) - filename_type = 'default' - test_output_filename = st.get_output_filename(output_type, - filename_template, - filename_type, - lists_to_loop, - lists_to_group, - config_dict) - assert expected_output_filename == test_output_filename + stringsub_dict = st._build_stringsub_dict(config_dict) + test_output_filename = st._get_output_filename(output_type, + filename_template, + stringsub_dict) + assert expected_output == test_output_filename -@pytest.mark.plotting + +@pytest.mark.wrapper_d def test_get_lookin_dir(metplus_config): # Independently test the building of # the lookin directory @@ -511,7 +641,7 @@ def test_get_lookin_dir(metplus_config): # as expected st = stat_analysis_wrapper(metplus_config) config_dict = {} - config_dict['FCST_VALID_HOUR'] = '000000' + config_dict['FCST_VALID_HOUR'] = '0' config_dict['FCST_VAR'] = '' config_dict['FCST_LEVEL'] = '' config_dict['INTERP_MTHD'] = '' @@ -522,7 +652,7 @@ def test_get_lookin_dir(metplus_config): config_dict['OBS_UNITS'] = '' config_dict['FCST_THRESH'] = '' config_dict['OBS_VAR'] = '' - config_dict['FCST_INIT_HOUR'] = '"000000", "060000", "120000", "180000"' + config_dict['FCST_INIT_HOUR'] = '0, 6, 12, 18' config_dict['INTERP_PNTS'] = '' config_dict['FCST_LEAD'] = '' config_dict['LINE_TYPE'] = '' @@ -530,47 +660,47 @@ def test_get_lookin_dir(metplus_config): config_dict['DESC'] = '' config_dict['OBS_LEAD'] = '' config_dict['OBS_THRESH'] = '' - config_dict['OBTYPE'] = '"MODEL_TEST_ANL"' + config_dict['OBTYPE'] = '"MODEL_TEST_ANL"' config_dict['OBS_VALID_HOUR'] = '' config_dict['ALPHA'] = '' config_dict['OBS_LEVEL'] = '' - st.c_dict['DATE_BEG'] = '20180201' - st.c_dict['DATE_END'] = '20180201' + st.c_dict['DATE_BEG'] = datetime.datetime.strptime('20180201', '%Y%m%d') + st.c_dict['DATE_END'] = datetime.datetime.strptime('20180201', '%Y%m%d') st.c_dict['DATE_TYPE'] = 'VALID' - lists_to_group = [ 'FCST_INIT_HOUR_LIST', 'DESC_LIST', 'FCST_LEAD_LIST', - 'OBS_LEAD_LIST', 'OBS_VALID_HOUR_LIST', - 'OBS_INIT_HOUR_LIST', 'FCST_VAR_LIST', 'OBS_VAR_LIST', - 'FCST_UNITS_LIST', 'OBS_UNITS_LIST', 'FCST_LEVEL_LIST', - 'OBS_LEVEL_LIST', 'VX_MASK_LIST', 'INTERP_MTHD_LIST', - 'INTERP_PNTS_LIST', 'FCST_THRESH_LIST', - 'OBS_THRESH_LIST', 'COV_THRESH_LIST', 'ALPHA_LIST', - 'LINE_TYPE_LIST' ] - lists_to_loop = [ 'FCST_VALID_HOUR_LIST', 'MODEL_LIST' ] + lists_to_group = ['FCST_INIT_HOUR_LIST', 'DESC_LIST', 'FCST_LEAD_LIST', + 'OBS_LEAD_LIST', 
'OBS_VALID_HOUR_LIST', + 'OBS_INIT_HOUR_LIST', 'FCST_VAR_LIST', 'OBS_VAR_LIST', + 'FCST_UNITS_LIST', 'OBS_UNITS_LIST', 'FCST_LEVEL_LIST', + 'OBS_LEVEL_LIST', 'VX_MASK_LIST', 'INTERP_MTHD_LIST', + 'INTERP_PNTS_LIST', 'FCST_THRESH_LIST', + 'OBS_THRESH_LIST', 'COV_THRESH_LIST', 'ALPHA_LIST', + 'LINE_TYPE_LIST'] + lists_to_loop = ['FCST_VALID_HOUR_LIST', 'MODEL_LIST'] + config_dict['LISTS_TO_GROUP'] = lists_to_group + config_dict['LISTS_TO_LOOP'] = lists_to_loop + pytest_data_dir = os.path.join(os.path.dirname(__file__), os.pardir, os.pardir, os.pardir, 'data') # Test 1 expected_lookin_dir = os.path.join(pytest_data_dir, 'fake/20180201') dir_path = os.path.join(pytest_data_dir, 'fake/*') - test_lookin_dir = st.get_lookin_dir(dir_path, lists_to_loop, - lists_to_group, config_dict) + test_lookin_dir = st._get_lookin_dir(dir_path, config_dict) assert expected_lookin_dir == test_lookin_dir # Test 2 expected_lookin_dir = os.path.join(pytest_data_dir, 'fake/20180201') dir_path = os.path.join(pytest_data_dir, 'fake/{valid?fmt=%Y%m%d}') - test_lookin_dir = st.get_lookin_dir(dir_path, lists_to_loop, - lists_to_group, config_dict) + test_lookin_dir = st._get_lookin_dir(dir_path, config_dict) assert expected_lookin_dir == test_lookin_dir # Test 3 - no matches for lookin dir wildcard expected_lookin_dir = '' dir_path = os.path.join(pytest_data_dir, 'fake/*nothingmatches*') - test_lookin_dir = st.get_lookin_dir(dir_path, lists_to_loop, - lists_to_group, config_dict) + test_lookin_dir = st._get_lookin_dir(dir_path, config_dict) assert expected_lookin_dir == test_lookin_dir # Test 4 - 2 paths, one with wildcard @@ -579,114 +709,96 @@ def test_get_lookin_dir(metplus_config): dir_path = os.path.join(pytest_data_dir, 'fake/*') dir_path = f'{dir_path}, {dir_path}' - test_lookin_dir = st.get_lookin_dir(dir_path, lists_to_loop, - lists_to_group, config_dict) + test_lookin_dir = st._get_lookin_dir(dir_path, config_dict) assert expected_lookin_dir == test_lookin_dir -@pytest.mark.plotting -def test_format_valid_init(metplus_config): +@pytest.mark.parametrize( + 'c_dict_overrides, config_dict_overrides, expected_values', [ + # Test 0 + ({'DATE_BEG': '20190101', 'DATE_END': '20190105', 'DATE_TYPE': 'VALID'}, + {'FCST_VALID_HOUR': '0', 'FCST_INIT_HOUR': '0, 12', + 'OBS_VALID_HOUR': '', 'OBS_INIT_HOUR': ''}, + {'FCST_VALID_BEG': '20190101_000000', + 'FCST_VALID_END': '20190105_000000', + 'FCST_VALID_HOUR': '"000000"', + 'FCST_INIT_HOUR': '"000000", "120000"', + 'OBS_VALID_BEG': '20190101_000000', + 'OBS_VALID_END': '20190105_235959', + }, + ), + # Test 1 + ( + {'DATE_BEG': '20190101', 'DATE_END': '20190105', 'DATE_TYPE': 'VALID'}, + {'FCST_VALID_HOUR': '0, 12', 'FCST_INIT_HOUR': '0, 12', + 'OBS_VALID_HOUR': '', 'OBS_INIT_HOUR': ''}, + {'FCST_VALID_BEG': '20190101_000000', + 'FCST_VALID_END': '20190105_120000', + 'FCST_VALID_HOUR': '"000000", "120000"', + 'FCST_INIT_HOUR': '"000000", "120000"', + 'OBS_VALID_BEG': '20190101_000000', + 'OBS_VALID_END': '20190105_235959', + }, + ), + # Test 2 + ( + {'DATE_BEG': '20190101', 'DATE_END': '20190101', 'DATE_TYPE': 'VALID'}, + {'FCST_VALID_HOUR': '', 'FCST_INIT_HOUR': '', + 'OBS_VALID_HOUR': '000000', 'OBS_INIT_HOUR': '0, 12'}, + {'OBS_VALID_BEG': '20190101_000000', + 'OBS_VALID_END': '20190101_000000', + 'OBS_VALID_HOUR': '"000000"', + 'OBS_INIT_HOUR': '"000000", "120000"', + 'FCST_VALID_BEG': '20190101_000000', + 'FCST_VALID_END': '20190101_235959', + }, + ), + # Test 3 + ({'DATE_BEG': '20190101', 'DATE_END': '20190101', 'DATE_TYPE': 'INIT'}, + {'FCST_VALID_HOUR': '', 
'FCST_INIT_HOUR': '', + 'OBS_VALID_HOUR': '000000', 'OBS_INIT_HOUR': '0, 12'}, + {'OBS_INIT_BEG': '20190101_000000', + 'OBS_INIT_END': '20190101_120000', + 'OBS_VALID_HOUR': '"000000"', + 'OBS_INIT_HOUR': '"000000", "120000"', + 'FCST_INIT_BEG': '20190101_000000', + 'FCST_INIT_END': '20190101_235959', + }, + ), + ] +) +@pytest.mark.wrapper_d +def test_format_valid_init(metplus_config, c_dict_overrides, + config_dict_overrides, expected_values): # Independently test the formatting # of the valid and initialization date and hours # from the METplus config file for the MET # config file and that they are formatted # correctly st = stat_analysis_wrapper(metplus_config) - # Test 1 - st.c_dict['DATE_BEG'] = '20190101' - st.c_dict['DATE_END'] = '20190105' - st.c_dict['DATE_TYPE'] = 'VALID' - config_dict = {} - config_dict['FCST_VALID_HOUR'] = '000000' - config_dict['FCST_INIT_HOUR'] = '"000000", "120000"' - config_dict['OBS_VALID_HOUR'] = '' - config_dict['OBS_INIT_HOUR'] = '' - config_dict = st.format_valid_init(config_dict) - assert config_dict['FCST_VALID_BEG'] == '20190101_000000' - assert config_dict['FCST_VALID_END'] == '20190105_000000' - assert config_dict['FCST_VALID_HOUR'] == '"000000"' - assert config_dict['FCST_INIT_BEG'] == '' - assert config_dict['FCST_INIT_END'] == '' - assert config_dict['FCST_INIT_HOUR'] == '"000000", "120000"' - assert config_dict['OBS_VALID_BEG'] == '' - assert config_dict['OBS_VALID_END'] == '' - assert config_dict['OBS_VALID_HOUR'] == '' - assert config_dict['OBS_INIT_BEG'] == '' - assert config_dict['OBS_INIT_END'] == '' - assert config_dict['OBS_INIT_HOUR'] == '' - # Test 2 - st.c_dict['DATE_BEG'] = '20190101' - st.c_dict['DATE_END'] = '20190105' - st.c_dict['DATE_TYPE'] = 'VALID' + for key, value in c_dict_overrides.items(): + if key in ('DATE_BEG', 'DATE_END'): + st.c_dict[key] = datetime.datetime.strptime(value, '%Y%m%d') + else: + st.c_dict[key] = value config_dict = {} - config_dict['FCST_VALID_HOUR'] = '"000000", "120000"' - config_dict['FCST_INIT_HOUR'] = '"000000", "120000"' - config_dict['OBS_VALID_HOUR'] = '' - config_dict['OBS_INIT_HOUR'] = '' - config_dict = st.format_valid_init(config_dict) - assert config_dict['FCST_VALID_BEG'] == '20190101_000000' - assert config_dict['FCST_VALID_END'] == '20190105_120000' - assert config_dict['FCST_VALID_HOUR'] == '"000000", "120000"' - assert config_dict['FCST_INIT_BEG'] == '' - assert config_dict['FCST_INIT_END'] == '' - assert config_dict['FCST_INIT_HOUR'] == '"000000", "120000"' - assert config_dict['OBS_VALID_BEG'] == '' - assert config_dict['OBS_VALID_END'] == '' - assert config_dict['OBS_VALID_HOUR'] == '' - assert config_dict['OBS_INIT_BEG'] == '' - assert config_dict['OBS_INIT_END'] == '' - assert config_dict['OBS_INIT_HOUR'] == '' - # Test 3 - st.c_dict['DATE_BEG'] = '20190101' - st.c_dict['DATE_END'] = '20190101' - st.c_dict['DATE_TYPE'] = 'VALID' + for key, value in config_dict_overrides.items(): + config_dict[key] = value - config_dict = {} - config_dict['FCST_VALID_HOUR'] = '' - config_dict['FCST_INIT_HOUR'] = '' - config_dict['OBS_VALID_HOUR'] = '000000' - config_dict['OBS_INIT_HOUR'] = '"000000", "120000"' - config_dict = st.format_valid_init(config_dict) - assert config_dict['FCST_VALID_BEG'] == '' - assert config_dict['FCST_VALID_END'] == '' - assert config_dict['FCST_VALID_HOUR'] == '' - assert config_dict['FCST_INIT_BEG'] == '' - assert config_dict['FCST_INIT_END'] == '' - assert config_dict['FCST_INIT_HOUR'] == '' - assert config_dict['OBS_VALID_BEG'] == '20190101_000000' - assert 
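Note: the expected values above show the string forms _format_valid_init produces for the MET config: begin/end dates as YYYYMMDD_HHMMSS and hour lists as double-quoted HHMMSS values. A sketch of both formats (plain Python; assumptions as commented):

    import datetime

    beg = datetime.datetime(2019, 1, 1)
    end = datetime.datetime(2019, 1, 5, 23, 59, 59)
    # Begin/end dates render as YYYYMMDD_HHMMSS strings.
    assert beg.strftime('%Y%m%d_%H%M%S') == '20190101_000000'
    assert end.strftime('%Y%m%d_%H%M%S') == '20190105_235959'

    # Hour lists render as comma-separated, double-quoted HHMMSS values.
    hours = ['000000', '120000']
    assert ', '.join('"%s"' % h for h in hours) == '"000000", "120000"'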
config_dict['OBS_VALID_END'] == '20190101_000000' - assert config_dict['OBS_VALID_HOUR'] == '"000000"' - assert config_dict['OBS_INIT_BEG'] == '' - assert config_dict['OBS_INIT_END'] == '' - assert config_dict['OBS_INIT_HOUR'] == '"000000", "120000"' - # Test 3 - st.c_dict['DATE_BEG'] = '20190101' - st.c_dict['DATE_END'] = '20190101' - st.c_dict['DATE_TYPE'] = 'INIT' + stringsub_dict = st._build_stringsub_dict(config_dict) + output_dict = st._format_valid_init(config_dict, stringsub_dict) + print(output_dict) + for key, value in output_dict.items(): + print(key) + if key not in expected_values: + assert value == '' + else: + assert value == expected_values[key] - config_dict = {} - config_dict['FCST_VALID_HOUR'] = '' - config_dict['FCST_INIT_HOUR'] = '' - config_dict['OBS_VALID_HOUR'] = '000000' - config_dict['OBS_INIT_HOUR'] = '"000000", "120000"' - config_dict = st.format_valid_init(config_dict) - assert config_dict['FCST_VALID_BEG'] == '' - assert config_dict['FCST_VALID_END'] == '' - assert config_dict['FCST_VALID_HOUR'] == '' - assert config_dict['FCST_INIT_BEG'] == '' - assert config_dict['FCST_INIT_END'] == '' - assert config_dict['FCST_INIT_HOUR'] == '' - assert config_dict['OBS_VALID_BEG'] == '' - assert config_dict['OBS_VALID_END'] == '' - assert config_dict['OBS_VALID_HOUR'] == '"000000"' - assert config_dict['OBS_INIT_BEG'] == '20190101_000000' - assert config_dict['OBS_INIT_END'] == '20190101_120000' - assert config_dict['OBS_INIT_HOUR'] == '"000000", "120000"' - - -@pytest.mark.plotting + +@pytest.mark.wrapper_d def test_parse_model_info(metplus_config): # Independently test the creation of # the model information dictionary @@ -694,9 +806,8 @@ def test_parse_model_info(metplus_config): # are as expected st = stat_analysis_wrapper(metplus_config) # Test 1 - expected_name = 'MODEL_TEST' - expected_reference_name = 'MODELTEST' - expected_obtype = 'MODEL_TEST_ANL' + expected_name = '"MODEL_TEST"' + expected_obtype = '"MODEL_TEST_ANL"' expected_dump_row_filename_template = ( '{fcst_valid_hour?fmt=%H}Z/MODEL_TEST/' +'MODEL_TEST_{valid?fmt=%Y%m%d}.stat' @@ -710,56 +821,58 @@ def test_parse_model_info(metplus_config): ) expected_out_stat_filename_type = 'user' - test_model_info_list = st.parse_model_info() + test_model_info_list = st._parse_model_info() assert test_model_info_list[0]['name'] == expected_name - assert test_model_info_list[0]['reference_name'] == expected_reference_name assert test_model_info_list[0]['obtype'] == expected_obtype - assert test_model_info_list[0]['dump_row_filename_template'] == expected_dump_row_filename_template - assert test_model_info_list[0]['dump_row_filename_type'] == expected_dump_row_filename_type - assert test_model_info_list[0]['out_stat_filename_template'] == expected_out_stat_filename_template - assert test_model_info_list[0]['out_stat_filename_type'] == expected_out_stat_filename_type + assert (test_model_info_list[0]['dump_row_filename_template'] == + expected_dump_row_filename_template) + assert (test_model_info_list[0]['out_stat_filename_template'] + == expected_out_stat_filename_template) -@pytest.mark.plotting +@pytest.mark.wrapper_d def test_run_stat_analysis(metplus_config): # Test running of stat_analysis st = stat_analysis_wrapper(metplus_config) # Test 1 expected_filename = (st.config.getdir('OUTPUT_BASE')+'/stat_analysis' - +'/00Z/MODEL_TEST/MODEL_TEST_20190101.stat') + '/00Z/MODEL_TEST/MODEL_TEST_20190101.stat') + if os.path.exists(expected_filename): + os.remove(expected_filename) comparison_filename = 
(METPLUS_BASE+'/internal/tests/data/stat_data/' +'test_20190101.stat') - st.c_dict['DATE_BEG'] = '20190101' - st.c_dict['DATE_END'] = '20190101' + st.c_dict['DATE_BEG'] = datetime.datetime.strptime('20190101', '%Y%m%d') + st.c_dict['DATE_END'] = datetime.datetime.strptime('20190101', '%Y%m%d') st.c_dict['DATE_TYPE'] = 'VALID' - st.run_stat_analysis() + st._run_stat_analysis() assert os.path.exists(expected_filename) - assert os.path.getsize(expected_filename) == os.path.getsize(comparison_filename) + assert (os.path.getsize(expected_filename) == + os.path.getsize(comparison_filename)) @pytest.mark.parametrize( 'data_type, config_list, expected_list', [ - ('FCST', '\"0,*,*\"', ["0,*,*"]), - ('FCST', '\"(0,*,*)\"', ["0,*,*"]), - ('FCST', '\"0,*,*\", \"1,*,*\"', ["0,*,*", "1,*,*"]), - ('FCST', '\"(0,*,*)\", \"(1,*,*)\"', ["0,*,*", "1,*,*"]), - ('OBS', '\"0,*,*\"', ["0,*,*"]), - ('OBS', '\"(0,*,*)\"', ["0,*,*"]), - ('OBS', '\"0,*,*\", \"1,*,*\"', ["0,*,*", "1,*,*"]), - ('OBS', '\"(0,*,*)\", \"(1,*,*)\"', ["0,*,*", "1,*,*"]), + ('FCST', '\"0,*,*\"', ['"0,*,*"']), + ('FCST', '\"(0,*,*)\"', ['"0,*,*"']), + ('FCST', '\"0,*,*\", \"1,*,*\"', ['"0,*,*"', '"1,*,*"']), + ('FCST', '\"(0,*,*)\", \"(1,*,*)\"', ['"0,*,*"', '"1,*,*"']), + ('OBS', '\"0,*,*\"', ['"0,*,*"']), + ('OBS', '\"(0,*,*)\"', ['"0,*,*"']), + ('OBS', '\"0,*,*\", \"1,*,*\"', ['"0,*,*"', '"1,*,*"']), + ('OBS', '\"(0,*,*)\", \"(1,*,*)\"', ['"0,*,*"', '"1,*,*"']), ] ) -@pytest.mark.plotting +@pytest.mark.wrapper_d def test_get_level_list(metplus_config, data_type, config_list, expected_list): config = metplus_config() config.set('config', f'{data_type}_LEVEL_LIST', config_list) saw = StatAnalysisWrapper(config) - assert saw.get_level_list(data_type) == expected_list + assert saw._get_level_list(data_type) == expected_list -@pytest.mark.plotting +@pytest.mark.wrapper_d def test_get_config_file(metplus_config): fake_config_name = '/my/config/file' config = metplus_config() diff --git a/internal/tests/pytests/wrappers/stat_analysis/test_stat_analysis_plotting.py b/internal/tests/pytests/wrappers/stat_analysis/test_stat_analysis_plotting.py deleted file mode 100644 index c7452295e..000000000 --- a/internal/tests/pytests/wrappers/stat_analysis/test_stat_analysis_plotting.py +++ /dev/null @@ -1,401 +0,0 @@ -#!/usr/bin/env python3 - -import pytest - -import os - -import glob - -from metplus.wrappers.stat_analysis_wrapper import StatAnalysisWrapper -from metplus.util import handle_tmp_dir - -METPLUS_BASE = os.getcwd().split('/internal')[0] - - -def stat_analysis_wrapper(metplus_config): - """! Returns a default StatAnalysisWrapper with /path/to entries in the - metplus_system.conf and metplus_runtime.conf configuration - files. 
Subsequent tests can customize the final METplus configuration - to over-ride these /path/to values.""" - - # Default, empty StatAnalysisWrapper with some configuration values set - # to /path/to: - extra_configs = [] - extra_configs.append(os.path.join(os.path.dirname(__file__), 'test_plotting.conf')) - config = metplus_config(extra_configs) - handle_tmp_dir(config) - return StatAnalysisWrapper(config) - - -@pytest.mark.plotting -def test_set_lists_as_loop_or_group(metplus_config): - # Independently test that the lists that are set - # in the config file are being set - # accordingly based on their place - # in GROUP_LIST_ITEMS and LOOP_LIST_ITEMS - # and those not set are set to GROUP_LIST_ITEMS - st = stat_analysis_wrapper(metplus_config) - # Test 1 - expected_lists_to_group_items = ['FCST_INIT_HOUR_LIST', - 'FCST_UNITS_LIST', 'OBS_UNITS_LIST', - 'FCST_THRESH_LIST', 'OBS_THRESH_LIST', - 'DESC_LIST', 'OBS_LEAD_LIST', - 'OBS_VALID_HOUR_LIST', - 'OBS_INIT_HOUR_LIST', - 'INTERP_MTHD_LIST', 'INTERP_PNTS_LIST', - 'COV_THRESH_LIST', 'ALPHA_LIST', - 'LINE_TYPE_LIST'] - expected_lists_to_loop_items = ['FCST_VALID_HOUR_LIST', 'MODEL_LIST', - 'FCST_VAR_LIST', 'OBS_VAR_LIST', - 'FCST_LEVEL_LIST', 'OBS_LEVEL_LIST', - 'FCST_LEAD_LIST', 'VX_MASK_LIST'] - - config_dict = {} - config_dict['LOOP_ORDER'] = 'processes' - config_dict['OUTPUT_BASE_DIR'] = 'OUTPUT_BASE/stat_analysis' - config_dict['GROUP_LIST_ITEMS'] = ['FCST_INIT_HOUR_LIST'] - config_dict['LOOP_LIST_ITEMS'] = ['FCST_VALID_HOUR_LIST'] - config_dict['FCST_VAR_LIST'] = ['HGT'] - config_dict['OBS_VAR_LIST'] = ['HGT'] - config_dict['FCST_LEVEL_LIST'] = ['P1000', 'P500'] - config_dict['OBS_LEVEL_LIST'] = ['P1000', 'P500'] - config_dict['FCST_UNITS_LIST'] = [] - config_dict['OBS_UNITS_LIST'] = [] - config_dict['FCST_THRESH_LIST'] = [] - config_dict['OBS_THRESH_LIST'] = [] - config_dict['MODEL_LIST'] = ['MODEL_TEST1', 'MODEL_TEST2'] - config_dict['DESC_LIST'] = [] - config_dict['FCST_LEAD_LIST'] = ['24', '48'] - config_dict['OBS_LEAD_LIST'] = [] - config_dict['FCST_VALID_HOUR_LIST'] = ['00', '06', '12', '18'] - config_dict['FCST_INIT_HOUR_LIST'] = ['00', '06', '12', '18'] - config_dict['OBS_VALID_HOUR_LIST'] = [] - config_dict['OBS_INIT_HOUR_LIST'] = [] - config_dict['VX_MASK_LIST'] = ['NHX'] - config_dict['INTERP_MTHD_LIST'] = [] - config_dict['INTERP_PNTS_LIST'] = [] - config_dict['COV_THRESH_LIST'] = [] - config_dict['ALPHA_LIST'] = [] - config_dict['LINE_TYPE_LIST'] = ['SL1L2', 'VL1L2'] - - config_dict = st.set_lists_loop_or_group(config_dict) - - test_lists_to_loop_items = config_dict['LOOP_LIST_ITEMS'] - test_lists_to_group_items = config_dict['GROUP_LIST_ITEMS'] - - assert (all(elem in expected_lists_to_group_items - for elem in test_lists_to_group_items)) - assert (all(elem in expected_lists_to_loop_items - for elem in test_lists_to_loop_items)) - - -@pytest.mark.plotting -def test_get_output_filename(metplus_config): - # Independently test the building of - # the output file name - # using string template substitution# - # and test the values is - # as expected - st = stat_analysis_wrapper(metplus_config) - config_dict = {} - config_dict['FCST_VALID_HOUR'] = '000000' - config_dict['FCST_VAR'] = '"HGT"' - config_dict['FCST_LEVEL'] = '"P1000"' - config_dict['INTERP_MTHD'] = '' - config_dict['MODEL'] = '"MODEL_TEST"' - config_dict['VX_MASK'] = '"NHX"' - config_dict['OBS_INIT_HOUR'] = '' - config_dict['COV_THRESH'] = '' - config_dict['OBS_UNITS'] = '' - config_dict['FCST_THRESH'] = '' - config_dict['OBS_VAR'] = '"HGT"' - 
config_dict['FCST_INIT_HOUR'] = '"000000", "060000", "120000", "180000"' - config_dict['INTERP_PNTS'] = '' - config_dict['FCST_LEAD'] = '"240000"' - config_dict['LINE_TYPE'] = '' - config_dict['FCST_UNITS'] = '' - config_dict['DESC'] = '' - config_dict['OBS_LEAD'] = '' - config_dict['OBS_THRESH'] = '' - config_dict['OBTYPE'] = '"MODEL_TEST_ANL"' - config_dict['OBS_VALID_HOUR'] = '' - config_dict['ALPHA'] = '' - config_dict['OBS_LEVEL'] = '"P1000"' - st.c_dict['DATE_BEG'] = '20190101' - st.c_dict['DATE_END'] = '20190101' - st.c_dict['DATE_TYPE'] = 'VALID' - - # Test 1 - lists_to_group = ['FCST_INIT_HOUR_LIST', 'FCST_UNITS_LIST', - 'OBS_UNITS_LIST', 'FCST_THRESH_LIST', - 'OBS_THRESH_LIST', 'DESC_LIST', 'OBS_LEAD_LIST', - 'OBS_VALID_HOUR_LIST', 'OBS_INIT_HOUR_LIST', - 'INTERP_MTHD_LIST', 'INTERP_PNTS_LIST', - 'COV_THRESH_LIST', 'ALPHA_LIST', 'LINE_TYPE_LIST'] - lists_to_loop = ['FCST_VALID_HOUR_LIST', 'MODEL_LIST', - 'FCST_VAR_LIST', 'OBS_VAR_LIST', - 'FCST_LEVEL_LIST', 'OBS_LEVEL_LIST', - 'FCST_LEAD_LIST', 'VX_MASK_LIST'] - expected_output_filename = ( - 'MODEL_TEST_MODEL_TEST_ANL_valid20190101to20190101_valid0000to0000Z' - + '_init0000to1800Z_fcst_lead240000_fcstHGTP1000_obsHGTP1000_vxmaskNHX' - + '_dump_row.stat' - ) - output_type = 'dump_row' - filename_template = ( - '{model?fmt=%s}_{obtype?fmt=%s}_valid{valid_beg?fmt=%Y%m%d}' - 'to{valid_end?fmt=%Y%m%d}_valid{valid_hour_beg?fmt=%H%M}to' - '{valid_hour_end?fmt=%H%M}Z_init{init_hour_beg?fmt=%H%M}to' - '{init_hour_end?fmt=%H%M}Z_fcst_lead{fcst_lead?fmt=%s}_' - 'fcst{fcst_var?fmt=%s}{fcst_level?fmt=%s}{fcst_thresh?fmt=%s}' - '{interp_mthd?fmt=%s}_obs{obs_var?fmt=%s}{obs_level?fmt=%s}' - '{obs_thresh?fmt=%s}{interp_mthd?fmt=%s}_vxmask{vx_mask?fmt=%s}' - '_dump_row.stat' - - ) - filename_type = 'user' - test_output_filename = st.get_output_filename(output_type, - filename_template, - filename_type, - lists_to_loop, - lists_to_group, - config_dict) - assert expected_output_filename == test_output_filename - - -@pytest.mark.plotting -def test_filter_for_plotting(metplus_config): - # Test running of stat_analysis - st = stat_analysis_wrapper(metplus_config) - - # clear output directory for next run - output_dir = st.config.getdir('OUTPUT_BASE') + '/plotting/stat_analysis' - output_files = glob.glob(os.path.join(output_dir, '*')) - for output_file in output_files: - os.remove(output_file) - - # Test 1 - expected_filename1 = ( - st.config.getdir('OUTPUT_BASE')+'/plotting/stat_analysis' - +'/MODEL_TEST1_MODEL_TEST1_ANL_valid20190101to20190101_valid0000to0000Z' - +'_init0000to1800Z_fcst_lead240000_fcstHGTP1000_obsHGTP1000_vxmaskNHX' - +'_dump_row.stat' - ) - expected_filename2 = ( - st.config.getdir('OUTPUT_BASE')+'/plotting/stat_analysis' - +'/MODEL_TEST1_MODEL_TEST1_ANL_valid20190101to20190101_valid0000to0000Z' - +'_init0000to1800Z_fcst_lead240000_fcstHGTP850_obsHGTP850_vxmaskNHX' - +'_dump_row.stat' - ) - expected_filename3 = ( - st.config.getdir('OUTPUT_BASE')+'/plotting/stat_analysis' - +'/MODEL_TEST1_MODEL_TEST1_ANL_valid20190101to20190101_valid0000to0000Z' - +'_init0000to1800Z_fcst_lead480000_fcstHGTP1000_obsHGTP1000_vxmaskNHX' - +'_dump_row.stat' - ) - expected_filename4 = ( - st.config.getdir('OUTPUT_BASE')+'/plotting/stat_analysis' - +'/MODEL_TEST1_MODEL_TEST1_ANL_valid20190101to20190101_valid0000to0000Z' - +'_init0000to1800Z_fcst_lead480000_fcstHGTP850_obsHGTP850_vxmaskNHX' - +'_dump_row.stat' - ) - expected_filename5 = ( - st.config.getdir('OUTPUT_BASE')+'/plotting/stat_analysis' - 
+'/MODEL_TEST1_MODEL_TEST1_ANL_valid20190101to20190101_valid0600to0600Z' - +'_init0000to1800Z_fcst_lead240000_fcstHGTP1000_obsHGTP1000_vxmaskNHX' - +'_dump_row.stat' - ) - expected_filename6 = ( - st.config.getdir('OUTPUT_BASE')+'/plotting/stat_analysis' - +'/MODEL_TEST1_MODEL_TEST1_ANL_valid20190101to20190101_valid0600to0600Z' - +'_init0000to1800Z_fcst_lead240000_fcstHGTP850_obsHGTP850_vxmaskNHX' - +'_dump_row.stat' - ) - expected_filename7 = ( - st.config.getdir('OUTPUT_BASE')+'/plotting/stat_analysis' - +'/MODEL_TEST1_MODEL_TEST1_ANL_valid20190101to20190101_valid0600to0600Z' - +'_init0000to1800Z_fcst_lead480000_fcstHGTP1000_obsHGTP1000_vxmaskNHX' - +'_dump_row.stat' - ) - expected_filename8 = ( - st.config.getdir('OUTPUT_BASE')+'/plotting/stat_analysis' - +'/MODEL_TEST1_MODEL_TEST1_ANL_valid20190101to20190101_valid0600to0600Z' - +'_init0000to1800Z_fcst_lead480000_fcstHGTP850_obsHGTP850_vxmaskNHX' - +'_dump_row.stat' - ) - expected_filename9 = ( - st.config.getdir('OUTPUT_BASE')+'/plotting/stat_analysis' - +'/MODEL_TEST1_MODEL_TEST1_ANL_valid20190101to20190101_valid1200to1200Z' - +'_init0000to1800Z_fcst_lead240000_fcstHGTP1000_obsHGTP1000_vxmaskNHX' - +'_dump_row.stat' - ) - expected_filename10 = ( - st.config.getdir('OUTPUT_BASE')+'/plotting/stat_analysis' - +'/MODEL_TEST1_MODEL_TEST1_ANL_valid20190101to20190101_valid1200to1200Z' - +'_init0000to1800Z_fcst_lead240000_fcstHGTP850_obsHGTP850_vxmaskNHX' - +'_dump_row.stat' - ) - expected_filename11 = ( - st.config.getdir('OUTPUT_BASE')+'/plotting/stat_analysis' - +'/MODEL_TEST1_MODEL_TEST1_ANL_valid20190101to20190101_valid1200to1200Z' - +'_init0000to1800Z_fcst_lead480000_fcstHGTP1000_obsHGTP1000_vxmaskNHX' - +'_dump_row.stat' - ) - expected_filename12 = ( - st.config.getdir('OUTPUT_BASE')+'/plotting/stat_analysis' - +'/MODEL_TEST1_MODEL_TEST1_ANL_valid20190101to20190101_valid1200to1200Z' - +'_init0000to1800Z_fcst_lead480000_fcstHGTP850_obsHGTP850_vxmaskNHX' - +'_dump_row.stat' - ) - expected_filename13 = ( - st.config.getdir('OUTPUT_BASE')+'/plotting/stat_analysis' - +'/MODEL_TEST1_MODEL_TEST1_ANL_valid20190101to20190101_valid1800to1800Z' - +'_init0000to1800Z_fcst_lead240000_fcstHGTP1000_obsHGTP1000_vxmaskNHX' - +'_dump_row.stat' - ) - expected_filename14 = ( - st.config.getdir('OUTPUT_BASE')+'/plotting/stat_analysis' - +'/MODEL_TEST1_MODEL_TEST1_ANL_valid20190101to20190101_valid1800to1800Z' - +'_init0000to1800Z_fcst_lead240000_fcstHGTP850_obsHGTP850_vxmaskNHX' - +'_dump_row.stat' - ) - expected_filename15 = ( - st.config.getdir('OUTPUT_BASE')+'/plotting/stat_analysis' - +'/MODEL_TEST1_MODEL_TEST1_ANL_valid20190101to20190101_valid1800to1800Z' - +'_init0000to1800Z_fcst_lead480000_fcstHGTP1000_obsHGTP1000_vxmaskNHX' - +'_dump_row.stat' - ) - expected_filename16 = ( - st.config.getdir('OUTPUT_BASE')+'/plotting/stat_analysis' - +'/MODEL_TEST1_MODEL_TEST1_ANL_valid20190101to20190101_valid1800to1800Z' - +'_init0000to1800Z_fcst_lead480000_fcstHGTP850_obsHGTP850_vxmaskNHX' - +'_dump_row.stat' - ) - expected_filename17 = ( - st.config.getdir('OUTPUT_BASE')+'/plotting/stat_analysis' - +'/TEST2_MODEL_ANLYS2_valid20190101to20190101_valid0000to0000Z' - +'_init0000to1800Z_fcst_lead240000_fcstHGTP1000_obsHGTP1000_vxmaskNHX' - +'_dump_row.stat' - ) - expected_filename18 = ( - st.config.getdir('OUTPUT_BASE')+'/plotting/stat_analysis' - +'/TEST2_MODEL_ANLYS2_valid20190101to20190101_valid0000to0000Z' - +'_init0000to1800Z_fcst_lead240000_fcstHGTP850_obsHGTP850_vxmaskNHX' - +'_dump_row.stat' - ) - expected_filename19 = ( - 
st.config.getdir('OUTPUT_BASE')+'/plotting/stat_analysis' - +'/TEST2_MODEL_ANLYS2_valid20190101to20190101_valid0000to0000Z' - +'_init0000to1800Z_fcst_lead480000_fcstHGTP1000_obsHGTP1000_vxmaskNHX' - +'_dump_row.stat' - ) - expected_filename20 = ( - st.config.getdir('OUTPUT_BASE')+'/plotting/stat_analysis' - +'/TEST2_MODEL_ANLYS2_valid20190101to20190101_valid0000to0000Z' - +'_init0000to1800Z_fcst_lead480000_fcstHGTP850_obsHGTP850_vxmaskNHX' - +'_dump_row.stat' - ) - expected_filename21 = ( - st.config.getdir('OUTPUT_BASE')+'/plotting/stat_analysis' - +'/TEST2_MODEL_ANLYS2_valid20190101to20190101_valid0600to0600Z' - +'_init0000to1800Z_fcst_lead240000_fcstHGTP1000_obsHGTP1000_vxmaskNHX' - +'_dump_row.stat' - ) - expected_filename22 = ( - st.config.getdir('OUTPUT_BASE')+'/plotting/stat_analysis' - +'/TEST2_MODEL_ANLYS2_valid20190101to20190101_valid0600to0600Z' - +'_init0000to1800Z_fcst_lead240000_fcstHGTP850_obsHGTP850_vxmaskNHX' - +'_dump_row.stat' - ) - expected_filename23 = ( - st.config.getdir('OUTPUT_BASE')+'/plotting/stat_analysis' - +'/TEST2_MODEL_ANLYS2_valid20190101to20190101_valid0600to0600Z' - +'_init0000to1800Z_fcst_lead480000_fcstHGTP1000_obsHGTP1000_vxmaskNHX' - +'_dump_row.stat' - ) - expected_filename24 = ( - st.config.getdir('OUTPUT_BASE')+'/plotting/stat_analysis' - +'/TEST2_MODEL_ANLYS2_valid20190101to20190101_valid0600to0600Z' - +'_init0000to1800Z_fcst_lead480000_fcstHGTP850_obsHGTP850_vxmaskNHX' - +'_dump_row.stat' - ) - expected_filename25 = ( - st.config.getdir('OUTPUT_BASE')+'/plotting/stat_analysis' - +'/TEST2_MODEL_ANLYS2_valid20190101to20190101_valid1200to1200Z' - +'_init0000to1800Z_fcst_lead240000_fcstHGTP1000_obsHGTP1000_vxmaskNHX' - +'_dump_row.stat' - ) - expected_filename26 = ( - st.config.getdir('OUTPUT_BASE')+'/plotting/stat_analysis' - +'/TEST2_MODEL_ANLYS2_valid20190101to20190101_valid1200to1200Z' - +'_init0000to1800Z_fcst_lead240000_fcstHGTP850_obsHGTP850_vxmaskNHX' - +'_dump_row.stat' - ) - expected_filename27 = ( - st.config.getdir('OUTPUT_BASE')+'/plotting/stat_analysis' - +'/TEST2_MODEL_ANLYS2_valid20190101to20190101_valid1200to1200Z' - +'_init0000to1800Z_fcst_lead480000_fcstHGTP1000_obsHGTP1000_vxmaskNHX' - +'_dump_row.stat' - ) - expected_filename28 = ( - st.config.getdir('OUTPUT_BASE')+'/plotting/stat_analysis' - +'/TEST2_MODEL_ANLYS2_valid20190101to20190101_valid1200to1200Z' - +'_init0000to1800Z_fcst_lead480000_fcstHGTP850_obsHGTP850_vxmaskNHX' - +'_dump_row.stat' - ) - expected_filename29 = ( - st.config.getdir('OUTPUT_BASE')+'/plotting/stat_analysis' - +'/TEST2_MODEL_ANLYS2_valid20190101to20190101_valid1800to1800Z' - +'_init0000to1800Z_fcst_lead240000_fcstHGTP1000_obsHGTP1000_vxmaskNHX' - +'_dump_row.stat' - ) - expected_filename30 = ( - st.config.getdir('OUTPUT_BASE')+'/plotting/stat_analysis' - +'/TEST2_MODEL_ANLYS2_valid20190101to20190101_valid1800to1800Z' - +'_init0000to1800Z_fcst_lead240000_fcstHGTP850_obsHGTP850_vxmaskNHX' - +'_dump_row.stat' - ) - expected_filename31 = ( - st.config.getdir('OUTPUT_BASE')+'/plotting/stat_analysis' - +'/TEST2_MODEL_ANLYS2_valid20190101to20190101_valid1800to1800Z' - +'_init0000to1800Z_fcst_lead480000_fcstHGTP1000_obsHGTP1000_vxmaskNHX' - +'_dump_row.stat' - ) - expected_filename32 = ( - st.config.getdir('OUTPUT_BASE')+'/plotting/stat_analysis' - +'/TEST2_MODEL_ANLYS2_valid20190101to20190101_valid1800to1800Z' - +'_init0000to1800Z_fcst_lead480000_fcstHGTP850_obsHGTP850_vxmaskNHX' - +'_dump_row.stat' - ) - expected_filename_list = [ expected_filename1, expected_filename2, - expected_filename3, expected_filename4, 
- expected_filename5, expected_filename6, - expected_filename7, expected_filename8, - expected_filename9, expected_filename10, - expected_filename11, expected_filename12, - expected_filename13, expected_filename14, - expected_filename15, expected_filename16, - expected_filename17, expected_filename18, - expected_filename19, expected_filename20, - expected_filename21, expected_filename22, - expected_filename23, expected_filename24, - expected_filename25, expected_filename26, - expected_filename27, expected_filename28, - expected_filename29, expected_filename30, - expected_filename31, expected_filename32 ] - st.c_dict['DATE_TYPE'] = 'VALID' - st.c_dict['VALID_BEG'] = '20190101' - st.c_dict['VALID_END'] = '20190101' - st.c_dict['INIT_BEG'] = '' - st.c_dict['INIT_END'] = '' - st.c_dict['DATE_BEG'] = st.c_dict['VALID_BEG'] - st.c_dict['DATE_END'] = st.c_dict['VALID_END'] - - st.run_stat_analysis() - ntest_files = len( - os.listdir(st.config.getdir('OUTPUT_BASE') - +'/plotting/stat_analysis') - ) - assert ntest_files == 32 - for expected_filename in expected_filename_list: - assert os.path.exists(expected_filename) diff --git a/metplus/util/config_metplus.py b/metplus/util/config_metplus.py index 36a977be1..a42324cdb 100644 --- a/metplus/util/config_metplus.py +++ b/metplus/util/config_metplus.py @@ -1479,17 +1479,19 @@ def find_indices_in_config_section(regex, config, sec='config', regex = re.compile(regex) for conf in all_conf: result = regex.match(conf) - if result is not None: - index = result.group(index_index) - if id_index: - identifier = result.group(id_index) - else: - identifier = None + if result is None: + continue - if index not in indices: - indices[index] = [identifier] - else: - indices[index].append(identifier) + index = result.group(index_index) + if id_index: + identifier = result.group(id_index) + else: + identifier = None + + if index not in indices: + indices[index] = [identifier] + else: + indices[index].append(identifier) return indices @@ -1775,11 +1777,6 @@ def get_process_list(config): "may be invalid.") wrapper_name = process_name - # if MakePlots is in process list, remove it because - # it will be called directly from StatAnalysis - if wrapper_name == 'MakePlots': - continue - out_process_list.append((wrapper_name, instance)) return out_process_list diff --git a/metplus/util/constants.py b/metplus/util/constants.py index e73408a2f..e56f9def5 100644 --- a/metplus/util/constants.py +++ b/metplus/util/constants.py @@ -102,3 +102,9 @@ 'SCRUB_STAGING_DIR', 'MET_BIN_DIR', ] + +# datetime year month day (YYYYMMDD) notation +YMD = '%Y%m%d' + +# datetime year month day hour minute second (YYYYMMDD_HHMMSS) notation +YMD_HMS = '%Y%m%d_%H%M%S' diff --git a/metplus/util/met_util.py b/metplus/util/met_util.py index 32eecab66..d9fb9b6c5 100644 --- a/metplus/util/met_util.py +++ b/metplus/util/met_util.py @@ -895,17 +895,6 @@ def get_threshold_via_regex(thresh_string): return comparison_number_list -def comparison_to_letter_format(expression): - """! Convert comparison operator to the letter version if it is not already - @args expression string starting with comparison operator to - convert, i.e. gt3 or <=5.4 - @returns letter comparison operator, i.e. 
gt3 or le5.4 or None if invalid - """ - for symbol_comp, letter_comp in VALID_COMPARISONS.items(): - if letter_comp in expression or symbol_comp in expression: - return expression.replace(symbol_comp, letter_comp) - - return None def validate_thresholds(thresh_list): """ Checks list of thresholds to ensure all of them have the correct format diff --git a/metplus/util/string_manip.py b/metplus/util/string_manip.py index c0d1042f7..2779fa7eb 100644 --- a/metplus/util/string_manip.py +++ b/metplus/util/string_manip.py @@ -7,6 +7,9 @@ import re from csv import reader +from .constants import VALID_COMPARISONS + + def remove_quotes(input_string): """!Remove quotes from string""" if not input_string: @@ -15,6 +18,7 @@ def remove_quotes(input_string): # strip off double and single quotes return input_string.strip('"').strip("'") + def getlist(list_str, expand_begin_end_incr=True): """! Returns a list of string elements from a comma separated string of values. @@ -60,6 +64,7 @@ def getlist(list_str, expand_begin_end_incr=True): return item_list + def getlistint(list_str): """! Get list and convert all values to int @@ -88,6 +93,7 @@ def _handle_begin_end_incr(list_str): return list_str + def _begin_end_incr_findall(list_str): """! Find all instances of begin_end_incr in list string @@ -106,6 +112,7 @@ def _begin_end_incr_findall(list_str): list_str ) + def _begin_end_incr_evaluate(item): """! Expand begin_end_incr() items into a list of values @@ -143,6 +150,7 @@ def _begin_end_incr_evaluate(item): return None + def _fix_list(item_list): """! The logic that calls this function may have incorrectly split up a string that contains commas within quotation marks. This function @@ -182,3 +190,62 @@ def _fix_list(item_list): out_list.append(item) return out_list + + +def list_to_str(list_of_values, add_quotes=True): + """! Turn a list of values into a single string + + @param list_of_values list of values, i.e. ['value1', 'value2'] + @param add_quotes if True, add quotation marks around values, + default is True + + @returns string created from list_of_values with the values separated + by commas, i.e. '"value1", "value2"' or 1, 3 if add_quotes is False + """ + # return empty string if list is empty + if not list_of_values: + return '' + + if add_quotes: + # remove any quotes that are already around items, then add quotes + values = [remove_quotes(item) for item in list_of_values] + return '"' + '", "'.join(values) + '"' + + return ', '.join(list_of_values) + + +def comparison_to_letter_format(expression): + """! Convert comparison operator to the letter version if it is not already + + @param expression string starting with comparison operator to convert, + i.e. gt3 or <=5.4 + @returns letter comparison operator, i.e. gt3 or le5.4 or None if invalid + """ + for symbol_comp, letter_comp in VALID_COMPARISONS.items(): + if letter_comp in expression or symbol_comp in expression: + return expression.replace(symbol_comp, letter_comp) + + return None + + +def format_thresh(thresh_str): + """! Format thresholds for file naming + + @param thresh_str string of the thresholds. + Can be a comma-separated list, i.e. gt3,<=5.5, ==7 + + @returns string of comma-separated list of the threshold(s) with + letter format, i.e. 
gt3,le5.5,eq7 + """ + formatted_thresh_list = [] + # separate thresholds by comma and strip off whitespace around values + thresh_list = [thresh.strip() for thresh in thresh_str.split(',')] + for thresh in thresh_list: + if not thresh: + continue + + thresh_letter = comparison_to_letter_format(thresh) + if thresh_letter: + formatted_thresh_list.append(thresh_letter) + + return ','.join(formatted_thresh_list) diff --git a/metplus/util/string_template_substitution.py b/metplus/util/string_template_substitution.py index d389075e8..b5d1b9e4d 100644 --- a/metplus/util/string_template_substitution.py +++ b/metplus/util/string_template_substitution.py @@ -281,7 +281,7 @@ def handle_format_delimiter(split_string, idx, shift_seconds, truncate_seconds, seconds = time_util.ti_get_seconds_from_relativedelta(obj) if seconds is None: return time_util.ti_get_lead_string(obj, letter_only=True) - + seconds += shift_seconds return format_hms(fmt, seconds) # if input is integer, format with H, M, and S diff --git a/metplus/util/time_looping.py b/metplus/util/time_looping.py index ce0b036ac..2b4cdb2cd 100644 --- a/metplus/util/time_looping.py +++ b/metplus/util/time_looping.py @@ -12,7 +12,7 @@ def time_generator(config): Yields the next run time dictionary or None if something went wrong """ # determine INIT or VALID prefix - prefix = _get_time_prefix(config) + prefix = get_time_prefix(config) if not prefix: yield None return @@ -83,7 +83,7 @@ def time_generator(config): current_dt += time_interval def get_start_and_end_times(config): - prefix = _get_time_prefix(config) + prefix = get_time_prefix(config) if not prefix: return None, None @@ -150,7 +150,7 @@ def _create_time_input_dict(prefix, current_dt, clock_dt): 'today': clock_dt.strftime('%Y%m%d'), } -def _get_time_prefix(config): +def get_time_prefix(config): """! Read the METplusConfig object and determine the prefix for the time looping variables. diff --git a/metplus/util/time_util.py b/metplus/util/time_util.py index 4eb180f30..e1bd4b1f9 100755 --- a/metplus/util/time_util.py +++ b/metplus/util/time_util.py @@ -32,6 +32,7 @@ 'S': 'second', } + def get_relativedelta(value, default_unit='S'): """!Converts time values ending in Y, m, d, H, M, or S to relativedelta object Args: @@ -80,6 +81,7 @@ def get_relativedelta(value, default_unit='S'): # unsupported time unit specified, return None return None + def get_seconds_from_string(value, default_unit='S', valid_time=None): """!Convert string of time (optionally ending with time letter, i.e. HMSyMD to seconds Args: @@ -89,13 +91,15 @@ def get_seconds_from_string(value, default_unit='S', valid_time=None): rd_obj = get_relativedelta(value, default_unit) return ti_get_seconds_from_relativedelta(rd_obj, valid_time) -def time_string_to_met_time(time_string, default_unit='S'): + +def time_string_to_met_time(time_string, default_unit='S', force_hms=False): """!Convert time string (3H, 4M, 7, etc.) 
to format expected by the MET tools ([H]HH[MM[SS]])""" total_seconds = get_seconds_from_string(time_string, default_unit) - return seconds_to_met_time(total_seconds) + return seconds_to_met_time(total_seconds, force_hms=force_hms) + -def seconds_to_met_time(total_seconds): +def seconds_to_met_time(total_seconds, force_hms=False): seconds_time_string = str(total_seconds % 60).zfill(2) minutes_time_string = str(total_seconds // 60 % 60).zfill(2) hour_time_string = str(total_seconds // 3600).zfill(2) @@ -103,11 +107,13 @@ def seconds_to_met_time(total_seconds): # if hour is 6 or more digits, we need to add minutes and seconds # also if minutes and/or seconds they are defined # add minutes if seconds are defined as well - if len(hour_time_string) > 5 or minutes_time_string != '00' or seconds_time_string != '00': + if (force_hms or len(hour_time_string) > 5 or + minutes_time_string != '00' or seconds_time_string != '00'): return hour_time_string + minutes_time_string + seconds_time_string else: return hour_time_string + def ti_get_hours_from_relativedelta(lead, valid_time=None): """! Get hours from relativedelta. Simply calls get seconds function and divides the result by 3600. @@ -128,6 +134,7 @@ def ti_get_hours_from_relativedelta(lead, valid_time=None): return lead_seconds // 3600 + def ti_get_seconds_from_relativedelta(lead, valid_time=None): """!Check relativedelta object contents and compute the total number of seconds in the time. Return None if years or months are set, because the exact number @@ -160,6 +167,7 @@ def ti_get_seconds_from_relativedelta(lead, valid_time=None): return total_seconds + def ti_get_seconds_from_lead(lead, valid='*'): if isinstance(lead, int): return lead @@ -171,6 +179,7 @@ def ti_get_seconds_from_lead(lead, valid='*'): return ti_get_seconds_from_relativedelta(lead, valid_time) + def ti_get_hours_from_lead(lead, valid='*'): lead_seconds = ti_get_seconds_from_lead(lead, valid) if lead_seconds is None: @@ -178,12 +187,14 @@ def ti_get_hours_from_lead(lead, valid='*'): return lead_seconds // 3600 + def get_time_suffix(letter, letter_only): if letter_only: return letter return f" {TIME_LETTER_TO_STRING[letter]} " + def format_time_string(lead, letter, plural, letter_only): if letter == 'Y': value = lead.years @@ -211,6 +222,7 @@ def format_time_string(lead, letter, plural, letter_only): return output + def ti_get_lead_string(lead, plural=True, letter_only=False): """!Check relativedelta object contents and create string representation of the highest unit available (year, then, month, day, hour, minute, second). @@ -249,6 +261,64 @@ def ti_get_lead_string(lead, plural=True, letter_only=False): return f"{negative}{output}" + +def get_met_time_list(string_value, sort_list=True): + """! Convert a string into a list of strings in MET time format HHMMSS. + + @param string_value input string to parse + @param sort_list If True, sort the list values. If False, skip sorting. + Default is True. + @returns list of strings with MET times + """ + return _format_time_list(string_value, get_met_format=True, + sort_list=sort_list) + + +def get_delta_list(string_value, sort_list=True): + """! Convert a string into a list of relativedelta objects. + + @param string_value input string to parse + @param sort_list If True, sort the list values. If False, skip sorting. + Default is True. 
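A minimal usage sketch of the two list helpers added here (illustrative only; it assumes the metplus package from this patch is importable and relies on the default_unit='H' parsing and force_hms behavior shown in _format_time_list):

    from metplus.util.time_util import get_met_time_list, get_delta_list

    # values are parsed as hours by default, converted to HHMMSS strings,
    # and sorted numerically
    print(get_met_time_list('12, 6'))   # ['060000', '120000']

    # the relativedelta variant keeps the offsets as objects for time math
    print(get_delta_list('6, 12'))      # [relativedelta(hours=+6), relativedelta(hours=+12)]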
+ @returns list of relativedelta objects + """ + return _format_time_list(string_value, get_met_format=False, + sort_list=sort_list) + + +def _format_time_list(string_value, get_met_format, sort_list=True): + """! Helper function to convert a string into a list of times. + + @param string_value input string to parse + @param get_met_format If True, format the items in MET time format HHMMSS. + If False, format each item as a relativedelta object + @param sort_list If True, sort the list values. If False, skip sorting. + Default is True. + @returns list of either strings with MET times or relativedelta objects + """ + out_list = [] + if not string_value: + return [] + + for time_string in string_value.split(','): + time_string = time_string.strip() + if get_met_format: + value = time_string_to_met_time(time_string, default_unit='H', + force_hms=True) + out_list.append(value) + else: + delta_obj = get_relativedelta(time_string, default_unit='H') + out_list.append(delta_obj) + + if sort_list: + if get_met_format: + out_list.sort(key=int) + else: + out_list.sort(key=ti_get_seconds_from_relativedelta) + + return out_list + + def ti_calculate(input_dict_preserve): out_dict = {} input_dict = input_dict_preserve.copy() diff --git a/metplus/wrappers/__init__.py b/metplus/wrappers/__init__.py index 7f29a2ef4..2b556ebd4 100644 --- a/metplus/wrappers/__init__.py +++ b/metplus/wrappers/__init__.py @@ -9,7 +9,6 @@ plotting_wrappers = [ 'tcmpr_plotter_wrapper', 'cyclone_plotter_wrapper', - 'make_plots_wrapper', ] # import classes that other wrappers import diff --git a/metplus/wrappers/make_plots_wrapper.py b/metplus/wrappers/make_plots_wrapper.py deleted file mode 100755 index 88ba564fb..000000000 --- a/metplus/wrappers/make_plots_wrapper.py +++ /dev/null @@ -1,306 +0,0 @@ -''' -Program Name: make_plots_wrapper.py -Contact(s): Mallory Row -Abstract: Reads filtered files from stat_analysis_wrapper run_all_times to make plots -History Log: Fourth version -Usage: make_plots_wrapper.py -Parameters: None -Input Files: MET .stat files -Output Files: .png images -Condition codes: 0 for success, 1 for failure -''' - -import logging -import os -import copy -import re -import subprocess -import datetime -import itertools - -from ..util import getlist -from ..util import met_util as util -from ..util import parse_var_list -from . import CommandBuilder - -# handle if module can't be loaded to run wrapper -WRAPPER_CANNOT_RUN = False -EXCEPTION_ERR = '' -try: - from ush.plotting_scripts import plot_util -except Exception as err_msg: - WRAPPER_CANNOT_RUN = True - EXCEPTION_ERR = err_msg - -class MakePlotsWrapper(CommandBuilder): - """! 
Wrapper to used to filter make plots from MET data - """ - accepted_verif_lists = { - 'grid2grid': { - 'pres': ['plot_time_series.py', - 'plot_lead_average.py', - 'plot_date_by_level.py', - 'plot_lead_by_level.py'], - 'anom': ['plot_time_series.py', - 'plot_lead_average.py', - 'plot_lead_by_date.py'], - 'sfc': ['plot_time_series.py', - 'plot_lead_average.py'], - }, - 'grid2obs': { - 'upper_air': ['plot_time_series.py', - 'plot_lead_average.py', - 'plot_stat_by_level.py', - 'plot_lead_by_level.py'], - 'conus_sfc': ['plot_time_series.py', - 'plot_lead_average.py'], - }, - # precip uses the same scripts for any verif case, so this value - # is a list instead of a dictionary - 'precip': ['plot_time_series.py', - 'plot_lead_average.py', - 'plot_threshold_average.py', - 'plot_threshold_by_lead.py'], - } - - add_from_c_dict_list = [ - 'VERIF_CASE', 'VERIF_TYPE', 'INPUT_BASE_DIR', 'OUTPUT_BASE_DIR', - 'SCRIPTS_BASE_DIR', 'DATE_TYPE', 'VALID_BEG', 'VALID_END', - 'INIT_BEG', 'INIT_END', 'AVERAGE_METHOD', 'CI_METHOD', - 'VERIF_GRID', 'EVENT_EQUALIZATION', 'LOG_METPLUS', 'LOG_LEVEL' - ] - - def __init__(self, config, instance=None): - self.app_path = 'python' - self.app_name = 'make_plots' - super().__init__(config, instance=instance) - - if WRAPPER_CANNOT_RUN: - self.log_error(f"There was a problem importing modules: {EXCEPTION_ERR}\n") - return - - def get_command(self): - - if not self.plotting_script: - self.log_error("No plotting script specified") - return None - - cmd = f"{self.app_path} {self.plotting_script}" - - return cmd - - def create_c_dict(self): - """! Create a data structure (dictionary) that contains all the - values set in the configuration files that are common for - make_plots_wrapper.py. - - Args: - - Returns: - c_dict - a dictionary containing the settings in the - configuration files unique to the wrapper - """ - c_dict = super().create_c_dict() - c_dict['VERBOSITY'] = ( - self.config.getstr('config', 'LOG_MAKE_PLOTS_VERBOSITY', - c_dict['VERBOSITY']) - ) - c_dict['INPUT_BASE_DIR'] = self.config.getdir('MAKE_PLOTS_INPUT_DIR') - c_dict['OUTPUT_BASE_DIR'] = self.config.getdir('MAKE_PLOTS_OUTPUT_DIR') - c_dict['SCRIPTS_BASE_DIR'] = self.config.getdir('MAKE_PLOTS_SCRIPTS_DIR') - c_dict['DATE_TYPE'] = self.config.getstr('config', 'DATE_TYPE') - c_dict['VALID_BEG'] = self.config.getstr('config', 'VALID_BEG', '') - c_dict['VALID_END'] = self.config.getstr('config', 'VALID_END', '') - c_dict['INIT_BEG'] = self.config.getstr('config', 'INIT_BEG', '') - c_dict['INIT_END'] = self.config.getstr('config', 'INIT_END', '') - c_dict['GROUP_LIST_ITEMS'] = getlist( - self.config.getstr('config', 'GROUP_LIST_ITEMS') - ) - c_dict['LOOP_LIST_ITEMS'] = getlist( - self.config.getstr('config', 'LOOP_LIST_ITEMS') - ) - c_dict['VAR_LIST'] = parse_var_list(self.config) - c_dict['MODEL_LIST'] = getlist( - self.config.getstr('config', 'MODEL_LIST', '') - ) - c_dict['DESC_LIST'] = getlist( - self.config.getstr('config', 'DESC_LIST', '') - ) - c_dict['FCST_LEAD_LIST'] = getlist( - self.config.getstr('config', 'FCST_LEAD_LIST', '') - ) - c_dict['OBS_LEAD_LIST'] = getlist( - self.config.getstr('config', 'OBS_LEAD_LIST', '') - ) - c_dict['FCST_VALID_HOUR_LIST'] = getlist( - self.config.getstr('config', 'FCST_VALID_HOUR_LIST', '') - ) - c_dict['FCST_INIT_HOUR_LIST'] = getlist( - self.config.getstr('config', 'FCST_INIT_HOUR_LIST', '') - ) - c_dict['OBS_VALID_HOUR_LIST'] = getlist( - self.config.getstr('config', 'OBS_VALID_HOUR_LIST', '') - ) - c_dict['OBS_INIT_HOUR_LIST'] = getlist( - self.config.getstr('config', 
'OBS_INIT_HOUR_LIST', '') - ) - c_dict['VX_MASK_LIST'] = getlist( - self.config.getstr('config', 'VX_MASK_LIST', '') - ) - c_dict['INTERP_MTHD_LIST'] = getlist( - self.config.getstr('config', 'INTERP_MTHD_LIST', '') - ) - c_dict['INTERP_PNTS_LIST'] = getlist( - self.config.getstr('config', 'INTERP_PNTS_LIST', '') - ) - c_dict['COV_THRESH_LIST'] = getlist( - self.config.getstr('config', 'COV_THRESH_LIST', '') - ) - c_dict['ALPHA_LIST'] = getlist( - self.config.getstr('config', 'ALPHA_LIST', '') - ) - c_dict['LINE_TYPE_LIST'] = getlist( - self.config.getstr('config', 'LINE_TYPE_LIST', '') - ) - c_dict['USER_SCRIPT_LIST'] = getlist( - self.config.getstr('config', 'MAKE_PLOTS_USER_SCRIPT_LIST', '') - ) - c_dict['VERIF_CASE'] = self.config.getstr('config', - 'MAKE_PLOTS_VERIF_CASE', '') - - if c_dict['VERIF_CASE'] not in self.accepted_verif_lists: - self.log_error(self.c_dict['VERIF_CASE'] + " is not an" - + "an accepted MAKE_PLOTS_VERIF_CASE " - + "option. Options are " - + ', '.join(self.accepted_verif_lists.keys())) - - c_dict['VERIF_TYPE'] = self.config.getstr('config', - 'MAKE_PLOTS_VERIF_TYPE', '') - - # if not precip case, check that verif type is an accepted verif type - if c_dict['VERIF_CASE'] != 'precip' and c_dict['VERIF_TYPE'] not in ( - self.accepted_verif_lists.get(c_dict['VERIF_CASE'], []) - ): - print(f"VERIF CASE: {c_dict['VERIF_CASE']}") - accepted_types = self.accepted_verif_lists.get(c_dict['VERIF_CASE']).keys() - self.log_error(f"{c_dict['VERIF_TYPE']} is not " - "an accepted MAKE_PLOTS_VERIF_TYPE " - "option for MAKE_PLOTS_VERIF_CASE " - f"= {c_dict['VERIF_CASE']}. Options " - f"are {', '.join(accepted_types)}") - - if not c_dict['USER_SCRIPT_LIST'] and not(c_dict['VERIF_CASE'] or - c_dict['VERIF_TYPE']): - self.log_error("Please defined either " - "MAKE_PLOTS_VERIF_CASE and " - "MAKE_PLOTS_VERIF_TYPE, or " - "MAKE_PLOTS_USER_SCRIPT_LIST") - - c_dict['STATS_LIST'] = getlist( - self.config.getstr('config', 'MAKE_PLOTS_STATS_LIST', '') - ) - c_dict['AVERAGE_METHOD'] = self.config.getstr( - 'config','MAKE_PLOTS_AVERAGE_METHOD', 'MEAN' - ) - c_dict['CI_METHOD'] = self.config.getstr('config', - 'MAKE_PLOTS_CI_METHOD', - 'NONE') - c_dict['VERIF_GRID'] = self.config.getstr('config', - 'MAKE_PLOTS_VERIF_GRID') - c_dict['EVENT_EQUALIZATION'] = ( - self.config.getstr('config', 'MAKE_PLOTS_EVENT_EQUALIZATION') - ) - c_dict['LOG_METPLUS'] = self.config.getstr('config', 'LOG_METPLUS') - c_dict['LOG_LEVEL'] = self.config.getstr('config', 'LOG_LEVEL') - - # Get MET version used to run stat_analysis - c_dict['MET_VERSION'] = str(self.get_met_version()) - - return c_dict - - def setup_output_base(self): - # Set up output base - output_base_dir = self.c_dict['OUTPUT_BASE_DIR'] - output_base_data_dir = os.path.join(output_base_dir, 'data') - output_base_images_dir = os.path.join(output_base_dir, 'images') - if not os.path.exists(output_base_dir): - util.mkdir_p(output_base_dir) - util.mkdir_p(output_base_data_dir) - util.mkdir_p(output_base_images_dir) - else: - if os.path.exists(output_base_data_dir): - if len(output_base_data_dir) > 0: - for rmfile in os.listdir(output_base_data_dir): - os.remove(os.path.join(output_base_data_dir,rmfile)) - - def get_met_version(self): - stat_analysis_exe = os.path.join(self.config.getdir('MET_BIN_DIR'), - 'stat_analysis') - p = subprocess.Popen([stat_analysis_exe, "--version"], - stdout=subprocess.PIPE) - out, err = p.communicate() - out = out.decode(encoding='utf-8', errors='strict') - for line in out.split('\n'): - if 'MET Version:' in line: - 
met_verison_line = line - met_version_str = ( - met_verison_line.partition('MET Version:')[2].split('V')[1] - ) - if len(met_version_str) == 3: - met_version = float(met_version_str) - else: - met_version = float(met_version_str.rpartition('.')[0]) - - return met_version - - def create_plots(self, runtime_settings_dict_list): - - if self.c_dict['USER_SCRIPT_LIST']: - self.logger.info("Running plots for user specified list of " - "scripts.") - - elif (self.c_dict['VERIF_CASE'] and self.c_dict['VERIF_TYPE']): - self.logger.info("Running plots for VERIF_CASE = " - +self.c_dict['VERIF_CASE']+", " - +"VERIF_TYPE = " - +self.c_dict['VERIF_TYPE']) - - self.setup_output_base() - - if self.c_dict['USER_SCRIPT_LIST']: - scripts_to_run = self.c_dict['USER_SCRIPT_LIST'] - elif self.c_dict['VERIF_CASE'] == 'precip': - scripts_to_run = self.accepted_verif_lists.get(self.c_dict['VERIF_CASE']) - else: - scripts_to_run = self.accepted_verif_lists.get(self.c_dict['VERIF_CASE'])\ - .get(self.c_dict['VERIF_TYPE']) - - # Loop over run settings. - for runtime_settings_dict in runtime_settings_dict_list: - # set environment variables - for name, value in runtime_settings_dict.items(): - self.add_env_var(name, value.replace('"', '')) - - for key in self.add_from_c_dict_list: - if key not in runtime_settings_dict: - self.add_env_var(key, self.c_dict[key].replace('"', '')) - - self.add_env_var('MET_VERSION', self.c_dict['MET_VERSION']) - - # obtype env var is named differently in StatAnalysis wrapper - self.add_env_var('MODEL_OBTYPE', runtime_settings_dict['OBTYPE'].replace('"', '')) - - self.add_env_var('STATS', - ', '.join(self.c_dict['STATS_LIST']).replace('"', '')) - - # send environment variables to logger - self.set_environment_variables() - - for script in scripts_to_run: - self.plotting_script = ( - os.path.join(self.c_dict['SCRIPTS_BASE_DIR'], - script) - ) - - self.build() - self.clear() diff --git a/metplus/wrappers/stat_analysis_wrapper.py b/metplus/wrappers/stat_analysis_wrapper.py index ffde04030..ec6f182d6 100755 --- a/metplus/wrappers/stat_analysis_wrapper.py +++ b/metplus/wrappers/stat_analysis_wrapper.py @@ -1,33 +1,29 @@ -''' +"""! Program Name: stat_analysis_wrapper.py -Contact(s): Mallory Row -Abstract: Runs stat_analysis -History Log: Fourth version -Usage: stat_analysis_wrapper.py -Parameters: None -Input Files: MET STAT files -Output Files: MET STAT files -Condition codes: 0 for success, 1 for failure -''' - -import logging +Contact(s): Mallory Row, George McCabe +Abstract: Builds commands to run stat_analysis +""" + import os -import copy -import re import glob -import datetime +from datetime import datetime import itertools +from dateutil.relativedelta import relativedelta +import copy -from ..util import getlist -from ..util import met_util as util +from ..util import getlist, format_thresh from ..util import do_string_sub, find_indices_in_config_section -from ..util import parse_var_list, remove_quotes -from ..util import get_start_and_end_times +from ..util import parse_var_list, remove_quotes, list_to_str +from ..util import get_start_and_end_times, get_time_prefix +from ..util import ti_get_seconds_from_relativedelta +from ..util import get_met_time_list, get_delta_list +from ..util import YMD, YMD_HMS from . import CommandBuilder + class StatAnalysisWrapper(CommandBuilder): - """! Wrapper to the MET tool stat_analysis which is used to filter - and summarize data from MET's point_stat, grid_stat, + """! 
Wrapper to the MET tool stat_analysis which is used to filter
+    and summarize data from MET's point_stat, grid_stat,
     ensemble_stat, and wavelet_stat
     """

@@ -67,51 +63,57 @@ class StatAnalysisWrapper(CommandBuilder):
         'METPLUS_HSS_EC_VALUE',
     ]

-    field_lists = ['FCST_VAR_LIST',
-                   'OBS_VAR_LIST',
-                   'FCST_UNITS_LIST',
-                   'OBS_UNITS_LIST',
-                   'FCST_THRESH_LIST',
-                   'OBS_THRESH_LIST',
-                   'FCST_LEVEL_LIST',
-                   'OBS_LEVEL_LIST',
-                   ]
-
-    format_lists = ['FCST_VALID_HOUR_LIST',
-                    'FCST_INIT_HOUR_LIST',
-                    'OBS_VALID_HOUR_LIST',
-                    'OBS_INIT_HOUR_LIST',
-                    'FCST_LEAD_LIST',
-                    'OBS_LEAD_LIST',
-                    ]
-
-    expected_config_lists = ['MODEL_LIST',
-                             'DESC_LIST',
-                             'VX_MASK_LIST',
-                             'INTERP_MTHD_LIST',
-                             'INTERP_PNTS_LIST',
-                             'COV_THRESH_LIST',
-                             'ALPHA_LIST',
-                             'LINE_TYPE_LIST',
-                             ] + format_lists + field_lists
-
-    force_group_for_make_plots_lists = ['MODEL_LIST',
-                                        'FCST_LEAD_LIST',
-                                        'OBS_LEAD_LIST',
-                                        'FCST_LEVEL_LIST',
-                                        'OBS_LEVEL_LIST',
-                                        'FCST_THRESH_LIST',
-                                        'OBS_THRESH_LIST',
-                                        'FCST_UNITS_LIST',
-                                        'OBS_UNITS_LIST',
-                                        ]
-
-    list_categories = ['GROUP_LIST_ITEMS', 'LOOP_LIST_ITEMS']
-    list_categories_make_plots = ['GROUP_LIST_ITEMS_MAKE_PLOTS', 'LOOP_LIST_ITEMS_MAKE_PLOTS']
-    # what is the used for? these are not formatted later
-    format_later_list = [
-        'MODEL_LIST', 'FCST_VALID_HOUR_LIST', 'OBS_VALID_HOUR_LIST',
-        'FCST_INIT_HOUR_LIST', 'OBS_INIT_HOUR_LIST'
+    FIELD_LISTS = [
+        'FCST_VAR_LIST',
+        'OBS_VAR_LIST',
+        'FCST_UNITS_LIST',
+        'OBS_UNITS_LIST',
+        'FCST_THRESH_LIST',
+        'OBS_THRESH_LIST',
+        'FCST_LEVEL_LIST',
+        'OBS_LEVEL_LIST',
+    ]
+
+    FORMAT_LISTS = [
+        'FCST_VALID_HOUR_LIST',
+        'FCST_INIT_HOUR_LIST',
+        'OBS_VALID_HOUR_LIST',
+        'OBS_INIT_HOUR_LIST',
+        'FCST_LEAD_LIST',
+        'OBS_LEAD_LIST',
+    ]
+
+    EXPECTED_CONFIG_LISTS = [
+        'MODEL_LIST',
+        'DESC_LIST',
+        'VX_MASK_LIST',
+        'INTERP_MTHD_LIST',
+        'INTERP_PNTS_LIST',
+        'COV_THRESH_LIST',
+        'ALPHA_LIST',
+        'LINE_TYPE_LIST',
+    ] + FORMAT_LISTS + FIELD_LISTS
+
+    LIST_CATEGORIES = ['GROUP_LIST_ITEMS', 'LOOP_LIST_ITEMS']
+
+    STRING_SUB_SPECIAL_KEYS = [
+        'fcst_valid_hour_beg', 'fcst_valid_hour_end',
+        'fcst_init_hour_beg', 'fcst_init_hour_end',
+        'obs_valid_hour_beg', 'obs_valid_hour_end',
+        'obs_init_hour_beg', 'obs_init_hour_end',
+        'valid_hour', 'valid_hour_beg', 'valid_hour_end',
+        'init_hour', 'init_hour_beg', 'init_hour_end',
+        'fcst_valid', 'fcst_valid_beg', 'fcst_valid_end',
+        'fcst_init', 'fcst_init_beg', 'fcst_init_end',
+        'obs_valid', 'obs_valid_beg', 'obs_valid_end',
+        'obs_init', 'obs_init_beg', 'obs_init_end',
+        'valid', 'valid_beg', 'valid_end',
+        'init', 'init_beg', 'init_end',
+        'fcst_lead_hour', 'fcst_lead_min',
+        'fcst_lead_sec', 'fcst_lead_totalsec',
+        'obs_lead_hour', 'obs_lead_min',
+        'obs_lead_sec', 'obs_lead_totalsec',
+        'lead', 'lead_hour', 'lead_min', 'lead_sec', 'lead_totalsec'
     ]

     def __init__(self, config, instance=None):
@@ -121,37 +123,22 @@ def __init__(self, config, instance=None):
         super().__init__(config, instance=instance)

     def get_command(self):
+        """! Build command to run. It is assumed that any errors preventing a
+        successful run will have prevented this function from being called.
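As a rough illustration of the string the rewritten get_command now returns (a minimal sketch with hypothetical paths and values; the real app_path, verbosity, lookin dir, and args come from the wrapper's config and c_dict):

    # mirrors the f-string return added to get_command below
    app_path = '/path/to/met/bin/stat_analysis'      # hypothetical install path
    verbosity = '2'
    lookin_dir = '/d1/output/stat_analysis'          # hypothetical -lookin dir
    args = ['-job filter -dump_row filtered.stat']   # hypothetical job argument
    cmd = (f"{app_path} -v {verbosity}"
           f" -lookin {lookin_dir}"
           f" {' '.join(args)}").rstrip()
    print(cmd)
    # /path/to/met/bin/stat_analysis -v 2 -lookin /d1/output/stat_analysis -job filter -dump_row filtered.stat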
- cmd = self.app_path - if self.args: - cmd += ' ' + ' '.join(self.args) - - if not self.lookindir: - self.log_error("No lookin directory specified") - return None - - cmd += ' -lookin ' + self.lookindir - - if self.c_dict.get('CONFIG_FILE'): - cmd += f" -config {self.c_dict['CONFIG_FILE']}" - else: - cmd += f' {self.job_args}' - - if self.c_dict.get('OUTPUT_FILENAME'): - cmd += f" -out {self.c_dict['OUTPUT_FILENAME']}" + @returns string with command to run + """ + return (f"{self.app_path} -v {self.c_dict['VERBOSITY']}" + f" -lookin {self.c_dict['LOOKIN_DIR']}" + f" {' '.join(self.args)}").rstrip() - return cmd - def create_c_dict(self): """! Create a data structure (dictionary) that contains all the - values set in the configuration files that are common for + values set in the configuration files that are common for stat_analysis_wrapper.py. - - Args: - - Returns: - c_dict - a dictionary containing the settings in the - configuration files unique to the wrapper + + @returns dictionary containing the settings in the configuration files + unique to the wrapper """ c_dict = super().create_c_dict() c_dict['VERBOSITY'] = ( @@ -165,1082 +152,847 @@ def create_c_dict(self): c_dict['OUTPUT_DIR'] = self.config.getdir('STAT_ANALYSIS_OUTPUT_DIR', '') + if not c_dict['OUTPUT_DIR']: + self.log_error("Must set STAT_ANALYSIS_OUTPUT_DIR") # read optional template to set -out command line argument c_dict['OUTPUT_TEMPLATE'] = ( self.config.getraw('config', 'STAT_ANALYSIS_OUTPUT_TEMPLATE', '') ) - c_dict['DATE_TYPE'] = self.config.getstr('config', - 'DATE_TYPE', - self.config.getstr('config', - 'LOOP_BY', - '')) + # set date type, which is controlled by LOOP_BY + c_dict['DATE_TYPE'] = get_time_prefix(self.config) + if not c_dict['DATE_TYPE']: + self.isOK = False start_dt, end_dt = get_start_and_end_times(self.config) if not start_dt: self.log_error('Could not get start and end times. 
' 'VALID_BEG/END or INIT_BEG/END must be set.') else: - c_dict['DATE_BEG'] = start_dt.strftime('%Y%m%d') - c_dict['DATE_END'] = end_dt.strftime('%Y%m%d') + c_dict['DATE_BEG'] = start_dt + c_dict['DATE_END'] = end_dt - for job_conf in ['JOB_NAME', 'JOB_ARGS']: - c_dict[job_conf] = self.config.getstr('config', - f'STAT_ANALYSIS_{job_conf}', - '') + # read jobs from STAT_ANALYSIS_JOB or legacy JOB_NAME/ARGS if unset + c_dict['JOBS'] = self._read_jobs_from_config() - # read in all lists except field lists, which will be read in afterwards and checked - all_lists_to_read = self.expected_config_lists + self.list_categories - non_field_lists = [conf_list for - conf_list in all_lists_to_read - if conf_list not in self.field_lists] - for conf_list in non_field_lists: - c_dict[conf_list] = getlist( - self.config.getstr('config', conf_list, '') - ) - - # if list in format lists, zero pad value to be at least 2 - # digits, then add 4 zeros - if conf_list in self.format_lists: - c_dict[conf_list] = ( - [value.zfill(2).ljust(4 + len(value.zfill(2)), '0') - for value in c_dict[conf_list]] - ) - - # read all field lists and check if they are all empty - c_dict['all_field_lists_empty'] = self.read_field_lists_from_config(c_dict) - - # check if MakePlots is in process list and set boolean - # MakePlots is removed from the list in met_util.get_process_list, so - # need to read the conf value again - self.runMakePlots = 'MakePlots' in self.config.getstr('config', 'PROCESS_LIST') - if self.runMakePlots: - # only import MakePlots wrappers if it will be used - from .make_plots_wrapper import MakePlotsWrapper, WRAPPER_CANNOT_RUN - if WRAPPER_CANNOT_RUN: - self.log_error("Cannot import MakePlots wrapper! Requires pandas and numpy") - else: - self.check_MakePlots_config(c_dict) - - # create MakePlots wrapper instance - self.MakePlotsWrapper = MakePlotsWrapper(self.config) - if not self.MakePlotsWrapper.isOK: - self.log_error("MakePlotsWrapper was not initialized correctly.") + # read all lists and check if field lists are all empty + all_field_lists_empty = self._read_lists_from_config(c_dict) + # read any [FCST/OBS]_VAR_* variables if they are set c_dict['VAR_LIST'] = parse_var_list(self.config) - c_dict['MODEL_INFO_LIST'] = self.parse_model_info() + c_dict['MODEL_INFO_LIST'] = self._parse_model_info() + + # if MODEL_LIST was not set, populate it from the model info list if not c_dict['MODEL_LIST'] and c_dict['MODEL_INFO_LIST']: - self.logger.warning("MODEL_LIST was left blank, " - + "creating with MODELn information.") - for model_info in c_dict['MODEL_INFO_LIST']: - c_dict['MODEL_LIST'].append(model_info['name']) + self.logger.warning("MODEL_LIST was left blank, " + + "creating with MODELn information.") + for model_info in c_dict['MODEL_INFO_LIST']: + c_dict['MODEL_LIST'].append(model_info['name']) - c_dict = self.set_lists_loop_or_group(c_dict) + c_dict = self._set_lists_loop_or_group(c_dict) + # read MET config settings that will apply to every run self.add_met_config(name='hss_ec_value', data_type='float', metplus_configs=['STAT_ANALYSIS_HSS_EC_VALUE']) - return self.c_dict_error_check(c_dict) + return self._c_dict_error_check(c_dict, all_field_lists_empty) - def c_dict_error_check(self, c_dict): + def run_all_times(self): + """! Function called when processing all times. - if not c_dict.get('CONFIG_FILE'): - self.logger.info("STAT_ANALYSIS_CONFIG_FILE not set. Passing job arguments to " - "stat_analysis directly on the command line. 
This will bypass "
-                             "any filtering done unless you add the arguments to "
-                             "STAT_ANALYSIS_JOB_ARGS")
+        @returns list of tuples containing all commands that were run and the
+        environment variables that were set for each
+        """
+        self._run_stat_analysis()
+        return self.all_commands

-        if not c_dict['OUTPUT_DIR']:
-            self.log_error("Must set STAT_ANALYSIS_OUTPUT_DIR")
+    def _run_stat_analysis(self):
+        """! This runs stat_analysis over a period of valid
+        or initialization dates for a job defined by
+        the user.
+        """
+        runtime_settings_dict_list = self._get_all_runtime_settings()
+        if not runtime_settings_dict_list:
+            self.log_error('Could not get runtime settings dict list')
+            return False

-        for job_conf in ['JOB_NAME', 'JOB_ARGS']:
-            if not c_dict[job_conf]:
-                self.log_error(f"Must set STAT_ANALYSIS_{job_conf} to run StatAnalysis")
+        self._run_stat_analysis_job(runtime_settings_dict_list)

-        for conf_list in self.list_categories:
-            if not c_dict[conf_list]:
-                self.log_error(f"Must set {conf_list} to run StatAnalysis")
+        return True

-        if not c_dict['DATE_TYPE']:
-            self.log_error("DATE_TYPE or LOOP_BY must be set to run "
-                           "StatAnalysis wrapper")
+    def _get_all_runtime_settings(self):
+        """! Get all settings for each run of stat_analysis.

-        if c_dict['DATE_TYPE'] not in ['VALID', 'INIT']:
-            self.log_error("DATE_TYPE must be VALID or INIT")
+        @returns list of dictionaries containing settings for each run
+        """
+        runtime_settings_dict_list = []
+        c_dict_list = self._get_c_dict_list()
+        for c_dict in c_dict_list:
+            runtime_settings = self._get_runtime_settings(c_dict)
+            runtime_settings_dict_list.extend(runtime_settings)
+
+        # Loop over run settings.
+        formatted_runtime_settings_dict_list = []
+        for runtime_settings in runtime_settings_dict_list:
+            stringsub_dict = self._build_stringsub_dict(runtime_settings)
+
+            # Set up stat_analysis -lookin argument, model and obs information
+            # and stat_analysis job.
+            model_info = self._get_model_obtype_and_lookindir(runtime_settings)
+            if model_info is None:
+                return None
+
+            jobs = self._get_job_info(model_info, runtime_settings,
+                                      stringsub_dict)
+
+            # get -out argument if set
+            output_file = None
+            if self.c_dict['OUTPUT_TEMPLATE']:
+                output_filename = (
+                    self._get_output_filename('output',
+                                              self.c_dict['OUTPUT_TEMPLATE'],
+                                              stringsub_dict)
+                )
+                output_file = os.path.join(self.c_dict['OUTPUT_DIR'],
+                                           output_filename)
+
+            # Set up forecast and observation valid and init time information
+            runtime_settings_fmt = self._format_valid_init(runtime_settings,
+                                                           stringsub_dict)
+
+            # add jobs and output file path to formatted runtime_settings
+            runtime_settings_fmt['JOBS'] = jobs
+            runtime_settings_fmt['OUTPUT_FILENAME'] = output_file
+            formatted_runtime_settings_dict_list.append(runtime_settings_fmt)
+
+        return formatted_runtime_settings_dict_list
+
+    def _run_stat_analysis_job(self, runtime_settings_dict_list):
+        """! Sets environment variables needed to run StatAnalysis jobs
+        and calls the tool for each job.
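A small sketch of the METPLUS_ environment variable formatting this method applies (hypothetical runtime values; the special cases for keys ending in _JOBS and _BEG/_END mirror the loop in the method body below):

    runtime_settings = {
        'JOBS': ['-job filter -dump_row filtered.stat'],   # hypothetical job
        'FCST_VALID_BEG': '20190101_000000',
        'MODEL': '"MODEL_TEST"',
    }
    env_var_dict = {}
    for key in ('METPLUS_JOBS', 'METPLUS_FCST_VALID_BEG', 'METPLUS_MODEL'):
        item = key.replace('METPLUS_', '')
        value = runtime_settings.get(item, '')
        if not value:
            continue
        if key.endswith('_JOBS'):
            # jobs become a bracketed list of quoted strings
            value = '["' + '","'.join(value) + '"]'
        elif key.endswith('_BEG') or key.endswith('_END'):
            # time bounds are single quoted strings
            value = f'"{value}"'
        else:
            # everything else is wrapped as a list of already-quoted items
            value = f'[{value}]'
        env_var_dict[key] = f'{item.lower()} = {value};'
    print(env_var_dict['METPLUS_JOBS'])            # jobs = ["-job filter -dump_row filtered.stat"];
    print(env_var_dict['METPLUS_FCST_VALID_BEG'])  # fcst_valid_beg = "20190101_000000";
    print(env_var_dict['METPLUS_MODEL'])           # model = ["MODEL_TEST"];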
+    def _run_stat_analysis_job(self, runtime_settings_dict_list):
+        """! Sets environment variables needed to run StatAnalysis jobs
+         and calls the tool for each job.
+
+        Args:
+            @param runtime_settings_dict_list list of dictionaries
+             containing information needed to run a StatAnalysis job
+        """
+        for runtime_settings in runtime_settings_dict_list:
+            self.clear()
+            if not self._create_output_directories(runtime_settings):
+                continue
+
+            # set METPLUS_ env vars for MET config file to be consistent
+            # with other wrappers
+            for key in self.WRAPPER_ENV_VAR_KEYS:
+                item = key.replace('METPLUS_', '')
+                if not runtime_settings.get(item, ''):
+                    continue
+                value = runtime_settings.get(item, '')
+                if key.endswith('_JOBS'):
+                    value = '["' + '","'.join(value) + '"]'
+                elif key.endswith('_BEG') or key.endswith('_END'):
+                    value = f'"{value}"'
+                else:
+                    value = f'[{value}]'
+                value = f'{item.lower()} = {value};'
+                self.env_var_dict[key] = value
+
+            # send environment variables to logger
+            self.set_environment_variables()
+
+            # set lookin dir to add to command
+            self.logger.debug("Setting -lookin dir to "
+                              f"{runtime_settings['LOOKIN_DIR']}")
+            self.c_dict['LOOKIN_DIR'] = runtime_settings['LOOKIN_DIR']
+
+            # set any command line arguments
+            if self.c_dict.get('CONFIG_FILE'):
+                self.args.append(f"-config {self.c_dict['CONFIG_FILE']}")
+            else:
+                self.args.append(runtime_settings['JOBS'][0])
+
+            # set -out file path if requested, value will be set to None if not
+            output_filename = runtime_settings.get('OUTPUT_FILENAME')
+            if output_filename:
+                self.args.append(f"-out {output_filename}")
+
+            self.build()
+
+    def _read_jobs_from_config(self):
+        """! Parse the jobs from the METplusConfig object
+
+        @returns list of strings containing each job specification
+        """
+        jobs = []
+        job_indices = list(
+            find_indices_in_config_section(r'STAT_ANALYSIS_JOB(\d+)$',
+                                           self.config,
+                                           index_index=1).keys()
+        )
+
+        if job_indices:
+            for j_id in job_indices:
+                job = self.config.getraw('config', f'STAT_ANALYSIS_JOB{j_id}')
+                if job:
+                    jobs.append(job)
+
+        # if no jobs are found, check for legacy _JOB_NAME and _JOB_ARGS variables
+        if not jobs:
+            job_name = self.config.getraw('config', 'STAT_ANALYSIS_JOB_NAME')
+            job_args = self.config.getraw('config', 'STAT_ANALYSIS_JOB_ARGS')
+            if job_name and job_args:
+                jobs.append(f'-job {job_name} {job_args}')
+
+        return jobs
+
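[Editor's note] A sketch of the two configuration forms that
_read_jobs_from_config() accepts; the values shown are hypothetical:

    # preferred: one or more numbered job variables
    #   STAT_ANALYSIS_JOB1 = -job filter -dump_row [dump_row_file]
    #   STAT_ANALYSIS_JOB2 = -job summary -line_type CNT
    #
    # legacy fallback, read only when no STAT_ANALYSIS_JOB<n> is found:
    #   STAT_ANALYSIS_JOB_NAME = filter
    #   STAT_ANALYSIS_JOB_ARGS = -dump_row [dump_row_file]
    # the legacy pair is folded into a single job string:
    job_name, job_args = 'filter', '-dump_row [dump_row_file]'
    assert f'-job {job_name} {job_args}' == '-job filter -dump_row [dump_row_file]'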
This will bypass " + "any filtering done unless you add the " + "arguments to STAT_ANALYSIS_JOBS") + + if not c_dict['JOBS']: + self.log_error( + "Must set at least one job with STAT_ANALYSIS_JOB" + ) + else: + # check if [dump_row_file] or [out_stat_file] are in any job + for job in c_dict['JOBS']: + for check in ('dump_row_file', 'out_stat_file'): + if f'[{check}]' not in job: + continue + for model in c_dict['MODEL_INFO_LIST']: + if model[f'{check}name_template']: + continue + conf = check.replace('_file', '').upper() + conf = f"STAT_ANALYSIS_{conf}_TEMPLATE" + self.log_error(f'Must set {conf} if [{check}] is used' + ' in a job') # if var list is set and field lists are not all empty, error - if c_dict['VAR_LIST'] and not c_dict['all_field_lists_empty']: + if c_dict['VAR_LIST'] and not all_field_lists_empty: self.log_error("Field information defined in both " "[FCST/OBS]_VAR_LIST and " "[FCST/OBS]_VAR_[NAME/LEVELS]. Use " "one or the other formats to run") - # if no var list is found, other lists must be set to run MakePlots - elif not c_dict['VAR_LIST'] and c_dict['all_field_lists_empty'] and self.runMakePlots: - self.log_error("No field information found. Must define fields to " - "process with either [FCST/OBS]_VAR_LIST or " - "[FCST/OBS]_VAR_[NAME/LEVELS]") - - # if MODEL_LIST was not set in config, populate it from the model info list - # if model info list is also not set, report and error + # if model list and info list were not set, report and error if not c_dict['MODEL_LIST'] and not c_dict['MODEL_INFO_LIST']: self.log_error("No model information was found.") - # if running MakePlots and model list in group list, error and exit - if self.runMakePlots: - if 'MODEL_LIST' in c_dict['GROUP_LIST_ITEMS']: - self.log_error("Cannot group MODELS if running MakePlots. Remove " - "MODEL_LIST from LOOP_LIST_ITEMS") - - if len(c_dict['MODEL_LIST']) > 8: - self.log_error("Number of models for plotting limited to 8.") - -# self.check_dump_row_templates_for_plotting() - - # set forMakePlots to False to begin. When gathering settings to - # send to MakePlots wrapper, this will be set to True - self.forMakePlots = False - return c_dict - def read_field_lists_from_config(self, field_dict): - """! Get field list configuration variables and add to dictionary - @param field_dict dictionary to hold output values - @returns True if all lists are empty or False if any have a value""" + def _read_lists_from_config(self, c_dict): + """! 
Get list configuration variables and add to dictionary + + @param c_dict dictionary to hold output values + @returns True if all field lists are empty or False if any are set + """ all_empty = True - for field_list in self.field_lists: - if 'LEVEL_LIST' in field_list: - field_dict[field_list] = ( - self.get_level_list(field_list.split('_')[0]) + + all_lists_to_read = self.EXPECTED_CONFIG_LISTS + self.LIST_CATEGORIES + for conf_list in all_lists_to_read: + if 'LEVEL_LIST' in conf_list: + c_dict[conf_list] = ( + self._get_level_list(conf_list.split('_')[0]) ) else: - field_dict[field_list] = getlist( - self.config.getstr('config', - field_list, - '') - ) + c_dict[conf_list] = self._format_conf_list(conf_list) - # keep track if any list is not empty - if field_dict[field_list]: + # keep track if any field list is not empty + if conf_list in self.FIELD_LISTS and c_dict[conf_list]: all_empty = False return all_empty - def check_MakePlots_config(self, c_dict): - - # the following are specific to running MakePlots wrapper - bad_config_variable_list = [ - 'FCST_VAR_LIST', 'FCST_LEVEL_LIST', - 'FCST_THRESH_LIST', 'FCST_UNITS_LIST', - 'OBS_VAR_LIST', 'OBS_LEVEL_LIST', - 'OBS_THRESH_LIST', 'OBS_UNITS_LIST' - ] - for bad_config_variable in bad_config_variable_list: - if c_dict[bad_config_variable]: - self.log_error("Bad config option for running StatAnalysis " - "followed by MakePlots. Please remove " - +bad_config_variable+" and set using FCST/OBS_VARn") - - loop_group_accepted_options = [ - 'FCST_VALID_HOUR_LIST', 'FCST_INIT_HOUR_LIST', - 'OBS_VALID_HOUR_LIST', 'OBS_INIT_HOUR_LIST' - ] - for config_list in c_dict['GROUP_LIST_ITEMS']: - if config_list not in loop_group_accepted_options: - self.log_error("Bad config option for running StatAnalysis " - +"followed by MakePlots. Only accepted " - +"values in GROUP_LIST_ITEMS are " - +"FCST_VALID_HOUR_LIST, " - +"FCST_INIT_HOUR_LIST, " - +"OBS_VALID_HOUR_LIST, " - +"OBS_INIT_HOUR_LIST. " - +"Found "+config_list) - - for config_list in c_dict['LOOP_LIST_ITEMS']: - if config_list not in loop_group_accepted_options: - self.log_error("Bad config option for running StatAnalysis " - +"followed by MakePlots. Only accepted " - +"values in LOOP_LIST_ITEMS are " - +"FCST_VALID_HOUR_LIST, " - +"FCST_INIT_HOUR_LIST, " - +"OBS_VALID_HOUR_LIST, " - +"OBS_INIT_HOUR_LIST. " - +"Found "+config_list) - - # Do checks for required configuration file options that are - # defined by user. - required_config_variable_list = [ - 'VX_MASK_LIST', 'FCST_LEAD_LIST', 'LINE_TYPE_LIST' - ] - for required_config_variable in required_config_variable_list: - if len(c_dict[required_config_variable]) == 0: - self.log_error(required_config_variable+" has no items. " - +"This list must have items to run " - +"StatAnalysis followed by MakePlots.") - - # if MakePlots is run but -dump_row is not found in the job args, error - if '-dump_row' not in c_dict['JOB_ARGS']: - self.log_error("Must include -dump_row in STAT_ANALYSIS_JOB_ARGS if running MakePlots") - - def list_to_str(self, list_of_values, add_quotes=True): - """! Turn a list of values into a single string so it can be - set to an environment variable and read by the MET - stat_analysis config file. - - Args: - @param list_of_values - list of values, i.e. ['value1', 'value2'] - @param add_quotes if True, add quotation marks around values - default is True - - @returns string created from list_of_values with the values separated - by commas, i.e. 
'"value1", "value2"' or 1, 3 if add_quotes is False + def _get_level_list(self, data_type): + """!Read forecast or observation level list from config. + Format list items to match the format expected by + StatAnalysis by removing parenthesis and any quotes, + then adding back single quotes + + @param data_type type of list to get, FCST or OBS + @returns list containing the formatted level list + """ + level_list = [] + + level_input = getlist( + self.config.getraw('config', f'{data_type}_LEVEL_LIST', '') + ) + + for level in level_input: + level = level.strip('(').strip(')') + level = f'{remove_quotes(level)}' + level_list.append(level) + + return [f'"{item}"' for item in level_list] + + def _format_conf_list(self, conf_list): + """! Process config list. If list name (e.g. FCST_LEAD_LIST) is not + set, then check if numbered config variable (e.g. FCST_LEAD_LIST) + is set. Format thresholds lists as thresholds. Add quotation marks + around any list not found in the self.FORMAT_LISTS. Format lists will + be formatted later based on the loop/group conditions. + + @param conf_list name of METplus config variable to process + @returns list of items parsed from configuration """ - # return empty string if list is empty - if not list_of_values: - return '' + items = getlist( + self.config.getraw('config', conf_list, '') + ) - if add_quotes: - return '"' + '", "'.join(list_of_values) + '"' + # if list is empty or unset, check for {LIST_NAME} + if not items: + indices = list( + find_indices_in_config_section(fr'{conf_list}(\d+)$', + self.config, + index_index=1).keys() + ) + if indices: + items = [] + for index in indices: + sub_items = getlist( + self.config.getraw('config', f'{conf_list}{index}') + ) + if not sub_items: + continue - return ', '.join(list_of_values) + items.append(','.join(sub_items)) - def set_lists_loop_or_group(self, c_dict): + # do not add quotes and format thresholds if threshold list + if 'THRESH' in conf_list: + return [format_thresh(item) for item in items] + + if conf_list in self.LIST_CATEGORIES: + return items + + formatted_items = [] + for item in items: + # do not format items in format list now + if conf_list not in self.FORMAT_LISTS: + sub_items = item.split(',') + sub_item_str = '", "'.join(sub_items) + formatted_items.append(f'"{sub_item_str}"') + else: + formatted_items.append(item) + + return formatted_items + + def _set_lists_loop_or_group(self, c_dict): """! Determine whether the lists from the METplus config file - should treat the items in that list as a group or items + should treat the items in that list as a group or items to be looped over based on user settings, the values in the list, and process being run. 
+    def _set_lists_loop_or_group(self, c_dict):
         """! Determine whether the lists from the METplus config file
-             should treat the items in that list as a group or items
+             should treat the items in that list as a group or items
              to be looped over based on user settings, the values
              in the list, and process being run.
-
-             Args:
-                 @param group_items list of the METplus config list
-                  names to group the list's items set by user
-                 @param loop_items list of the METplus config list
-                  names to loop over the list's items set by user
-                 @param config_dict dictionary containing the
-                  configuration information
-
+
+             @param c_dict dictionary containing the configuration information
+             @returns c_dict with GROUP_LIST_ITEMS (all the list names whose
+              items are grouped together) and LOOP_LIST_ITEMS (all the list
+              names whose items are looped over) updated
         """
-        # get list of config variables not found in either GROUP_LIST_ITEMS or LOOP_LIST_ITEMS
-        missing_config_list = [conf for conf in self.expected_config_lists if conf not in c_dict['GROUP_LIST_ITEMS']]
-        missing_config_list = [conf for conf in missing_config_list if conf not in c_dict['LOOP_LIST_ITEMS']]
-        found_config_list = [conf for conf in self.expected_config_lists if conf not in missing_config_list]
+        # get list of list variables not found in group or loop lists
+        missing_config_list = [conf for conf in self.EXPECTED_CONFIG_LISTS
+                               if conf not in c_dict['GROUP_LIST_ITEMS']
+                               and conf not in c_dict['LOOP_LIST_ITEMS']]

-        # loop through lists not found in either loop or group lists
+        # add missing lists to group_lists
         for missing_config in missing_config_list:
+            c_dict['GROUP_LIST_ITEMS'].append(missing_config)

-            # if running MakePlots
-            if self.runMakePlots:
-
-                # if LINE_TYPE_LIST is missing, add it to group list
-                if missing_config == 'LINE_TYPE_LIST':
-                    c_dict['GROUP_LIST_ITEMS'].append(missing_config)
-
-                # else if list in config_dict is empty, warn and add to group list
-                elif not c_dict[missing_config]:
-                    self.logger.warning(missing_config + " is empty, "
-                                        + "will be treated as group.")
-                    c_dict['GROUP_LIST_ITEMS'].append(missing_config)
-
-                # otherwise add to loop list
-                else:
-                    c_dict['LOOP_LIST_ITEMS'].append(missing_config)
-
-            # if not running MakePlots, just add missing list to group list
-            else:
-                c_dict['GROUP_LIST_ITEMS'].append(missing_config)
+        # move empty lists in loop lists to group lists
+        for list_name in c_dict['LOOP_LIST_ITEMS']:
+            # skip if list has values
+            if c_dict[list_name]:
+                continue

-        # loop through lists found in either loop or group lists originally
-        for found_config in found_config_list:
-            # if list is empty and in loop list, warn and move to group list
-            if not c_dict[found_config] and found_config in c_dict['LOOP_LIST_ITEMS']:
-                self.logger.warning(found_config + " is empty, "
-                                    + "will be treated as group.")
-                c_dict['GROUP_LIST_ITEMS'].append(found_config)
-                c_dict['LOOP_LIST_ITEMS'].remove(found_config)
+            self.logger.warning(f'{list_name} was found in LOOP_LIST_ITEMS'
+                                ' but is empty.
Moving to group list') + c_dict['GROUP_LIST_ITEMS'].append(list_name) + c_dict['LOOP_LIST_ITEMS'].remove(list_name) + # log summary of group and loop lists self.logger.debug("Items in these lists will be grouped together: " + ', '.join(c_dict['GROUP_LIST_ITEMS'])) self.logger.debug("Items in these lists will be looped over: " + ', '.join(c_dict['LOOP_LIST_ITEMS'])) - # if running MakePlots, create new group and loop lists based on - # the criteria for running that wrapper - if self.runMakePlots: - c_dict['GROUP_LIST_ITEMS_MAKE_PLOTS'] = list(c_dict['GROUP_LIST_ITEMS']) - c_dict['LOOP_LIST_ITEMS_MAKE_PLOTS'] = list(c_dict['LOOP_LIST_ITEMS']) - for force_group_list in self.force_group_for_make_plots_lists: - if force_group_list in c_dict['LOOP_LIST_ITEMS_MAKE_PLOTS']: - c_dict['LOOP_LIST_ITEMS_MAKE_PLOTS'].remove(force_group_list) - c_dict['GROUP_LIST_ITEMS_MAKE_PLOTS'].append(force_group_list) - return c_dict - def format_thresh(self, thresh): - """! Format thresholds for file naming - - Args: - @param thresh string of the thresholds. Can be a comma-separated list, i.e. gt3,<=5.5, ==7 + def _build_stringsub_dict(self, config_dict): + """! Build a dictionary with list names, dates, and commonly + used identifiers to pass to string_template_substitution. - @returns string of comma-separated list of the threshold(s) with letter format, i.e. gt3, le5.5, eq7 + @param config_dict dictionary containing the configuration information + @returns dictionary with the formatted info to pass to do_string_sub """ - formatted_thresh_list = [] - # separate thresholds by comma and strip off whitespace around values - thresh_list = [thresh.strip() for thresh in thresh.split(',')] - for thresh in thresh_list: - if not thresh: - continue + date_type = self.c_dict['DATE_TYPE'] - thresh_letter = util.comparison_to_letter_format(thresh) - if thresh_letter: - formatted_thresh_list.append(thresh_letter) + clock_dt = datetime.strptime( + self.config.getstr('config', 'CLOCK_TIME'), '%Y%m%d%H%M%S' + ) + stringsub_dict = { + 'now': clock_dt, + 'today': clock_dt.strftime('%Y%m%d') + } + # add all loop list and group list items to string sub keys list + for list_item in self.EXPECTED_CONFIG_LISTS: + list_name = list_item.replace('_LIST', '').lower() + stringsub_dict[list_name] = '' - return ','.join(formatted_thresh_list) + # create a dictionary of empty string values from the special keys + for special_key in self.STRING_SUB_SPECIAL_KEYS: + stringsub_dict[special_key] = '' - def build_stringsub_dict(self, lists_to_loop, lists_to_group, config_dict): - """! Build a dictionary with list names, dates, and commonly - used identifiers to pass to string_template_substitution. 
- - Args: - lists_to_loop - list of all the list names whose items - are being grouped together - lists_to group - list of all the list names whose items - are being looped over - config_dict - dictionary containing the configuration - information - - Returns: - stringsub_dict - dictionary containing the formatted - information to pass to the - string_template_substitution - """ - date_beg = self.c_dict['DATE_BEG'] - date_end = self.c_dict['DATE_END'] - date_type = self.c_dict['DATE_TYPE'] + # Set string sub info from fcst/obs hour lists + self._set_stringsub_hours(stringsub_dict, + config_dict.get(f'FCST_{date_type}_HOUR'), + config_dict.get(f'OBS_{date_type}_HOUR')) + + # handle opposite of date_type VALID if INIT and vice versa + self._set_strinsub_other(stringsub_dict, date_type.lower(), + config_dict.get('FCST_LEAD'), + config_dict.get('OBS_LEAD')) + + # Set loop information + for loop_or_group_list in self.EXPECTED_CONFIG_LISTS: + list_name = loop_or_group_list.replace('_LIST', '') + sub_name = list_name.lower() + list_name_value = self._get_list_name_value(list_name, config_dict) + + if 'HOUR' not in list_name and 'LEAD' not in list_name: + stringsub_dict[sub_name] = list_name_value + + # if list is MODEL, also set obtype + if list_name == 'MODEL': + stringsub_dict['obtype'] = ( + config_dict.get('OBTYPE', '').replace('"', '') + .replace(' ', '') + ) - stringsub_dict_keys = [] - for loop_list in lists_to_loop: - list_name = loop_list.replace('_LIST', '') - stringsub_dict_keys.append(list_name.lower()) - for group_list in lists_to_group: - # if setting up MakePlots, skip adding forced - # group lists so they will remain templates - # to be filled in by the plotting scripts - if (self.forMakePlots and - group_list in self.force_group_for_make_plots_lists): continue - list_name = group_list.replace('_LIST', '') - stringsub_dict_keys.append(list_name.lower()) - - special_keys = [ - 'fcst_valid_hour_beg', 'fcst_valid_hour_end', - 'fcst_init_hour_beg', 'fcst_init_hour_end', - 'obs_valid_hour_beg', 'obs_valid_hour_end', - 'obs_init_hour_beg', 'obs_init_hour_end', - 'valid_hour', 'valid_hour_beg', 'valid_hour_end', - 'init_hour', 'init_hour_beg', 'init_hour_end', - 'fcst_valid', 'fcst_valid_beg', 'fcst_valid_end', - 'fcst_init', 'fcst_init_beg', 'fcst_init_end', - 'obs_valid', 'obs_valid_beg', 'obs_valid_end', - 'obs_init', 'obs_init_beg', 'obs_init_end', - 'valid', 'valid_beg', 'valid_end', - 'init', 'init_beg', 'init_end', - 'fcst_lead_hour', 'fcst_lead_min', - 'fcst_lead_sec', 'fcst_lead_totalsec', - 'obs_lead_hour', 'obs_lead_min', - 'obs_lead_sec', 'obs_lead_totalsec', - 'lead', 'lead_hour', 'lead_min', 'lead_sec', 'lead_totalsec' - ] - # create a dictionary of empty string values from the special keys - for special_key in special_keys: - stringsub_dict_keys.append(special_key) - stringsub_dict = dict.fromkeys(stringsub_dict_keys, '') - - # Set full date information - fcst_hour_list = config_dict['FCST_'+date_type+'_HOUR'] - obs_hour_list = config_dict['OBS_' + date_type + '_HOUR'] - if fcst_hour_list: - fcst_hour_list = [fhr.strip() for fhr in fcst_hour_list.replace('"', '').split(',')] - if obs_hour_list: - obs_hour_list = [fhr.strip() for fhr in obs_hour_list.replace('"', '').split(',')] - - # if fcst hour list is set, set fcst_{data_type}_beg/end with first and last values - # TODO: values should be sorted first - if fcst_hour_list: - stringsub_dict['fcst_'+date_type.lower()+'_beg'] = ( - datetime.datetime.strptime( - date_beg+fcst_hour_list[0], '%Y%m%d%H%M%S' - ) - ) - 
stringsub_dict['fcst_'+date_type.lower()+'_end'] = ( - datetime.datetime.strptime( - date_end+fcst_hour_list[-1], '%Y%m%d%H%M%S' - ) + if 'HOUR' in list_name: + self._build_stringsub_hours(list_name, config_dict, + stringsub_dict) + elif 'LEAD' in list_name: + self._build_stringsub_leads(list_name, config_dict, + stringsub_dict) + + # Some lines for debugging if needed in future + # for key, value in stringsub_dict.items(): + # self.logger.debug("{} ({})".format(key, value)) + return stringsub_dict + + def _build_stringsub_hours(self, list_name, config_dict, stringsub_dict): + """! Handle logic specific to setting lists named with HOUR + + @param list_name name of list to process + @param config_dict dictionary to read values from + @param stringsub_dict dictionary to set values + """ + sub_name = list_name.lower() + delta_list = get_delta_list(config_dict[list_name]) + if not delta_list: + list_name_value = self._get_list_name_value(list_name, config_dict) + stringsub_dict[sub_name] = list_name_value + stringsub_dict[sub_name + '_beg'] = relativedelta() + stringsub_dict[sub_name + '_end'] = ( + relativedelta(hours=+23, minutes=+59, seconds=+59) ) - if (stringsub_dict['fcst_'+date_type.lower()+'_beg'] - == stringsub_dict['fcst_'+date_type.lower()+'_end']): - stringsub_dict['fcst_'+date_type.lower()] = ( - stringsub_dict['fcst_'+date_type.lower()+'_beg'] - ) - # if fcst hour list is not set, use date beg 000000-235959 as fcst_{date_type}_beg/end - #TODO: should be date beg 000000 and date end 235959? + return + + if len(delta_list) == 1: + stringsub_dict[sub_name] = delta_list[0] else: - stringsub_dict['fcst_'+date_type.lower()+'_beg'] = ( - datetime.datetime.strptime( - date_beg+'000000', '%Y%m%d%H%M%S' - ) - ) - stringsub_dict['fcst_'+date_type.lower()+'_end'] = ( - datetime.datetime.strptime( - date_beg+'235959', '%Y%m%d%H%M%S' - ) + stringsub_dict[sub_name] = ( + '_'.join(get_met_time_list(config_dict[list_name])) ) - # if obs hour list is set, set obs_{data_type}_beg/end with first and last values - # TODO: values should be sorted first - # TODO: this could be made into function to handle fcst and obs - if obs_hour_list: - stringsub_dict['obs_'+date_type.lower()+'_beg'] = ( - datetime.datetime.strptime( - date_beg+obs_hour_list[0], '%Y%m%d%H%M%S' - ) + + stringsub_dict[sub_name + '_beg'] = delta_list[0] + stringsub_dict[sub_name + '_end'] = delta_list[-1] + + check_list = self._get_check_list(list_name, config_dict) + # if opposite fcst is not set or the same, + # set init/valid hour beg/end to fcst, same for obs + if not check_list or config_dict[list_name] == check_list: + # sub name e.g. fcst_valid_hour + # generic list e.g. valid_hour + generic_list = ( + sub_name.replace('fcst_', '').replace('obs_', '') ) - stringsub_dict['obs_'+date_type.lower()+'_end'] = ( - datetime.datetime.strptime( - date_end+obs_hour_list[-1], '%Y%m%d%H%M%S' - ) + stringsub_dict[f'{generic_list}_beg'] = ( + stringsub_dict[f'{sub_name}_beg'] ) - if (stringsub_dict['obs_'+date_type.lower()+'_beg'] - == stringsub_dict['obs_'+date_type.lower()+'_end']): - stringsub_dict['obs_'+date_type.lower()] = ( - stringsub_dict['obs_'+date_type.lower()+'_beg'] - ) - # if obs hour list is not set, use date beg 000000-235959 as obs_{date_type}_beg/end - #TODO: should be date beg 000000 and date end 235959? 
- else: - stringsub_dict['obs_'+date_type.lower()+'_beg'] = ( - datetime.datetime.strptime( - date_beg+'000000', '%Y%m%d%H%M%S' - ) + stringsub_dict[f'{generic_list}_end'] = ( + stringsub_dict[f'{sub_name}_end'] ) - stringsub_dict['obs_'+date_type.lower()+'_end'] = ( - datetime.datetime.strptime( - date_beg+'235959', '%Y%m%d%H%M%S' + if (stringsub_dict[f'{generic_list}_beg'] == + stringsub_dict[f'{generic_list}_end']): + stringsub_dict[generic_list] = ( + stringsub_dict[f'{sub_name}_end'] ) + + @staticmethod + def _get_list_name_value(list_name, config_dict): + value = config_dict.get(list_name, '') + value = value.replace('"', '').replace(' ', '').replace(',', '_') + value = value.replace('*', 'ALL') + return value + + def _build_stringsub_leads(self, list_name, config_dict, stringsub_dict): + """! Handle logic specific to setting lists named with LEAD + + @param list_name name of list to process + @param config_dict dictionary to read values from + @param stringsub_dict dictionary to set values + """ + sub_name = list_name.lower() + lead_list = get_met_time_list(config_dict.get(list_name)) + + if not lead_list: + return + + # if multiple leads are specified, format lead info + # using met time notation separated by underscore + if len(lead_list) > 1: + stringsub_dict[sub_name] = '_'.join(lead_list) + return + + stringsub_dict[sub_name] = lead_list[0] + + lead_rd = get_delta_list(config_dict[list_name])[0] + total_sec = ti_get_seconds_from_relativedelta(lead_rd) + stringsub_dict[sub_name + '_totalsec'] = str(total_sec) + + stringsub_dict[f'{sub_name}_hour'] = lead_list[0][:-4] + stringsub_dict[f'{sub_name}_min'] = lead_list[0][-4:-2] + stringsub_dict[f'{sub_name}_sec'] = lead_list[0][-2:] + + check_list = self._get_check_list(list_name, config_dict) + if not check_list or config_dict[list_name] == check_list: + stringsub_dict['lead'] = stringsub_dict[sub_name] + stringsub_dict['lead_hour'] = ( + stringsub_dict[sub_name + '_hour'] ) - # if fcst and obs hour lists the same, set {date_type}_beg/end to fcst_{date_type}_beg/end - if fcst_hour_list == obs_hour_list: - stringsub_dict[date_type.lower()+'_beg'] = ( - stringsub_dict['fcst_'+date_type.lower()+'_beg'] - ) - stringsub_dict[date_type.lower()+'_end'] = ( - stringsub_dict['fcst_'+date_type.lower()+'_end'] - ) - # if {date_type} beg and end are the same, set {date_type} - if (stringsub_dict[date_type.lower()+'_beg'] - == stringsub_dict[date_type.lower()+'_end']): - stringsub_dict[date_type.lower()] = ( - stringsub_dict['fcst_'+date_type.lower()+'_beg'] - ) - # if fcst hr list is not set but obs hr list is, set {date_type}_beg/end to fcst_{date_type}_beg/end - # TODO: should be elif? - if fcst_hour_list and not obs_hour_list: - stringsub_dict[date_type.lower()+'_beg'] = ( - stringsub_dict['fcst_'+date_type.lower()+'_beg'] - ) - stringsub_dict[date_type.lower()+'_end'] = ( - stringsub_dict['fcst_'+date_type.lower()+'_end'] + stringsub_dict['lead_min'] = ( + stringsub_dict[sub_name + '_min'] ) - # if {date_type} beg and end are the same, set {date_type} (same as above) - if (stringsub_dict[date_type.lower()+'_beg'] - == stringsub_dict[date_type.lower()+'_end']): - stringsub_dict[date_type.lower()] = ( - stringsub_dict['fcst_'+date_type.lower()+'_beg'] - ) - # if fcst hr list is set but obs hr list is not, set {date_type}_beg/end to obs_{date_type}_beg/end - # TODO: should be elif? 
- if not fcst_hour_list and obs_hour_list: - stringsub_dict[date_type.lower()+'_beg'] = ( - stringsub_dict['obs_'+date_type.lower()+'_beg'] + stringsub_dict['lead_sec'] = ( + stringsub_dict[sub_name + '_sec'] ) - stringsub_dict[date_type.lower()+'_end'] = ( - stringsub_dict['obs_'+date_type.lower()+'_end'] + stringsub_dict['lead_totalsec'] = ( + stringsub_dict[sub_name + '_totalsec'] ) - # if {date_type} beg and end are the same, set {date_type} (same as above twice) - if (stringsub_dict[date_type.lower()+'_beg'] - == stringsub_dict[date_type.lower()+'_end']): - stringsub_dict[date_type.lower()] = ( - stringsub_dict['obs_'+date_type.lower()+'_beg'] - ) - # if neither fcst or obs hr list are set, {date_type}_beg/end are not set at all (empty string) - # also {date_type} is not set - # Set loop information - for loop_list in lists_to_loop: - list_name = loop_list.replace('_LIST', '') - list_name_value = ( - config_dict[list_name].replace('"', '').replace(' ', '') - ) - # CHANGE: format thresh when it is read instead of here -# if 'THRESH' in list_name: -# stringsub_dict[list_name.lower()] = self.format_thresh( -# list_name_value -# ) -# elif list_name == 'MODEL': - if list_name == 'MODEL': - stringsub_dict[list_name.lower()] = list_name_value - stringsub_dict['obtype'] = ( - config_dict['OBTYPE'].replace('"', '').replace(' ', '') - ) - elif 'HOUR' in list_name: - stringsub_dict[list_name.lower()] = ( - datetime.datetime.strptime(list_name_value, '%H%M%S') - ) - stringsub_dict[list_name.lower()+'_beg'] = stringsub_dict[ - list_name.lower() - ] - stringsub_dict[list_name.lower()+'_end'] = stringsub_dict[ - list_name.lower() - ] - check_list1 = config_dict[list_name] - if 'FCST' in list_name: - check_list2 = config_dict[list_name.replace('FCST', - 'OBS')] - elif 'OBS' in list_name: - check_list2 = config_dict[list_name.replace('OBS', - 'FCST')] - if (check_list1 == check_list2 - or len(check_list2) == 0): - list_type = list_name.replace('_HOUR', '').lower() - if 'VALID' in list_name: - stringsub_dict['valid_hour_beg'] = ( - stringsub_dict[list_type+'_hour_beg'] - ) - stringsub_dict['valid_hour_end'] = ( - stringsub_dict[list_type+'_hour_end'] - ) - if (stringsub_dict['valid_hour_beg'] - == stringsub_dict['valid_hour_end']): - stringsub_dict['valid_hour'] = ( - stringsub_dict['valid_hour_end'] - ) - elif 'INIT' in list_name: - stringsub_dict['init_hour_beg'] = ( - stringsub_dict[list_type+'_hour_beg'] - ) - stringsub_dict['init_hour_end'] = ( - stringsub_dict[list_type+'_hour_end'] - ) - if (stringsub_dict['init_hour_beg'] - == stringsub_dict['init_hour_end']): - stringsub_dict['init_hour'] = ( - stringsub_dict['init_hour_end'] - ) - elif 'LEAD' in list_name: - lead_timedelta = datetime.timedelta( - hours=int(list_name_value[:-4]), - minutes=int(list_name_value[-4:-2]), - seconds=int(list_name_value[-2:]) - ) - stringsub_dict[list_name.lower()] = list_name_value - stringsub_dict[list_name.lower()+'_hour'] = ( - list_name_value[:-4] - ) - stringsub_dict[list_name.lower()+'_min'] = ( - list_name_value[-4:-2] - ) - stringsub_dict[list_name.lower()+'_sec'] = ( - list_name_value[-2:] - ) - stringsub_dict[list_name.lower()+'_totalsec'] = str(int( - lead_timedelta.total_seconds() - )) - list_type = list_name.replace('_LEAD', '').lower() - check_list1 = config_dict[list_name] - if 'FCST' in list_name: - check_list2 = config_dict[list_name.replace('FCST', 'OBS')] - elif 'OBS' in list_name: - check_list2 = config_dict[list_name.replace('OBS', 'FCST')] - if (check_list1 == check_list2 - or len(check_list2) 
                     == 0):
-                stringsub_dict['lead'] = stringsub_dict[list_name.lower()]
-                stringsub_dict['lead_hour'] = (
-                    stringsub_dict[list_name.lower()+'_hour']
-                )
-                stringsub_dict['lead_min'] = (
-                    stringsub_dict[list_name.lower()+'_min']
-                )
-                stringsub_dict['lead_sec'] = (
-                    stringsub_dict[list_name.lower()+'_sec']
-                )
-                stringsub_dict['lead_totalsec'] = (
-                    stringsub_dict[list_name.lower()+'_totalsec']
-                )
-            else:
-                stringsub_dict[list_name.lower()] = list_name_value
-
-        # Set group information
-        for group_list in lists_to_group:
-            list_name = group_list.replace('_LIST', '')
-            list_name_value = (
-                config_dict[list_name].replace('"', '').replace(' ', '') \
-                .replace(',', '_').replace('*', 'ALL')
-            )
-            if 'THRESH' in list_name:
-                if (self.forMakePlots and
-                        group_list in self.force_group_for_make_plots_lists):
-                    continue
+    @staticmethod
+    def _get_check_list(list_name, config_dict):
+        """! Helper function for getting the opposite list from config dict.
-
-                thresh_letter = self.format_thresh(
-                    config_dict[list_name]
-                )
+        @param list_name name of the list to check, containing FCST or OBS
+        @param config_dict dictionary to query
+        @returns equivalent OBS item if list_name is FCST,
+         equivalent FCST item if list_name is OBS, or
+         None if list_name contains neither FCST nor OBS
+        """
+        if 'FCST' in list_name:
+            return config_dict[list_name.replace('FCST', 'OBS')]
+        if 'OBS' in list_name:
+            return config_dict[list_name.replace('OBS', 'FCST')]
+        return None
+
+    def _set_stringsub_hours(self, sub_dict, fcst_hour_str, obs_hour_str):
+        """! Set string sub dictionary _beg and _end values for fcst and obs
+         hour lists.
+         Set other values depending on the values set in the fcst and obs
+         hour lists.
+         Values that are set depend on what is set in c_dict DATE_TYPE,
+         which is either INIT or VALID. If neither the fcst nor the obs hour
+         list is set, {date_type}_beg/end and {date_type} are not set at all
+         (empty string).
+ + @param sub_dict dictionary to set string sub values + @param fcst_hour_str string with list of forecast hours to process + @param obs_hour_str string with list of observation hours to process + """ + if fcst_hour_str: + fcst_hour_list = get_delta_list(fcst_hour_str) + else: + fcst_hour_list = None - stringsub_dict[list_name.lower()] = ( - thresh_letter.replace(',', '_').replace('*', 'ALL') - ) - elif 'HOUR' in list_name: - list_name_values_list = ( - config_dict[list_name].replace('"', '').split(', ') - ) - stringsub_dict[list_name.lower()] = list_name_value - if list_name_values_list != ['']: - stringsub_dict[list_name.lower()+'_beg'] = ( - datetime.datetime.strptime(list_name_values_list[0], - '%H%M%S') - ) - stringsub_dict[list_name.lower()+'_end'] = ( - datetime.datetime.strptime(list_name_values_list[-1], - '%H%M%S') - ) - if (stringsub_dict[list_name.lower()+'_beg'] - == stringsub_dict[list_name.lower()+'_end']): - stringsub_dict[list_name.lower()] = ( - stringsub_dict[list_name.lower()+'_end'] - ) - check_list1 = config_dict[list_name] - if 'FCST' in list_name: - check_list2 = config_dict[list_name.replace('FCST', - 'OBS')] - elif 'OBS' in list_name: - check_list2 = config_dict[list_name.replace('OBS', - 'FCST')] - if (check_list1 == check_list2 - or len(check_list2) == 0): - list_type = list_name.replace('_HOUR', '').lower() - if 'VALID' in list_name: - stringsub_dict['valid_hour_beg'] = ( - stringsub_dict[list_type+'_hour_beg'] - ) - stringsub_dict['valid_hour_end'] = ( - stringsub_dict[list_type+'_hour_end'] - ) - if (stringsub_dict['valid_hour_beg'] - == stringsub_dict['valid_hour_end']): - stringsub_dict['valid_hour'] = ( - stringsub_dict['valid_hour_end'] - ) - elif 'INIT' in list_name: - stringsub_dict['init_hour_beg'] = ( - stringsub_dict[list_type+'_hour_beg'] - ) - stringsub_dict['init_hour_end'] = ( - stringsub_dict[list_type+'_hour_end'] - ) - if (stringsub_dict['init_hour_beg'] - == stringsub_dict['init_hour_end']): - stringsub_dict['init_hour'] = ( - stringsub_dict['init_hour_end'] - ) - else: - stringsub_dict[list_name.lower()+'_beg'] = ( - datetime.datetime.strptime('000000', - '%H%M%S') - ) - stringsub_dict[list_name.lower()+'_end'] = ( - datetime.datetime.strptime('235959', - '%H%M%S') - ) - check_list1 = config_dict[list_name] - if 'FCST' in list_name: - check_list2 = config_dict[list_name.replace('FCST', - 'OBS')] - elif 'OBS' in list_name: - check_list2 = config_dict[list_name.replace('OBS', - 'FCST')] - if (check_list1 == check_list2 - or len(check_list2) == 0): - list_type = list_name.replace('_HOUR', '').lower() - if 'VALID' in list_name: - stringsub_dict['valid_hour_beg'] = ( - stringsub_dict[list_type+'_hour_beg'] - ) - stringsub_dict['valid_hour_end'] = ( - stringsub_dict[list_type+'_hour_end'] - ) - if (stringsub_dict['valid_hour_beg'] - == stringsub_dict['valid_hour_end']): - stringsub_dict['valid_hour'] = ( - stringsub_dict['valid_hour_end'] - ) - elif 'INIT' in list_name: - stringsub_dict['init_hour_beg'] = ( - stringsub_dict[list_type+'_hour_beg'] - ) - stringsub_dict['init_hour_end'] = ( - stringsub_dict[list_type+'_hour_end'] - ) - if (stringsub_dict['init_hour_beg'] - == stringsub_dict['init_hour_end']): - stringsub_dict['init_hour'] = ( - stringsub_dict['init_hour_end'] - ) - elif not (self.forMakePlots and - group_list in self.force_group_for_make_plots_lists): - # if setting up MakePlots, skip adding forced - # group lists so they will remain templates - # to be filled in by the plotting scripts - stringsub_dict[list_name.lower()] = 
                                                 list_name_value
-
-        nkeys_end = len(stringsub_dict_keys)
-        # Some lines for debugging if needed in future
-        #self.logger.info(nkeys_start)
-        #self.logger.info(nkeys_end)
-        #for key, value in stringsub_dict.items():
-        #    self.logger.info("{} ({})".format(key, value))
-        return stringsub_dict
+        if obs_hour_str:
+            obs_hour_list = get_delta_list(obs_hour_str)
+        else:
+            obs_hour_list = None

-    def get_output_filename(self, output_type, filename_template,
-                            filename_type,
-                            lists_to_loop, lists_to_group, config_dict):
-        """! Create a file name for stat_analysis output.
-
-             Args:
-                 output_type - string for the type of
-                               stat_analysis output, either
-                               dump_row or out_stat
-                 filename_template - string of the template to be used
-                                     to create the file name
-                 filename_type - string of the source of the
-                                 template being used, either
-                                 default or user
-                 lists_to_loop - list of all the list names whose
-                                 items are being grouped together
-                 lists_to group - list of all the list names whose
-                                  items are being looped over
-                 config_dict - dictionary containing the
-                               configuration information
-
-             Returns:
-                 output_filename - string of the filled file name
-                                   template
+        self._set_stringsub_hours_item(sub_dict, 'fcst', fcst_hour_list)
+        self._set_stringsub_hours_item(sub_dict, 'obs', obs_hour_list)
+
+        self._set_stringsub_generic(sub_dict, fcst_hour_list, obs_hour_list,
+                                    self.c_dict['DATE_TYPE'].lower())
+
+    def _set_stringsub_hours_item(self, sub_dict, fcst_or_obs, hour_list):
+        """! Set either fcst or obs values in string sub dictionary, e.g.
+        [fcst/obs]_[init/valid]_[beg/end].
+        Values that are set depend on what is set in c_dict DATE_TYPE, which
+        is either INIT or VALID. If the beg and end values are the same, then
+        also set the same variable without the _beg/end extension, e.g. if
+        fcst_valid_beg is equal to fcst_valid_end, also set fcst_valid.
+
+        @param sub_dict dictionary to set string sub values
+        @param fcst_or_obs string to note processing either fcst or obs
+        @param hour_list list of fcst or obs hours
+        """
         date_beg = self.c_dict['DATE_BEG']
         date_end = self.c_dict['DATE_END']
-        date_type = self.c_dict['DATE_TYPE']
+        prefix = f"{fcst_or_obs}_{self.c_dict['DATE_TYPE'].lower()}"
+
+        # get YYYYMMDD of begin and end time
+        beg_ymd = datetime.strptime(date_beg.strftime(YMD), YMD)
+        end_ymd = datetime.strptime(date_end.strftime(YMD), YMD)
+
+        # if hour list is provided, truncate begin and end time to YYYYMMDD
+        # and add first hour offset to begin time and last hour to end time
+        if hour_list:
+            sub_dict[f'{prefix}_beg'] = beg_ymd + hour_list[0]
+            sub_dict[f'{prefix}_end'] = end_ymd + hour_list[-1]
+            if sub_dict[f'{prefix}_beg'] == sub_dict[f'{prefix}_end']:
+                sub_dict[prefix] = sub_dict[f'{prefix}_beg']
+
+            return
+
+        sub_dict[f'{prefix}_beg'] = date_beg
+
+        # if end time is only YYYYMMDD, set HHMMSS to 23:59:59
+        # otherwise use HHMMSS from end time
+        if date_end == end_ymd:
+            sub_dict[f'{prefix}_end'] = end_ymd + relativedelta(hours=+23,
+                                                                minutes=+59,
+                                                                seconds=+59)
+        else:
+            sub_dict[f'{prefix}_end'] = date_end
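[Editor's note] A sketch of the begin/end arithmetic in
_set_stringsub_hours_item(), assuming DATE_BEG/DATE_END hold datetimes and the
hour list holds relativedelta offsets (as produced by get_delta_list in this
wrapper); the dates are hypothetical:

    from datetime import datetime
    from dateutil.relativedelta import relativedelta

    YMD = '%Y%m%d'
    date_beg = datetime(2022, 1, 1, 12)  # 2022-01-01 12Z
    beg_ymd = datetime.strptime(date_beg.strftime(YMD), YMD)  # truncated to day
    hour_list = [relativedelta(hours=6), relativedelta(hours=18)]
    assert beg_ymd + hour_list[0] == datetime(2022, 1, 1, 6)
    assert beg_ymd + hour_list[-1] == datetime(2022, 1, 1, 18)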
+
+    @staticmethod
+    def _set_stringsub_generic(sub_dict, fcst_hour_list, obs_hour_list,
+                               date_type):
+        """! Set [init/valid]_[beg/end] values based on the hour lists that
+         are provided.
+         Set {date_type}_[beg/end] to fcst_{date_type}_[beg/end] if
+         fcst and obs lists are the same or if fcst list is set and obs is not.
+         Set {date_type}_[beg/end] to obs_{date_type}_[beg/end] if obs list is
+         set and fcst is not.
+         Also sets {date_type} if {date_type}_beg and {date_type}_end are equal.
+
+        @param sub_dict dictionary to set string sub values
+        @param fcst_hour_list list of forecast hours or leads
+        @param obs_hour_list list of observation hours or leads
+        @param date_type type of date to process: valid or init
+        """
+        # if fcst and obs hour lists are the same or if fcst is set but not
+        # obs, set {date_type}_beg/end to fcst_{date_type}_beg/end
+        if (fcst_hour_list == obs_hour_list or
+                (fcst_hour_list and not obs_hour_list)):
+            sub_dict[f'{date_type}_beg'] = sub_dict[f'fcst_{date_type}_beg']
+            sub_dict[f'{date_type}_end'] = sub_dict[f'fcst_{date_type}_end']
+
+        # if obs hr list is set but fcst hr list is not,
+        # set {date_type}_beg/end to obs_{date_type}_beg/end
+        elif not fcst_hour_list and obs_hour_list:
+            sub_dict[f'{date_type}_beg'] = sub_dict[f'obs_{date_type}_beg']
+            sub_dict[f'{date_type}_end'] = sub_dict[f'obs_{date_type}_end']
+
+        # if {date_type} beg and end are the same, set {date_type}
+        if sub_dict[f'{date_type}_beg'] == sub_dict[f'{date_type}_end']:
+            sub_dict[date_type] = sub_dict[f'{date_type}_beg']
+
+    def _set_strinsub_other(self, sub_dict, date_type, fcst_lead_str,
+                            obs_lead_str):
+        """! Compute beg and end values for the opposite of date_type (e.g.
+         valid if init and vice versa) using min/max forecast leads.
+
+        @param sub_dict dictionary to set string sub values
+        @param date_type type of date to process: valid or init
+        @param fcst_lead_str string to parse list of forecast leads
+        @param obs_lead_str string to parse list of observation leads
+        """
+        if fcst_lead_str:
+            fcst_lead_list = get_delta_list(fcst_lead_str)
+        else:
+            fcst_lead_list = None
-
+        if obs_lead_str:
+            obs_lead_list = get_delta_list(obs_lead_str)
+        else:
+            obs_lead_list = None
+
+        other_type = 'valid' if date_type == 'init' else 'init'
+        self._set_strinsub_other_item(sub_dict, date_type, 'fcst',
+                                      fcst_lead_list)
+        self._set_strinsub_other_item(sub_dict, date_type, 'obs',
+                                      obs_lead_list)
+        self._set_stringsub_generic(sub_dict, fcst_lead_list, obs_lead_list,
+                                    other_type)
+
+    @staticmethod
+    def _set_strinsub_other_item(sub_dict, date_type, fcst_or_obs, hour_list):
+        """! Compute the other type's begin and end values using the beg/end
+         values and min/max forecast leads.
+         If date_type is init,
+         compute valid_beg by adding init_beg and min lead,
+         compute valid_end by adding init_end and max lead.
+         If date_type is valid,
+         compute init_beg by subtracting max lead from valid_beg,
+         compute init_end by subtracting min lead from valid_end.
+ + @param sub_dict dictionary to set string sub values + @param date_type type of date to process: valid or init + @param fcst_or_obs string to use to process either fcst or obs + @param hour_list list of forecast leads to use to calculate times + """ + other_type = 'valid' if date_type == 'init' else 'init' + date_prefix = f'{fcst_or_obs}_{date_type}' + other_prefix = f'{fcst_or_obs}_{other_type}' + if not hour_list: + sub_dict[f'{other_prefix}_beg'] = sub_dict[f'{date_prefix}_beg'] + sub_dict[f'{other_prefix}_end'] = sub_dict[f'{date_prefix}_end'] + return + + min_lead = hour_list[0] + max_lead = hour_list[-1] + + if date_type == 'init': + sub_dict[f'{other_prefix}_beg'] = ( + sub_dict[f'{date_prefix}_beg'] + min_lead + ) + sub_dict[f'{other_prefix}_end'] = ( + sub_dict[f'{date_prefix}_end'] + max_lead + ) + else: + sub_dict[f'{other_prefix}_beg'] = ( + sub_dict[f'{date_prefix}_beg'] - max_lead + ) + sub_dict[f'{other_prefix}_end'] = ( + sub_dict[f'{date_prefix}_end'] - min_lead + ) - if filename_type == 'default': - if (self.runMakePlots and output_type == 'dump_row'): - filename_template_prefix = ( - filename_template+date_type.lower() - +'{'+date_type.lower()+'_beg?fmt=%Y%m%d}' - +'to{'+date_type.lower()+'_end?fmt=%Y%m%d}_' - ) - if (stringsub_dict['valid_hour_beg'] != '' - and stringsub_dict['valid_hour_end'] != ''): - filename_template_prefix+=( - 'valid{valid_hour_beg?fmt=%H%M}to' - +'{valid_hour_end?fmt=%H%M}Z_' - ) - else: - filename_template_prefix+=( - 'fcst_valid{fcst_valid_hour_beg?fmt=%H%M}to' - +'{fcst_valid_hour_end?fmt=%H%M}Z_' - 'obs_valid{obs_valid_hour_beg?fmt=%H%M}to' - +'{obs_valid_hour_end?fmt=%H%M}Z_' - ) - if (stringsub_dict['init_hour_beg'] != '' - and stringsub_dict['init_hour_end'] != ''): - filename_template_prefix+=( - 'init{init_hour_beg?fmt=%H%M}to' - +'{init_hour_end?fmt=%H%M}Z' - ) - else: - filename_template_prefix+=( - 'fcst_init{fcst_init_hour_beg?fmt=%H%M}to' - +'{fcst_init_hour_end?fmt=%H%M}Z_' - 'obs_init{obs_init_hour_beg?fmt=%H%M}to' - +'{obs_init_hour_end?fmt=%H%M}Z' - ) - filename_template_prefix+=( - '_fcst_lead{fcst_lead?fmt=%s}' - +'_fcst{fcst_var?fmt=%s}{fcst_level?fmt=%s}' - +'{fcst_thresh?fmt=%s}{interp_mthd?fmt=%s}_' - +'obs{obs_var?fmt=%s}{obs_level?fmt=%s}' - +'{obs_thresh?fmt=%s}{interp_mthd?fmt=%s}_' - +'vxmask{vx_mask?fmt=%s}' - ) - if 'DESC_LIST' in lists_to_loop: - filename_template_prefix = ( - filename_template_prefix - +'_desc{desc?fmt=%s}' - ) - if 'OBS_LEAD_LIST' in lists_to_loop: - filename_template_prefix = ( - filename_template_prefix - +'_obs_lead{obs_lead?fmt=%s}' - ) - if 'INTERP_PNTS_LIST' in lists_to_loop: - filename_template_prefix = ( - filename_template_prefix - +'_interp_pnts{interp_pnts?fmt=%s}' - ) - if 'COV_THRESH_LIST' in lists_to_loop: - filename_template_prefix = ( - filename_template_prefix - +'_cov_thresh{cov_thresh?fmt=%s}' - ) - if 'ALPHA_LIST' in lists_to_loop: - filename_template_prefix = ( - filename_template_prefix - +'_alpha{alpha?fmt=%s}' - ) - filename_template = filename_template_prefix - else: - if date_beg == date_end: - filename_template = ( - filename_template+date_type.lower()+date_beg - ) - else: - filename_template = ( - filename_template+date_type.lower()+ - date_beg+'to'+date_end - ) - for loop_list in lists_to_loop: - if loop_list != 'MODEL_LIST': - list_name = loop_list.replace('_LIST', '') - if 'HOUR' in list_name: - filename_template = ( - filename_template+'_' - +list_name.replace('_', '').lower() - +config_dict[list_name].replace('"', '')+'Z' - ) - else: - filename_template = ( - 
filename_template+'_' - +list_name.replace('_', '').lower() - +config_dict[list_name].replace('"', '') - ) - filename_template += '_' + output_type + '.stat' - - self.logger.debug("Building "+output_type+" filename from " - +filename_type+" template: "+filename_template) + def _get_output_filename(self, output_type, filename_template, + stringsub_dict): + """! Create a file name for stat_analysis output. + + @param output_type string for the type of stat_analysis output, either + dump_row, out_stat, or output. + @param filename_template string of the template to create the file + name. + @param stringsub_dict dictionary with info to substitute into filename + templates + @returns string of the filled file name template + """ + self.logger.debug(f"Building {output_type} filename from " + f"template: {filename_template}") output_filename = do_string_sub(filename_template, - **stringsub_dict, - skip_missing_tags=self.forMakePlots) + **stringsub_dict) return output_filename - def get_lookin_dir(self, dir_path, lists_to_loop, lists_to_group, config_dict): - """!Fill in necessary information to get the path to - the lookin directory to pass to stat_analysis. - - Args: - dir_path - string of the user provided - directory path - lists_to_loop - list of all the list names whose - items are being grouped together - lists_to group - list of all the list names whose - items are being looped over - config_dict - dictionary containing the - configuration information - - Returns: - lookin_dir - string of the filled directory - from dir_path + def _get_lookin_dir(self, dir_path, config_dict): + """!Fill in necessary information to get the path to the lookin + directory to pass to stat_analysis. Expand any wildcards. + + @param dir_path string of the user provided directory path + @param config_dict dictionary containing the configuration information + @returns string of the filled directory from dir_path """ - stringsub_dict = self.build_stringsub_dict(lists_to_loop, - lists_to_group, - config_dict) + stringsub_dict = self._build_stringsub_dict(config_dict) dir_path_filled = do_string_sub(dir_path, **stringsub_dict) all_paths = [] for one_path in dir_path_filled.split(','): - if '*' in one_path: - self.logger.debug(f"Expanding wildcard path: {one_path}") - expand_path = glob.glob(one_path.strip()) - if not expand_path: - self.logger.warning(f"Wildcard expansion found no matches") - continue - all_paths.extend(sorted(expand_path)) - else: - all_paths.append(one_path.strip()) + if '*' not in one_path: + all_paths.append(one_path.strip()) + continue + + self.logger.debug(f"Expanding wildcard path: {one_path}") + expand_path = glob.glob(one_path.strip()) + if not expand_path: + self.logger.warning("Wildcard expansion found no matches") + continue + all_paths.extend(sorted(expand_path)) + return ' '.join(all_paths) - def format_valid_init(self, config_dict): + def _format_valid_init(self, config_dict, stringsub_dict): """! Format the valid and initialization dates and hours for the MET stat_analysis config file. 
- Args: - config_dict - dictionary containing the - configuration information - - Returns: - config_dict - dictionary containing the - edited configuration information - for valid and initialization dates - and hours + @param config_dict dictionary containing the configuration information + @param stringsub_dict dictionary with info to substitute into filename + templates + @returns dictionary containing the edited configuration information + for valid and initialization dates and hours """ - date_beg = self.c_dict['DATE_BEG'] - date_end = self.c_dict['DATE_END'] - date_type = self.c_dict['DATE_TYPE'] - - fcst_valid_hour_list = config_dict['FCST_VALID_HOUR'].split(', ') - fcst_init_hour_list = config_dict['FCST_INIT_HOUR'].split(', ') - obs_valid_hour_list = config_dict['OBS_VALID_HOUR'].split(', ') - obs_init_hour_list = config_dict['OBS_INIT_HOUR'].split(', ') - nfcst_valid_hour = len(fcst_valid_hour_list) - nfcst_init_hour = len(fcst_init_hour_list) - nobs_valid_hour = len(obs_valid_hour_list) - nobs_init_hour = len(obs_init_hour_list) - if nfcst_valid_hour > 1: - if date_type == 'VALID': - fcst_valid_hour_beg = fcst_valid_hour_list[0].replace('"','') - fcst_valid_hour_end = fcst_valid_hour_list[-1].replace('"','') - config_dict['FCST_VALID_BEG'] = ( - str(date_beg)+'_'+fcst_valid_hour_beg - ) - config_dict['FCST_VALID_END'] = ( - str(date_end)+'_'+fcst_valid_hour_end - ) - elif date_type == 'INIT': - config_dict['FCST_VALID_BEG'] = '' - config_dict['FCST_VALID_END'] = '' - elif nfcst_valid_hour == 1 and fcst_valid_hour_list != ['']: - fcst_valid_hour_now = fcst_valid_hour_list[0].replace('"','') - config_dict['FCST_VALID_HOUR'] = '"'+fcst_valid_hour_now+'"' - if date_type == 'VALID': - config_dict['FCST_VALID_BEG'] = ( - str(date_beg)+'_'+fcst_valid_hour_now - ) - config_dict['FCST_VALID_END'] = ( - str(date_end)+'_'+fcst_valid_hour_now - ) - elif date_type == 'INIT': - config_dict['FCST_VALID_BEG'] = '' - config_dict['FCST_VALID_END'] = '' - else: - config_dict['FCST_VALID_BEG'] = '' - config_dict['FCST_VALID_END'] = '' - config_dict['FCST_VALID_HOUR'] = '' - if nfcst_init_hour > 1: - if date_type == 'VALID': - config_dict['FCST_INIT_BEG'] = '' - config_dict['FCST_INIT_END'] = '' - elif date_type == 'INIT': - fcst_init_hour_beg = fcst_init_hour_list[0].replace('"','') - fcst_init_hour_end = fcst_init_hour_list[-1].replace('"','') - config_dict['FCST_INIT_BEG'] = ( - str(date_beg)+'_'+fcst_init_hour_beg - ) - config_dict['FCST_INIT_END'] = ( - str(date_end)+'_'+fcst_init_hour_end - ) - elif nfcst_init_hour == 1 and fcst_init_hour_list != ['']: - fcst_init_hour_now = fcst_init_hour_list[0].replace('"','') - config_dict['FCST_INIT_HOUR'] = '"'+fcst_init_hour_now+'"' - if date_type == 'VALID': - config_dict['FCST_INIT_BEG'] = '' - config_dict['FCST_INIT_END'] = '' - elif date_type == 'INIT': - config_dict['FCST_INIT_BEG'] = ( - str(date_beg)+'_'+fcst_init_hour_now - ) - config_dict['FCST_INIT_END'] = ( - str(date_end)+'_'+fcst_init_hour_now - ) - else: - config_dict['FCST_INIT_BEG'] = '' - config_dict['FCST_INIT_END'] = '' - config_dict['FCST_INIT_HOUR'] = '' - if nobs_valid_hour > 1: - if date_type == 'VALID': - obs_valid_hour_beg = obs_valid_hour_list[0].replace('"','') - obs_valid_hour_end = obs_valid_hour_list[-1].replace('"','') - config_dict['OBS_VALID_BEG'] = ( - str(date_beg)+'_'+obs_valid_hour_beg - ) - config_dict['OBS_VALID_END'] = ( - str(date_end)+'_'+obs_valid_hour_end - ) - elif date_type == 'INIT': - config_dict['OBS_VALID_BEG'] = '' - config_dict['OBS_VALID_END'] = '' - 
elif nobs_valid_hour == 1 and obs_valid_hour_list != ['']: - obs_valid_hour_now = obs_valid_hour_list[0].replace('"','') - config_dict['OBS_VALID_HOUR'] = '"'+obs_valid_hour_now+'"' - if date_type == 'VALID': - config_dict['OBS_VALID_BEG'] = ( - str(date_beg)+'_'+obs_valid_hour_now - ) - config_dict['OBS_VALID_END'] = ( - str(date_end)+'_'+obs_valid_hour_now - ) - elif date_type == 'INIT': - config_dict['OBS_VALID_BEG'] = '' - config_dict['OBS_VALID_END'] = '' - else: - config_dict['OBS_VALID_BEG'] = '' - config_dict['OBS_VALID_END'] = '' - config_dict['OBS_VALID_HOUR'] = '' - if nobs_init_hour > 1: - if date_type == 'VALID': - config_dict['OBS_INIT_BEG'] = '' - config_dict['OBS_INIT_END'] = '' - elif date_type == 'INIT': - obs_init_hour_beg = obs_init_hour_list[0].replace('"','') - obs_init_hour_end = obs_init_hour_list[-1].replace('"','') - config_dict['OBS_INIT_BEG'] = ( - str(date_beg)+'_'+obs_init_hour_beg - ) - config_dict['OBS_INIT_END'] = ( - str(date_end)+'_'+obs_init_hour_end - ) - elif nobs_init_hour == 1 and obs_init_hour_list != ['']: - obs_init_hour_now = obs_init_hour_list[0].replace('"','') - config_dict['OBS_INIT_HOUR'] = '"'+obs_init_hour_now+'"' - if date_type == 'VALID': - config_dict['OBS_INIT_BEG'] = '' - config_dict['OBS_INIT_END'] = '' - elif date_type == 'INIT': - config_dict['OBS_INIT_BEG'] = ( - str(date_beg)+'_'+obs_init_hour_now - ) - config_dict['OBS_INIT_END'] = ( - str(date_end)+'_'+obs_init_hour_now - ) - else: - config_dict['OBS_INIT_BEG'] = '' - config_dict['OBS_INIT_END'] = '' - config_dict['OBS_INIT_HOUR'] = '' - return config_dict - - def parse_model_info(self): + output_dict = copy.deepcopy(config_dict) + # set all of the HOUR and LEAD lists to include the MET time format + for list_name in self.FORMAT_LISTS: + list_name = list_name.replace('_LIST', '') + values = get_met_time_list(config_dict.get(list_name, '')) + values = [f'"{item}"' for item in values] + output_dict[list_name] = ', '.join(values) + + for fcst_or_obs in ['FCST', 'OBS']: + for init_or_valid in ['INIT', 'VALID']: + self._format_valid_init_item(output_dict, + stringsub_dict, + fcst_or_obs, + init_or_valid) + + return output_dict + + def _format_valid_init_item(self, output_dict, stringsub_dict, fcst_or_obs, + init_or_valid): + """! Check if variables are set in the METplusConfig to explicitly + set the begin and end values in the wrapped MET config file. + + @param output_dict dictionary to set values to set in MET config + @param fcst_or_obs string either FCST or OBS + @param init_or_valid string either INIT or VALID + """ + prefix = f'{fcst_or_obs}_{init_or_valid}' + + # check if explicit value is set for _BEG or _END + # e.g. STAT_ANALYSIS_FCST_INIT_BEG + app = self.app_name.upper() + for beg_or_end in ('BEG', 'END'): + var_prefix = f'{app}_{prefix}_{beg_or_end}' + generic_prefix = f'{app}_{init_or_valid}_{beg_or_end}' + value = None + if self.config.has_option('config', var_prefix): + value = self.config.getraw('config', var_prefix) + elif self.config.has_option('config', generic_prefix): + value = self.config.getraw('config', generic_prefix) + + if value: + formatted_value = do_string_sub(value, **stringsub_dict) + output_dict[f'{prefix}_{beg_or_end}'] = formatted_value + + def _parse_model_info(self): """! Parse for model information. 
-
-        Args:
-
-        Returns:
-            model_list - list of dictionaries containing
-                         model information
+
+        @returns list of dictionaries containing model information
         """
         model_info_list = []
         model_indices = list(
@@ -1249,122 +1001,88 @@ def parse_model_info(self):
                                           index_index=1).keys()
         )
         for m in model_indices:
-            model_name = self.config.getstr('config', f'MODEL{m}')
-            model_reference_name = self.config.getstr('config',
-                                                      f'MODEL{m}_REFERENCE_NAME',
-                                                      model_name)
-            model_dir = self.config.getraw('dir',
-                                           f'MODEL{m}_STAT_ANALYSIS_LOOKIN_DIR')
+            model_name = self.config.getraw('config', f'MODEL{m}')
+
+            # add quotes to model name if a value is set
+            model_name = f'"{model_name}"' if model_name else ''
+
+            model_dir = (
+                self.config.getraw('config',
+                                   f'MODEL{m}_STAT_ANALYSIS_LOOKIN_DIR')
            )
             if not model_dir:
-                self.log_error(f"MODEL{m}_STAT_ANALYSIS_LOOKIN_DIR must be set "
-                               f"if MODEL{m} is set.")
+                self.log_error(f"MODEL{m}_STAT_ANALYSIS_LOOKIN_DIR must be "
+                               f"set if MODEL{m} is set.")
-                return None, None
+                return None
-            model_obtype = self.config.getstr('config', f'MODEL{m}_OBTYPE', '')
-            if not model_obtype:
-                self.log_error(f"MODEL{m}_OBTYPE must be set "
-                               f"if MODEL{m} is set.")
-                return None, None
+            model_obtype = self.config.getraw('config', f'MODEL{m}_OBTYPE')
+            model_obtype = f'"{model_obtype}"' if model_obtype else ''
+            model_dump_row_filename_template = None
+            model_out_stat_filename_template = None
             for output_type in ['DUMP_ROW', 'OUT_STAT']:
-                # if MODEL<n>_STAT_ANALYSIS_<output_type>_TEMPLATE is set, use that
+                var_name = f'STAT_ANALYSIS_{output_type}_TEMPLATE'
+                # use MODEL<n>_STAT_ANALYSIS_<output_type>_TEMPLATE if set
                 model_filename_template = (
-                    self.config.getraw('filename_templates',
-                                       'MODEL'+m+'_STAT_ANALYSIS_'
-                                       +output_type+'_TEMPLATE')
+                    self.config.getraw('config', f'MODEL{m}_{var_name}')
                 )
                 # if not set, use STAT_ANALYSIS_<output_type>_TEMPLATE
                 if not model_filename_template:
                     model_filename_template = (
-                        self.config.getraw('filename_templates',
-                                           'STAT_ANALYSIS_'
-                                           + output_type + '_TEMPLATE')
+                        self.config.getraw('config', var_name)
                     )
-                    if not model_filename_template:
-                        model_filename_template = '{model?fmt=%s}_{obtype?fmt=%s}_'
-                        model_filename_type = 'default'
-                    else:
-                        model_filename_type = 'user'
                 if output_type == 'DUMP_ROW':
-                    model_dump_row_filename_template = (
-                        model_filename_template
-                    )
-                    model_dump_row_filename_type = model_filename_type
+                    model_dump_row_filename_template = model_filename_template
                 elif output_type == 'OUT_STAT':
-                    # if MakePlots is run
-                    if self.runMakePlots:
-                        model_out_stat_filename_template = 'NA'
-                        model_out_stat_filename_type = 'NA'
-                    else:
-                        model_out_stat_filename_template = (
-                            model_filename_template
-                        )
-                        model_out_stat_filename_type = model_filename_type
-
-            mod = {}
-            mod['name'] = model_name
-            mod['reference_name'] = model_reference_name
-            mod['dir'] = model_dir
-            mod['obtype'] = model_obtype
-            mod['dump_row_filename_template'] = (
-                model_dump_row_filename_template
-            )
-            mod['dump_row_filename_type'] = model_dump_row_filename_type
-            mod['out_stat_filename_template'] = (
-                model_out_stat_filename_template
-            )
-            mod['out_stat_filename_type'] = model_out_stat_filename_type
+                    model_out_stat_filename_template = model_filename_template
+
+            mod = {
+                'name': model_name,
+                'dir': model_dir,
+                'obtype': model_obtype,
+                'dump_row_filename_template': model_dump_row_filename_template,
+                'out_stat_filename_template': model_out_stat_filename_template,
+            }
             model_info_list.append(mod)

+        if not model_info_list:
+            self.log_error('At least one set of model information must be '
+                           'set using MODEL<n>, MODEL<n>_OBTYPE, and '
+                           'MODEL<n>_STAT_ANALYSIS_LOOKIN_DIR')
+
         return model_info_list

-    def get_level_list(self, data_type):
-        """!Read forecast or observation level list from config.
-            Format list items to match the format expected by
-            StatAnalysis by removing parenthesis and any quotes,
-            then adding back single quotes
-            Args:
-                @param data_type type of list to get, FCST or OBS
-                @returns list containing the formatted level list
-        """
-        level_list = []
-
-        level_input = getlist(
-            self.config.getstr('config', f'{data_type}_LEVEL_LIST', '')
-        )
-
-        for level in level_input:
-            level = level.strip('(').strip(')')
-            level = f'{remove_quotes(level)}'
-            level_list.append(level)
-
-        return level_list
-
-    def process_job_args(self, job_type, job, model_info,
-                         lists_to_loop_items, lists_to_group_items, runtime_settings_dict):
-
+    def _process_job_args(self, job_type, job, model_info,
+                          runtime_settings_dict, stringsub_dict):
+        """! Get dump_row or out_stat file paths and replace [dump_row_file]
+         and [out_stat_file] keywords from job arguments with the paths.
+
+        @param job_type type of job, either dump_row or out_stat
+        @param job string of job arguments to replace keywords
+        @param model_info dictionary containing info for each model processed.
+         Used to get filename template to use for substitution
+        @param runtime_settings_dict dictionary containing information for the
+         run that is being processed. Used to substitute values.
+        @param stringsub_dict dictionary with info to substitute into filename
+         templates
+        @returns job string with values substituted for [dump_row_file] or
+         [out_stat_file]
+        """
         output_template = (
             model_info[f'{job_type}_filename_template']
         )
-        filename_type = (
-            model_info[f'{job_type}_filename_type']
-        )
         output_filename = (
-            self.get_output_filename(job_type,
-                                     output_template,
-                                     filename_type,
-                                     lists_to_loop_items,
-                                     lists_to_group_items,
-                                     runtime_settings_dict)
+            self._get_output_filename(job_type,
+                                      output_template,
+                                      stringsub_dict)
         )
         output_file = os.path.join(self.c_dict['OUTPUT_DIR'],
                                    output_filename)

-        # substitute output filename in JOB_ARGS line
+        # substitute output filename in JOBS line
         job = job.replace(f'[{job_type}_file]', output_file)
         job = job.replace(f'[{job_type}_filename]', output_file)

@@ -1373,167 +1091,42 @@ def process_job_args(self, job_type, job, model_info,
         return job
- model_info = self.get_model_obtype_and_lookindir(runtime_settings_dict, - loop_lists, - group_lists, - ) - if model_info is None: - return None - - runtime_settings_dict['JOB'] = self.get_job_info(model_info, - runtime_settings_dict, - loop_lists, - group_lists, - ) - - # get -out argument if set - if self.c_dict['OUTPUT_TEMPLATE']: - output_filename = ( - self.get_output_filename('output', - self.c_dict['OUTPUT_TEMPLATE'], - 'user', - loop_lists, - group_lists, - runtime_settings_dict) - ) - output_file = os.path.join(self.c_dict['OUTPUT_DIR'], - output_filename) - - # add output file path to runtime_settings_dict - runtime_settings_dict['OUTPUT_FILENAME'] = output_file - - # Set up forecast and observation valid - # and initialization time information. - runtime_settings_dict = ( - self.format_valid_init(runtime_settings_dict) - ) - formatted_runtime_settings_dict_list.append(runtime_settings_dict) - - return formatted_runtime_settings_dict_list - - def get_runtime_settings(self, c_dict): - - # Parse whether all expected METplus config _LIST variables - # to be treated as a loop or group. - group_lists = c_dict['GROUP_LIST_ITEMS'] - loop_lists = c_dict['LOOP_LIST_ITEMS'] - - if self.forMakePlots: - group_lists = c_dict['GROUP_LIST_ITEMS_MAKE_PLOTS'] - loop_lists = c_dict['LOOP_LIST_ITEMS_MAKE_PLOTS'] - - runtime_setup_dict = {} - # Fill setup dictionary for MET config variable name - # and its value as a string for group lists. - for group_list in group_lists: - runtime_setup_dict_name = group_list.replace('_LIST', '') - add_quotes = False if 'THRESH' in group_list else True - - # if preparing for MakePlots, change - # commas to _ and * to ALL in list items - if self.forMakePlots: - formatted_list = [] - for format_list in c_dict[group_list]: - formatted_list.append(format_list.replace(',', '_') - .replace('*', 'ALL')) - else: - formatted_list = c_dict[group_list] - runtime_setup_dict[runtime_setup_dict_name] = ( - [self.list_to_str(formatted_list, - add_quotes=add_quotes)] - ) + def _get_c_dict_list(self): + """! Build list of config dictionaries for each field + name/level/threshold specified by the [FCST/OBS]_VAR_* config vars. + If field information was specified in the field lists + [FCST_OBS]_[VAR/UNITS/THRESH/LEVEL]_LIST instead of these + variables, then return a list with a single dictionary that contains + the relevant values from self.c_dict. - # Fill setup dictionary for MET config variable name - # and its value as a list for loop lists. Some items - # in lists need to be formatted now, others done later. - - for loop_list in loop_lists: - # if not a threshold list, add quotes around each value in list - # if loop_list not in self.format_later_list and 'THRESH' not in loop_list: - if 'THRESH' not in loop_list: - c_dict[loop_list] = [f'"{value}"' for value in c_dict[loop_list]] - - runtime_setup_dict_name = loop_list.replace('_LIST', '') - runtime_setup_dict[runtime_setup_dict_name] = ( - c_dict[loop_list] - ) - - # Create run time dictionary with all the combinations - # of settings to be run. - runtime_setup_dict_names = sorted(runtime_setup_dict) - runtime_settings_dict_list = ( - [dict(zip(runtime_setup_dict_names, prod)) for prod in - itertools.product(*(runtime_setup_dict[name] for name in - runtime_setup_dict_names))] - ) - - return runtime_settings_dict_list - - def get_field_units(self, index): - """! 
Get units of fcst and obs fields if set based on VAR index - @params index VAR index corresponding to other [FCST/OBS] info - @returns tuple containing forecast and observation units respectively + @returns list of dictionaries for each field to process """ - fcst_units = self.config.getstr('config', - f'FCST_VAR{index}_UNITS', - '') - obs_units = self.config.getstr('config', - f'OBS_VAR{index}_UNITS', - '') - if not obs_units and fcst_units: - obs_units = fcst_units - if not fcst_units and obs_units: - fcst_units = obs_units - - return fcst_units, obs_units - - def get_c_dict_list(self): # if fields were not specified with [FCST/OBS]_VAR_* variables # return and array with only self.c_dict if not self.c_dict['VAR_LIST']: - return [copy.deepcopy(self.c_dict)] + c_dict = {} + self._add_other_lists_to_c_dict(c_dict) + return [c_dict] # otherwise, use field information to build lists with single items # make individual dictionaries for each threshold var_info_list = self.c_dict['VAR_LIST'] c_dict_list = [] for var_info in var_info_list: - fcst_units, obs_units = self.get_field_units(var_info['index']) + fcst_units, obs_units = self._get_field_units(var_info['index']) run_fourier = ( self.config.getbool('config', - 'VAR' + var_info['index'] + '_FOURIER_DECOMP', + f"VAR{var_info['index']}_FOURIER_DECOMP", False) ) + fourier_wave_num_pairs = [''] if run_fourier: fourier_wave_num_pairs = getlist( self.config.getstr('config', - 'VAR' + var_info['index'] + '_WAVE_NUM_LIST', + f"VAR{var_info['index']}_WAVE_NUM_LIST", '') ) - else: - # if not running fourier, use a list - # containing an empty string to loop one iteration - fourier_wave_num_pairs = [''] # if no thresholds were specified, use a list # containing an empty string to loop one iteration @@ -1547,90 +1140,117 @@ def get_c_dict_list(self): for fcst_thresh, obs_thresh in zip(fcst_thresholds, obs_thresholds): for pair in fourier_wave_num_pairs: - c_dict = {} - c_dict['index'] = var_info['index'] - c_dict['FCST_VAR_LIST'] = [ - var_info['fcst_name'] - ] - c_dict['OBS_VAR_LIST'] = [ - var_info['obs_name'] - ] - c_dict['FCST_LEVEL_LIST'] = [ - var_info['fcst_level'] - ] - c_dict['OBS_LEVEL_LIST'] = [ - var_info['obs_level'] - ] - - c_dict['FCST_THRESH_LIST'] = [] - c_dict['OBS_THRESH_LIST'] = [] + c_dict = { + 'index': var_info['index'], + 'FCST_VAR_LIST': [f'"{var_info["fcst_name"]}"'], + 'OBS_VAR_LIST': [f'"{var_info["obs_name"]}"'], + 'FCST_LEVEL_LIST': [f'"{var_info["fcst_level"]}"'], + 'OBS_LEVEL_LIST': [f'"{var_info["obs_level"]}"'], + 'FCST_THRESH_LIST': [], 'OBS_THRESH_LIST': [], + 'FCST_UNITS_LIST': [], 'OBS_UNITS_LIST': [], + 'INTERP_MTHD_LIST': [], + } + if fcst_thresh: - thresh_formatted = self.format_thresh(fcst_thresh) + thresh_formatted = format_thresh(fcst_thresh) c_dict['FCST_THRESH_LIST'].append(thresh_formatted) if obs_thresh: - thresh_formatted = self.format_thresh(obs_thresh) + thresh_formatted = format_thresh(obs_thresh) c_dict['OBS_THRESH_LIST'].append(thresh_formatted) - c_dict['FCST_UNITS_LIST'] = [] - c_dict['OBS_UNITS_LIST'] = [] if fcst_units: - c_dict['FCST_UNITS_LIST'].append(fcst_units) + c_dict['FCST_UNITS_LIST'].append(f'"{fcst_units}"') if obs_units: - c_dict['OBS_UNITS_LIST'].append(obs_units) + c_dict['OBS_UNITS_LIST'].append(f'"{obs_units}"') c_dict['run_fourier'] = run_fourier if pair: c_dict['INTERP_MTHD_LIST'] = ['WV1_' + pair] - else: - c_dict['INTERP_MTHD_LIST'] = [] - self.add_other_lists_to_c_dict(c_dict) + self._add_other_lists_to_c_dict(c_dict) c_dict_list.append(c_dict) - # if preparing for 
MakePlots, combine levels and thresholds for each name
-        if self.forMakePlots:
-            output_c_dict_list = []
-            for c_dict in c_dict_list:
-                if c_dict['index'] not in [conf['index'] for conf in output_c_dict_list]:
-                    output_c_dict_list.append(c_dict)
-                else:
-                    for output_dict in output_c_dict_list:
-                        if c_dict['index'] == output_dict['index']:
+        return c_dict_list

-                            for level in c_dict['FCST_LEVEL_LIST']:
-                                if level not in output_dict['FCST_LEVEL_LIST']:
-                                    output_dict['FCST_LEVEL_LIST'].append(level)
+    @staticmethod
+    def _get_runtime_settings(c_dict):
+        """! Build list of all combinations of runtime settings that should
+        be run. Combine each group list into a single item separated by
+        commas. Compute the cartesian product of the loop lists to get all
+        of the different combinations of settings to run.
+
+        @param c_dict dictionary containing [GROUP/LOOP]_LIST_ITEMS, which
+        hold the names of the lists to group or loop, as well as the actual
+        lists themselves. The _LIST suffix is removed from the keys of the
+        returned dictionaries.
+        @returns list of dictionaries that contain all of the settings to
+        use for a given run.
+        """
+        runtime_setup_dict = {}

-                            for level in c_dict['OBS_LEVEL_LIST']:
-                                if level not in output_dict['OBS_LEVEL_LIST']:
-                                    output_dict['OBS_LEVEL_LIST'].append(level)
+        # for group items, set the value to a list with a single item that is
+        # a string of all items separated by a comma
+        for group_list in c_dict['GROUP_LIST_ITEMS']:
+            key = group_list.replace('_LIST', '')
+            runtime_setup_dict[key] = [', '.join(c_dict[group_list])]

-                            for thresh in c_dict['FCST_THRESH_LIST']:
-                                if thresh not in output_dict['FCST_THRESH_LIST']:
-                                    output_dict['FCST_THRESH_LIST'].append(thresh)
+        # for loop items, pass the list directly as the value
+        for loop_list in c_dict['LOOP_LIST_ITEMS']:
+            key = loop_list.replace('_LIST', '')
+            runtime_setup_dict[key] = c_dict[loop_list]

-                            for thresh in c_dict['OBS_THRESH_LIST']:
-                                if thresh not in output_dict['OBS_THRESH_LIST']:
-                                    output_dict['OBS_THRESH_LIST'].append(thresh)
+        # Create a dict with all the combinations of settings to be run
+        runtime_setup_dict_names = sorted(runtime_setup_dict)
+        runtime_settings_dict_list = []

-            return output_c_dict_list
+        # find cartesian product (all combos of the lists) of each dict key
+        products = itertools.product(
+            *(runtime_setup_dict[name] for name in runtime_setup_dict_names)
+        )
+        for product in products:
+            # pair up product values with dict keys and add them to new dict
+            next_dict = {}
+            for key, value in zip(runtime_setup_dict_names, product):
+                next_dict[key] = value
+            runtime_settings_dict_list.append(next_dict)
+
+        # NOTE: Logic to create list of runtime settings was previously
+        # handled using a complex list comprehension that was difficult to
+        # read. The new logic is intended to be more readable for other
+        # developers. The original code is commented below for reference:
+        # runtime_settings_dict_list = [
+        #     dict(zip(runtime_setup_dict_names, prod)) for prod in
+        #     itertools.product(*(runtime_setup_dict[name] for name in
+        #     runtime_setup_dict_names))
+        # ]

-        return c_dict_list
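+        # Illustrative example (hypothetical values): if MODEL_LIST is in
+        # GROUP_LIST_ITEMS with c_dict['MODEL_LIST'] = ['"GFS"', '"NAM"'] and
+        # FCST_LEAD_LIST is in LOOP_LIST_ITEMS with
+        # c_dict['FCST_LEAD_LIST'] = ['"24"', '"48"'], the result is two
+        # runtime settings dictionaries:
+        #   {'MODEL': '"GFS", "NAM"', 'FCST_LEAD': '"24"'} and
+        #   {'MODEL': '"GFS", "NAM"', 'FCST_LEAD': '"48"'}
+        return runtime_settings_dict_list

-    def add_other_lists_to_c_dict(self, c_dict):
+    def _get_field_units(self, index):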
+        """! Get units of fcst and obs fields, if set, based on VAR index
+
+        @param index VAR index corresponding to other [FCST/OBS] info
+        @returns tuple containing forecast and observation units respectively
+        """
+        fcst_units = self.config.getraw('config', f'FCST_VAR{index}_UNITS')
+        obs_units = self.config.getraw('config', f'OBS_VAR{index}_UNITS')
+        if not obs_units and fcst_units:
+            obs_units = fcst_units
+        elif not fcst_units and obs_units:
+            fcst_units = obs_units
+
+        return fcst_units, obs_units
+
+    def _add_other_lists_to_c_dict(self, c_dict):
         """! Using GROUP_LIST_ITEMS and LOOP_LIST_ITEMS, add lists from
         self.c_dict that are not already in c_dict.

         @param c_dict dictionary to add values to
         """
         # add group and loop lists
-        lists_to_add = self.list_categories
-        if self.runMakePlots:
-            lists_to_add.extend(self.list_categories_make_plots)
-
-        for list_category in lists_to_add:
+        for list_category in self.LIST_CATEGORIES:
             list_items = self.c_dict[list_category]
             if list_category not in c_dict:
                 c_dict[list_category] = list_items
@@ -1639,199 +1259,85 @@ def add_other_lists_to_c_dict(self, c_dict):
                 if list_item not in c_dict:
                     c_dict[list_item] = self.c_dict[list_item]

-    def get_model_obtype_and_lookindir(self, runtime_settings_dict, loop_lists, group_lists):
-        """! Reads through model info dictionaries for given run. Sets lookindir command line
-             argument. Sets MODEL and OBTYPE values in runtime setting dictionary.
-             @param runtime_settings_dict dictionary containing all settings used in next run
-             @returns last model info dictionary is successful, None if not.
+    def _get_model_obtype_and_lookindir(self, runtime_settings_dict):
+        """! Reads through model info dictionaries for given run.
+        Sets lookindir command line argument. Sets MODEL and OBTYPE values in
+        runtime setting dictionary.
+
+        @param runtime_settings_dict dictionary with all settings used in run
+        @returns last model info dictionary if successful, None if not.
         """
         lookin_dirs = []
         model_list = []
-        reference_list = []
         obtype_list = []
-        dump_row_filename_list = []
+        model_info = None
+
         # get list of models to process
-        models_to_run = [model.strip().replace('"', '') for model in runtime_settings_dict['MODEL'].split(',')]
+        models_to_run = runtime_settings_dict['MODEL'].split(',')
         for model_info in self.c_dict['MODEL_INFO_LIST']:
             # skip model if not in list of models to process
             if model_info['name'] not in models_to_run:
+                self.logger.debug(f"Model {model_info['name']} not found in "
+                                  "list of models to run. 
Skipping.") continue model_list.append(model_info['name']) - reference_list.append(model_info['reference_name']) - obtype_list.append(model_info['obtype']) - dump_row_filename_list.append(model_info['dump_row_filename_template']) + if model_info['obtype']: + obtype_list.append(model_info['obtype']) + # set MODEL and OBTYPE to single item to find lookin dir - runtime_settings_dict['MODEL'] = '"'+model_info['name']+'"' - runtime_settings_dict['OBTYPE'] = '"'+model_info['obtype']+'"' - # don't get lookin dir if getting settings for MakePlots - if not self.forMakePlots: - lookin_dirs.append(self.get_lookin_dir(model_info['dir'], - loop_lists, - group_lists, - runtime_settings_dict, - ) - ) + runtime_settings_dict['MODEL'] = model_info["name"] + runtime_settings_dict['OBTYPE'] = model_info["obtype"] + + lookin_dirs.append( + self._get_lookin_dir(model_info['dir'], runtime_settings_dict) + ) # set lookin dir command line argument runtime_settings_dict['LOOKIN_DIR'] = ' '.join(lookin_dirs) # error and return None if lookin dir is empty - if not self.forMakePlots and not runtime_settings_dict['LOOKIN_DIR']: + if not runtime_settings_dict['LOOKIN_DIR']: self.log_error("No value found for lookin dir") return None - if not model_list or not obtype_list: - self.log_error("Could not find model or obtype to process") + if not model_list: + self.log_error("Could not find model to process") return None # set values in runtime settings dict for model and obtype - runtime_settings_dict['MODEL'] = self.list_to_str(model_list) - runtime_settings_dict['MODEL_REFERENCE_NAME'] = self.list_to_str(reference_list) - runtime_settings_dict['OBTYPE'] = self.list_to_str(obtype_list) + runtime_settings_dict['MODEL'] = list_to_str(model_list) + runtime_settings_dict['OBTYPE'] = list_to_str(obtype_list) # return last model info dict used return model_info - def get_job_info(self, model_info, runtime_settings_dict, loop_lists, group_lists): + def _get_job_info(self, model_info, runtime_settings_dict, stringsub_dict): """! Get job information and concatenate values into a string - @params model_info model information to use to determine output file paths - @params runtime_settings_dict dictionary containing all settings used in next run - @returns string containing job information to pass to StatAnalysis config file - """ - job = '-job ' + self.c_dict['JOB_NAME'] + ' ' + self.c_dict['JOB_ARGS'] - for job_type in ['dump_row', 'out_stat']: - if f"-{job_type}" in self.c_dict['JOB_ARGS']: - job = self.process_job_args(job_type, - job, - model_info, - loop_lists, - group_lists, - runtime_settings_dict, - ) - - return job - - def run_stat_analysis(self): - """! This runs stat_analysis over a period of valid - or initialization dates for a job defined by - the user. 
- """ - self.forMakePlots = False - - runtime_settings_dict_list = self.get_runtime_settings_dict_list() - if not runtime_settings_dict_list: - return False - - self.run_stat_analysis_job(runtime_settings_dict_list) - - # if running MakePlots, get its runtime_settings_dict_list and call - if self.runMakePlots: - self.logger.debug("Preparing settings to pass to MakePlots wrapper") - self.forMakePlots = True - runtime_settings_dict_list = ( - self.get_runtime_settings_dict_list() - ) - if not runtime_settings_dict_list: - return False - - self.MakePlotsWrapper.create_plots(runtime_settings_dict_list) - if self.MakePlotsWrapper.errors: - self.log_error("MakePlots produced " - f"{self.MakePlotsWrapper.errors} errors.") - return True - - def run_stat_analysis_job(self, runtime_settings_dict_list): - """! Sets environment variables need to run StatAnalysis jobs - and calls the tool for each job. - - Args: - @param runtime_settings_dict_list list of dictionaries - containing information needed to run a StatAnalysis job + @param model_info model info to use to determine output file paths + @param runtime_settings_dict dictionary with all settings for next run + @param stringsub_dict dictionary with info to substitute into filename + templates + @returns list of strings containing job info to pass config file """ - for runtime_settings_dict in runtime_settings_dict_list: - if not self.create_output_directories(runtime_settings_dict): - continue - - # Set environment variables and run stat_analysis. - for name, value in runtime_settings_dict.items(): - self.add_env_var(name, value) - - self.job_args = None - # set METPLUS_ env vars for MET config file to be consistent - # with other wrappers - mp_lists = ['MODEL', - 'DESC', - 'FCST_LEAD', - 'OBS_LEAD', - 'FCST_VALID_HOUR', - 'OBS_VALID_HOUR', - 'FCST_INIT_HOUR', - 'OBS_INIT_HOUR', - 'FCST_VAR', - 'OBS_VAR', - 'FCST_UNITS', - 'OBS_UNITS', - 'FCST_LEVEL', - 'OBS_LEVEL', - 'OBTYPE', - 'VX_MASK', - 'INTERP_MTHD', - 'INTERP_PNTS', - 'FCST_THRESH', - 'OBS_THRESH', - 'COV_THRESH', - 'ALPHA', - 'LINE_TYPE' - ] - for mp_list in mp_lists: - if not runtime_settings_dict.get(mp_list, ''): - continue - value = (f"{mp_list.lower()} = " - f"[{runtime_settings_dict.get(mp_list, '')}];") - self.env_var_dict[f'METPLUS_{mp_list}'] = value - - mp_items = ['FCST_VALID_BEG', - 'FCST_VALID_END', - 'OBS_VALID_BEG', - 'OBS_VALID_END', - 'FCST_INIT_BEG', - 'FCST_INIT_END', - 'OBS_INIT_BEG', - 'OBS_INIT_END', - ] - for mp_item in mp_items: - if not runtime_settings_dict.get(mp_item, ''): + jobs = [] + for job in self.c_dict['JOBS']: + for job_type in ['dump_row', 'out_stat']: + if f"-{job_type}" not in job: continue - value = remove_quotes(runtime_settings_dict.get(mp_item, - '')) - value = (f"{mp_item.lower()} = \"{value}\";") - self.env_var_dict[f'METPLUS_{mp_item}'] = value - value = f'jobs = ["' - value += runtime_settings_dict.get('JOB', '') - value += '"];' - self.env_var_dict[f'METPLUS_JOBS'] = value + job = self._process_job_args(job_type, job, model_info, + runtime_settings_dict, + stringsub_dict) - # send environment variables to logger - self.set_environment_variables() - - # set lookin dir - self.logger.debug(f"Setting -lookin dir to {runtime_settings_dict['LOOKIN_DIR']}") - self.lookindir = runtime_settings_dict['LOOKIN_DIR'] - self.job_args = runtime_settings_dict['JOB'] - - # set -out file path if requested, value will be set to None if not - self.c_dict['OUTPUT_FILENAME'] = ( - runtime_settings_dict.get('OUTPUT_FILENAME') - ) - - self.build() + # substitute 
filename templates that may be found in rest of job + job = do_string_sub(job, **stringsub_dict) + jobs.append(job) - self.clear() + return jobs - def create_output_directories(self, runtime_settings_dict): + def _create_output_directories(self, runtime_settings_dict): """! Check if output filename is set for dump_row or out_stat. If set, Check if the file already exists and if it should be skipped. @@ -1839,24 +1345,15 @@ def create_output_directories(self, runtime_settings_dict): @returns True if job should be run, False if it should be skipped """ run_job = True - for job_type in ['dump_row', 'out_stat', 'output']: + for job_type in ['DUMP_ROW', 'OUT_STAT', 'OUTPUT']: output_path = ( - runtime_settings_dict.get(f'{job_type.upper()}_FILENAME') + runtime_settings_dict.get(f'{job_type}_FILENAME') ) - if output_path: - if not self.find_and_check_output_file( - output_path_template=output_path): - run_job = False + if not output_path: + continue - return run_job + if not self.find_and_check_output_file( + output_path_template=output_path): + run_job = False - def run_all_times(self): - self.run_stat_analysis() - return self.all_commands - - def run_at_time(self, input_dict): - loop_by = self.c_dict['DATE_TYPE'] - run_date = input_dict[loop_by.lower()].strftime('%Y%m%d') - self.c_dict['DATE_BEG'] = run_date - self.c_dict['DATE_END'] = run_date - self.run_stat_analysis() + return run_job diff --git a/parm/metplus_config/defaults.conf b/parm/metplus_config/defaults.conf index 36c359d6d..c1e340545 100644 --- a/parm/metplus_config/defaults.conf +++ b/parm/metplus_config/defaults.conf @@ -67,9 +67,6 @@ GFDL_TRACKER_EXEC = /path/to/standalone_gfdl-vortextracker_v3.9a/trk_exec # Set to False to preserve these files # ############################################################################### - -LOOP_ORDER = processes - PROCESS_LIST = Usage OMP_NUM_THREADS = 1 diff --git a/parm/use_cases/met_tool_wrapper/ASCII2NC/ASCII2NC.conf b/parm/use_cases/met_tool_wrapper/ASCII2NC/ASCII2NC.conf index 45c6e7531..eb9143292 100644 --- a/parm/use_cases/met_tool_wrapper/ASCII2NC/ASCII2NC.conf +++ b/parm/use_cases/met_tool_wrapper/ASCII2NC/ASCII2NC.conf @@ -33,8 +33,6 @@ VALID_INCREMENT = 1M LEAD_SEQ = 0 -LOOP_ORDER = times - ### # File I/O diff --git a/parm/use_cases/met_tool_wrapper/ASCII2NC/ASCII2NC_python_embedding.conf b/parm/use_cases/met_tool_wrapper/ASCII2NC/ASCII2NC_python_embedding.conf index f2e61547f..4e008c316 100644 --- a/parm/use_cases/met_tool_wrapper/ASCII2NC/ASCII2NC_python_embedding.conf +++ b/parm/use_cases/met_tool_wrapper/ASCII2NC/ASCII2NC_python_embedding.conf @@ -33,8 +33,6 @@ VALID_INCREMENT = 1M LEAD_SEQ = 0 -LOOP_ORDER = times - ### # File I/O diff --git a/parm/use_cases/met_tool_wrapper/EnsembleStat/EnsembleStat.conf b/parm/use_cases/met_tool_wrapper/EnsembleStat/EnsembleStat.conf index 81233349e..8d4a0dcbd 100644 --- a/parm/use_cases/met_tool_wrapper/EnsembleStat/EnsembleStat.conf +++ b/parm/use_cases/met_tool_wrapper/EnsembleStat/EnsembleStat.conf @@ -33,8 +33,6 @@ INIT_INCREMENT=3600 LEAD_SEQ = 24H -LOOP_ORDER = times - ### # File I/O diff --git a/parm/use_cases/met_tool_wrapper/EnsembleStat/EnsembleStat_python_embedding.conf b/parm/use_cases/met_tool_wrapper/EnsembleStat/EnsembleStat_python_embedding.conf index 95eea3c84..7b76978fa 100644 --- a/parm/use_cases/met_tool_wrapper/EnsembleStat/EnsembleStat_python_embedding.conf +++ b/parm/use_cases/met_tool_wrapper/EnsembleStat/EnsembleStat_python_embedding.conf @@ -33,8 +33,6 @@ INIT_INCREMENT=3600 LEAD_SEQ = 24 -LOOP_ORDER = times - 
### # File I/O diff --git a/parm/use_cases/met_tool_wrapper/Example/Example.conf b/parm/use_cases/met_tool_wrapper/Example/Example.conf index 096f7aae1..f20cee637 100644 --- a/parm/use_cases/met_tool_wrapper/Example/Example.conf +++ b/parm/use_cases/met_tool_wrapper/Example/Example.conf @@ -33,8 +33,6 @@ VALID_INCREMENT = 6H LEAD_SEQ = 3H, 6H, 9H, 12H -LOOP_ORDER = times - EXAMPLE_CUSTOM_LOOP_LIST = ext, nc diff --git a/parm/use_cases/met_tool_wrapper/GempakToCF/GempakToCF.conf b/parm/use_cases/met_tool_wrapper/GempakToCF/GempakToCF.conf index 4e0591ad8..142bd453e 100644 --- a/parm/use_cases/met_tool_wrapper/GempakToCF/GempakToCF.conf +++ b/parm/use_cases/met_tool_wrapper/GempakToCF/GempakToCF.conf @@ -33,8 +33,6 @@ VALID_INCREMENT=12H LEAD_SEQ = 0 -LOOP_ORDER = times - ### # File I/O diff --git a/parm/use_cases/met_tool_wrapper/GenEnsProd/GenEnsProd.conf b/parm/use_cases/met_tool_wrapper/GenEnsProd/GenEnsProd.conf index 997d0d6f9..696abe400 100644 --- a/parm/use_cases/met_tool_wrapper/GenEnsProd/GenEnsProd.conf +++ b/parm/use_cases/met_tool_wrapper/GenEnsProd/GenEnsProd.conf @@ -33,8 +33,6 @@ INIT_INCREMENT = 12H LEAD_SEQ = 24H -LOOP_ORDER = processes - ### # File I/O diff --git a/parm/use_cases/met_tool_wrapper/GenVxMask/GenVxMask.conf b/parm/use_cases/met_tool_wrapper/GenVxMask/GenVxMask.conf index c601cefde..9b4958136 100644 --- a/parm/use_cases/met_tool_wrapper/GenVxMask/GenVxMask.conf +++ b/parm/use_cases/met_tool_wrapper/GenVxMask/GenVxMask.conf @@ -33,8 +33,6 @@ INIT_INCREMENT = 1M LEAD_SEQ = 12H -LOOP_ORDER = times - ### # File I/O diff --git a/parm/use_cases/met_tool_wrapper/GenVxMask/GenVxMask_multiple.conf b/parm/use_cases/met_tool_wrapper/GenVxMask/GenVxMask_multiple.conf index 92f08c16a..498297785 100644 --- a/parm/use_cases/met_tool_wrapper/GenVxMask/GenVxMask_multiple.conf +++ b/parm/use_cases/met_tool_wrapper/GenVxMask/GenVxMask_multiple.conf @@ -33,8 +33,6 @@ INIT_INCREMENT = 1M LEAD_SEQ = 24H -LOOP_ORDER = times - ### # File I/O diff --git a/parm/use_cases/met_tool_wrapper/GenVxMask/GenVxMask_with_arguments.conf b/parm/use_cases/met_tool_wrapper/GenVxMask/GenVxMask_with_arguments.conf index 3c8009e84..33f9a30b5 100644 --- a/parm/use_cases/met_tool_wrapper/GenVxMask/GenVxMask_with_arguments.conf +++ b/parm/use_cases/met_tool_wrapper/GenVxMask/GenVxMask_with_arguments.conf @@ -33,8 +33,6 @@ INIT_INCREMENT = 1M LEAD_SEQ = 24H -LOOP_ORDER = times - ### # File I/O diff --git a/parm/use_cases/met_tool_wrapper/GridDiag/GridDiag.conf b/parm/use_cases/met_tool_wrapper/GridDiag/GridDiag.conf index f2d3adb84..c35fe3994 100644 --- a/parm/use_cases/met_tool_wrapper/GridDiag/GridDiag.conf +++ b/parm/use_cases/met_tool_wrapper/GridDiag/GridDiag.conf @@ -36,9 +36,6 @@ INIT_INCREMENT = 21600 LEAD_SEQ = 141, 144, 147 -LOOP_ORDER = processes - - ### # File I/O # https://metplus.readthedocs.io/en/latest/Users_Guide/systemconfiguration.html#directory-and-filename-template-info diff --git a/parm/use_cases/met_tool_wrapper/GridStat/GridStat.conf b/parm/use_cases/met_tool_wrapper/GridStat/GridStat.conf index 0ccb837af..e35f9dd74 100644 --- a/parm/use_cases/met_tool_wrapper/GridStat/GridStat.conf +++ b/parm/use_cases/met_tool_wrapper/GridStat/GridStat.conf @@ -33,8 +33,6 @@ INIT_INCREMENT = 12H LEAD_SEQ = 12 -LOOP_ORDER = times - ### # File I/O diff --git a/parm/use_cases/met_tool_wrapper/GridStat/GridStat_python_embedding.conf b/parm/use_cases/met_tool_wrapper/GridStat/GridStat_python_embedding.conf index 46a2354fa..391561837 100644 --- 
a/parm/use_cases/met_tool_wrapper/GridStat/GridStat_python_embedding.conf +++ b/parm/use_cases/met_tool_wrapper/GridStat/GridStat_python_embedding.conf @@ -33,8 +33,6 @@ INIT_INCREMENT = 12H LEAD_SEQ = 12 -LOOP_ORDER = times - ### # File I/O diff --git a/parm/use_cases/met_tool_wrapper/METdbLoad/METdbLoad.conf b/parm/use_cases/met_tool_wrapper/METdbLoad/METdbLoad.conf index 1ddcb184a..72ad9f01f 100644 --- a/parm/use_cases/met_tool_wrapper/METdbLoad/METdbLoad.conf +++ b/parm/use_cases/met_tool_wrapper/METdbLoad/METdbLoad.conf @@ -34,8 +34,6 @@ VALID_BEG = 2005080712 VALID_END = 2005080800 VALID_INCREMENT = 12H -LOOP_ORDER = processes - ### # File I/O diff --git a/parm/use_cases/met_tool_wrapper/MODE/MODE.conf b/parm/use_cases/met_tool_wrapper/MODE/MODE.conf index 7d094f1f0..30ed8a2f1 100644 --- a/parm/use_cases/met_tool_wrapper/MODE/MODE.conf +++ b/parm/use_cases/met_tool_wrapper/MODE/MODE.conf @@ -33,8 +33,6 @@ INIT_INCREMENT = 12H LEAD_SEQ = 12 -LOOP_ORDER = times - ### # File I/O diff --git a/parm/use_cases/met_tool_wrapper/MODE/MODE_python_embedding.conf b/parm/use_cases/met_tool_wrapper/MODE/MODE_python_embedding.conf index 84cc1e417..4bf6f106a 100644 --- a/parm/use_cases/met_tool_wrapper/MODE/MODE_python_embedding.conf +++ b/parm/use_cases/met_tool_wrapper/MODE/MODE_python_embedding.conf @@ -33,8 +33,6 @@ INIT_INCREMENT = 12H LEAD_SEQ = 12 -LOOP_ORDER = times - ### # File I/O diff --git a/parm/use_cases/met_tool_wrapper/MTD/MTD.conf b/parm/use_cases/met_tool_wrapper/MTD/MTD.conf index 8098d9c82..54ef20003 100644 --- a/parm/use_cases/met_tool_wrapper/MTD/MTD.conf +++ b/parm/use_cases/met_tool_wrapper/MTD/MTD.conf @@ -33,8 +33,6 @@ INIT_INCREMENT=1M LEAD_SEQ = 6H, 9H, 12H -LOOP_ORDER = times - ### # File I/O diff --git a/parm/use_cases/met_tool_wrapper/MTD/MTD_python_embedding.conf b/parm/use_cases/met_tool_wrapper/MTD/MTD_python_embedding.conf index bf0d0c4d9..ae250104a 100644 --- a/parm/use_cases/met_tool_wrapper/MTD/MTD_python_embedding.conf +++ b/parm/use_cases/met_tool_wrapper/MTD/MTD_python_embedding.conf @@ -33,8 +33,6 @@ INIT_INCREMENT=1M LEAD_SEQ = 0, 1, 2 -LOOP_ORDER = times - ### # File I/O diff --git a/parm/use_cases/met_tool_wrapper/PCPCombine/PCPCombine_add.conf b/parm/use_cases/met_tool_wrapper/PCPCombine/PCPCombine_add.conf index 48126f344..529e6999c 100644 --- a/parm/use_cases/met_tool_wrapper/PCPCombine/PCPCombine_add.conf +++ b/parm/use_cases/met_tool_wrapper/PCPCombine/PCPCombine_add.conf @@ -33,8 +33,6 @@ VALID_INCREMENT = 1M LEAD_SEQ = 15M -LOOP_ORDER = times - ### # File I/O diff --git a/parm/use_cases/met_tool_wrapper/PCPCombine/PCPCombine_bucket.conf b/parm/use_cases/met_tool_wrapper/PCPCombine/PCPCombine_bucket.conf index ded9c6cd0..bd8ca1128 100644 --- a/parm/use_cases/met_tool_wrapper/PCPCombine/PCPCombine_bucket.conf +++ b/parm/use_cases/met_tool_wrapper/PCPCombine/PCPCombine_bucket.conf @@ -33,8 +33,6 @@ INIT_INCREMENT = 86400 LEAD_SEQ = 15H -LOOP_ORDER = times - ### # File I/O diff --git a/parm/use_cases/met_tool_wrapper/PCPCombine/PCPCombine_derive.conf b/parm/use_cases/met_tool_wrapper/PCPCombine/PCPCombine_derive.conf index da89543b9..b2f8d6f63 100644 --- a/parm/use_cases/met_tool_wrapper/PCPCombine/PCPCombine_derive.conf +++ b/parm/use_cases/met_tool_wrapper/PCPCombine/PCPCombine_derive.conf @@ -33,8 +33,6 @@ INIT_INCREMENT = 1M LEAD_SEQ = 24H -LOOP_ORDER = times - ### # File I/O diff --git a/parm/use_cases/met_tool_wrapper/PCPCombine/PCPCombine_loop_custom.conf b/parm/use_cases/met_tool_wrapper/PCPCombine/PCPCombine_loop_custom.conf index 
dcc7aa126..64d449d8b 100644 --- a/parm/use_cases/met_tool_wrapper/PCPCombine/PCPCombine_loop_custom.conf +++ b/parm/use_cases/met_tool_wrapper/PCPCombine/PCPCombine_loop_custom.conf @@ -35,8 +35,6 @@ LEAD_SEQ = 24H PCP_COMBINE_CUSTOM_LOOP_LIST = arw-fer-gep1, arw-fer-gep5, arw-sch-gep2, arw-sch-gep6, arw-tom-gep3, arw-tom-gep7 -LOOP_ORDER = times - ### # File I/O diff --git a/parm/use_cases/met_tool_wrapper/PCPCombine/PCPCombine_python_embedding.conf b/parm/use_cases/met_tool_wrapper/PCPCombine/PCPCombine_python_embedding.conf index 8aaa2fff3..d9cd56f96 100644 --- a/parm/use_cases/met_tool_wrapper/PCPCombine/PCPCombine_python_embedding.conf +++ b/parm/use_cases/met_tool_wrapper/PCPCombine/PCPCombine_python_embedding.conf @@ -33,8 +33,6 @@ VALID_INCREMENT=43200 LEAD_SEQ = 0 -LOOP_ORDER = times - ### # File I/O diff --git a/parm/use_cases/met_tool_wrapper/PCPCombine/PCPCombine_subtract.conf b/parm/use_cases/met_tool_wrapper/PCPCombine/PCPCombine_subtract.conf index 63cebfefe..caf089040 100644 --- a/parm/use_cases/met_tool_wrapper/PCPCombine/PCPCombine_subtract.conf +++ b/parm/use_cases/met_tool_wrapper/PCPCombine/PCPCombine_subtract.conf @@ -34,7 +34,6 @@ INIT_INCREMENT = 1M LEAD_SEQ = 18H -LOOP_ORDER = times ### # File I/O diff --git a/parm/use_cases/met_tool_wrapper/PCPCombine/PCPCombine_sum.conf b/parm/use_cases/met_tool_wrapper/PCPCombine/PCPCombine_sum.conf index bbc63b60e..bdfa337eb 100644 --- a/parm/use_cases/met_tool_wrapper/PCPCombine/PCPCombine_sum.conf +++ b/parm/use_cases/met_tool_wrapper/PCPCombine/PCPCombine_sum.conf @@ -33,8 +33,6 @@ VALID_INCREMENT = 1M LEAD_SEQ = 15M -LOOP_ORDER = times - ### # File I/O diff --git a/parm/use_cases/met_tool_wrapper/PCPCombine/PCPCombine_user_defined.conf b/parm/use_cases/met_tool_wrapper/PCPCombine/PCPCombine_user_defined.conf index c0503bced..a30bc6bd3 100644 --- a/parm/use_cases/met_tool_wrapper/PCPCombine/PCPCombine_user_defined.conf +++ b/parm/use_cases/met_tool_wrapper/PCPCombine/PCPCombine_user_defined.conf @@ -34,8 +34,6 @@ INIT_INCREMENT = 1M LEAD_SEQ = 24H -LOOP_ORDER = times - ### # File I/O diff --git a/parm/use_cases/met_tool_wrapper/PlotDataPlane/PlotDataPlane_grib1.conf b/parm/use_cases/met_tool_wrapper/PlotDataPlane/PlotDataPlane_grib1.conf index 1fd05a618..f67abc606 100644 --- a/parm/use_cases/met_tool_wrapper/PlotDataPlane/PlotDataPlane_grib1.conf +++ b/parm/use_cases/met_tool_wrapper/PlotDataPlane/PlotDataPlane_grib1.conf @@ -33,8 +33,6 @@ VALID_INCREMENT = 1M LEAD_SEQ = 0 -LOOP_ORDER = times - ### # File I/O diff --git a/parm/use_cases/met_tool_wrapper/PlotDataPlane/PlotDataPlane_netcdf.conf b/parm/use_cases/met_tool_wrapper/PlotDataPlane/PlotDataPlane_netcdf.conf index 38edc7243..b98422004 100644 --- a/parm/use_cases/met_tool_wrapper/PlotDataPlane/PlotDataPlane_netcdf.conf +++ b/parm/use_cases/met_tool_wrapper/PlotDataPlane/PlotDataPlane_netcdf.conf @@ -33,7 +33,6 @@ VALID_INCREMENT = 1M LEAD_SEQ = 0 -LOOP_ORDER = times ### # File I/O diff --git a/parm/use_cases/met_tool_wrapper/PlotDataPlane/PlotDataPlane_python_embedding.conf b/parm/use_cases/met_tool_wrapper/PlotDataPlane/PlotDataPlane_python_embedding.conf index 5e4b3f5c0..5245f5a5b 100644 --- a/parm/use_cases/met_tool_wrapper/PlotDataPlane/PlotDataPlane_python_embedding.conf +++ b/parm/use_cases/met_tool_wrapper/PlotDataPlane/PlotDataPlane_python_embedding.conf @@ -33,8 +33,6 @@ VALID_INCREMENT = 1M LEAD_SEQ = 0 -LOOP_ORDER = times - ### # File I/O diff --git a/parm/use_cases/met_tool_wrapper/Point2Grid/Point2Grid.conf 
b/parm/use_cases/met_tool_wrapper/Point2Grid/Point2Grid.conf index 7d0a9e5a2..50646d631 100644 --- a/parm/use_cases/met_tool_wrapper/Point2Grid/Point2Grid.conf +++ b/parm/use_cases/met_tool_wrapper/Point2Grid/Point2Grid.conf @@ -33,8 +33,6 @@ INIT_INCREMENT = 24H LEAD_SEQ = 12H -LOOP_ORDER = times - ### # File I/O diff --git a/parm/use_cases/met_tool_wrapper/PointStat/PointStat_once_per_field.conf b/parm/use_cases/met_tool_wrapper/PointStat/PointStat_once_per_field.conf index 3a1d098f6..054c1ef9c 100644 --- a/parm/use_cases/met_tool_wrapper/PointStat/PointStat_once_per_field.conf +++ b/parm/use_cases/met_tool_wrapper/PointStat/PointStat_once_per_field.conf @@ -33,8 +33,6 @@ VALID_INCREMENT = 1M LEAD_SEQ = 0 -LOOP_ORDER = processes - ### # File I/O diff --git a/parm/use_cases/met_tool_wrapper/PointStat/PointStat_python_embedding.conf b/parm/use_cases/met_tool_wrapper/PointStat/PointStat_python_embedding.conf index e22eae519..e22d62d01 100644 --- a/parm/use_cases/met_tool_wrapper/PointStat/PointStat_python_embedding.conf +++ b/parm/use_cases/met_tool_wrapper/PointStat/PointStat_python_embedding.conf @@ -33,8 +33,6 @@ VALID_INCREMENT = 1M LEAD_SEQ = 0H -LOOP_ORDER = processes - ### # File I/O diff --git a/parm/use_cases/met_tool_wrapper/PyEmbedIngest/PyEmbedIngest.conf b/parm/use_cases/met_tool_wrapper/PyEmbedIngest/PyEmbedIngest.conf index 6114202c6..4388fb057 100644 --- a/parm/use_cases/met_tool_wrapper/PyEmbedIngest/PyEmbedIngest.conf +++ b/parm/use_cases/met_tool_wrapper/PyEmbedIngest/PyEmbedIngest.conf @@ -33,8 +33,6 @@ VALID_INCREMENT = 21600 LEAD_SEQ = 0 -LOOP_ORDER = times - ### # PyEmbedIngest Settings diff --git a/parm/use_cases/met_tool_wrapper/PyEmbedIngest/PyEmbedIngest_multi_field_one_file.conf b/parm/use_cases/met_tool_wrapper/PyEmbedIngest/PyEmbedIngest_multi_field_one_file.conf index 5f432ae4a..4c213f520 100644 --- a/parm/use_cases/met_tool_wrapper/PyEmbedIngest/PyEmbedIngest_multi_field_one_file.conf +++ b/parm/use_cases/met_tool_wrapper/PyEmbedIngest/PyEmbedIngest_multi_field_one_file.conf @@ -33,8 +33,6 @@ VALID_INCREMENT = 21600 LEAD_SEQ = 0 -LOOP_ORDER = times - ### # PyEmbedIngest Settings diff --git a/parm/use_cases/met_tool_wrapper/RegridDataPlane/RegridDataPlane.conf b/parm/use_cases/met_tool_wrapper/RegridDataPlane/RegridDataPlane.conf index d228cd7b6..1a9940e23 100644 --- a/parm/use_cases/met_tool_wrapper/RegridDataPlane/RegridDataPlane.conf +++ b/parm/use_cases/met_tool_wrapper/RegridDataPlane/RegridDataPlane.conf @@ -33,8 +33,6 @@ INIT_INCREMENT = 1M LEAD_SEQ = 3H -LOOP_ORDER = times - ### # File I/O diff --git a/parm/use_cases/met_tool_wrapper/RegridDataPlane/RegridDataPlane_multi_field_multi_file.conf b/parm/use_cases/met_tool_wrapper/RegridDataPlane/RegridDataPlane_multi_field_multi_file.conf index dff538882..667fbb550 100644 --- a/parm/use_cases/met_tool_wrapper/RegridDataPlane/RegridDataPlane_multi_field_multi_file.conf +++ b/parm/use_cases/met_tool_wrapper/RegridDataPlane/RegridDataPlane_multi_field_multi_file.conf @@ -33,8 +33,6 @@ INIT_INCREMENT = 1M LEAD_SEQ = 3H -LOOP_ORDER = times - ### # File I/O diff --git a/parm/use_cases/met_tool_wrapper/RegridDataPlane/RegridDataPlane_multi_field_one_file.conf b/parm/use_cases/met_tool_wrapper/RegridDataPlane/RegridDataPlane_multi_field_one_file.conf index f62044060..6dbd0dd9f 100644 --- a/parm/use_cases/met_tool_wrapper/RegridDataPlane/RegridDataPlane_multi_field_one_file.conf +++ b/parm/use_cases/met_tool_wrapper/RegridDataPlane/RegridDataPlane_multi_field_one_file.conf @@ -33,8 +33,6 @@ INIT_INCREMENT = 1M 
LEAD_SEQ = 3H -LOOP_ORDER = times - ### # File I/O diff --git a/parm/use_cases/met_tool_wrapper/RegridDataPlane/RegridDataPlane_python_embedding.conf b/parm/use_cases/met_tool_wrapper/RegridDataPlane/RegridDataPlane_python_embedding.conf index 31b294421..177353c98 100644 --- a/parm/use_cases/met_tool_wrapper/RegridDataPlane/RegridDataPlane_python_embedding.conf +++ b/parm/use_cases/met_tool_wrapper/RegridDataPlane/RegridDataPlane_python_embedding.conf @@ -33,8 +33,6 @@ INIT_INCREMENT=43200 LEAD_SEQ = 3 -LOOP_ORDER = times - ### # File I/O diff --git a/parm/use_cases/met_tool_wrapper/SeriesAnalysis/SeriesAnalysis_python_embedding.conf b/parm/use_cases/met_tool_wrapper/SeriesAnalysis/SeriesAnalysis_python_embedding.conf index 097267655..a685eabee 100644 --- a/parm/use_cases/met_tool_wrapper/SeriesAnalysis/SeriesAnalysis_python_embedding.conf +++ b/parm/use_cases/met_tool_wrapper/SeriesAnalysis/SeriesAnalysis_python_embedding.conf @@ -35,8 +35,6 @@ INIT_INCREMENT = 12H LEAD_SEQ = 12 -LOOP_ORDER = processes - SERIES_ANALYSIS_CUSTOM_LOOP_LIST = diff --git a/parm/use_cases/met_tool_wrapper/StatAnalysis/StatAnalysis.conf b/parm/use_cases/met_tool_wrapper/StatAnalysis/StatAnalysis.conf index 67f5f0b4b..14eb18019 100644 --- a/parm/use_cases/met_tool_wrapper/StatAnalysis/StatAnalysis.conf +++ b/parm/use_cases/met_tool_wrapper/StatAnalysis/StatAnalysis.conf @@ -31,10 +31,6 @@ VALID_BEG=2005080700 VALID_END=2005080700 VALID_INCREMENT = 12H -LEAD_SEQ = 12 - -LOOP_ORDER = times - ### # File I/O @@ -61,8 +57,7 @@ STAT_ANALYSIS_CONFIG_FILE = {PARM_BASE}/met_config/STATAnalysisConfig_wrapped #STAT_ANALYSIS_HSS_EC_VALUE = -STAT_ANALYSIS_JOB_NAME = filter -STAT_ANALYSIS_JOB_ARGS = -dump_row [dump_row_file] +STAT_ANALYSIS_JOB1 = -job filter -dump_row [dump_row_file] MODEL_LIST = {MODEL1} DESC_LIST = diff --git a/parm/use_cases/met_tool_wrapper/StatAnalysis/StatAnalysis_python_embedding.conf b/parm/use_cases/met_tool_wrapper/StatAnalysis/StatAnalysis_python_embedding.conf index 72cfdee34..edc3af472 100644 --- a/parm/use_cases/met_tool_wrapper/StatAnalysis/StatAnalysis_python_embedding.conf +++ b/parm/use_cases/met_tool_wrapper/StatAnalysis/StatAnalysis_python_embedding.conf @@ -24,15 +24,13 @@ PROCESS_LIST = StatAnalysis # LEAD_SEQ is the list of forecast leads to process # https://metplus.readthedocs.io/en/latest/Users_Guide/systemconfiguration.html#timing-control ### + LOOP_BY = VALID -VALID_TIME_FMT = %Y%m%d%H -VALID_BEG=2005080700 -VALID_END=2005080700 +VALID_TIME_FMT = %Y%m%d +VALID_BEG=20070331 +VALID_END=20070331 VALID_INCREMENT = 12H -LEAD_SEQ = 12 - -LOOP_ORDER = times ### # File I/O @@ -40,9 +38,9 @@ LOOP_ORDER = times ### MODEL1 = WRF -MODEL1_OBTYPE = ADPSFC -MODEL1_STAT_ANALYSIS_LOOKIN_DIR = python {INPUT_BASE}/met_test/scripts/python/read_ascii_mpr.py {INPUT_BASE}/met_test/new/point_stat_120000L_20050807_120000V.stat -MODEL1_STAT_ANALYSIS_DUMP_ROW_TEMPLATE = stat_analysis_python_AGGR_MPR_to_SL1L2.stat +MODEL1_OBTYPE = ADPUPA +MODEL1_STAT_ANALYSIS_LOOKIN_DIR = python {INPUT_BASE}/met_test/scripts/python/read_ascii_mpr.py {INPUT_BASE}/met_test/out/point_stat/point_stat_360000L_20070331_120000V.stat + MODEL1_STAT_ANALYSIS_OUT_STAT_TEMPLATE = {model?fmt=%s}_{obtype?fmt=%s}_valid{valid?fmt=%Y%m%d}_fcstvalidhour{valid_hour?fmt=%H}0000Z_out_stat.stat STAT_ANALYSIS_OUTPUT_DIR = {OUTPUT_BASE}/met_tool_wrapper/StatAnalysis_python_embedding diff --git a/parm/use_cases/met_tool_wrapper/TCGen/TCGen.conf b/parm/use_cases/met_tool_wrapper/TCGen/TCGen.conf index 8d25a7fea..ace36034e 100644 --- 
a/parm/use_cases/met_tool_wrapper/TCGen/TCGen.conf +++ b/parm/use_cases/met_tool_wrapper/TCGen/TCGen.conf @@ -34,8 +34,6 @@ LEAD_SEQ = TC_GEN_CUSTOM_LOOP_LIST = -LOOP_ORDER = times - ### # File I/O diff --git a/parm/use_cases/met_tool_wrapper/TCMPRPlotter/TCMPRPlotter.conf b/parm/use_cases/met_tool_wrapper/TCMPRPlotter/TCMPRPlotter.conf index 38a2046a4..29f10457b 100644 --- a/parm/use_cases/met_tool_wrapper/TCMPRPlotter/TCMPRPlotter.conf +++ b/parm/use_cases/met_tool_wrapper/TCMPRPlotter/TCMPRPlotter.conf @@ -31,8 +31,6 @@ INIT_BEG = 201503 INIT_END = 201503 INIT_INCREMENT = 6H -LOOP_ORDER = processes - ### # File I/O diff --git a/parm/use_cases/met_tool_wrapper/TCPairs/TCPairs_tropical.conf b/parm/use_cases/met_tool_wrapper/TCPairs/TCPairs_tropical.conf index 680a1515a..f7b103720 100644 --- a/parm/use_cases/met_tool_wrapper/TCPairs/TCPairs_tropical.conf +++ b/parm/use_cases/met_tool_wrapper/TCPairs/TCPairs_tropical.conf @@ -33,8 +33,6 @@ INIT_INCREMENT = 21600 #TC_PAIRS_SKIP_LEAD_SEQ = False -LOOP_ORDER = times - TC_PAIRS_RUN_ONCE = False diff --git a/parm/use_cases/met_tool_wrapper/TCRMW/TCRMW.conf b/parm/use_cases/met_tool_wrapper/TCRMW/TCRMW.conf index e92c2ba53..6d341db0a 100644 --- a/parm/use_cases/met_tool_wrapper/TCRMW/TCRMW.conf +++ b/parm/use_cases/met_tool_wrapper/TCRMW/TCRMW.conf @@ -31,8 +31,6 @@ INIT_BEG = 2016092900 INIT_END = 2016092900 INIT_INCREMENT = 21600 -LOOP_ORDER = times - ### # File I/O diff --git a/parm/use_cases/met_tool_wrapper/TCStat/TCStat.conf b/parm/use_cases/met_tool_wrapper/TCStat/TCStat.conf index d791c3ffe..cb6eb51d8 100644 --- a/parm/use_cases/met_tool_wrapper/TCStat/TCStat.conf +++ b/parm/use_cases/met_tool_wrapper/TCStat/TCStat.conf @@ -31,8 +31,6 @@ INIT_BEG = 2019103112 INIT_END = 2019103112 INIT_INCREMENT = 6H -LOOP_ORDER = processes - ### # File I/O diff --git a/parm/use_cases/met_tool_wrapper/UserScript/UserScript_run_once.conf b/parm/use_cases/met_tool_wrapper/UserScript/UserScript_run_once.conf index 036913a99..704b434f4 100644 --- a/parm/use_cases/met_tool_wrapper/UserScript/UserScript_run_once.conf +++ b/parm/use_cases/met_tool_wrapper/UserScript/UserScript_run_once.conf @@ -37,8 +37,6 @@ LEAD_SEQ = 0H, 12H, 15H, 24H, 120H USER_SCRIPT_CUSTOM_LOOP_LIST = nc -LOOP_ORDER = processes - ### # File I/O diff --git a/parm/use_cases/met_tool_wrapper/UserScript/UserScript_run_once_for_each.conf b/parm/use_cases/met_tool_wrapper/UserScript/UserScript_run_once_for_each.conf index d135fa6be..9a3247f45 100644 --- a/parm/use_cases/met_tool_wrapper/UserScript/UserScript_run_once_for_each.conf +++ b/parm/use_cases/met_tool_wrapper/UserScript/UserScript_run_once_for_each.conf @@ -37,8 +37,6 @@ LEAD_SEQ = 0H, 12H, 24H, 120H USER_SCRIPT_CUSTOM_LOOP_LIST = nc -LOOP_ORDER = processes - ### # File I/O diff --git a/parm/use_cases/met_tool_wrapper/UserScript/UserScript_run_once_per_init.conf b/parm/use_cases/met_tool_wrapper/UserScript/UserScript_run_once_per_init.conf index dccb808ed..3dcb9a86f 100644 --- a/parm/use_cases/met_tool_wrapper/UserScript/UserScript_run_once_per_init.conf +++ b/parm/use_cases/met_tool_wrapper/UserScript/UserScript_run_once_per_init.conf @@ -37,8 +37,6 @@ LEAD_SEQ = 0H, 12H, 24H, 120H USER_SCRIPT_CUSTOM_LOOP_LIST = nc -LOOP_ORDER = processes - ### # File I/O diff --git a/parm/use_cases/met_tool_wrapper/UserScript/UserScript_run_once_per_lead.conf b/parm/use_cases/met_tool_wrapper/UserScript/UserScript_run_once_per_lead.conf index e8b6c9558..560e652f4 100644 --- a/parm/use_cases/met_tool_wrapper/UserScript/UserScript_run_once_per_lead.conf 
+++ b/parm/use_cases/met_tool_wrapper/UserScript/UserScript_run_once_per_lead.conf @@ -1,56 +1,55 @@ -# UserScript wrapper example - [config] -# List of applications to run - only UserScript for this case +# Documentation for this use case can be found at +# https://metplus.readthedocs.io/en/latest/generated/met_tool_wrapper/UserScript/UserScript_run_once_per_lead.html + +# For additional information, please see the METplus Users Guide. +# https://metplus.readthedocs.io/en/latest/Users_Guide + +### +# Processes to run +# https://metplus.readthedocs.io/en/latest/Users_Guide/systemconfiguration.html#process-list +### + PROCESS_LIST = UserScript -# time looping - options are INIT, VALID, RETRO, and REALTIME +### +# Time Info +# LOOP_BY options are INIT, VALID, RETRO, and REALTIME # If set to INIT or RETRO: # INIT_TIME_FMT, INIT_BEG, INIT_END, and INIT_INCREMENT must also be set # If set to VALID or REALTIME: # VALID_TIME_FMT, VALID_BEG, VALID_END, and VALID_INCREMENT must also be set -LOOP_BY = INIT +# LEAD_SEQ is the list of forecast leads to process +# https://metplus.readthedocs.io/en/latest/Users_Guide/systemconfiguration.html#timing-control +### -# Format of INIT_BEG and INIT_END using % items -# %Y = 4 digit year, %m = 2 digit month, %d = 2 digit day, etc. -# see www.strftime.org for more information -# %Y%m%d%H expands to YYYYMMDDHH -INIT_TIME_FMT = %Y%m%d%H%M%S +USER_SCRIPT_RUNTIME_FREQ = RUN_ONCE_PER_LEAD -# Start time for METplus run - must match VALID_TIME_FMT +LOOP_BY = INIT +INIT_TIME_FMT = %Y%m%d%H%M%S INIT_BEG = 20141031093015 - -# End time for METplus run - must match VALID_TIME_FMT INIT_END = 20141101093015 - -# Increment between METplus runs (in seconds if no units are specified) -# Must be >= 60 seconds INIT_INCREMENT = 12H -# List of forecast leads to process for each run time (init or valid) -# In hours if units are not specified -# If unset, defaults to 0 (don't loop through forecast leads) LEAD_SEQ = 0H, 12H, 24H, 120H -# Order of loops to process data - Options are times, processes -# Not relevant if only one item is in the PROCESS_LIST -# times = run all wrappers in the PROCESS_LIST for a single run time, then -# increment the run time and run all wrappers again until all times have -# been evaluated. -# processes = run the first wrapper in the PROCESS_LIST for all times -# specified, then repeat for the next item in the PROCESS_LIST until all -# wrappers have been run -LOOP_ORDER = processes - -# list of strings to loop over for each run time. 
-# value for each item can be referenced in filename templates with {custom?fmt=%s} USER_SCRIPT_CUSTOM_LOOP_LIST = nc -USER_SCRIPT_RUNTIME_FREQ = RUN_ONCE_PER_LEAD + +### +# File I/O +# https://metplus.readthedocs.io/en/latest/Users_Guide/systemconfiguration.html#directory-and-filename-template-info +### USER_SCRIPT_INPUT_TEMPLATE = init_{init?fmt=%Y%m%d%H%M%S}_valid_{valid?fmt=%Y%m%d%H%M%S}_lead_{lead?fmt=%3H}.{custom} USER_SCRIPT_INPUT_DIR = {INPUT_BASE}/met_test/new/test + +### +# UserScript Settings +# https://metplus.readthedocs.io/en/latest/Users_Guide/wrappers.html#userscript +### + USER_SCRIPT_COMMAND = {PARM_BASE}/use_cases/met_tool_wrapper/UserScript/print_file_list.py diff --git a/parm/use_cases/met_tool_wrapper/UserScript/UserScript_run_once_per_valid.conf b/parm/use_cases/met_tool_wrapper/UserScript/UserScript_run_once_per_valid.conf index 89092717a..02a2b295d 100644 --- a/parm/use_cases/met_tool_wrapper/UserScript/UserScript_run_once_per_valid.conf +++ b/parm/use_cases/met_tool_wrapper/UserScript/UserScript_run_once_per_valid.conf @@ -37,8 +37,6 @@ LEAD_SEQ = 0H, 12H, 24H, 120H USER_SCRIPT_CUSTOM_LOOP_LIST = nc -LOOP_ORDER = processes - ### # File I/O diff --git a/parm/use_cases/model_applications/air_quality_and_comp/EnsembleStat_fcstICAP_obsMODIS_aod.conf b/parm/use_cases/model_applications/air_quality_and_comp/EnsembleStat_fcstICAP_obsMODIS_aod.conf index 213142de0..594a0e165 100644 --- a/parm/use_cases/model_applications/air_quality_and_comp/EnsembleStat_fcstICAP_obsMODIS_aod.conf +++ b/parm/use_cases/model_applications/air_quality_and_comp/EnsembleStat_fcstICAP_obsMODIS_aod.conf @@ -33,8 +33,6 @@ INIT_INCREMENT=06H LEAD_SEQ = 12H -LOOP_ORDER = times - ### # File I/O diff --git a/parm/use_cases/model_applications/climate/GridStat_fcstCESM_obsGFS_ConusTemp.conf b/parm/use_cases/model_applications/climate/GridStat_fcstCESM_obsGFS_ConusTemp.conf index bc6b3cc26..f47638208 100644 --- a/parm/use_cases/model_applications/climate/GridStat_fcstCESM_obsGFS_ConusTemp.conf +++ b/parm/use_cases/model_applications/climate/GridStat_fcstCESM_obsGFS_ConusTemp.conf @@ -33,8 +33,6 @@ INIT_INCREMENT = 86400 LEAD_SEQ = 6, 12 -LOOP_ORDER = times - ### # File I/O diff --git a/parm/use_cases/model_applications/climate/MODE_fcstCESM_obsGPCP_AsianMonsoonPrecip.conf b/parm/use_cases/model_applications/climate/MODE_fcstCESM_obsGPCP_AsianMonsoonPrecip.conf index 4f304f865..0535ff7da 100644 --- a/parm/use_cases/model_applications/climate/MODE_fcstCESM_obsGPCP_AsianMonsoonPrecip.conf +++ b/parm/use_cases/model_applications/climate/MODE_fcstCESM_obsGPCP_AsianMonsoonPrecip.conf @@ -33,8 +33,6 @@ INIT_INCREMENT = 86400 LEAD_SEQ = 24, 48 -LOOP_ORDER = times - ### # File I/O diff --git a/parm/use_cases/model_applications/data_assimilation/StatAnalysis_fcstHAFS_obsPrepBufr_JEDI_IODA_interface.conf b/parm/use_cases/model_applications/data_assimilation/StatAnalysis_fcstHAFS_obsPrepBufr_JEDI_IODA_interface.conf index 2cb82bf76..f82ffcdc8 100644 --- a/parm/use_cases/model_applications/data_assimilation/StatAnalysis_fcstHAFS_obsPrepBufr_JEDI_IODA_interface.conf +++ b/parm/use_cases/model_applications/data_assimilation/StatAnalysis_fcstHAFS_obsPrepBufr_JEDI_IODA_interface.conf @@ -34,8 +34,6 @@ VALID_INCREMENT = 12H LEAD_SEQ = 0 -LOOP_ORDER = times - ### # File I/O diff --git a/parm/use_cases/model_applications/marine_and_cryosphere/GridStat_MODE_fcstIMS_obsNCEP_sea_ice.conf b/parm/use_cases/model_applications/marine_and_cryosphere/GridStat_MODE_fcstIMS_obsNCEP_sea_ice.conf index 6a0df6836..410d69420 100644 --- 
a/parm/use_cases/model_applications/marine_and_cryosphere/GridStat_MODE_fcstIMS_obsNCEP_sea_ice.conf +++ b/parm/use_cases/model_applications/marine_and_cryosphere/GridStat_MODE_fcstIMS_obsNCEP_sea_ice.conf @@ -32,8 +32,6 @@ VALID_BEG=20190201 VALID_END=20190201 VALID_INCREMENT=86400 -LOOP_ORDER = times - ### # File I/O diff --git a/parm/use_cases/model_applications/marine_and_cryosphere/GridStat_fcstRTOFS_obsAVISO_climHYCOM_ssh.conf b/parm/use_cases/model_applications/marine_and_cryosphere/GridStat_fcstRTOFS_obsAVISO_climHYCOM_ssh.conf index 5679242bd..e33505f89 100644 --- a/parm/use_cases/model_applications/marine_and_cryosphere/GridStat_fcstRTOFS_obsAVISO_climHYCOM_ssh.conf +++ b/parm/use_cases/model_applications/marine_and_cryosphere/GridStat_fcstRTOFS_obsAVISO_climHYCOM_ssh.conf @@ -33,8 +33,6 @@ VALID_INCREMENT = 1M LEAD_SEQ = 24 -LOOP_ORDER = times - ### # File I/O diff --git a/parm/use_cases/model_applications/marine_and_cryosphere/GridStat_fcstRTOFS_obsGHRSST_climWOA_sst.conf b/parm/use_cases/model_applications/marine_and_cryosphere/GridStat_fcstRTOFS_obsGHRSST_climWOA_sst.conf index 2fbb89a9d..70ff844d2 100644 --- a/parm/use_cases/model_applications/marine_and_cryosphere/GridStat_fcstRTOFS_obsGHRSST_climWOA_sst.conf +++ b/parm/use_cases/model_applications/marine_and_cryosphere/GridStat_fcstRTOFS_obsGHRSST_climWOA_sst.conf @@ -33,7 +33,6 @@ VALID_INCREMENT = 1M LEAD_SEQ = 0 -LOOP_ORDER = times ### # File I/O diff --git a/parm/use_cases/model_applications/marine_and_cryosphere/GridStat_fcstRTOFS_obsOSTIA_iceCover.conf b/parm/use_cases/model_applications/marine_and_cryosphere/GridStat_fcstRTOFS_obsOSTIA_iceCover.conf index 1f80253a3..4868d99bf 100644 --- a/parm/use_cases/model_applications/marine_and_cryosphere/GridStat_fcstRTOFS_obsOSTIA_iceCover.conf +++ b/parm/use_cases/model_applications/marine_and_cryosphere/GridStat_fcstRTOFS_obsOSTIA_iceCover.conf @@ -35,8 +35,6 @@ LEAD_SEQ = 0 GRID_STAT_CUSTOM_LOOP_LIST = north, south -LOOP_ORDER = times - ### # File I/O diff --git a/parm/use_cases/model_applications/marine_and_cryosphere/GridStat_fcstRTOFS_obsSMAP_climWOA_sss.conf b/parm/use_cases/model_applications/marine_and_cryosphere/GridStat_fcstRTOFS_obsSMAP_climWOA_sss.conf index 11804aeb4..f1d13bb48 100644 --- a/parm/use_cases/model_applications/marine_and_cryosphere/GridStat_fcstRTOFS_obsSMAP_climWOA_sss.conf +++ b/parm/use_cases/model_applications/marine_and_cryosphere/GridStat_fcstRTOFS_obsSMAP_climWOA_sss.conf @@ -33,8 +33,6 @@ VALID_INCREMENT = 1M LEAD_SEQ = 24 -LOOP_ORDER = times - ### # File I/O diff --git a/parm/use_cases/model_applications/marine_and_cryosphere/GridStat_fcstRTOFS_obsSMOS_climWOA_sss.conf b/parm/use_cases/model_applications/marine_and_cryosphere/GridStat_fcstRTOFS_obsSMOS_climWOA_sss.conf index 8d26d9cae..a89fa7012 100644 --- a/parm/use_cases/model_applications/marine_and_cryosphere/GridStat_fcstRTOFS_obsSMOS_climWOA_sss.conf +++ b/parm/use_cases/model_applications/marine_and_cryosphere/GridStat_fcstRTOFS_obsSMOS_climWOA_sss.conf @@ -33,7 +33,6 @@ VALID_INCREMENT = 1M LEAD_SEQ = 0 -LOOP_ORDER = times ### # File I/O diff --git a/parm/use_cases/model_applications/marine_and_cryosphere/PlotDataPlane_obsHYCOM_coordTripolar.conf b/parm/use_cases/model_applications/marine_and_cryosphere/PlotDataPlane_obsHYCOM_coordTripolar.conf index a7f6c51d2..3a9e68c15 100644 --- a/parm/use_cases/model_applications/marine_and_cryosphere/PlotDataPlane_obsHYCOM_coordTripolar.conf +++ b/parm/use_cases/model_applications/marine_and_cryosphere/PlotDataPlane_obsHYCOM_coordTripolar.conf 
@@ -35,8 +35,6 @@ LEAD_SEQ = 0 PLOT_DATA_PLANE_CUSTOM_LOOP_LIST = north, south -LOOP_ORDER = times - ### # File I/O diff --git a/parm/use_cases/model_applications/marine_and_cryosphere/UserScript_fcstRTOFS_obsAOML_calcTransport.conf b/parm/use_cases/model_applications/marine_and_cryosphere/UserScript_fcstRTOFS_obsAOML_calcTransport.conf index fea9f36ee..75cc0c6d9 100644 --- a/parm/use_cases/model_applications/marine_and_cryosphere/UserScript_fcstRTOFS_obsAOML_calcTransport.conf +++ b/parm/use_cases/model_applications/marine_and_cryosphere/UserScript_fcstRTOFS_obsAOML_calcTransport.conf @@ -31,8 +31,6 @@ VALID_INCREMENT = 24H LEAD_SEQ = -LOOP_ORDER = processes - USER_SCRIPT_RUNTIME_FREQ = RUN_ONCE diff --git a/parm/use_cases/model_applications/medium_range/GridStat_fcstGEFS_obsCADB_BinaryObsPOE.conf b/parm/use_cases/model_applications/medium_range/GridStat_fcstGEFS_obsCADB_BinaryObsPOE.conf index 7bce8e2c6..6152afdb6 100644 --- a/parm/use_cases/model_applications/medium_range/GridStat_fcstGEFS_obsCADB_BinaryObsPOE.conf +++ b/parm/use_cases/model_applications/medium_range/GridStat_fcstGEFS_obsCADB_BinaryObsPOE.conf @@ -12,8 +12,6 @@ INIT_INCREMENT = 12H LEAD_SEQ = 8d -LOOP_ORDER = times - ### # File I/O diff --git a/parm/use_cases/model_applications/medium_range/GridStat_fcstGFS_obsGFS_Sfc_MultiField.conf b/parm/use_cases/model_applications/medium_range/GridStat_fcstGFS_obsGFS_Sfc_MultiField.conf index 20ad10659..fea4677c2 100644 --- a/parm/use_cases/model_applications/medium_range/GridStat_fcstGFS_obsGFS_Sfc_MultiField.conf +++ b/parm/use_cases/model_applications/medium_range/GridStat_fcstGFS_obsGFS_Sfc_MultiField.conf @@ -33,8 +33,6 @@ VALID_INCREMENT = 21600 LEAD_SEQ = 24 -LOOP_ORDER = times - ### # File I/O diff --git a/parm/use_cases/model_applications/medium_range/GridStat_fcstGFS_obsGFS_climoNCEP_MultiField.conf b/parm/use_cases/model_applications/medium_range/GridStat_fcstGFS_obsGFS_climoNCEP_MultiField.conf index 28c513fdd..8e8eb73c0 100644 --- a/parm/use_cases/model_applications/medium_range/GridStat_fcstGFS_obsGFS_climoNCEP_MultiField.conf +++ b/parm/use_cases/model_applications/medium_range/GridStat_fcstGFS_obsGFS_climoNCEP_MultiField.conf @@ -33,8 +33,6 @@ VALID_INCREMENT = 21600 LEAD_SEQ = 24, 48 -LOOP_ORDER = times - ### # File I/O diff --git a/parm/use_cases/model_applications/medium_range/MTD_SeriesAnalysis_fcstGFS_obsGFS_FeatureRelative_SeriesByLead.conf b/parm/use_cases/model_applications/medium_range/MTD_SeriesAnalysis_fcstGFS_obsGFS_FeatureRelative_SeriesByLead.conf index 77110adf8..c9b54fc87 100644 --- a/parm/use_cases/model_applications/medium_range/MTD_SeriesAnalysis_fcstGFS_obsGFS_FeatureRelative_SeriesByLead.conf +++ b/parm/use_cases/model_applications/medium_range/MTD_SeriesAnalysis_fcstGFS_obsGFS_FeatureRelative_SeriesByLead.conf @@ -33,8 +33,6 @@ INIT_INCREMENT = 6H LEAD_SEQ = begin_end_incr(0,30,6) -LOOP_ORDER = processes - ### # File I/O diff --git a/parm/use_cases/model_applications/medium_range/PointStat_fcstGFS_obsGDAS_UpperAir_MultiField_PrepBufr.conf b/parm/use_cases/model_applications/medium_range/PointStat_fcstGFS_obsGDAS_UpperAir_MultiField_PrepBufr.conf index ea774a66e..4a11246bf 100644 --- a/parm/use_cases/model_applications/medium_range/PointStat_fcstGFS_obsGDAS_UpperAir_MultiField_PrepBufr.conf +++ b/parm/use_cases/model_applications/medium_range/PointStat_fcstGFS_obsGDAS_UpperAir_MultiField_PrepBufr.conf @@ -33,8 +33,6 @@ VALID_INCREMENT = 86400 LEAD_SEQ = 0 -LOOP_ORDER = times - ### # File I/O diff --git 
a/parm/use_cases/model_applications/medium_range/PointStat_fcstGFS_obsNAM_Sfc_MultiField_PrepBufr.conf b/parm/use_cases/model_applications/medium_range/PointStat_fcstGFS_obsNAM_Sfc_MultiField_PrepBufr.conf index b722dd49a..3d2fd7ee6 100644 --- a/parm/use_cases/model_applications/medium_range/PointStat_fcstGFS_obsNAM_Sfc_MultiField_PrepBufr.conf +++ b/parm/use_cases/model_applications/medium_range/PointStat_fcstGFS_obsNAM_Sfc_MultiField_PrepBufr.conf @@ -32,8 +32,6 @@ VALID_INCREMENT = 86400 LEAD_SEQ = 0 -LOOP_ORDER = processes - ### # File I/O diff --git a/parm/use_cases/model_applications/medium_range/TCStat_SeriesAnalysis_fcstGFS_obsGFS_FeatureRelative_SeriesByInit.conf b/parm/use_cases/model_applications/medium_range/TCStat_SeriesAnalysis_fcstGFS_obsGFS_FeatureRelative_SeriesByInit.conf index 65dbe5ed8..8188d1a56 100644 --- a/parm/use_cases/model_applications/medium_range/TCStat_SeriesAnalysis_fcstGFS_obsGFS_FeatureRelative_SeriesByInit.conf +++ b/parm/use_cases/model_applications/medium_range/TCStat_SeriesAnalysis_fcstGFS_obsGFS_FeatureRelative_SeriesByInit.conf @@ -35,8 +35,6 @@ SERIES_ANALYSIS_RUNTIME_FREQ = RUN_ONCE_PER_INIT_OR_VALID SERIES_ANALYSIS_RUN_ONCE_PER_STORM_ID = True -LOOP_ORDER = processes - ### # File I/O diff --git a/parm/use_cases/model_applications/medium_range/TCStat_SeriesAnalysis_fcstGFS_obsGFS_FeatureRelative_SeriesByLead.conf b/parm/use_cases/model_applications/medium_range/TCStat_SeriesAnalysis_fcstGFS_obsGFS_FeatureRelative_SeriesByLead.conf index 32a43b9f1..dd0fc4827 100644 --- a/parm/use_cases/model_applications/medium_range/TCStat_SeriesAnalysis_fcstGFS_obsGFS_FeatureRelative_SeriesByLead.conf +++ b/parm/use_cases/model_applications/medium_range/TCStat_SeriesAnalysis_fcstGFS_obsGFS_FeatureRelative_SeriesByLead.conf @@ -37,8 +37,6 @@ LEAD_SEQ_1_LABEL = Day1 LEAD_SEQ_2 = begin_end_incr(24,42,6) LEAD_SEQ_2_LABEL = Day2 -LOOP_ORDER = processes - ### # File I/O diff --git a/parm/use_cases/model_applications/medium_range/TCStat_SeriesAnalysis_fcstGFS_obsGFS_FeatureRelative_SeriesByLead_PyEmbed_Multiple_Diagnostics.conf b/parm/use_cases/model_applications/medium_range/TCStat_SeriesAnalysis_fcstGFS_obsGFS_FeatureRelative_SeriesByLead_PyEmbed_Multiple_Diagnostics.conf index 4074f8d41..6cf9ba3c7 100644 --- a/parm/use_cases/model_applications/medium_range/TCStat_SeriesAnalysis_fcstGFS_obsGFS_FeatureRelative_SeriesByLead_PyEmbed_Multiple_Diagnostics.conf +++ b/parm/use_cases/model_applications/medium_range/TCStat_SeriesAnalysis_fcstGFS_obsGFS_FeatureRelative_SeriesByLead_PyEmbed_Multiple_Diagnostics.conf @@ -37,8 +37,6 @@ SERIES_ANALYSIS_RUNTIME_FREQ = RUN_ONCE_PER_LEAD SERIES_ANALYSIS_RUN_ONCE_PER_STORM_ID = False -LOOP_ORDER = processes - ### # File I/O diff --git a/parm/use_cases/model_applications/medium_range/UserScript_fcstGEFS_Difficulty_Index.conf b/parm/use_cases/model_applications/medium_range/UserScript_fcstGEFS_Difficulty_Index.conf index 06d8309c2..bd7820e33 100644 --- a/parm/use_cases/model_applications/medium_range/UserScript_fcstGEFS_Difficulty_Index.conf +++ b/parm/use_cases/model_applications/medium_range/UserScript_fcstGEFS_Difficulty_Index.conf @@ -37,8 +37,6 @@ USER_SCRIPT_CUSTOM_LOOP_LIST = nc USER_SCRIPT_RUNTIME_FREQ = RUN_ONCE_FOR_EACH -LOOP_ORDER = processes - ### # File I/O diff --git a/parm/use_cases/model_applications/precipitation/EnsembleStat_fcstWOFS_obsWOFS.conf b/parm/use_cases/model_applications/precipitation/EnsembleStat_fcstWOFS_obsWOFS.conf index 03904ba0e..64814fce7 100644 --- 
a/parm/use_cases/model_applications/precipitation/EnsembleStat_fcstWOFS_obsWOFS.conf +++ b/parm/use_cases/model_applications/precipitation/EnsembleStat_fcstWOFS_obsWOFS.conf @@ -33,8 +33,6 @@ INIT_INCREMENT = 3600 LEAD_SEQ = 1 -LOOP_ORDER = times - ### # File I/O diff --git a/parm/use_cases/model_applications/precipitation/GenEnsProd_fcstHRRRE_FcstOnly_NetCDF.conf b/parm/use_cases/model_applications/precipitation/GenEnsProd_fcstHRRRE_FcstOnly_NetCDF.conf index 18a5e568c..7f1eb5478 100644 --- a/parm/use_cases/model_applications/precipitation/GenEnsProd_fcstHRRRE_FcstOnly_NetCDF.conf +++ b/parm/use_cases/model_applications/precipitation/GenEnsProd_fcstHRRRE_FcstOnly_NetCDF.conf @@ -33,8 +33,6 @@ INIT_INCREMENT=43200 LEAD_SEQ = 3,6,9,12 -LOOP_ORDER = processes - ### # File I/O diff --git a/parm/use_cases/model_applications/precipitation/GridStat_fcstGFS_obsCCPA_GRIB.conf b/parm/use_cases/model_applications/precipitation/GridStat_fcstGFS_obsCCPA_GRIB.conf index ae536ee11..51f74951c 100644 --- a/parm/use_cases/model_applications/precipitation/GridStat_fcstGFS_obsCCPA_GRIB.conf +++ b/parm/use_cases/model_applications/precipitation/GridStat_fcstGFS_obsCCPA_GRIB.conf @@ -33,8 +33,6 @@ VALID_INCREMENT = 86400 LEAD_SEQ = 24 -LOOP_ORDER = times - ### # File I/O diff --git a/parm/use_cases/model_applications/precipitation/GridStat_fcstHREFmean_obsStgIV_Gempak.conf b/parm/use_cases/model_applications/precipitation/GridStat_fcstHREFmean_obsStgIV_Gempak.conf index 2964e6b59..5c9fd77b4 100644 --- a/parm/use_cases/model_applications/precipitation/GridStat_fcstHREFmean_obsStgIV_Gempak.conf +++ b/parm/use_cases/model_applications/precipitation/GridStat_fcstHREFmean_obsStgIV_Gempak.conf @@ -33,8 +33,6 @@ INIT_INCREMENT=43200 LEAD_SEQ = 18 -LOOP_ORDER = times - ### # File I/O diff --git a/parm/use_cases/model_applications/precipitation/GridStat_fcstHREFmean_obsStgIV_NetCDF.conf b/parm/use_cases/model_applications/precipitation/GridStat_fcstHREFmean_obsStgIV_NetCDF.conf index 9ef9bb8df..6428e9122 100644 --- a/parm/use_cases/model_applications/precipitation/GridStat_fcstHREFmean_obsStgIV_NetCDF.conf +++ b/parm/use_cases/model_applications/precipitation/GridStat_fcstHREFmean_obsStgIV_NetCDF.conf @@ -33,7 +33,6 @@ INIT_INCREMENT=43200 LEAD_SEQ = 18 -LOOP_ORDER = times ### # File I/O diff --git a/parm/use_cases/model_applications/precipitation/GridStat_fcstHRRR-TLE_obsStgIV_GRIB.conf b/parm/use_cases/model_applications/precipitation/GridStat_fcstHRRR-TLE_obsStgIV_GRIB.conf index 1f88a2e87..8caa06740 100644 --- a/parm/use_cases/model_applications/precipitation/GridStat_fcstHRRR-TLE_obsStgIV_GRIB.conf +++ b/parm/use_cases/model_applications/precipitation/GridStat_fcstHRRR-TLE_obsStgIV_GRIB.conf @@ -34,8 +34,6 @@ INIT_INCREMENT=60 LEAD_SEQ = 6, 7 -LOOP_ORDER = times - ### # File I/O diff --git a/parm/use_cases/model_applications/precipitation/MTD_fcstHRRR-TLE_FcstOnly_RevisionSeries_GRIB.conf b/parm/use_cases/model_applications/precipitation/MTD_fcstHRRR-TLE_FcstOnly_RevisionSeries_GRIB.conf index e8e4c525f..086403690 100644 --- a/parm/use_cases/model_applications/precipitation/MTD_fcstHRRR-TLE_FcstOnly_RevisionSeries_GRIB.conf +++ b/parm/use_cases/model_applications/precipitation/MTD_fcstHRRR-TLE_FcstOnly_RevisionSeries_GRIB.conf @@ -33,8 +33,6 @@ VALID_INCREMENT=86400 LEAD_SEQ = begin_end_incr(12, 0, -1) -LOOP_ORDER = times - ### # File I/O diff --git a/parm/use_cases/model_applications/precipitation/MTD_fcstHRRR-TLE_obsMRMS.conf b/parm/use_cases/model_applications/precipitation/MTD_fcstHRRR-TLE_obsMRMS.conf index 
598e1a9a3..b81242a67 100644 --- a/parm/use_cases/model_applications/precipitation/MTD_fcstHRRR-TLE_obsMRMS.conf +++ b/parm/use_cases/model_applications/precipitation/MTD_fcstHRRR-TLE_obsMRMS.conf @@ -33,8 +33,6 @@ INIT_INCREMENT=43200 LEAD_SEQ = 1,2,3 -LOOP_ORDER = times - ### # File I/O diff --git a/parm/use_cases/model_applications/s2s/GridStat_SeriesAnalysis_fcstNMME_obsCPC_seasonal_forecast.conf b/parm/use_cases/model_applications/s2s/GridStat_SeriesAnalysis_fcstNMME_obsCPC_seasonal_forecast.conf index 52a98f302..c30b22a23 100644 --- a/parm/use_cases/model_applications/s2s/GridStat_SeriesAnalysis_fcstNMME_obsCPC_seasonal_forecast.conf +++ b/parm/use_cases/model_applications/s2s/GridStat_SeriesAnalysis_fcstNMME_obsCPC_seasonal_forecast.conf @@ -35,8 +35,6 @@ LEAD_SEQ = 1m, 2m, 3m, 4m, 5m, 6m SERIES_ANALYSIS_RUNTIME_FREQ = RUN_ONCE_PER_LEAD -LOOP_ORDER = processes - ### # File I/O diff --git a/parm/use_cases/model_applications/s2s/GridStat_fcstCFSv2_obsGHCNCAMS_MultiTercile.conf b/parm/use_cases/model_applications/s2s/GridStat_fcstCFSv2_obsGHCNCAMS_MultiTercile.conf index 59b26fb25..88b003635 100644 --- a/parm/use_cases/model_applications/s2s/GridStat_fcstCFSv2_obsGHCNCAMS_MultiTercile.conf +++ b/parm/use_cases/model_applications/s2s/GridStat_fcstCFSv2_obsGHCNCAMS_MultiTercile.conf @@ -33,8 +33,6 @@ INIT_INCREMENT = 1Y LEAD_SEQ = -LOOP_ORDER = processes - ### # File I/O diff --git a/parm/use_cases/model_applications/s2s/SeriesAnalysis_fcstCFSv2_obsGHCNCAMS_climoStandardized_MultiStatisticTool.conf b/parm/use_cases/model_applications/s2s/SeriesAnalysis_fcstCFSv2_obsGHCNCAMS_climoStandardized_MultiStatisticTool.conf index 7e32b1e8d..2bbc1b76b 100644 --- a/parm/use_cases/model_applications/s2s/SeriesAnalysis_fcstCFSv2_obsGHCNCAMS_climoStandardized_MultiStatisticTool.conf +++ b/parm/use_cases/model_applications/s2s/SeriesAnalysis_fcstCFSv2_obsGHCNCAMS_climoStandardized_MultiStatisticTool.conf @@ -36,8 +36,6 @@ LEAD_SEQ = SERIES_ANALYSIS_CUSTOM_LOOP_LIST = 0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17,18,19,20,21,22,23 SERIES_ANALYSIS_RUNTIME_FREQ = RUN_ONCE -LOOP_ORDER = processes - ### # File I/O diff --git a/parm/use_cases/model_applications/s2s/TCGen_fcstGFSO_obsBDECKS_GDF_TDF.conf b/parm/use_cases/model_applications/s2s/TCGen_fcstGFSO_obsBDECKS_GDF_TDF.conf index 5c0210894..5f0a273ad 100644 --- a/parm/use_cases/model_applications/s2s/TCGen_fcstGFSO_obsBDECKS_GDF_TDF.conf +++ b/parm/use_cases/model_applications/s2s/TCGen_fcstGFSO_obsBDECKS_GDF_TDF.conf @@ -33,8 +33,6 @@ LEAD_SEQ = USER_SCRIPT_RUNTIME_FREQ = RUN_ONCE_FOR_EACH -LOOP_ORDER = times - ### # File I/O diff --git a/parm/use_cases/model_applications/s2s/UserScript_obsERA_obsOnly_Stratosphere.conf b/parm/use_cases/model_applications/s2s/UserScript_obsERA_obsOnly_Stratosphere.conf index 86c4e5264..0d8159406 100644 --- a/parm/use_cases/model_applications/s2s/UserScript_obsERA_obsOnly_Stratosphere.conf +++ b/parm/use_cases/model_applications/s2s/UserScript_obsERA_obsOnly_Stratosphere.conf @@ -32,8 +32,6 @@ VALID_BEG = 2013 USER_SCRIPT_RUNTIME_FREQ = RUN_ONCE -LOOP_ORDER = processes - ### # UserScript Settings diff --git a/parm/use_cases/model_applications/s2s_mid_lat/UserScript_fcstGFS_obsERA_Blocking.conf b/parm/use_cases/model_applications/s2s_mid_lat/UserScript_fcstGFS_obsERA_Blocking.conf index 6bb4c9f07..4bad3cef0 100644 --- a/parm/use_cases/model_applications/s2s_mid_lat/UserScript_fcstGFS_obsERA_Blocking.conf +++ b/parm/use_cases/model_applications/s2s_mid_lat/UserScript_fcstGFS_obsERA_Blocking.conf @@ -36,8 +36,6 @@ LEAD_SEQ = 0 
# Only Process DJF SKIP_TIMES = "%m:begin_end_incr(3,11,1)", "%m%d:0229" -LOOP_ORDER = processes - ### # File I/O diff --git a/parm/use_cases/model_applications/s2s_mid_lat/UserScript_fcstGFS_obsERA_WeatherRegime.conf b/parm/use_cases/model_applications/s2s_mid_lat/UserScript_fcstGFS_obsERA_WeatherRegime.conf index 4d7492772..37b224e45 100644 --- a/parm/use_cases/model_applications/s2s_mid_lat/UserScript_fcstGFS_obsERA_WeatherRegime.conf +++ b/parm/use_cases/model_applications/s2s_mid_lat/UserScript_fcstGFS_obsERA_WeatherRegime.conf @@ -41,8 +41,6 @@ LEAD_SEQ = 0 # Only Process DJF SKIP_TIMES = "%m:begin_end_incr(3,11,1)", "%m%d:1201,0229" -LOOP_ORDER = processes - ### # RegridDataPlane(regrid_obs) Settings diff --git a/parm/use_cases/model_applications/s2s_mid_lat/UserScript_obsERA_obsOnly_Blocking.conf b/parm/use_cases/model_applications/s2s_mid_lat/UserScript_obsERA_obsOnly_Blocking.conf index 0b4a32cca..526074c95 100644 --- a/parm/use_cases/model_applications/s2s_mid_lat/UserScript_obsERA_obsOnly_Blocking.conf +++ b/parm/use_cases/model_applications/s2s_mid_lat/UserScript_obsERA_obsOnly_Blocking.conf @@ -40,9 +40,6 @@ LEAD_SEQ = 0 # Only Process DJF SKIP_TIMES = "%m:begin_end_incr(3,11,1)", "%m%d:0229" -LOOP_ORDER = processes - - # Run the obs data # A variable set to be used in the pre-processing steps OBS_RUN = True diff --git a/parm/use_cases/model_applications/s2s_mid_lat/UserScript_obsERA_obsOnly_WeatherRegime.conf b/parm/use_cases/model_applications/s2s_mid_lat/UserScript_obsERA_obsOnly_WeatherRegime.conf index 1d0583ea4..47d836707 100644 --- a/parm/use_cases/model_applications/s2s_mid_lat/UserScript_obsERA_obsOnly_WeatherRegime.conf +++ b/parm/use_cases/model_applications/s2s_mid_lat/UserScript_obsERA_obsOnly_WeatherRegime.conf @@ -41,8 +41,6 @@ SKIP_TIMES = "%m:begin_end_incr(3,11,1)", "%m%d:0229" USER_SCRIPT_RUNTIME_FREQ = RUN_ONCE_PER_LEAD -LOOP_ORDER = processes - ### # RegridDataPlane(regrid_obs) Settings diff --git a/parm/use_cases/model_applications/s2s_mjo/UserScript_fcstGFS_obsERA_OMI.conf b/parm/use_cases/model_applications/s2s_mjo/UserScript_fcstGFS_obsERA_OMI.conf index 63fd483b9..38dd0439b 100644 --- a/parm/use_cases/model_applications/s2s_mjo/UserScript_fcstGFS_obsERA_OMI.conf +++ b/parm/use_cases/model_applications/s2s_mjo/UserScript_fcstGFS_obsERA_OMI.conf @@ -37,8 +37,6 @@ VALID_INCREMENT = 86400 LEAD_SEQ = 0 -LOOP_ORDER = processes - # variables referenced in other sections # Run the obs for these cases diff --git a/parm/use_cases/model_applications/s2s_mjo/UserScript_obsCFSR_obsOnly_MJO_ENSO.conf b/parm/use_cases/model_applications/s2s_mjo/UserScript_obsCFSR_obsOnly_MJO_ENSO.conf index 115bb060a..6bb38ebf6 100644 --- a/parm/use_cases/model_applications/s2s_mjo/UserScript_obsCFSR_obsOnly_MJO_ENSO.conf +++ b/parm/use_cases/model_applications/s2s_mjo/UserScript_obsCFSR_obsOnly_MJO_ENSO.conf @@ -1,6 +1,16 @@ -# MJO_ENSO UserScript wrapper - [config] + +# Documentation for this use case can be found at +# https://metplus.readthedocs.io/en/latest/generated/model_applications/s2s_mjo/UserScript_obsCFSR_obsOnly_MJO_ENSO.html + +# For additional information, please see the METplus Users Guide. 
+# https://metplus.readthedocs.io/en/latest/Users_Guide + +### +# Processes to run +# https://metplus.readthedocs.io/en/latest/Users_Guide/systemconfiguration.html#process-list +### + # All steps, including creating daily means and mean daily annual cycle #PROCESS_LIST = RegridDataPlane(regrid_obs_taux), RegridDataPlane(regrid_obs_tauy), RegridDataPlane(regrid_obs_sst), RegridDataPlane(regrid_obs_ucur), RegridDataPlane(regrid_obs_vcur), UserScript(script_mjo_enso) # Computing regridding, and MJO ENSO Analysis script @@ -8,50 +18,38 @@ PROCESS_LIST = UserScript(script_mjo_enso) -# time looping - options are INIT, VALID, RETRO, and REALTIME + +### +# Time Info +# LOOP_BY options are INIT, VALID, RETRO, and REALTIME # If set to INIT or RETRO: # INIT_TIME_FMT, INIT_BEG, INIT_END, and INIT_INCREMENT must also be set # If set to VALID or REALTIME: # VALID_TIME_FMT, VALID_BEG, VALID_END, and VALID_INCREMENT must also be set -LOOP_BY = VALID - -# Format of VALID_BEG and VALID_END using % items -# %Y = 4 digit year, %m = 2 digit month, %d = 2 digit day, etc. -# see www.strftime.org for more information -# %Y%m%d%H expands to YYYYMMDDHH -VALID_TIME_FMT = %Y%m%d +# LEAD_SEQ is the list of forecast leads to process +# https://metplus.readthedocs.io/en/latest/Users_Guide/systemconfiguration.html#timing-control +### -# Start time for METplus run -VALID_BEG = 19900101 -# End time for METplus run +LOOP_BY = VALID +VALID_TIME_FMT = %Y%m%d +VALID_BEG = 19900101 VALID_END = 20211231 - -# Increment between METplus runs in seconds. Must be >= 60 VALID_INCREMENT = 86400 -# List of forecast leads to process for each run time (init or valid) -# In hours if units are not specified -# If unset, defaults to 0 (don't loop through forecast leads) LEAD_SEQ = 0 -# Order of loops to process data - Options are times, processes -# Not relevant if only one item is in the PROCESS_LIST -# times = run all wrappers in the PROCESS_LIST for a single run time, then -# increment the run time and run all wrappers again until all times have -# been evaluated. 
-# processes = run the first wrapper in the PROCESS_LIST for all times -# specified, then repeat for the next item in the PROCESS_LIST until all -# wrappers have been run -LOOP_ORDER = processes - -# location of configuration files used by MET applications -CONFIG_DIR={PARM_BASE}/use_cases/model_applications/s2s_mjo # Run the obs for these cases OBS_RUN = True FCST_RUN = False + +### +# RegridDataPlane Settings +# https://metplus.readthedocs.io/en/latest/Users_Guide/wrappers.html#regriddataplane +### + # Mask to use for regridding REGRID_DATA_PLANE_VERIF_GRID = latlon 156 61 -30 125 1 1 @@ -62,6 +60,11 @@ REGRID_DATA_PLANE_METHOD = NEAREST REGRID_DATA_PLANE_WIDTH = 1 +### +# RegridDataPlane(regrid_obs_taux) Settings +# https://metplus.readthedocs.io/en/latest/Users_Guide/wrappers.html#regriddataplane +### + # Configurations for regrid_data_plane: Regrid OLR to -15 to 15 latitude [regrid_obs_taux] # Run regrid_data_plane on forecast data @@ -87,6 +90,11 @@ OBS_REGRID_DATA_PLANE_INPUT_TEMPLATE = cfsr_zonalWindStress_{valid?fmt=%Y%m%d}.n OBS_REGRID_DATA_PLANE_OUTPUT_TEMPLATE =cfsr_zonalWindStress_{valid?fmt=%Y%m%d}.nc +### +# RegridDataPlane(regrid_obs_tauy) Settings +# https://metplus.readthedocs.io/en/latest/Users_Guide/wrappers.html#regriddataplane +### + # Configurations for regrid_data_plane: Regrid meridional wind stress [regrid_obs_tauy] # Run regrid_data_plane on forecast data @@ -111,6 +119,12 @@ OBS_REGRID_DATA_PLANE_OUTPUT_DIR = {OUTPUT_BASE}/s2s_mjo/UserScript_obsCFSR_obsO OBS_REGRID_DATA_PLANE_INPUT_TEMPLATE = cfsr_meridionalWindStress_{valid?fmt=%Y%m%d}.nc OBS_REGRID_DATA_PLANE_OUTPUT_TEMPLATE = cfsr_meridionalWindStress_{valid?fmt=%Y%m%d}.nc + +### +# RegridDataPlane(regrid_obs_sst) Settings +# https://metplus.readthedocs.io/en/latest/Users_Guide/wrappers.html#regriddataplane +### + # Configurations for regrid_data_plane: Regrid sst [regrid_obs_sst] # Run regrid_data_plane on forecast data @@ -133,6 +147,12 @@ OBS_REGRID_DATA_PLANE_OUTPUT_DIR = {OUTPUT_BASE}/s2s_mjo/UserScript_obsCFSR_obsO OBS_REGRID_DATA_PLANE_INPUT_TEMPLATE = cfsr_sst_{valid?fmt=%Y%m%d}.nc OBS_REGRID_DATA_PLANE_OUTPUT_TEMPLATE = cfsr_sst_{valid?fmt=%Y%m%d}.nc + +### +# RegridDataPlane(regrid_obs_ucur) Settings +# https://metplus.readthedocs.io/en/latest/Users_Guide/wrappers.html#regriddataplane +### + # Configurations for regrid_data_plane: Regrid zonal ocean current [regrid_obs_ucur] # Run regrid_data_plane on forecast data @@ -155,6 +175,12 @@ OBS_REGRID_DATA_PLANE_OUTPUT_DIR = {OUTPUT_BASE}/s2s_mjo/UserScript_obsCFSR_obsO OBS_REGRID_DATA_PLANE_INPUT_TEMPLATE = cfsr_zonalOceanCurrent_{valid?fmt=%Y%m%d}.nc OBS_REGRID_DATA_PLANE_OUTPUT_TEMPLATE = cfsr_zonalOceanCurrent_{valid?fmt=%Y%m%d}.nc + +### +# RegridDataPlane(regrid_obs_vcur) Settings +# https://metplus.readthedocs.io/en/latest/Users_Guide/wrappers.html#regriddataplane +### + # Configurations for regrid_data_plane: Regrid meridional ocean current [regrid_obs_vcur] # Run regrid_data_plane on forecast data @@ -179,6 +205,32 @@ OBS_REGRID_DATA_PLANE_OUTPUT_DIR = {OUTPUT_BASE}/s2s_mjo/UserScript_obsCFSR_obsO OBS_REGRID_DATA_PLANE_INPUT_TEMPLATE = cfsr_meridionalOceanCurrent_{valid?fmt=%Y%m%d}.nc OBS_REGRID_DATA_PLANE_OUTPUT_TEMPLATE = cfsr_meridionalOceanCurrent_{valid?fmt=%Y%m%d}.nc + +### +# UserScript(script_mjo_enso) Settings +# https://metplus.readthedocs.io/en/latest/Users_Guide/wrappers.html#userscript +### + +# Configurations for UserScript: Run the MJO_ENSO Analysis driver +[script_mjo_enso]
+# Run the user script once per lead +USER_SCRIPT_RUNTIME_FREQ = RUN_ONCE_PER_LEAD + +# Template of filenames to input to the user script +#USER_SCRIPT_INPUT_TEMPLATE = {OUTPUT_BASE}/s2s_mjo/UserScript_obsCFSR_obsOnly_MJO_ENSO/Regrid/zonalWindStress/cfsr_zonalWindStress_{valid?fmt=%Y%m%d}.nc,{OUTPUT_BASE}/s2s_mjo/UserScript_obsCFSR_obsOnly_MJO_ENSO/Regrid/meridionalWindStress/cfsr_meridionalWindStress_{valid?fmt=%Y%m%d}.nc,{OUTPUT_BASE}/s2s_mjo/UserScript_obsCFSR_obsOnly_MJO_ENSO/Regrid/sst/cfsr_sst_{valid?fmt=%Y%m%d}.nc,{OUTPUT_BASE}/s2s_mjo/UserScript_obsCFSR_obsOnly_MJO_ENSO/Regrid/zonalOceanCurrent/cfsr_zonalOceanCurrent_{valid?fmt=%Y%m%d}.nc,{OUTPUT_BASE}/s2s_mjo/UserScript_obsCFSR_obsOnly_MJO_ENSO/Regrid/meridionalOceanCurrent/cfsr_meridionalOceanCurrent_{valid?fmt=%Y%m%d}.nc + +USER_SCRIPT_INPUT_TEMPLATE = {INPUT_BASE}/model_applications/s2s_mjo/UserScript_obsCFSR_obsOnly_MJO_ENSO/zonalWindStress/cfsr_zonalWindStress_{valid?fmt=%Y%m%d}.nc,{INPUT_BASE}/model_applications/s2s_mjo/UserScript_obsCFSR_obsOnly_MJO_ENSO/meridionalWindStress/cfsr_meridionalWindStress_{valid?fmt=%Y%m%d}.nc,{INPUT_BASE}/model_applications/s2s_mjo/UserScript_obsCFSR_obsOnly_MJO_ENSO/sst/cfsr_sst_{valid?fmt=%Y%m%d}.nc,{INPUT_BASE}/model_applications/s2s_mjo/UserScript_obsCFSR_obsOnly_MJO_ENSO/zonalOceanCurrent/cfsr_zonalOceanCurrent_{valid?fmt=%Y%m%d}.nc,{INPUT_BASE}/model_applications/s2s_mjo/UserScript_obsCFSR_obsOnly_MJO_ENSO/meridionalOceanCurrent/cfsr_meridionalOceanCurrent_{valid?fmt=%Y%m%d}.nc + +# Names of the files containing the listing of input files +# The options are OBS_TAUX_INPUT, OBS_TAUY_INPUT, OBS_SST_INPUT, OBS_UCUR_INPUT, OBS_VCUR_INPUT, FCST_TAUX_INPUT, FCST_TAUY_INPUT, FCST_SST_INPUT, FCST_UCUR_INPUT, and FCST_VCUR_INPUT +# *** Make sure the order is the same as the order of templates listed in USER_SCRIPT_INPUT_TEMPLATE +USER_SCRIPT_INPUT_TEMPLATE_LABELS = OBS_TAUX_INPUT, OBS_TAUY_INPUT, OBS_SST_INPUT, OBS_UCUR_INPUT, OBS_VCUR_INPUT + +# Command to run the user script +USER_SCRIPT_COMMAND = {METPLUS_BASE}/parm/use_cases/model_applications/s2s_mjo/UserScript_obsCFSR_obsOnly_MJO_ENSO/mjo_enso_driver.py + + # Configurations for the MJO-ENSO analysis script [user_env_vars] # Whether to Run the model or obs @@ -192,18 +244,18 @@ SCRIPT_OUTPUT_BASE = {OUTPUT_BASE} OBS_PER_DAY = 1 # Variable names for TAUX, TAUY, SST, UCUR, VCUR -OBS_TAUX_VAR_NAME = uflx -OBS_TAUY_VAR_NAME = vflx -OBS_SST_VAR_NAME = sst +OBS_TAUX_VAR_NAME = uflx +OBS_TAUY_VAR_NAME = vflx +OBS_SST_VAR_NAME = sst OBS_UCUR_VAR_NAME = u OBS_VCUR_VAR_NAME = v # EOF Filename -TAUX_EOF_INPUT_FILE = {INPUT_BASE}/model_applications/s2s_mjo/UserScript_obsCFSR_obsOnly_MJO_ENSO/Data/cfs_uflx_eof.nc +TAUX_EOF_INPUT_FILE = {INPUT_BASE}/model_applications/s2s_mjo/UserScript_obsCFSR_obsOnly_MJO_ENSO/Data/cfs_uflx_eof.nc TAUY_EOF_INPUT_FILE = {INPUT_BASE}/model_applications/s2s_mjo/UserScript_obsCFSR_obsOnly_MJO_ENSO/Data/cfs_vflx_eof.nc WMJOK_SST_EOF_INPUT_FILE = {INPUT_BASE}/model_applications/s2s_mjo/UserScript_obsCFSR_obsOnly_MJO_ENSO/Data/cfs_multivarEOF.nc -# Filters weights +# Filter weights TAUX_Filter1_TEXTFILE = {INPUT_BASE}/model_applications/s2s_mjo/UserScript_obsCFSR_obsOnly_MJO_ENSO/Data/taux.filter1.txt TAUX_Filter2_TEXTFILE = {INPUT_BASE}/model_applications/s2s_mjo/UserScript_obsCFSR_obsOnly_MJO_ENSO/Data/taux.filter2.txt TAUY_Filter1_TEXTFILE = {INPUT_BASE}/model_applications/s2s_mjo/UserScript_obsCFSR_obsOnly_MJO_ENSO/Data/tauy.filter1.txt @@ -222,24 +274,5 @@ MAKE_MAKI_OUTPUT_TEXT_FILE = 
{OUTPUT_BASE}/s2s_mjo/UserScript_obsCFSR_obsOnly_MJ PLOT_TIME_BEG = 19900101 PLOT_TIME_END = 20211231 PLOT_TIME_FMT = {VALID_TIME_FMT} -OBS_PLOT_OUTPUT_NAME = MAKE_MAKI_timeseries +OBS_PLOT_OUTPUT_NAME = MAKE_MAKI_timeseries OBS_PLOT_OUTPUT_FORMAT = png - -# Configurations for UserScript: Run the MJO_ENSO Analysis driver -[script_mjo_enso] -# list of strings to loop over for each run time. -# Run the user script once per lead -USER_SCRIPT_RUNTIME_FREQ = RUN_ONCE_PER_LEAD - -# Template of filenames to input to the user-script -#USER_SCRIPT_INPUT_TEMPLATE = {OUTPUT_BASE}/s2s_mjo/UserScript_obsCFSR_obsOnly_MJO_ENSO/Regrid/zonalWindStress/cfsr_zonalWindStress_{valid?fmt=%Y%m%d}.nc,{OUTPUT_BASE}/s2s_mjo/UserScript_obsCFSR_obsOnly_MJO_ENSO/Regrid/meridionalWindStress/cfsr_meridionalWindStress_{valid?fmt=%Y%m%d}.nc,{OUTPUT_BASE}/s2s_mjo/UserScript_obsCFSR_obsOnly_MJO_ENSO/Regrid/sst/cfsr_sst_{valid?fmt=%Y%m%d}.nc,{OUTPUT_BASE}/s2s_mjo/UserScript_obsCFSR_obsOnly_MJO_ENSO/Regrid/zonalOceanCurrent/cfsr_zonalOceanCurrent_{valid?fmt=%Y%m%d}.nc,{OUTPUT_BASE}/s2s_mjo/UserScript_obsCFSR_obsOnly_MJO_ENSO/Regrid/meridionalOceanCurrent/cfsr_meridionalOceanCurrent_{valid?fmt=%Y%m%d}.nc - -USER_SCRIPT_INPUT_TEMPLATE = {INPUT_BASE}/model_applications/s2s_mjo/UserScript_obsCFSR_obsOnly_MJO_ENSO/zonalWindStress/cfsr_zonalWindStress_{valid?fmt=%Y%m%d}.nc,{INPUT_BASE}/model_applications/s2s_mjo/UserScript_obsCFSR_obsOnly_MJO_ENSO/meridionalWindStress/cfsr_meridionalWindStress_{valid?fmt=%Y%m%d}.nc,{INPUT_BASE}/model_applications/s2s_mjo/UserScript_obsCFSR_obsOnly_MJO_ENSO/sst/cfsr_sst_{valid?fmt=%Y%m%d}.nc,{INPUT_BASE}/model_applications/s2s_mjo/UserScript_obsCFSR_obsOnly_MJO_ENSO/zonalOceanCurrent/cfsr_zonalOceanCurrent_{valid?fmt=%Y%m%d}.nc,{INPUT_BASE}/model_applications/s2s_mjo/UserScript_obsCFSR_obsOnly_MJO_ENSO/meridionalOceanCurrent/cfsr_meridionalOceanCurrent_{valid?fmt=%Y%m%d}.nc - -# Name of the file containing the listing of input files -# The options are OBS_TAUX_INPUT, OBS_TAUY_INPUT, OBS_SST_INPUT, OBS_UCUR_INPUT, OBS_VCUR_INPUT, FCST_TAUX_INPUT, FCST_TAUY_INPUT, FCST_SST_INPUT, FCST_UCUR_INPUT,and FCST_VCUR_INPUT -# *** Make sure the order is the same as the order of templates listed in USER_SCRIPT_INPUT_TEMPLATE -USER_SCRIPT_INPUT_TEMPLATE_LABELS = OBS_TAUX_INPUT,OBS_TAUY_INPUT, OBS_SST_INPUT, OBS_UCUR_INPUT, OBS_VCUR_INPUT - -# Command to run the user script with input configuration file -USER_SCRIPT_COMMAND = {METPLUS_BASE}/parm/use_cases/model_applications/s2s_mjo/UserScript_obsCFSR_obsOnly_MJO_ENSO/mjo_enso_driver.py diff --git a/parm/use_cases/model_applications/s2s_mjo/UserScript_obsERA_obsOnly_OMI.conf b/parm/use_cases/model_applications/s2s_mjo/UserScript_obsERA_obsOnly_OMI.conf index 8f7a87db2..c2fe8ab7c 100644 --- a/parm/use_cases/model_applications/s2s_mjo/UserScript_obsERA_obsOnly_OMI.conf +++ b/parm/use_cases/model_applications/s2s_mjo/UserScript_obsERA_obsOnly_OMI.conf @@ -16,9 +16,6 @@ VALID_INCREMENT = 86400 LEAD_SEQ = 0 -LOOP_ORDER = processes - - # variables referenced in other sections # Run the obs for these cases diff --git a/parm/use_cases/model_applications/s2s_mjo/UserScript_obsERA_obsOnly_PhaseDiagram.conf b/parm/use_cases/model_applications/s2s_mjo/UserScript_obsERA_obsOnly_PhaseDiagram.conf index ed50889f3..015b5f195 100644 --- a/parm/use_cases/model_applications/s2s_mjo/UserScript_obsERA_obsOnly_PhaseDiagram.conf +++ b/parm/use_cases/model_applications/s2s_mjo/UserScript_obsERA_obsOnly_PhaseDiagram.conf @@ -33,8 +33,6 @@ VALID_INCREMENT = 86400 LEAD_SEQ = 0 -LOOP_ORDER = 
processes - # variables referenced in other sections # Run the obs for these cases diff --git a/parm/use_cases/model_applications/s2s_mjo/UserScript_obsERA_obsOnly_RMM.conf b/parm/use_cases/model_applications/s2s_mjo/UserScript_obsERA_obsOnly_RMM.conf index add8fae51..352a49f39 100644 --- a/parm/use_cases/model_applications/s2s_mjo/UserScript_obsERA_obsOnly_RMM.conf +++ b/parm/use_cases/model_applications/s2s_mjo/UserScript_obsERA_obsOnly_RMM.conf @@ -37,8 +37,6 @@ VALID_INCREMENT = 86400 LEAD_SEQ = 0 -LOOP_ORDER = processes - # variables referenced in other sections # Run the obs for these cases diff --git a/parm/use_cases/model_applications/short_range/EnsembleStat_fcstHRRRE_obsHRRRE_Sfc_MultiField.conf b/parm/use_cases/model_applications/short_range/EnsembleStat_fcstHRRRE_obsHRRRE_Sfc_MultiField.conf index 02e6da04e..73be8cda9 100644 --- a/parm/use_cases/model_applications/short_range/EnsembleStat_fcstHRRRE_obsHRRRE_Sfc_MultiField.conf +++ b/parm/use_cases/model_applications/short_range/EnsembleStat_fcstHRRRE_obsHRRRE_Sfc_MultiField.conf @@ -33,8 +33,6 @@ INIT_INCREMENT=3600 LEAD_SEQ = 0,1,2 -LOOP_ORDER = times - ### # File I/O diff --git a/parm/use_cases/model_applications/short_range/GenEnsProd_fcstHRRR_fcstOnly_SurrogateSevere.conf b/parm/use_cases/model_applications/short_range/GenEnsProd_fcstHRRR_fcstOnly_SurrogateSevere.conf index e63c21a0e..df6ead3e5 100644 --- a/parm/use_cases/model_applications/short_range/GenEnsProd_fcstHRRR_fcstOnly_SurrogateSevere.conf +++ b/parm/use_cases/model_applications/short_range/GenEnsProd_fcstHRRR_fcstOnly_SurrogateSevere.conf @@ -33,8 +33,6 @@ INIT_INCREMENT=86400 LEAD_SEQ = 36 -LOOP_ORDER = processes - ### # File I/O diff --git a/parm/use_cases/model_applications/short_range/GridStat_fcstFV3_obsGOES_BrightnessTempDmap.conf b/parm/use_cases/model_applications/short_range/GridStat_fcstFV3_obsGOES_BrightnessTempDmap.conf index 2deafe140..1452ca4cd 100644 --- a/parm/use_cases/model_applications/short_range/GridStat_fcstFV3_obsGOES_BrightnessTempDmap.conf +++ b/parm/use_cases/model_applications/short_range/GridStat_fcstFV3_obsGOES_BrightnessTempDmap.conf @@ -33,8 +33,6 @@ INIT_INCREMENT = 3600 LEAD_SEQ = 1,2 -LOOP_ORDER = processes - ### # File I/O diff --git a/parm/use_cases/model_applications/short_range/GridStat_fcstHRRR_obsPracPerfect_SurrogateSevere.conf b/parm/use_cases/model_applications/short_range/GridStat_fcstHRRR_obsPracPerfect_SurrogateSevere.conf index 0301941e0..f5957064b 100644 --- a/parm/use_cases/model_applications/short_range/GridStat_fcstHRRR_obsPracPerfect_SurrogateSevere.conf +++ b/parm/use_cases/model_applications/short_range/GridStat_fcstHRRR_obsPracPerfect_SurrogateSevere.conf @@ -35,7 +35,6 @@ INIT_SEQ = 0 LEAD_SEQ_MIN = 36 LEAD_SEQ_MAX = 36 -LOOP_ORDER = times ### # File I/O diff --git a/parm/use_cases/model_applications/short_range/GridStat_fcstHRRR_obsPracPerfect_SurrogateSevereProb.conf b/parm/use_cases/model_applications/short_range/GridStat_fcstHRRR_obsPracPerfect_SurrogateSevereProb.conf index 24c3be353..822e28e8c 100644 --- a/parm/use_cases/model_applications/short_range/GridStat_fcstHRRR_obsPracPerfect_SurrogateSevereProb.conf +++ b/parm/use_cases/model_applications/short_range/GridStat_fcstHRRR_obsPracPerfect_SurrogateSevereProb.conf @@ -35,7 +35,6 @@ INIT_SEQ = 0 LEAD_SEQ_MIN = 36 LEAD_SEQ_MAX = 36 -LOOP_ORDER = times ### # File I/O diff --git a/parm/use_cases/model_applications/short_range/METdbLoad_fcstFV3_obsGoes_BrightnessTemp.conf 
b/parm/use_cases/model_applications/short_range/METdbLoad_fcstFV3_obsGoes_BrightnessTemp.conf index 79ef5d57f..3ac50c4d4 100644 --- a/parm/use_cases/model_applications/short_range/METdbLoad_fcstFV3_obsGoes_BrightnessTemp.conf +++ b/parm/use_cases/model_applications/short_range/METdbLoad_fcstFV3_obsGoes_BrightnessTemp.conf @@ -32,8 +32,6 @@ VALID_BEG = 2019052112 VALID_END = 2019052100 VALID_INCREMENT = 12H -LOOP_ORDER = processes - MET_DB_LOAD_RUNTIME_FREQ = RUN_ONCE diff --git a/parm/use_cases/model_applications/short_range/MODE_fcstFV3_obsGOES_BrightnessTemp.conf b/parm/use_cases/model_applications/short_range/MODE_fcstFV3_obsGOES_BrightnessTemp.conf index d85396cee..b658849cf 100644 --- a/parm/use_cases/model_applications/short_range/MODE_fcstFV3_obsGOES_BrightnessTemp.conf +++ b/parm/use_cases/model_applications/short_range/MODE_fcstFV3_obsGOES_BrightnessTemp.conf @@ -33,8 +33,6 @@ INIT_INCREMENT = 3600 LEAD_SEQ = 1,2 -LOOP_ORDER = processes - ### # File I/O diff --git a/parm/use_cases/model_applications/short_range/MODE_fcstFV3_obsGOES_BrightnessTempObjs.conf b/parm/use_cases/model_applications/short_range/MODE_fcstFV3_obsGOES_BrightnessTempObjs.conf index ff97185d2..9a8d8b5cb 100644 --- a/parm/use_cases/model_applications/short_range/MODE_fcstFV3_obsGOES_BrightnessTempObjs.conf +++ b/parm/use_cases/model_applications/short_range/MODE_fcstFV3_obsGOES_BrightnessTempObjs.conf @@ -33,7 +33,6 @@ INIT_INCREMENT = 3600 LEAD_SEQ = 1,2 -LOOP_ORDER = processes ### # File I/O diff --git a/parm/use_cases/model_applications/short_range/MODE_fcstHRRR_obsMRMS_Hail_GRIB2.conf b/parm/use_cases/model_applications/short_range/MODE_fcstHRRR_obsMRMS_Hail_GRIB2.conf index 16f73d367..2983b999c 100644 --- a/parm/use_cases/model_applications/short_range/MODE_fcstHRRR_obsMRMS_Hail_GRIB2.conf +++ b/parm/use_cases/model_applications/short_range/MODE_fcstHRRR_obsMRMS_Hail_GRIB2.conf @@ -35,8 +35,6 @@ INIT_SEQ = 0 LEAD_SEQ_MAX = 36 LEAD_SEQ_MIN = 12 -LOOP_ORDER = processes - ### # File I/O diff --git a/parm/use_cases/model_applications/short_range/Point2Grid_obsLSR_ObsOnly_PracticallyPerfect.conf b/parm/use_cases/model_applications/short_range/Point2Grid_obsLSR_ObsOnly_PracticallyPerfect.conf index 525581479..5f24ac803 100644 --- a/parm/use_cases/model_applications/short_range/Point2Grid_obsLSR_ObsOnly_PracticallyPerfect.conf +++ b/parm/use_cases/model_applications/short_range/Point2Grid_obsLSR_ObsOnly_PracticallyPerfect.conf @@ -33,8 +33,6 @@ INIT_INCREMENT = 24H LEAD_SEQ = 12H -LOOP_ORDER = times - ### # File I/O diff --git a/parm/use_cases/model_applications/space_weather/GenVxMask_fcstGloTEC_FcstOnly_solar_altitude.conf b/parm/use_cases/model_applications/space_weather/GenVxMask_fcstGloTEC_FcstOnly_solar_altitude.conf index a2f6d992b..d6a4a7df1 100644 --- a/parm/use_cases/model_applications/space_weather/GenVxMask_fcstGloTEC_FcstOnly_solar_altitude.conf +++ b/parm/use_cases/model_applications/space_weather/GenVxMask_fcstGloTEC_FcstOnly_solar_altitude.conf @@ -36,8 +36,6 @@ VALID_INCREMENT = 600 LEAD_SEQ = 0 -LOOP_ORDER = times - ### # File I/O diff --git a/parm/use_cases/model_applications/space_weather/GridStat_fcstGloTEC_obsGloTEC_vx7.conf b/parm/use_cases/model_applications/space_weather/GridStat_fcstGloTEC_obsGloTEC_vx7.conf index 04e2575d1..dd6494eb8 100644 --- a/parm/use_cases/model_applications/space_weather/GridStat_fcstGloTEC_obsGloTEC_vx7.conf +++ b/parm/use_cases/model_applications/space_weather/GridStat_fcstGloTEC_obsGloTEC_vx7.conf @@ -36,7 +36,6 @@ VALID_INCREMENT = 600 LEAD_SEQ = 0 -LOOP_ORDER 
= times ### # File I/O diff --git a/parm/use_cases/model_applications/tc_and_extra_tc/CyclonePlotter_fcstGFS_obsGFS_UserScript_ExtraTC.conf b/parm/use_cases/model_applications/tc_and_extra_tc/CyclonePlotter_fcstGFS_obsGFS_UserScript_ExtraTC.conf index ef1344706..1e3b3fc45 100644 --- a/parm/use_cases/model_applications/tc_and_extra_tc/CyclonePlotter_fcstGFS_obsGFS_UserScript_ExtraTC.conf +++ b/parm/use_cases/model_applications/tc_and_extra_tc/CyclonePlotter_fcstGFS_obsGFS_UserScript_ExtraTC.conf @@ -33,8 +33,6 @@ INIT_INCREMENT = 21600 USER_SCRIPT_RUNTIME_FREQ = RUN_ONCE_PER_INIT_OR_VALID -LOOP_ORDER = processes - ### # File I/O diff --git a/parm/use_cases/model_applications/tc_and_extra_tc/GridStat_fcstHAFS_obsTDR_NetCDF.conf b/parm/use_cases/model_applications/tc_and_extra_tc/GridStat_fcstHAFS_obsTDR_NetCDF.conf index 38b229e0f..6c28e387a 100644 --- a/parm/use_cases/model_applications/tc_and_extra_tc/GridStat_fcstHAFS_obsTDR_NetCDF.conf +++ b/parm/use_cases/model_applications/tc_and_extra_tc/GridStat_fcstHAFS_obsTDR_NetCDF.conf @@ -35,8 +35,6 @@ LEAD_SEQ = 0,6,12,18 CUSTOM_LOOP_LIST = 190829H1 -LOOP_ORDER = times - ### # File I/O diff --git a/parm/use_cases/model_applications/tc_and_extra_tc/Plotter_fcstGFS_obsGFS_ExtraTC.conf b/parm/use_cases/model_applications/tc_and_extra_tc/Plotter_fcstGFS_obsGFS_ExtraTC.conf index 60bf71574..e45b93aa0 100644 --- a/parm/use_cases/model_applications/tc_and_extra_tc/Plotter_fcstGFS_obsGFS_ExtraTC.conf +++ b/parm/use_cases/model_applications/tc_and_extra_tc/Plotter_fcstGFS_obsGFS_ExtraTC.conf @@ -33,8 +33,6 @@ INIT_INCREMENT = 21600 TC_PAIRS_RUN_ONCE = True -LOOP_ORDER = processes - ### # File I/O diff --git a/parm/use_cases/model_applications/tc_and_extra_tc/Plotter_fcstGFS_obsGFS_RPlotting.conf b/parm/use_cases/model_applications/tc_and_extra_tc/Plotter_fcstGFS_obsGFS_RPlotting.conf index dbd4e3926..854815370 100644 --- a/parm/use_cases/model_applications/tc_and_extra_tc/Plotter_fcstGFS_obsGFS_RPlotting.conf +++ b/parm/use_cases/model_applications/tc_and_extra_tc/Plotter_fcstGFS_obsGFS_RPlotting.conf @@ -31,7 +31,6 @@ INIT_BEG = 20141214 INIT_END = 20141214 INIT_INCREMENT = 21600 ;; set to every 6 hours=21600 seconds -LOOP_ORDER = processes ### # File I/O diff --git a/parm/use_cases/model_applications/tc_and_extra_tc/TCGen_fcstGFS_obsBDECK_2021season.conf b/parm/use_cases/model_applications/tc_and_extra_tc/TCGen_fcstGFS_obsBDECK_2021season.conf index 3ea343ac8..50aee90b9 100644 --- a/parm/use_cases/model_applications/tc_and_extra_tc/TCGen_fcstGFS_obsBDECK_2021season.conf +++ b/parm/use_cases/model_applications/tc_and_extra_tc/TCGen_fcstGFS_obsBDECK_2021season.conf @@ -29,8 +29,6 @@ LOOP_BY = INIT INIT_TIME_FMT = %Y INIT_BEG = 2021 -LOOP_ORDER = times - ### # File I/O diff --git a/parm/use_cases/model_applications/tc_and_extra_tc/TCRMW_fcstGFS_fcstOnly_gonzalo.conf b/parm/use_cases/model_applications/tc_and_extra_tc/TCRMW_fcstGFS_fcstOnly_gonzalo.conf index f44ee2d2c..4782ccae8 100644 --- a/parm/use_cases/model_applications/tc_and_extra_tc/TCRMW_fcstGFS_fcstOnly_gonzalo.conf +++ b/parm/use_cases/model_applications/tc_and_extra_tc/TCRMW_fcstGFS_fcstOnly_gonzalo.conf @@ -34,8 +34,6 @@ INIT_INCREMENT = 6H LEAD_SEQ = begin_end_incr(0, 24, 6) #LEAD_SEQ = begin_end_incr(0, 126, 6) -LOOP_ORDER = times - ### # File I/O diff --git a/parm/use_cases/model_applications/tc_and_extra_tc/UserScript_ASCII2NC_PointStat_fcstHAFS_obsFRD_NetCDF.conf b/parm/use_cases/model_applications/tc_and_extra_tc/UserScript_ASCII2NC_PointStat_fcstHAFS_obsFRD_NetCDF.conf index 
409c3422c..5cc81d15d 100644 --- a/parm/use_cases/model_applications/tc_and_extra_tc/UserScript_ASCII2NC_PointStat_fcstHAFS_obsFRD_NetCDF.conf +++ b/parm/use_cases/model_applications/tc_and_extra_tc/UserScript_ASCII2NC_PointStat_fcstHAFS_obsFRD_NetCDF.conf @@ -35,8 +35,6 @@ LEAD_SEQ = 0,6,12,18 USER_SCRIPT_RUNTIME_FREQ = RUN_ONCE_PER_INIT_OR_VALID -LOOP_ORDER = processes - ### # File I/O diff --git a/ush/plotting_scripts/plot_date_by_level.py b/ush/plotting_scripts/plot_date_by_level.py deleted file mode 100644 index 291759341..000000000 --- a/ush/plotting_scripts/plot_date_by_level.py +++ /dev/null @@ -1,819 +0,0 @@ -''' -Name: plot_date_by_level.py -Contact(s): Mallory Row -Abstract: Reads filtered files from stat_analysis_wrapper - run_all_times to make date-pressure plots -History Log: Third version -Usage: Called by make_plots_wrapper.py -Parameters: None -Input Files: MET .stat files -Output Files: .png images -Condition codes: 0 for success, 1 for failure -''' - -import os -import numpy as np -import pandas as pd -import itertools -import warnings -import logging -import datetime -import re -import sys -import matplotlib -matplotlib.use('agg') -import matplotlib.pyplot as plt -import matplotlib.dates as md -import matplotlib.gridspec as gridspec - - -import plot_util as plot_util - -# add metplus directory to path so the wrappers and utilities can be found -sys.path.insert(0, os.path.abspath(os.path.join(os.path.dirname(__file__), - '..', - '..'))) -from metplus.util import do_string_sub - -# Read environment variables set in make_plots_wrapper.py -verif_case = os.environ['VERIF_CASE'] -verif_type = os.environ['VERIF_TYPE'] -date_type = os.environ['DATE_TYPE'] -valid_beg = os.environ['VALID_BEG'] -valid_end = os.environ['VALID_END'] -init_beg = os.environ['INIT_BEG'] -init_end = os.environ['INIT_END'] -fcst_valid_hour_list = os.environ['FCST_VALID_HOUR'].split(', ') -fcst_valid_hour = os.environ['FCST_VALID_HOUR'] -fcst_init_hour_list = os.environ['FCST_INIT_HOUR'].split(', ') -fcst_init_hour = os.environ['FCST_INIT_HOUR'] -obs_valid_hour_list = os.environ['OBS_VALID_HOUR'].split(', ') -obs_valid_hour = os.environ['OBS_VALID_HOUR'] -obs_init_hour_list = os.environ['OBS_INIT_HOUR'].split(', ') -obs_init_hour = os.environ['OBS_INIT_HOUR'] -fcst_lead_list = os.environ['FCST_LEAD'].split(', ') -fcst_var_name = os.environ['FCST_VAR'] -fcst_var_units = os.environ['FCST_UNITS'] -fcst_var_level_list = [os.environ['FCST_LEVEL'].split(', ')] -fcst_var_thresh_list = os.environ['FCST_THRESH'].split(', ') -obs_var_name = os.environ['OBS_VAR'] -obs_var_units = os.environ['OBS_UNITS'] -obs_var_level_list = [os.environ['OBS_LEVEL'].split(', ')] -obs_var_thresh_list = os.environ['OBS_THRESH'].split(', ') -interp_mthd = os.environ['INTERP_MTHD'] -interp_pnts = os.environ['INTERP_PNTS'] -vx_mask = os.environ['VX_MASK'] -alpha = os.environ['ALPHA'] -desc = os.environ['DESC'] -obs_lead = os.environ['OBS_LEAD'] -cov_thresh = os.environ['COV_THRESH'] -stats_list = os.environ['STATS'].split(', ') -model_list = os.environ['MODEL'].split(', ') -model_obtype_list = os.environ['MODEL_OBTYPE'].split(', ') -model_reference_name_list = os.environ['MODEL_REFERENCE_NAME'].split(', ') -dump_row_filename_template = os.environ['DUMP_ROW_FILENAME'] -average_method = os.environ['AVERAGE_METHOD'] -ci_method = os.environ['CI_METHOD'] -verif_grid = os.environ['VERIF_GRID'] -event_equalization = os.environ['EVENT_EQUALIZATION'] -met_version = os.environ['MET_VERSION'] -input_base_dir = os.environ['INPUT_BASE_DIR'] 
-output_base_dir = os.environ['OUTPUT_BASE_DIR'] -log_metplus = os.environ['LOG_METPLUS'] -log_level = os.environ['LOG_LEVEL'] - -# General set up and settings -# Plots -warnings.filterwarnings('ignore') -plt.rcParams['font.weight'] = 'bold' -plt.rcParams['axes.labelsize'] = 15 -plt.rcParams['axes.labelweight'] = 'bold' -plt.rcParams['xtick.labelsize'] = 15 -plt.rcParams['ytick.labelsize'] = 15 -plt.rcParams['axes.titlesize'] = 15 -plt.rcParams['axes.titleweight'] = 'bold' -plt.rcParams['axes.formatter.useoffset'] = False -cmap_bias = plt.cm.PiYG_r -cmap = plt.cm.BuPu -cmap_diff = plt.cm.coolwarm -# Logging -logger = logging.getLogger(log_metplus) -logger.setLevel(log_level) -formatter = logging.Formatter( - '%(asctime)s.%(msecs)03d (%(filename)s:%(lineno)d) %(levelname)s: ' - +'%(message)s', - '%m/%d %H:%M:%S' - ) -file_handler = logging.FileHandler(log_metplus, mode='a') -file_handler.setFormatter(formatter) -logger.addHandler(file_handler) - -for level_list in fcst_var_level_list: - for level in level_list: - if not level.startswith('P'): - logger.warning(f"Forecast level value ({level}) expected " - "to be in pressure, i.e. P500. Exiting.") - sys.exit(0) - -output_data_dir = os.path.join(output_base_dir, 'data') -output_imgs_dir = os.path.join(output_base_dir, 'imgs') -# Model info -model_info_list = list( - zip(model_list, - model_reference_name_list, - model_obtype_list, - ) -) -nmodels = len(model_info_list) -# Plot info -plot_info_list = list( - itertools.product(*[fcst_lead_list, - fcst_var_level_list, - fcst_var_thresh_list]) - ) -# Date and time infomation and build title for plot -date_beg = os.environ[date_type+'_BEG'] -date_end = os.environ[date_type+'_END'] -date_plot_title = ( - date_type.title()+': ' - +str(datetime.datetime.strptime(date_beg, '%Y%m%d').strftime('%d%b%Y')) - +'-' - +str(datetime.datetime.strptime(date_end, '%Y%m%d').strftime('%d%b%Y')) -) -valid_init_dict = { - 'fcst_valid_hour_beg': fcst_valid_hour_list[0], - 'fcst_valid_hour_end': fcst_valid_hour_list[-1], - 'fcst_init_hour_beg': fcst_init_hour_list[0], - 'fcst_init_hour_end': fcst_init_hour_list[-1], - 'obs_valid_hour_beg': obs_valid_hour_list[0], - 'obs_valid_hour_end': obs_valid_hour_list[-1], - 'obs_init_hour_beg': obs_init_hour_list[0], - 'obs_init_hour_end': obs_init_hour_list[-1], - 'valid_hour_beg': '', - 'valid_hour_end': '', - 'init_hour_beg': '', - 'init_hour_end': '' -} -valid_init_type_list = [ - 'valid_hour_beg', 'valid_hour_end', 'init_hour_beg', 'init_hour_end' -] -for vitype in valid_init_type_list: - if (valid_init_dict['fcst_'+vitype] != '' - and valid_init_dict['obs_'+vitype] == ''): - valid_init_dict[vitype] = valid_init_dict['fcst_'+vitype] - elif (valid_init_dict['obs_'+vitype] != '' - and valid_init_dict['fcst_'+vitype] == ''): - valid_init_dict[vitype] = valid_init_dict['obs_'+vitype] - if valid_init_dict['fcst_'+vitype] == '': - if 'beg' in vitype: - valid_init_dict['fcst_'+vitype] = '000000' - elif 'end' in vitype: - valid_init_dict['fcst_'+vitype] = '235959' - if valid_init_dict['obs_'+vitype] == '': - if 'beg' in vitype: - valid_init_dict['obs_'+vitype] = '000000' - elif 'end' in vitype: - valid_init_dict['obs_'+vitype] = '235959' - if valid_init_dict['fcst_'+vitype] == valid_init_dict['obs_'+vitype]: - valid_init_dict[vitype] = valid_init_dict['fcst_'+vitype] -time_plot_title = '' -for vi in ['valid_hour', 'init_hour']: - beg_hr = valid_init_dict[vi+'_beg'] - end_hr = valid_init_dict[vi+'_end'] - fcst_beg_hr = valid_init_dict['fcst_'+vi+'_beg'] - fcst_end_hr = 
valid_init_dict['fcst_'+vi+'_end'] - obs_beg_hr = valid_init_dict['obs_'+vi+'_beg'] - obs_end_hr = valid_init_dict['obs_'+vi+'_end'] - time_label = vi.split('_')[0].title() - if beg_hr != '' and end_hr != '': - if beg_hr == end_hr: - time_plot_title+=', '+time_label+': '+beg_hr[0:4]+'Z' - else: - time_plot_title+=( - ', '+time_label+': '+beg_hr[0:4]+'-'+end_hr[0:4]+'Z' - ) - else: - if fcst_beg_hr == fcst_end_hr: - time_plot_title+=', Fcst '+time_label+': '+fcst_beg_hr[0:4]+'Z' - else: - time_plot_title+=( - ', Fcst '+time_label+': '+fcst_beg_hr[0:4]+'-' - +fcst_end_hr[0:4]+'Z' - ) - if obs_beg_hr == obs_end_hr: - time_plot_title+=', Obs '+time_label+': '+obs_beg_hr[0:4]+'Z' - else: - time_plot_title+=( - ', Obs '+time_label+': '+obs_beg_hr[0:4]+'-' - +obs_end_hr[0:4]+'Z' - ) -# Common plotting information and build title for plot -if 'WV1' not in interp_mthd or interp_mthd != '': - extra_plot_title = verif_grid+'-'+vx_mask -else: - extra_plot_title = interp_mthd+', '+verif_grid+'-'+vx_mask -if desc != '': - extra_plot_title+=', Desc: '+desc -if obs_lead != '': - extra_plot_title+=', Obs Lead: '+obs_lead -if interp_pnts != '': - extra_plot_title+=', Interp. Pts.: '+interp_pnts -if cov_thresh != '': - extra_plot_title+=', Cov. Thresh:'+cov_thresh -if alpha != '': - extra_plot_title+=', Alpha: '+alpha -# MET .stat file formatting -stat_file_base_columns = plot_util.get_stat_file_base_columns(met_version) -nbase_columns = len(stat_file_base_columns) - -# Start looping to make plots -for plot_info in plot_info_list: - fcst_lead = plot_info[0] - fcst_var_levels = plot_info[1] - obs_var_levels = obs_var_level_list[ - fcst_var_level_list.index(fcst_var_levels) - ] - fcst_var_thresh = plot_info[2] - obs_var_thresh = obs_var_thresh_list[ - fcst_var_thresh_list.index(fcst_var_thresh) - ] - fcst_var_thresh_symbol, fcst_var_thresh_letter = plot_util.format_thresh( - fcst_var_thresh - ) - obs_var_thresh_symbol, obs_var_thresh_letter = plot_util.format_thresh( - obs_var_thresh - ) - # Build plot title for variable info - fcst_var_plot_title = 'Fcst: '+fcst_var_name - obs_var_plot_title = 'Obs: '+obs_var_name - if 'WV1' in interp_mthd: - fcst_var_plot_title+=' '+interp_mthd - obs_var_plot_title+=' '+interp_mthd - if fcst_var_thresh != '': - fcst_var_plot_title+=' '+fcst_var_thresh - if obs_var_thresh != '': - obs_var_plot_title+=' '+obs_var_thresh - if fcst_var_units == '': - fcst_var_units_list = [] - else: - fcst_var_units_list = fcst_var_units.split(', ') - if obs_var_units == '': - obs_var_units_list = [] - else: - obs_var_units_list = obs_var_units.split(', ') - # Build plot title for forecast lead - fcst_lead_plot_title = 'Fcst Lead: '+fcst_lead[:-4]+'hr' - if fcst_lead[-4:-2] != '00': - fcst_lead_plot_title+=fcst_lead[-4:-2]+'min' - if fcst_lead[-2:] != '00': - fcst_lead_plot_title+=fcst_lead[-2:]+'sec' - # Clean up time information for plot title - # if valid/init is a single hour, then init/valid - # is also a single hour - date_time_plot_title = date_plot_title+time_plot_title - date_type_beg_hour = valid_init_dict[date_type.lower()+'_hour_beg'] - date_type_end_hour = valid_init_dict[date_type.lower()+'_hour_end'] - if (date_type_beg_hour != '' and date_type_end_hour != '' - and date_type_beg_hour == date_type_end_hour): - fcst_lead_timedelta = datetime.timedelta( - hours=int(fcst_lead[:-4]), - minutes=int(fcst_lead[-4:-2]), - seconds=int(fcst_lead[-2:]) - ) - date_type_timedelta = datetime.timedelta( - hours=int(date_type_beg_hour[0:2]), - minutes=int(date_type_beg_hour[2:4]), - 
seconds=int(date_type_beg_hour[4:]) - ) - if date_type == 'VALID': - check_time_plot_title = 'Init' - time_diff = ( - date_type_timedelta - fcst_lead_timedelta - ).total_seconds() - elif date_type == 'INIT': - check_time_plot_title = 'Valid' - time_diff = ( - date_type_timedelta - fcst_lead_timedelta - ).total_seconds() - day_diff = time_diff//86400 - hr_diff = (time_diff - (day_diff*86400))//3600 - min_diff = (time_diff%3600) // 60 - sec_diff = (time_diff%3600)%60 - time_title_replace = re.search(check_time_plot_title+': (.*)Z', - date_time_plot_title) - date_time_plot_title = date_time_plot_title.replace( - check_time_plot_title+': '+time_title_replace.group(1), - check_time_plot_title+': '+str(int(hr_diff)).zfill(2) - +str(int(min_diff)).zfill(2) - ) - logger.info("Working on forecast lead "+fcst_lead+" " - +"and forecast variable "+fcst_var_name+" " - +fcst_var_thresh) - # Set up base name for file naming convention for MET .stat files, - # and output data and images - base_name = date_type.lower()+date_beg+'to'+date_end - if (valid_init_dict['valid_hour_beg'] != '' - and valid_init_dict['valid_hour_end'] != ''): - base_name+=( - '_valid'+valid_init_dict['valid_hour_beg'][0:4] - +'to'+valid_init_dict['valid_hour_end'][0:4]+'Z' - ) - else: - base_name+=( - '_fcst_valid'+valid_init_dict['fcst_valid_hour_beg'][0:4] - +'to'+valid_init_dict['fcst_valid_hour_end'][0:4]+'Z' - +'_obs_valid'+valid_init_dict['obs_valid_hour_beg'][0:4] - +'to'+valid_init_dict['obs_valid_hour_end'][0:4]+'Z' - ) - if (valid_init_dict['init_hour_beg'] != '' - and valid_init_dict['init_hour_end'] != ''): - base_name+=( - '_init'+valid_init_dict['init_hour_beg'][0:4] - +'to'+valid_init_dict['init_hour_end'][0:4]+'Z' - ) - else: - base_name+=( - '_fcst_init'+valid_init_dict['fcst_init_hour_beg'][0:4] - +'to'+valid_init_dict['fcst_init_hour_end'][0:4]+'Z' - +'_obs_init'+valid_init_dict['obs_init_hour_beg'][0:4] - +'to'+valid_init_dict['obs_init_hour_end']+'Z' - ) - base_name+=( - '_fcst_lead'+fcst_lead - +'_fcst'+fcst_var_name+'FCSTLEVELHOLDER' - +fcst_var_thresh_letter.replace(',', '_')+interp_mthd - +'_obs'+obs_var_name+'OBSLEVELHOLDER' - +obs_var_thresh_letter.replace(',', '_')+interp_mthd - +'_vxmask'+vx_mask - ) - if desc != '': - base_name+='_desc'+desc - if obs_lead != '': - base_name+='_obs_lead'+obs_lead - if interp_pnts != '': - base_name+='_interp_pnts'+interp_pnts - if cov_thresh != '': - cov_thresh_symbol, cov_thresh_letter = plot_util.format_thresh( - cov_thresh - ) - base_name+='_cov_thresh'+cov_thresh_letter.replace(',', '_') - if alpha != '': - base_name+='_alpha'+alpha - # Set up expected date in MET .stat file and date plot information - plot_time_dates, expected_stat_file_dates = plot_util.get_date_arrays( - date_type, date_beg, date_end, - fcst_valid_hour, fcst_init_hour, - obs_valid_hour, obs_init_hour, - fcst_lead - ) - total_dates = len(plot_time_dates) - if len(plot_time_dates) == 0: - logger.error("Date array constructed information from METplus " - +"conf file has length of 0. Not enough information " - +"was provided to build date information. 
Please check " - +"provided VALID/INIT_BEG/END and " - +"OBS/FCST_INIT/VALID_HOUR_LIST") - exit(1) - elif len(plot_time_dates) <= 3: - date_tick_intvl = 1 - elif len(plot_time_dates) > 3 and len(plot_time_dates) <= 10: - date_tick_intvl = 2 - elif len(plot_time_dates) > 10 and len(plot_time_dates) < 31: - date_tick_intvl = 5 - else: - date_tick_intvl = 10 - # Build date by forecst level grid for plotting - fcst_var_levels_int = np.empty(len(fcst_var_levels), dtype=int) - for vl in range(len(fcst_var_levels)): - fcst_var_levels_int[vl] = fcst_var_levels[vl][1:] - xmesh, ymesh = np.meshgrid(plot_time_dates, fcst_var_levels_int) - # Reading in model .stat files from stat_analysis - logger.info("Reading in model data") - for model_info in model_info_list: - model_num = model_info_list.index(model_info) + 1 - model_name = model_info[0] - model_plot_name = model_info[1] - model_obtype = model_info[2] - for vl in range(len(fcst_var_levels)): - fcst_var_level = fcst_var_levels[vl] - obs_var_level = obs_var_levels[vl] - model_level_now_data_index = pd.MultiIndex.from_product( - [ - [model_plot_name], [fcst_var_level], - expected_stat_file_dates - ], - names=['model_plot_name', 'levels', 'dates'] - ) -# model_stat_filename = ( -# model_plot_name+'_'+model_obtype+'_' -# +base_name.replace('FCSTLEVELHOLDER', fcst_var_level) \ -# .replace('OBSLEVELHOLDER', obs_var_level) -# +'_dump_row.stat' -# ) -# model_stat_file = os.path.join(input_base_dir, -# model_stat_filename) - model_stat_template = dump_row_filename_template - string_sub_dict = { - 'model': model_name, - 'model_reference': model_plot_name, - 'obtype': model_obtype, - 'fcst_lead': fcst_lead, - 'fcst_level': fcst_var_level, - 'obs_level': obs_var_level, - 'fcst_thresh': fcst_var_thresh, - 'obs_thresh': obs_var_thresh, - } - model_stat_file = do_string_sub(model_stat_template, - **string_sub_dict) - if os.path.exists(model_stat_file): - nrow = sum(1 for line in open(model_stat_file)) - if nrow == 0: - logger.warning("Model "+str(model_num)+" "+model_name+" " - +"with plot name "+model_plot_name+" " - +"file: "+model_stat_file+" empty") - model_level_now_data = ( - pd.DataFrame(np.nan, - index=model_level_now_data_index, - columns=[ 'TOTAL' ]) - ) - else: - logger.debug("Model "+str(model_num)+" "+model_name+" " - +"with plot name "+model_plot_name+" " - +"file: "+model_stat_file+" exists") - model_level_now_stat_file_data = pd.read_csv( - model_stat_file, sep=" ", skiprows=1, - skipinitialspace=True, header=None - ) - model_level_now_stat_file_data.rename( - columns=dict(zip( - model_level_now_stat_file_data \ - .columns[:nbase_columns], - stat_file_base_columns - )), inplace=True - ) - line_type = model_level_now_stat_file_data['LINE_TYPE'][0] - stat_file_line_type_columns = ( - plot_util.get_stat_file_line_type_columns(logger, - met_version, - line_type) - ) - model_level_now_stat_file_data.rename( - columns=dict(zip( - model_level_now_stat_file_data \ - .columns[nbase_columns:], - stat_file_line_type_columns - )), inplace=True - ) - model_level_now_stat_file_data_fcstvaliddates = ( - model_level_now_stat_file_data.loc[:] \ - ['FCST_VALID_BEG'].values - ) - model_level_now_data = ( - pd.DataFrame(np.nan, index=model_level_now_data_index, - columns=stat_file_line_type_columns) - ) - model_level_now_stat_file_data.fillna( - {'FCST_UNITS':'NA', 'OBS_UNITS':'NA', 'VX_MASK':'NA'}, - inplace=True - ) - if float(met_version) >= 8.1: - model_now_fcst_units = ( - model_level_now_stat_file_data \ - .loc[0]['FCST_UNITS'] - ) - model_now_obs_units = ( - 
model_level_now_stat_file_data \ - .loc[0]['OBS_UNITS'] - ) - if model_now_fcst_units != 'NA': - fcst_var_units_list.append(model_now_fcst_units) - if model_now_obs_units != 'NA': - obs_var_units_list.append(model_now_obs_units) - for expected_date in expected_stat_file_dates: - if expected_date in \ - model_level_now_stat_file_data_fcstvaliddates: - matching_date_idx = ( - model_level_now_stat_file_data_fcstvaliddates \ - .tolist().index(expected_date) - ) - model_level_now_stat_file_data_indexed = ( - model_level_now_stat_file_data \ - .loc[matching_date_idx][:] - ) - for col in stat_file_line_type_columns: - model_level_now_data.loc[ - (model_plot_name, - fcst_var_level, - expected_date) - ][col] = ( - model_level_now_stat_file_data_indexed \ - .loc[:][col] - ) - else: - logger.warning("Model "+str(model_num)+" "+model_name+" " - +"with plot name "+model_plot_name+" " - +"file: "+model_stat_file+" does not exist") - model_level_now_data = ( - pd.DataFrame(np.nan, - index=model_level_now_data_index, - columns=[ 'TOTAL' ]) - ) - if vl > 0: - model_now_data = pd.concat( - [model_now_data, model_level_now_data] - ) - else: - model_now_data = model_level_now_data - if model_num > 1: - model_data = pd.concat([model_data, model_now_data]) - else: - model_data = model_now_data - if fcst_var_units_list != []: - fcst_var_units_plot_title = ( - '['+', '.join(list(set(fcst_var_units_list)))+']' - ) - else: - fcst_var_units_plot_title = '' - if obs_var_units_list != []: - obs_var_units_plot_title = ( - '['+', '.join(list(set(obs_var_units_list)))+']' - ) - else: - obs_var_units_plot_title = '' - # Calculate statistics and plots - logger.info("Calculating and plotting statistics") - for stat in stats_list: - logger.debug("Working on "+stat) - stat_values, stat_values_array, stat_plot_name = ( - plot_util.calculate_stat(logger, model_data, stat) - ) - if event_equalization == "True": - logger.debug("Doing event equalization") - for l in range(len(stat_values_array[:,0,0])): - for vl in range(len(fcst_var_level_list)): - stat_values_array[l,:,vl,:] = ( - np.ma.mask_cols(stat_values_array[l,:,vl,:]) - ) - np.ma.set_fill_value(stat_values_array, np.nan) - if (stat == 'fbar_obar' or stat == 'orate_frate' - or stat == 'baser_frate'): - nsubplots = nmodels + 1 - else: - nsubplots = nmodels - if nsubplots == 1: - fig = plt.figure(figsize=(10,12)) - gs = gridspec.GridSpec(1,1) - elif nsubplots == 2: - fig = plt.figure(figsize=(10,12)) - gs = gridspec.GridSpec(2,1) - gs.update(hspace=0.35) - elif nsubplots > 2 and nsubplots <= 4: - fig = plt.figure(figsize=(20,12)) - gs = gridspec.GridSpec(2,2) - gs.update(wspace=0.4, hspace=0.35) - elif nsubplots > 4 and nsubplots <= 6: - fig = plt.figure(figsize=(30,12)) - gs = gridspec.GridSpec(2,3) - gs.update(wspace=0.4, hspace=0.35) - elif nsubplots > 6 and nsubplots <= 9: - fig = plt.figure(figsize=(30,18)) - gs = gridspec.GridSpec(3,3) - gs.update(wspace=0.4, hspace=0.35) - if (stat == 'fbar_obar' or stat == 'orate_frate' - or stat == 'baser_frate'): - logger.debug("Plotting observations") - obs_stat_values_array = stat_values_array[1,0,:,:] - ax = plt.subplot(gs[0]) - ax.grid(True) - ax.tick_params(axis='x', pad=15) - ax.set_xlabel(date_type.title()+' Date', labelpad=20) - ax.set_xlim([plot_time_dates[0],plot_time_dates[-1]]) - ax.xaxis.set_major_locator( - md.DayLocator(interval=date_tick_intvl) - ) - ax.xaxis.set_major_formatter(md.DateFormatter('%d%b%Y')) - ax.xaxis.set_minor_locator(md.DayLocator()) - ax.tick_params(axis='y', pad=15) - ax.set_ylabel('Pressure 
Level (hPa)', labelpad=20) - ax.set_yscale('log') - ax.invert_yaxis() - ax.minorticks_off() - ax.set_yticks(fcst_var_levels_int) - ax.set_yticklabels(fcst_var_levels_int) - ax.set_ylim([fcst_var_levels_int[0],fcst_var_levels_int[-1]]) - ax.set_title('obs', loc='left') - CF1 = ax.contourf(xmesh, ymesh, obs_stat_values_array, - cmap=cmap, - locator=matplotlib.ticker.MaxNLocator( - symmetric=True - ), extend='both') - C1 = ax.contour(xmesh, ymesh, obs_stat_values_array, - levels=CF1.levels, - colors='k', - linewidths=1.0) - ax.clabel(C1, C1.levels, - fmt='%1.2f', - inline=True, - fontsize=12.5) - for model_info in model_info_list: - model_num = model_info_list.index(model_info) + 1 - model_idx = model_info_list.index(model_info) - model_name = model_info[0] - model_plot_name = model_info[1] - model_obtype = model_info[2] - model_stat_values_array = stat_values_array[0,model_idx,:,:] - if (stat == 'fbar_obar' or stat == 'orate_frate' - or stat == 'baser_frate'): - ax = plt.subplot(gs[model_num]) - else: - ax = plt.subplot(gs[model_idx]) - ax.grid(True) - ax.tick_params(axis='x', pad=15) - ax.set_xlabel(date_type.title()+' Date', labelpad=20) - ax.set_xlim([plot_time_dates[0],plot_time_dates[-1]]) - ax.xaxis.set_major_locator( - md.DayLocator(interval=date_tick_intvl) - ) - ax.xaxis.set_major_formatter(md.DateFormatter('%d%b%Y')) - ax.xaxis.set_minor_locator(md.DayLocator()) - ax.tick_params(axis='y', pad=15) - ax.set_ylabel('Pressure Level (hPa)', labelpad=20) - ax.set_yscale('log') - ax.invert_yaxis() - ax.minorticks_off() - ax.set_yticks(fcst_var_levels_int) - ax.set_yticklabels(fcst_var_levels_int) - ax.set_ylim([fcst_var_levels_int[0],fcst_var_levels_int[-1]]) - if (stat == 'fbar_obar' or stat == 'orate_frate' - or stat == 'baser_frate'): - logger.debug("Plotting model "+str(model_num)+" " - +model_name+" - obs " - +"with name on plot "+model_plot_name+" " - +"- obs") - ax.set_title(model_plot_name+' - obs', loc='left') - model_obs_diff = ( - model_stat_values_array - - stat_values_array[1,model_idx,:,:] - ) - if model_num == 1: - clevels_diff = plot_util.get_clevels(model_obs_diff) - CF2 = ax.contourf(xmesh, ymesh, model_obs_diff, - levels=clevels_diff, - cmap=cmap_diff, - locator= matplotlib.ticker.MaxNLocator( - symmetric=True - ), - extend='both') - C2 = ax.contour(xmesh, ymesh, model_obs_diff, - levels=CF2.levels, colors='k', - linewidths=1.0) - ax.clabel(C2, C2.levels, - fmt='%1.2f', - inline=True, - fontsize=12.5) - else: - CF = ax.contourf(xmesh, ymesh, model_obs_diff, - levels=CF2.levels, - cmap=cmap_diff, - locator= matplotlib.ticker.MaxNLocator( - symmetric=True - ), - extend='both') - C = ax.contour(xmesh, ymesh, model_obs_diff, - levels=CF2.levels, - colors='k', - linewidths=1.0) - ax.clabel(C, C.levels, - fmt='%1.2f', - inline=True, - fontsize=12.5) - elif stat == 'bias' or stat == 'fbias': - logger.debug("Plotting model "+str(model_num)+" " - +model_name+" with name on plot " - +model_plot_name) - ax.set_title(model_plot_name, loc='left') - if model_num == 1: - clevels_bias = plot_util.get_clevels( - model_stat_values_array - ) - CF1 = ax.contourf(xmesh, ymesh, model_stat_values_array, - levels=clevels_bias, - cmap=cmap_bias, - locator=matplotlib.ticker.MaxNLocator( - symmetric=True - ), extend='both') - C1 = ax.contour(xmesh, ymesh, model_stat_values_array, - levels=CF1.levels, - colors='k', - linewidths=1.0) - ax.clabel(C1, C1.levels, - fmt='%1.2f', - inline=True, - fontsize=12.5) - else: - CF = ax.contourf(xmesh, ymesh, model_stat_values_array, - levels=CF1.levels, - 
cmap=cmap_bias, - extend='both') - C = ax.contour(xmesh, ymesh, model_stat_values_array, - levels=CF1.levels, - colors='k', - linewidths=1.0) - ax.clabel(C, C.levels, - fmt='%1.2f', - inline=True, - fontsize=12.5) - else: - if model_num == 1: - logger.debug("Plotting model "+str(model_num)+" " - +model_name+" with name on plot " - +model_plot_name) - model1_name = model_name - model1_plot_name = model_plot_name - model1_stat_values_array = model_stat_values_array - ax.set_title(model_plot_name, loc='left') - CF1 = ax.contourf(xmesh, ymesh, model_stat_values_array, - cmap=cmap, - extend='both') - C1 = ax.contour(xmesh, ymesh, model_stat_values_array, - levels=CF1.levels, - colors='k', - linewidths=1.0) - ax.clabel(C1, C1.levels, - fmt='%1.2f', - inline=True, - fontsize=12.5) - else: - logger.debug("Plotting model "+str(model_num)+" " - +model_name+" - model 1 "+model1_name+" " - +"with name on plot "+model_plot_name+" " - +"- "+model1_plot_name) - ax.set_title(model_plot_name+' - '+model1_plot_name, - loc='left') - model_model1_diff = ( - model_stat_values_array - model1_stat_values_array - ) - if model_num == 2: - clevels_diff = plot_util.get_clevels(model_model1_diff) - CF2 = ax.contourf(xmesh, ymesh, model_model1_diff, - levels=clevels_diff, - cmap=cmap_diff, - locator= \ - matplotlib.ticker.MaxNLocator( - symmetric=True - ), - extend='both') - C2 = ax.contour(xmesh, ymesh, model_model1_diff, - levels=CF2.levels, colors='k', - linewidths=1.0) - ax.clabel(C2, C2.levels, - fmt='%1.2f', - inline=True, - fontsize=12.5) - else: - CF = ax.contourf(xmesh, ymesh, model_model1_diff, - levels=CF2.levels, - cmap=cmap_diff, - locator= \ - matplotlib.ticker.MaxNLocator( - symmetric=True - ), - extend='both') - C = ax.contour(xmesh, ymesh, model_model1_diff, - levels=CF2.levels, - colors='k', - linewidths=1.0) - ax.clabel(C, C.levels, - fmt='%1.2f', - inline=True, - fontsize=12.5) - cax = fig.add_axes([0.1, -0.05, 0.8, 0.05]) - if (stat == 'fbar_obar' or stat == 'orate_frate' - or stat == 'baser_frate'): - cbar = fig.colorbar(CF2, cax=cax, orientation='horizontal', - ticks=CF2.levels) - elif stat == 'bias' or stat == 'fbias': - cbar = fig.colorbar(CF1, cax=cax, orientation='horizontal', - ticks=CF1.levels) - else: - if nsubplots == 1: - cbar = fig.colorbar(CF1, cax=cax, orientation='horizontal', - ticks=CF1.levels) - else: - cbar = fig.colorbar(CF2, cax=cax, orientation='horizontal', - ticks=CF2.levels) - fig.suptitle(stat_plot_name+'\n' - +fcst_var_plot_title+' '+fcst_var_units_plot_title - +', '+obs_var_plot_title+' '+obs_var_units_plot_title+'\n' - +extra_plot_title+'\n' - +date_time_plot_title+', '+fcst_lead_plot_title+'\n', - fontsize=14, fontweight='bold') - savefig_imagename = ( - stat+'_'+base_name.replace('FCSTLEVELHOLDER', 'all') \ - .replace('OBSLEVELHOLDER', 'all')+'.png' - ) - savefig_image = os.path.join(output_base_dir, 'images', - savefig_imagename) - logger.info("Saving image as "+savefig_image) - plt.savefig(savefig_image, bbox_inches='tight') - plt.close() diff --git a/ush/plotting_scripts/plot_lead_average.py b/ush/plotting_scripts/plot_lead_average.py deleted file mode 100644 index 977940359..000000000 --- a/ush/plotting_scripts/plot_lead_average.py +++ /dev/null @@ -1,657 +0,0 @@ -''' -Name: plot_lead_average.py -Contact(s): Mallory Row -Abstract: Reads average and CI files from plot_time_series.py to make dieoff plots -History Log: Third version -Usage: Called by make_plots_wrapper.py -Parameters: None -Input Files: Text files -Output Files: .png images -Condition codes: 0 for 
success, 1 for failure -''' - -import os -import numpy as np -import pandas as pd -import itertools -import warnings -import logging -import datetime -import re -import sys -import matplotlib -matplotlib.use('agg') -import matplotlib.pyplot as plt -import matplotlib.dates as md - -import plot_util as plot_util -from plot_util import get_ci_file, get_lead_avg_file - -# add metplus directory to path so the wrappers and utilities can be found -sys.path.insert(0, os.path.abspath(os.path.join(os.path.dirname(__file__), - '..', - '..'))) -from metplus.util import do_string_sub - -# Read environment variables set in make_plots_wrapper.py -verif_case = os.environ['VERIF_CASE'] -verif_type = os.environ['VERIF_TYPE'] -date_type = os.environ['DATE_TYPE'] -valid_beg = os.environ['VALID_BEG'] -valid_end = os.environ['VALID_END'] -init_beg = os.environ['INIT_BEG'] -init_end = os.environ['INIT_END'] -fcst_valid_hour_list = os.environ['FCST_VALID_HOUR'].split(', ') -fcst_valid_hour = os.environ['FCST_VALID_HOUR'] -fcst_init_hour_list = os.environ['FCST_INIT_HOUR'].split(', ') -fcst_init_hour = os.environ['FCST_INIT_HOUR'] -obs_valid_hour_list = os.environ['OBS_VALID_HOUR'].split(', ') -obs_valid_hour = os.environ['OBS_VALID_HOUR'] -obs_init_hour_list = os.environ['OBS_INIT_HOUR'].split(', ') -obs_init_hour = os.environ['OBS_INIT_HOUR'] -fcst_lead_list = [os.environ['FCST_LEAD'].split(', ')] -fcst_var_name = os.environ['FCST_VAR'] -fcst_var_units = os.environ['FCST_UNITS'] -fcst_var_level_list = os.environ['FCST_LEVEL'].split(', ') -fcst_var_thresh_list = os.environ['FCST_THRESH'].split(', ') -obs_var_name = os.environ['OBS_VAR'] -obs_var_units = os.environ['OBS_UNITS'] -obs_var_level_list = os.environ['OBS_LEVEL'].split(', ') -obs_var_thresh_list = os.environ['OBS_THRESH'].split(', ') -interp_mthd = os.environ['INTERP_MTHD'] -interp_pnts = os.environ['INTERP_PNTS'] -vx_mask = os.environ['VX_MASK'] -alpha = os.environ['ALPHA'] -desc = os.environ['DESC'] -obs_lead = os.environ['OBS_LEAD'] -cov_thresh = os.environ['COV_THRESH'] -stats_list = os.environ['STATS'].split(', ') -model_list = os.environ['MODEL'].split(', ') -model_obtype_list = os.environ['MODEL_OBTYPE'].split(', ') -model_reference_name_list = os.environ['MODEL_REFERENCE_NAME'].split(', ') -dump_row_filename_template = os.environ['DUMP_ROW_FILENAME'] -average_method = os.environ['AVERAGE_METHOD'] -ci_method = os.environ['CI_METHOD'] -verif_grid = os.environ['VERIF_GRID'] -event_equalization = os.environ['EVENT_EQUALIZATION'] -met_version = os.environ['MET_VERSION'] -input_base_dir = os.environ['INPUT_BASE_DIR'] -output_base_dir = os.environ['OUTPUT_BASE_DIR'] -log_metplus = os.environ['LOG_METPLUS'] -log_level = os.environ['LOG_LEVEL'] - -# General set up and settings -# Plots -warnings.filterwarnings('ignore') -plt.rcParams['font.weight'] = 'bold' -plt.rcParams['axes.labelsize'] = 15 -plt.rcParams['axes.labelweight'] = 'bold' -plt.rcParams['xtick.labelsize'] = 15 -plt.rcParams['ytick.labelsize'] = 15 -plt.rcParams['axes.titlesize'] = 15 -plt.rcParams['axes.titleweight'] = 'bold' -plt.rcParams['axes.formatter.useoffset'] = False -colors = [ - '#000000', '#2F1E80', '#D55E00', '#882255', - '#018C66', '#D6B616', '#036398', '#CC79A7' -] -# Logging -logger = logging.getLogger(log_metplus) -logger.setLevel(log_level) -formatter = logging.Formatter( - '%(asctime)s.%(msecs)03d (%(filename)s:%(lineno)d) %(levelname)s: ' - +'%(message)s', - '%m/%d %H:%M:%S' - ) -file_handler = logging.FileHandler(log_metplus, mode='a') -file_handler.setFormatter(formatter) 
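The logging setup just above is the pattern all of these deleted plotting scripts share: each one attaches an appending FileHandler for the file named in LOG_METPLUS, so script messages interleave with the wrapper's in a single METplus log. A minimal sketch of that pattern, assuming only that LOG_METPLUS and LOG_LEVEL are exported; the fallback values used here are hypothetical:

```python
import logging
import os

# Reuse the log file and level the wrapper exported (fallbacks are made up).
log_metplus = os.environ.get('LOG_METPLUS', 'metplus.log')
log_level = os.environ.get('LOG_LEVEL', 'INFO')

logger = logging.getLogger(log_metplus)
logger.setLevel(log_level)

# Same timestamped format string the deleted scripts use.
formatter = logging.Formatter(
    '%(asctime)s.%(msecs)03d (%(filename)s:%(lineno)d) %(levelname)s: '
    '%(message)s',
    '%m/%d %H:%M:%S'
)
# mode='a' appends, so wrapper and script messages land in one file.
file_handler = logging.FileHandler(log_metplus, mode='a')
file_handler.setFormatter(formatter)
logger.addHandler(file_handler)

logger.info("plotting script attached to the METplus log")
```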
-logger.addHandler(file_handler) - - -if len(fcst_lead_list[0]) < 2: - logger.warning("Must provide more than one forecast lead to " - "plot lead average") - sys.exit(0) - -output_data_dir = os.path.join(output_base_dir, 'data') -output_imgs_dir = os.path.join(output_base_dir, 'imgs') -# Model info -model_info_list = list( - zip(model_list, - model_reference_name_list, - model_obtype_list, - ) -) -nmodels = len(model_info_list) -# Plot info -plot_info_list = list( - itertools.product(*[fcst_lead_list, - fcst_var_level_list, - fcst_var_thresh_list]) - ) -# Date and time information and build title for plot -date_beg = os.environ[date_type+'_BEG'] -date_end = os.environ[date_type+'_END'] -date_plot_title = ( - date_type.title()+': ' - +str(datetime.datetime.strptime(date_beg, '%Y%m%d').strftime('%d%b%Y')) - +'-' - +str(datetime.datetime.strptime(date_end, '%Y%m%d').strftime('%d%b%Y')) -) -valid_init_dict = { - 'fcst_valid_hour_beg': fcst_valid_hour_list[0], - 'fcst_valid_hour_end': fcst_valid_hour_list[-1], - 'fcst_init_hour_beg': fcst_init_hour_list[0], - 'fcst_init_hour_end': fcst_init_hour_list[-1], - 'obs_valid_hour_beg': obs_valid_hour_list[0], - 'obs_valid_hour_end': obs_valid_hour_list[-1], - 'obs_init_hour_beg': obs_init_hour_list[0], - 'obs_init_hour_end': obs_init_hour_list[-1], - 'valid_hour_beg': '', - 'valid_hour_end': '', - 'init_hour_beg': '', - 'init_hour_end': '' -} -valid_init_type_list = [ - 'valid_hour_beg', 'valid_hour_end', 'init_hour_beg', 'init_hour_end' -] -for vitype in valid_init_type_list: - if (valid_init_dict['fcst_'+vitype] != '' - and valid_init_dict['obs_'+vitype] == ''): - valid_init_dict[vitype] = valid_init_dict['fcst_'+vitype] - elif (valid_init_dict['obs_'+vitype] != '' - and valid_init_dict['fcst_'+vitype] == ''): - valid_init_dict[vitype] = valid_init_dict['obs_'+vitype] - if valid_init_dict['fcst_'+vitype] == '': - if 'beg' in vitype: - valid_init_dict['fcst_'+vitype] = '000000' - elif 'end' in vitype: - valid_init_dict['fcst_'+vitype] = '235959' - if valid_init_dict['obs_'+vitype] == '': - if 'beg' in vitype: - valid_init_dict['obs_'+vitype] = '000000' - elif 'end' in vitype: - valid_init_dict['obs_'+vitype] = '235959' - if valid_init_dict['fcst_'+vitype] == valid_init_dict['obs_'+vitype]: - valid_init_dict[vitype] = valid_init_dict['fcst_'+vitype] -time_plot_title = '' -for vi in ['valid_hour', 'init_hour']: - beg_hr = valid_init_dict[vi+'_beg'] - end_hr = valid_init_dict[vi+'_end'] - fcst_beg_hr = valid_init_dict['fcst_'+vi+'_beg'] - fcst_end_hr = valid_init_dict['fcst_'+vi+'_end'] - obs_beg_hr = valid_init_dict['obs_'+vi+'_beg'] - obs_end_hr = valid_init_dict['obs_'+vi+'_end'] - time_label = vi.split('_')[0].title() - if beg_hr != '' and end_hr != '': - if beg_hr == end_hr: - time_plot_title+=', '+time_label+': '+beg_hr[0:4]+'Z' - else: - time_plot_title+=( - ', '+time_label+': '+beg_hr[0:4]+'-'+end_hr[0:4]+'Z' - ) - else: - if fcst_beg_hr == fcst_end_hr: - time_plot_title+=', Fcst '+time_label+': '+fcst_beg_hr[0:4]+'Z' - else: - time_plot_title+=( - ', Fcst '+time_label+': '+fcst_beg_hr[0:4]+'-' - +fcst_end_hr[0:4]+'Z' - ) - if obs_beg_hr == obs_end_hr: - time_plot_title+=', Obs '+time_label+': '+obs_beg_hr[0:4]+'Z' - else: - time_plot_title+=( - ', Obs '+time_label+': '+obs_beg_hr[0:4]+'-' - +obs_end_hr[0:4]+'Z' - ) -date_time_plot_title = date_plot_title+time_plot_title -# Common plotting information and build title for plot -if 'WV1' in interp_mthd or interp_mthd == '': - extra_plot_title = verif_grid+'-'+vx_mask -else: - extra_plot_title = 
interp_mthd+', '+verif_grid+'-'+vx_mask -if desc != '': - extra_plot_title+=', Desc: '+desc -if obs_lead != '': - extra_plot_title+=', Obs Lead: '+obs_lead -if interp_pnts != '': - extra_plot_title+=', Interp. Pts.: '+interp_pnts -if cov_thresh != '': - extra_plot_title+=', Cov. Thresh:'+cov_thresh -if alpha != '': - extra_plot_title+=', Alpha: '+alpha - -# Start looping to make plots -for plot_info in plot_info_list: - fcst_leads = plot_info[0] - fcst_lead_timedeltas = np.full_like(fcst_leads, np.nan, dtype=float) - for fcst_lead in fcst_leads: - fcst_lead_idx = fcst_leads.index(fcst_lead) - fcst_lead_timedelta = datetime.timedelta( - hours=int(fcst_lead[:-4]), - minutes=int(fcst_lead[-4:-2]), - seconds=int(fcst_lead[-2:]) - ).total_seconds() - fcst_lead_timedeltas[fcst_lead_idx] = float(fcst_lead_timedelta) - fcst_lead_timedeltas_str = [] - for tdelta in fcst_lead_timedeltas: - h = int(tdelta/3600) - m = int((tdelta-(h*3600))/60) - s = int(tdelta-(h*3600)-(m*60)) - if h < 100: - tdelta_str = f"{h:02d}" - else: - tdelta_str = f"{h:03d}" - if m != 0: - tdelta_str+=f":{m:02d}" - if s != 0: - tdelta_str+=f":{s:02d}" - fcst_lead_timedeltas_str.append(tdelta_str) - fcst_var_level = plot_info[1] - obs_var_level = obs_var_level_list[ - fcst_var_level_list.index(fcst_var_level) - ] - fcst_var_thresh = plot_info[2] - obs_var_thresh = obs_var_thresh_list[ - fcst_var_thresh_list.index(fcst_var_thresh) - ] - fcst_var_thresh_symbol, fcst_var_thresh_letter = plot_util.format_thresh( - fcst_var_thresh - ) - obs_var_thresh_symbol, obs_var_thresh_letter = plot_util.format_thresh( - obs_var_thresh - ) - # Build plot title for variable info - fcst_var_plot_title = 'Fcst: '+fcst_var_name+' '+fcst_var_level - obs_var_plot_title = 'Obs: '+obs_var_name+' '+obs_var_level - if 'WV1' in interp_mthd: - fcst_var_plot_title+=' '+interp_mthd - obs_var_plot_title+=' '+interp_mthd - if fcst_var_thresh != '': - fcst_var_plot_title+=' '+fcst_var_thresh - if obs_var_thresh != '': - obs_var_plot_title+=' '+obs_var_thresh - if fcst_var_units == '': - fcst_var_units_list = [] - else: - fcst_var_units_list = fcst_var_units.split(', ') - if obs_var_units == '': - obs_var_units_list = [] - else: - obs_var_units_list = obs_var_units.split(', ') - logger.info("Working on forecast lead averages " - +"for forecast variable "+fcst_var_name+" "+fcst_var_level+" " - +fcst_var_thresh) - # Set up base name for file naming convention for lead averages files, - # and output data and images - base_name = date_type.lower()+date_beg+'to'+date_end - if (valid_init_dict['valid_hour_beg'] != '' - and valid_init_dict['valid_hour_end'] != ''): - base_name+=( - '_valid'+valid_init_dict['valid_hour_beg'][0:4] - +'to'+valid_init_dict['valid_hour_end'][0:4]+'Z' - ) - else: - base_name+=( - '_fcst_valid'+valid_init_dict['fcst_valid_hour_beg'][0:4] - +'to'+valid_init_dict['fcst_valid_hour_end'][0:4]+'Z' - +'_obs_valid'+valid_init_dict['obs_valid_hour_beg'][0:4] - +'to'+valid_init_dict['obs_valid_hour_end'][0:4]+'Z' - ) - if (valid_init_dict['init_hour_beg'] != '' - and valid_init_dict['init_hour_end'] != ''): - base_name+=( - '_init'+valid_init_dict['init_hour_beg'][0:4] - +'to'+valid_init_dict['init_hour_end'][0:4]+'Z' - ) - else: - base_name+=( - '_fcst_init'+valid_init_dict['fcst_init_hour_beg'][0:4] - +'to'+valid_init_dict['fcst_init_hour_end'][0:4]+'Z' - +'_obs_init'+valid_init_dict['obs_init_hour_beg'][0:4] - +'to'+valid_init_dict['obs_init_hour_end']+'Z' - ) - base_name+=( - '_fcst_lead_avgs' - +'_fcst'+fcst_var_name+fcst_var_level - 
+fcst_var_thresh_letter.replace(',', '_')+interp_mthd - +'_obs'+obs_var_name+obs_var_level - +obs_var_thresh_letter.replace(',', '_')+interp_mthd - +'_vxmask'+vx_mask - ) - if desc != '': - base_name+='_desc'+desc - if obs_lead != '': - base_name+='_obs_lead'+obs_lead - if interp_pnts != '': - base_name+='_interp_pnts'+interp_pnts - if cov_thresh != '': - cov_thresh_symbol, cov_thresh_letter = plot_util.format_thresh( - cov_thresh - ) - base_name+='_cov_thresh'+cov_thresh_letter.replace(',', '_') - if alpha != '': - base_name+='_alpha'+alpha - for stat in stats_list: - logger.debug("Working on "+stat) - stat_plot_name = plot_util.get_stat_plot_name(logger, stat) - if (stat == 'fbar_obar' or stat == 'orate_frate' - or stat == 'baser_frate'): - avg_file_cols = ['LEADS', 'FCST_UNITS', 'OBS_UNITS', - 'VALS', 'OBS_VALS'] - else: - avg_file_cols = ['LEADS', 'FCST_UNITS', 'OBS_UNITS', 'VALS'] - avg_cols_to_array = avg_file_cols[3:] - CI_file_cols = ['LEADS', 'CI_VALS'] - CI_bar_max_widths = np.append( - np.diff(fcst_lead_timedeltas), - fcst_lead_timedeltas[-1]-fcst_lead_timedeltas[-2] - )/1.5 - CI_bar_min_widths = np.append( - np.diff(fcst_lead_timedeltas), - fcst_lead_timedeltas[-1]-fcst_lead_timedeltas[-2] - )/nmodels - CI_bar_intvl_widths = ( - (CI_bar_max_widths-CI_bar_min_widths)/nmodels - ) - # Reading in model lead average files produced from plot_time_series.py - logger.info("Reading in model data") - for model_info in model_info_list: - model_num = model_info_list.index(model_info) + 1 - model_idx = model_info_list.index(model_info) - model_name = model_info[0] - model_plot_name = model_info[1] - model_obtype = model_info[2] - model_avg_data = np.empty( - [len(avg_cols_to_array), len(fcst_leads)] - ) - model_avg_data.fill(np.nan) -# lead_avg_filename = ( -# stat+'_' -# +model_plot_name+'_'+model_obtype+'_' -# +base_name -# +'.txt' -# ) -# lead_avg_file = os.path.join(output_base_dir, 'data', -# lead_avg_filename) - model_stat_template = dump_row_filename_template - string_sub_dict = { - 'model': model_name, - 'model_reference': model_plot_name, - 'obtype': model_obtype, - 'fcst_lead': fcst_lead, - 'fcst_level': fcst_var_level, - 'obs_level': obs_var_level, - 'fcst_thresh': fcst_var_thresh, - 'obs_thresh': obs_var_thresh, - } - model_stat_file = do_string_sub(model_stat_template, - **string_sub_dict) - logger.debug(f"FCST LEAD IS {fcst_lead}") - lead_avg_file = get_lead_avg_file(stat, - model_stat_file, - fcst_lead, - output_base_dir) - if os.path.exists(lead_avg_file): - nrow = sum(1 for line in open(lead_avg_file)) - if nrow == 0: - logger.warning("Model "+str(model_num)+" " - +model_name+" with plot name " - +model_plot_name+" file: " - +lead_avg_file+" empty") - else: - logger.debug("Model "+str(model_num)+" " - +model_name+" with plot name " - +model_plot_name+" file: " - +lead_avg_file+" exists") - model_avg_file_data = pd.read_csv( - lead_avg_file, sep=' ', header=None, - names=avg_file_cols, dtype=str - ) - model_avg_file_data_leads = ( - model_avg_file_data.loc[:]['LEADS'].tolist() - ) - if model_avg_file_data.loc[0]['FCST_UNITS'] == '[NA]': - fcst_var_units_plot_title = '' - else: - fcst_var_units_plot_title = ( - model_avg_file_data.loc[0]['FCST_UNITS'] - ) - if model_avg_file_data.loc[0]['OBS_UNITS'] == '[NA]': - obs_var_units_plot_title = '' - else: - obs_var_units_plot_title = ( - model_avg_file_data.loc[0]['OBS_UNITS'] - ) - for fcst_lead in fcst_leads: - fcst_lead_idx = fcst_leads.index(fcst_lead) - if fcst_lead in model_avg_file_data_leads: - model_fcst_lead_idx = ( - 
model_avg_file_data_leads.index(fcst_lead) - ) - for col in avg_cols_to_array: - col_idx = avg_cols_to_array.index(col) - model_avg_file_data_col = ( - model_avg_file_data.loc[:][col].tolist() - ) - if (model_avg_file_data_col[model_fcst_lead_idx] - != '--'): - model_avg_data[col_idx, fcst_lead_idx] = ( - float(model_avg_file_data_col \ - [model_fcst_lead_idx]) - ) - else: - logger.warning("Model "+str(model_num)+" " - +model_name+" with plot name " - +model_plot_name+" file: " - +lead_avg_file+" does not exist") -# CI_filename = ( -# stat+'_' -# +model_plot_name+'_'+model_obtype+'_' -# +base_name -# +'_CI_'+ci_method+'.txt' -# ) -# CI_file = os.path.join(output_base_dir, 'data', CI_filename) - CI_file = get_ci_file(stat, - model_stat_file, - fcst_lead, - output_base_dir, - ci_method) - - model_CI_data = np.empty(len(fcst_leads)) - model_CI_data.fill(np.nan) - if ci_method != 'NONE': - if (stat == 'fbar_obar' or stat == 'orate_frate' - or stat == 'baser_frate'): - diff_from_avg_data = model_avg_data[1,:] - if os.path.exists(CI_file): - nrow = sum(1 for line in open(CI_file)) - if nrow == 0: - logger.warning("Model "+str(model_num)+" " - +model_name+" with plot name " - +model_plot_name+" file: " - +CI_file+" empty") - else: - logger.debug("Model "+str(model_num)+" " - +model_name+" with plot name " - +model_plot_name+" file: " - +CI_file+" exists") - model_CI_file_data = pd.read_csv( - CI_file, sep=' ', header=None, - names=CI_file_cols, dtype=str - ) - model_CI_file_data_leads = ( - model_CI_file_data.loc[:]['LEADS'].tolist() - ) - model_CI_file_data_vals = ( - model_CI_file_data.loc[:]['CI_VALS'].tolist() - ) - for fcst_lead in fcst_leads: - fcst_lead_idx = ( - fcst_leads.index(fcst_lead) - ) - if fcst_lead in model_CI_file_data_leads: - model_CI_file_data_lead_idx = ( - model_CI_file_data_leads.index( - fcst_lead - ) - ) - if (model_CI_file_data_vals[fcst_lead_idx] - != '--'): - model_CI_data[fcst_lead_idx] = ( - float(model_CI_file_data_vals \ - [fcst_lead_idx]) - ) - else: - logger.warning("Model "+str(model_num)+" " - +model_name+" with plot name " - +model_plot_name+" file: " - +CI_file+" does not exist") - else: - if model_num == 1: - diff_from_avg_data = model_avg_data[0,:] - else: - if os.path.exists(CI_file): - nrow = sum(1 for line in open(CI_file)) - if nrow == 0: - logger.warning("Model "+str(model_num)+" " - +model_name+" with " - +"plot name " - +model_plot_name+" " - +"file: "+CI_file+" empty") - else: - logger.debug("Model "+str(model_num)+" " - +model_name+" with " - +"plot name " - +model_plot_name+" " - +"file: "+CI_file+" exists") - model_CI_file_data = pd.read_csv( - CI_file, sep=' ', header=None, - names=CI_file_cols, dtype=str - ) - model_CI_file_data_leads = ( - model_CI_file_data.loc[:]['LEADS'] \ - .tolist() - ) - model_CI_file_data_vals = ( - model_CI_file_data.loc[:]['CI_VALS'] \ - .tolist() - ) - for fcst_lead in fcst_leads: - fcst_lead_idx = ( - fcst_leads.index(fcst_lead) - ) - if fcst_lead in model_CI_file_data_leads: - model_CI_file_data_lead_idx = ( - model_CI_file_data_leads.index( - fcst_lead - ) - ) - if (model_CI_file_data_vals \ - [fcst_lead_idx] - != '--'): - model_CI_data[fcst_lead_idx] = ( - float(model_CI_file_data_vals \ - [fcst_lead_idx]) - ) - else: - logger.warning("Model "+str(model_num)+" " - +model_name+" with plot name " - +model_plot_name+" file: " - +CI_file+" does not exist") - if model_num == 1: - fig, (ax1, ax2) = plt.subplots(2,1,figsize=(10,12), - sharex=True) - ax1.grid(True) - ax1.tick_params(axis='x', pad=15) - 
ax1.set_xticks(fcst_lead_timedeltas) - ax1.set_xticklabels(fcst_lead_timedeltas_str) - ax1.set_xlim([fcst_lead_timedeltas[0], - fcst_lead_timedeltas[-1]]) - ax1.tick_params(axis='y', pad=15) - ax1.set_ylabel(average_method.title(), labelpad=30) - ax2.grid(True) - ax2.tick_params(axis='x', pad=15) - ax2.set_xlabel('Forecast Lead', labelpad=30) - ax2.tick_params(axis='y', pad=15) - ax2.set_ylabel('Difference', labelpad=30) - boxstyle = matplotlib.patches.BoxStyle('Square', pad=0.25) - props = {'boxstyle': boxstyle, - 'facecolor': 'white', - 'linestyle': 'solid', - 'linewidth': 1, - 'edgecolor': 'black',} - ax2.text(0.7055, 1.05, 'Note: differences outside the ' - +'outline bars are significant\n at the 95% ' - +'confidence interval', ha='center', va='center', - fontsize=10, bbox=props, transform=ax2.transAxes) - if (stat == 'fbar_obar' or stat == 'orate_frate' - or stat == 'baser_frate'): - ax1.plot(fcst_lead_timedeltas, model_avg_data[1,:], - color='#888888', - ls='-', linewidth=2.0, - marker='o', markersize=7, - label='obs', - zorder=4) - ax2.plot(fcst_lead_timedeltas, - np.zeros_like(fcst_lead_timedeltas), - color='#888888', - ls='-', linewidth=2.0, - zorder=4) - ax2.plot(fcst_lead_timedeltas, - model_avg_data[0,:] - diff_from_avg_data, - color=colors[model_idx], - ls='-', linewidth=2.0, - marker='o', markersize=7, - zorder=(nmodels-model_idx)+4) - else: - ax2.plot(fcst_lead_timedeltas, - np.zeros_like(fcst_lead_timedeltas), - color='black', - ls='-', linewidth=2.0, - zorder=4) - ax1.plot(fcst_lead_timedeltas, model_avg_data[0,:], - color=colors[model_idx], - ls='-', linewidth=2.0, - marker='o', markersize=7, - label=model_plot_name, - zorder=(nmodels-model_idx)+4) - else: - ax1.plot(fcst_lead_timedeltas, model_avg_data[0,:], - color=colors[model_idx], - ls='-', linewidth=2.0, - marker='o', markersize=7, - label=model_plot_name, - zorder=(nmodels-model_idx)+4) - ax2.plot(fcst_lead_timedeltas, - model_avg_data[0,:] - diff_from_avg_data, - color=colors[model_idx], - ls='-', linewidth=2.0, - marker='o', markersize=7, - zorder=(nmodels-model_idx)+4) - ax2.bar(fcst_lead_timedeltas, 2*np.absolute(model_CI_data), - bottom=-1*np.absolute(model_CI_data), - width=CI_bar_max_widths-(CI_bar_intvl_widths*model_idx), - color='None', edgecolor=colors[model_idx], linewidth=1.5) - fig.suptitle(stat_plot_name+'\n' - +fcst_var_plot_title+' '+fcst_var_units_plot_title - +', '+obs_var_plot_title+' '+obs_var_units_plot_title+'\n' - +extra_plot_title+'\n' - +date_time_plot_title, - fontsize=14, fontweight='bold') - if (stat == 'fbar_obar' or stat == 'orate_frate' - or stat == 'baser_frate'): - ax1.legend(bbox_to_anchor=(0.0, 1.01, 1.0, .102), loc=3, - ncol=nmodels+1, fontsize='13', - mode='expand', borderaxespad=0.) - else: - ax1.legend(bbox_to_anchor=(0.0, 1.01, 1.0, .102), loc=3, - ncol=nmodels, fontsize='13', - mode='expand', borderaxespad=0.) 
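The hollow bars drawn above with ax2.bar are how the deleted plot_lead_average.py marks significance: each bar spans [-CI, +CI] around the zero line of the difference panel, so a difference curve that pokes outside its bar is significant at the 95% level noted in the text box. A self-contained sketch of that idiom, using made-up leads, differences, and CI half-widths:

```python
import matplotlib
matplotlib.use('agg')  # headless backend, as in the deleted scripts
import matplotlib.pyplot as plt
import numpy as np

# Hypothetical forecast leads (seconds), model-minus-reference differences,
# and confidence-interval half-widths.
fcst_lead_timedeltas = np.array([6., 12., 24., 48.]) * 3600
diffs = np.array([0.05, -0.02, 0.10, 0.04])
model_CI_data = np.array([0.03, 0.04, 0.06, 0.08])

# Same width rule the script uses for its widest set of CI bars.
CI_bar_max_widths = np.append(
    np.diff(fcst_lead_timedeltas),
    fcst_lead_timedeltas[-1] - fcst_lead_timedeltas[-2]
) / 1.5

fig, ax2 = plt.subplots(figsize=(10, 6))
ax2.plot(fcst_lead_timedeltas, np.zeros_like(fcst_lead_timedeltas),
         color='black', ls='-', linewidth=2.0)
ax2.plot(fcst_lead_timedeltas, diffs, color='#2F1E80',
         ls='-', linewidth=2.0, marker='o', markersize=7)
# Hollow bars spanning [-CI, +CI] around zero: points outside them
# differ significantly from the reference.
ax2.bar(fcst_lead_timedeltas, 2 * np.absolute(model_CI_data),
        bottom=-1 * np.absolute(model_CI_data),
        width=CI_bar_max_widths,
        color='None', edgecolor='#2F1E80', linewidth=1.5)
plt.savefig('ci_bars_example.png', bbox_inches='tight')
plt.close()
```

Shrinking the width per model, as the script does with CI_bar_intvl_widths, keeps overlapping models' bars individually visible.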
- savefig_imagename = stat+'_'+base_name+'.png' - savefig_image = os.path.join(output_base_dir, 'images', - savefig_imagename) - logger.info("Saving image as "+savefig_image) - plt.savefig(savefig_image, bbox_inches='tight') - plt.close() diff --git a/ush/plotting_scripts/plot_lead_by_date.py b/ush/plotting_scripts/plot_lead_by_date.py deleted file mode 100644 index 2c5b52729..000000000 --- a/ush/plotting_scripts/plot_lead_by_date.py +++ /dev/null @@ -1,776 +0,0 @@ -''' -Name: plot_lead_by_date.py -Contact(s): Mallory Row -Abstract: Reads filtered files from stat_analysis_wrapper run_all_times - to make lead-date plots -History Log: Third version -Usage: Called by make_plots_wrapper.py -Parameters: None -Input Files: Text files -Output Files: .png images -Condition codes: 0 for success, 1 for failure -''' - -import os -import numpy as np -import pandas as pd -import itertools -import warnings -import logging -import datetime -import re -import sys -import matplotlib -matplotlib.use('agg') -import matplotlib.pyplot as plt -import matplotlib.dates as md -import matplotlib.gridspec as gridspec - -import plot_util as plot_util - -# add metplus directory to path so the wrappers and utilities can be found -sys.path.insert(0, os.path.abspath(os.path.join(os.path.dirname(__file__), - '..', - '..'))) -from metplus.util import do_string_sub - -# Read environment variables set in make_plots_wrapper.py -verif_case = os.environ['VERIF_CASE'] -verif_type = os.environ['VERIF_TYPE'] -date_type = os.environ['DATE_TYPE'] -valid_beg = os.environ['VALID_BEG'] -valid_end = os.environ['VALID_END'] -init_beg = os.environ['INIT_BEG'] -init_end = os.environ['INIT_END'] -fcst_valid_hour_list = os.environ['FCST_VALID_HOUR'].split(', ') -fcst_valid_hour = os.environ['FCST_VALID_HOUR'] -fcst_init_hour_list = os.environ['FCST_INIT_HOUR'].split(', ') -fcst_init_hour = os.environ['FCST_INIT_HOUR'] -obs_valid_hour_list = os.environ['OBS_VALID_HOUR'].split(', ') -obs_valid_hour = os.environ['OBS_VALID_HOUR'] -obs_init_hour_list = os.environ['OBS_INIT_HOUR'].split(', ') -obs_init_hour = os.environ['OBS_INIT_HOUR'] -fcst_lead_list = [os.environ['FCST_LEAD'].split(', ')] -fcst_var_name = os.environ['FCST_VAR'] -fcst_var_units = os.environ['FCST_UNITS'] -fcst_var_level_list = os.environ['FCST_LEVEL'].split(', ') -fcst_var_thresh_list = os.environ['FCST_THRESH'].split(', ') -obs_var_name = os.environ['OBS_VAR'] -obs_var_units = os.environ['OBS_UNITS'] -obs_var_level_list = os.environ['OBS_LEVEL'].split(', ') -obs_var_thresh_list = os.environ['OBS_THRESH'].split(', ') -interp_mthd = os.environ['INTERP_MTHD'] -interp_pnts = os.environ['INTERP_PNTS'] -vx_mask = os.environ['VX_MASK'] -alpha = os.environ['ALPHA'] -desc = os.environ['DESC'] -obs_lead = os.environ['OBS_LEAD'] -cov_thresh = os.environ['COV_THRESH'] -stats_list = os.environ['STATS'].split(', ') -model_list = os.environ['MODEL'].split(', ') -model_obtype_list = os.environ['MODEL_OBTYPE'].split(', ') -model_reference_name_list = os.environ['MODEL_REFERENCE_NAME'].split(', ') -dump_row_filename_template = os.environ['DUMP_ROW_FILENAME'] -average_method = os.environ['AVERAGE_METHOD'] -ci_method = os.environ['CI_METHOD'] -verif_grid = os.environ['VERIF_GRID'] -event_equalization = os.environ['EVENT_EQUALIZATION'] -met_version = os.environ['MET_VERSION'] -input_base_dir = os.environ['INPUT_BASE_DIR'] -output_base_dir = os.environ['OUTPUT_BASE_DIR'] -log_metplus = os.environ['LOG_METPLUS'] -log_level = os.environ['LOG_LEVEL'] - -# General set up and settings -# Plots 
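Like plot_lead_average.py above, plot_lead_by_date.py receives every option from make_plots_wrapper.py through environment variables, with list-valued settings flattened to comma-space separated strings. A small sketch of that contract, using hypothetical values:

```python
import os

# Wrapper side (hypothetical values): list options are exported as
# ', '-joined strings in the environment.
os.environ['MODEL'] = ', '.join(['MODEL_A', 'MODEL_B'])
os.environ['FCST_LEAD'] = ', '.join(['240000', '480000'])
os.environ['STATS'] = 'bias, rmse'

# Script side: split them back into Python lists at import time,
# exactly as the deleted scripts do.
model_list = os.environ['MODEL'].split(', ')
# Note the extra nesting: all leads are treated as one plot group.
fcst_lead_list = [os.environ['FCST_LEAD'].split(', ')]
stats_list = os.environ['STATS'].split(', ')

print(model_list)      # ['MODEL_A', 'MODEL_B']
print(fcst_lead_list)  # [['240000', '480000']]
print(stats_list)      # ['bias', 'rmse']
```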
-warnings.filterwarnings('ignore') -plt.rcParams['font.weight'] = 'bold' -plt.rcParams['axes.labelsize'] = 15 -plt.rcParams['axes.labelweight'] = 'bold' -plt.rcParams['xtick.labelsize'] = 15 -plt.rcParams['ytick.labelsize'] = 15 -plt.rcParams['axes.titlesize'] = 15 -plt.rcParams['axes.titleweight'] = 'bold' -plt.rcParams['axes.formatter.useoffset'] = False -cmap_bias = plt.cm.PiYG_r -cmap = plt.cm.BuPu -cmap_diff = plt.cm.coolwarm -# Logging -logger = logging.getLogger(log_metplus) -logger.setLevel(log_level) -formatter = logging.Formatter( - '%(asctime)s.%(msecs)03d (%(filename)s:%(lineno)d) %(levelname)s: ' - +'%(message)s', - '%m/%d %H:%M:%S' - ) -file_handler = logging.FileHandler(log_metplus, mode='a') -file_handler.setFormatter(formatter) -logger.addHandler(file_handler) -output_data_dir = os.path.join(output_base_dir, 'data') -output_imgs_dir = os.path.join(output_base_dir, 'imgs') -# Model info -model_info_list = list( - zip(model_list, - model_reference_name_list, - model_obtype_list, - ) -) -nmodels = len(model_info_list) -# Plot info -plot_info_list = list( - itertools.product(*[fcst_lead_list, - fcst_var_level_list, - fcst_var_thresh_list]) - ) -# Date and time infomation and build title for plot -date_beg = os.environ[date_type+'_BEG'] -date_end = os.environ[date_type+'_END'] -date_plot_title = ( - date_type.title()+': ' - +str(datetime.datetime.strptime(date_beg, '%Y%m%d').strftime('%d%b%Y')) - +'-' - +str(datetime.datetime.strptime(date_end, '%Y%m%d').strftime('%d%b%Y')) -) -valid_init_dict = { - 'fcst_valid_hour_beg': fcst_valid_hour_list[0], - 'fcst_valid_hour_end': fcst_valid_hour_list[-1], - 'fcst_init_hour_beg': fcst_init_hour_list[0], - 'fcst_init_hour_end': fcst_init_hour_list[-1], - 'obs_valid_hour_beg': obs_valid_hour_list[0], - 'obs_valid_hour_end': obs_valid_hour_list[-1], - 'obs_init_hour_beg': obs_init_hour_list[0], - 'obs_init_hour_end': obs_init_hour_list[-1], - 'valid_hour_beg': '', - 'valid_hour_end': '', - 'init_hour_beg': '', - 'init_hour_end': '' -} -valid_init_type_list = [ - 'valid_hour_beg', 'valid_hour_end', 'init_hour_beg', 'init_hour_end' -] -for vitype in valid_init_type_list: - if (valid_init_dict['fcst_'+vitype] != '' - and valid_init_dict['obs_'+vitype] == ''): - valid_init_dict[vitype] = valid_init_dict['fcst_'+vitype] - elif (valid_init_dict['obs_'+vitype] != '' - and valid_init_dict['fcst_'+vitype] == ''): - valid_init_dict[vitype] = valid_init_dict['obs_'+vitype] - if valid_init_dict['fcst_'+vitype] == '': - if 'beg' in vitype: - valid_init_dict['fcst_'+vitype] = '000000' - elif 'end' in vitype: - valid_init_dict['fcst_'+vitype] = '235959' - if valid_init_dict['obs_'+vitype] == '': - if 'beg' in vitype: - valid_init_dict['obs_'+vitype] = '000000' - elif 'end' in vitype: - valid_init_dict['obs_'+vitype] = '235959' - if valid_init_dict['fcst_'+vitype] == valid_init_dict['obs_'+vitype]: - valid_init_dict[vitype] = valid_init_dict['fcst_'+vitype] -time_plot_title = '' -for vi in ['valid_hour', 'init_hour']: - beg_hr = valid_init_dict[vi+'_beg'] - end_hr = valid_init_dict[vi+'_end'] - fcst_beg_hr = valid_init_dict['fcst_'+vi+'_beg'] - fcst_end_hr = valid_init_dict['fcst_'+vi+'_end'] - obs_beg_hr = valid_init_dict['obs_'+vi+'_beg'] - obs_end_hr = valid_init_dict['obs_'+vi+'_end'] - time_label = vi.split('_')[0].title() - if beg_hr != '' and end_hr != '': - if beg_hr == end_hr: - time_plot_title+=', '+time_label+': '+beg_hr[0:4]+'Z' - else: - time_plot_title+=( - ', '+time_label+': '+beg_hr[0:4]+'-'+end_hr[0:4]+'Z' - ) - else: - if fcst_beg_hr == 
fcst_end_hr: - time_plot_title+=', Fcst '+time_label+': '+fcst_beg_hr[0:4]+'Z' - else: - time_plot_title+=( - ', Fcst '+time_label+': '+fcst_beg_hr[0:4]+'-' - +fcst_end_hr[0:4]+'Z' - ) - if obs_beg_hr == obs_end_hr: - time_plot_title+=', Obs '+time_label+': '+obs_beg_hr[0:4]+'Z' - else: - time_plot_title+=( - ', Obs '+time_label+': '+obs_beg_hr[0:4]+'-' - +obs_end_hr[0:4]+'Z' - ) -date_time_plot_title = date_plot_title+time_plot_title -# Common plotting information and build title for plot -if 'WV1' not in interp_mthd or interp_mthd != '': - extra_plot_title = verif_grid+'-'+vx_mask -else: - extra_plot_title = interp_mthd+', '+verif_grid+'-'+vx_mask -if desc != '': - extra_plot_title+=', Desc: '+desc -if obs_lead != '': - extra_plot_title+=', Obs Lead: '+obs_lead -if interp_pnts != '': - extra_plot_title+=', Interp. Pts.: '+interp_pnts -if cov_thresh != '': - extra_plot_title+=', Cov. Thresh:'+cov_thresh -if alpha != '': - extra_plot_title+=', Alpha: '+alpha -# MET .stat file formatting -stat_file_base_columns = plot_util.get_stat_file_base_columns(met_version) -nbase_columns = len(stat_file_base_columns) - -# Start looping to make plots -for plot_info in plot_info_list: - fcst_leads = plot_info[0] - fcst_lead_timedeltas = np.full_like(fcst_leads, np.nan, dtype=float) - for fcst_lead in fcst_leads: - fcst_lead_idx = fcst_leads.index(fcst_lead) - fcst_lead_timedelta = datetime.timedelta( - hours=int(fcst_lead[:-4]), - minutes=int(fcst_lead[-4:-2]), - seconds=int(fcst_lead[-2:]) - ).total_seconds() - fcst_lead_timedeltas[fcst_lead_idx] = float(fcst_lead_timedelta) - fcst_lead_timedeltas_str = [] - for tdelta in fcst_lead_timedeltas: - h = int(tdelta/3600) - m = int((tdelta-(h*3600))/60) - s = int(tdelta-(h*3600)-(m*60)) - if h < 100: - tdelta_str = f"{h:02d}" - else: - tdelta_str = f"{h:03d}" - if m != 0: - tdelta_str+=f":{m:02d}" - if s != 0: - tdelta_str+=f":{s:02d}" - fcst_lead_timedeltas_str.append(tdelta_str) - fcst_var_level = plot_info[1] - obs_var_level = obs_var_level_list[ - fcst_var_level_list.index(fcst_var_level) - ] - fcst_var_thresh = plot_info[2] - obs_var_thresh = obs_var_thresh_list[ - fcst_var_thresh_list.index(fcst_var_thresh) - ] - fcst_var_thresh_symbol, fcst_var_thresh_letter = plot_util.format_thresh( - fcst_var_thresh - ) - obs_var_thresh_symbol, obs_var_thresh_letter = plot_util.format_thresh( - obs_var_thresh - ) - # Build plot title for variable info - fcst_var_plot_title = 'Fcst: '+fcst_var_name+' '+fcst_var_level - obs_var_plot_title = 'Obs: '+obs_var_name+' '+obs_var_level - if 'WV1' in interp_mthd: - fcst_var_plot_title+=' '+interp_mthd - obs_var_plot_title+=' '+interp_mthd - if fcst_var_thresh != '': - fcst_var_plot_title+=' '+fcst_var_thresh - if obs_var_thresh != '': - obs_var_plot_title+=' '+obs_var_thresh - if fcst_var_units == '': - fcst_var_units_list = [] - else: - fcst_var_units_list = fcst_var_units.split(', ') - if obs_var_units == '': - obs_var_units_list = [] - else: - obs_var_units_list = obs_var_units.split(', ') - logger.info("Working on forecast lead averages " - +"for forecast variable "+fcst_var_name+" " - +fcst_var_thresh) - # Set up base name for file naming convention for lead average files, - # and output data and images - base_name = date_type.lower()+date_beg+'to'+date_end - if (valid_init_dict['valid_hour_beg'] != '' - and valid_init_dict['valid_hour_end'] != ''): - base_name+=( - '_valid'+valid_init_dict['valid_hour_beg'][0:4] - +'to'+valid_init_dict['valid_hour_end'][0:4]+'Z' - ) - else: - base_name+=( - 
'_fcst_valid'+valid_init_dict['fcst_valid_hour_beg'][0:4] - +'to'+valid_init_dict['fcst_valid_hour_end'][0:4]+'Z' - +'_obs_valid'+valid_init_dict['obs_valid_hour_beg'][0:4] - +'to'+valid_init_dict['obs_valid_hour_end'][0:4]+'Z' - ) - if (valid_init_dict['init_hour_beg'] != '' - and valid_init_dict['init_hour_end'] != ''): - base_name+=( - '_init'+valid_init_dict['init_hour_beg'][0:4] - +'to'+valid_init_dict['init_hour_end'][0:4]+'Z' - ) - else: - base_name+=( - '_fcst_init'+valid_init_dict['fcst_init_hour_beg'][0:4] - +'to'+valid_init_dict['fcst_init_hour_end'][0:4]+'Z' - +'_obs_init'+valid_init_dict['obs_init_hour_beg'][0:4] - +'to'+valid_init_dict['obs_init_hour_end']+'Z' - ) - base_name+=( - '_fcst_leadFCSTLEADHOLDER' - +'_fcst'+fcst_var_name+fcst_var_level - +fcst_var_thresh_letter.replace(',', '_')+interp_mthd - +'_obs'+obs_var_name+obs_var_level - +obs_var_thresh_letter.replace(',', '_')+interp_mthd - +'_vxmask'+vx_mask - ) - if desc != '': - base_name+='_desc'+desc - if obs_lead != '': - base_name+='_obs_lead'+obs_lead - if interp_pnts != '': - base_name+='_interp_pnts'+interp_pnts - if cov_thresh != '': - cov_thresh_symbol, cov_thresh_letter = plot_util.format_thresh( - cov_thresh - ) - base_name+='_cov_thresh'+cov_thresh_letter.replace(',', '_') - if alpha != '': - base_name+='_alpha'+alpha - # Reading in model .stat files from stat_analysis - logger.info("Reading in model data") - for model_info in model_info_list: - model_num = model_info_list.index(model_info) + 1 - model_name = model_info[0] - model_plot_name = model_info[1] - model_obtype = model_info[2] - for fl in range(len(fcst_leads)): - fcst_lead = fcst_leads[fl] - # Set up expected date in MET .stat file - # and date plot information - plot_time_dates, expected_stat_file_dates = ( - plot_util.get_date_arrays(date_type, date_beg, date_end, - fcst_valid_hour, fcst_init_hour, - obs_valid_hour, obs_init_hour, - fcst_lead) - ) - total_dates = len(plot_time_dates) - if len(plot_time_dates) == 0: - logger.error("Date array constructed information from " - +"METplus conf file has length of 0. Not enough " - +"information was provided to build date " - +"information. 
Please check provided " - +"VALID/INIT_BEG/END and " - +"OBS/FCST_INIT/VALID_HOUR_LIST") - exit(1) - elif len(plot_time_dates) <= 3: - date_tick_intvl = 1 - elif len(plot_time_dates) > 3 and len(plot_time_dates) <= 10: - date_tick_intvl = 2 - elif len(plot_time_dates) > 10 and len(plot_time_dates) < 31: - date_tick_intvl = 5 - else: - date_tick_intvl = 10 - model_lead_now_data_index = pd.MultiIndex.from_product( - [[model_plot_name], [fcst_lead], expected_stat_file_dates], - names=['model_plot_name', 'leads', 'dates'] - ) -# model_stat_filename = ( -# model_plot_name+'_'+model_obtype+'_' -# +base_name.replace('FCSTLEADHOLDER', fcst_lead) -# +'_dump_row.stat' -# ) -# model_stat_file = os.path.join(input_base_dir, -# model_stat_filename) - model_stat_template = dump_row_filename_template - string_sub_dict = { - 'model': model_name, - 'model_reference': model_plot_name, - 'obtype': model_obtype, - 'fcst_lead': fcst_lead, - 'obs_lead': obs_lead, - 'fcst_level': fcst_var_level, - 'obs_level': obs_var_level, - 'fcst_thresh': fcst_var_thresh, - 'obs_thresh': obs_var_thresh, - } - model_stat_file = do_string_sub(model_stat_template, - **string_sub_dict) - if os.path.exists(model_stat_file): - nrow = sum(1 for line in open(model_stat_file)) - if nrow == 0: - logger.warning("Model "+str(model_num)+" "+model_name+" " - +"with plot name "+model_plot_name+" " - +"file: "+model_stat_file+" empty") - model_lead_now_data = pd.DataFrame( - np.nan, index=model_lead_now_data_index, - columns=[ 'TOTAL' ] - ) - else: - logger.debug("Model "+str(model_num)+" "+model_name+" " - +"with plot name "+model_plot_name+" " - +"file: "+model_stat_file+" exists") - model_lead_now_stat_file_data = pd.read_csv( - model_stat_file, sep=" ", skiprows=1, - skipinitialspace=True, header=None - ) - model_lead_now_stat_file_data.rename( - columns=dict(zip( - model_lead_now_stat_file_data.columns \ - [:len(stat_file_base_columns)], - stat_file_base_columns - )), inplace=True - ) - line_type = model_lead_now_stat_file_data['LINE_TYPE'][0] - stat_file_line_type_columns = ( - plot_util.get_stat_file_line_type_columns(logger, - met_version, - line_type) - ) - model_lead_now_stat_file_data.rename( - columns=dict(zip( - model_lead_now_stat_file_data.columns \ - [len(stat_file_base_columns):], - stat_file_line_type_columns - )), inplace=True - ) - model_lead_now_stat_file_data_fcstvaliddates = ( - model_lead_now_stat_file_data.loc[:] \ - ['FCST_VALID_BEG'].values - ) - model_lead_now_data = ( - pd.DataFrame(np.nan, index=model_lead_now_data_index, - columns=stat_file_line_type_columns) - ) - model_lead_now_stat_file_data.fillna( - {'FCST_UNITS':'NA', 'OBS_UNITS':'NA', 'VX_MASK':'NA'}, - inplace=True - ) - if float(met_version) >= 8.1: - model_now_fcst_units = ( - model_lead_now_stat_file_data \ - .loc[0]['FCST_UNITS'] - ) - model_now_obs_units = ( - model_lead_now_stat_file_data \ - .loc[0]['OBS_UNITS'] - ) - if model_now_fcst_units != 'NA': - fcst_var_units_list.append(model_now_fcst_units) - if model_now_obs_units != 'NA': - obs_var_units_list.append(model_now_obs_units) - for expected_date in expected_stat_file_dates: - if expected_date in \ - model_lead_now_stat_file_data_fcstvaliddates: - matching_date_idx = ( - model_lead_now_stat_file_data_fcstvaliddates \ - .tolist().index(expected_date) - ) - model_lead_now_stat_file_data_indexed = ( - model_lead_now_stat_file_data \ - .loc[matching_date_idx][:] - ) - for col in stat_file_line_type_columns: - model_lead_now_data.loc[ - (model_plot_name, - fcst_lead, - expected_date) - ][col] = ( - 
model_lead_now_stat_file_data_indexed \ - .loc[:][col] - ) - else: - logger.warning("Model "+str(model_num)+" "+model_name+" " - +"with plot name "+model_plot_name+" " - +"file: "+model_stat_file+" does not exist") - model_lead_now_data = pd.DataFrame( - np.nan, index=model_lead_now_data_index, - columns=[ 'TOTAL' ] - ) - if fl > 0: - model_now_data = pd.concat( - [model_now_data, model_lead_now_data] - ) - else: - model_now_data = model_lead_now_data - if model_num > 1: - model_data = pd.concat([model_data, model_now_data]) - else: - model_data = model_now_data - # Build lead by date grid for plotting - ymesh, xmesh = np.meshgrid(plot_time_dates, fcst_lead_timedeltas) - # Calculate statistics and plots - if fcst_var_units_list != []: - fcst_var_units_plot_title = ( - '['+', '.join(list(set(fcst_var_units_list)))+']' - ) - else: - fcst_var_units_plot_title = '' - if obs_var_units_list != []: - obs_var_units_plot_title = ( - '['+', '.join(list(set(obs_var_units_list)))+']' - ) - else: - obs_var_units_plot_title = '' - logger.info("Calculating and plotting statistics") - for stat in stats_list: - logger.debug("Working on "+stat) - stat_values, stat_values_array, stat_plot_name = ( - plot_util.calculate_stat(logger, model_data, stat) - ) - if event_equalization == 'True': - logger.debug("Doing event equalization") - for l in range(len(stat_values_array[:,0,0,0])): - for fl in range(len(fcst_leads)): - stat_values_array[l,:,fl,:] = ( - np.ma.mask_cols(stat_values_array[l,:,fl,:]) - ) - if (stat == 'fbar_obar' or stat == 'orate_frate' - or stat == 'baser_frate'): - nsubplots = nmodels + 1 - else: - nsubplots = nmodels - if nsubplots == 1: - fig = plt.figure(figsize=(10,12)) - gs = gridspec.GridSpec(1,1) - elif nsubplots == 2: - fig = plt.figure(figsize=(10,12)) - gs = gridspec.GridSpec(2,1) - gs.update(hspace=0.35) - elif nsubplots > 2 and nsubplots <= 4: - fig = plt.figure(figsize=(20,12)) - gs = gridspec.GridSpec(2,2) - gs.update(wspace=0.4, hspace=0.35) - elif nsubplots > 4 and nsubplots <= 6: - fig = plt.figure(figsize=(30,12)) - gs = gridspec.GridSpec(2,3) - gs.update(wspace=0.4, hspace=0.35) - elif nsubplots > 6 and nsubplots <= 9: - fig = plt.figure(figsize=(30,18)) - gs = gridspec.GridSpec(3,3) - gs.update(wspace=0.4, hspace=0.35) - if (stat == 'fbar_obar' or stat == 'orate_frate' - or stat == 'baser_frate'): - logger.debug("Plotting observations") - obs_stat_values_array = stat_values_array[1,0,:,:] - ax = plt.subplot(gs[0]) - ax.grid(True) - ax.tick_params(axis='x', pad=15) - ax.set_xlabel('Forecast Lead', labelpad=20) - ax.set_xticks(fcst_lead_timedeltas) - ax.set_xticklabels(fcst_lead_timedeltas_str) - ax.set_xlim([fcst_lead_timedeltas[0], - fcst_lead_timedeltas[-1]]) - ax.tick_params(axis='y', pad=15) - ax.set_ylabel(date_type.title()+' Date', labelpad=20) - ax.set_ylim([plot_time_dates[0],plot_time_dates[-1]]) - ax.yaxis.set_major_locator( - md.DayLocator(interval=date_tick_intvl) - ) - ax.yaxis.set_major_formatter(md.DateFormatter('%d%b%Y')) - ax.yaxis.set_minor_locator(md.DayLocator()) - ax.set_title('obs', loc='left') - CF1 = ax.contourf(xmesh, ymesh, obs_stat_values_array, - cmap=cmap, - locator=matplotlib.ticker.MaxNLocator( - symmetric=True - ), extend='both') - C1 = ax.contour(xmesh, ymesh, obs_stat_values_array, - levels=CF1.levels, - colors='k', - linewidths=1.0) - ax.clabel(C1, C1.levels, - fmt='%1.2f', - inline=True, - fontsize=12.5) - for model_info in model_info_list: - model_num = model_info_list.index(model_info) + 1 - model_idx = model_info_list.index(model_info) - 
model_name = model_info[0] - model_plot_name = model_info[1] - model_obtype = model_info[2] - model_stat_values_array = stat_values_array[0,model_idx,:,:] - if (stat == 'fbar_obar' or stat == 'orate_frate' - or stat == 'baser_frate'): - ax = plt.subplot(gs[model_num]) - else: - ax = plt.subplot(gs[model_idx]) - ax.grid(True) - ax.tick_params(axis='x', pad=15) - ax.set_xlabel('Forecast Lead', labelpad=20) - ax.set_xticks(fcst_lead_timedeltas) - ax.set_xticklabels(fcst_lead_timedeltas_str) - ax.set_xlim([fcst_lead_timedeltas[0], - fcst_lead_timedeltas[-1]]) - ax.tick_params(axis='y', pad=15) - ax.set_ylabel(date_type.title()+' Date', labelpad=20) - ax.set_ylim([plot_time_dates[0],plot_time_dates[-1]]) - ax.yaxis.set_major_locator( - md.DayLocator(interval=date_tick_intvl) - ) - ax.yaxis.set_major_formatter(md.DateFormatter('%d%b%Y')) - ax.yaxis.set_minor_locator(md.DayLocator()) - if (stat == 'fbar_obar' or stat == 'orate_frate' - or stat == 'baser_frate'): - logger.debug("Plotting model "+str(model_num)+" " - +model_name+" - obs " - +"with name on plot "+model_plot_name - +" - obs") - ax.set_title(model_plot_name+' - obs', loc='left') - model_obs_diff = ( - model_stat_values_array - - stat_values_array[1,model_idx,:,:] - ) - if model_num == 1: - clevels_diff = plot_util.get_clevels(model_obs_diff) - CF2 = ax.contourf(xmesh, ymesh, model_obs_diff, - levels=clevels_diff, - cmap=cmap_diff, - locator= matplotlib.ticker.MaxNLocator( - symmetric=True - ), - extend='both') - C2 = ax.contour(xmesh, ymesh, model_obs_diff, - levels=CF2.levels, colors='k', - linewidths=1.0) - ax.clabel(C2, C2.levels, - fmt='%1.2f', - inline=True, - fontsize=12.5) - else: - CF = ax.contourf(xmesh, ymesh, model_obs_diff, - levels=CF2.levels, - cmap=cmap_diff, - locator= matplotlib.ticker.MaxNLocator( - symmetric=True - ), - extend='both') - C = ax.contour(xmesh, ymesh, model_obs_diff, - levels=CF2.levels, - colors='k', - linewidths=1.0) - ax.clabel(C, C.levels, - fmt='%1.2f', - inline=True, - fontsize=12.5) - elif stat == 'bias' or stat == 'fbias': - logger.debug("Plotting model "+str(model_num)+" " - +model_name+" with name on plot " - +model_plot_name) - ax.set_title(model_plot_name, loc='left') - if model_num == 1: - clevels_bias = plot_util.get_clevels( - model_stat_values_array - ) - CF1 = ax.contourf(xmesh, ymesh, model_stat_values_array, - levels=clevels_bias, - cmap=cmap_bias, - locator=matplotlib.ticker.MaxNLocator( - symmetric=True - ), extend='both') - C1 = ax.contour(xmesh, ymesh, model_stat_values_array, - levels=CF1.levels, - colors='k', - linewidths=1.0) - ax.clabel(C1, C1.levels, - fmt='%1.2f', - inline=True, - fontsize=12.5) - else: - CF = ax.contourf(xmesh, ymesh, model_stat_values_array, - levels=CF1.levels, - cmap=cmap_bias, - extend='both') - C = ax.contour(xmesh, ymesh, model_stat_values_array, - levels=CF1.levels, - colors='k', - linewidths=1.0) - ax.clabel(C, C.levels, - fmt='%1.2f', - inline=True, - fontsize=12.5) - else: - if model_num == 1: - logger.debug("Plotting model "+str(model_num)+" " - +model_name+" with name on plot " - +model_plot_name) - model1_name = model_name - model1_plot_name = model_plot_name - model1_stat_values_array = model_stat_values_array - ax.set_title(model_plot_name, loc='left') - CF1 = ax.contourf(xmesh, ymesh, model_stat_values_array, - cmap=cmap, - extend='both') - C1 = ax.contour(xmesh, ymesh, model_stat_values_array, - levels=CF1.levels, - colors='k', - linewidths=1.0) - ax.clabel(C1, C1.levels, - fmt='%1.2f', - inline=True, - fontsize=12.5) - else: - 
logger.debug("Plotting model "+str(model_num)+" " - +model_name+" - model 1 "+model1_name+" " - +"with name on plot "+model_plot_name+" " - +"- "+model1_plot_name) - ax.set_title(model_plot_name+' - '+model1_plot_name, - loc='left') - model_model1_diff = ( - model_stat_values_array - model1_stat_values_array - ) - if model_num == 2: - clevels_diff = plot_util.get_clevels(model_model1_diff) - CF2 = ax.contourf(xmesh, ymesh, model_model1_diff, - levels=clevels_diff, - cmap=cmap_diff, - locator= \ - matplotlib.ticker.MaxNLocator( - symmetric=True - ), - extend='both') - C2 = ax.contour(xmesh, ymesh, model_model1_diff, - levels=CF2.levels, colors='k', - linewidths=1.0) - ax.clabel(C2, C2.levels, - fmt='%1.2f', - inline=True, - fontsize=12.5) - else: - CF = ax.contourf(xmesh, ymesh, model_model1_diff, - levels=CF2.levels, - cmap=cmap_diff, - locator= \ - matplotlib.ticker.MaxNLocator( - symmetric=True - ), - extend='both') - C = ax.contour(xmesh, ymesh, model_model1_diff, - levels=CF2.levels, - colors='k', - linewidths=1.0) - ax.clabel(C, C.levels, - fmt='%1.2f', - inline=True, - fontsize=12.5) - cax = fig.add_axes([0.1, -0.05, 0.8, 0.05]) - if (stat == 'fbar_obar' or stat == 'orate_frate' - or stat == 'baser_frate'): - cbar = fig.colorbar(CF2, cax=cax, orientation='horizontal', - ticks=CF2.levels) - elif stat == 'bias' or stat == 'fbias': - cbar = fig.colorbar(CF1, cax=cax, orientation='horizontal', - ticks=CF1.levels) - else: - if nsubplots == 1: - cbar = fig.colorbar(CF1, cax=cax, orientation='horizontal', - ticks=CF1.levels) - else: - cbar = fig.colorbar(CF2, cax=cax, orientation='horizontal', - ticks=CF2.levels) - fig.suptitle(stat_plot_name+'\n' - +fcst_var_plot_title+' '+fcst_var_units_plot_title - +', '+obs_var_plot_title+' '+obs_var_units_plot_title+'\n' - +extra_plot_title+'\n' - +date_time_plot_title+'\n', - fontsize=14, fontweight='bold') - savefig_imagename = ( - stat+'_'+base_name.replace('FCSTLEADHOLDER', 'all')+'.png' - ) - savefig_image = os.path.join(output_base_dir, 'images', - savefig_imagename) - logger.info("Saving image as "+savefig_image) - plt.savefig(savefig_image, bbox_inches='tight') - plt.close() diff --git a/ush/plotting_scripts/plot_lead_by_level.py b/ush/plotting_scripts/plot_lead_by_level.py deleted file mode 100644 index c26ee96c8..000000000 --- a/ush/plotting_scripts/plot_lead_by_level.py +++ /dev/null @@ -1,707 +0,0 @@ -''' -Name: plot_lead_by_level.py -Contact(s): Mallory Row -Abstract: Reads average files from plot_time_series.py to make lead-pressue plots -History Log: Third version -Usage: Called by make_plots_wrapper.py -Parameters: None -Input Files: Text files -Output Files: .png images -Condition codes: 0 for success, 1 for failure -''' - -import os -import numpy as np -import pandas as pd -import itertools -import warnings -import logging -import datetime -import re -import sys -import matplotlib -matplotlib.use('agg') -import matplotlib.pyplot as plt -import matplotlib.dates as md -import matplotlib.gridspec as gridspec - -import plot_util as plot_util -from plot_util import get_lead_avg_file - -# add metplus directory to path so the wrappers and utilities can be found -sys.path.insert(0, os.path.abspath(os.path.join(os.path.dirname(__file__), - '..', - '..'))) -from metplus.util import do_string_sub - -# Read environment variables set in make_plots_wrapper.py -verif_case = os.environ['VERIF_CASE'] -verif_type = os.environ['VERIF_TYPE'] -date_type = os.environ['DATE_TYPE'] -valid_beg = os.environ['VALID_BEG'] -valid_end = os.environ['VALID_END'] 
-init_beg = os.environ['INIT_BEG'] -init_end = os.environ['INIT_END'] -fcst_valid_hour_list = os.environ['FCST_VALID_HOUR'].split(', ') -fcst_valid_hour = os.environ['FCST_VALID_HOUR'] -fcst_init_hour_list = os.environ['FCST_INIT_HOUR'].split(', ') -fcst_init_hour = os.environ['FCST_INIT_HOUR'] -obs_valid_hour_list = os.environ['OBS_VALID_HOUR'].split(', ') -obs_valid_hour = os.environ['OBS_VALID_HOUR'] -obs_init_hour_list = os.environ['OBS_INIT_HOUR'].split(', ') -obs_init_hour = os.environ['OBS_INIT_HOUR'] -fcst_lead_list = [os.environ['FCST_LEAD'].split(', ')] -fcst_var_name = os.environ['FCST_VAR'] -fcst_var_units = os.environ['FCST_UNITS'] -fcst_var_level_list = [os.environ['FCST_LEVEL'].split(', ')] -fcst_var_thresh_list = os.environ['FCST_THRESH'].split(', ') -obs_var_name = os.environ['OBS_VAR'] -obs_var_units = os.environ['OBS_UNITS'] -obs_var_level_list = [os.environ['OBS_LEVEL'].split(', ')] -obs_var_thresh_list = os.environ['OBS_THRESH'].split(', ') -interp_mthd = os.environ['INTERP_MTHD'] -interp_pnts = os.environ['INTERP_PNTS'] -vx_mask = os.environ['VX_MASK'] -alpha = os.environ['ALPHA'] -desc = os.environ['DESC'] -obs_lead = os.environ['OBS_LEAD'] -cov_thresh = os.environ['COV_THRESH'] -stats_list = os.environ['STATS'].split(', ') -model_list = os.environ['MODEL'].split(', ') -model_obtype_list = os.environ['MODEL_OBTYPE'].split(', ') -model_reference_name_list = os.environ['MODEL_REFERENCE_NAME'].split(', ') -dump_row_filename_template = os.environ['DUMP_ROW_FILENAME'] -average_method = os.environ['AVERAGE_METHOD'] -ci_method = os.environ['CI_METHOD'] -verif_grid = os.environ['VERIF_GRID'] -event_equalization = os.environ['EVENT_EQUALIZATION'] -met_version = os.environ['MET_VERSION'] -input_base_dir = os.environ['INPUT_BASE_DIR'] -output_base_dir = os.environ['OUTPUT_BASE_DIR'] -log_metplus = os.environ['LOG_METPLUS'] -log_level = os.environ['LOG_LEVEL'] - -# General set up and settings -# Plots -warnings.filterwarnings('ignore') -plt.rcParams['font.weight'] = 'bold' -plt.rcParams['axes.labelsize'] = 15 -plt.rcParams['axes.labelweight'] = 'bold' -plt.rcParams['xtick.labelsize'] = 15 -plt.rcParams['ytick.labelsize'] = 15 -plt.rcParams['axes.titlesize'] = 15 -plt.rcParams['axes.titleweight'] = 'bold' -plt.rcParams['axes.formatter.useoffset'] = False -cmap_bias = plt.cm.PiYG_r -cmap = plt.cm.BuPu -cmap_diff = plt.cm.coolwarm -# Logging -logger = logging.getLogger(log_metplus) -logger.setLevel(log_level) -formatter = logging.Formatter( - '%(asctime)s.%(msecs)03d (%(filename)s:%(lineno)d) %(levelname)s: ' - +'%(message)s', - '%m/%d %H:%M:%S' - ) -file_handler = logging.FileHandler(log_metplus, mode='a') -file_handler.setFormatter(formatter) -logger.addHandler(file_handler) - -for level_list in fcst_var_level_list: - for level in level_list: - if not level.startswith('P'): - logger.warning(f"Forecast level value ({level}) expected " - "to be in pressure, i.e. P500. 
Exiting.") - sys.exit(0) - -output_data_dir = os.path.join(output_base_dir, 'data') -output_imgs_dir = os.path.join(output_base_dir, 'imgs') -# Model info -model_info_list = list( - zip(model_list, - model_reference_name_list, - model_obtype_list, - ) -) -nmodels = len(model_info_list) -# Plot info -plot_info_list = list( - itertools.product(*[fcst_lead_list, - fcst_var_level_list, - fcst_var_thresh_list]) - ) -# Date and time infomation and build title for plot -date_beg = os.environ[date_type+'_BEG'] -date_end = os.environ[date_type+'_END'] -date_plot_title = ( - date_type.title()+': ' - +str(datetime.datetime.strptime(date_beg, '%Y%m%d').strftime('%d%b%Y')) - +'-' - +str(datetime.datetime.strptime(date_end, '%Y%m%d').strftime('%d%b%Y')) -) -valid_init_dict = { - 'fcst_valid_hour_beg': fcst_valid_hour_list[0], - 'fcst_valid_hour_end': fcst_valid_hour_list[-1], - 'fcst_init_hour_beg': fcst_init_hour_list[0], - 'fcst_init_hour_end': fcst_init_hour_list[-1], - 'obs_valid_hour_beg': obs_valid_hour_list[0], - 'obs_valid_hour_end': obs_valid_hour_list[-1], - 'obs_init_hour_beg': obs_init_hour_list[0], - 'obs_init_hour_end': obs_init_hour_list[-1], - 'valid_hour_beg': '', - 'valid_hour_end': '', - 'init_hour_beg': '', - 'init_hour_end': '' -} -valid_init_type_list = [ - 'valid_hour_beg', 'valid_hour_end', 'init_hour_beg', 'init_hour_end' -] -for vitype in valid_init_type_list: - if (valid_init_dict['fcst_'+vitype] != '' - and valid_init_dict['obs_'+vitype] == ''): - valid_init_dict[vitype] = valid_init_dict['fcst_'+vitype] - elif (valid_init_dict['obs_'+vitype] != '' - and valid_init_dict['fcst_'+vitype] == ''): - valid_init_dict[vitype] = valid_init_dict['obs_'+vitype] - if valid_init_dict['fcst_'+vitype] == '': - if 'beg' in vitype: - valid_init_dict['fcst_'+vitype] = '000000' - elif 'end' in vitype: - valid_init_dict['fcst_'+vitype] = '235959' - if valid_init_dict['obs_'+vitype] == '': - if 'beg' in vitype: - valid_init_dict['obs_'+vitype] = '000000' - elif 'end' in vitype: - valid_init_dict['obs_'+vitype] = '235959' - if valid_init_dict['fcst_'+vitype] == valid_init_dict['obs_'+vitype]: - valid_init_dict[vitype] = valid_init_dict['fcst_'+vitype] -time_plot_title = '' -for vi in ['valid_hour', 'init_hour']: - beg_hr = valid_init_dict[vi+'_beg'] - end_hr = valid_init_dict[vi+'_end'] - fcst_beg_hr = valid_init_dict['fcst_'+vi+'_beg'] - fcst_end_hr = valid_init_dict['fcst_'+vi+'_end'] - obs_beg_hr = valid_init_dict['obs_'+vi+'_beg'] - obs_end_hr = valid_init_dict['obs_'+vi+'_end'] - time_label = vi.split('_')[0].title() - if beg_hr != '' and end_hr != '': - if beg_hr == end_hr: - time_plot_title+=', '+time_label+': '+beg_hr[0:4]+'Z' - else: - time_plot_title+=( - ', '+time_label+': '+beg_hr[0:4]+'-'+end_hr[0:4]+'Z' - ) - else: - if fcst_beg_hr == fcst_end_hr: - time_plot_title+=', Fcst '+time_label+': '+fcst_beg_hr[0:4]+'Z' - else: - time_plot_title+=( - ', Fcst '+time_label+': '+fcst_beg_hr[0:4]+'-' - +fcst_end_hr[0:4]+'Z' - ) - if obs_beg_hr == obs_end_hr: - time_plot_title+=', Obs '+time_label+': '+obs_beg_hr[0:4]+'Z' - else: - time_plot_title+=( - ', Obs '+time_label+': '+obs_beg_hr[0:4]+'-' - +obs_end_hr[0:4]+'Z' - ) -date_time_plot_title = date_plot_title+time_plot_title -# Common plotting information and build title for plot -if 'WV1' not in interp_mthd or interp_mthd != '': - extra_plot_title = verif_grid+'-'+vx_mask -else: - extra_plot_title = interp_mthd+', '+verif_grid+'-'+vx_mask -if desc != '': - extra_plot_title+=', Desc: '+desc -if obs_lead != '': - extra_plot_title+=', Obs Lead: 
'+obs_lead -if interp_pnts != '': - extra_plot_title+=', Interp. Pts.: '+interp_pnts -if cov_thresh != '': - extra_plot_title+=', Cov. Thresh:'+cov_thresh -if alpha != '': - extra_plot_title+=', Alpha: '+alpha - -# Start looping to make plots -for plot_info in plot_info_list: - fcst_leads = plot_info[0] - fcst_lead_timedeltas = np.full_like(fcst_leads, np.nan, dtype=float) - for fcst_lead in fcst_leads: - fcst_lead_idx = fcst_leads.index(fcst_lead) - fcst_lead_timedelta = datetime.timedelta( - hours=int(fcst_lead[:-4]), - minutes=int(fcst_lead[-4:-2]), - seconds=int(fcst_lead[-2:]) - ).total_seconds() - fcst_lead_timedeltas[fcst_lead_idx] = float(fcst_lead_timedelta) - fcst_lead_timedeltas_str = [] - for tdelta in fcst_lead_timedeltas: - h = int(tdelta/3600) - m = int((tdelta-(h*3600))/60) - s = int(tdelta-(h*3600)-(m*60)) - if h < 100: - tdelta_str = f"{h:02d}" - else: - tdelta_str = f"{h:03d}" - if m != 0: - tdelta_str+=f":{m:02d}" - if s != 0: - tdelta_str+=f":{s:02d}" - fcst_lead_timedeltas_str.append(tdelta_str) - fcst_var_levels = plot_info[1] - obs_var_levels = obs_var_level_list[ - fcst_var_level_list.index(fcst_var_levels) - ] - fcst_var_thresh = plot_info[2] - obs_var_thresh = obs_var_thresh_list[ - fcst_var_thresh_list.index(fcst_var_thresh) - ] - fcst_var_thresh_symbol, fcst_var_thresh_letter = plot_util.format_thresh( - fcst_var_thresh - ) - obs_var_thresh_symbol, obs_var_thresh_letter = plot_util.format_thresh( - obs_var_thresh - ) - # Build plot title for variable info - fcst_var_plot_title = 'Fcst: '+fcst_var_name - obs_var_plot_title = 'Obs: '+obs_var_name - if 'WV1' in interp_mthd: - fcst_var_plot_title+=' '+interp_mthd - obs_var_plot_title+=' '+interp_mthd - if fcst_var_thresh != '': - fcst_var_plot_title+=' '+fcst_var_thresh - if obs_var_thresh != '': - obs_var_plot_title+=' '+obs_var_thresh - if fcst_var_units == '': - fcst_var_units_list = [] - else: - fcst_var_units_list = fcst_var_units.split(', ') - if obs_var_units == '': - obs_var_units_list = [] - else: - obs_var_units_list = obs_var_units.split(', ') - logger.info("Working on forecast lead averages" - +" for forecast variable "+fcst_var_name - +" "+fcst_var_thresh) - # Set up base name for file naming convention for lead average files, - # and output data and images - base_name = date_type.lower()+date_beg+'to'+date_end - if (valid_init_dict['valid_hour_beg'] != '' - and valid_init_dict['valid_hour_end'] != ''): - base_name+=( - '_valid'+valid_init_dict['valid_hour_beg'][0:4] - +'to'+valid_init_dict['valid_hour_end'][0:4]+'Z' - ) - else: - base_name+=( - '_fcst_valid'+valid_init_dict['fcst_valid_hour_beg'][0:4] - +'to'+valid_init_dict['fcst_valid_hour_end'][0:4]+'Z' - +'_obs_valid'+valid_init_dict['obs_valid_hour_beg'][0:4] - +'to'+valid_init_dict['obs_valid_hour_end'][0:4]+'Z' - ) - if (valid_init_dict['init_hour_beg'] != '' - and valid_init_dict['init_hour_end'] != ''): - base_name+=( - '_init'+valid_init_dict['init_hour_beg'][0:4] - +'to'+valid_init_dict['init_hour_end'][0:4]+'Z' - ) - else: - base_name+=( - '_fcst_init'+valid_init_dict['fcst_init_hour_beg'][0:4] - +'to'+valid_init_dict['fcst_init_hour_end'][0:4]+'Z' - +'_obs_init'+valid_init_dict['obs_init_hour_beg'][0:4] - +'to'+valid_init_dict['obs_init_hour_end']+'Z' - ) - base_name+=( - '_fcst_lead_avgs' - +'_fcst'+fcst_var_name+'FCSTLEVELHOLDER' - +fcst_var_thresh_letter.replace(',', '_')+interp_mthd - +'_obs'+obs_var_name+'OBSLEVELHOLDER' - +obs_var_thresh_letter.replace(',', '_')+interp_mthd - +'_vxmask'+vx_mask - ) - if desc != '': - 
base_name+='_desc'+desc - if obs_lead != '': - base_name+='_obs_lead'+obs_lead - if interp_pnts != '': - base_name+='_interp_pnts'+interp_pnts - if cov_thresh != '': - cov_thresh_symbol, cov_thresh_letter = plot_util.format_thresh( - cov_thresh - ) - base_name+='_cov_thresh'+cov_thresh_letter.replace(',', '_') - if alpha != '': - base_name+='_alpha'+alpha - # Build date by forecst level grid for plotting - fcst_var_levels_int = np.empty(len(fcst_var_levels), dtype=int) - for vl in range(len(fcst_var_levels)): - fcst_var_levels_int[vl] = fcst_var_levels[vl][1:] - xmesh, ymesh = np.meshgrid(fcst_lead_timedeltas, fcst_var_levels_int) - for stat in stats_list: - logger.debug("Working on "+stat) - stat_plot_name = plot_util.get_stat_plot_name(logger, stat) - if (stat == 'fbar_obar' or stat == 'orate_frate' - or stat == 'baser_frate'): - avg_file_cols = ['LEADS', 'FCST_UNITS', 'OBS_UNITS', - 'VALS', 'OBS_VALS'] - else: - avg_file_cols = ['LEADS', 'FCST_UNITS', 'OBS_UNITS', 'VALS'] - avg_cols_to_array = avg_file_cols[3:] - # Reading in model lead average files produced from - # plot_time_series.py - logger.info("Reading in model data") - for model_info in model_info_list: - model_num = model_info_list.index(model_info) + 1 - model_idx = model_info_list.index(model_info) - model_name = model_info[0] - model_plot_name = model_info[1] - model_obtype = model_info[2] - model_avg_data = np.empty( - [len(avg_cols_to_array), len(fcst_var_levels), - len(fcst_leads)] - ) - model_avg_data.fill(np.nan) - for vl in range(len(fcst_var_levels)): - fcst_var_level = fcst_var_levels[vl] - obs_var_level = obs_var_levels[vl] -# lead_avg_filename = ( -# stat+'_' -# +model_plot_name+'_'+model_obtype+'_' -# +base_name.replace('FCSTLEVELHOLDER', fcst_var_level) \ -# .replace('OBSLEVELHOLDER', obs_var_level) -# +'.txt' -# ) -# lead_avg_file = os.path.join(output_base_dir, 'data', -# lead_avg_filename) - model_stat_template = dump_row_filename_template - string_sub_dict = { - 'model': model_name, - 'model_reference': model_plot_name, - 'obtype': model_obtype, - 'fcst_lead': fcst_lead, - 'fcst_level': fcst_var_level, - 'obs_level': obs_var_level, - 'fcst_thresh': fcst_var_thresh, - 'obs_thresh': obs_var_thresh, - } - model_stat_file = do_string_sub(model_stat_template, - **string_sub_dict) - lead_avg_file = get_lead_avg_file(stat, - model_stat_file, - fcst_lead, - output_base_dir) - - if os.path.exists(lead_avg_file): - nrow = sum(1 for line in open(lead_avg_file)) - if nrow == 0: - logger.error("Model "+str(model_num)+" " - +model_name+" with plot name " - +model_plot_name+" file: " - +lead_avg_file+" empty") - sys.exit(1) - else: - logger.debug("Model "+str(model_num)+" " - +model_name+" with plot name " - +model_plot_name+" file: " - +lead_avg_file+" exists") - model_avg_file_data = pd.read_csv( - lead_avg_file, sep=' ', header=None, - names=avg_file_cols, dtype=str - ) - model_avg_file_data_leads = ( - model_avg_file_data.loc[:]['LEADS'].tolist() - ) - if model_avg_file_data.loc[0]['FCST_UNITS'] == '[NA]': - fcst_var_units_plot_title = '' - else: - fcst_var_units_plot_title = ( - model_avg_file_data.loc[0]['FCST_UNITS'] - ) - if model_avg_file_data.loc[0]['OBS_UNITS'] == '[NA]': - obs_var_units_plot_title = '' - else: - obs_var_units_plot_title = ( - model_avg_file_data.loc[0]['OBS_UNITS'] - ) - for fcst_lead in fcst_leads: - fcst_lead_idx = fcst_leads.index(fcst_lead) - if fcst_lead in model_avg_file_data_leads: - model_fcst_lead_idx = ( - model_avg_file_data_leads.index( - fcst_lead - ) - ) - for col in 
avg_cols_to_array: - col_idx = avg_cols_to_array.index(col) - model_avg_file_data_col = ( - model_avg_file_data.loc[:][col].tolist() - ) - if (model_avg_file_data_col[model_fcst_lead_idx] - != '--'): - model_avg_data[col_idx, vl, - fcst_lead_idx] = ( - float(model_avg_file_data_col \ - [model_fcst_lead_idx]) - ) - else: - logger.error("Model "+str(model_num)+" " - +model_name+" with plot name " - +model_plot_name+" file: " - +lead_avg_file+" does not exist") - sys.exit(1) - - if model_num == 1: - if (stat == 'fbar_obar' or stat == 'orate_frate' - or stat == 'baser_frate'): - nsubplots = nmodels + 1 - else: - nsubplots = nmodels - if nsubplots == 1: - fig = plt.figure(figsize=(10,12)) - gs = gridspec.GridSpec(1,1) - elif nsubplots == 2: - fig = plt.figure(figsize=(10,12)) - gs = gridspec.GridSpec(2,1) - gs.update(hspace=0.35) - elif nsubplots > 2 and nsubplots <= 4: - fig = plt.figure(figsize=(20,12)) - gs = gridspec.GridSpec(2,2) - gs.update(wspace=0.4, hspace=0.35) - elif nsubplots > 4 and nsubplots <= 6: - fig = plt.figure(figsize=(30,12)) - gs = gridspec.GridSpec(2,3) - gs.update(wspace=0.4, hspace=0.35) - elif nsubplots > 6 and nsubplots <= 9: - fig = plt.figure(figsize=(30,18)) - gs = gridspec.GridSpec(3,3) - gs.update(wspace=0.4, hspace=0.35) - if (stat == 'fbar_obar' or stat == 'orate_frate' - or stat == 'baser_frate'): - logger.debug("Plotting observations") - obs_avg_data = model_avg_data[1,:,:] - ax = plt.subplot(gs[0]) - ax.grid(True) - ax.tick_params(axis='x', pad=15) - ax.set_xlabel('Forecast Lead', labelpad=20) - ax.set_xticks(fcst_lead_timedeltas) - ax.set_xticklabels(fcst_lead_timedeltas_str) - ax.set_xlim([fcst_lead_timedeltas[0], - fcst_lead_timedeltas[-1]]) - ax.tick_params(axis='y', pad=15) - ax.set_ylabel('Pressure Level (hPa)', labelpad=20) - ax.set_yscale('log') - ax.invert_yaxis() - ax.minorticks_off() - ax.set_yticks(fcst_var_levels_int) - ax.set_yticklabels(fcst_var_levels_int) - ax.set_ylim([fcst_var_levels_int[0], - fcst_var_levels_int[-1]]) - ax.set_title('obs', loc='left') - CF1 = ax.contourf(xmesh, ymesh, obs_avg_data, - cmap=cmap, - locator=matplotlib.ticker.MaxNLocator( - symmetric=True - ), extend='both') - C1 = ax.contour(xmesh, ymesh, obs_avg_data, - levels=CF1.levels, - colors='k', - linewidths=1.0) - ax.clabel(C1, C1.levels, - fmt='%1.2f', - inline=True, - fontsize=12.5) - if (stat == 'fbar_obar' or stat == 'orate_frate' - or stat == 'baser_frate'): - ax = plt.subplot(gs[model_num]) - else: - ax = plt.subplot(gs[model_idx]) - ax.tick_params(axis='x', pad=15) - ax.set_xlabel('Forecast Lead', labelpad=20) - ax.set_xticks(fcst_lead_timedeltas) - ax.set_xticklabels(fcst_lead_timedeltas_str) - ax.set_xlim([fcst_lead_timedeltas[0], - fcst_lead_timedeltas[-1]]) - ax.tick_params(axis='y', pad=15) - ax.set_ylabel('Pressure Level (hPa)', labelpad=20) - ax.set_yscale('log') - ax.invert_yaxis() - ax.minorticks_off() - ax.set_yticks(fcst_var_levels_int) - ax.set_yticklabels(fcst_var_levels_int) - ax.set_ylim([fcst_var_levels_int[0], - fcst_var_levels_int[-1]]) - if (stat == 'fbar_obar' or stat == 'orate_frate' - or stat == 'baser_frate'): - logger.debug("Plotting model "+str(model_num)+" " - +model_name+" - obs " - +"with name on plot "+model_plot_name+" " - +"- obs") - ax.set_title(model_plot_name+' - obs', loc='left') - model_obs_diff = ( - model_avg_data[0,:,:] - - model_avg_data[1,:,:] - ) - if model_num == 1: - clevels_diff = plot_util.get_clevels(model_obs_diff) - CF2 = ax.contourf(xmesh, ymesh, model_obs_diff, - levels=clevels_diff, - cmap=cmap_diff, - 
locator= matplotlib.ticker.MaxNLocator( - symmetric=True - ), - extend='both') - C2 = ax.contour(xmesh, ymesh, model_obs_diff, - levels=CF2.levels, colors='k', - linewidths=1.0) - ax.clabel(C2, C2.levels, - fmt='%1.2f', - inline=True, - fontsize=12.5) - else: - CF = ax.contourf(xmesh, ymesh, model_obs_diff, - levels=CF2.levels, - cmap=cmap_diff, - locator= matplotlib.ticker.MaxNLocator( - symmetric=True - ), - extend='both') - C = ax.contour(xmesh, ymesh, model_obs_diff, - levels=CF2.levels, - colors='k', - linewidths=1.0) - ax.clabel(C, C.levels, - fmt='%1.2f', - inline=True, - fontsize=12.5) - elif stat == 'bias' or stat == 'fbias': - logger.debug("Plotting model "+str(model_num) - +" "+model_name+" with name on plot " - +model_plot_name) - ax.set_title(model_plot_name, loc='left') - if model_num == 1: - clevels_bias = plot_util.get_clevels( - model_avg_data[0,:,:] - ) - CF1 = ax.contourf(xmesh, ymesh, model_avg_data[0,:,:], - levels=clevels_bias, - cmap=cmap_bias, - locator=matplotlib.ticker.MaxNLocator( - symmetric=True - ), extend='both') - C1 = ax.contour(xmesh, ymesh, model_avg_data[0,:,:], - levels=CF1.levels, - colors='k', - linewidths=1.0) - ax.clabel(C1, C1.levels, - fmt='%1.2f', - inline=True, - fontsize=12.5) - else: - CF = ax.contourf(xmesh, ymesh, model_avg_data[0,:,:], - levels=CF1.levels, - cmap=cmap_bias, - extend='both') - C = ax.contour(xmesh, ymesh, model_avg_data[0,:,:], - levels=CF1.levels, - colors='k', - linewidths=1.0) - ax.clabel(C, C.levels, - fmt='%1.2f', - inline=True, - fontsize=12.5) - else: - if model_num == 1: - logger.debug("Plotting model "+str(model_num)+" " - +model_name+" with name on plot " - +model_plot_name) - model1_name = model_name - model1_plot_name = model_plot_name - model1_avg_data = model_avg_data[0,:,:] - ax.set_title(model_plot_name, loc='left') - CF1 = ax.contourf(xmesh, ymesh, model_avg_data[0,:,:], - cmap=cmap, - extend='both') - C1 = ax.contour(xmesh, ymesh, model_avg_data[0,:,:], - levels=CF1.levels, - colors='k', - linewidths=1.0) - ax.clabel(C1, C1.levels, - fmt='%1.2f', - inline=True, - fontsize=12.5) - else: - logger.debug("Plotting model "+str(model_num)+" " - +model_name+" - model 1 "+model1_name+" " - +"with name on plot "+model_plot_name+" " - +"- "+model1_plot_name) - ax.set_title(model_plot_name+' - '+model1_plot_name, - loc='left') - model_model1_diff = ( - model_avg_data[0,:,:] - model1_avg_data - ) - if model_num == 2: - clevels_diff = plot_util.get_clevels(model_model1_diff) - CF2 = ax.contourf(xmesh, ymesh, model_model1_diff, - levels=clevels_diff, - cmap=cmap_diff, - locator= \ - matplotlib.ticker.MaxNLocator( - symmetric=True - ), - extend='both') - C2 = ax.contour(xmesh, ymesh, model_model1_diff, - levels=CF2.levels, colors='k', - linewidths=1.0) - ax.clabel(C2, C2.levels, - fmt='%1.2f', - inline=True, - fontsize=12.5) - else: - CF = ax.contourf(xmesh, ymesh, model_model1_diff, - levels=CF2.levels, - cmap=cmap_diff, - locator= \ - matplotlib.ticker.MaxNLocator( - symmetric=True - ), - extend='both') - C = ax.contour(xmesh, ymesh, model_model1_diff, - levels=CF2.levels, - colors='k', - linewidths=1.0) - ax.clabel(C, C.levels, - fmt='%1.2f', - inline=True, - fontsize=12.5) - cax = fig.add_axes([0.1, -0.05, 0.8, 0.05]) - if (stat == 'fbar_obar' or stat == 'orate_frate' - or stat == 'baser_frate'): - cbar = fig.colorbar(CF2, cax=cax, orientation='horizontal', - ticks=CF2.levels) - elif stat == 'bias' or stat == 'fbias': - cbar = fig.colorbar(CF1, cax=cax, orientation='horizontal', - ticks=CF1.levels) - else: - if 
nsubplots == 1: - cbar = fig.colorbar(CF1, cax=cax, orientation='horizontal', - ticks=CF1.levels) - else: - cbar = fig.colorbar(CF2, cax=cax, orientation='horizontal', - ticks=CF2.levels) - fig.suptitle(stat_plot_name+'\n' - +fcst_var_plot_title+' '+fcst_var_units_plot_title - +', '+obs_var_plot_title+' '+obs_var_units_plot_title+'\n' - +extra_plot_title+'\n' - +date_time_plot_title, - fontsize=14, fontweight='bold') - savefig_imagename = ( - stat+'_'+base_name.replace('FCSTLEVELHOLDER', 'all') \ - .replace('OBSLEVELHOLDER', 'all')+'.png' - ) - savefig_image = os.path.join(output_base_dir, 'images', - savefig_imagename) - logger.info("Saving image as "+savefig_image) - plt.savefig(savefig_image, bbox_inches='tight') - plt.close() diff --git a/ush/plotting_scripts/plot_stat_by_level.py b/ush/plotting_scripts/plot_stat_by_level.py deleted file mode 100644 index e96bb8e8a..000000000 --- a/ush/plotting_scripts/plot_stat_by_level.py +++ /dev/null @@ -1,504 +0,0 @@ -''' -Name: plot_stat_by_level.py -Contact(s): Mallory Row -Abstract: Reads average forecast hour files from plot_time_series.py - to make stat-pressure plots -History Log: Third version -Usage: Called by make_plots_wrapper.py -Parameters: None -Input Files: Text files -Output Files: .png images -Condition codes: 0 for success, 1 for failure -''' - -import os -import sys -import numpy as np -import pandas as pd -import itertools -import warnings -import logging -import datetime -import re -import matplotlib -matplotlib.use('agg') -import matplotlib.pyplot as plt -import matplotlib.dates as md - -import plot_util as plot_util -from plot_util import get_lead_avg_file - -# add metplus directory to path so the wrappers and utilities can be found -sys.path.insert(0, os.path.abspath(os.path.join(os.path.dirname(__file__), - '..', - '..'))) -from metplus.util import do_string_sub - -# Read environment variables set in make_plots_wrapper.py -verif_case = os.environ['VERIF_CASE'] -verif_type = os.environ['VERIF_TYPE'] -date_type = os.environ['DATE_TYPE'] -valid_beg = os.environ['VALID_BEG'] -valid_end = os.environ['VALID_END'] -init_beg = os.environ['INIT_BEG'] -init_end = os.environ['INIT_END'] -fcst_valid_hour_list = os.environ['FCST_VALID_HOUR'].split(', ') -fcst_valid_hour = os.environ['FCST_VALID_HOUR'] -fcst_init_hour_list = os.environ['FCST_INIT_HOUR'].split(', ') -fcst_init_hour = os.environ['FCST_INIT_HOUR'] -obs_valid_hour_list = os.environ['OBS_VALID_HOUR'].split(', ') -obs_valid_hour = os.environ['OBS_VALID_HOUR'] -obs_init_hour_list = os.environ['OBS_INIT_HOUR'].split(', ') -obs_init_hour = os.environ['OBS_INIT_HOUR'] -fcst_lead_list = os.environ['FCST_LEAD'].split(', ') -fcst_var_name = os.environ['FCST_VAR'] -fcst_var_units = os.environ['FCST_UNITS'] -fcst_var_level_list = [os.environ['FCST_LEVEL'].split(', ')] -fcst_var_thresh_list = os.environ['FCST_THRESH'].split(', ') -obs_var_name = os.environ['OBS_VAR'] -obs_var_units = os.environ['OBS_UNITS'] -obs_var_level_list = [os.environ['OBS_LEVEL'].split(', ')] -obs_var_thresh_list = os.environ['OBS_THRESH'].split(', ') -interp_mthd = os.environ['INTERP_MTHD'] -interp_pnts = os.environ['INTERP_PNTS'] -vx_mask = os.environ['VX_MASK'] -alpha = os.environ['ALPHA'] -desc = os.environ['DESC'] -obs_lead = os.environ['OBS_LEAD'] -cov_thresh = os.environ['COV_THRESH'] -stats_list = os.environ['STATS'].split(', ') -model_list = os.environ['MODEL'].split(', ') -model_obtype_list = os.environ['MODEL_OBTYPE'].split(', ') -model_reference_name_list = os.environ['MODEL_REFERENCE_NAME'].split(', 
') -dump_row_filename_template = os.environ['DUMP_ROW_FILENAME'] -average_method = os.environ['AVERAGE_METHOD'] -ci_method = os.environ['CI_METHOD'] -verif_grid = os.environ['VERIF_GRID'] -event_equalization = os.environ['EVENT_EQUALIZATION'] -met_version = os.environ['MET_VERSION'] -input_base_dir = os.environ['INPUT_BASE_DIR'] -output_base_dir = os.environ['OUTPUT_BASE_DIR'] -log_metplus = os.environ['LOG_METPLUS'] -log_level = os.environ['LOG_LEVEL'] - -# General set up and settings -# Plots -warnings.filterwarnings('ignore') -plt.rcParams['font.weight'] = 'bold' -plt.rcParams['axes.labelsize'] = 15 -plt.rcParams['axes.labelweight'] = 'bold' -plt.rcParams['xtick.labelsize'] = 15 -plt.rcParams['ytick.labelsize'] = 15 -plt.rcParams['axes.titlesize'] = 15 -plt.rcParams['axes.titleweight'] = 'bold' -plt.rcParams['axes.formatter.useoffset'] = False -colors = [ - '#000000', '#2F1E80', '#D55E00', '#882255', - '#018C66', '#D6B616', '#036398', '#CC79A7' -] -# Logging -logger = logging.getLogger(log_metplus) -logger.setLevel(log_level) -formatter = logging.Formatter( - '%(asctime)s.%(msecs)03d (%(filename)s:%(lineno)d) %(levelname)s: ' - +'%(message)s', - '%m/%d %H:%M:%S' - ) -file_handler = logging.FileHandler(log_metplus, mode='a') -file_handler.setFormatter(formatter) -logger.addHandler(file_handler) -output_data_dir = os.path.join(output_base_dir, 'data') -output_imgs_dir = os.path.join(output_base_dir, 'imgs') -# Model info -model_info_list = list( - zip(model_list, - model_reference_name_list, - model_obtype_list, - ) -) -nmodels = len(model_info_list) -# Plot info -plot_info_list = list( - itertools.product(*[fcst_lead_list, - fcst_var_level_list, - fcst_var_thresh_list]) - ) -# Date and time infomation and build title for plot -date_beg = os.environ[date_type+'_BEG'] -date_end = os.environ[date_type+'_END'] -date_plot_title = ( - date_type.title()+': ' - +str(datetime.datetime.strptime(date_beg, '%Y%m%d').strftime('%d%b%Y')) - +'-' - +str(datetime.datetime.strptime(date_end, '%Y%m%d').strftime('%d%b%Y')) -) -valid_init_dict = { - 'fcst_valid_hour_beg': fcst_valid_hour_list[0], - 'fcst_valid_hour_end': fcst_valid_hour_list[-1], - 'fcst_init_hour_beg': fcst_init_hour_list[0], - 'fcst_init_hour_end': fcst_init_hour_list[-1], - 'obs_valid_hour_beg': obs_valid_hour_list[0], - 'obs_valid_hour_end': obs_valid_hour_list[-1], - 'obs_init_hour_beg': obs_init_hour_list[0], - 'obs_init_hour_end': obs_init_hour_list[-1], - 'valid_hour_beg': '', - 'valid_hour_end': '', - 'init_hour_beg': '', - 'init_hour_end': '' -} -valid_init_type_list = [ - 'valid_hour_beg', 'valid_hour_end', 'init_hour_beg', 'init_hour_end' -] -for vitype in valid_init_type_list: - if (valid_init_dict['fcst_'+vitype] != '' - and valid_init_dict['obs_'+vitype] == ''): - valid_init_dict[vitype] = valid_init_dict['fcst_'+vitype] - elif (valid_init_dict['obs_'+vitype] != '' - and valid_init_dict['fcst_'+vitype] == ''): - valid_init_dict[vitype] = valid_init_dict['obs_'+vitype] - if valid_init_dict['fcst_'+vitype] == '': - if 'beg' in vitype: - valid_init_dict['fcst_'+vitype] = '000000' - elif 'end' in vitype: - valid_init_dict['fcst_'+vitype] = '235959' - if valid_init_dict['obs_'+vitype] == '': - if 'beg' in vitype: - valid_init_dict['obs_'+vitype] = '000000' - elif 'end' in vitype: - valid_init_dict['obs_'+vitype] = '235959' - if valid_init_dict['fcst_'+vitype] == valid_init_dict['obs_'+vitype]: - valid_init_dict[vitype] = valid_init_dict['fcst_'+vitype] -time_plot_title = '' -for vi in ['valid_hour', 'init_hour']: - beg_hr = 
valid_init_dict[vi+'_beg'] - end_hr = valid_init_dict[vi+'_end'] - fcst_beg_hr = valid_init_dict['fcst_'+vi+'_beg'] - fcst_end_hr = valid_init_dict['fcst_'+vi+'_end'] - obs_beg_hr = valid_init_dict['obs_'+vi+'_beg'] - obs_end_hr = valid_init_dict['obs_'+vi+'_end'] - time_label = vi.split('_')[0].title() - if beg_hr != '' and end_hr != '': - if beg_hr == end_hr: - time_plot_title+=', '+time_label+': '+beg_hr[0:4]+'Z' - else: - time_plot_title+=( - ', '+time_label+': '+beg_hr[0:4]+'-'+end_hr[0:4]+'Z' - ) - else: - if fcst_beg_hr == fcst_end_hr: - time_plot_title+=', Fcst '+time_label+': '+fcst_beg_hr[0:4]+'Z' - else: - time_plot_title+=( - ', Fcst '+time_label+': '+fcst_beg_hr[0:4]+'-' - +fcst_end_hr[0:4]+'Z' - ) - if obs_beg_hr == obs_end_hr: - time_plot_title+=', Obs '+time_label+': '+obs_beg_hr[0:4]+'Z' - else: - time_plot_title+=( - ', Obs '+time_label+': '+obs_beg_hr[0:4]+'-' - +obs_end_hr[0:4]+'Z' - ) -# Common plotting information and build title for plot -if 'WV1' not in interp_mthd or interp_mthd != '': - extra_plot_title = verif_grid+'-'+vx_mask -else: - extra_plot_title = interp_mthd+', '+verif_grid+'-'+vx_mask -if desc != '': - extra_plot_title+=', Desc: '+desc -if obs_lead != '': - extra_plot_title+=', Obs Lead: '+obs_lead -if interp_pnts != '': - extra_plot_title+=', Interp. Pts.: '+interp_pnts -if cov_thresh != '': - extra_plot_title+=', Cov. Thresh:'+cov_thresh -if alpha != '': - extra_plot_title+=', Alpha: '+alpha -# MET .stat file formatting -stat_file_base_columns = plot_util.get_stat_file_base_columns(met_version) -nbase_columns = len(stat_file_base_columns) - -# Start looping to make plots -for plot_info in plot_info_list: - fcst_lead = plot_info[0] - fcst_var_levels = plot_info[1] - obs_var_levels = obs_var_level_list[ - fcst_var_level_list.index(fcst_var_levels) - ] - fcst_var_thresh = plot_info[2] - obs_var_thresh = obs_var_thresh_list[ - fcst_var_thresh_list.index(fcst_var_thresh) - ] - fcst_var_thresh_symbol, fcst_var_thresh_letter = plot_util.format_thresh( - fcst_var_thresh - ) - obs_var_thresh_symbol, obs_var_thresh_letter = plot_util.format_thresh( - obs_var_thresh - ) - # Build plot title for variable info - fcst_var_plot_title = 'Fcst: '+fcst_var_name - obs_var_plot_title = 'Obs: '+obs_var_name - if 'WV1' in interp_mthd: - fcst_var_plot_title+=' '+interp_mthd - obs_var_plot_title+=' '+interp_mthd - if fcst_var_thresh != '': - fcst_var_plot_title+=' '+fcst_var_thresh - if obs_var_thresh != '': - obs_var_plot_title+=' '+obs_var_thresh - if fcst_var_units == '': - fcst_var_units_list = [] - else: - fcst_var_units_list = fcst_var_units.split(', ') - if obs_var_units == '': - obs_var_units_list = [] - else: - obs_var_units_list = obs_var_units.split(', ') - # Build plot title for forecast lead - fcst_lead_plot_title = 'Fcst Lead: '+fcst_lead[:-4]+'hr' - if fcst_lead[-4:-2] != '00': - fcst_lead_plot_title+=fcst_lead[-4:-2]+'min' - if fcst_lead[-2:] != '00': - fcst_lead_plot_title+=fcst_lead[-2:]+'sec' - # Clean up time information for plot title - # if valid/init is a single hour, then init/valid - # is also a single hour - date_time_plot_title = date_plot_title+time_plot_title - date_type_beg_hour = valid_init_dict[date_type.lower()+'_hour_beg'] - date_type_end_hour = valid_init_dict[date_type.lower()+'_hour_end'] - if (date_type_beg_hour != '' and date_type_end_hour != '' - and date_type_beg_hour == date_type_end_hour): - fcst_lead_timedelta = datetime.timedelta( - hours=int(fcst_lead[:-4]), - minutes=int(fcst_lead[-4:-2]), - seconds=int(fcst_lead[-2:]) - ) - 
date_type_timedelta = datetime.timedelta( - hours=int(date_type_beg_hour[0:2]), - minutes=int(date_type_beg_hour[2:4]), - seconds=int(date_type_beg_hour[4:]) - ) - if date_type == 'VALID': - check_time_plot_title = 'Init' - time_diff = ( - date_type_timedelta - fcst_lead_timedelta - ).total_seconds() - elif date_type == 'INIT': - check_time_plot_title = 'Valid' - time_diff = ( - date_type_timedelta - fcst_lead_timedelta - ).total_seconds() - day_diff = time_diff//86400 - hr_diff = (time_diff - (day_diff*86400))//3600 - min_diff = (time_diff%3600) // 60 - sec_diff = (time_diff%3600)%60 - time_title_replace = re.search(check_time_plot_title+': (.*)Z', - date_time_plot_title) - date_time_plot_title = date_time_plot_title.replace( - check_time_plot_title+': '+time_title_replace.group(1), - check_time_plot_title+': '+str(int(hr_diff)).zfill(2) - +str(int(min_diff)).zfill(2) - ) - logger.info("Working on forecast lead "+fcst_lead - +" and forecast variable "+fcst_var_name - +" "+fcst_var_thresh) - # Set up base name for file naming convention for MET .stat files, - # and output data and images - base_name = date_type.lower()+date_beg+'to'+date_end - if (valid_init_dict['valid_hour_beg'] != '' - and valid_init_dict['valid_hour_end'] != ''): - base_name+=( - '_valid'+valid_init_dict['valid_hour_beg'][0:4] - +'to'+valid_init_dict['valid_hour_end'][0:4]+'Z' - ) - else: - base_name+=( - '_fcst_valid'+valid_init_dict['fcst_valid_hour_beg'][0:4] - +'to'+valid_init_dict['fcst_valid_hour_end'][0:4]+'Z' - +'_obs_valid'+valid_init_dict['obs_valid_hour_beg'][0:4] - +'to'+valid_init_dict['obs_valid_hour_end'][0:4]+'Z' - ) - if (valid_init_dict['init_hour_beg'] != '' - and valid_init_dict['init_hour_end'] != ''): - base_name+=( - '_init'+valid_init_dict['init_hour_beg'][0:4] - +'to'+valid_init_dict['init_hour_end'][0:4]+'Z' - ) - else: - base_name+=( - '_fcst_init'+valid_init_dict['fcst_init_hour_beg'][0:4] - +'to'+valid_init_dict['fcst_init_hour_end'][0:4]+'Z' - +'_obs_init'+valid_init_dict['obs_init_hour_beg'][0:4] - +'to'+valid_init_dict['obs_init_hour_end']+'Z' - ) - base_name+=( - '_fcst_lead'+fcst_lead - +'_fcst'+fcst_var_name+'FCSTLEVELHOLDER' - +fcst_var_thresh_letter.replace(',', '_')+interp_mthd - +'_obs'+obs_var_name+'OBSLEVELHOLDER' - +obs_var_thresh_letter.replace(',', '_')+interp_mthd - +'_vxmask'+vx_mask - ) - if desc != '': - base_name+='_desc'+desc - if obs_lead != '': - base_name+='_obs_lead'+obs_lead - if interp_pnts != '': - base_name+='_interp_pnts'+interp_pnts - if cov_thresh != '': - cov_thresh_symbol, cov_thresh_letter = plot_util.format_thresh( - cov_thresh - ) - base_name+='_cov_thresh'+cov_thresh_letter.replace(',', '_') - if alpha != '': - base_name+='_alpha'+alpha - for stat in stats_list: - logger.debug("Working on "+stat) - stat_plot_name = plot_util.get_stat_plot_name(logger, stat) - if (stat == 'fbar_obar' or stat == 'orate_frate' - or stat == 'baser_frate'): - avg_file_cols = ['LEADS', 'FCST_UNITS', 'OBS_UNITS', - 'VALS', 'OBS_VALS'] - else: - avg_file_cols = ['LEADS', 'FCST_UNITS', 'OBS_UNITS', 'VALS'] - avg_cols_to_array = avg_file_cols[3:] - # Build forecast levels for plotting - fcst_var_levels_int = np.empty(len(fcst_var_levels), dtype=int) - for vl in range(len(fcst_var_levels)): - fcst_var_levels_int[vl] = fcst_var_levels[vl][1:] - # Reading in model lead averages files produced from plot_time_series.py - logger.info("Reading in model data") - for model_info in model_info_list: - model_num = model_info_list.index(model_info) + 1 - model_idx = 
model_info_list.index(model_info) - model_name = model_info[0] - model_plot_name = model_info[1] - model_obtype = model_info[2] - model_avg_data = np.empty( - [len(avg_cols_to_array), len(fcst_var_levels)] - ) - model_avg_data.fill(np.nan) - for vl in range(len(fcst_var_levels)): - fcst_var_level = fcst_var_levels[vl] - obs_var_level = obs_var_levels[vl] -# lead_avg_filename = ( -# stat+'_' -# +model_plot_name+'_'+model_obtype+'_' -# +base_name.replace('FCSTLEVELHOLDER', fcst_var_level) \ -# .replace('OBSLEVELHOLDER', obs_var_level) \ -# .replace(fcst_lead, '_avgs') -# +'.txt' -# ) -# lead_avg_file = os.path.join(output_base_dir, 'data', -# lead_avg_filename) - model_stat_template = dump_row_filename_template - string_sub_dict = { - 'model': model_name, - 'model_reference': model_plot_name, - 'obtype': model_obtype, - 'fcst_lead': fcst_lead, - 'fcst_level': fcst_var_level, - 'obs_level': obs_var_level, - 'fcst_thresh': fcst_var_thresh, - 'obs_thresh': obs_var_thresh, - } - model_stat_file = do_string_sub(model_stat_template, - **string_sub_dict) - lead_avg_file = get_lead_avg_file(stat, - model_stat_file, - fcst_lead, - output_base_dir) - if os.path.exists(lead_avg_file): - nrow = sum(1 for line in open(lead_avg_file)) - if nrow == 0: - logger.warning("Model "+str(model_num)+" " - +model_name+" with plot name " - +model_plot_name+" file: " - +lead_avg_file+" empty") - else: - logger.debug("Model "+str(model_num)+" " - +model_name+" with plot name " - +model_plot_name+" file: " - +lead_avg_file+" exists") - model_avg_file_data = pd.read_csv( - lead_avg_file, sep=' ', header=None, - names=avg_file_cols, dtype=str - ) - model_avg_file_data_leads = ( - model_avg_file_data.loc[:]['LEADS'].tolist() - ) - if model_avg_file_data.loc[0]['FCST_UNITS'] == '[NA]': - fcst_var_units_plot_title = '' - else: - fcst_var_units_plot_title = ( - model_avg_file_data.loc[0]['FCST_UNITS'] - ) - if model_avg_file_data.loc[0]['OBS_UNITS'] == '[NA]': - obs_var_units_plot_title = '' - else: - obs_var_units_plot_title = ( - model_avg_file_data.loc[0]['OBS_UNITS'] - ) - if fcst_lead in model_avg_file_data_leads: - model_fcst_lead_idx = ( - model_avg_file_data_leads.index(fcst_lead) - ) - for col in avg_cols_to_array: - col_idx = avg_cols_to_array.index(col) - model_avg_file_data_col = ( - model_avg_file_data.loc[:][col].tolist() - ) - if (model_avg_file_data_col[model_fcst_lead_idx] - != '--'): - model_avg_data[col_idx, vl] = ( - float(model_avg_file_data_col \ - [model_fcst_lead_idx]) - ) - else: - logger.warning("Model "+str(model_num)+" "+model_name+" " - +"with plot name "+model_plot_name+" " - +"file: "+lead_avg_file+" does not exist") - if model_num == 1: - fig, ax = plt.subplots(1,1,figsize=(10,12)) - ax.grid(True) - ax.tick_params(axis='x', pad=15) - ax.set_xlabel(stat_plot_name, labelpad=30) - ax.tick_params(axis='y', pad=15) - ax.set_ylabel('Pressure Level', labelpad=30) - ax.set_yscale('log') - ax.invert_yaxis() - ax.minorticks_off() - ax.set_yticks(fcst_var_levels_int) - ax.set_yticklabels(fcst_var_levels_int) - ax.set_ylim([fcst_var_levels_int[0],fcst_var_levels_int[-1]]) - if (stat == 'fbar_obar' or stat == 'orate_frate' - or stat == 'baser_frate'): - ax.plot(model_avg_data[1,:], fcst_var_levels_int, - color='#888888', - ls='-', linewidth=2.0, - marker='o', markersize=7, - label='obs ', - zorder=4) - ax.plot(model_avg_data[0,:], fcst_var_levels_int, - color=colors[model_idx], - ls='-', linewidth=2.0, - marker='o', markersize=7, - label=model_plot_name, - zorder=(nmodels-model_idx)+4) - 
ax.legend(bbox_to_anchor=(1.025, 1.0, 0.3, 0.0), loc='upper right', - ncol=1, fontsize='13', mode='expand', borderaxespad=0.) - ax.set_title(stat_plot_name+'\n' - +fcst_var_plot_title+' '+fcst_var_units_plot_title - +', '+obs_var_plot_title+' '+obs_var_units_plot_title+'\n' - +extra_plot_title+'\n' - +date_time_plot_title+', '+fcst_lead_plot_title+'\n', - fontsize=14, fontweight='bold') - savefig_imagename = ( - stat+'_'+base_name.replace('FCSTLEVELHOLDER', 'all') \ - .replace('OBSLEVELHOLDER', 'all')+'.png' - ) - - savefig_image = os.path.join(output_base_dir, 'images', - savefig_imagename) - logger.info("Saving image as "+savefig_image) - plt.savefig(savefig_image, bbox_inches='tight') - plt.close() diff --git a/ush/plotting_scripts/plot_threshold_average.py b/ush/plotting_scripts/plot_threshold_average.py deleted file mode 100644 index 9f8714f5b..000000000 --- a/ush/plotting_scripts/plot_threshold_average.py +++ /dev/null @@ -1,649 +0,0 @@ -''' -Name: plot_threshold_average.py -Contact(s): Mallory Row -Abstract: Reads average and CI files from plot_time_series.py to make dieoff plots -History Log: First version -Usage: Called by make_plots_wrapper.py -Parameters: None -Input Files: Text files -Output Files: .png images -Condition codes: 0 for success, 1 for failure -''' - -import os -import sys -import numpy as np -import pandas as pd -import itertools -import warnings -import logging -import datetime -import re -import matplotlib -matplotlib.use('agg') -import matplotlib.pyplot as plt -import matplotlib.dates as md - -import plot_util as plot_util -from plot_util import get_ci_file, get_lead_avg_file - -# add metplus directory to path so the wrappers and utilities can be found -sys.path.insert(0, os.path.abspath(os.path.join(os.path.dirname(__file__), - '..', - '..'))) -from metplus.util import do_string_sub - -# Read environment variables set in make_plots_wrapper.py -verif_case = os.environ['VERIF_CASE'] -verif_type = os.environ['VERIF_TYPE'] -date_type = os.environ['DATE_TYPE'] -valid_beg = os.environ['VALID_BEG'] -valid_end = os.environ['VALID_END'] -init_beg = os.environ['INIT_BEG'] -init_end = os.environ['INIT_END'] -fcst_valid_hour_list = os.environ['FCST_VALID_HOUR'].split(', ') -fcst_valid_hour = os.environ['FCST_VALID_HOUR'] -fcst_init_hour_list = os.environ['FCST_INIT_HOUR'].split(', ') -fcst_init_hour = os.environ['FCST_INIT_HOUR'] -obs_valid_hour_list = os.environ['OBS_VALID_HOUR'].split(', ') -obs_valid_hour = os.environ['OBS_VALID_HOUR'] -obs_init_hour_list = os.environ['OBS_INIT_HOUR'].split(', ') -obs_init_hour = os.environ['OBS_INIT_HOUR'] -fcst_lead_list = os.environ['FCST_LEAD'].split(', ') -fcst_var_name = os.environ['FCST_VAR'] -fcst_var_units = os.environ['FCST_UNITS'] -fcst_var_level_list = os.environ['FCST_LEVEL'].split(', ') -fcst_var_thresh_list = [os.environ['FCST_THRESH'].split(', ')] -obs_var_name = os.environ['OBS_VAR'] -obs_var_units = os.environ['OBS_UNITS'] -obs_var_level_list = os.environ['OBS_LEVEL'].split(', ') -obs_var_thresh_list = [os.environ['OBS_THRESH'].split(', ')] -interp_mthd = os.environ['INTERP_MTHD'] -interp_pnts = os.environ['INTERP_PNTS'] -vx_mask = os.environ['VX_MASK'] -alpha = os.environ['ALPHA'] -desc = os.environ['DESC'] -obs_lead = os.environ['OBS_LEAD'] -cov_thresh = os.environ['COV_THRESH'] -stats_list = os.environ['STATS'].split(', ') -model_list = os.environ['MODEL'].split(', ') -model_obtype_list = os.environ['MODEL_OBTYPE'].split(', ') -model_reference_name_list = os.environ['MODEL_REFERENCE_NAME'].split(', ') 
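The threshold lists read above are later passed through plot_util.format_thresh, which returns a symbol form and a letter form of each threshold; the scripts then strip the two-character comparison code (letter[2:]) to recover the numeric value for the plot axis. The real implementation lives in plot_util; the sketch below is only a hypothetical reconstruction, assuming MET-style letter codes (ge, gt, le, lt, eq, ne):

    import re

    # Mapping assumed from the MET letter-code convention; illustrative only.
    THRESH_LETTERS = {'>=': 'ge', '>': 'gt', '<=': 'le',
                      '<': 'lt', '==': 'eq', '!=': 'ne'}

    def format_thresh_sketch(thresh):
        """Return (symbol, letter) forms of a threshold such as '>=10'."""
        match = re.match(r'(>=|>|<=|<|==|!=)\s*(\S+)', thresh.strip())
        if match is None:
            raise ValueError('unrecognized threshold: ' + thresh)
        comparison, value = match.groups()
        return comparison + value, THRESH_LETTERS[comparison] + value

    # format_thresh_sketch('>=10') -> ('>=10', 'ge10'); 'ge10'[2:] -> '10'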
-dump_row_filename_template = os.environ['DUMP_ROW_FILENAME'] -average_method = os.environ['AVERAGE_METHOD'] -ci_method = os.environ['CI_METHOD'] -verif_grid = os.environ['VERIF_GRID'] -event_equalization = os.environ['EVENT_EQUALIZATION'] -met_version = os.environ['MET_VERSION'] -input_base_dir = os.environ['INPUT_BASE_DIR'] -output_base_dir = os.environ['OUTPUT_BASE_DIR'] -log_metplus = os.environ['LOG_METPLUS'] -log_level = os.environ['LOG_LEVEL'] - -# General set up and settings -# Plots -warnings.filterwarnings('ignore') -plt.rcParams['font.weight'] = 'bold' -plt.rcParams['axes.labelsize'] = 15 -plt.rcParams['axes.labelweight'] = 'bold' -plt.rcParams['xtick.labelsize'] = 15 -plt.rcParams['ytick.labelsize'] = 15 -plt.rcParams['axes.titlesize'] = 15 -plt.rcParams['axes.titleweight'] = 'bold' -plt.rcParams['axes.formatter.useoffset'] = False -colors = [ - '#000000', '#2F1E80', '#D55E00', '#882255', - '#018C66', '#D6B616', '#036398', '#CC79A7' -] -# Logging -logger = logging.getLogger(log_metplus) -logger.setLevel(log_level) -formatter = logging.Formatter( - '%(asctime)s.%(msecs)03d (%(filename)s:%(lineno)d) %(levelname)s: ' - +'%(message)s', - '%m/%d %H:%M:%S' - ) -file_handler = logging.FileHandler(log_metplus, mode='a') -file_handler.setFormatter(formatter) -logger.addHandler(file_handler) -output_data_dir = os.path.join(output_base_dir, 'data') -output_imgs_dir = os.path.join(output_base_dir, 'imgs') -# Model info -model_info_list = list( - zip(model_list, - model_reference_name_list, - model_obtype_list, - ) -) -nmodels = len(model_info_list) -# Plot info -plot_info_list = list( - itertools.product(*[fcst_lead_list, - fcst_var_level_list, - fcst_var_thresh_list]) - ) -# Date and time infomation and build title for plot -date_beg = os.environ[date_type+'_BEG'] -date_end = os.environ[date_type+'_END'] -date_plot_title = ( - date_type.title()+': ' - +str(datetime.datetime.strptime(date_beg, '%Y%m%d').strftime('%d%b%Y')) - +'-' - +str(datetime.datetime.strptime(date_end, '%Y%m%d').strftime('%d%b%Y')) -) -valid_init_dict = { - 'fcst_valid_hour_beg': fcst_valid_hour_list[0], - 'fcst_valid_hour_end': fcst_valid_hour_list[-1], - 'fcst_init_hour_beg': fcst_init_hour_list[0], - 'fcst_init_hour_end': fcst_init_hour_list[-1], - 'obs_valid_hour_beg': obs_valid_hour_list[0], - 'obs_valid_hour_end': obs_valid_hour_list[-1], - 'obs_init_hour_beg': obs_init_hour_list[0], - 'obs_init_hour_end': obs_init_hour_list[-1], - 'valid_hour_beg': '', - 'valid_hour_end': '', - 'init_hour_beg': '', - 'init_hour_end': '' -} -valid_init_type_list = [ - 'valid_hour_beg', 'valid_hour_end', 'init_hour_beg', 'init_hour_end' -] -for vitype in valid_init_type_list: - if (valid_init_dict['fcst_'+vitype] != '' - and valid_init_dict['obs_'+vitype] == ''): - valid_init_dict[vitype] = valid_init_dict['fcst_'+vitype] - elif (valid_init_dict['obs_'+vitype] != '' - and valid_init_dict['fcst_'+vitype] == ''): - valid_init_dict[vitype] = valid_init_dict['obs_'+vitype] - if valid_init_dict['fcst_'+vitype] == '': - if 'beg' in vitype: - valid_init_dict['fcst_'+vitype] = '000000' - elif 'end' in vitype: - valid_init_dict['fcst_'+vitype] = '235959' - if valid_init_dict['obs_'+vitype] == '': - if 'beg' in vitype: - valid_init_dict['obs_'+vitype] = '000000' - elif 'end' in vitype: - valid_init_dict['obs_'+vitype] = '235959' - if valid_init_dict['fcst_'+vitype] == valid_init_dict['obs_'+vitype]: - valid_init_dict[vitype] = valid_init_dict['fcst_'+vitype] -time_plot_title = '' -for vi in ['valid_hour', 'init_hour']: - beg_hr = 
valid_init_dict[vi+'_beg'] - end_hr = valid_init_dict[vi+'_end'] - fcst_beg_hr = valid_init_dict['fcst_'+vi+'_beg'] - fcst_end_hr = valid_init_dict['fcst_'+vi+'_end'] - obs_beg_hr = valid_init_dict['obs_'+vi+'_beg'] - obs_end_hr = valid_init_dict['obs_'+vi+'_end'] - time_label = vi.split('_')[0].title() - if beg_hr != '' and end_hr != '': - if beg_hr == end_hr: - time_plot_title+=', '+time_label+': '+beg_hr[0:4]+'Z' - else: - time_plot_title+=( - ', '+time_label+': '+beg_hr[0:4]+'-'+end_hr[0:4]+'Z' - ) - else: - if fcst_beg_hr == fcst_end_hr: - time_plot_title+=', Fcst '+time_label+': '+fcst_beg_hr[0:4]+'Z' - else: - time_plot_title+=( - ', Fcst '+time_label+': '+fcst_beg_hr[0:4]+'-' - +fcst_end_hr[0:4]+'Z' - ) - if obs_beg_hr == obs_end_hr: - time_plot_title+=', Obs '+time_label+': '+obs_beg_hr[0:4]+'Z' - else: - time_plot_title+=( - ', Obs '+time_label+': '+obs_beg_hr[0:4]+'-' - +obs_end_hr[0:4]+'Z' - ) -date_time_plot_title = date_plot_title+time_plot_title -# Common plotting information and build title for plot -if 'WV1' not in interp_mthd or interp_mthd != '': - extra_plot_title = verif_grid+'-'+vx_mask -else: - extra_plot_title = interp_mthd+', '+verif_grid+'-'+vx_mask -if desc != '': - extra_plot_title+=', Desc: '+desc -if obs_lead != '': - extra_plot_title+=', Obs Lead: '+obs_lead -if interp_pnts != '': - extra_plot_title+=', Interp. Pts.: '+interp_pnts -if cov_thresh != '': - extra_plot_title+=', Cov. Thresh:'+cov_thresh -if alpha != '': - extra_plot_title+=', Alpha: '+alpha - -# Start looping to make plots -for plot_info in plot_info_list: - fcst_lead = plot_info[0] - fcst_var_level = plot_info[1] - obs_var_level = obs_var_level_list[ - fcst_var_level_list.index(fcst_var_level) - ] - fcst_var_threshs = plot_info[2] - obs_var_threshs = obs_var_thresh_list[ - fcst_var_thresh_list.index(fcst_var_threshs) - ] - fcst_var_threshs_format = np.full_like( - fcst_var_threshs, np.nan, dtype=object - ) - fcst_var_threshs_float = np.full_like( - fcst_var_threshs, np.nan, dtype=float - ) - for fcst_var_thresh in fcst_var_threshs: - fcst_var_thresh_idx = fcst_var_threshs.index(fcst_var_thresh) - fcst_var_thresh_symbol, fcst_var_thresh_letter = ( - plot_util.format_thresh(fcst_var_thresh) - ) - fcst_var_threshs_format[fcst_var_thresh_idx] = fcst_var_thresh_letter - fcst_var_threshs_float[fcst_var_thresh_idx] = ( - fcst_var_thresh_letter[2:] - ) - obs_var_threshs_format = np.full_like( - obs_var_threshs, np.nan, dtype=object - ) - for obs_var_thresh in obs_var_threshs: - obs_var_thresh_idx = obs_var_threshs.index(obs_var_thresh) - obs_var_thresh_symbol, obs_var_thresh_letter = ( - plot_util.format_thresh(obs_var_thresh) - ) - obs_var_threshs_format[obs_var_thresh_idx] = obs_var_thresh_letter - # Build plot title for variable info - fcst_var_plot_title = 'Fcst: '+fcst_var_name+' '+fcst_var_level - obs_var_plot_title = 'Obs: '+obs_var_name+' '+obs_var_level - if 'WV1' in interp_mthd: - fcst_var_plot_title+=' '+interp_mthd - obs_var_plot_title+=' '+interp_mthd - if fcst_var_units == '': - fcst_var_units_list = [] - else: - fcst_var_units_list = fcst_var_units.split(', ') - if obs_var_units == '': - obs_var_units_list = [] - else: - obs_var_units_list = obs_var_units.split(', ') - # Build plot title for forecast lead - fcst_lead_plot_title = 'Fcst Lead: '+fcst_lead[:-4]+'hr' - if fcst_lead[-4:-2] != '00': - fcst_lead_plot_title+=fcst_lead[-4:-2]+'min' - if fcst_lead[-2:] != '00': - fcst_lead_plot_title+=fcst_lead[-2:]+'sec' - logger.info("Working on forecast threshold averages " - +"for forecast lead 
"+fcst_lead+" " - +"for forecast variable "+fcst_var_name+" "+fcst_var_level) - # Set up base name for file naming convention for lead averages files, - # and output data and images - base_name = date_type.lower()+date_beg+'to'+date_end - if (valid_init_dict['valid_hour_beg'] != '' - and valid_init_dict['valid_hour_end'] != ''): - base_name+=( - '_valid'+valid_init_dict['valid_hour_beg'][0:4] - +'to'+valid_init_dict['valid_hour_end'][0:4]+'Z' - ) - else: - base_name+=( - '_fcst_valid'+valid_init_dict['fcst_valid_hour_beg'][0:4] - +'to'+valid_init_dict['fcst_valid_hour_end'][0:4]+'Z' - +'_obs_valid'+valid_init_dict['obs_valid_hour_beg'][0:4] - +'to'+valid_init_dict['obs_valid_hour_end'][0:4]+'Z' - ) - if (valid_init_dict['init_hour_beg'] != '' - and valid_init_dict['init_hour_end'] != ''): - base_name+=( - '_init'+valid_init_dict['init_hour_beg'][0:4] - +'to'+valid_init_dict['init_hour_end'][0:4]+'Z' - ) - else: - base_name+=( - '_fcst_init'+valid_init_dict['fcst_init_hour_beg'][0:4] - +'to'+valid_init_dict['fcst_init_hour_end'][0:4]+'Z' - +'_obs_init'+valid_init_dict['obs_init_hour_beg'][0:4] - +'to'+valid_init_dict['obs_init_hour_end']+'Z' - ) - base_name+=( - '_fcst_lead_avgs' - +'_fcst'+fcst_var_name+fcst_var_level - +'FCSTTHRESHHOLDER'+interp_mthd - +'_obs'+obs_var_name+obs_var_level - +'OBSTHRESHHOLDER'+interp_mthd - +'_vxmask'+vx_mask - ) - if desc != '': - base_name+='_desc'+desc - if obs_lead != '': - base_name+='_obs_lead'+obs_lead - if interp_pnts != '': - base_name+='_interp_pnts'+interp_pnts - if cov_thresh != '': - cov_thresh_symbol, cov_thresh_letter = plot_util.format_thresh( - cov_thresh - ) - base_name+='_cov_thresh'+cov_thresh_letter.replace(',', '_') - if alpha != '': - base_name+='_alpha'+alpha - for stat in stats_list: - logger.debug("Working on "+stat) - stat_plot_name = plot_util.get_stat_plot_name(logger, stat) - if (stat == 'fbar_obar' or stat == 'orate_frate' - or stat == 'baser_frate'): - avg_file_cols = ['LEADS', 'FCST_UNITS', 'OBS_UNITS', - 'VALS', 'OBS_VALS'] - else: - avg_file_cols = ['LEADS', 'FCST_UNITS', 'OBS_UNITS', 'VALS'] - avg_cols_to_array = avg_file_cols[3:] - CI_file_cols = ['LEADS', 'CI_VALS'] - CI_bar_max_widths = np.append( - np.diff(fcst_var_threshs_float), - fcst_var_threshs_float[-1]-fcst_var_threshs_float[-2] - )/1.5 - CI_bar_min_widths = np.append( - np.diff(fcst_var_threshs_float), - fcst_var_threshs_float[-1]-fcst_var_threshs_float[-2] - )/nmodels - CI_bar_intvl_widths = ( - (CI_bar_max_widths-CI_bar_min_widths)/nmodels - ) - # Reading in model lead average files produced from plot_time_series.py - logger.info("Reading in model data") - for model_info in model_info_list: - model_num = model_info_list.index(model_info) + 1 - model_idx = model_info_list.index(model_info) - model_name = model_info[0] - model_plot_name = model_info[1] - model_obtype = model_info[2] - model_avg_data = np.empty( - [len(avg_cols_to_array), len(fcst_var_threshs_format)] - ) - model_avg_data.fill(np.nan) - model_CI_data = np.empty(len(fcst_var_threshs_format)) - model_CI_data.fill(np.nan) - for vt in range(len(fcst_var_threshs_format)): - fcst_var_thresh_format = fcst_var_threshs_format[vt] - obs_var_thresh_format = obs_var_threshs_format[vt] -# lead_avg_filename = ( -# stat+'_' -# +model_plot_name+'_'+model_obtype+'_' -# +base_name.replace('FCSTTHRESHHOLDER', -# str(fcst_var_thresh_format)) \ -# .replace('OBSTHRESHHOLDER', -# str(obs_var_thresh_format)) \ -# +'.txt' -# ) -# lead_avg_file = os.path.join(output_base_dir, 'data', -# lead_avg_filename) - 
model_stat_template = dump_row_filename_template - string_sub_dict = { - 'model': model_name, - 'model_reference': model_plot_name, - 'obtype': model_obtype, - 'fcst_lead': fcst_lead, - 'fcst_level': fcst_var_level, - 'obs_level': obs_var_level, - 'fcst_thresh': fcst_var_thresh, - 'obs_thresh': obs_var_thresh, - } - model_stat_file = do_string_sub(model_stat_template, - **string_sub_dict) - lead_avg_file = get_lead_avg_file(stat, - model_stat_file, - fcst_lead, - output_base_dir) - - if os.path.exists(lead_avg_file): - nrow = sum(1 for line in open(lead_avg_file)) - if nrow == 0: - logger.warning("Model "+str(model_num)+" " - +model_name+" with plot name " - +model_plot_name+" file: " - +lead_avg_file+" empty") - else: - logger.debug("Model "+str(model_num)+" " - +model_name+" with plot name " - +model_plot_name+" file: " - +lead_avg_file+" exists") - model_avg_file_data = pd.read_csv( - lead_avg_file, sep=' ', header=None, - names=avg_file_cols, dtype=str - ) - model_avg_file_data_leads = ( - model_avg_file_data.loc[:]['LEADS'].tolist() - ) - if model_avg_file_data.loc[0]['FCST_UNITS'] == '[NA]': - fcst_var_units_plot_title = '' - else: - fcst_var_units_plot_title = ( - model_avg_file_data.loc[0]['FCST_UNITS'] - ) - if model_avg_file_data.loc[0]['OBS_UNITS'] == '[NA]': - obs_var_units_plot_title = '' - else: - obs_var_units_plot_title = ( - model_avg_file_data.loc[0]['OBS_UNITS'] - ) - model_fcst_lead_idx = ( - model_avg_file_data_leads.index(fcst_lead) - ) - for col in avg_cols_to_array: - col_idx = avg_cols_to_array.index(col) - model_avg_file_data_col = ( - model_avg_file_data.loc[:][col].tolist() - ) - if (model_avg_file_data_col[model_fcst_lead_idx] - != '--'): - model_avg_data[col_idx, vt] = ( - float(model_avg_file_data_col \ - [model_fcst_lead_idx]) - ) - else: - logger.warning("Model "+str(model_num)+" "+model_name+" " - +"with plot name "+model_plot_name+" " - +"file: "+lead_avg_file+" does not exist") -# CI_filename = ( -# stat+'_' -# +model_plot_name+'_'+model_obtype+'_' -# +base_name.replace('FCSTTHRESHHOLDER', -# str(fcst_var_thresh_format)) \ -# .replace('OBSTHRESHHOLDER', -# str(obs_var_thresh_format)) \ -# +'_CI_'+ci_method+'.txt' -# ) -# CI_file = os.path.join(output_base_dir, 'data', CI_filename) - CI_file = get_ci_file(stat, - model_stat_file, - fcst_lead, - output_base_dir, - ci_method) - - if ci_method != 'NONE': - if (stat == 'fbar_obar' or stat == 'orate_frate' - or stat == 'baser_frate'): - diff_from_avg_data = model_avg_data[1,:] - if os.path.exists(CI_file): - nrow = sum(1 for line in open(CI_file)) - if nrow == 0: - logger.warning("Model "+str(model_num)+" " - +model_name+" with plot name " - +model_plot_name+" file: " - +CI_file+" empty") - else: - logger.debug("Model "+str(model_num)+" " - +model_name+" with plot name " - +model_plot_name+" file: " - +CI_file+" exists") - model_CI_file_data = pd.read_csv( - CI_file, sep=' ', header=None, - names=CI_file_cols, dtype=str - ) - model_CI_file_data_leads = ( - model_CI_file_data.loc[:]['LEADS'] \ - .tolist() - ) - model_CI_file_data_vals = ( - model_CI_file_data.loc[:]['CI_VALS'] \ - .tolist() - ) - model_fcst_lead_idx = ( - model_CI_file_data_leads.index(fcst_lead) - ) - if (model_CI_file_data_vals \ - [model_fcst_lead_idx] - != '--'): - model_CI_data[vt] = ( - float(model_CI_file_data_vals \ - [model_fcst_lead_idx]) - ) - else: - logger.warning("Model "+str(model_num)+" " - +model_name+" with plot name " - +model_plot_name+" file: " - +CI_file+" does not exist") - else: - if model_num == 1: - diff_from_avg_data 
= model_avg_data[0,:] - else: - if os.path.exists(CI_file): - nrow = sum(1 for line in open(CI_file)) - if nrow == 0: - logger.warning("Model "+str(model_num)+" " - +model_name+" with plot " - +"name " - +model_plot_name+" file: " - +CI_file+" empty") - else: - logger.debug("Model "+str(model_num)+" " - +model_name+" with plot " - +"name " - +model_plot_name+" file: " - +CI_file+" exists") - model_CI_file_data = pd.read_csv( - CI_file, sep=' ', header=None, - names=CI_file_cols, dtype=str - ) - model_CI_file_data_leads = ( - model_CI_file_data.loc[:]['LEADS'] \ - .tolist() - ) - model_CI_file_data_vals = ( - model_CI_file_data.loc[:]['CI_VALS'] \ - .tolist() - ) - model_fcst_lead_idx = ( - model_CI_file_data_leads.index( - fcst_lead - ) - ) - if (model_CI_file_data_vals \ - [model_fcst_lead_idx] - != '--'): - model_CI_data[vt] = ( - float(model_CI_file_data_vals \ - [model_fcst_lead_idx]) - ) - else: - logger.warning("Model "+str(model_num)+" " - +model_name+" with plot name " - +model_plot_name+" file: " - +CI_file+" does not exist") - if model_num == 1: - fig, (ax1, ax2) = plt.subplots(2,1,figsize=(10,12), - sharex=True) - ax1.grid(True) - ax1.tick_params(axis='x', pad=15) - ax1.set_xticks(fcst_var_threshs_float) - ax1.set_xticklabels(fcst_var_threshs_format) - ax1.set_xlim([fcst_var_threshs_float[0], - fcst_var_threshs_float[-1]]) - ax1.tick_params(axis='y', pad=15) - ax1.set_ylabel(average_method.title(), labelpad=30) - ax2.grid(True) - ax2.tick_params(axis='x', pad=15) - ax2.set_xlabel('Forecast Threshold', labelpad=30) - ax2.tick_params(axis='y', pad=15) - ax2.set_ylabel('Difference', labelpad=30) - boxstyle = matplotlib.patches.BoxStyle('Square', pad=0.25) - props = {'boxstyle': boxstyle, - 'facecolor': 'white', - 'linestyle': 'solid', - 'linewidth': 1, - 'edgecolor': 'black',} - ax2.text(0.7055, 1.05, 'Note: differences outside the ' - +'outline bars are significant\n at the 95% ' - +'confidence interval', ha='center', va='center', - fontsize=10, bbox=props, transform=ax2.transAxes) - if (stat == 'fbar_obar' or stat == 'orate_frate' - or stat == 'baser_frate'): - ax1.plot(fcst_var_threshs_float, model_avg_data[1,:], - color='#888888', - ls='-', linewidth=2.0, - marker='o', markersize=7, - label='obs', - zorder=4) - ax2.plot(fcst_var_threshs_float, - np.zeros_like(fcst_var_threshs_float), - color='#888888', - ls='-', linewidth=2.0, - zorder=4) - ax2.plot(fcst_var_threshs_float, - model_avg_data[0,:] - diff_from_avg_data, - color=colors[model_idx], - ls='-', linewidth=2.0, - marker='o', markersize=7, - zorder=(nmodels-model_idx)+4) - else: - ax2.plot(fcst_var_threshs_float, - np.zeros_like(fcst_var_threshs_float), - color='black', - ls='-', linewidth=2.0, - zorder=4) - ax1.plot(fcst_var_threshs_float, model_avg_data[0,:], - color=colors[model_idx], - ls='-', linewidth=2.0, - marker='o', markersize=7, - label=model_plot_name, - zorder=(nmodels-model_idx)+4) - else: - ax1.plot(fcst_var_threshs_float, model_avg_data[0,:], - color=colors[model_idx], - ls='-', linewidth=2.0, - marker='o', markersize=7, - label=model_plot_name, - zorder=(nmodels-model_idx)+4) - ax2.plot(fcst_var_threshs_float, - model_avg_data[0,:] - diff_from_avg_data, - color=colors[model_idx], - ls='-', linewidth=2.0, - marker='o', markersize=7, - zorder=(nmodels-model_idx)+4) - ax2.bar(fcst_var_threshs_float, 2*np.absolute(model_CI_data), - bottom=-1*np.absolute(model_CI_data), - width=CI_bar_max_widths-(CI_bar_intvl_widths*model_idx), - color='None', edgecolor=colors[model_idx], linewidth=1.5) - 
fig.suptitle(stat_plot_name+'\n' - +fcst_var_plot_title+' '+fcst_var_units_plot_title - +', '+obs_var_plot_title+' '+obs_var_units_plot_title+'\n' - +extra_plot_title+'\n' - +date_time_plot_title, - fontsize=14, fontweight='bold') - if (stat == 'fbar_obar' or stat == 'orate_frate' - or stat == 'baser_frate'): - ax1.legend(bbox_to_anchor=(0.0, 1.01, 1.0, .102), loc=3, - ncol=nmodels+1, fontsize='13', - mode='expand', borderaxespad=0.) - else: - ax1.legend(bbox_to_anchor=(0.0, 1.01, 1.0, .102), loc=3, - ncol=nmodels, fontsize='13', - mode='expand', borderaxespad=0.) - savefig_imagename = (stat+'_' - +base_name.replace('FCSTTHRESHHOLDER', - 'all') \ - .replace('OBSTHRESHHOLDER', - 'all') \ - .replace('_avgs', fcst_lead) - +'.png') - savefig_image = os.path.join(output_base_dir, 'images', - savefig_imagename) - logger.info("Saving image as "+savefig_image) - plt.savefig(savefig_image, bbox_inches='tight') - plt.close() diff --git a/ush/plotting_scripts/plot_threshold_by_lead.py b/ush/plotting_scripts/plot_threshold_by_lead.py deleted file mode 100644 index 2511db6de..000000000 --- a/ush/plotting_scripts/plot_threshold_by_lead.py +++ /dev/null @@ -1,700 +0,0 @@ -''' -Name: plot_threshold_by_lead.py -Contact(s): Mallory Row -Abstract: Reads average files from plot_time_series.py to make threshold-lead plots -History Log: First version -Usage: Called by make_plots_wrapper.py -Parameters: None -Input Files: Text files -Output Files: .png images -Condition codes: 0 for success, 1 for failure -''' - -import os -import sys -import numpy as np -import pandas as pd -import itertools -import warnings -import logging -import datetime -import re -import matplotlib -matplotlib.use('agg') -import matplotlib.pyplot as plt -import matplotlib.dates as md -import matplotlib.gridspec as gridspec - -import plot_util as plot_util -from plot_util import get_lead_avg_file - -# add metplus directory to path so the wrappers and utilities can be found -sys.path.insert(0, os.path.abspath(os.path.join(os.path.dirname(__file__), - '..', - '..'))) -from metplus.util import do_string_sub - -# Read environment variables set in make_plots_wrapper.py -verif_case = os.environ['VERIF_CASE'] -verif_type = os.environ['VERIF_TYPE'] -date_type = os.environ['DATE_TYPE'] -valid_beg = os.environ['VALID_BEG'] -valid_end = os.environ['VALID_END'] -init_beg = os.environ['INIT_BEG'] -init_end = os.environ['INIT_END'] -fcst_valid_hour_list = os.environ['FCST_VALID_HOUR'].split(', ') -fcst_valid_hour = os.environ['FCST_VALID_HOUR'] -fcst_init_hour_list = os.environ['FCST_INIT_HOUR'].split(', ') -fcst_init_hour = os.environ['FCST_INIT_HOUR'] -obs_valid_hour_list = os.environ['OBS_VALID_HOUR'].split(', ') -obs_valid_hour = os.environ['OBS_VALID_HOUR'] -obs_init_hour_list = os.environ['OBS_INIT_HOUR'].split(', ') -obs_init_hour = os.environ['OBS_INIT_HOUR'] -fcst_lead_list = [os.environ['FCST_LEAD'].split(', ')] -fcst_var_name = os.environ['FCST_VAR'] -fcst_var_units = os.environ['FCST_UNITS'] -fcst_var_level_list = os.environ['FCST_LEVEL'].split(', ') -fcst_var_thresh_list = [os.environ['FCST_THRESH'].split(', ')] -obs_var_name = os.environ['OBS_VAR'] -obs_var_units = os.environ['OBS_UNITS'] -obs_var_level_list = os.environ['OBS_LEVEL'].split(', ') -obs_var_thresh_list = [os.environ['OBS_THRESH'].split(', ')] -interp_mthd = os.environ['INTERP_MTHD'] -interp_pnts = os.environ['INTERP_PNTS'] -vx_mask = os.environ['VX_MASK'] -alpha = os.environ['ALPHA'] -desc = os.environ['DESC'] -obs_lead = os.environ['OBS_LEAD'] -cov_thresh = os.environ['COV_THRESH']
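In the plotting loop that follows, each FCST_LEAD entry is a string of the form H[H...]MMSS (for example '240000' for 24 hours) and is converted to seconds with datetime.timedelta. A self-contained sketch of that conversion, with a hypothetical helper name:

    import datetime

    def lead_to_seconds(fcst_lead):
        """Convert a lead string of the form H[H...]MMSS to total seconds."""
        return datetime.timedelta(
            hours=int(fcst_lead[:-4]),      # everything before MMSS is hours
            minutes=int(fcst_lead[-4:-2]),
            seconds=int(fcst_lead[-2:])
        ).total_seconds()

    # lead_to_seconds('240000') == 86400.0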
-stats_list = os.environ['STATS'].split(', ') -model_list = os.environ['MODEL'].split(', ') -model_obtype_list = os.environ['MODEL_OBTYPE'].split(', ') -model_reference_name_list = os.environ['MODEL_REFERENCE_NAME'].split(', ') -dump_row_filename_template = os.environ['DUMP_ROW_FILENAME'] -average_method = os.environ['AVERAGE_METHOD'] -ci_method = os.environ['CI_METHOD'] -verif_grid = os.environ['VERIF_GRID'] -event_equalization = os.environ['EVENT_EQUALIZATION'] -met_version = os.environ['MET_VERSION'] -input_base_dir = os.environ['INPUT_BASE_DIR'] -output_base_dir = os.environ['OUTPUT_BASE_DIR'] -log_metplus = os.environ['LOG_METPLUS'] -log_level = os.environ['LOG_LEVEL'] - -# General set up and settings -# Plots -warnings.filterwarnings('ignore') -plt.rcParams['font.weight'] = 'bold' -plt.rcParams['axes.labelsize'] = 15 -plt.rcParams['axes.labelweight'] = 'bold' -plt.rcParams['xtick.labelsize'] = 15 -plt.rcParams['ytick.labelsize'] = 15 -plt.rcParams['axes.titlesize'] = 15 -plt.rcParams['axes.titleweight'] = 'bold' -plt.rcParams['axes.formatter.useoffset'] = False -cmap_bias = plt.cm.PiYG_r -cmap = plt.cm.BuPu -cmap_diff = plt.cm.coolwarm -# Logging -logger = logging.getLogger(log_metplus) -logger.setLevel(log_level) -formatter = logging.Formatter( - '%(asctime)s.%(msecs)03d (%(filename)s:%(lineno)d) %(levelname)s: ' - +'%(message)s', - '%m/%d %H:%M:%S' - ) -file_handler = logging.FileHandler(log_metplus, mode='a') -file_handler.setFormatter(formatter) -logger.addHandler(file_handler) -output_data_dir = os.path.join(output_base_dir, 'data') -output_imgs_dir = os.path.join(output_base_dir, 'imgs') -# Model info -model_info_list = list( - zip(model_list, - model_reference_name_list, - model_obtype_list, - ) -) -nmodels = len(model_info_list) -# Plot info -plot_info_list = list( - itertools.product(*[fcst_lead_list, - fcst_var_level_list, - fcst_var_thresh_list]) - ) -# Date and time infomation and build title for plot -date_beg = os.environ[date_type+'_BEG'] -date_end = os.environ[date_type+'_END'] -date_plot_title = ( - date_type.title()+': ' - +str(datetime.datetime.strptime(date_beg, '%Y%m%d').strftime('%d%b%Y')) - +'-' - +str(datetime.datetime.strptime(date_end, '%Y%m%d').strftime('%d%b%Y')) -) -valid_init_dict = { - 'fcst_valid_hour_beg': fcst_valid_hour_list[0], - 'fcst_valid_hour_end': fcst_valid_hour_list[-1], - 'fcst_init_hour_beg': fcst_init_hour_list[0], - 'fcst_init_hour_end': fcst_init_hour_list[-1], - 'obs_valid_hour_beg': obs_valid_hour_list[0], - 'obs_valid_hour_end': obs_valid_hour_list[-1], - 'obs_init_hour_beg': obs_init_hour_list[0], - 'obs_init_hour_end': obs_init_hour_list[-1], - 'valid_hour_beg': '', - 'valid_hour_end': '', - 'init_hour_beg': '', - 'init_hour_end': '' -} -valid_init_type_list = [ - 'valid_hour_beg', 'valid_hour_end', 'init_hour_beg', 'init_hour_end' -] -for vitype in valid_init_type_list: - if (valid_init_dict['fcst_'+vitype] != '' - and valid_init_dict['obs_'+vitype] == ''): - valid_init_dict[vitype] = valid_init_dict['fcst_'+vitype] - elif (valid_init_dict['obs_'+vitype] != '' - and valid_init_dict['fcst_'+vitype] == ''): - valid_init_dict[vitype] = valid_init_dict['obs_'+vitype] - if valid_init_dict['fcst_'+vitype] == '': - if 'beg' in vitype: - valid_init_dict['fcst_'+vitype] = '000000' - elif 'end' in vitype: - valid_init_dict['fcst_'+vitype] = '235959' - if valid_init_dict['obs_'+vitype] == '': - if 'beg' in vitype: - valid_init_dict['obs_'+vitype] = '000000' - elif 'end' in vitype: - valid_init_dict['obs_'+vitype] = '235959' - if 
valid_init_dict['fcst_'+vitype] == valid_init_dict['obs_'+vitype]: - valid_init_dict[vitype] = valid_init_dict['fcst_'+vitype] -time_plot_title = '' -for vi in ['valid_hour', 'init_hour']: - beg_hr = valid_init_dict[vi+'_beg'] - end_hr = valid_init_dict[vi+'_end'] - fcst_beg_hr = valid_init_dict['fcst_'+vi+'_beg'] - fcst_end_hr = valid_init_dict['fcst_'+vi+'_end'] - obs_beg_hr = valid_init_dict['obs_'+vi+'_beg'] - obs_end_hr = valid_init_dict['obs_'+vi+'_end'] - time_label = vi.split('_')[0].title() - if beg_hr != '' and end_hr != '': - if beg_hr == end_hr: - time_plot_title+=', '+time_label+': '+beg_hr[0:4]+'Z' - else: - time_plot_title+=( - ', '+time_label+': '+beg_hr[0:4]+'-'+end_hr[0:4]+'Z' - ) - else: - if fcst_beg_hr == fcst_end_hr: - time_plot_title+=', Fcst '+time_label+': '+fcst_beg_hr[0:4]+'Z' - else: - time_plot_title+=( - ', Fcst '+time_label+': '+fcst_beg_hr[0:4]+'-' - +fcst_end_hr[0:4]+'Z' - ) - if obs_beg_hr == obs_end_hr: - time_plot_title+=', Obs '+time_label+': '+obs_beg_hr[0:4]+'Z' - else: - time_plot_title+=( - ', Obs '+time_label+': '+obs_beg_hr[0:4]+'-' - +obs_end_hr[0:4]+'Z' - ) -date_time_plot_title = date_plot_title+time_plot_title -# Common plotting information and build title for plot -if 'WV1' not in interp_mthd or interp_mthd != '': - extra_plot_title = verif_grid+'-'+vx_mask -else: - extra_plot_title = interp_mthd+', '+verif_grid+'-'+vx_mask -if desc != '': - extra_plot_title+=', Desc: '+desc -if obs_lead != '': - extra_plot_title+=', Obs Lead: '+obs_lead -if interp_pnts != '': - extra_plot_title+=', Interp. Pts.: '+interp_pnts -if cov_thresh != '': - extra_plot_title+=', Cov. Thresh:'+cov_thresh -if alpha != '': - extra_plot_title+=', Alpha: '+alpha - -# Start looping to make plots -for plot_info in plot_info_list: - fcst_leads = plot_info[0] - fcst_lead_timedeltas = np.full_like(fcst_leads, np.nan, dtype=float) - for fcst_lead in fcst_leads: - fcst_lead_idx = fcst_leads.index(fcst_lead) - fcst_lead_timedelta = datetime.timedelta( - hours=int(fcst_lead[:-4]), - minutes=int(fcst_lead[-4:-2]), - seconds=int(fcst_lead[-2:]) - ).total_seconds() - fcst_lead_timedeltas[fcst_lead_idx] = float(fcst_lead_timedelta) - fcst_lead_timedeltas_str = [] - for tdelta in fcst_lead_timedeltas: - h = int(tdelta/3600) - m = int((tdelta-(h*3600))/60) - s = int(tdelta-(h*3600)-(m*60)) - if h < 100: - tdelta_str = f"{h:02d}" - else: - tdelta_str = f"{h:03d}" - if m != 0: - tdelta_str+=f":{m:02d}" - if s != 0: - tdelta_str+=f":{s:02d}" - fcst_lead_timedeltas_str.append(tdelta_str) - fcst_var_level = plot_info[1] - obs_var_level = obs_var_level_list[ - fcst_var_level_list.index(fcst_var_level) - ] - fcst_var_threshs = plot_info[2] - obs_var_threshs = obs_var_thresh_list[ - fcst_var_thresh_list.index(fcst_var_threshs) - ] - fcst_var_threshs_format = np.full_like( - fcst_var_threshs, np.nan, dtype=object - ) - fcst_var_threshs_float = np.full_like( - fcst_var_threshs, np.nan, dtype=float - ) - for fcst_var_thresh in fcst_var_threshs: - fcst_var_thresh_idx = fcst_var_threshs.index(fcst_var_thresh) - fcst_var_thresh_symbol, fcst_var_thresh_letter = ( - plot_util.format_thresh(fcst_var_thresh) - ) - fcst_var_threshs_format[fcst_var_thresh_idx] = fcst_var_thresh_letter - fcst_var_threshs_float[fcst_var_thresh_idx] = ( - fcst_var_thresh_letter[2:] - ) - xmesh, ymesh = np.meshgrid(fcst_var_threshs_float, fcst_lead_timedeltas) - obs_var_threshs_format = np.full_like( - obs_var_threshs, np.nan, dtype=object - ) - for obs_var_thresh in obs_var_threshs: - obs_var_thresh_idx = 
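# Hedged sketch (editor's addition): forecast leads arrive as
# 'HHMMSS'-style strings (e.g. '240000') and are parsed above with
# fcst_lead[:-4] hours, [-4:-2] minutes, [-2:] seconds. Standalone
# version of that conversion, assuming a 24-hour demo lead:
import datetime

fcst_lead = '240000'  # hypothetical lead
lead_seconds = datetime.timedelta(
    hours=int(fcst_lead[:-4]),
    minutes=int(fcst_lead[-4:-2]),
    seconds=int(fcst_lead[-2:])
).total_seconds()
assert lead_seconds == 86400.0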
obs_var_threshs.index(obs_var_thresh) - obs_var_thresh_symbol, obs_var_thresh_letter = ( - plot_util.format_thresh(obs_var_thresh) - ) - obs_var_threshs_format[obs_var_thresh_idx] = obs_var_thresh_letter - # Build plot title for variable info - fcst_var_plot_title = 'Fcst: '+fcst_var_name+' '+fcst_var_level - obs_var_plot_title = 'Obs: '+obs_var_name+' '+obs_var_level - if 'WV1' in interp_mthd: - fcst_var_plot_title+=' '+interp_mthd - obs_var_plot_title+=' '+interp_mthd - if fcst_var_units == '': - fcst_var_units_list = [] - else: - fcst_var_units_list = fcst_var_units.split(', ') - if obs_var_units == '': - obs_var_units_list = [] - else: - obs_var_units_list = obs_var_units.split(', ') - # Set up base name for file naming convention for lead averages files, - # and output data and images - base_name = date_type.lower()+date_beg+'to'+date_end - if (valid_init_dict['valid_hour_beg'] != '' - and valid_init_dict['valid_hour_end'] != ''): - base_name+=( - '_valid'+valid_init_dict['valid_hour_beg'][0:4] - +'to'+valid_init_dict['valid_hour_end'][0:4]+'Z' - ) - else: - base_name+=( - '_fcst_valid'+valid_init_dict['fcst_valid_hour_beg'][0:4] - +'to'+valid_init_dict['fcst_valid_hour_end'][0:4]+'Z' - +'_obs_valid'+valid_init_dict['obs_valid_hour_beg'][0:4] - +'to'+valid_init_dict['obs_valid_hour_end'][0:4]+'Z' - ) - if (valid_init_dict['init_hour_beg'] != '' - and valid_init_dict['init_hour_end'] != ''): - base_name+=( - '_init'+valid_init_dict['init_hour_beg'][0:4] - +'to'+valid_init_dict['init_hour_end'][0:4]+'Z' - ) - else: - base_name+=( - '_fcst_init'+valid_init_dict['fcst_init_hour_beg'][0:4] - +'to'+valid_init_dict['fcst_init_hour_end'][0:4]+'Z' - +'_obs_init'+valid_init_dict['obs_init_hour_beg'][0:4] - +'to'+valid_init_dict['obs_init_hour_end']+'Z' - ) - base_name+=( - '_fcst_lead_avgs' - +'_fcst'+fcst_var_name+fcst_var_level - +'FCSTTHRESHHOLDER'+interp_mthd - +'_obs'+obs_var_name+obs_var_level - +'OBSTHRESHHOLDER'+interp_mthd - +'_vxmask'+vx_mask - ) - if desc != '': - base_name+='_desc'+desc - if obs_lead != '': - base_name+='_obs_lead'+obs_lead - if interp_pnts != '': - base_name+='_interp_pnts'+interp_pnts - if cov_thresh != '': - cov_thresh_symbol, cov_thresh_letter = plot_util.format_thresh( - cov_thresh - ) - base_name+='_cov_thresh'+cov_thresh_letter.replace(',', '_') - if alpha != '': - base_name+='_alpha'+alpha - for stat in stats_list: - logger.debug("Working on "+stat) - stat_plot_name = plot_util.get_stat_plot_name(logger, stat) - if (stat == 'fbar_obar' or stat == 'orate_frate' - or stat == 'baser_frate'): - avg_file_cols = ['LEADS', 'FCST_UNITS', 'OBS_UNITS', - 'VALS', 'OBS_VALS'] - else: - avg_file_cols = ['LEADS', 'FCST_UNITS', 'OBS_UNITS', 'VALS'] - avg_cols_to_array = avg_file_cols[3:] - # Reading in model lead average files produced from plot_time_series.py - logger.info("Reading in model data") - for model_info in model_info_list: - model_num = model_info_list.index(model_info) + 1 - model_idx = model_info_list.index(model_info) - model_name = model_info[0] - model_plot_name = model_info[1] - model_obtype = model_info[2] - model_avg_data = np.empty( - [len(avg_cols_to_array), len(fcst_leads), - len(fcst_var_threshs_format)] - ) - model_avg_data.fill(np.nan) - for vt in range(len(fcst_var_threshs_format)): - fcst_var_thresh_format = fcst_var_threshs_format[vt] - obs_var_thresh_format = obs_var_threshs_format[vt] -# lead_avg_filename = ( -# stat+'_' -# +model_plot_name+'_'+model_obtype+'_' -# +base_name.replace('FCSTTHRESHHOLDER', -# str(fcst_var_thresh_format)) \ -# 
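# Hedged sketch (editor's addition): output file names above embed the
# placeholder tokens 'FCSTTHRESHHOLDER' and 'OBSTHRESHHOLDER', swapped
# per threshold (or for 'all') via chained str.replace. Illustration
# with a made-up base name:
base = 'stat_fcstTMP2mFCSTTHRESHHOLDER_obsTMP2mOBSTHRESHHOLDER'
name = (base.replace('FCSTTHRESHHOLDER', 'ge5')
            .replace('OBSTHRESHHOLDER', 'ge5'))
assert name == 'stat_fcstTMP2mge5_obsTMP2mge5'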
.replace('OBSTHRESHHOLDER', -# str(obs_var_thresh_format)) \ -# +'.txt' -# ) - logger.info("Working on forecast lead averages " - +"for forecast variable "+fcst_var_name+" " - +fcst_var_level+" "+fcst_var_thresh_format) -# lead_avg_file = os.path.join(output_base_dir, 'data', -# lead_avg_filename) - model_stat_template = dump_row_filename_template - string_sub_dict = { - 'model': model_name, - 'model_reference': model_plot_name, - 'obtype': model_obtype, - 'fcst_lead': fcst_lead, - 'fcst_level': fcst_var_level, - 'obs_level': obs_var_level, - 'fcst_thresh': fcst_var_thresh, - 'obs_thresh': obs_var_thresh, - } - model_stat_file = do_string_sub(model_stat_template, - **string_sub_dict) - lead_avg_file = get_lead_avg_file(stat, - model_stat_file, - fcst_lead, - output_base_dir) - - if os.path.exists(lead_avg_file): - nrow = sum(1 for line in open(lead_avg_file)) - if nrow == 0: - logger.warning("Model "+str(model_num)+" " - +model_name+" with plot name " - +model_plot_name+" file: " - +lead_avg_file+" empty") - else: - logger.debug("Model "+str(model_num)+" " - +model_name+" with plot name " - +model_plot_name+" file: " - +lead_avg_file+" exists") - model_avg_file_data = pd.read_csv( - lead_avg_file, sep=' ', header=None, - names=avg_file_cols, dtype=str - ) - model_avg_file_data_leads = ( - model_avg_file_data.loc[:]['LEADS'].tolist() - ) - if model_avg_file_data.loc[0]['FCST_UNITS'] == '[NA]': - fcst_var_units_plot_title = '' - else: - fcst_var_units_plot_title = ( - model_avg_file_data.loc[0]['FCST_UNITS'] - ) - if model_avg_file_data.loc[0]['OBS_UNITS'] == '[NA]': - obs_var_units_plot_title = '' - else: - obs_var_units_plot_title = ( - model_avg_file_data.loc[0]['OBS_UNITS'] - ) - for fcst_lead in fcst_leads: - fcst_lead_idx = fcst_leads.index(fcst_lead) - if fcst_lead in model_avg_file_data_leads: - model_fcst_lead_idx = ( - model_avg_file_data_leads.index( - fcst_lead - ) - ) - for col in avg_cols_to_array: - col_idx = avg_cols_to_array.index(col) - model_avg_file_data_col = ( - model_avg_file_data.loc[:][col].tolist() - ) - if (model_avg_file_data_col[model_fcst_lead_idx] - != '--'): - model_avg_data[col_idx, - fcst_lead_idx, vt] = ( - float(model_avg_file_data_col \ - [model_fcst_lead_idx]) - ) - else: - logger.warning("Model "+str(model_num)+" "+model_name+" " - +"with plot name "+model_plot_name+" " - +"file: "+lead_avg_file+" does not exist") - if model_num == 1: - if (stat == 'fbar_obar' or stat == 'orate_frate' - or stat == 'baser_frate'): - nsubplots = nmodels + 1 - else: - nsubplots = nmodels - if nsubplots == 1: - fig = plt.figure(figsize=(10,12)) - gs = gridspec.GridSpec(1,1) - elif nsubplots == 2: - fig = plt.figure(figsize=(10,12)) - gs = gridspec.GridSpec(2,1) - gs.update(hspace=0.35) - elif nsubplots > 2 and nsubplots <= 4: - fig = plt.figure(figsize=(20,12)) - gs = gridspec.GridSpec(2,2) - gs.update(wspace=0.4, hspace=0.35) - elif nsubplots > 4 and nsubplots <= 6: - fig = plt.figure(figsize=(30,12)) - gs = gridspec.GridSpec(2,3) - gs.update(wspace=0.4, hspace=0.35) - elif nsubplots > 6 and nsubplots <= 9: - fig = plt.figure(figsize=(30,18)) - gs = gridspec.GridSpec(3,3) - gs.update(wspace=0.4, hspace=0.35) - if (stat == 'fbar_obar' or stat == 'orate_frate' - or stat == 'baser_frate'): - logger.debug("Plotting observations") - obs_avg_data = model_avg_data[1,:,:] - ax = plt.subplot(gs[0]) - ax.grid(True) - ax.tick_params(axis='x', pad=15) - ax.set_xlabel('Forecast Threshold', labelpad=20) - ax.set_xticks(fcst_var_threshs_float) - 
ax.set_xticklabels(fcst_var_threshs_format) - ax.set_xlim([fcst_var_threshs_float[0], - fcst_var_threshs_float[-1]]) - ax.tick_params(axis='y', pad=15) - ax.set_ylabel('Forecast Lead', labelpad=20) - ax.set_yticks(fcst_lead_timedeltas) - ax.set_yticklabels(fcst_lead_timedeltas_str) - ax.set_ylim([fcst_lead_timedeltas[0], - fcst_lead_timedeltas[-1]]) - ax.set_title('obs', loc='left') - CF1 = ax.contourf(xmesh, ymesh, obs_avg_data, - cmap=cmap, - locator=matplotlib.ticker.MaxNLocator( - symmetric=True - ), extend='both') - C1 = ax.contour(xmesh, ymesh, obs_avg_data, - levels=CF1.levels, - colors='k', - linewidths=1.0) - ax.clabel(C1, CF1.levels, - fmt='%1.2f', - inline=True, - fontsize=12.5) - if (stat == 'fbar_obar' or stat == 'orate_frate' - or stat == 'baser_frate'): - ax = plt.subplot(gs[model_num]) - else: - ax = plt.subplot(gs[model_idx]) - ax.tick_params(axis='x', pad=15) - ax.set_xlabel('Forecast Threshold', labelpad=20) - ax.set_xticks(fcst_var_threshs_float) - ax.set_xticklabels(fcst_var_threshs_format) - ax.set_xlim([fcst_var_threshs_float[0], - fcst_var_threshs_float[-1]]) - ax.tick_params(axis='y', pad=15) - ax.set_ylabel('Forecast Lead', labelpad=20) - ax.set_yticks(fcst_lead_timedeltas) - ax.set_yticklabels(fcst_lead_timedeltas_str) - ax.set_ylim([fcst_lead_timedeltas[0], - fcst_lead_timedeltas[-1]]) - if (stat == 'fbar_obar' or stat == 'orate_frate' - or stat == 'baser_frate'): - logger.debug("Plotting model "+str(model_num)+" " - +model_name+" - obs " - +"with name on plot "+model_plot_name+" " - +"- obs") - ax.set_title(model_plot_name+' - obs', loc='left') - model_obs_diff = ( - model_avg_data[0,:,:] - - model_avg_data[1,:,:] - ) - if model_num == 1: - clevels_diff = plot_util.get_clevels(model_obs_diff) - CF2 = ax.contourf(xmesh, ymesh, model_obs_diff, - levels=clevels_diff, - cmap=cmap_diff, - locator= matplotlib.ticker.MaxNLocator( - symmetric=True - ), - extend='both') - C2 = ax.contour(xmesh, ymesh, model_obs_diff, - levels=CF2.levels, colors='k', - linewidths=1.0) - ax.clabel(C2, C2.levels, - fmt='%1.2f', - inline=True, - fontsize=12.5) - else: - CF = ax.contourf(xmesh, ymesh, model_obs_diff, - levels=CF2.levels, - cmap=cmap_diff, - locator= matplotlib.ticker.MaxNLocator( - symmetric=True - ), - extend='both') - C = ax.contour(xmesh, ymesh, model_obs_diff, - levels=CF2.levels, - colors='k', - linewidths=1.0) - ax.clabel(C, C.levels, - fmt='%1.2f', - inline=True, - fontsize=12.5) - elif stat == 'bias' or stat == 'fbias': - logger.debug("Plotting model "+str(model_num) - +" "+model_name+" with name on plot " - +model_plot_name) - ax.set_title(model_plot_name, loc='left') - if model_num == 1: - clevels_bias = plot_util.get_clevels( - model_avg_data[0,:,:] - ) - CF1 = ax.contourf(xmesh, ymesh, model_avg_data[0,:,:], - levels=clevels_bias, - cmap=cmap_bias, - locator=matplotlib.ticker.MaxNLocator( - symmetric=True - ), extend='both') - C1 = ax.contour(xmesh, ymesh, model_avg_data[0,:,:], - levels=CF1.levels, - colors='k', - linewidths=1.0) - ax.clabel(C1, C1.levels, - fmt='%1.2f', - inline=True, - fontsize=12.5) - else: - CF = ax.contourf(xmesh, ymesh, model_avg_data[0,:,:], - levels=CF1.levels, - cmap=cmap_bias, - extend='both') - C = ax.contour(xmesh, ymesh, model_avg_data[0,:,:], - levels=CF1.levels, - colors='k', - linewidths=1.0) - ax.clabel(C, C.levels, - fmt='%1.2f', - inline=True, - fontsize=12.5) - else: - if model_num == 1: - logger.debug("Plotting model "+str(model_num)+" " - +model_name+" with name on plot " - +model_plot_name) - model1_name = model_name - 
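# Hedged sketch (editor's addition): the plotting code above reuses the
# first panel's contour levels (CF1.levels / CF2.levels) so every
# subplot shares one color scale. Minimal standalone example of that
# matplotlib idiom:
import numpy as np
import matplotlib
matplotlib.use('agg')
import matplotlib.pyplot as plt

x = y = np.linspace(0.0, 1.0, 10)
X, Y = np.meshgrid(x, y)
fig, (ax1, ax2) = plt.subplots(1, 2)
CF1 = ax1.contourf(X, Y, X - Y, cmap=plt.cm.coolwarm, extend='both')
# second panel inherits the first panel's levels for direct comparison
ax2.contourf(X, Y, (X - Y) * 0.5, levels=CF1.levels,
             cmap=plt.cm.coolwarm, extend='both')
plt.close(fig)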
model1_plot_name = model_plot_name - model1_avg_data = model_avg_data[0,:,:] - ax.set_title(model_plot_name, loc='left') - CF1 = ax.contourf(xmesh, ymesh, model_avg_data[0,:,:], - cmap=cmap, - extend='both') - C1 = ax.contour(xmesh, ymesh, model_avg_data[0,:,:], - levels=CF1.levels, - colors='k', - linewidths=1.0) - ax.clabel(C1, C1.levels, - fmt='%1.2f', - inline=True, - fontsize=12.5) - else: - logger.debug("Plotting model "+str(model_num)+" " - +model_name+" - model 1 "+model1_name+" " - +"with name on plot "+model_plot_name+" " - +"- "+model1_plot_name) - ax.set_title(model_plot_name+' - '+model1_plot_name, - loc='left') - model_model1_diff = ( - model_avg_data[0,:,:] - model1_avg_data - ) - if model_num == 2: - clevels_diff = plot_util.get_clevels(model_model1_diff) - CF2 = ax.contourf(xmesh, ymesh, model_model1_diff, - levels=clevels_diff, - cmap=cmap_diff, - locator= \ - matplotlib.ticker.MaxNLocator( - symmetric=True - ), - extend='both') - C2 = ax.contour(xmesh, ymesh, model_model1_diff, - levels=CF2.levels, colors='k', - linewidths=1.0) - ax.clabel(C2, C2.levels, - fmt='%1.2f', - inline=True, - fontsize=12.5) - else: - CF = ax.contourf(xmesh, ymesh, model_model1_diff, - levels=CF2.levels, - cmap=cmap_diff, - locator= \ - matplotlib.ticker.MaxNLocator( - symmetric=True - ), - extend='both') - C = ax.contour(xmesh, ymesh, model_model1_diff, - levels=CF2.levels, - colors='k', - linewidths=1.0) - ax.clabel(C, C.levels, - fmt='%1.2f', - inline=True, - fontsize=12.5) - cax = fig.add_axes([0.1, -0.05, 0.8, 0.05]) - if (stat == 'fbar_obar' or stat == 'orate_frate' - or stat == 'baser_frate'): - cbar = fig.colorbar(CF2, cax=cax, orientation='horizontal', - ticks=CF2.levels) - elif stat == 'bias' or stat == 'fbias': - cbar = fig.colorbar(CF1, cax=cax, orientation='horizontal', - ticks=CF1.levels) - else: - if nsubplots == 1: - cbar = fig.colorbar(CF1, cax=cax, orientation='horizontal', - ticks=CF1.levels) - else: - cbar = fig.colorbar(CF2, cax=cax, orientation='horizontal', - ticks=CF2.levels) - fig.suptitle(stat_plot_name+'\n' - +fcst_var_plot_title+' '+fcst_var_units_plot_title - +', '+obs_var_plot_title+' '+obs_var_units_plot_title+'\n' - +extra_plot_title+'\n' - +date_time_plot_title, - fontsize=14, fontweight='bold') - savefig_imagename = ( - stat+'_'+base_name.replace('FCSTTHRESHHOLDER', 'all') \ - .replace('OBSTHRESHHOLDER','all')+'.png' - ) - savefig_image = os.path.join(output_base_dir, 'images', - savefig_imagename) - logger.info("Saving image as "+savefig_image) - plt.savefig(savefig_image, bbox_inches='tight') - plt.close() diff --git a/ush/plotting_scripts/plot_time_series.py b/ush/plotting_scripts/plot_time_series.py deleted file mode 100644 index cb90cc184..000000000 --- a/ush/plotting_scripts/plot_time_series.py +++ /dev/null @@ -1,775 +0,0 @@ -''' -Name: plot_time_series.py -Contact(s): Mallory Row -Abstract: Reads filtered files from stat_analysis_wrapper - run_all_times to make time series plots -History Log: Third version -Usage: Called by make_plots_wrapper.py -Parameters: None -Input Files: MET .stat files -Output Files: .png images -Condition codes: 0 for success, 1 for failure -''' - -import os -import numpy as np -import pandas as pd -import itertools -import warnings -import logging -import datetime -import math -import re -import sys -import matplotlib -matplotlib.use('agg') -import matplotlib.pyplot as plt -import matplotlib.dates as md - -import plot_util as plot_util -from plot_util import get_ci_file, get_lead_avg_file - -# add metplus directory to path so 
the wrappers and utilities can be found -sys.path.insert(0, os.path.abspath(os.path.join(os.path.dirname(__file__), - '..', - '..'))) -from metplus.util import do_string_sub - -# Read environment variables set in make_plots_wrapper.py -verif_case = os.environ['VERIF_CASE'] -verif_type = os.environ['VERIF_TYPE'] -date_type = os.environ['DATE_TYPE'] -valid_beg = os.environ['VALID_BEG'] -valid_end = os.environ['VALID_END'] -init_beg = os.environ['INIT_BEG'] -init_end = os.environ['INIT_END'] -fcst_valid_hour_list = os.environ['FCST_VALID_HOUR'].split(', ') -fcst_valid_hour = os.environ['FCST_VALID_HOUR'] -fcst_init_hour_list = os.environ['FCST_INIT_HOUR'].split(', ') -fcst_init_hour = os.environ['FCST_INIT_HOUR'] -obs_valid_hour_list = os.environ['OBS_VALID_HOUR'].split(', ') -obs_valid_hour = os.environ['OBS_VALID_HOUR'] -obs_init_hour_list = os.environ['OBS_INIT_HOUR'].split(', ') -obs_init_hour = os.environ['OBS_INIT_HOUR'] -fcst_lead_list = os.environ['FCST_LEAD'].split(', ') -fcst_var_name = os.environ['FCST_VAR'] -fcst_var_units = os.environ['FCST_UNITS'] -fcst_var_level_list = os.environ['FCST_LEVEL'].split(', ') -fcst_var_thresh_list = os.environ['FCST_THRESH'].split(', ') -obs_var_name = os.environ['OBS_VAR'] -obs_var_units = os.environ['OBS_UNITS'] -obs_var_level_list = os.environ['OBS_LEVEL'].split(', ') -obs_var_thresh_list = os.environ['OBS_THRESH'].split(', ') -interp_mthd = os.environ['INTERP_MTHD'] -interp_pnts = os.environ['INTERP_PNTS'] -vx_mask = os.environ['VX_MASK'] -alpha = os.environ['ALPHA'] -desc = os.environ['DESC'] -obs_lead = os.environ['OBS_LEAD'] -cov_thresh = os.environ['COV_THRESH'] -stats_list = os.environ['STATS'].split(', ') -model_list = os.environ['MODEL'].split(', ') -model_obtype_list = os.environ['MODEL_OBTYPE'].split(', ') -model_reference_name_list = os.environ['MODEL_REFERENCE_NAME'].split(', ') -dump_row_filename_template = os.environ['DUMP_ROW_FILENAME'] -average_method = os.environ['AVERAGE_METHOD'] -ci_method = os.environ['CI_METHOD'] -verif_grid = os.environ['VERIF_GRID'] -event_equalization = os.environ['EVENT_EQUALIZATION'] -met_version = os.environ['MET_VERSION'] -input_base_dir = os.environ['INPUT_BASE_DIR'] -output_base_dir = os.environ['OUTPUT_BASE_DIR'] -log_metplus = os.environ['LOG_METPLUS'] -log_level = os.environ['LOG_LEVEL'] - -# General set up and settings -# Plots -warnings.filterwarnings('ignore') -plt.rcParams['font.weight'] = 'bold' -plt.rcParams['axes.labelsize'] = 15 -plt.rcParams['axes.labelweight'] = 'bold' -plt.rcParams['xtick.labelsize'] = 15 -plt.rcParams['ytick.labelsize'] = 15 -plt.rcParams['axes.titlesize'] = 15 -plt.rcParams['axes.titleweight'] = 'bold' -plt.rcParams['axes.formatter.useoffset'] = False -colors = [ - '#000000', '#2F1E80', '#D55E00', '#882255', - '#018C66', '#D6B616', '#036398', '#CC79A7' -] -# Logging -logger = logging.getLogger(log_metplus) -logger.setLevel(log_level) -formatter = logging.Formatter( - '%(asctime)s.%(msecs)03d (%(filename)s:%(lineno)d) %(levelname)s: ' - +'%(message)s', - '%m/%d %H:%M:%S' - ) -file_handler = logging.FileHandler(log_metplus, mode='a') -file_handler.setFormatter(formatter) -logger.addHandler(file_handler) -output_data_dir = os.path.join(output_base_dir, 'data') -output_imgs_dir = os.path.join(output_base_dir, 'imgs') -# Model info -model_info_list = list( - zip(model_list, - model_reference_name_list, - model_obtype_list, - ) -) -nmodels = len(model_info_list) -# Plot info -plot_info_list = list( - itertools.product(*[fcst_lead_list, - fcst_var_level_list, - 
fcst_var_thresh_list]) - ) -# Date and time infomation and build title for plot -date_beg = os.environ[date_type+'_BEG'] -date_end = os.environ[date_type+'_END'] -date_plot_title = ( - date_type.title()+': ' - +str(datetime.datetime.strptime(date_beg, '%Y%m%d').strftime('%d%b%Y')) - +'-' - +str(datetime.datetime.strptime(date_end, '%Y%m%d').strftime('%d%b%Y')) -) -valid_init_dict = { - 'fcst_valid_hour_beg': fcst_valid_hour_list[0], - 'fcst_valid_hour_end': fcst_valid_hour_list[-1], - 'fcst_init_hour_beg': fcst_init_hour_list[0], - 'fcst_init_hour_end': fcst_init_hour_list[-1], - 'obs_valid_hour_beg': obs_valid_hour_list[0], - 'obs_valid_hour_end': obs_valid_hour_list[-1], - 'obs_init_hour_beg': obs_init_hour_list[0], - 'obs_init_hour_end': obs_init_hour_list[-1], - 'valid_hour_beg': '', - 'valid_hour_end': '', - 'init_hour_beg': '', - 'init_hour_end': '' -} -valid_init_type_list = [ - 'valid_hour_beg', 'valid_hour_end', 'init_hour_beg', 'init_hour_end' -] -for vitype in valid_init_type_list: - if (valid_init_dict['fcst_'+vitype] != '' - and valid_init_dict['obs_'+vitype] == ''): - valid_init_dict[vitype] = valid_init_dict['fcst_'+vitype] - elif (valid_init_dict['obs_'+vitype] != '' - and valid_init_dict['fcst_'+vitype] == ''): - valid_init_dict[vitype] = valid_init_dict['obs_'+vitype] - if valid_init_dict['fcst_'+vitype] == '': - if 'beg' in vitype: - valid_init_dict['fcst_'+vitype] = '000000' - elif 'end' in vitype: - valid_init_dict['fcst_'+vitype] = '235959' - if valid_init_dict['obs_'+vitype] == '': - if 'beg' in vitype: - valid_init_dict['obs_'+vitype] = '000000' - elif 'end' in vitype: - valid_init_dict['obs_'+vitype] = '235959' - if valid_init_dict['fcst_'+vitype] == valid_init_dict['obs_'+vitype]: - valid_init_dict[vitype] = valid_init_dict['fcst_'+vitype] -time_plot_title = '' -for vi in ['valid_hour', 'init_hour']: - beg_hr = valid_init_dict[vi+'_beg'] - end_hr = valid_init_dict[vi+'_end'] - fcst_beg_hr = valid_init_dict['fcst_'+vi+'_beg'] - fcst_end_hr = valid_init_dict['fcst_'+vi+'_end'] - obs_beg_hr = valid_init_dict['obs_'+vi+'_beg'] - obs_end_hr = valid_init_dict['obs_'+vi+'_end'] - time_label = vi.split('_')[0].title() - if beg_hr != '' and end_hr != '': - if beg_hr == end_hr: - time_plot_title+=', '+time_label+': '+beg_hr[0:4]+'Z' - else: - time_plot_title+=( - ', '+time_label+': '+beg_hr[0:4]+'-'+end_hr[0:4]+'Z' - ) - else: - if fcst_beg_hr == fcst_end_hr: - time_plot_title+=', Fcst '+time_label+': '+fcst_beg_hr[0:4]+'Z' - else: - time_plot_title+=( - ', Fcst '+time_label+': '+fcst_beg_hr[0:4]+'-' - +fcst_end_hr[0:4]+'Z' - ) - if obs_beg_hr == obs_end_hr: - time_plot_title+=', Obs '+time_label+': '+obs_beg_hr[0:4]+'Z' - else: - time_plot_title+=( - ', Obs '+time_label+': '+obs_beg_hr[0:4]+'-' - +obs_end_hr[0:4]+'Z' - ) -# Common plotting information and build title for plot -if 'WV1' not in interp_mthd or interp_mthd != '': - extra_plot_title = verif_grid+'-'+vx_mask -else: - extra_plot_title = interp_mthd+', '+verif_grid+'-'+vx_mask -if desc != '': - extra_plot_title+=', Desc: '+desc -if obs_lead != '': - extra_plot_title+=', Obs Lead: '+obs_lead -if interp_pnts != '': - extra_plot_title+=', Interp. Pts.: '+interp_pnts -if cov_thresh != '': - extra_plot_title+=', Cov. 
Thresh:'+cov_thresh -if alpha != '': - extra_plot_title+=', Alpha: '+alpha -# MET .stat file formatting -stat_file_base_columns = plot_util.get_stat_file_base_columns(met_version) -nbase_columns = len(stat_file_base_columns) -# Significance testing info -# need to set up random number array [nmodels, ntests, ndays] -# for EMC Monte Carlo testing. Each model has its own -# "series" of random numbers used at all forecast hours -# and thresholds. -mc_dates, mc_expected_stat_file_dates = plot_util.get_date_arrays( - date_type, date_beg, date_end, - fcst_valid_hour, fcst_init_hour, - obs_valid_hour, obs_init_hour, - '000000' -) -ndays = len(mc_expected_stat_file_dates) -ntests = 10000 -randx = np.random.rand(nmodels,ntests,ndays) - -# Start looping to make plots -for plot_info in plot_info_list: - fcst_lead = plot_info[0] - fcst_var_level = plot_info[1] - obs_var_level = obs_var_level_list[ - fcst_var_level_list.index(fcst_var_level) - ] - fcst_var_thresh = plot_info[2] - obs_var_thresh = obs_var_thresh_list[ - fcst_var_thresh_list.index(fcst_var_thresh) - ] - fcst_var_thresh_symbol, fcst_var_thresh_letter = plot_util.format_thresh( - fcst_var_thresh - ) - obs_var_thresh_symbol, obs_var_thresh_letter = plot_util.format_thresh( - obs_var_thresh - ) - # Build plot title for variable info - fcst_var_plot_title = 'Fcst: '+fcst_var_name+' '+fcst_var_level - obs_var_plot_title = 'Obs: '+obs_var_name+' '+obs_var_level - if 'WV1' in interp_mthd: - fcst_var_plot_title+=' '+interp_mthd - obs_var_plot_title+=' '+interp_mthd - if fcst_var_thresh != '': - fcst_var_plot_title+=' '+fcst_var_thresh - if obs_var_thresh != '': - obs_var_plot_title+=' '+obs_var_thresh - if fcst_var_units == '': - fcst_var_units_list = [] - else: - fcst_var_units_list = fcst_var_units.split(', ') - if obs_var_units == '': - obs_var_units_list = [] - else: - obs_var_units_list = obs_var_units.split(', ') - # Build plot title for forecast lead - fcst_lead_plot_title = 'Fcst Lead: '+fcst_lead[:-4]+'hr' - if fcst_lead[-4:-2] != '00': - fcst_lead_plot_title+=fcst_lead[-4:-2]+'min' - if fcst_lead[-2:] != '00': - fcst_lead_plot_title+=fcst_lead[-2:]+'sec' - # Clean up time information for plot title - # if valid/init is a single hour, then init/valid - # is also a single hour - date_time_plot_title = date_plot_title+time_plot_title - date_type_beg_hour = valid_init_dict[date_type.lower()+'_hour_beg'] - date_type_end_hour = valid_init_dict[date_type.lower()+'_hour_end'] - if (date_type_beg_hour != '' and date_type_end_hour != '' - and date_type_beg_hour == date_type_end_hour): - fcst_lead_timedelta = datetime.timedelta( - hours=int(fcst_lead[:-4]), - minutes=int(fcst_lead[-4:-2]), - seconds=int(fcst_lead[-2:]) - ) - date_type_timedelta = datetime.timedelta( - hours=int(date_type_beg_hour[0:2]), - minutes=int(date_type_beg_hour[2:4]), - seconds=int(date_type_beg_hour[4:]) - ) - if date_type == 'VALID': - check_time_plot_title = 'Init' - time_diff = ( - date_type_timedelta - fcst_lead_timedelta - ).total_seconds() - elif date_type == 'INIT': - check_time_plot_title = 'Valid' - time_diff = ( - date_type_timedelta - fcst_lead_timedelta - ).total_seconds() - day_diff = time_diff//86400 - hr_diff = (time_diff - (day_diff*86400))//3600 - min_diff = (time_diff%3600) // 60 - sec_diff = (time_diff%3600)%60 - time_title_replace = re.search(check_time_plot_title+': (.*)Z', - date_time_plot_title) - date_time_plot_title = date_time_plot_title.replace( - check_time_plot_title+': '+time_title_replace.group(1), - check_time_plot_title+': 
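# Hedged sketch (editor's addition): the EMC Monte Carlo setup above
# draws one fixed block of random numbers per model, shaped
# [nmodels, ntests, ndays], so the same resampling sequence is reused
# at every forecast hour and threshold. Illustration with demo sizes:
import numpy as np

nmodels, ntests, ndays = 2, 10000, 31   # sizes assumed for the demo
randx = np.random.rand(nmodels, ntests, ndays)
assert randx.shape == (nmodels, ntests, ndays)
# e.g. model 1's series for resampling test 1 is randx[0, 0, :]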
'+str(int(hr_diff)).zfill(2) - +str(int(min_diff)).zfill(2) - ) - logger.info("Working on forecast lead "+fcst_lead+" " - +"and forecast variable "+fcst_var_name+" "+fcst_var_level+" " - +fcst_var_thresh) - # Set up base name for file naming convention for MET .stat files, - # and output data and images - base_name = date_type.lower()+date_beg+'to'+date_end - if (valid_init_dict['valid_hour_beg'] != '' - and valid_init_dict['valid_hour_end'] != ''): - base_name+=( - '_valid'+valid_init_dict['valid_hour_beg'][0:4] - +'to'+valid_init_dict['valid_hour_end'][0:4]+'Z' - ) - else: - base_name+=( - '_fcst_valid'+valid_init_dict['fcst_valid_hour_beg'][0:4] - +'to'+valid_init_dict['fcst_valid_hour_end'][0:4]+'Z' - +'_obs_valid'+valid_init_dict['obs_valid_hour_beg'][0:4] - +'to'+valid_init_dict['obs_valid_hour_end'][0:4]+'Z' - ) - if (valid_init_dict['init_hour_beg'] != '' - and valid_init_dict['init_hour_end'] != ''): - base_name+=( - '_init'+valid_init_dict['init_hour_beg'][0:4] - +'to'+valid_init_dict['init_hour_end'][0:4]+'Z' - ) - else: - base_name+=( - '_fcst_init'+valid_init_dict['fcst_init_hour_beg'][0:4] - +'to'+valid_init_dict['fcst_init_hour_end'][0:4]+'Z' - +'_obs_init'+valid_init_dict['obs_init_hour_beg'][0:4] - +'to'+valid_init_dict['obs_init_hour_end']+'Z' - ) - base_name+=( - '_fcst_lead'+fcst_lead - +'_fcst'+fcst_var_name+fcst_var_level - +fcst_var_thresh_letter.replace(',', '_')+interp_mthd - +'_obs'+obs_var_name+obs_var_level - +obs_var_thresh_letter.replace(',', '_')+interp_mthd - +'_vxmask'+vx_mask - ) - if desc != '': - base_name+='_desc'+desc - if obs_lead != '': - base_name+='_obs_lead'+obs_lead - if interp_pnts != '': - base_name+='_interp_pnts'+interp_pnts - if cov_thresh != '': - cov_thresh_symbol, cov_thresh_letter = plot_util.format_thresh( - cov_thresh - ) - base_name+='_cov_thresh'+cov_thresh_letter.replace(',', '_') - if alpha != '': - base_name+='_alpha'+alpha - # Set up expected date in MET .stat file and date plot information - plot_time_dates, expected_stat_file_dates = plot_util.get_date_arrays( - date_type, date_beg, date_end, - fcst_valid_hour, fcst_init_hour, - obs_valid_hour, obs_init_hour, - fcst_lead - ) - total_dates = len(plot_time_dates) - if len(plot_time_dates) == 0: - logger.error("Date array constructed information from METplus " - +"conf file has length of 0. Not enough information " - +"was provided to build date information. 
Please check " - +"provided VALID/INIT_BEG/END and " - +"OBS/FCST_INIT/VALID_HOUR_LIST") - exit(1) - elif len(plot_time_dates) <= 3: - date_tick_intvl = 1 - elif len(plot_time_dates) > 3 and len(plot_time_dates) <= 10: - date_tick_intvl = 2 - elif len(plot_time_dates) > 10 and len(plot_time_dates) < 31: - date_tick_intvl = 5 - else: - date_tick_intvl = 10 - # Reading in model .stat files from stat_analysis - logger.info("Reading in model data") - for model_info in model_info_list: - model_num = model_info_list.index(model_info) + 1 - model_name = model_info[0] - model_plot_name = model_info[1] - model_obtype = model_info[2] - model_data_now_index = pd.MultiIndex.from_product( - [[model_plot_name], expected_stat_file_dates], - names=['model_plot_name', 'dates'] - ) -# model_stat_filename = ( -# model_plot_name+'_'+model_obtype+'_' -# +base_name -# +'_dump_row.stat' -# ) -# model_stat_file = os.path.join(input_base_dir, model_stat_filename) - model_stat_template = dump_row_filename_template - string_sub_dict = { - 'model': model_name, - 'model_reference': model_plot_name, - 'obtype': model_obtype, - 'fcst_lead': fcst_lead, - 'fcst_level': fcst_var_level, - 'obs_level': obs_var_level, - 'fcst_thresh': fcst_var_thresh, - 'obs_thresh': obs_var_thresh, - } - model_stat_file = do_string_sub(model_stat_template, - **string_sub_dict) - if os.path.exists(model_stat_file): - nrow = sum(1 for line in open(model_stat_file)) - if nrow == 0: - logger.warning("Model "+str(model_num)+" "+model_name+" " - +"with plot name "+model_plot_name+" " - +"file: "+model_stat_file+" empty") - model_now_data = pd.DataFrame(np.nan, - index=model_data_now_index, - columns=[ 'TOTAL' ]) - else: - logger.debug("Model "+str(model_num)+" "+model_name+" " - +"with plot name "+model_plot_name+" " - +"file: "+model_stat_file+" exists") - model_now_stat_file_data = pd.read_csv( - model_stat_file, sep=" ", skiprows=1, - skipinitialspace=True, header=None - ) - model_now_stat_file_data.rename( - columns=dict(zip( - model_now_stat_file_data.columns[:nbase_columns], - stat_file_base_columns - )), inplace=True - ) - line_type = model_now_stat_file_data['LINE_TYPE'][0] - stat_file_line_type_columns = ( - plot_util.get_stat_file_line_type_columns(logger, - met_version, - line_type) - ) - model_now_stat_file_data.rename( - columns=dict(zip( - model_now_stat_file_data.columns[nbase_columns:], - stat_file_line_type_columns - )), inplace=True - ) - model_now_stat_file_data_fcst_valid_dates = ( - model_now_stat_file_data.loc[:]['FCST_VALID_BEG'].values - ) - model_now_data = ( - pd.DataFrame(np.nan, index=model_data_now_index, - columns=stat_file_line_type_columns) - ) - model_now_stat_file_data.fillna( - {'FCST_UNITS':'NA', 'OBS_UNITS':'NA', 'VX_MASK':'NA'}, - inplace=True - ) - if float(met_version) >= 8.1: - model_now_fcst_units = ( - model_now_stat_file_data.loc[0]['FCST_UNITS'] - ) - model_now_obs_units = ( - model_now_stat_file_data.loc[0]['OBS_UNITS'] - ) - if model_now_fcst_units != 'NA': - fcst_var_units_list.append(model_now_fcst_units) - if model_now_obs_units != 'NA': - obs_var_units_list.append(model_now_obs_units) - for expected_date in expected_stat_file_dates: - if expected_date in \ - model_now_stat_file_data_fcst_valid_dates: - matching_date_idx = ( - model_now_stat_file_data_fcst_valid_dates \ - .tolist().index(expected_date) - ) - model_now_stat_file_data_indexed = ( - model_now_stat_file_data.loc[matching_date_idx][:] - ) - for col in stat_file_line_type_columns: - model_now_data.loc[ - (model_plot_name, expected_date) 
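# Hedged sketch (editor's addition): MET .stat files have a fixed set
# of leading columns plus line-type-specific columns, so the code above
# renames pandas' default integer columns positionally with
# dict(zip(...)). Tiny illustration:
import pandas as pd

df = pd.DataFrame([[1, 2, 3]])                    # columns are 0, 1, 2
base_cols = ['VERSION', 'MODEL']
df.rename(columns=dict(zip(df.columns[:2], base_cols)), inplace=True)
assert list(df.columns) == ['VERSION', 'MODEL', 2]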
- ][col] = ( - model_now_stat_file_data_indexed.loc[:][col] - ) - else: - logger.warning("Model "+str(model_num)+" "+model_name+" " - +"with plot name "+model_plot_name+" " - +"file: "+model_stat_file+" does not exist") - model_now_data = pd.DataFrame(np.nan, - index=model_data_now_index, - columns=[ 'TOTAL' ]) - if model_num > 1: - model_data = pd.concat([model_data, model_now_data]) - else: - model_data = model_now_data - if fcst_var_units_list != []: - fcst_var_units_plot_title = ( - '['+', '.join(list(set(fcst_var_units_list)))+']' - ) - else: - fcst_var_units_plot_title = '' - if obs_var_units_list != []: - obs_var_units_plot_title = ( - '['+', '.join(list(set(obs_var_units_list)))+']' - ) - else: - obs_var_units_plot_title = '' - # Calculate statistics and plots - logger.info("Calculating and plotting statistics") - for stat in stats_list: - logger.debug("Working on "+stat) - stat_values, stat_values_array, stat_plot_name = ( - plot_util.calculate_stat(logger, model_data, stat) - ) - if event_equalization == 'True': - logger.debug("Doing event equalization") - for l in range(len(stat_values_array[:,0,0])): - stat_values_array[l,:,:] = ( - np.ma.mask_cols(stat_values_array[l,:,:]) - ) - np.ma.set_fill_value(stat_values_array, np.nan) - for model_info in model_info_list: - model_num = model_info_list.index(model_info) + 1 - model_idx = model_info_list.index(model_info) - model_name = model_info[0] - model_plot_name = model_info[1] - model_obtype = model_info[2] - model_stat_values_array = stat_values_array[:,model_idx,:] - -# lead_avg_filename = stat + '_' + os.path.basename(model_info[3]) - - # if fcst_leadX is in filename, replace it with fcst_lead_avgs - # and add .txt to end of filename -# if 'fcst_lead' + fcst_lead in model_info[3]: -# lead_avg_filename.replace('fcst_lead' + fcst_lead, 'fcst_lead_avgs') -# lead_avg_filename += '.txt' - - # if not, remove mention of forecast lead and - # add fcst_lead_avgs.txt to end of filename -# else: -# lead_avg_filename.replace('fcst_lead' + fcst_lead, '') -# lead_avg_filename += '_fcst_lead_avgs.txt' - -# lead_avg_file = os.path.join(output_base_dir, 'data', -# lead_avg_filename) - - - # Write model forecast lead average to file - model_stat_template = dump_row_filename_template - string_sub_dict = { - 'model': model_name, - 'model_reference': model_plot_name, - 'obtype': model_obtype, - 'fcst_lead': fcst_lead, - 'fcst_level': fcst_var_level, - 'obs_level': obs_var_level, - 'fcst_thresh': fcst_var_thresh, - 'obs_thresh': obs_var_thresh, - } - model_stat_file = do_string_sub(model_stat_template, - **string_sub_dict) - lead_avg_file = get_lead_avg_file(stat, - model_stat_file, - fcst_lead, - output_base_dir) - - logger.debug("Writing model "+str(model_num)+" "+model_name+" " - +"with name on plot "+model_plot_name+" lead " - +fcst_lead+" average to file: "+lead_avg_file) - model_stat_average_array = plot_util.calculate_average( - logger, average_method, stat, model_data.loc[[model_plot_name]], - model_stat_values_array - ) - with open(lead_avg_file, 'a') as file2write: - file2write.write(fcst_lead) - if fcst_var_units_plot_title != '': - file2write.write(' '+fcst_var_units_plot_title) - else: - file2write.write(' [NA]') - if obs_var_units_plot_title != '': - file2write.write(' '+obs_var_units_plot_title) - else: - file2write.write(' [NA]') - for l in range(len(model_stat_average_array)): - file2write.write( - ' '+str(model_stat_average_array[l]) - ) - file2write.write('\n') - # Write confidence intervals to file, if requested, - # using similar 
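# Hedged sketch (editor's addition): each lead-average file written
# above is one plain space-separated line per lead: the lead string,
# '[units]' or '[NA]' for forecast and observation units, then the
# average value(s); plot_threshold_by_lead.py reads it back with
# pandas. Demo round trip (file name and values are illustrative):
import pandas as pd

with open('demo_lead_avg.txt', 'w') as f:
    f.write('240000 [NA] [NA] 0.5\n')
cols = ['LEADS', 'FCST_UNITS', 'OBS_UNITS', 'VALS']
data = pd.read_csv('demo_lead_avg.txt', sep=' ', header=None,
                   names=cols, dtype=str)
assert data.loc[0]['VALS'] == '0.5'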
naming to model forecast lead average - if ci_method != 'NONE': -# CI_filename = ( -# stat+'_' -# +model_plot_name+'_'+model_obtype+'_' -# +base_name -# +'_CI_'+ci_method+'.txt' -# ).replace('fcst_lead'+fcst_lead, 'fcst_lead_avgs') -# CI_filename = stat + '_' + os.path.basename(model_info[3]) - # if fcst_leadX is in filename, replace it with fcst_lead_avgs - # and add .txt to end of filename -# if 'fcst_lead' + fcst_lead in model_info[3]: -# CI_filename.replace('fcst_lead' + fcst_lead, 'fcst_lead_avgs') -# CI_filename += '.txt' - - # if not, remove mention of forecast lead and - # add fcst_lead_avgs.txt to end of filename -# else: -# CI_filename.replace('fcst_lead' + fcst_lead, '') -# CI_filename += '_fcst_lead_avgs' - -# CI_filename += '_CI_' + ci_method + '.txt' - -# CI_file = os.path.join(output_base_dir, 'data', -# CI_filename) - - CI_file = get_ci_file(stat, - model_stat_file, - fcst_lead, - output_base_dir, - ci_method) - - - if (stat == 'fbar_obar' or stat == 'orate_frate' - or stat == 'baser_frate'): - logger.debug("Writing "+ci_method+" confidence intervals " - +"for difference between model " - +str(model_num)+" "+model_name+" with name " - +"on plot "+model_plot_name+" and the " - +"observations at lead "+fcst_lead+" to " - +"file: "+CI_file) - if ci_method == 'EMC_MONTE_CARLO': - logger.warning("Monte Carlo resampling not " - +"done for fbar_obar, orate_frate, " - +"or baser_frate.") - stat_CI = '--' - else: - stat_CI = plot_util.calculate_ci( - logger, ci_method, model_stat_values_array[0,:], - model_stat_values_array[1,:],total_dates, - stat, average_method, randx[model_idx,:,:] - ) - with open(CI_file, 'a') as file2write: - file2write.write(fcst_lead+' '+str(stat_CI)+'\n') - else: - if model_num == 1: - model1_stat_values_array = ( - model_stat_values_array[0,:] - ) - model1_plot_name = model_plot_name - model1_name = model_name - else: - logger.debug("Writing "+ci_method+" confidence " - +"intervals for difference between " - +"model "+str(model_num)+" " - +model_name+" with name on plot " - +model_plot_name+" and model 1 " - +model1_name+" with name on plot " - +model1_plot_name+" at lead " - +fcst_lead+" to file: "+CI_file) - if ci_method == 'EMC_MONTE_CARLO': - stat_CI = plot_util.calculate_ci( - logger, ci_method, - model_data.loc[[model_plot_name]], - model_data.loc[[model1_plot_name]], total_dates, - stat, average_method, randx[model_idx,:,:] - ) - else: - stat_CI = plot_util.calculate_ci( - logger, ci_method, model_stat_values_array, - model1_stat_values_array, total_dates, - stat, average_method, randx[model_idx,:,:] - ) - with open(CI_file, 'a') as file2write: - file2write.write(fcst_lead+' '+str(stat_CI)+'\n') - logger.debug("Plotting model "+str(model_num)+" "+model_name+" " - +"with name on plot "+model_plot_name) - if model_num == 1: - fig, ax = plt.subplots(1,1,figsize=(10,6)) - ax.grid(True) - ax.tick_params(axis='x', pad=15) - ax.set_xlabel(date_type.title()+' Date', labelpad=30) - ax.set_xlim([plot_time_dates[0],plot_time_dates[-1]]) - ax.xaxis.set_major_locator( - md.DayLocator(interval=date_tick_intvl) - ) - ax.xaxis.set_major_formatter(md.DateFormatter('%d%b%Y')) - ax.xaxis.set_minor_locator(md.DayLocator()) - ax.tick_params(axis='y', pad=15) - ax.set_ylabel(stat_plot_name, labelpad=30) - if (stat == 'fbar_obar' or stat == 'orate_frate' - or stat == 'baser_frate'): - obs_stat_values_array = model_stat_values_array[1,:] - obs_count = ( - len(obs_stat_values_array) - - np.ma.count_masked(obs_stat_values_array) - ) - plot_time_dates_m = np.ma.masked_where( 
- np.ma.getmask(obs_stat_values_array), plot_time_dates - ) - plot_time_dates_mc = np.ma.compressed(plot_time_dates_m) - obs_stat_values_mc = np.ma.compressed( - obs_stat_values_array - ) - if np.ma.is_masked(model_stat_average_array[1]): - obs_legend_label = ( - 'obs ' - +str(model_stat_average_array[1])+' ' - +str(obs_count) - ) - else: - obs_legend_label = ( - 'obs ' - +str(round(model_stat_average_array[1],3))+' ' - +str(obs_count) - ) - ax.plot_date(plot_time_dates_mc, - obs_stat_values_mc, - color='#888888', - ls='-', linewidth=2.0, - marker='o', markersize=7, - label=obs_legend_label, - zorder=4) - count = ( - len(model_stat_values_array[0,:]) - - np.ma.count_masked(model_stat_values_array[0,:]) - ) - plot_time_dates_m = np.ma.masked_where( - np.ma.getmask(model_stat_values_array[0,:]), plot_time_dates - ) - plot_time_dates_mc = np.ma.compressed(plot_time_dates_m) - model_stat_values_mc = np.ma.compressed( - model_stat_values_array[0,:] - ) - if np.ma.is_masked(model_stat_average_array[0]): - model_legend_label = ( - model_plot_name+' ' - +str(model_stat_average_array[0])+' ' - +str(count) - ) - else: - model_legend_label = ( - model_plot_name+' ' - +str(round(model_stat_average_array[0],3))+' ' - +str(count) - ) - ax.plot_date(plot_time_dates_mc, model_stat_values_mc, - color=colors[model_idx], - ls='-', linewidth=2.0, - marker='o', markersize=7, - label=model_legend_label, - zorder=(nmodels-model_idx)+4) - ax.legend(bbox_to_anchor=(1.025, 1.0, 0.375, 0.0), loc='upper right', - ncol=1, fontsize='13', mode='expand', borderaxespad=0.) - ax.set_title(stat_plot_name+'\n' - +fcst_var_plot_title+' '+fcst_var_units_plot_title - +', '+obs_var_plot_title+' '+obs_var_units_plot_title+'\n' - +extra_plot_title+'\n' - +date_time_plot_title+', '+fcst_lead_plot_title+'\n', - fontsize=14, fontweight='bold') - savefig_imagename = stat+'_'+base_name+'.png' - savefig_image = os.path.join(output_base_dir, 'images', - savefig_imagename) - logger.info("Saving image as "+savefig_image) - plt.savefig(savefig_image, bbox_inches='tight') - plt.close() diff --git a/ush/plotting_scripts/plot_util.py b/ush/plotting_scripts/plot_util.py deleted file mode 100644 index 898c93e0c..000000000 --- a/ush/plotting_scripts/plot_util.py +++ /dev/null @@ -1,1128 +0,0 @@ -import os -import datetime as datetime -import time -import numpy as np -import pandas as pd - - -"""!@namespace plot_util - @brief Provides utility functions for METplus plotting use case. -""" - -def get_date_arrays(date_type, date_beg, date_end, - fcst_valid_hour, fcst_init_hour, - obs_valid_hour, obs_init_hour, - lead): - """! 
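# Hedged sketch (editor's addition): missing dates are handled above by
# masking the date axis wherever the statistic is masked, then
# compressing both arrays so the time series simply skips the gaps.
# Standalone illustration:
import numpy as np

stat_vals = np.ma.masked_invalid([0.2, np.nan, 0.4])
dates = np.array([737060.0, 737061.0, 737062.0])  # ordinal dates (demo)
dates_m = np.ma.masked_where(np.ma.getmask(stat_vals), dates)
assert list(np.ma.compressed(dates_m)) == [737060.0, 737062.0]
assert list(np.ma.compressed(stat_vals)) == [0.2, 0.4]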
Create arrays of requested dates plotting and - dates expected to be in MET .stat files - - Args: - date_type - string of describing the treatment - of dates, either VALID or INIT - date_beg - string of beginning date, - either blank or %Y%m%d format - date_end - string of end date, - either blank or %Y%m%d format - fcst_valid_hour - string of forecast valid hour(s) - information, blank or in %H%M%S - fcst_init_hour - string of forecast init hour(s) - information, blank or in %H%M%S - obs_valid_hour - string of observation valid hour(s) - information, blank or in %H%M%S - obs_init_hour - string of observation hour(s) - information, blank or in %H%M%S - lead - string of forecast lead, in %H%M%S - format - - Returns: - plot_time_dates - array of ordinal dates based on user - provided information - expected_stat_file_dates - array of dates that are expected to - be found in the MET .stat files - based on user provided information, - formatted as %Y%m%d_%H%M%S - """ - lead_hour_seconds = int(int(lead[:-4])%24) * 3600 - lead_min_seconds = int(lead[-4:-2]) * 60 - lead_seconds = int(lead[-2:]) - valid_init_time_info = { - 'fcst_valid_time': list(filter(None, fcst_valid_hour.split(', '))), - 'fcst_init_time': list(filter(None, fcst_init_hour.split(', '))), - 'obs_valid_time': list(filter(None, obs_valid_hour.split(', '))), - 'obs_init_time': list(filter(None, obs_init_hour.split(', '))), - } - # Extract missing information, if possible - for type in ['fcst', 'obs']: - valid_time_list = valid_init_time_info[type+'_valid_time'] - init_time_list = valid_init_time_info[type+'_init_time'] - if (len(valid_time_list) == 0 - and len(init_time_list) > 0): - for itime in init_time_list: - itime_hour_seconds = int(int(itime[0:2])%24) * 3600 - itime_min_seconds = int(itime[2:4]) * 60 - itime_seconds = int(itime[4:]) - offset = datetime.timedelta(seconds=lead_hour_seconds - + lead_min_seconds - + lead_seconds - + itime_hour_seconds - + itime_min_seconds - + itime_seconds) - tot_sec = offset.total_seconds() - valid_hour = int(tot_sec//3600) - valid_min = int((tot_sec%3600) // 60) - valid_sec = int((tot_sec%3600)%60) - valid_time = ( - str(valid_hour).zfill(2) - +str(valid_min).zfill(2) - +str(valid_sec).zfill(2) - ) - valid_init_time_info[type+'_valid_time'].append(valid_time) - if (len(init_time_list) == 0 - and len(valid_time_list) > 0): - for vtime in valid_time_list: - vtime_hour_seconds = int(int(vtime[0:2])%24) * 3600 - vtime_min_seconds = int(vtime[2:4]) * 60 - vtime_seconds = int(vtime[4:]) - offset = datetime.timedelta(seconds=lead_hour_seconds - + lead_min_seconds - + lead_seconds - - vtime_hour_seconds - - vtime_min_seconds - - vtime_seconds) - tot_sec = offset.total_seconds() - init_hour = int(tot_sec//3600) - init_min = int((tot_sec%3600) // 60) - init_sec = int((tot_sec%3600)%60) - init_time = ( - str(init_hour).zfill(2) - +str(init_min).zfill(2) - +str(init_sec).zfill(2) - ) - valid_init_time_info[type+'_init_time'].append(init_time) - for type in ['valid', 'init']: - fcst_time_list = valid_init_time_info['fcst_'+type+'_time'] - obs_time_list = valid_init_time_info['obs_'+type+'_time'] - if len(fcst_time_list) == 0: - if len(obs_time_list) > 0: - valid_init_time_info['fcst_'+type+'_time'] = ( - valid_init_time_info['obs_'+type+'_time'] - ) - if len(obs_time_list) == 0: - if len(fcst_time_list) > 0: - valid_init_time_info['obs_'+type+'_time'] = ( - valid_init_time_info['fcst_'+type+'_time'] - ) - date_info = {} - for type in ['fcst_'+date_type.lower(), - 'obs_'+date_type.lower()]: - time_list = 
valid_init_time_info[type+'_time'] - if len(time_list) != 0: - time_beg = min(time_list) - time_end = max(time_list) - if time_beg == time_end or len(time_list) == 1: - delta_t = datetime.timedelta(seconds=86400) - else: - delta_t_list = [] - for t in range(len(time_list)): - if time_list[t] == time_end: - delta_t_list.append( - ( - datetime.datetime.strptime('235959','%H%M%S') - - (datetime.datetime.strptime(time_list[t], - '%H%M%S')) - ) - + datetime.timedelta(seconds = 1) - ) - else: - delta_t_list.append( - datetime.datetime.strptime(time_list[t+1], - '%H%M%S') - - datetime.datetime.strptime(time_list[t], - '%H%M%S') - ) - delta_t_array = np.array(delta_t_list) - if np.all(delta_t_array == delta_t_array[0]): - delta_t = delta_t_array[0] - else: - delta_t = np.min(delta_t_array) - beg = datetime.datetime.strptime( - date_beg+time_beg, '%Y%m%d%H%M%S' - ) - end = datetime.datetime.strptime( - date_end+time_end, '%Y%m%d%H%M%S' - ) - dates = np.arange( - beg, end+delta_t, - delta_t - ).astype(datetime.datetime) - else: - dates = [] - date_info[type+'_dates'] = dates - # Build opposite dates - if date_type == 'VALID': - oppo_date_type = 'INIT' - elif date_type == 'INIT': - oppo_date_type = 'VALID' - lead_timedelta = datetime.timedelta( - seconds=(int(int(lead[:-4])) * 3600 + lead_min_seconds - + lead_seconds) - ) - if oppo_date_type == 'INIT': - lead_timedelta = -1 * lead_timedelta - for type in ['fcst', 'obs']: - date_info[type+'_'+oppo_date_type.lower()+'_dates'] = ( - date_info[type+'_'+date_type.lower()+'_dates'] + lead_timedelta - ) - # Use fcst_*_dates for dates - # this makes the assumption that - # fcst_*_dates and obs_*_dates - # are the same, and they should be for - # most cases - dates = date_info['fcst_'+date_type.lower()+'_dates'] - fv_dates = date_info['fcst_valid_dates'] - plot_time_dates = [] - expected_stat_file_dates = [] - for date in dates: - dt = date.time() - seconds = (dt.hour * 60 + dt.minute) * 60 + dt.second - plot_time_dates.append(date.toordinal() + seconds/86400.) - # MET .stat files saves valid dates in file - fv_dates = date_info['fcst_valid_dates'] - expected_stat_file_dates = [] - for fv_date in fv_dates: - expected_stat_file_dates.append(fv_date.strftime('%Y%m%d_%H%M%S')) - return plot_time_dates, expected_stat_file_dates - -def format_thresh(thresh): - """! 
Format thresholds for file naming - - Args: - thresh - string of the treshold(s) - - Return: - thresh_symbol - string of the threshold(s) - with symbols - thresh_letters - string of the threshold(s) - with letters - """ - thresh_list = thresh.split(' ') - thresh_symbol = '' - thresh_letter = '' - for thresh in thresh_list: - if thresh == '': - continue - thresh_value = thresh - for opt in ['>=', '>', '==','!=','<=', '<', - 'ge', 'gt', 'eq', 'ne', 'le', 'lt']: - if opt in thresh_value: - thresh_opt = opt - thresh_value = thresh_value.replace(opt, '') - if thresh_opt in ['>', 'gt']: - thresh_symbol+='>'+thresh_value - thresh_letter+='gt'+thresh_value - elif thresh_opt in ['>=', 'ge']: - thresh_symbol+='>='+thresh_value - thresh_letter+='ge'+thresh_value - elif thresh_opt in ['<', 'lt']: - thresh_symbol+='<'+thresh_value - thresh_letter+='lt'+thresh_value - elif thresh_opt in ['<=', 'le']: - thresh_symbol+='<='+thresh_value - thresh_letter+='le'+thresh_value - elif thresh_opt in ['==', 'eq']: - thresh_symbol+='=='+thresh_value - thresh_letter+='eq'+thresh_value - elif thresh_opt in ['!=', 'ne']: - thresh_symbol+='!='+thresh_value - thresh_letter+='ne'+thresh_value - return thresh_symbol, thresh_letter - -def get_stat_file_base_columns(met_version): - """! Get the standard MET .stat file columns based on - version number - - Args: - met_version - string of MET version - number being used to - run stat_analysis - - Returns: - stat_file_base_columns - list of the standard - columns shared among the - different line types - """ - met_version = float(met_version) - if met_version < 8.1: - stat_file_base_columns = [ - 'VERSION', 'MODEL', 'DESC', 'FCST_LEAD', 'FCST_VALID_BEG', - 'FCST_VALID_END', 'OBS_LEAD', 'OBS_VALID_BEG', 'OBS_VALID_END', - 'FCST_VAR', 'FCST_LEV', 'OBS_VAR', 'OBS_LEV', 'OBTYPE', 'VX_MASK', - 'INTERP_MTHD', 'INTERP_PNTS', 'FCST_THRESH', 'OBS_THRESH', - 'COV_THRESH', 'ALPHA', 'LINE_TYPE' - ] - else: - stat_file_base_columns = [ - 'VERSION', 'MODEL', 'DESC', 'FCST_LEAD', 'FCST_VALID_BEG', - 'FCST_VALID_END', 'OBS_LEAD', 'OBS_VALID_BEG', 'OBS_VALID_END', - 'FCST_VAR', 'FCST_UNITS', 'FCST_LEV', 'OBS_VAR', 'OBS_UNITS', - 'OBS_LEV', 'OBTYPE', 'VX_MASK', 'INTERP_MTHD', 'INTERP_PNTS', - 'FCST_THRESH', 'OBS_THRESH', 'COV_THRESH', 'ALPHA', 'LINE_TYPE' - ] - return stat_file_base_columns - -def get_stat_file_line_type_columns(logger, met_version, line_type): - """! 
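# Hedged sketch (editor's addition): format_thresh above returns both a
# symbol form and a letter form of a threshold for use in file names,
# e.g. 'ge5' and '>=5' both map to ('>=5', 'ge5'). An illustrative
# re-implementation of the idea (not the original function):
op_to_letter = {'>=': 'ge', '>': 'gt', '<=': 'le', '<': 'lt',
                '==': 'eq', '!=': 'ne'}

def demo_format_thresh(thresh):
    for sym, let in op_to_letter.items():
        if thresh.startswith(sym):
            return sym + thresh[len(sym):], let + thresh[len(sym):]
        if thresh.startswith(let):
            return sym + thresh[len(let):], let + thresh[len(let):]
    return thresh, thresh

assert demo_format_thresh('ge5') == ('>=5', 'ge5')
assert demo_format_thresh('>=5') == ('>=5', 'ge5')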
Get the MET .stat file columns for line type based on - version number - - Args: - met_version - string of MET version number - being used to run stat_analysis - line_type - string of the line type of the MET - .stat file being read - - Returns: - stat_file_line_type_columns - list of the line - type columns - """ - met_version = float(met_version) - if line_type == 'SL1L2': - if met_version >= 6.0: - stat_file_line_type_columns = [ - 'TOTAL', 'FBAR', 'OBAR', 'FOBAR', 'FFBAR', 'OOBAR', 'MAE' - ] - elif line_type == 'SAL1L2': - if met_version >= 6.0: - stat_file_line_type_columns = [ - 'TOTAL', 'FABAR', 'OABAR', 'FOABAR', 'FFABAR', 'OOABAR', 'MAE' - ] - elif line_type == 'VL1L2': - if met_version <= 6.1: - stat_file_line_type_columns = [ - 'TOTAL', 'UFBAR', 'VFBAR', 'UOBAR', 'VOBAR', 'UVFOBAR', - 'UVFFBAR', 'UVOOBAR' - ] - elif met_version >= 7.0: - stat_file_line_type_columns = [ - 'TOTAL', 'UFBAR', 'VFBAR', 'UOBAR', 'VOBAR', 'UVFOBAR', - 'UVFFBAR', 'UVOOBAR', 'F_SPEED_BAR', 'O_SPEED_BAR' - ] - elif line_type == 'VAL1L2': - if met_version >= 6.0: - stat_file_line_type_columns = [ - 'TOTAL', 'UFABAR', 'VFABAR', 'UOABAR', 'VOABAR', 'UVFOABAR', - 'UVFFABAR', 'UVOOABAR' - ] - elif line_type == 'VCNT': - if met_version >= 7.0: - stat_file_line_type_columns = [ - 'TOTAL', 'FBAR', 'FBAR_NCL', 'FBAR_NCU', 'OBAR', 'OBAR_NCL', - 'OBAR_NCU', 'FS_RMS', 'FS_RMS_NCL', 'FS_RMS_NCU', 'OS_RMS', - 'OS_RMS_NCL', 'OS_RMS_NCU', 'MSVE', 'MSVE_NCL', 'MSVE_NCU', - 'RMSVE', 'RMSVE_NCL', 'RMSVE_NCU', 'FSTDEV', 'FSTDEV_NCL', - 'FSTDEV_NCU', 'OSTDEV', 'OSTDEV_NCL', 'OSTDEV_NCU', 'FDIR', - 'FDIR_NCL', 'FDIR_NCU', 'ODIR', 'ODIR_NCL', 'ODIR_NCU', - 'FBAR_SPEED', 'FBAR_SPEED_NCL', 'FBAR_SPEED_NCU', 'OBAR_SPEED', - 'OBAR_SPEED_NCL', 'OBAR_SPEED_NCU', 'VDIFF_SPEED', - 'VDIFF_SPEED_NCL', 'VDIFF_SPEED_NCU', 'VDIFF_DIR', - 'VDIFF_DIR_NCL', 'VDIFF_DIR_NCU', 'SPEED_ERR', 'SPEED_ERR_NCL', - 'SPEED_ERR_NCU', 'SPEED_ABSERR', 'SPEED_ABSERR_NCL', - 'SPEED_ABSERR_NCU', 'DIR_ERR', 'DIR_ERR_NCL', 'DIR_ERR_NCU', - 'DIR_ABSERR', 'DIR_ABSERR_NCL', 'DIR_ABSERR_NCU' - ] - else: - logger.error("VCNT is not a valid LINE_TYPE in METV"+met_version) - exit(1) - elif line_type == 'CTC': - if met_version >= 6.0: - stat_file_line_type_columns = [ - 'TOTAL', 'FY_OY', 'FY_ON', 'FN_OY', 'FN_ON' - ] - return stat_file_line_type_columns - -def get_clevels(data): - """! Get contour levels for plotting - - Args: - data - array of data to be contoured - - Returns: - clevels - array of contoure levels - """ - if np.abs(np.nanmin(data)) > np.nanmax(data): - cmax = np.abs(np.nanmin(data)) - cmin = np.nanmin(data) - else: - cmax = np.nanmax(data) - cmin = -1 * np.nanmax(data) - if cmax > 1: - cmin = round(cmin-1,0) - cmax = round(cmax+1,0) - else: - cmin = round(cmin-0.1,1) - cmax = round(cmax+0.1,1) - clevels = np.linspace(cmin, cmax, 11, endpoint=True) - return clevels - -def calculate_average(logger, average_method, stat, model_dataframe, - model_stat_values): - """! 
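# Hedged sketch (editor's addition): get_clevels above builds 11
# contour levels symmetric about zero from the larger magnitude of the
# data extremes, padded by 1 (or by 0.1 when the maximum is <= 1).
# Standalone illustration of the idea:
import numpy as np

data = np.array([-0.5, 2.0, 3.0])
cmax = max(abs(np.nanmin(data)), np.nanmax(data))   # 3.0
clevels = np.linspace(round(-cmax - 1, 0), round(cmax + 1, 0), 11,
                      endpoint=True)
assert clevels[0] == -4.0 and clevels[-1] == 4.0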
-def get_clevels(data):
-    """! Get contour levels for plotting
-
-         Args:
-            data    - array of data to be contoured
-
-         Returns:
-            clevels - array of contoure levels
-    """
-    if np.abs(np.nanmin(data)) > np.nanmax(data):
-        cmax = np.abs(np.nanmin(data))
-        cmin = np.nanmin(data)
-    else:
-        cmax = np.nanmax(data)
-        cmin = -1 * np.nanmax(data)
-    if cmax > 1:
-        cmin = round(cmin-1,0)
-        cmax = round(cmax+1,0)
-    else:
-        cmin = round(cmin-0.1,1)
-        cmax = round(cmax+0.1,1)
-    clevels = np.linspace(cmin, cmax, 11, endpoint=True)
-    return clevels
-
-def calculate_average(logger, average_method, stat, model_dataframe,
-                      model_stat_values):
-    """! Calculate average of dataset
-
-         Args:
-            logger            - logging file
-            average_method    - string of the method to
-                                use to calculate the
-                                average
-            stat              - string of the statistic the
-                                average is being taken for
-            model_dataframe   - dataframe of model .stat
-                                columns
-            model_stat_values - array of statistic values
-
-         Returns:
-            average_array     - array of average value(s)
-    """
-    average_array = np.empty_like(model_stat_values[:,0])
-    if average_method == 'MEAN':
-        for l in range(len(model_stat_values[:,0])):
-            average_array[l] = np.ma.mean(model_stat_values[l,:])
-    elif average_method == 'MEDIAN':
-        for l in range(len(model_stat_values[:,0])):
-            logger.info(np.ma.median(model_stat_values[l,:]))
-            average_array[l] = np.ma.median(model_stat_values[l,:])
-    elif average_method == 'AGGREGATION':
-        ndays = model_dataframe.shape[0]
-        model_dataframe_aggsum = (
-            model_dataframe.groupby('model_plot_name').agg(['sum'])
-        )
-        model_dataframe_aggsum.columns = (
-            model_dataframe_aggsum.columns.droplevel(1)
-        )
-        avg_values, avg_array, stat_plot_name = (
-            calculate_stat(logger, model_dataframe_aggsum/ndays, stat)
-        )
-        for l in range(len(avg_array[:,0])):
-            average_array[l] = avg_array[l]
-    else:
-        logger.error("Invalid entry for MEAN_METHOD, "
-                     +"use MEAN, MEDIAN, or AGGREGATION")
-        exit(1)
-    return average_array
-
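The MEAN and MEDIAN branches above rely on numpy masked arrays so that missing days are skipped rather than propagated. A self-contained sketch of the MEAN path, with made-up values:

import numpy as np

# one row per model, one column per day; np.nan marks a missing day
values = np.ma.masked_invalid([[1.0, np.nan, 3.0],
                               [2.0, 4.0, 6.0]])
# np.ma.mean ignores the masked entry, so row 0 averages to 2.0, not nan
averages = np.array([np.ma.mean(values[l, :]) for l in range(values.shape[0])])
print(averages)  # [2. 4.]
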
-def calculate_ci(logger, ci_method, modelB_values, modelA_values, total_days,
-                 stat, average_method, randx):
-    """! Calculate confidence intervals between two sets of data
-
-         Args:
-            logger         - logging file
-            ci_method      - string of the method to use to
-                             calculate the confidence intervals
-            modelB_values  - array of values
-            modelA_values  - array of values
-            total_days     - float of total number of days
-                             being considered, sample size
-            stat           - string of the statistic the
-                             confidence intervals are being
-                             calculated for
-            average_method - string of the method to
-                             use to calculate the
-                             average
-            randx          - 2D array of random numbers [0,1)
-
-         Returns:
-            intvl          - float of the confidence interval
-    """
-    if ci_method == 'EMC':
-        modelB_modelA_diff = modelB_values - modelA_values
-        ndays = total_days - np.ma.count_masked(modelB_modelA_diff)
-        modelB_modelA_diff_mean = modelB_modelA_diff.mean()
-        modelB_modelA_std = np.sqrt(
-            ((modelB_modelA_diff - modelB_modelA_diff_mean)**2).mean()
-        )
-        if ndays >= 80:
-            intvl = 1.960*modelB_modelA_std/np.sqrt(ndays-1)
-        elif ndays >= 40 and ndays < 80:
-            intvl = 2.000*modelB_modelA_std/np.sqrt(ndays-1)
-        elif ndays >= 20 and ndays < 40:
-            intvl = 2.042*modelB_modelA_std/np.sqrt(ndays-1)
-        elif ndays < 20:
-            intvl = 2.228*modelB_modelA_std/np.sqrt(ndays-1)
-    elif ci_method == 'EMC_MONTE_CARLO':
-        ntest, ntests = 1, 10000
-        scores_rand1 = np.empty(ntests)
-        scores_rand2 = np.empty(ntests)
-        scores_diff = np.empty(ntests)
-        while ntest <= ntests:
-            rand1_data = pd.DataFrame(
-                np.nan, index=modelB_values.index,
-                columns=modelB_values.columns
-            )
-            replace_level= rand1_data.index.get_level_values(0)[0]
-            rand1_data.rename(index={replace_level: 'rand1'}, inplace=True)
-            rand2_data = pd.DataFrame(
-                np.nan, index=modelB_values.index,
-                columns=modelB_values.columns
-            )
-            replace_level= rand2_data.index.get_level_values(0)[0]
-            rand2_data.rename(index={replace_level: 'rand2'}, inplace=True)
-            nday, ndays = 1, total_days
-            while nday <= ndays:
-                if randx[ntest-1,nday-1] - 0.5 >= 0:
-                    rand1_data.iloc[nday-1,:] = modelA_values.iloc[nday-1,:]
-                    rand2_data.iloc[nday-1,:] = modelB_values.iloc[nday-1,:]
-                else:
-                    rand1_data.iloc[nday-1,:] = modelB_values.iloc[nday-1,:]
-                    rand2_data.iloc[nday-1,:] = modelA_values.iloc[nday-1,:]
-                nday+=1
-            rand1_stat_values, rand1_stat_values_array, stat_plot_name = (
-                calculate_stat(logger, rand1_data, stat)
-            )
-            rand2_stat_values, rand2_stat_values_array, stat_plot_name = (
-                calculate_stat(logger, rand2_data, stat)
-            )
-            rand1_average_array = calculate_average(logger, average_method,
-                                                    stat, rand1_data,
-                                                    rand1_stat_values_array[:,0,:])
-            scores_rand1[ntest-1] = rand1_average_array[0]
-            rand2_average_array = calculate_average(logger, average_method,
-                                                    stat, rand2_data,
-                                                    rand2_stat_values_array[:,0,:])
-            scores_rand2[ntest-1] = rand2_average_array[0]
-            scores_diff[ntest-1] = (
-                rand2_average_array[0] - rand1_average_array[0]
-            )
-            ntest+=1
-        scores_diff_mean = np.sum(scores_diff)/ntests
-        scores_diff_var = np.sum((scores_diff-scores_diff_mean)**2)
-        scores_diff_std = np.sqrt(scores_diff_var/(ntests-1))
-        intvl = 1.96*scores_diff_std
-    else:
-        logger.error("Invalid entry for MAKE_CI_METHOD, "
-                     +"use EMC, EMC_MONTE_CARLO")
-        exit(1)
-    return intvl
-
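The EMC branch above computes a normal-approximation interval of the form t * s / sqrt(n - 1), with the multiplier picked from the hard-coded sample-size bands (1.960 for n >= 80 down to 2.228 for n < 20). A worked sketch with made-up paired differences over ten days, which falls in the 2.228 band:

import numpy as np

diff = np.ma.masked_invalid([0.4, -0.1, 0.3, 0.2, -0.2,
                             0.1, 0.5, 0.0, -0.3, 0.1])
ndays = diff.count()                              # 10 unmasked days
std = np.sqrt(((diff - diff.mean()) ** 2).mean())  # population std of diffs
intvl = 2.228 * std / np.sqrt(ndays - 1)
print(round(float(intvl), 4))                      # ~0.1819
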
-def get_stat_plot_name(logger, stat):
-    """! Get the formalized name of the statistic being plotted
-
-         Args:
-            stat           - string of the simple statistic
-                             name being plotted
-
-         Returns:
-            stat_plot_name - string of the formal statistic
-                             name being plotted
-    """
-    if stat == 'bias':
-        stat_plot_name = 'Bias'
-    elif stat == 'rmse':
-        stat_plot_name = 'Root Mean Square Error'
-    elif stat == 'msess':
-        stat_plot_name = "Murphy's Mean Square Error Skill Score"
-    elif stat == 'rsd':
-        stat_plot_name = 'Ratio of Standard Deviation'
-    elif stat == 'rmse_md':
-        stat_plot_name = 'Root Mean Square Error from Mean Error'
-    elif stat == 'rmse_pv':
-        stat_plot_name = 'Root Mean Square Error from Pattern Variation'
-    elif stat == 'pcor':
-        stat_plot_name = 'Pattern Correlation'
-    elif stat == 'acc':
-        stat_plot_name = 'Anomaly Correlation Coefficient'
-    elif stat == 'fbar':
-        stat_plot_name = 'Forecast Averages'
-    elif stat == 'fbar_obar':
-        stat_plot_name = 'Forecast and Observation Averages'
-    elif stat == 'speed_err':
-        stat_plot_name = (
-            'Difference in Average FCST and OBS Wind Vector Speeds'
-        )
-    elif stat == 'dir_err':
-        stat_plot_name = (
-            'Difference in Average FCST and OBS Wind Vector Direction'
-        )
-    elif stat == 'rmsve':
-        stat_plot_name = 'Root Mean Square Difference Vector Error'
-    elif stat == 'vdiff_speed':
-        stat_plot_name = 'Difference Vector Speed'
-    elif stat == 'vdiff_dir':
-        stat_plot_name = 'Difference Vector Direction'
-    elif stat == 'fbar_obar_speed':
-        stat_plot_name = 'Average Wind Vector Speed'
-    elif stat == 'fbar_obar_dir':
-        stat_plot_name = 'Average Wind Vector Direction'
-    elif stat == 'fbar_speed':
-        stat_plot_name = 'Average Forecast Wind Vector Speed'
-    elif stat == 'fbar_dir':
-        stat_plot_name = 'Average Forecast Wind Vector Direction'
-    elif stat == 'orate':
-        stat_plot_name = 'Observation Rate'
-    elif stat == 'baser':
-        stat_plot_name = 'Base Rate'
-    elif stat == 'frate':
-        stat_plot_name = 'Forecast Rate'
-    elif stat == 'orate_frate':
-        stat_plot_name = 'Observation and Forecast Rates'
-    elif stat == 'baser_frate':
-        stat_plot_name = 'Base and Forecast Rates'
-    elif stat == 'accuracy':
-        stat_plot_name = 'Accuracy'
-    elif stat == 'fbias':
-        stat_plot_name = 'Frequency Bias'
-    elif stat == 'pod':
-        stat_plot_name = 'Probability of Detection'
-    elif stat == 'hrate':
-        stat_plot_name = 'Hit Rate'
-    elif stat == 'pofd':
-        stat_plot_name = 'Probability of False Detection'
-    elif stat == 'farate':
-        stat_plot_name = 'False Alarm Rate'
-    elif stat == 'podn':
-        stat_plot_name = 'Probability of Detection of the Non-Event'
-    elif stat == 'faratio':
-        stat_plot_name = 'False Alarm Ratio'
-    elif stat == 'csi':
-        stat_plot_name = 'Critical Success Index'
-    elif stat == 'ts':
-        stat_plot_name = 'Threat Score'
-    elif stat == 'gss':
-        stat_plot_name = 'Gilbert Skill Score'
-    elif stat == 'ets':
-        stat_plot_name = 'Equitable Threat Score'
-    elif stat == 'hk':
-        stat_plot_name = 'Hanssen-Kuipers Discriminant'
-    elif stat == 'tss':
-        stat_plot_name = 'True Skill Score'
-    elif stat == 'pss':
-        stat_plot_name = 'Peirce Skill Score'
-    elif stat == 'hss':
-        stat_plot_name = 'Heidke Skill Score'
-    else:
-        logger.error(stat+" is not a valid option")
-        exit(1)
-    return stat_plot_name
-
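The name lookup above is a long if/elif ladder; an equivalent table-driven sketch is shown below. The mapping is abbreviated and the helper name is hypothetical, not part of the code being removed:

STAT_PLOT_NAMES = {
    'bias': 'Bias',
    'rmse': 'Root Mean Square Error',
    'acc': 'Anomaly Correlation Coefficient',
    'ets': 'Equitable Threat Score',
    # ... remaining entries from the table above
}

def get_stat_plot_name_sketch(stat):
    # dict lookup replaces the elif chain; unknown keys still fail loudly
    try:
        return STAT_PLOT_NAMES[stat]
    except KeyError:
        raise ValueError(f"{stat} is not a valid option")
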
-def calculate_stat(logger, model_data, stat):
-    """! Calculate the statistic from the data from the
-         read in MET .stat file(s)
-
-         Args:
-            model_data        - Dataframe containing the model(s)
-                                information from the MET .stat
-                                files
-            stat              - string of the simple statistic
-                                name being plotted
-
-         Returns:
-            stat_values       - Dataframe of the statistic values
-            stat_values_array - array of the statistic values
-            stat_plot_name    - string of the formal statistic
-                                name being plotted
-    """
-    model_data_columns = model_data.columns.values.tolist()
-    if model_data_columns == [ 'TOTAL' ]:
-        logger.error("Empty model_data dataframe")
-        exit(1)
-        stat_values = model_data.loc[:]['TOTAL']
-    else:
-        if all(elem in model_data_columns for elem in
-                ['FBAR', 'OBAR', 'MAE']):
-            line_type = 'SL1L2'
-            fbar = model_data.loc[:]['FBAR']
-            obar = model_data.loc[:]['OBAR']
-            fobar = model_data.loc[:]['FOBAR']
-            ffbar = model_data.loc[:]['FFBAR']
-            oobar = model_data.loc[:]['OOBAR']
-        elif all(elem in model_data_columns for elem in
-                  ['FABAR', 'OABAR', 'MAE']):
-            line_type = 'SAL1L2'
-            fabar = model_data.loc[:]['FABAR']
-            oabar = model_data.loc[:]['OABAR']
-            foabar = model_data.loc[:]['FOABAR']
-            ffabar = model_data.loc[:]['FFABAR']
-            ooabar = model_data.loc[:]['OOABAR']
-        elif all(elem in model_data_columns for elem in
-                  ['UFBAR', 'VFBAR']):
-            line_type = 'VL1L2'
-            ufbar = model_data.loc[:]['UFBAR']
-            vfbar = model_data.loc[:]['VFBAR']
-            uobar = model_data.loc[:]['UOBAR']
-            vobar = model_data.loc[:]['VOBAR']
-            uvfobar = model_data.loc[:]['UVFOBAR']
-            uvffbar = model_data.loc[:]['UVFFBAR']
-            uvoobar = model_data.loc[:]['UVOOBAR']
-        elif all(elem in model_data_columns for elem in
-                  ['UFABAR', 'VFABAR']):
-            line_type = 'VAL1L2'
-            ufabar = model_data.loc[:]['UFABAR']
-            vfabar = model_data.loc[:]['VFABAR']
-            uoabar = model_data.loc[:]['UOABAR']
-            voabar = model_data.loc[:]['VOABAR']
-            uvfoabar = model_data.loc[:]['UVFOABAR']
-            uvffabar = model_data.loc[:]['UVFFABAR']
-            uvooabar = model_data.loc[:]['UVOOABAR']
-        elif all(elem in model_data_columns for elem in
-                  ['VDIFF_SPEED', 'VDIFF_DIR']):
-            line_type = 'VCNT'
-            fbar = model_data.loc[:]['FBAR']
-            obar = model_data.loc[:]['OBAR']
-            fs_rms = model_data.loc[:]['FS_RMS']
-            os_rms = model_data.loc[:]['OS_RMS']
-            msve = model_data.loc[:]['MSVE']
-            rmsve = model_data.loc[:]['RMSVE']
-            fstdev = model_data.loc[:]['FSTDEV']
-            ostdev = model_data.loc[:]['OSTDEV']
-            fdir = model_data.loc[:]['FDIR']
-            odir = model_data.loc[:]['ODIR']
-            fbar_speed = model_data.loc[:]['FBAR_SPEED']
-            obar_speed = model_data.loc[:]['OBAR_SPEED']
-            vdiff_speed = model_data.loc[:]['VDIFF_SPEED']
-            vdiff_dir = model_data.loc[:]['VDIFF_DIR']
-            speed_err = model_data.loc[:]['SPEED_ERR']
-            dir_err = model_data.loc[:]['DIR_ERR']
-        elif all(elem in model_data_columns for elem in
-                  ['FY_OY', 'FN_ON']):
-            line_type = 'CTC'
-            total = model_data.loc[:]['TOTAL']
-            fy_oy = model_data.loc[:]['FY_OY']
-            fy_on = model_data.loc[:]['FY_ON']
-            fn_oy = model_data.loc[:]['FN_OY']
-            fn_on = model_data.loc[:]['FN_ON']
-        else:
-            logger.error("Could not recognize line type from columns")
-            exit(1)
-    if stat == 'bias':
-        stat_plot_name = 'Bias'
-        if line_type == 'SL1L2':
-            stat_values = fbar - obar
-        elif line_type == 'VL1L2':
-            stat_values = np.sqrt(uvffbar) - np.sqrt(uvoobar)
-        elif line_type == 'VCNT':
-            stat_values = fbar - obar
-        elif line_type == 'CTC':
-            stat_values = (fy_oy + fy_on)/(fy_oy + fn_oy)
-        else:
-            logger.error(stat+" cannot be computed from line type "+line_type)
-            exit(1)
-    elif stat == 'rmse':
-        stat_plot_name = 'Root Mean Square Error'
-        if line_type == 'SL1L2':
-            stat_values = np.sqrt(ffbar + oobar - 2*fobar)
-        elif line_type == 'VL1L2':
-            stat_values = np.sqrt(uvffbar + uvoobar - 2*uvfobar)
-        else:
-            logger.error(stat+" cannot be computed from line type "+line_type)
-            exit(1)
-    elif stat == 'msess':
-        stat_plot_name = "Murphy's Mean Square Error Skill Score"
-        if line_type == 'SL1L2':
-            mse = ffbar + oobar - 2*fobar
-            var_o = oobar - obar*obar
-            stat_values = 1 - mse/var_o
-        elif line_type == 'VL1L2':
-            mse = uvffbar + uvoobar - 2*uvfobar
-            var_o = uvoobar - uobar*uobar - vobar*vobar
-            stat_values = 1 - mse/var_o
-        else:
-            logger.error(stat+" cannot be computed from line type "+line_type)
-            exit(1)
-    elif stat == 'rsd':
-        stat_plot_name = 'Ratio of Standard Deviation'
-        if line_type == 'SL1L2':
-            var_f = ffbar - fbar*fbar
-            var_o = oobar - obar*obar
-            stat_values = np.sqrt(var_f)/np.sqrt(var_o)
-        elif line_type == 'VL1L2':
-            var_f = uvffbar - ufbar*ufbar - vfbar*vfbar
-            var_o = uvoobar - uobar*uobar - vobar*vobar
-            stat_values = np.sqrt(var_f)/np.sqrt(var_o)
-        elif line_type == 'VCNT':
-            stat_values = fstdev/ostdev
-        else:
-            logger.error(stat+" cannot be computed from line type "+line_type)
-            exit(1)
-    elif stat == 'rmse_md':
-        stat_plot_name = 'Root Mean Square Error from Mean Error'
-        if line_type == 'SL1L2':
-            stat_values = np.sqrt((fbar-obar)**2)
-        elif line_type == 'VL1L2':
-            stat_values = np.sqrt((ufbar - uobar)**2 + (vfbar - vobar)**2)
-        else:
-            logger.error(stat+" cannot be computed from line type "+line_type)
-            exit(1)
-    elif stat == 'rmse_pv':
-        stat_plot_name = 'Root Mean Square Error from Pattern Variation'
-        if line_type == 'SL1L2':
-            var_f = ffbar - fbar**2
-            var_o = oobar - obar**2
-            R = (fobar - (fbar*obar))/(np.sqrt(var_f*var_o))
-            stat_values = np.sqrt(var_f + var_o - 2*np.sqrt(var_f*var_o)*R)
-        elif line_type == 'VL1L2':
-            var_f = uvffbar - ufbar*ufbar - vfbar*vfbar
-            var_o = uvoobar - uobar*uobar - vobar*vobar
-            R = (uvfobar - ufbar*uobar - vfbar*vobar)/(np.sqrt(var_f*var_o))
-            stat_values = np.sqrt(var_f + var_o - 2*np.sqrt(var_f*var_o)*R)
-        else:
-            logger.error(stat+" cannot be computed from line type "+line_type)
-            exit(1)
-    elif stat == 'pcor':
-        stat_plot_name = 'Pattern Correlation'
-        if line_type == 'SL1L2':
-            var_f = ffbar - fbar*fbar
-            var_o = oobar - obar*obar
-            stat_values = (fobar - fbar*obar)/(np.sqrt(var_f*var_o))
-        elif line_type == 'VL1L2':
-            var_f = uvffbar - ufbar*ufbar - vfbar*vfbar
-            var_o = uvoobar - uobar*uobar - vobar*vobar
-            stat_values = (uvfobar - ufbar*uobar - vfbar*vobar)/(np.sqrt(
-                var_f*var_o))
-        else:
-            logger.error(stat+" cannot be computed from line type "+line_type)
-            exit(1)
-    elif stat == 'acc':
-        stat_plot_name = 'Anomaly Correlation Coefficient'
-        if line_type == 'SAL1L2':
-            stat_values = \
-                (foabar - fabar*oabar)/(np.sqrt(
-                    (ffabar - fabar*fabar)*(ooabar - oabar*oabar)))
-        elif line_type == 'VAL1L2':
-            stat_values = (uvfoabar)/(np.sqrt(uvffabar*uvooabar))
-        else:
-            logger.error(stat+" cannot be computed from line type "+line_type)
-            exit(1)
-    elif stat == 'fbar':
-        stat_plot_name = 'Forecast Averages'
-        if line_type == 'SL1L2':
-            stat_values = fbar
-        elif line_type == 'VL1L2':
-            stat_values = np.sqrt(uvffbar)
-        elif line_type == 'VCNT':
-            stat_values = fbar
-        else:
-            logger.error(stat+" cannot be computed from line type "+line_type)
-            exit(1)
-    elif stat == 'fbar_obar':
-        stat_plot_name = 'Forecast and Observation Averages'
-        if line_type == 'SL1L2':
-            stat_values = model_data.loc[:][['FBAR', 'OBAR']]
-            stat_values_fbar = model_data.loc[:]['FBAR']
-            stat_values_obar = model_data.loc[:]['OBAR']
-        elif line_type == 'VL1L2':
-            stat_values = model_data.loc[:][['UVFFBAR', 'UVOOBAR']]
-            stat_values_fbar = np.sqrt(model_data.loc[:]['UVFFBAR'])
-            stat_values_obar = np.sqrt(model_data.loc[:]['UVOOBAR'])
-        elif line_type == 'VCNT':
-            stat_values = model_data.loc[:][['FBAR', 'OBAR']]
-            stat_values_fbar = model_data.loc[:]['FBAR']
-            stat_values_obar = model_data.loc[:]['OBAR']
-        else:
-            logger.error(stat+" cannot be computed from line type "+line_type)
-            exit(1)
-    elif stat == 'speed_err':
-        stat_plot_name = (
-            'Difference in Average FCST and OBS Wind Vector Speeds'
-        )
-        if line_type == 'VCNT':
-            stat_values = speed_err
-        else:
-            logger.error(stat+" cannot be computed from line type "+line_type)
-            exit(1)
-    elif stat == 'dir_err':
-        stat_plot_name = (
-            'Difference in Average FCST and OBS Wind Vector Direction'
-        )
-        if line_type == 'VCNT':
-            stat_values = dir_err
-        else:
-            logger.error(stat+" cannot be computed from line type "+line_type)
-            exit(1)
-    elif stat == 'rmsve':
-        stat_plot_name = 'Root Mean Square Difference Vector Error'
-        if line_type == 'VCNT':
-            stat_values = rmsve
-        else:
-            logger.error(stat+" cannot be computed from line type "+line_type)
-            exit(1)
-    elif stat == 'vdiff_speed':
-        stat_plot_name = 'Difference Vector Speed'
-        if line_type == 'VCNT':
-            stat_values = vdiff_speed
-        else:
-            logger.error(stat+" cannot be computed from line type "+line_type)
-            exit(1)
-    elif stat == 'vdiff_dir':
-        stat_plot_name = 'Difference Vector Direction'
-        if line_type == 'VCNT':
-            stat_values = vdiff_dir
-        else:
-            logger.error(stat+" cannot be computed from line type "+line_type)
-            exit(1)
-    elif stat == 'fbar_obar_speed':
-        stat_plot_name = 'Average Wind Vector Speed'
-        if line_type == 'VCNT':
-            stat_values = model_data.loc[:][('FBAR_SPEED', 'OBAR_SPEED')]
-        else:
-            logger.error(stat+" cannot be computed from line type "+line_type)
-            exit(1)
-    elif stat == 'fbar_obar_dir':
-        stat_plot_name = 'Average Wind Vector Direction'
-        if line_type == 'VCNT':
-            stat_values = model_data.loc[:][('FDIR', 'ODIR')]
-        else:
-            logger.error(stat+" cannot be computed from line type "+line_type)
-            exit(1)
-    elif stat == 'fbar_speed':
-        stat_plot_name = 'Average Forecast Wind Vector Speed'
-        if line_type == 'VCNT':
-            stat_values = fbar_speed
-        else:
-            logger.error(stat+" cannot be computed from line type "+line_type)
-            exit(1)
-    elif stat == 'fbar_dir':
-        stat_plot_name = 'Average Forecast Wind Vector Direction'
-        if line_type == 'VCNT':
-            stat_values = fdir
-        else:
-            logger.error(stat+" cannot be computed from line type "+line_type)
-            exit(1)
-    elif stat == 'orate' or stat == 'baser':
-        if stat == 'orate':
-            stat_plot_name = 'Observation Rate'
-        elif stat == 'baser':
-            stat_plot_name = 'Base Rate'
-        if line_type == 'CTC':
-            stat_values = (fy_oy + fn_oy)/total
-        else:
-            logger.error(stat+" cannot be computed from line type "+line_type)
-            exit(1)
-    elif stat == 'frate':
-        stat_plot_name = 'Forecast Rate'
-        if line_type == 'CTC':
-            stat_values = (fy_oy + fy_on)/total
-        else:
-            logger.error(stat+" cannot be computed from line type "+line_type)
-            exit(1)
-    elif stat == 'orate_frate' or stat == 'baser_frate':
-        if stat == 'orate_frate':
-            stat_plot_name = 'Observation and Forecast Rates'
-        elif stat == 'baser_frate':
-            stat_plot_name = 'Base and Forecast Rates'
-        if line_type == 'CTC':
-            stat_values_fbar = (fy_oy + fy_on)/total
-            stat_values_obar = (fy_oy + fn_oy)/total
-            stat_values = pd.concat([stat_values_fbar, stat_values_obar],
-                                    axis=1)
-        else:
-            logger.error(stat+" cannot be computed from line type "+line_type)
-            exit(1)
-    elif stat == 'accuracy':
-        stat_plot_name = 'Accuracy'
-        if line_type == 'CTC':
-            stat_values = (fy_oy + fn_on)/total
-        else:
-            logger.error(stat+" cannot be computed from line type "+line_type)
-            exit(1)
-    elif stat == 'fbias':
-        stat_plot_name = 'Frequency Bias'
-        if line_type == 'CTC':
-            stat_values = (fy_oy + fy_on)/(fy_oy + fn_oy)
-        else:
-            logger.error(stat+" cannot be computed from line type "+line_type)
-            exit(1)
-    elif stat == 'pod' or stat == 'hrate':
-        if stat == 'pod':
-            stat_plot_name = 'Probability of Detection'
-        elif stat == 'hrate':
-            stat_plot_name = 'Hit Rate'
-        if line_type == 'CTC':
-            stat_values = fy_oy/(fy_oy + fn_oy)
-        else:
-            logger.error(stat+" cannot be computed from line type "+line_type)
-            exit(1)
-    elif stat == 'pofd' or stat == 'farate':
-        if stat == 'pofd':
-            stat_plot_name = 'Probability of False Detection'
-        elif stat == 'farate':
-            stat_plot_name = 'False Alarm Rate'
-        if line_type == 'CTC':
-            stat_values = fy_on/(fy_on + fn_on)
-        else:
-            logger.error(stat+" cannot be computed from line type "+line_type)
-            exit(1)
-    elif stat == 'podn':
-        stat_plot_name = 'Probability of Detection of the Non-Event'
-        if line_type == 'CTC':
-            stat_values = fn_on/(fy_on + fn_on)
-        else:
-            logger.error(stat+" cannot be computed from line type "+line_type)
-            exit(1)
-    elif stat == 'faratio':
-        stat_plot_name = 'False Alarm Ratio'
-        if line_type == 'CTC':
-            stat_values = fy_on/(fy_on + fy_oy)
-        else:
-            logger.error(stat+" cannot be computed from line type "+line_type)
-            exit(1)
-    elif stat == 'csi' or stat == 'ts':
-        if stat == 'csi':
-            stat_plot_name = 'Critical Success Index'
-        elif stat == 'ts':
-            stat_plot_name = 'Threat Score'
-        if line_type == 'CTC':
-            stat_values = fy_oy/(fy_oy + fy_on + fn_oy)
-        else:
-            logger.error(stat+" cannot be computed from line type "+line_type)
-            exit(1)
-    elif stat == 'gss' or stat == 'ets':
-        if stat == 'gss':
-            stat_plot_name = 'Gilbert Skill Score'
-        elif stat == 'ets':
-            stat_plot_name = 'Equitable Threat Score'
-        if line_type == 'CTC':
-            C = ((fy_oy + fy_on)*(fy_oy + fn_oy))/total
-            stat_values = (fy_oy - C)/(fy_oy + fy_on+ fn_oy - C)
-        else:
-            logger.error(stat+" cannot be computed from line type "+line_type)
-            exit(1)
-    elif stat == 'hk' or stat == 'tss' or stat == 'pss':
-        if stat == 'hk':
-            stat_plot_name = 'Hanssen-Kuipers Discriminant'
-        elif stat == 'tss':
-            stat_plot_name = 'True Skill Score'
-        elif stat == 'pss':
-            stat_plot_name = 'Peirce Skill Score'
-        if line_type == 'CTC':
-            stat_values = (
-                ((fy_oy*fn_on)-(fy_on*fn_oy))/((fy_oy+fn_oy)*(fy_on+fn_on))
-            )
-        else:
-            logger.error(stat+" cannot be computed from line type "+line_type)
-            exit(1)
-    elif stat == 'hss':
-        stat_plot_name = 'Heidke Skill Score'
-        if line_type == 'CTC':
-            Ca = (fy_oy+fy_on)*(fy_oy+fn_oy)
-            Cb = (fn_oy+fn_on)*(fy_on+fn_on)
-            C = (Ca + Cb)/total
-            stat_values = (fy_oy + fn_on - C)/(total - C)
-        else:
-            logger.error(stat+" cannot be computed from line type "+line_type)
-            exit(1)
-    else:
-        logger.error(stat+" is not a valid option")
-        exit(1)
-    nindex = stat_values.index.nlevels
-    if stat == 'fbar_obar' or stat == 'orate_frate' or stat == 'baser_frate':
-        if nindex == 1:
-            index0 = len(stat_values_fbar.index.get_level_values(0).unique())
-            stat_values_array_fbar = (
-                np.ma.masked_invalid(
-                    stat_values_fbar.values.reshape(index0)
-                )
-            )
-            index0 = len(stat_values_obar.index.get_level_values(0).unique())
-            stat_values_array_obar = (
-                np.ma.masked_invalid(
-                    stat_values_obar.values.reshape(index0)
-                )
-            )
-        elif nindex == 2:
-            index0 = len(stat_values_fbar.index.get_level_values(0).unique())
-            index1 = len(stat_values_fbar.index.get_level_values(1).unique())
-            stat_values_array_fbar = (
-                np.ma.masked_invalid(
-                    stat_values_fbar.values.reshape(index0,index1)
-                )
-            )
-            index0 = len(stat_values_obar.index.get_level_values(0).unique())
-            index1 = len(stat_values_obar.index.get_level_values(1).unique())
-            stat_values_array_obar = (
-                np.ma.masked_invalid(
-                    stat_values_obar.values.reshape(index0,index1)
-                )
-            )
-        elif nindex == 3:
-            index0 = len(stat_values_fbar.index.get_level_values(0).unique())
-            index1 = len(stat_values_fbar.index.get_level_values(1).unique())
-            index2 = len(stat_values_fbar.index.get_level_values(2).unique())
-            stat_values_array_fbar = (
-                np.ma.masked_invalid(
-                    stat_values_fbar.values.reshape(index0,index1,index2)
-                )
-            )
-            index0 = len(stat_values_obar.index.get_level_values(0).unique())
-            index1 = len(stat_values_obar.index.get_level_values(1).unique())
-            index2 = len(stat_values_obar.index.get_level_values(2).unique())
-            stat_values_array_obar = (
-                np.ma.masked_invalid(
-                    stat_values_obar.values.reshape(index0,index1,index2)
-                )
-            )
-        stat_values_array = np.ma.array([stat_values_array_fbar,
-                                         stat_values_array_obar])
-    else:
-        if nindex == 1:
-            index0 = len(stat_values.index.get_level_values(0).unique())
-            stat_values_array = (
-                np.ma.masked_invalid(
-                    stat_values.values.reshape(1,index0)
-                )
-            )
-        elif nindex == 2:
-            index0 = len(stat_values.index.get_level_values(0).unique())
-            index1 = len(stat_values.index.get_level_values(1).unique())
-            stat_values_array = (
-                np.ma.masked_invalid(
-                    stat_values.values.reshape(1,index0,index1)
-                )
-            )
-        elif nindex == 3:
-            index0 = len(stat_values.index.get_level_values(0).unique())
-            index1 = len(stat_values.index.get_level_values(1).unique())
-            index2 = len(stat_values.index.get_level_values(2).unique())
-            stat_values_array = (
-                np.ma.masked_invalid(
-                    stat_values.values.reshape(1,index0,index1,index2)
-                )
-            )
-    return stat_values, stat_values_array, stat_plot_name
-
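For the CTC-based scores computed above, a quick hand check helps. A self-contained sketch of the Gilbert Skill Score with made-up contingency counts (hits 50, false alarms 20, misses 10, correct negatives 20):

# fy_oy = hits, fy_on = false alarms, fn_oy = misses, fn_on = correct negatives
fy_oy, fy_on, fn_oy, fn_on = 50.0, 20.0, 10.0, 20.0
total = fy_oy + fy_on + fn_oy + fn_on
C = ((fy_oy + fy_on) * (fy_oy + fn_oy)) / total  # hits expected by chance: 42
gss = (fy_oy - C) / (fy_oy + fy_on + fn_oy - C)  # (50-42)/(80-42)
print(round(gss, 3))                              # 0.211
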
-def get_lead_avg_file(stat, input_filename, fcst_lead, output_base_dir):
-    lead_avg_filename = stat + '_' + os.path.basename(input_filename)
-
-    # if fcst_leadX is in filename, replace it with fcst_lead_avgs
-    # and add .txt to end of filename
-    if f'fcst_lead{fcst_lead}' in lead_avg_filename:
-        lead_avg_filename = lead_avg_filename.replace(f'fcst_lead{fcst_lead}', 'fcst_lead_avgs')
-        lead_avg_filename += '.txt'
-
-    # if not, remove mention of forecast lead and
-    # add fcst_lead_avgs.txt to end of filename
-    elif 'fcst_lead_avgs' not in input_filename:
-        lead_avg_filename = lead_avg_filename.replace(f'fcst_lead{fcst_lead}', '')
-        lead_avg_filename += '_fcst_lead_avgs.txt'
-
-    lead_avg_file = os.path.join(output_base_dir, 'data',
-                                 lead_avg_filename)
-    return lead_avg_file
-
-def get_ci_file(stat, input_filename, fcst_lead, output_base_dir, ci_method):
-    CI_filename = stat + '_' + os.path.basename(input_filename)
-    # if fcst_leadX is in filename, replace it with fcst_lead_avgs
-    # and add .txt to end of filename
-    if f'fcst_lead{fcst_lead}' in CI_filename:
-        CI_filename = CI_filename.replace(f'fcst_lead{fcst_lead}',
-                                          'fcst_lead_avgs')
-
-    # if not and fcst_lead_avgs isn't already in filename,
-    # remove mention of forecast lead and
-    # add fcst_lead_avgs.txt to end of filename
-    elif 'fcst_lead_avgs' not in CI_filename:
-        CI_filename = CI_filename.replace(f'fcst_lead{fcst_lead}',
-                                          '')
-        CI_filename += '_fcst_lead_avgs'
-
-    CI_filename += '_CI_' + ci_method + '.txt'
-
-    CI_file = os.path.join(output_base_dir, 'data',
-                           CI_filename)
-    return CI_file
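Both helpers above rewrite a per-lead file name into its lead-averaged counterpart. A standalone sketch of the first branch of that renaming rule, with a hypothetical input path:

import os

stat, fcst_lead = 'bias', '240000'
input_filename = '/some/dir/model1_fcst_lead240000.stat'
# prepend the statistic, swap the lead tag for 'fcst_lead_avgs', append .txt
name = stat + '_' + os.path.basename(input_filename)
name = name.replace(f'fcst_lead{fcst_lead}', 'fcst_lead_avgs') + '.txt'
print(os.path.join('/out', 'data', name))
# /out/data/bias_model1_fcst_lead_avgs.stat.txt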