diff --git a/examples/bouss/README.txt b/examples/bouss/README.txt index 047848de2..2e71aa599 100644 --- a/examples/bouss/README.txt +++ b/examples/bouss/README.txt @@ -1,4 +1,6 @@ +GeoClaw Boussinesq solver examples + The radial_flat subdirectory contains one example using the Boussinesq solver introduced in Clawpack v5.10.0. @@ -14,8 +16,12 @@ OpenMP along with MPI. Some flags have to be set as environment variables or directly in the application Makefile, e.g. see the lines commented out in radial_flat/Makefile. +The file setenv.sh illustrates how you might set some environment +variables for the bash shell. + A file petscMPIoptions is also required to set some PETSc parameters for the iterative method used to solve the large sparse linear systems that arise at each refinement level when the Boussinesq equations are solved. One of the environment variables mentioned above points to this file, and a sample is included in this directory. + diff --git a/examples/bouss/radial_flat/README.rst b/examples/bouss/radial_flat/README.rst index 1d76fb818..6c3e6e086 100644 --- a/examples/bouss/radial_flat/README.rst +++ b/examples/bouss/radial_flat/README.rst @@ -8,6 +8,15 @@ A Gaussian hump of water at the origin spreads out over a flat bottom, as specified by the topo file `flat100.txt` (uniform water depth 100 m). The equations are solved in Cartesian coordinates with the SGN equations. +Running the GeoClaw Boussinesq solvers requires PETSc and MPI. +For more details see the documentation + https://www.clawpack.org/bouss2d.html +and + $CLAW/geoclaw/examples/bouss/README.txt +Run + make check +in this directory to check if things seem ok for running this code. + A flagregion specified by `RuledRectangle_Diagonal.data` (that is created by code in `setrun.py` is used to allow flagging for refinement to level 2 only near the diagonal (for abs(x-y) < 1000). The code is set up to diff --git a/examples/tsunami/chile2010/setplot.py b/examples/tsunami/chile2010/setplot.py index 525132748..fc01ec976 100644 --- a/examples/tsunami/chile2010/setplot.py +++ b/examples/tsunami/chile2010/setplot.py @@ -57,18 +57,14 @@ def addgauges(current_data): # Set up for axes in this figure: plotaxes = plotfigure.new_plotaxes('pcolor') - plotaxes.title = 'Surface' - plotaxes.scaled = True - - def fixup(current_data): - import pylab - addgauges(current_data) - t = current_data.t - t = t / 3600. # hours - pylab.title('Surface at %4.2f hours' % t, fontsize=20) - pylab.xticks(fontsize=15) - pylab.yticks(fontsize=15) - plotaxes.afteraxes = fixup + plotaxes.title = 'Surface at time h:m:s' + plotaxes.aspect_latitude = -30. + plotaxes.xticks_fontsize = 10 + plotaxes.yticks_fontsize = 10 + plotaxes.xlabel = 'longitude' + plotaxes.ylabel = 'latitude' + + plotaxes.afteraxes = addgauges # Water plotitem = plotaxes.new_plotitem(plot_type='2d_pcolor') @@ -78,6 +74,8 @@ def fixup(current_data): plotitem.pcolor_cmin = -0.2 plotitem.pcolor_cmax = 0.2 plotitem.add_colorbar = True + plotitem.colorbar_shrink = 0.8 + plotitem.colorbar_extend = 'both' plotitem.amr_celledges_show = [0,0,0] plotitem.patchedges_show = 1 @@ -114,18 +112,37 @@ def fixup(current_data): # Set up for axes in this figure: plotaxes = plotfigure.new_plotaxes() - plotaxes.xlimits = 'auto' - plotaxes.ylimits = 'auto' + plotaxes.time_scale = 1/3600. 
# convert to hours + plotaxes.xlimits = [0, 9] + plotaxes.ylimits = [-0.3, 0.3] plotaxes.title = 'Surface' + plotaxes.title_fontsize = 15 + plotaxes.time_label = 'time (hours)' + plotaxes.ylabel = 'surface elevation (m)' + plotaxes.grid = True + + def add_obs(current_data): + from pylab import plot, legend + gaugeno = current_data.gaugeno + if gaugeno == 32412: + try: + tgauge = TG32412[:,0] / 3600. # convert to hours + plot(tgauge, TG32412[:,1], 'r', linewidth=1.0) + legend(['GeoClaw','Obs'],loc='lower right') + except: pass + + plotaxes.afteraxes = add_obs # Plot surface as blue curve: plotitem = plotaxes.new_plotitem(plot_type='1d_plot') - plotitem.plot_var = 3 + plotitem.plot_var = 3 # eta plotitem.plotstyle = 'b-' + plotitem.kwargs = {'linewidth':1.8} + # Plot topo as green curve: plotitem = plotaxes.new_plotitem(plot_type='1d_plot') - plotitem.show = False + plotitem.show = False # not being used def gaugetopo(current_data): q = current_data.q @@ -137,24 +154,6 @@ def gaugetopo(current_data): plotitem.plot_var = gaugetopo plotitem.plotstyle = 'g-' - def add_zeroline(current_data): - from pylab import plot, legend, xticks, floor, axis, xlabel - t = current_data.t - gaugeno = current_data.gaugeno - - if gaugeno == 32412: - try: - plot(TG32412[:,0], TG32412[:,1], 'r') - legend(['GeoClaw','Obs'],loc='lower right') - except: pass - axis((0,t.max(),-0.3,0.3)) - - plot(t, 0*t, 'k') - n = int(floor(t.max()/3600.) + 2) - xticks([3600*i for i in range(n)], ['%i' % i for i in range(n)]) - xlabel('time (hours)') - - plotaxes.afteraxes = add_zeroline #----------------------------------------- diff --git a/examples/tsunami/chile2010_fgmax-fgout/make_fgout_animation.py b/examples/tsunami/chile2010_fgmax-fgout/make_fgout_animation.py index 790e8b746..ab47c7588 100644 --- a/examples/tsunami/chile2010_fgmax-fgout/make_fgout_animation.py +++ b/examples/tsunami/chile2010_fgmax-fgout/make_fgout_animation.py @@ -34,7 +34,6 @@ fgno = 1 # which fgout grid outdir = '_output' -format = 'binary' # format of fgout grid output if 1: # use all fgout frames in outdir: @@ -48,7 +47,8 @@ fgframes = range(1,26) # frames of fgout solution to use in animation # Instantiate object for reading fgout frames: -fgout_grid = fgout_tools.FGoutGrid(fgno, outdir, format) +fgout_grid = fgout_tools.FGoutGrid(fgno, outdir) +fgout_grid.read_fgout_grids_data() # Plot one frame of fgout data and define the Artists that will need to diff --git a/examples/tsunami/chile2010_fgmax-fgout/make_fgout_animation_with_transect.py b/examples/tsunami/chile2010_fgmax-fgout/make_fgout_animation_with_transect.py index 4b32c40da..b9192ad9f 100644 --- a/examples/tsunami/chile2010_fgmax-fgout/make_fgout_animation_with_transect.py +++ b/examples/tsunami/chile2010_fgmax-fgout/make_fgout_animation_with_transect.py @@ -2,7 +2,7 @@ Make an mp4 animation of fgout grid results that also includes a transect of the solution, as often desired in animations. -This is done in a way that makes the animation quickly and with minimum +This is done in a way that makes the animation quickly and with minimum storage required, by making one plot and then defining an update function that only changes the parts of the plot that change in each frame. 
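Both animation scripts now construct the fgout reader without passing the output format and instead call read_fgout_grids_data(), which picks up the format, extent, times, and q_out_vars from fgout_grids.data. A minimal sketch of this updated reading pattern, assuming a prior GeoClaw run has left fgout grid 1 in _output (the frame number is chosen only for illustration):

    from clawpack.geoclaw import fgout_tools

    fgno = 1                 # which fgout grid to read
    outdir = '_output'       # contains fgout output files and fgout_grids.data

    fgout_grid = fgout_tools.FGoutGrid(fgno, outdir)
    fgout_grid.read_fgout_grids_data()   # format, extent, times, q_out_vars read here

    fgout = fgout_grid.read_frame(1)     # read the first fgout frame
    print('Frame at t = %g s, eta range %g to %g m'
          % (fgout.t, fgout.eta.min(), fgout.eta.max()))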
@@ -21,38 +21,39 @@ """ import sys -if 'matplotlib' not in sys.modules: + +if "matplotlib" not in sys.modules: # Use an image backend to insure animation has size specified by figsize import matplotlib - matplotlib.use('Agg') -from pylab import * -import os, glob -from clawpack.visclaw import plottools, geoplot, gridtools, animation_tools -from clawpack.geoclaw import util -from matplotlib import animation, colors + matplotlib.use("Agg") + +import glob +import os from datetime import timedelta -from clawpack.geoclaw import fgout_tools - +from clawpack.geoclaw import fgout_tools, util +from clawpack.visclaw import animation_tools, geoplot, gridtools, plottools +from matplotlib import animation, colors +from pylab import * + fgno = 1 # which fgout grid -outdir = '_output' -format = 'binary' # format of fgout grid output +outdir = "_output" if 1: # use all fgout frames in outdir: - fgout_frames = glob.glob(os.path.join(outdir, \ - 'fgout%s.t*' % str(fgno).zfill(4))) + fgout_frames = glob.glob(os.path.join(outdir, "fgout%s.t*" % str(fgno).zfill(4))) nout = len(fgout_frames) - fgframes = range(1, nout+1) - print('Found %i fgout frames in %s' % (nout,outdir)) + fgframes = range(1, nout + 1) + print("Found %i fgout frames in %s" % (nout, outdir)) else: # set explicitly, e.g. to test with only a few frames - fgframes = range(1,26) # frames of fgout solution to use in animation + fgframes = range(1, 26) # frames of fgout solution to use in animation # Instantiate object for reading fgout frames: -fgout_grid = fgout_tools.FGoutGrid(fgno, outdir, format) +fgout_grid = fgout_tools.FGoutGrid(fgno, outdir) +fgout_grid.read_fgout_grids_data() # Plot one frame of fgout data and define the Artists that will need to @@ -63,29 +64,29 @@ plot_extent = fgout.extent_edges ylat = fgout.Y.mean() # for aspect ratio of plots -fig = figure(figsize=(12,7)) +fig = figure(figsize=(12, 7)) # --------------------------------- # axis for planview plot of ocean: -ax = axes([.1,.1,.4,.8]) +ax = axes([0.1, 0.1, 0.4, 0.8]) ax.set_xlim(plot_extent[:2]) ax.set_ylim(plot_extent[2:]) -plottools.pcolorcells(fgout.X,fgout.Y,fgout.B, cmap=geoplot.land_colors) -clim(0,100) +plottools.pcolorcells(fgout.X, fgout.Y, fgout.B, cmap=geoplot.land_colors) +clim(0, 100) -eta = ma.masked_where(fgout.h<0.001, fgout.eta) +eta = ma.masked_where(fgout.h < 0.001, fgout.eta) -eta_plot = plottools.pcolorcells(fgout.X,fgout.Y,eta, - cmap=geoplot.tsunami_colormap) -clim(-0.3,0.3) -cb = colorbar(eta_plot, extend='both', shrink=0.5, - orientation='horizontal', anchor=(0.4,1)) -cb.set_label('meters') -title_text = title('Surface at time %s' % timedelta(seconds=fgout.t)) +eta_plot = plottools.pcolorcells(fgout.X, fgout.Y, eta, cmap=geoplot.tsunami_colormap) +clim(-0.3, 0.3) +cb = colorbar( + eta_plot, extend="both", shrink=0.5, orientation="horizontal", anchor=(0.4, 1) +) +cb.set_label("meters") +title_text = title("Surface at time %s" % timedelta(seconds=fgout.t)) -ax.set_aspect(1./cos(ylat*pi/180.)) +ax.set_aspect(1.0 / cos(ylat * pi / 180.0)) ticklabel_format(useOffset=False) xticks(rotation=20) ax.set_xlim(plot_extent[:2]) @@ -94,24 +95,26 @@ # --------------------------------- # Transect: -x1trans = -110.; y1trans = -20. -x2trans = -72.; y2trans = -35. 
+x1trans = -110.0 +y1trans = -20.0 +x2trans = -72.0 +y2trans = -35.0 -plot([x1trans,x2trans], [y1trans,y2trans],'k-',linewidth=0.8) -text(x1trans+1,y1trans+1,'Transect', ha='center', fontsize=10) +plot([x1trans, x2trans], [y1trans, y2trans], "k-", linewidth=0.8) +text(x1trans + 1, y1trans + 1, "Transect", ha="center", fontsize=10) # define points on transect: npts = 1000 # number of points on transect if 0: # straight line on longitude-latitude plane: - xtrans = linspace(x1trans,x2trans,npts) - ytrans = linspace(y1trans,y2trans,npts) + xtrans = linspace(x1trans, x2trans, npts) + ytrans = linspace(y1trans, y2trans, npts) else: # great circle on earth: - xtrans,ytrans = util.gctransect(x1trans,y1trans,x2trans,y2trans,npts) - + xtrans, ytrans = util.gctransect(x1trans, y1trans, x2trans, y2trans, npts) -def extract_transect(fgout_soln,xtrans,ytrans): + +def extract_transect(fgout_soln, xtrans, ytrans): """ Interpolate from fgout_solution to points on the transect, taking value from cell the fgout point lies in, giving piecewise constant interpolant @@ -119,59 +122,62 @@ def extract_transect(fgout_soln,xtrans,ytrans): Alternatively could use method='linear' for linear interpolation. """ - eta1d = gridtools.grid_eval_2d(fgout_soln.X, fgout_soln.Y, - fgout_soln.eta, xtrans, ytrans, - method='nearest') - B1d = gridtools.grid_eval_2d(fgout_soln.X, fgout_soln.Y, - fgout_soln.B, xtrans, ytrans, - method='nearest') - eta1d = where(B1d>0, nan, eta1d) # mask onshore region + eta1d = gridtools.grid_eval_2d( + fgout_soln.X, fgout_soln.Y, fgout_soln.eta, xtrans, ytrans, method="nearest" + ) + B1d = gridtools.grid_eval_2d( + fgout_soln.X, fgout_soln.Y, fgout_soln.B, xtrans, ytrans, method="nearest" + ) + eta1d = where(B1d > 0, nan, eta1d) # mask onshore region return B1d, eta1d + # ---------------------------------------- # Axes for transect of surface elevation: -axtrans1 = axes([.55,.6,.4,.3]) +axtrans1 = axes([0.55, 0.6, 0.4, 0.3]) -axtrans1.set_title('Surface on transect') +axtrans1.set_title("Surface on transect") -axtrans1.set_xlim(x1trans,x2trans) -axtrans1.set_ylim(-1,1) +axtrans1.set_xlim(x1trans, x2trans) +axtrans1.set_ylim(-1, 1) -Btrans1, etatrans1 = extract_transect(fgout,xtrans,ytrans) -#import pdb; pdb.set_trace() +Btrans1, etatrans1 = extract_transect(fgout, xtrans, ytrans) +# import pdb; pdb.set_trace() Btrans, etatrans = Btrans1, etatrans1 # surface plot: -etatrans1_plot, = axtrans1.plot(xtrans, etatrans, 'b') +(etatrans1_plot,) = axtrans1.plot(xtrans, etatrans, "b") axtrans1.grid(True) # ---------------------------------------- # Axes for transect of topography: -axtrans2 = axes([.55,.1,.4,.3]) +axtrans2 = axes([0.55, 0.1, 0.4, 0.3]) -axtrans2.set_title('Topography on transect') +axtrans2.set_title("Topography on transect") -axtrans2.set_xlim(x1trans,x2trans) -axtrans2.set_ylim(-5000,1000) +axtrans2.set_xlim(x1trans, x2trans) +axtrans2.set_ylim(-5000, 1000) -Btrans, etatrans = extract_transect(fgout,xtrans,ytrans) +Btrans, etatrans = extract_transect(fgout, xtrans, ytrans) # filled regions: -Bfill_plot = axtrans2.fill_between(xtrans, Btrans-1e5, Btrans, - color=[.7,1,.7,1]) # light green -etafill_plot = axtrans2.fill_between(xtrans, Btrans, etatrans, - color=[.7,.7,1,1]) # light blue +Bfill_plot = axtrans2.fill_between( + xtrans, Btrans - 1e5, Btrans, color=[0.7, 1, 0.7, 1] +) # light green +etafill_plot = axtrans2.fill_between( + xtrans, Btrans, etatrans, color=[0.7, 0.7, 1, 1] +) # light blue # surface and topo solid lines: -etatrans2_plot, = axtrans2.plot(xtrans, etatrans, 'b') 
-Btrans2_plot, = axtrans2.plot(xtrans, Btrans, 'g') +(etatrans2_plot,) = axtrans2.plot(xtrans, etatrans, "b") +(Btrans2_plot,) = axtrans2.plot(xtrans, Btrans, "g") # create a dummy figure and axes, only needed to update fill_between artists: -figdummy,axdummy = subplots() +figdummy, axdummy = subplots() def update(fgframeno): @@ -180,56 +186,56 @@ def update(fgframeno): Assumes blit==False in call to animation.FuncAnimation below, so tuple of update_artists does not need to be returned. """ - + fgout = fgout_grid.read_frame(fgframeno) - print('Updating plot at time %s' % timedelta(seconds=fgout.t)) - + print("Updating plot at time %s" % timedelta(seconds=fgout.t)) + # reset title to current time: - title_text.set_text('Surface at time %s' % timedelta(seconds=fgout.t)) + title_text.set_text("Surface at time %s" % timedelta(seconds=fgout.t)) # reset surface eta to current state: - eta = ma.masked_where(fgout.h<0.001, fgout.eta) + eta = ma.masked_where(fgout.h < 0.001, fgout.eta) eta_plot.set_array(eta.T.flatten()) - + # update transect data: - Btrans, etatrans = extract_transect(fgout,xtrans,ytrans) - + Btrans, etatrans = extract_transect(fgout, xtrans, ytrans) + # update lines plotted: - etatrans1_plot.set_data(xtrans,etatrans) - etatrans2_plot.set_data(xtrans,etatrans) - Btrans2_plot.set_data(xtrans,Btrans) + etatrans1_plot.set_data(xtrans, etatrans) + etatrans2_plot.set_data(xtrans, etatrans) + Btrans2_plot.set_data(xtrans, Btrans) # update the PolyCollections for fill_between plots: - # There doesn't seem to be an easier way to do this... - dummy = axdummy.fill_between(xtrans, Btrans-1e5, Btrans, color=[.5,1,.5,1]) + # There doesn't seem to be an easier way to do this... + dummy = axdummy.fill_between(xtrans, Btrans - 1e5, Btrans, color=[0.5, 1, 0.5, 1]) dp = dummy.get_paths()[0] dummy.remove() Bfill_plot.set_paths([dp.vertices]) - dummy = axdummy.fill_between(xtrans, Btrans, etatrans, color=[.5,.5,1,1]) + dummy = axdummy.fill_between(xtrans, Btrans, etatrans, color=[0.5, 0.5, 1, 1]) dp = dummy.get_paths()[0] dummy.remove() etafill_plot.set_paths([dp.vertices]) - -if __name__ == '__main__': - print('Making anim...') - anim = animation.FuncAnimation(fig, update, frames=fgframes, - interval=200, blit=False) - +if __name__ == "__main__": + print("Making anim...") + anim = animation.FuncAnimation( + fig, update, frames=fgframes, interval=200, blit=False + ) + # Output files: - name = 'fgout_animation_with_transect' + name = "fgout_animation_with_transect" - fname_mp4 = name + '.mp4' + fname_mp4 = name + ".mp4" - #fname_html = None - fname_html = name + '.html' + # fname_html = None + fname_html = name + ".html" if fname_mp4: fps = 5 - print('Making mp4...') - writer = animation.writers['ffmpeg'](fps=fps) + print("Making mp4...") + writer = animation.writers["ffmpeg"](fps=fps) anim.save(fname_mp4, writer=writer) print("Created %s" % fname_mp4) diff --git a/examples/tsunami/chile2010_fgmax-fgout/use_xarray_backends.py b/examples/tsunami/chile2010_fgmax-fgout/use_xarray_backends.py new file mode 100644 index 000000000..6d7fa9128 --- /dev/null +++ b/examples/tsunami/chile2010_fgmax-fgout/use_xarray_backends.py @@ -0,0 +1,87 @@ +import glob + +try: + import rioxarray + import xarray as xr +except ImportError: + "You must install xarray and rioxarray in order to use the xarray backends" + raise + +from clawpack.geoclaw.xarray_backends import FGMaxBackend, FGOutBackend + +# epsg code for lat-lon +# Optionally, provide an epsg code to assign the associated coordinate system to the file. 
+# default behavior assigns no coordinate system. Note that if no coordinate system is provided, +# it will come in a GIS with coordinates associated with row and column number (not the x and y position +# encoded in the netcdf). + +epsg_code = 4326 + +# An example of a fgout grid. +filename = "_output/fgout0001.b0001" +# provide the .bxxx file if binary format is used or the +# .qxxx file if ascii format is used. +# the format, fg number, and frame number are inferred from the filename. + +ds = xr.open_dataset( + filename, + engine=FGOutBackend, + backend_kwargs={ + "epsg": epsg_code, + "qmap": "geoclaw", + # qmap is the qmap specified to the fgout object in setrun.py see + # the following documentation page for more details. + # https://www.clawpack.org/dev/fgout.html#specifying-q-out-vars + "dry_tolerance": None, + # variables that are not eta and B are masked + # where h>dry_tolerance. To turn this functionality + # off set dry_tolerance = None. + }, +) +# ds is now an xarray object. It can be interacted with directly or written to netcdf using +ds.to_netcdf("fgout0001_0001.nc") + +# It is possible to combine all fgout files into a single netcdf file +# using xr.open_mfdataset (requires dask) or xr.concat (does not require dask) +# https://docs.xarray.dev/en/stable/generated/xarray.open_mfdataset.html +# https://docs.xarray.dev/en/latest/generated/xarray.concat.html +# for instructions on installing xarray with dask see: +# https://docs.xarray.dev/en/latest/getting-started-guide/installing.html#instructions + +fgout_files = glob.glob("_output/fgout0001.b*") + +try: + ds_all = xr.open_mfdataset( + fgout_files, + engine=FGOutBackend, + backend_kwargs={"epsg": epsg_code, "qmap": "geoclaw"}, + ) +except ValueError: # if dask is not available, use xr.concat. +# if dask is not installed xr.open_mfdataset() will fail with something like +# ValueError: unrecognized chunk manager dask - must be one of: [] + fgouts = [] + for filename in fgout_files: + ds = xr.open_dataset( + filename, + engine=FGOutBackend, + backend_kwargs={"epsg": epsg_code, "qmap": "geoclaw"}, + ) + fgouts.append(ds) + + ds_all = xr.concat(fgouts, dim="time") + +# save out. +ds_all.to_netcdf("fgout_all.nc") + +# An example of a fgmax grid. +filename = "_output/fgmax0001.txt" +ds = xr.open_dataset(filename, engine=FGMaxBackend, backend_kwargs={"epsg": epsg_code}) +ds.to_netcdf("fgmax0001.nc") + +# To see the use of clipping, change the tfinal in setrun to something like 2*3600.0 +# the fgmax0001_clipped.nc will only be the area where the wave arrived within the considered time. +filename = "_output/fgmax0001.txt" +ds = xr.open_dataset( + filename, engine=FGMaxBackend, backend_kwargs={"epsg": epsg_code, "clip": True} +) +ds.to_netcdf("fgmax0001_clipped.nc") diff --git a/src/2d/bouss/amr2.f90 b/src/2d/bouss/amr2.f90 index e8fa9d984..09e2b241b 100644 --- a/src/2d/bouss/amr2.f90 +++ b/src/2d/bouss/amr2.f90 @@ -513,7 +513,7 @@ program amr2 call read_dtopo_settings() ! specifies file with dtopo from earthquake call read_topo_settings(rest) ! specifies topography (bathymetry) files call set_qinit() ! specifies file with dh if this used instead - call set_fgout(rest) ! Fixed grid settings + call set_fgout(rest,nvar) ! Fixed grid settings call setup_variable_friction() ! Variable friction parameter call set_storm() ! Set storm parameters call set_regions() ! Set refinement regions @@ -548,7 +548,7 @@ program amr2 call read_topo_settings(rest) ! specifies topography (bathymetry) files call set_qinit() ! 
specifies file with dh if this used instead - call set_fgout(rest) ! Fixed grid settings + call set_fgout(rest,nvar) ! Fixed grid settings call setup_variable_friction() ! Variable friction parameter call set_multilayer() ! Set multilayer SWE parameters call set_storm() ! Set storm parameters diff --git a/src/2d/shallow/amr2.f90 b/src/2d/shallow/amr2.f90 index 139e01f1e..7dea01763 100644 --- a/src/2d/shallow/amr2.f90 +++ b/src/2d/shallow/amr2.f90 @@ -491,7 +491,7 @@ program amr2 call read_dtopo_settings() ! specifies file with dtopo from earthquake call read_topo_settings(rest) ! specifies topography (bathymetry) files call set_qinit() ! specifies file with dh if this used instead - call set_fgout(rest) ! Fixed grid settings + call set_fgout(rest,nvar) ! Fixed grid settings call setup_variable_friction() ! Variable friction parameter !call set_multilayer() ! Set multilayer SWE parameters call set_storm() ! Set storm parameters @@ -526,7 +526,7 @@ program amr2 call read_dtopo_settings() ! specifies file with dtopo from earthquake call read_topo_settings(rest) ! specifies topography (bathymetry) files call set_qinit() ! specifies file with dh if this used instead - call set_fgout(rest) ! Fixed grid settings + call set_fgout(rest,nvar) ! Fixed grid settings call setup_variable_friction() ! Variable friction parameter call set_multilayer() ! Set multilayer SWE parameters call set_storm() ! Set storm parameters diff --git a/src/2d/shallow/fgout_module.f90 b/src/2d/shallow/fgout_module.f90 index 981d11ac3..bd689af3c 100644 --- a/src/2d/shallow/fgout_module.f90 +++ b/src/2d/shallow/fgout_module.f90 @@ -10,7 +10,7 @@ module fgout_module real(kind=8), pointer :: late(:,:,:) ! Geometry - integer :: num_vars,mx,my,point_style,fgno,output_format,output_style + integer :: mx,my,point_style,fgno,output_format,output_style real(kind=8) :: dx,dy,x_low,x_hi,y_low,y_hi ! Time Tracking and output types @@ -19,11 +19,13 @@ module fgout_module integer, allocatable :: output_frames(:) real(kind=8), allocatable :: output_times(:) - + + integer :: num_vars ! number of variables to interpolate (num_eqn+3) integer :: nqout ! number of q components to output (+1 for eta) - logical, allocatable :: iqout(:) ! which components to output - integer :: bathy_index,eta_index - logical :: dclaw ! False for GeoClaw + logical, allocatable :: q_out_vars(:) ! which components to print + + !logical, allocatable :: iqout(:) ! which components to output + integer :: bathy_index,eta_index,time_index end type fgout_grid @@ -43,14 +45,16 @@ module fgout_module ! Setup routine that reads in the fixed grids data file and sets up the ! appropriate data structures - subroutine set_fgout(rest,fname) + subroutine set_fgout(rest,num_eqn,fname) use amr_module, only: parmunit, tstart_thisrun + use utility_module, only: parse_values implicit none ! Subroutine arguments - logical :: rest ! restart? + logical, intent(in) :: rest ! restart? + integer, intent(in) :: num_eqn character(len=*), optional, intent(in) :: fname ! Local storage @@ -59,6 +63,9 @@ subroutine set_fgout(rest,fname) type(fgout_grid), pointer :: fg real(kind=8) :: ts + integer :: n, iq + real(kind=8) :: values(num_eqn+2) + character(len=80) :: str if (.not.module_setup) then @@ -109,7 +116,25 @@ subroutine set_fgout(rest,fname) read(unit,*) fg%mx, fg%my read(unit,*) fg%x_low, fg%y_low read(unit,*) fg%x_hi, fg%y_hi - read(unit,*) fg%dclaw + + allocate(fg%q_out_vars(num_eqn+2)) ! for q + eta, topo + do iq=1,num_eqn+2 + fg%q_out_vars(iq) = .false. 
+ enddo + read(unit,'(a)') str + call parse_values(str, n, values) + do k=1,n + iq = nint(values(k)) + fg%q_out_vars(iq) = .true. + enddo + write(6,*) '+++ q_out_vars:', fg%q_out_vars + + fg%num_vars = num_eqn + 3 ! also interp topo,eta,time + fg%nqout = 0 ! count how many are to be output + do k=1,num_eqn+2 + if (fg%q_out_vars(k)) fg%nqout = fg%nqout + 1 + enddo + !fg%nqout = fg%nqout + 1 ! for eta ! Initialize next_output_index @@ -193,68 +218,14 @@ subroutine set_fgout(rest,fname) print *,'y grid points my <= 0, set my >= 1' endif - - ! For now, hard-wire with defaults for either GeoClaw or D-Claw - ! need to save q plus topo, eta, t for interp in space-time - - if (fg%dclaw) then - ! For D-Claw: - fg%num_vars = 10 - ! for h, hu, hv, hm, pb, hchi, b_eroded, bathy, eta, time - else - ! GeoClaw: - fg%num_vars = 6 - ! for h, hu, hv, bathy, eta, time - endif - - ! specify which components of q (plus eta?) to output: - ! (eventually this should be set from user input) - - if (fg%num_vars == 6) then - ! GeoClaw - ! indexes used in early and late arrays: - ! 1:3 are q variables, 6 is time - fg%bathy_index = 4 - fg%eta_index = 5 - - allocate(fg%iqout(4)) - fg%iqout(1) = .true. ! output h? - fg%iqout(2) = .true. ! output hu? - fg%iqout(3) = .true. ! output hv? - fg%iqout(4) = .true. ! output eta? - fg%nqout = 0 - do k=1,4 - if (fg%iqout(k)) fg%nqout = fg%nqout + 1 - enddo - endif + fg%bathy_index = num_eqn + 2 + fg%eta_index = num_eqn + 1 + fg%time_index = num_eqn + 3 - if (fg%num_vars == 10) then - ! D-Claw: - ! indexes used in early and late arrays: - ! 1:7 are q variables, 10 is time - fg%bathy_index = 8 - fg%eta_index = 9 - - allocate(fg%iqout(8)) - fg%iqout(1) = .true. ! output h? - fg%iqout(2) = .true. ! output hu? - fg%iqout(3) = .true. ! output hv? - fg%iqout(4) = .true. ! output hm? - fg%iqout(5) = .true. ! output pb? - fg%iqout(6) = .true. ! output hchi? - fg%iqout(7) = .true. ! output beroded? - fg%iqout(8) = .true. ! output eta? - fg%nqout = 0 - do k=1,8 - if (fg%iqout(k)) fg%nqout = fg%nqout + 1 - enddo - endif - write(6,*) '+++ nqout = ',fg%nqout ! Allocate new fixed grid data arrays at early, late time: ! dimension (10,:,:) to work for either GeoClaw or D-Claw - allocate(fg%early(10, fg%mx,fg%my)) fg%early = nan() @@ -412,7 +383,7 @@ subroutine fgout_interp(fgrid_type,fgrid, & ! save the time this fgout point was computed: - fg_data(num_vars,ifg,jfg) = t + fg_data(fgrid%time_index,ifg,jfg) = t endif ! if fgout point is on this grid @@ -441,7 +412,7 @@ subroutine fgout_write(fgrid,out_time,out_index) ! I/O integer, parameter :: unit = 87 character(len=15) :: fg_filename - character(len=4) :: cfgno, cframeno + character(len=8) :: cfgno, cframeno character(len=8) :: file_format integer :: grid_number,ipos,idigit,out_number,columns integer :: ifg,ifg1, iframe,iframe1 @@ -472,7 +443,7 @@ subroutine fgout_write(fgrid,out_time,out_index) "a10,' format'/,/)" ! Other locals - integer :: i,j,m,iq,k + integer :: i,j,m,iq,k,jq,num_eqn real(kind=8) :: t0,tf,tau, qaug(10) real(kind=8), allocatable :: qeta(:,:,:) real(kind=4), allocatable :: qeta4(:,:,:) @@ -546,76 +517,34 @@ subroutine fgout_write(fgrid,out_time,out_index) endif endif - ! Output the conserved quantities and eta value + ! Output the requested quantities and eta value: + iq = 1 - ! qaug(1:3) are h,hu,hv for both GeoClaw and D-Claw: - if (fgrid%iqout(1)) then - qeta(iq,i,j) = qaug(1) ! h - iq = iq+1 - endif - if (fgrid%iqout(2)) then - qeta(iq,i,j) = qaug(2) ! 
hu - iq = iq+1 - endif - if (fgrid%iqout(3)) then - qeta(iq,i,j) = qaug(3) ! hv - iq = iq+1 - endif - - if (fgrid%num_vars == 6) then - ! GeoClaw: - if (fgrid%iqout(4)) then - qeta(iq,i,j) = qaug(5) ! eta since qaug(4)=topo - iq = iq+1 - endif - - else if (fgrid%num_vars == 10) then - ! D-Claw: - if (fgrid%iqout(4)) then - qeta(iq,i,j) = qaug(4) ! hm + num_eqn = size(fgrid%q_out_vars) - 2 ! last components eta,B + do jq=1,num_eqn+2 + if (fgrid%q_out_vars(jq)) then + qeta(iq,i,j) = qaug(jq) iq = iq+1 endif - if (fgrid%iqout(5)) then - qeta(iq,i,j) = qaug(5) ! pb - iq = iq+1 - endif - if (fgrid%iqout(6)) then - qeta(iq,i,j) = qaug(6) ! hchi - iq = iq+1 - endif - if (fgrid%iqout(7)) then - qeta(iq,i,j) = qaug(7) ! b_eroded - iq = iq+1 - endif - if (fgrid%iqout(8)) then - qeta(iq,i,j) = qaug(9) ! eta since qaug(8)=topo - iq = iq+1 - endif - endif + enddo + + ! now append eta value: (this is now included in loop above) + !qeta(iq,i,j) = qaug(fgrid%eta_index) + ! NOTE: qaug(fgrid%bathy_index) contains topo B value + enddo enddo - ! Make the file names and open output files - cfgno = '0000' - ifg = fgrid%fgno - ifg1 = ifg - do ipos=4,1,-1 - idigit = mod(ifg1,10) - cfgno(ipos:ipos) = char(ichar('0') + idigit) - ifg1 = ifg1/10 - enddo + ! convert fgrid%fgno to a string of length at least 4 with leading 0's + ! e.g. '0001' or '12345': + write(cfgno, '(I0.4)') fgrid%fgno - cframeno = '0000' - iframe = out_index - iframe1 = iframe - do ipos=4,1,-1 - idigit = mod(iframe1,10) - cframeno(ipos:ipos) = char(ichar('0') + idigit) - iframe1 = iframe1/10 - enddo + ! convert out_index to a string of length at least 4 with leading 0's + write(cframeno, '(I0.4)') out_index - fg_filename = 'fgout' // cfgno // '.q' // cframeno + + fg_filename = 'fgout' // trim(cfgno) // '.q' // trim(cframeno) open(unit,file=fg_filename,status='unknown',form='formatted') @@ -637,14 +566,14 @@ subroutine fgout_write(fgrid,out_time,out_index) if (fgrid%output_format == 3) then ! binary output goes in .b file as full 8-byte (float64): - fg_filename = 'fgout' // cfgno // '.b' // cframeno + fg_filename = 'fgout' // trim(cfgno) // '.b' // trim(cframeno) open(unit=unit, file=fg_filename, status="unknown", & access='stream') write(unit) qeta close(unit) else if (fgrid%output_format == 2) then ! binary output goes in .b file as 4-byte (float32): - fg_filename = 'fgout' // cfgno // '.b' // cframeno + fg_filename = 'fgout' // trim(cfgno) // '.b' // trim(cframeno) open(unit=unit, file=fg_filename, status="unknown", & access='stream') allocate(qeta4(fgrid%nqout, fgrid%mx, fgrid%my)) @@ -671,7 +600,7 @@ subroutine fgout_write(fgrid,out_time,out_index) write(6,*) '*** should be ascii, binary32, or binary64' endif - fg_filename = 'fgout' // cfgno // '.t' // cframeno + fg_filename = 'fgout' // trim(cfgno) // '.t' // trim(cframeno) open(unit=unit, file=fg_filename, status='unknown', form='formatted') ! time, num_eqn+1, num_grids, num_aux, num_dim, num_ghost: write(unit, t_file_format) out_time, fgrid%nqout, 1, 0, 2, 0,file_format @@ -680,10 +609,6 @@ subroutine fgout_write(fgrid,out_time,out_index) print "(a,i4,a,i4,a,e18.8)",'Writing fgout grid #',fgrid%fgno, & ' frame ',out_index,' at time =',out_time - ! Index into qeta for binary output - ! Note that this implicitly assumes that we are outputting only h, hu, hv - ! 
and will not output more (change num_eqn parameter above) - end subroutine fgout_write diff --git a/src/2d/shallow/topo_module.f90 b/src/2d/shallow/topo_module.f90 index 8bd1af908..2713e1694 100644 --- a/src/2d/shallow/topo_module.f90 +++ b/src/2d/shallow/topo_module.f90 @@ -439,7 +439,7 @@ subroutine read_topo_file(mx,my,topo_type,fname,xll,yll,topo) integer(kind=8) :: i, j, mtot ! NetCDF Support - character(len=10) :: direction, x_dim_name, x_var_name, y_dim_name, & + character(len=64) :: direction, x_dim_name, x_var_name, y_dim_name, & y_var_name, z_var_name, var_name ! character(len=1) :: axis_string real(kind=8), allocatable :: nc_buffer(:, :), xlocs(:), ylocs(:) @@ -744,8 +744,8 @@ subroutine read_topo_header(fname,topo_type,mx,my,xll,yll,xhi,yhi,dx,dy) logical, allocatable :: x_in_dom(:),y_in_dom(:) integer(kind=4) :: dim_ids(2), num_dims, var_type, num_vars, num_dims_tot integer(kind=4), allocatable :: var_ids(:) - character(len=10) :: var_name, x_var_name, y_var_name, z_var_name - character(len=10) :: x_dim_name, y_dim_name + character(len=64) :: var_name, x_var_name, y_var_name, z_var_name + character(len=64) :: x_dim_name, y_dim_name integer(kind=4) :: x_var_id, y_var_id, z_var_id, x_dim_id, y_dim_id verbose = .false. diff --git a/src/python/geoclaw/data.py b/src/python/geoclaw/data.py index 83d9fa769..01c132400 100755 --- a/src/python/geoclaw/data.py +++ b/src/python/geoclaw/data.py @@ -9,7 +9,7 @@ - GeoClawData - RefinementData - TopographyData - - FixedGridData + - FGoutData - FGmaxData - DTopoData - QinitData diff --git a/src/python/geoclaw/fgout_tools.py b/src/python/geoclaw/fgout_tools.py index ed8f675c6..5c443f0cc 100644 --- a/src/python/geoclaw/fgout_tools.py +++ b/src/python/geoclaw/fgout_tools.py @@ -18,18 +18,19 @@ fgout frames, as a single netCDF file - function read_netcdf: Read a netCDF file and return a list of fgout frames, assuming the file contains all the qoi's needed to reconstruct q. -- function read_netcdf_arrays: Read a netCDF file and extract the +- function read_netcdf_arrays: Read a netCDF file and extract the requested quantities of interest as numpy arrays. - print_netcdf_info: Print info about the contents of a netCDF file containing some fgout frames. """ import os -from numpy import sqrt, ma, mod + import numpy +from numpy import ma, mod, sqrt -class FGoutFrame(object): +class FGoutFrame(object): """ Class to hold a single frame of fgout data at one output time. Several attributes are defined as properties that can be evaluated @@ -52,10 +53,11 @@ def __init__(self, fgout_grid, frameno=None): self._v = None self._s = None self._hss = None + self._hm = None # Define shortcuts to attributes of self.fgout_grid that are the same # for all frames (e.g. X,Y) to avoid storing grid for every frame. - + @property def x(self): return self.fgout_grid.x @@ -63,15 +65,15 @@ def x(self): @property def y(self): return self.fgout_grid.y - + @property def X(self): return self.fgout_grid.X - + @property def Y(self): return self.fgout_grid.Y - + @property def delta(self): return self.fgout_grid.delta @@ -79,15 +81,19 @@ def delta(self): @property def extent_centers(self): return self.fgout_grid.extent_centers - + @property def extent_edges(self): return self.fgout_grid.extent_edges - + @property def drytol(self): return self.fgout_grid.drytol - + + @property + def qmap(self): + return self.fgout_grid.qmap + # Define attributes such as h as @properties with lazy evaluation: # the corresponding array is created and stored only when first # accessed by the user. 
Those not needed are not created. @@ -96,55 +102,113 @@ def drytol(self): def h(self): """depth""" if self._h is None: - #print('+++ setting _h...') - self._h = self.q[0,:,:] - #print('+++ getting _h...') + q_out_vars = self.fgout_grid.q_out_vars + try: + i_h = q_out_vars.index(self.qmap["h"]) + self._h = self.q[i_h, :, :] + except ( + ValueError + ): # if q_out_vars does not contain qmap['h'], a value error is thrown + try: + i_eta = q_out_vars.index(self.qmap["eta"]) + i_B = q_out_vars.index(self.qmap["B"]) + self._h = self.q[i_eta, :, :] - self.q[i_B, :, :] + except ValueError: + print("*** Could not find h or eta-B in q_out_vars") + raise AttributeError return self._h @property def hu(self): """momentum h*u""" if self._hu is None: - self._hu = self.q[1,:,:] + q_out_vars = self.fgout_grid.q_out_vars + try: + i_hu = q_out_vars.index(self.qmap["hu"]) + self._hu = self.q[i_hu, :, :] + except: + print("*** Could not find hu in q_out_vars") + raise return self._hu @property def u(self): """speed u, computed as hu/h or set to 0 if hself.drytol)) + self._u = numpy.divide( + self.hu, + self.h, + out=numpy.zeros(self.h.shape, dtype=float), + where=(self.h > self.drytol), + ) return self._u @property def hv(self): """momentum h*v""" if self._hv is None: - self._hv = self.q[2,:,:] + q_out_vars = self.fgout_grid.q_out_vars + try: + i_hv = q_out_vars.index(self.qmap["hv"]) + self._hv = self.q[i_hv, :, :] + except: + print("*** Could not find hv in q_out_vars") + raise return self._hv @property def v(self): """speed v, computed as hv/h or set to 0 if hself.drytol)) + self._v = numpy.divide( + self.hv, + self.h, + out=numpy.zeros(self.h.shape, dtype=float), + where=(self.h > self.drytol), + ) return self._v @property def eta(self): """surface eta = h+B""" if self._eta is None: - self._eta = self.q[-1,:,:] + q_out_vars = self.fgout_grid.q_out_vars + try: + i_eta = q_out_vars.index(self.qmap["eta"]) + self._eta = self.q[i_eta, :, :] + # print('+++qmap["eta"] = %i' % self.qmap["eta"]) + # print('+++i_eta = %i' % i_eta) + except: + try: + i_h = q_out_vars.index(self.qmap["h"]) + i_B = q_out_vars.index(self.qmap["B"]) + self._eta = self.q[i_h, :, :] + self.q[i_B, :, :] + except: + print("*** Could not find eta or h+B in q_out_vars") + raise return self._eta @property def B(self): """topography""" if self._B is None: - self._B = self.q[-1,:,:] - self.q[0,:,:] + q_out_vars = self.fgout_grid.q_out_vars + try: + i_B = q_out_vars.index(self.qmap["B"]) + self._B = self.q[i_B, :, :] + # print('+++qmap["B"] = %i' % self.qmap["B"]) + # print('+++i_B = %i' % i_B) + except: + try: + i_h = q_out_vars.index(self.qmap["h"]) + i_eta = q_out_vars.index(self.qmap["eta"]) + self._B = self.q[i_eta, :, :] - self.q[i_h, :, :] + # print('+++ computing B: i_h = %i, i_eta = %i' % (i_h,i_eta)) + # print('+++qmap["h"] = %i' % self.qmap["h"]) + # print('+++qmap["eta"] = %i' % self.qmap["eta"]) + except: + print("*** Could not find B or eta-h in q_out_vars") + raise return self._B @property @@ -161,44 +225,151 @@ def hss(self): self._hss = self.h * self.s**2 return self._hss + @property + def huc(self): + """huc - Boussinesq correction to hu""" + if self._huc is None: + q_out_vars = self.fgout_grid.q_out_vars + try: + i_huc = q_out_vars.index(self.qmap["huc"]) + self._huc = self.q[i_huc, :, :] + except: + print("*** Could not find huc in q_out_vars") + raise + return self._huc -class FGoutGrid(object): + @property + def hvc(self): + """hvc - Boussinesq correction to hv""" + if self._hvc is None: + q_out_vars = 
self.fgout_grid.q_out_vars + try: + i_hvc = q_out_vars.index(self.qmap["hvc"]) + self._hvc = self.q[i_hvc, :, :] + except: + print("*** Could not find hvc in q_out_vars") + raise + return self._hvc + + @property + def hm(self): + """dclaw: h * mass fraction""" + if self._hm is None: + q_out_vars = self.fgout_grid.q_out_vars + try: + i_hm = q_out_vars.index(self.qmap["hm"]) + self._hm = self.q[i_hm, :, :] + # print('+++qmap["hm"] = %i' % self.qmap["hm"]) + # print('+++i_hm = %i' % i_hm) + except: + print("*** Could not find hm in q_out_vars") + raise + return self._hm + + @property + def pb(self): + """dclaw variable""" + if self._pb is None: + q_out_vars = self.fgout_grid.q_out_vars + try: + i_pb = q_out_vars.index(self.qmap["pb"]) + self._pb = self.q[i_pb, :, :] + except: + print("*** Could not find pb in q_out_vars") + raise + return self._pb + + @property + def hchi(self): + """dclaw variable""" + if self._hchi is None: + q_out_vars = self.fgout_grid.q_out_vars + try: + i_hchi = q_out_vars.index(self.qmap["hchi"]) + self._hchi = self.q[i_hchi, :, :] + except: + print("*** Could not find hchi in q_out_vars") + raise + return self._hchi + + @property + def bdif(self): + """dclaw variable""" + if self._bdif is None: + q_out_vars = self.fgout_grid.q_out_vars + try: + i_bdif = q_out_vars.index(self.qmap["bdif"]) + self._bdif = self.q[i_bdif, :, :] + except: + print("*** Could not find bdif in q_out_vars") + raise + return self._bdif + +class FGoutGrid(object): """ New class introduced in 5.9.0 to keep store information both about the fgout input data and the output generated by a GeoClaw run. """ - def __init__(self,fgno=None,outdir=None,output_format=None): - + def __init__(self, fgno=None, outdir=".", output_format=None, qmap="geoclaw"): + # mapping from variable names to possible values in q_out_vars + if type(qmap) is dict: + self.qmap = qmap + elif qmap == "geoclaw": + # default for GeoClaw: + self.qmap = {"h": 1, "hu": 2, "hv": 3, "eta": 4, "B": 5} + elif qmap == "geoclaw-bouss": + self.qmap = {"h": 1, "hu": 2, "hv": 3, "huc": 4, "hvc": 5, "eta": 6, "B": 7} + elif qmap == "dclaw": + self.qmap = { + "h": 1, + "hu": 2, + "hv": 3, + "hm": 4, + "pb": 5, + "hchi": 6, + "bdif": 7, + "eta": 8, + "B": 9, + } + else: + raise ValueError("Invalid qmap: %s" % qmap) # GeoClaw input values: - self.id = '' # identifier, optional + # Many of these should be read from fgout_grids.data + # using read_fgout_grids_data before using read_frame + self.id = "" # identifier, optional self.point_style = 2 # only option currently supported self.npts = None self.nx = None self.ny = None self.output_style = 1 - self.tstart = None + self.tstart = None self.tend = None self.nout = None self.fgno = fgno self.outdir = outdir + + # Note output_format will be reset by read_fgout_grids_data: self.output_format = output_format - self.dclaw = False - - self.drytol = 1e-3 # used for computing u,v from hu,hv + + self.q_out_vars = [1, 2, 3, 4] # list of which components to print + # default: h,hu,hv,eta (5=topo) + self.nqout = None # number of vars to print out + + self.drytol = 1e-3 # used for computing u,v from hu,hv # private attributes for those that are only created if # needed by the user: - + self._X = None self._Y = None self._x = None self._y = None - self._delta = None # (dx,dy) + self._delta = None # (dx,dy) self._extent_centers = None # defined by fgout points - self._extent_edges = None # extended so points are cell centers + self._extent_edges = None # extended so points are cell centers self._plotdata = None 
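In the new FGoutGrid class above, qmap maps variable names to the integer codes that may appear in q_out_vars, and the frame properties use it either to read a component directly from q or to reconstruct it from other components. A hedged sketch, assuming an fgout grid that was written with only h and eta (q_out_vars = [1, 4] under the default 'geoclaw' qmap, so B is not in the output):

    from clawpack.geoclaw import fgout_tools

    fg = fgout_tools.FGoutGrid(fgno=1, outdir='_output', qmap='geoclaw')
    fg.read_fgout_grids_data()    # sets fg.q_out_vars, extent, output format, ...

    frame = fg.read_frame(1)
    h = frame.h       # read directly, since qmap['h'] = 1 is in q_out_vars
    eta = frame.eta   # read directly, since qmap['eta'] = 4 is in q_out_vars
    B = frame.B       # qmap['B'] = 5 was not output, so reconstructed as eta - h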
@@ -206,18 +377,18 @@ def __init__(self,fgno=None,outdir=None,output_format=None): def x(self): """1D array x of longitudes (cell centers)""" if self._x is None: - dx = (self.x2 - self.x1)/self.nx - self._x = numpy.linspace(self.x1+dx/2, self.x2-dx/2, self.nx) + dx = (self.x2 - self.x1) / self.nx + self._x = numpy.linspace(self.x1 + dx / 2, self.x2 - dx / 2, self.nx) return self._x @property def y(self): """1D array y of latitudes (cell centers)""" if self._y is None: - dy = (self.y2 - self.y1)/self.ny - self._y = numpy.linspace(self.y1+dy/2, self.y2-dy/2, self.ny) + dy = (self.y2 - self.y1) / self.ny + self._y = numpy.linspace(self.y1 + dy / 2, self.y2 - dy / 2, self.ny) return self._y - + @property def X(self): """2D array X of longitudes (cell centers)""" @@ -235,31 +406,38 @@ def Y(self): @property def delta(self): if self._delta is None: - dx = (self.x2 - self.x1)/self.nx - dy = (self.y2 - self.y1)/self.ny - self._delta = (dx,dy) + dx = (self.x2 - self.x1) / self.nx + dy = (self.y2 - self.y1) / self.ny + self._delta = (dx, dy) return self._delta - + @property def extent_centers(self): """Extent of cell centers [xmin,xmax,ymin,ymax]""" if self._extent_centers is None: - self._extent_centers = [self.x.min(), self.x.max(),\ - self.y.min(), self.y.max()] + self._extent_centers = [ + self.x.min(), + self.x.max(), + self.y.min(), + self.y.max(), + ] return self._extent_centers - + @property def extent_edges(self): """Extent of cell edges [xmin,xmax,ymin,ymax]""" if self._extent_edges is None: - dx,dy = self.delta - self._extent_edges = [self.x.min()-dx/2, self.x.max()+dx/2,\ - self.y.min()-dy/2, self.y.max()+dy/2] + dx, dy = self.delta + self._extent_edges = [ + self.x.min() - dx / 2, + self.x.max() + dx / 2, + self.y.min() - dy / 2, + self.y.max() + dy / 2, + ] return self._extent_edges - # Create plotdata of class clawpack.visclaw.ClawPlotData - # only when needed for reading GeoClaw output, + # only when needed for reading GeoClaw output, # since this must be done after fgno, outdir, output_format # have been specified: @@ -276,23 +454,25 @@ def set_plotdata(self): """ from clawpack.visclaw.data import ClawPlotData - assert self.fgno is not None, '*** fgno must be set' - assert self.outdir is not None, '*** outdir must be set' - assert self.output_format is not None, '*** output_format must be set' + assert self.fgno is not None, "*** fgno must be set" + assert self.outdir is not None, "*** outdir must be set" + assert self.output_format is not None, "*** output_format must be set" if 0: - print('+++ creating plotdata for fgno=%i, outdir=%s, format=%s' \ - % (self.fgno,self.outdir,self.output_format)) + print( + "+++ creating plotdata for fgno=%i, outdir=%s, format=%s" + % (self.fgno, self.outdir, self.output_format) + ) plotdata = ClawPlotData() plotdata.outdir = self.outdir plotdata.format = self.output_format - plotdata.file_prefix = 'fgout%s' % str(self.fgno).zfill(4) - if self.output_format[:6]=='binary': + plotdata.file_prefix = "fgout%s" % str(self.fgno).zfill(4) + if self.output_format[:6] == "binary": # could be 'binary', 'binary32' or 'binary64' - file_prefix_str = plotdata.file_prefix + '.b' + file_prefix_str = plotdata.file_prefix + ".b" else: - file_prefix_str = plotdata.file_prefix + '.q' - + file_prefix_str = plotdata.file_prefix + ".q" + # Contrary to usual default, set save_frames to False so that all # the fgout frames read in are not saved in plotdata.framesoln_dict. 
# Otherwise this might be a memory hog when making an animation with @@ -301,35 +481,145 @@ def set_plotdata(self): return plotdata + def read_fgout_grids_data(self, fgno=None, data_file="fgout_grids.data"): + """ + Read input info for fgout grid number fgno from the data file + fgout_grids.data, which should have been created by setrun.py. + This file now contains info about all fgout grids. + """ + if fgno is not None: + self.fgno = fgno + assert self.fgno is not None, "*** fgno must be set" + + data_path = os.path.join(self.outdir, data_file) + print("Reading fgout grid info from \n %s" % data_path) - def read_fgout_grids_data(self, fgno=None, data_file='fgout_grids.data'): + with open(data_path) as filep: + lines = filep.readlines() + fgout_input = None + for lineno, line in enumerate(lines): + if "fgno" in line: + if int(line.split()[0]) == self.fgno: + fgout_input = lines[lineno + 1 :] + # print('Found line %i: %s' % (lineno,line)) + break + + if fgout_input is None: + raise ValueError( + "fgout grid fgno = %i not found in %s" % (self.fgno, data_file) + ) + + lineno = 0 # next input line + next_value = fgout_input[lineno].split()[lineno] # a string + next_value_int = "." not in next_value # should be True in v5.11 + err_msg = ( + "\n*** Expecting integer output_style next in %s" % data_file + + "\n If this is fgout data from before v5.11, try using" + + "\n read_fgout_grids_data_pre511" + ) + assert next_value_int, err_msg + self.output_style = int(next_value) + lineno += 1 + if self.output_style == 1: + # equally spaced times: + self.nout = int(fgout_input[lineno].split()[0]) + lineno += 1 + self.tstart = float(fgout_input[lineno].split()[0]) + lineno += 1 + self.tend = float(fgout_input[lineno].split()[0]) + lineno += 1 + self.times = numpy.linspace(self.tstart, self.tend, self.nout) + elif self.output_style == 2: + # list of times: + self.nout = int(fgout_input[lineno].split()[0]) + lineno += 1 + times_str = fgout_input[lineno].split()[: self.nout] + self.times = numpy.array([float(ts) for ts in times_str]) + lineno += 1 + else: + raise ValueError("Unrecognized fgout output_style: %s" % self.output_style) + self.point_style = point_style = int(fgout_input[lineno].split()[0]) + lineno += 1 + output_format = int(fgout_input[lineno].split()[0]) + lineno += 1 + if output_format == 1: + self.output_format = "ascii" + elif output_format == 2: + self.output_format = "binary32" + elif output_format == 3: + self.output_format = "binary" + else: + raise NotImplementedError( + "fgout not implemented for " + "output_format %i" % output_format + ) + print( + "Reading input for fgno=%i, point_style = %i " + % (self.fgno, self.point_style) + ) + if point_style == 2: + self.nx = nx = int(fgout_input[lineno].split()[0]) + self.ny = ny = int(fgout_input[lineno].split()[1]) + lineno += 1 + self.x1 = float(fgout_input[lineno].split()[0]) + self.y1 = float(fgout_input[lineno].split()[1]) + lineno += 1 + self.x2 = float(fgout_input[lineno].split()[0]) + self.y2 = float(fgout_input[lineno].split()[1]) + lineno += 1 + else: + raise NotImplementedError( + "fgout not implemented for point_style %i" % point_style + ) + tokens = fgout_input[lineno].split() + self.q_out_vars = [] + for token in tokens: + try: + self.q_out_vars.append(int(token)) + except: + break + print("Found fgout grid q_out_vars = ", self.q_out_vars) + print("Using this mapping to fgout variable names: ") + print(" qmap = ", self.qmap) + + def read_fgout_grids_data_pre511(self, fgno=None, data_file="fgout_grids.data"): """ + For backward 
compatibility, this reads fgout_grids.data files + in the format used prior to v5.11. + + In this case, the following values are used, as set in __init__(): + self.output_style = 1 + self.qmap = 'qmap' + self.q_out_vars = [1,2,3,4] # h,hu,hv,eta + Read input info for fgout grid number fgno from the data file fgout_grids.data, which should have been created by setrun.py. This file now contains info about all fgout grids. """ + if fgno is not None: self.fgno = fgno - assert self.fgno is not None, '*** fgno must be set' + assert self.fgno is not None, "*** fgno must be set" - with open(data_file) as filep: + data_path = os.path.join(self.outdir, data_file) + print("Reading fgout grid info from \n %s" % data_path) + + with open(data_path) as filep: lines = filep.readlines() fgout_input = None - for lineno,line in enumerate(lines): - if 'fgno' in line: + for lineno, line in enumerate(lines): + if "fgno" in line: if int(line.split()[0]) == self.fgno: - fgout_input = lines[lineno+1:] - #print('Found line %i: %s' % (lineno,line)) + fgout_input = lines[lineno + 1 :] + # print('Found line %i: %s' % (lineno,line)) break if fgout_input is None: - raise ValueError('fgout grid fgno = %i not found in %s' \ - % (fgno, data_file)) + raise ValueError("fgout grid fgno = %i not found in %s" % (fgno, data_file)) - lineno = 0 # next input line + lineno = 0 # next input line self.output_style = int(fgout_input[lineno].split()[lineno]) lineno += 1 - if (self.output_style == 1): + if self.output_style == 1: # equally spaced times: self.nout = int(fgout_input[lineno].split()[0]) lineno += 1 @@ -338,26 +628,27 @@ def read_fgout_grids_data(self, fgno=None, data_file='fgout_grids.data'): self.tend = float(fgout_input[lineno].split()[0]) lineno += 1 self.times = numpy.linspace(self.tstart, self.tend, self.nout) - elif (self.output_style == 2): + elif self.output_style == 2: # list of times: self.nout = int(fgout_input[lineno].split()[0]) lineno += 1 - times_str = fgout_input[lineno].split()[:self.nout] + times_str = fgout_input[lineno].split()[: self.nout] self.times = numpy.array([float(ts) for ts in times_str]) lineno += 1 else: - raise ValueError('Unrecognized fgout output_style: %s' \ - % self.output_style) + raise ValueError("Unrecognized fgout output_style: %s" % self.output_style) self.point_style = point_style = int(fgout_input[lineno].split()[0]) lineno += 1 output_format = int(fgout_input[lineno].split()[0]) lineno += 1 if output_format == 1: - self.output_format = 'ascii' + self.output_format = "ascii" elif output_format == 3: - self.output_format = 'binary' - print('Reading input for fgno=%i, point_style = %i ' \ - % (self.fgno, self.point_style)) + self.output_format = "binary" + print( + "Reading input for fgno=%i, point_style = %i " + % (self.fgno, self.point_style) + ) if point_style == 2: self.nx = nx = int(fgout_input[lineno].split()[0]) self.ny = ny = int(fgout_input[lineno].split()[1]) @@ -369,9 +660,9 @@ def read_fgout_grids_data(self, fgno=None, data_file='fgout_grids.data'): self.y2 = float(fgout_input[lineno].split()[1]) lineno += 1 else: - raise NotImplementedError("fgout not implemented for point_style %i" \ - % point_style) - + raise NotImplementedError( + "fgout not implemented for point_style %i" % point_style + ) def write_to_fgout_data(self, fid): """ @@ -380,83 +671,80 @@ def write_to_fgout_data(self, fid): """ print("\n---------------------------------------------- ") - assert self.point_style is not None, 'Need to set point_style' + assert self.point_style is not None, "Need to set 
point_style" point_style = self.point_style if point_style not in [2]: - errmsg = 'point_style %s is not implement, only point_style==2' \ - % point_style + errmsg = ( + "point_style %s is not implement, only point_style==2" % point_style + ) raise NotImplementedError(errmsg) - - if self.output_format == 'ascii': + + if self.output_format == "ascii": output_format = 1 - elif self.output_format == 'binary32': + elif self.output_format == "binary32": output_format = 2 - elif self.output_format in ['binary','binary64']: + elif self.output_format in ["binary", "binary64"]: output_format = 3 else: errmsg = "fgout output_format must be ascii, binary32, or binary64" raise NotImplementedError(errmsg) - - # write header, independent of point_style: - #fid = open(self.input_file_name,'w') + # fid = open(self.input_file_name,'w') fid.write("\n") fid.write("%i # fgno\n" % self.fgno) - fid.write("%i # output_style\n" \ - % self.output_style) - + fid.write("%i # output_style\n" % self.output_style) if self.output_style == 1: - assert self.tstart is not None, 'Need to set tstart' - assert self.tend is not None, 'Need to set tend' - assert self.nout is not None, 'Need to set nout' - fid.write("%i %s # nout\n" % (self.nout, 11*" ")) - fid.write("%16.10e # tstart\n" % self.tstart) - fid.write("%16.10e # tend\n" % self.tend) + assert self.tstart is not None, "Need to set tstart" + assert self.tend is not None, "Need to set tend" + assert self.nout is not None, "Need to set nout" + fid.write("%i %s # nout\n" % (self.nout, 11 * " ")) + fid.write("%16.10e # tstart\n" % self.tstart) + fid.write("%16.10e # tend\n" % self.tend) elif self.output_style == 2: self.nout = len(self.output_times) - fid.write("%i %s # nout\n" % (self.nout, 11*" ")) - + fid.write("%i %s # nout\n" % (self.nout, 11 * " ")) # remove [] and , from list of times: output_times_str = repr(list(self.output_times))[1:-1] - output_times_str = output_times_str.replace(',','') - fid.write("%s # output_times\n" % output_times_str) + output_times_str = output_times_str.replace(",", "") + fid.write("%s # output_times\n" % output_times_str) else: - raise ValueError('fgout output_style must be 1 or 2') - fid.write("%i %s # point_style\n" \ - % (self.point_style,12*" ")) - fid.write("%i %s # output_format\n" \ - % (output_format,12*" ")) - - print('fgout grid %i has point_style = %i' % (self.fgno, point_style)) + raise ValueError("fgout output_style must be 1 or 2") + fid.write("%i %s # point_style\n" % (self.point_style, 12 * " ")) + fid.write("%i %s # output_format\n" % (output_format, 12 * " ")) + print("fgout grid %i has point_style = %i" % (self.fgno, point_style)) if point_style == 2: # 2d grid of points - x1,x2 = self.x1, self.x2 - y1,y2 = self.y1, self.y2 - nx,ny = self.nx, self.ny + x1, x2 = self.x1, self.x2 + y1, y2 = self.y1, self.y2 + nx, ny = self.nx, self.ny - dx = (x2-x1)/nx # x1,x2 are cell edges - dy = (y2-y1)/ny # y1,y2 are cell edges + dx = (x2 - x1) / nx # x1,x2 are cell edges + dy = (y2 - y1) / ny # y1,y2 are cell edges - npts = nx*ny + npts = nx * ny - fid.write("%i %i %s # nx,ny\n" \ - % (nx,ny,10*" ")) - fid.write("%16.10e %20.10e # x1, y1\n" % (x1,y1)) - fid.write("%16.10e %20.10e # x2, y2\n" % (x2,y2)) + fid.write("%i %i %s # nx,ny\n" % (nx, ny, 10 * " ")) + fid.write("%16.10e %20.10e # x1, y1\n" % (x1, y1)) + fid.write("%16.10e %20.10e # x2, y2\n" % (x2, y2)) - print(" specifying fgout grid with shape %i by %i, with %i points" \ - % (nx,ny,npts)) - print(" lower left = (%15.10f,%15.10f)" % (x1,y1)) - print(" upper right = 
(%15.10f,%15.10f)" % (x2,y2)) - print(" dx = %15.10e, dy = %15.10e" % (dx,dy)) - - fid.write("%s # dclaw" % self.dclaw) + print( + " specifying fgout grid with shape %i by %i, with %i points" + % (nx, ny, npts) + ) + print(" lower left = (%15.10f,%15.10f)" % (x1, y1)) + print(" upper right = (%15.10f,%15.10f)" % (x2, y2)) + print(" dx = %15.10e, dy = %15.10e" % (dx, dy)) + # q_out_vars is a list of q components to print, e.g. [1,4] + + format = len(self.q_out_vars) * "%s " + fid.write(format % tuple(self.q_out_vars) + " # q_out_vars\n") + fid.write("\n") def read_frame(self, frameno): """ @@ -465,11 +753,31 @@ def read_frame(self, frameno): from datetime import timedelta + try: + fgoutX = self.X + fgoutY = self.Y + except: + msg = ( + "\n*** Before reading frame, you must set FGoutGrid data," + "\n*** Typically by calling read_fgout_grids_data" + ) + raise ValueError(msg) + + # prior to v5.11, self.read_fgout_grids_data() called here + # rather than raising exception... + print(msg) + print("*** Calling read_fgout_grids_data...") + self.read_fgout_grids_data() + fgoutX = self.X + fgoutY = self.Y + try: fr = self.plotdata.getframe(frameno) except: - print('*** Could not read fgout grid %i frame %i from %s' \ - % (self.fgno,frameno,self.plotdata.outdir)) + print( + "*** Could not read fgout grid %i frame %i from %s" + % (self.fgno, frameno, self.plotdata.outdir) + ) raise state = fr.states[0] # only 1 AMR grid patch = state.patch @@ -480,48 +788,19 @@ def read_frame(self, frameno): fgout_frame.q = state.q fgout_frame.t = state.t - + fgout_frame.frameno = frameno - - X,Y = patch.grid.p_centers[:2] - - try: - fgoutX = self.X - fgoutY = self.Y - except: - # presumably x,y, etc. were not set for this fgout_grid - # (e.g. by read_fgout_grids_data) so infer from this frame and set - - # reset most attributes including private _x etc to None: - self.__init__(fgno=self.fgno,outdir=self.outdir, - output_format=self.output_format) - - print(' Setting grid attributes based on Frame %i:' % frameno) - x = X[:,0] - y = Y[0,:] - dx = x[1] - x[0] - dy = y[1] - y[0] - self.x1 = x[0] - dx/2 - self.x2 = x[-1] + dx/2 - self.y1 = y[0] - dy/2 - self.y2 = y[-1] + dy/2 - self.nx = len(x) - self.ny = len(y) - fgoutX = self.X # will be computed from info above - fgoutY = self.Y # will be computed from info above - print(' Grid edges: ',self.extent_edges) - print(' with %i cells in x and %i cells in y' \ - % (self.nx,self.ny)) - - + + X, Y = patch.grid.p_centers[:2] + if not numpy.allclose(X, fgoutX): - errmsg = '*** X read from output does not match fgout_grid.X' + errmsg = "*** X read from output does not match fgout_grid.X" raise ValueError(errmsg) if not numpy.allclose(Y, fgoutY): - errmsg = '*** Y read from output does not match fgout_grid.Y' + errmsg = "*** Y read from output does not match fgout_grid.Y" raise ValueError(errmsg) - + return fgout_frame @@ -529,47 +808,53 @@ def read_frame(self, frameno): # Functions for interpolating from fgout grid to arbitrary points, # useful for example if using velocity field to model particle/debris motion -def make_fgout_fcn_xy(fgout, qoi, method='nearest', - bounds_error=False, fill_value=numpy.nan): + +def make_fgout_fcn_xy( + fgout, qoi, method="nearest", bounds_error=False, fill_value=numpy.nan +): """ Create a function that can be called at (x,y) and return the qoi interpolated in space from the fgout array. - - qoi should be a string (e.g. 'h', 'u' or 'v') corresponding to + + qoi should be a string (e.g. 'h', 'u' or 'v') corresponding to an attribute of fgout. 
- - The function returned takes arguments x,y that can be floats or + + The function returned takes arguments x,y that can be floats or (equal length) 1D arrays of values that lie within the spatial - extent of fgout. - - bounds_error and fill_value determine the behavior if (x,y) is not in + extent of fgout. + + bounds_error and fill_value determine the behavior if (x,y) is not in the bounds of the data, as in scipy.interpolate.RegularGridInterpolator. """ - + from scipy.interpolate import RegularGridInterpolator - + try: - q = getattr(fgout,qoi) + q = getattr(fgout, qoi) except: - print('*** fgout missing attribute qoi = %s?' % qoi) - - err_msg = '*** q must have same shape as fgout.X\n' \ - + 'fgout.X.shape = %s, q.shape = %s' % (fgout.X.shape,q.shape) + print("*** fgout missing attribute qoi = %s?" % qoi) + + err_msg = ( + "*** q must have same shape as fgout.X\n" + + "fgout.X.shape = %s, q.shape = %s" % (fgout.X.shape, q.shape) + ) assert fgout.X.shape == q.shape, err_msg - - x1 = fgout.X[:,0] - y1 = fgout.Y[0,:] - fgout_fcn1 = RegularGridInterpolator((x1,y1), q, method=method, - bounds_error=bounds_error, fill_value=fill_value) - def fgout_fcn(x,y): + x1 = fgout.X[:, 0] + y1 = fgout.Y[0, :] + fgout_fcn1 = RegularGridInterpolator( + (x1, y1), q, method=method, bounds_error=bounds_error, fill_value=fill_value + ) + + def fgout_fcn(x, y): """ Function that can be evaluated at single point or arrays (x,y). """ from numpy import array, vstack + xa = array(x) ya = array(y) - xyout = vstack((xa,ya)).T + xyout = vstack((xa, ya)).T qout = fgout_fcn1(xyout) if len(qout) == 1: qout = qout[0] # return scalar @@ -578,208 +863,258 @@ def fgout_fcn(x,y): return fgout_fcn -def make_fgout_fcn_xyt(fgout1, fgout2, qoi, method_xy='nearest', - method_t='linear', bounds_error=False, - fill_value=numpy.nan): +def make_fgout_fcn_xyt( + fgout1, + fgout2, + qoi, + method_xy="nearest", + method_t="linear", + bounds_error=False, + fill_value=numpy.nan, +): """ Create a function that can be called at (x,y,t) and return the qoi interpolated in space and time between the two frames fgout1 and fgout2. - - qoi should be a string (e.g. 'h', 'u' or 'v') corresponding to + + qoi should be a string (e.g. 'h', 'u' or 'v') corresponding to an attribute of fgout. - + method_xy is the method used in creating the spatial interpolator, and is passed to make_fgout_fcn_xy. - + method_t is the method used for interpolation in time, currently only 'linear' is supported, which linearly interpolates. - - bounds_error and fill_value determine the behavior if (x,y,t) is not in + + bounds_error and fill_value determine the behavior if (x,y,t) is not in the bounds of the data. - + The function returned takes arguments x,y (floats or equal-length 1D arrays) of values that lie within the spatial extent of fgout1, fgout2 (which are assumed to cover the same uniform grid at different times) - and t should be a float that lies between fgout1.t and fgout2.t. + and t should be a float that lies between fgout1.t and fgout2.t. 
""" - - assert numpy.allclose(fgout1.X, fgout2.X), \ - '*** fgout1 and fgout2 must have same X' - assert numpy.allclose(fgout1.Y, fgout2.Y), \ - '*** fgout1 and fgout2 must have same Y' - + + assert numpy.allclose(fgout1.X, fgout2.X), "*** fgout1 and fgout2 must have same X" + assert numpy.allclose(fgout1.Y, fgout2.Y), "*** fgout1 and fgout2 must have same Y" + t1 = fgout1.t t2 = fgout2.t - #assert t1 < t2, '*** expected fgout1.t < fgout2.t' - - fgout1_fcn_xy = make_fgout_fcn_xy(fgout1, qoi, method=method_xy, - bounds_error=bounds_error, fill_value=fill_value) - fgout2_fcn_xy = make_fgout_fcn_xy(fgout2, qoi, method=method_xy, - bounds_error=bounds_error, fill_value=fill_value) - - def fgout_fcn(x,y,t): + # assert t1 < t2, '*** expected fgout1.t < fgout2.t' + + fgout1_fcn_xy = make_fgout_fcn_xy( + fgout1, qoi, method=method_xy, bounds_error=bounds_error, fill_value=fill_value + ) + fgout2_fcn_xy = make_fgout_fcn_xy( + fgout2, qoi, method=method_xy, bounds_error=bounds_error, fill_value=fill_value + ) + + def fgout_fcn(x, y, t): """ Function that can be evaluated at single point or arrays (x,y) at a single time t. """ from numpy import array, ones + xa = array(x) ya = array(y) tol = 1e-6 # to make sure it works ok when called with t=t1 or t=t2 - if t1-tol <= t <= t2+tol: - alpha = (t-t1)/(t2-t1) + if t1 - tol <= t <= t2 + tol: + alpha = (t - t1) / (t2 - t1) elif bounds_error: - errmsg = '*** argument t=%g should be between t1=%g and t2=%g' \ - % (t,t1,t2) + errmsg = "*** argument t=%g should be between t1=%g and t2=%g" % (t, t1, t2) raise ValueError(errmsg) else: qout = fill_value * ones(xa.shape) return qout - qout1 = fgout1_fcn_xy(x,y) - qout2 = fgout2_fcn_xy(x,y) + qout1 = fgout1_fcn_xy(x, y) + qout2 = fgout2_fcn_xy(x, y) - if method_t == 'linear': + if method_t == "linear": if t1 <= t <= t2: - alpha = (t-t1)/(t2-t1) - qout = (1-alpha)*qout1 + alpha*qout2 + alpha = (t - t1) / (t2 - t1) + qout = (1 - alpha) * qout1 + alpha * qout2 else: - raise NotImplementedError('method_t = %s not supported' % method_t) - + raise NotImplementedError("method_t = %s not supported" % method_t) + return qout - + return fgout_fcn + # =============================== # Functions for writing a set of fgout frames as a netCDF file, and -# reading such a file: - -def write_netcdf(fgout_frames, fname_nc='fgout_frames.nc', - qois = ['h','hu','hv','eta'], datatype='f4', - include_B0=False, include_Bfinal=False, - description='', verbose=True): +# reading such a file: + + +def write_netcdf( + fgout_frames, + fname_nc="fgout_frames.nc", + qois=["h", "hu", "hv", "eta"], + datatype="f4", + include_B0=False, + include_Bfinal=False, + description="", + verbose=True, +): """ Write a list of fgout frames (at different times on the same rectangular grid) to a single netCDF file, with some metadata and the topography, if desired. - + fgout_frames should be a list of FGoutFrame objects, all of the same size and at increasing times. - + fname_nc is the name of the file to write. - + qois is a list of strings, the quantities of interest to include in file. - This could include any of: + This could include any of: 'h', 'eta', 'hu', 'hv', 'u', 'v', 's', 'hss', 'B'. All other quantities can be computed from h, hu, hv, eta, the original fgout variables from GeoClaw, but for some applications you might only want to save 'h' and 's', for example. - + datatype should be 'f4' [default] or 'f8', specifying bytes per qoi value. 
'f8' has full precision of the original data, but the file will be twice as large and may not be needed for downstream applications. - + Note that the topography B = eta - h, so it is not necessary to store all three of these. Also, B is often the same for all frames, so rather than storing B at each frame as a qoi, two other options are also provided (and then storing eta or h for all frames allows calculating the other): - + include_Bfinal: If True, include the topography B array from the final frame as the Bfinal array. - + include_B0: If True, include the topography B array from the first frame - as the B0 array. This is only useful if, e.g., the first frame is initial + as the B0 array. This is only useful if, e.g., the first frame is initial topography before co-seismic deformation, and at later times the topography - is always equal to Bfinal. - + is always equal to Bfinal. + `description` is a string that will be added as metadata. - A metadata field `history` will also be added, which includes the - time the file was created and the path to the directory where it was made. - """ - - import netCDF4 - from datetime import datetime, timedelta - from cftime import num2date, date2num + A metadata field `history` will also be added, which includes the + time the file was created and the path to the directory where it was made. + """ + import time + from datetime import datetime, timedelta + + import netCDF4 + from cftime import date2num, num2date + timestr = time.ctime(time.time()) # current time for metadata - + fg_times = numpy.array([fg.t for fg in fgout_frames]) - + if verbose: - print('Creating %s with fgout frames at times: ' % fname_nc) + print("Creating %s with fgout frames at times: " % fname_nc) print(fg_times) - + fg0 = fgout_frames[0] x = fg0.x y = fg0.y - + xs = numpy.array([fg.x for fg in fgout_frames]) ys = numpy.array([fg.y for fg in fgout_frames]) # assert same for all times - - units = {'h':'meters', 'eta':'meters', 'hu':'m^2/s', 'hv':'m^2/s', - 'u':'m/s', 'v':'m/s', 's':'m/s', 'hss':'m^3/s^2', 'B':'meters'} - - with netCDF4.Dataset(fname_nc, 'w') as rootgrp: + units = { + "h": "meters", + "eta": "meters", + "hu": "m^2/s", + "hv": "m^2/s", + "u": "m/s", + "v": "m/s", + "s": "m/s", + "hss": "m^3/s^2", + "B": "meters", + } + + with netCDF4.Dataset(fname_nc, "w") as rootgrp: rootgrp.description = description rootgrp.history = "Created " + timestr rootgrp.history += " in %s; " % os.getcwd() - lon = rootgrp.createDimension('lon', len(x)) - longitudes = rootgrp.createVariable('lon','f8',('lon',)) + lon = rootgrp.createDimension("lon", len(x)) + longitudes = rootgrp.createVariable("lon", "f8", ("lon",)) longitudes[:] = x - longitudes.units = 'degrees_east' + longitudes.units = "degrees_east" - lat = rootgrp.createDimension('lat', len(y)) - latitudes = rootgrp.createVariable('lat','f8',('lat',)) + lat = rootgrp.createDimension("lat", len(y)) + latitudes = rootgrp.createVariable("lat", "f8", ("lat",)) latitudes[:] = y - latitudes.units = 'degrees_north' - - time = rootgrp.createDimension('time', len(fg_times)) - times = rootgrp.createVariable('time','f8',('time',)) + latitudes.units = "degrees_north" + + time = rootgrp.createDimension("time", len(fg_times)) + times = rootgrp.createVariable("time", "f8", ("time",)) times[:] = fg_times - times.units = 'seconds' - + times.units = "seconds" + if 0: # Could make times be datetimes relative to some event time, e.g.: - times.units = 'seconds since 1700-01-26 21:00:00.0' - times.calendar = 'gregorian' - dates = [datetime(1700,1,26,21) 
+ timedelta(seconds=ss) \ - for ss in fg_times] - times[:] = date2num(dates,units=times.units,calendar=times.calendar) - + times.units = "seconds since 1700-01-26 21:00:00.0" + times.calendar = "gregorian" + dates = [ + datetime(1700, 1, 26, 21) + timedelta(seconds=ss) for ss in fg_times + ] + times[:] = date2num(dates, units=times.units, calendar=times.calendar) + if include_B0: - B0 = rootgrp.createVariable('B0',datatype,('lon','lat',)) - B0[:,:] = fg0.B - B0.units = 'meters' + B0 = rootgrp.createVariable( + "B0", + datatype, + ( + "lon", + "lat", + ), + ) + B0[:, :] = fg0.B + B0.units = "meters" if include_Bfinal: fg_final = fgout_frames[-1] - Bfinal = rootgrp.createVariable('Bfinal',datatype,('lon','lat',)) - Bfinal[:,:] = fg_final.B - Bfinal.units = 'meters' - + Bfinal = rootgrp.createVariable( + "Bfinal", + datatype, + ( + "lon", + "lat", + ), + ) + Bfinal[:, :] = fg_final.B + Bfinal.units = "meters" + for qoi in qois: - qoi_frames = [getattr(fgout,qoi) for fgout in fgout_frames] - qoi_var = rootgrp.createVariable(qoi,datatype,('time','lon','lat',)) - qoi_var[:,:,:] = qoi_frames + qoi_frames = [getattr(fgout, qoi) for fgout in fgout_frames] + qoi_var = rootgrp.createVariable( + qoi, + datatype, + ( + "time", + "lon", + "lat", + ), + ) + qoi_var[:, :, :] = qoi_frames qoi_var.units = units[qoi] + def get_as_array(var, rootgrp, verbose=True): """ - Utility function to retrieve variable from netCDF file and convert to + Utility function to retrieve variable from netCDF file and convert to numpy array. """ a = rootgrp.variables.get(var, None) if a is not None: - if verbose: print(' Loaded %s with shape %s' % (var,repr(a.shape))) + if verbose: + print(" Loaded %s with shape %s" % (var, repr(a.shape))) return numpy.array(a) else: - if verbose: print(' Did not find %s' % var) - return None + if verbose: + print(" Did not find %s" % var) + return None def read_netcdf_arrays(fname_nc, qois, verbose=True): @@ -790,41 +1125,40 @@ def read_netcdf_arrays(fname_nc, qois, verbose=True): 'h', 'eta', 'hu', 'hv', 'u', 'v', 's', 'hss', 'B'. qois can also include the time-independent 'B0' and/or 'Bfinal' - - Returns + + Returns x, y, t, qoi_arrays where x,y define the longitude, latitudes, t is the times of the frames, and qoi_arrays is a dictionary indexed by the strings from qois. - + Each dict element is an array with shape (len(t), len(x), len(y)) for time-dependent qoi's, or (len(x), len(y)) for B0 or Bfinal, or None if that qoi was not found in the netCDF file. 
""" - + import netCDF4 - with netCDF4.Dataset(fname_nc, 'r') as rootgrp: + with netCDF4.Dataset(fname_nc, "r") as rootgrp: if verbose: - print('Reading data to fgout frames from nc file',fname_nc) - print(' nc file description: ', rootgrp.description) - print('History: ', rootgrp.history) - - x = get_as_array('lon', rootgrp, verbose) - y = get_as_array('lat', rootgrp, verbose) - t = get_as_array('time', rootgrp, verbose) - + print("Reading data to fgout frames from nc file", fname_nc) + print(" nc file description: ", rootgrp.description) + print("History: ", rootgrp.history) + + x = get_as_array("lon", rootgrp, verbose) + y = get_as_array("lat", rootgrp, verbose) + t = get_as_array("time", rootgrp, verbose) + vars = list(rootgrp.variables) - + qoi_arrays = {} for qoi in qois: qoi_array = get_as_array(qoi, rootgrp, verbose) qoi_arrays[qoi] = qoi_array - - return x,y,t,qoi_arrays - - + + return x, y, t, qoi_arrays + def read_netcdf(fname_nc, fgout_grid=None, verbose=True): """ @@ -833,55 +1167,55 @@ def read_netcdf(fname_nc, fgout_grid=None, verbose=True): the qoi's 'h','hu','hv','eta' required to reconstruct the q array as output by GeoClaw. """ - + import netCDF4 - with netCDF4.Dataset(fname_nc, 'r') as rootgrp: + with netCDF4.Dataset(fname_nc, "r") as rootgrp: if 1: - print('Reading data to fgout frames from nc file',fname_nc) - print(' nc file description: ', rootgrp.description) - print('History: ', rootgrp.history) + print("Reading data to fgout frames from nc file", fname_nc) + print(" nc file description: ", rootgrp.description) + print("History: ", rootgrp.history) - x = get_as_array('lon', rootgrp, verbose) - y = get_as_array('lat', rootgrp, verbose) - t = get_as_array('time', rootgrp, verbose) + x = get_as_array("lon", rootgrp, verbose) + y = get_as_array("lat", rootgrp, verbose) + t = get_as_array("time", rootgrp, verbose) vars = list(rootgrp.variables) - - for qoi in ['h','hu','hv','eta']: - errmsg = '*** Cannot reconstruct fgout frame without %s' % qoi + for qoi in ["h", "hu", "hv", "eta"]: + errmsg = "*** Cannot reconstruct fgout frame without %s" % qoi assert qoi in vars, errmsg - h = get_as_array('h', rootgrp, verbose) - hu = get_as_array('hu', rootgrp, verbose) - hv = get_as_array('hv', rootgrp, verbose) - eta = get_as_array('eta', rootgrp, verbose) - + h = get_as_array("h", rootgrp, verbose) + hu = get_as_array("hu", rootgrp, verbose) + hv = get_as_array("hv", rootgrp, verbose) + eta = get_as_array("eta", rootgrp, verbose) + if (x is None) or (y is None): - print('*** Could not create grid') + print("*** Could not create grid") else: - X,Y = numpy.meshgrid(x,y) - + X, Y = numpy.meshgrid(x, y) + fgout_frames = [] - + for k in range(eta.shape[0]): fgout = FGoutFrame(fgout_grid=fgout_grid, frameno=k) fgout.x = x fgout.y = y fgout.t = t[k] - fgout.q = numpy.empty((4,eta.shape[1],eta.shape[2])) - fgout.q[0,:,:] = h[k,:,:] - fgout.q[1,:,:] = hu[k,:,:] - fgout.q[2,:,:] = hv[k,:,:] - fgout.q[3,:,:] = eta[k,:,:] + fgout.q = numpy.empty((4, eta.shape[1], eta.shape[2])) + fgout.q[0, :, :] = h[k, :, :] + fgout.q[1, :, :] = hu[k, :, :] + fgout.q[2, :, :] = hv[k, :, :] + fgout.q[3, :, :] = eta[k, :, :] fgout.X = X fgout.Y = Y fgout_frames.append(fgout) - - print('Created fgout_frames as list of length %i' % len(fgout_frames)) - + + print("Created fgout_frames as list of length %i" % len(fgout_frames)) + return fgout_frames + def print_netcdf_info(fname_nc): """ Print out info about the contents of a netCDF file contining fgout frames, @@ -889,21 +1223,19 @@ def 
print_netcdf_info(fname_nc): """ import netCDF4 - with netCDF4.Dataset(fname_nc, 'r') as rootgrp: - x = get_as_array('lon', rootgrp, verbose=False) - y = get_as_array('lat', rootgrp, verbose=False) - t = get_as_array('time', rootgrp, verbose=False) + with netCDF4.Dataset(fname_nc, "r") as rootgrp: + x = get_as_array("lon", rootgrp, verbose=False) + y = get_as_array("lat", rootgrp, verbose=False) + t = get_as_array("time", rootgrp, verbose=False) vars = list(rootgrp.variables) - - print('===================================================') - print('netCDF file %s contains:' % fname_nc) - print('description: \n', rootgrp.description) - print('history: \n', rootgrp.history) - print('%i longitudes from %.6f to %.6f' % (len(x),x[0],x[-1])) - print('%i latitudes from %.6f to %.6f' % (len(y),y[0],y[-1])) - print('%i times from %.3f to %.3f' % (len(t),t[0],t[-1])) - print('variables: ',vars) - print('===================================================') - - + + print("===================================================") + print("netCDF file %s contains:" % fname_nc) + print("description: \n", rootgrp.description) + print("history: \n", rootgrp.history) + print("%i longitudes from %.6f to %.6f" % (len(x), x[0], x[-1])) + print("%i latitudes from %.6f to %.6f" % (len(y), y[0], y[-1])) + print("%i times from %.3f to %.3f" % (len(t), t[0], t[-1])) + print("variables: ", vars) + print("===================================================") diff --git a/src/python/geoclaw/surge/plot.py b/src/python/geoclaw/surge/plot.py index c2ada663f..d80f8fd87 100644 --- a/src/python/geoclaw/surge/plot.py +++ b/src/python/geoclaw/surge/plot.py @@ -11,18 +11,21 @@ # http://www.opensource.org/licenses/ # ============================================================================== -from __future__ import absolute_import -from __future__ import print_function -import numpy as np -import matplotlib.pyplot as plt -import matplotlib.colors as colors -import matplotlib.lines as mlines -import pandas +from __future__ import absolute_import, print_function + +import warnings +import clawpack.geoclaw.data as geodata import clawpack.visclaw.colormaps as colormaps import clawpack.visclaw.gaugetools as gaugetools import clawpack.visclaw.geoplot as geoplot -import clawpack.geoclaw.data as geodata +import matplotlib.colors as colors +import matplotlib.lines as mlines +import matplotlib.pyplot as plt +import numpy as np +import pandas + +# import clawpack.geoclaw.surge.storm # TODO: Assign these based on data files bathy_index = 0 @@ -31,17 +34,20 @@ pressure_field = 6 surface_cmap = plt.get_cmap("bwr") -speed_cmap = plt.get_cmap('PuBu') -friction_cmap = plt.get_cmap('YlOrRd') -velocity_cmap = plt.get_cmap('PiYG') -vorticity_cmap = plt.get_cmap('PRGn') -wind_cmap = plt.get_cmap('PuBu') -pressure_cmap = plt.get_cmap('PuBu') +speed_cmap = plt.get_cmap("PuBu") +friction_cmap = plt.get_cmap("YlOrRd") +velocity_cmap = plt.get_cmap("PiYG") +vorticity_cmap = plt.get_cmap("PRGn") +wind_cmap = plt.get_cmap("PuBu") +pressure_cmap = plt.get_cmap("PuBu") land_cmap = geoplot.land_colors surge_data = geodata.SurgeData() +# ============================== +# Track Plotting Functionality +# ============================== class track_data(object): """Read in storm track data from run output""" @@ -68,8 +74,9 @@ def get_track(self, frame): # Check to make sure that this fixed the problem if self._data.shape[0] < frame + 1: - print(" *** WARNING *** Could not find track data for ", - "frame %s." 
% frame) + warnings.warn( + f" *** WARNING *** Could not find track data", " for frame {frame}." + ) return None, None, None return self._data[frame, 1:] @@ -78,26 +85,33 @@ def get_track(self, frame): # ========================================================================== # Gauge functions # ========================================================================== -def gauge_locations(current_data, gaugenos='all'): - gaugetools.plot_gauge_locations(current_data.plotdata, - gaugenos=gaugenos, format_string='kx', - add_labels=True, xoffset=0.02, - yoffset=0.02) +def gauge_locations(current_data, gaugenos="all"): + gaugetools.plot_gauge_locations( + current_data.plotdata, + gaugenos=gaugenos, + format_string="kx", + add_labels=True, + xoffset=0.02, + yoffset=0.02, + ) def gauge_dry_regions(cd, dry_tolerance=1e-16): """Masked array of zeros where gauge is dry.""" - return np.ma.masked_where(np.abs(cd.gaugesoln.q[0, :]) > dry_tolerance, - np.zeros(cd.gaugesoln.q[0, :].shape)) + return np.ma.masked_where( + np.abs(cd.gaugesoln.q[0, :]) > dry_tolerance, + np.zeros(cd.gaugesoln.q[0, :].shape), + ) def gauge_surface(cd, dry_tolerance=1e-16): """Sea surface at gauge masked when dry.""" - return np.ma.masked_where(np.abs(cd.gaugesoln.q[0, :]) < dry_tolerance, - cd.gaugesoln.q[3, :]) + return np.ma.masked_where( + np.abs(cd.gaugesoln.q[0, :]) < dry_tolerance, cd.gaugesoln.q[3, :] + ) -def plot_landfall_gauge(gauge, axes, landfall=0.0, style='b', kwargs={}): +def plot_landfall_gauge(gauge, axes, landfall=0.0, style="b", kwargs={}): """Plot gauge data on the axes provided This will transform the plot so that it is relative to the landfall value @@ -122,11 +136,12 @@ def days_figure_title(current_data, land_fall=0.0): hours = (t - int(t)) * 24.0 title = current_data.plotaxes.title - plt.title('%s at day %3i, hour %2.1f' % (title, days, hours)) + plt.title("%s at day %3i, hour %2.1f" % (title, days, hours)) -def surge_afteraxes(current_data, track, land_fall=0.0, plot_direction=False, - style='ro', kwargs={}): +def surge_afteraxes( + current_data, track, land_fall=0.0, plot_direction=False, style="ro", kwargs={} +): """Default surge plotting after axes function Includes changing the title to something relative to landfall and plotting @@ -138,8 +153,12 @@ def surge_afteraxes(current_data, track, land_fall=0.0, plot_direction=False, axes = plt.gca() axes.plot(track_data[0], track_data[1], style, **kwargs) if plot_direction: - axes.quiver(track_data[0], track_data[1], - np.cos(track_data[2]), np.sin(track_data[2])) + axes.quiver( + track_data[0], + track_data[1], + np.cos(track_data[2]), + np.sin(track_data[2]), + ) days_figure_title(current_data, land_fall) @@ -154,19 +173,26 @@ def wind_x(cd): def wind_y(cd): # print(cd.aux[wind_field+1, :, :]) - return cd.aux[wind_field+1, :, :] + return cd.aux[wind_field + 1, :, :] def wind_speed(cd): - return np.sqrt(wind_x(cd)**2 + wind_y(cd)**2) + return np.sqrt(wind_x(cd) ** 2 + wind_y(cd) ** 2) def pressure(cd): # The division by 100.0 is to convert from Pa to millibars return cd.aux[pressure_field, :, :] / 100.0 -# def category(Storm, cd): -# return cd.aux[Storm.category, :, :] + +def storm_radius(cd, track): + """Distance from center of storm""" + track_data = track.get_track(cd.frameno) + + if track_data[0] is not None and track_data[1] is not None: + return np.sqrt((cd.x - track_data[0]) ** 2 + (cd.y - track_data[1]) ** 2) + else: + return None # ======================================================================== @@ -205,23 +231,24 @@ def 
water_speed(current_data): u = water_u(current_data) v = water_v(current_data) - return np.sqrt(u**2+v**2) + return np.sqrt(u**2 + v**2) # ======================================================================== # Plot items # ======================================================================== -def add_surface_elevation(plotaxes, plot_type='pcolor', bounds=None, - contours=None, shrink=1.0): +def add_surface_elevation( + plotaxes, plot_type="pcolor", bounds=None, contours=None, shrink=1.0 +): """Add plotitem representing the sea surface.""" - if plot_type == 'pcolor' or plot_type == 'imshow': - plotitem = plotaxes.new_plotitem(name='surface', plot_type='2d_pcolor') + if plot_type == "pcolor" or plot_type == "imshow": + plotitem = plotaxes.new_plotitem(name="surface", plot_type="2d_pcolor") plotitem.plot_var = geoplot.surface_or_depth if bounds is not None: if bounds[0] == 0.0: - plotitem.pcolor_cmap = plt.get_cmap('OrRd') + plotitem.pcolor_cmap = plt.get_cmap("OrRd") else: plotitem.pcolor_cmap = surface_cmap plotitem.pcolor_cmin = bounds[0] @@ -232,9 +259,8 @@ def add_surface_elevation(plotaxes, plot_type='pcolor', bounds=None, plotitem.amr_celledges_show = [0] * 10 plotitem.amr_patchedges_show = [1, 1, 1, 0, 0, 0, 0] - elif plot_type == 'contour': - plotitem = plotaxes.new_plotitem(name='surface', - plot_type='2d_contour') + elif plot_type == "contour": + plotitem = plotaxes.new_plotitem(name="surface", plot_type="2d_contour") if bounds is None: plotitem.contour_levels = [-2.5, -1.5, -0.5, 0.5, 1.5, 2.5] @@ -242,11 +268,10 @@ def add_surface_elevation(plotaxes, plot_type='pcolor', bounds=None, plotitem.amr_contour_show = [1] * 10 plotitem.amr_celledges_show = [0] * 10 plotitem.amr_patchedges_show = [1, 1, 1, 1, 0, 0, 0] - plotitem.amr_contour_colors = 'k' + plotitem.amr_contour_colors = "k" - elif plot_type == 'contourf': - plotitem = plotaxes.new_plotitem(name='surface', - plot_type='2d_contourf') + elif plot_type == "contourf": + plotitem = plotaxes.new_plotitem(name="surface", plot_type="2d_contourf") plotitem.plot_var = geoplot.surface_or_depth if bounds is not None: contours = numpy.linspace(bounds[0], bounds[1], 11) @@ -262,21 +287,20 @@ def add_surface_elevation(plotaxes, plot_type='pcolor', bounds=None, plotitem.fill_cmap = geoplot.tsunami_colormap plotitem.colorbar_shrink = shrink plotitem.colorbar_label = "Surface Height (m)" - plotitem.fill_cmap = plt.get_cmap('OrRd') + plotitem.fill_cmap = plt.get_cmap("OrRd") if any((value < 0 for value in plotitem.contour_levels)): plotitem.fill_cmap = surface_cmap plotitem.amr_contour_show = [1] * 10 plotitem.amr_celledges_show = [0] * 10 plotitem.amr_patchedges_show = [1, 1, 1, 1, 0, 0, 0] - plotitem.amr_contour_colors = 'k' + plotitem.amr_contour_colors = "k" -def add_speed(plotaxes, plot_type='pcolor', bounds=None, contours=None, - shrink=1.0): +def add_speed(plotaxes, plot_type="pcolor", bounds=None, contours=None, shrink=1.0): """Add plotitem representing speed of the water.""" - if plot_type == 'pcolor' or plot_type == 'imshow': - plotitem = plotaxes.new_plotitem(name='speed', plot_type='2d_pcolor') + if plot_type == "pcolor" or plot_type == "imshow": + plotitem = plotaxes.new_plotitem(name="speed", plot_type="2d_pcolor") plotitem.plot_var = water_speed # plotitem.plot_var = 1 plotitem.pcolor_cmap = speed_cmap @@ -289,26 +313,25 @@ def add_speed(plotaxes, plot_type='pcolor', bounds=None, contours=None, plotitem.amr_celledges_show = [0] * 10 plotitem.amr_patchedges_show = [1, 1, 1, 0, 0, 0, 0] - elif plot_type == 'contour': - 
plotitem = plotaxes.new_plotitem(name='speed', plot_type='2d_contour') + elif plot_type == "contour": + plotitem = plotaxes.new_plotitem(name="speed", plot_type="2d_contour") if bounds is None: plotitem.contour_levels = [0.5, 1.5, 3, 4.5, 6.0] - plotitem.kwargs = {'linewidths': 1} + plotitem.kwargs = {"linewidths": 1} plotitem.plot_var = water_speed plotitem.amr_contour_show = [1] * 10 plotitem.amr_celledges_show = [0] * 10 plotitem.amr_patchedges_show = [1, 1, 1, 1, 1, 0, 0] - plotitem.amr_contour_colors = 'k' - - elif plot_type == 'contourf': + plotitem.amr_contour_colors = "k" - plotitem = plotaxes.new_plotitem(name='speed', plot_type='2d_contourf') + elif plot_type == "contourf": + plotitem = plotaxes.new_plotitem(name="speed", plot_type="2d_contourf") plotitem.add_colorbar = True plotitem.colorbar_label = "Current (m/s)" plotitem.colorbar_shrink = shrink - plotitem.fill_cmap = plt.get_cmap('PuBu') + plotitem.fill_cmap = plt.get_cmap("PuBu") if bounds is not None: plotitem.contour_levels = numpy.linspace(bounds[0], bounds[1], 11) plotitem.fill_cmin = bounds[0] @@ -320,21 +343,20 @@ def add_speed(plotaxes, plot_type='pcolor', bounds=None, contours=None, # Modify the 'extends' plot attribute as we don't want this to extend # below 0 - plotitem.kwargs['extend'] = 'max' + plotitem.kwargs["extend"] = "max" plotitem.plot_var = water_speed plotitem.amr_contour_show = [1] * 10 plotitem.amr_celledges_show = [0] * 10 plotitem.amr_patchedges_show = [1, 1, 1, 1, 1, 0, 0] - plotitem.amr_contour_colors = 'k' + plotitem.amr_contour_colors = "k" -def add_friction(plotaxes, bounds=None, plot_type='pcolor', shrink=1.0): +def add_friction(plotaxes, bounds=None, plot_type="pcolor", shrink=1.0): """Add plotitem for the friction field""" - if plot_type == 'pcolor' or plot_type == 'imshow': - plotitem = plotaxes.new_plotitem(name='friction', - plot_type='2d_pcolor') + if plot_type == "pcolor" or plot_type == "imshow": + plotitem = plotaxes.new_plotitem(name="friction", plot_type="2d_pcolor") plotitem.plot_var = friction plotitem.pcolor_cmap = friction_cmap plotitem.colorbar_shrink = shrink @@ -348,11 +370,11 @@ def add_friction(plotaxes, bounds=None, plot_type='pcolor', shrink=1.0): plotitem.amr_patchedges_show = [0] * 10 -def add_wind(plotaxes, bounds=None, plot_type='pcolor', shrink=1.0): +def add_wind(plotaxes, bounds=None, plot_type="pcolor", shrink=1.0): """Add plotitem for the wind speed.""" - if plot_type == 'pcolor' or plot_type == 'imshow': - plotitem = plotaxes.new_plotitem(name='wind', plot_type='2d_pcolor') + if plot_type == "pcolor" or plot_type == "imshow": + plotitem = plotaxes.new_plotitem(name="wind", plot_type="2d_pcolor") plotitem.plot_var = wind_speed plotitem.pcolor_cmap = wind_cmap if bounds is not None: @@ -364,20 +386,19 @@ def add_wind(plotaxes, bounds=None, plot_type='pcolor', shrink=1.0): plotitem.amr_celledges_show = [0] * 10 plotitem.amr_patchedges_show = [1, 1, 1, 1, 1, 0, 0] - elif plot_type == 'contour': - plotitem = plotaxes.new_plotitem(name='wind', plot_type='2d_contour') + elif plot_type == "contour": + plotitem = plotaxes.new_plotitem(name="wind", plot_type="2d_contour") plotitem.plot_var = wind_speed plotitem.contour_nlevels = len(surge_data.wind_refine) plotitem.countour_min = surge_data.wind_refine[0] plotitem.patchedges_show = 1 -def add_pressure(plotaxes, bounds=None, plot_type='pcolor', shrink=1.0): +def add_pressure(plotaxes, bounds=None, plot_type="pcolor", shrink=1.0): """Add plotitem for the pressure field.""" - if plot_type == 'pcolor' or plot_type == 'imshow': 
- plotitem = plotaxes.new_plotitem(name="pressure", - plot_type='2d_pcolor') + if plot_type == "pcolor" or plot_type == "imshow": + plotitem = plotaxes.new_plotitem(name="pressure", plot_type="2d_pcolor") plotitem.plot_var = pressure plotitem.colorbar_shrink = shrink plotitem.pcolor_cmap = pressure_cmap @@ -389,15 +410,15 @@ def add_pressure(plotaxes, bounds=None, plot_type='pcolor', shrink=1.0): plotitem.colorbar_label = "Pressure (mbar)" plotitem.amr_celledges_show = [0] * 10 plotitem.amr_patchedges_show = [1, 1, 1, 1, 1, 0, 0] - elif plot_type == 'contour': + elif plot_type == "contour": pass -def add_land(plotaxes, plot_type='pcolor', bounds=None): +def add_land(plotaxes, plot_type="pcolor", bounds=None): """Add plotitem for land""" - if plot_type == 'pcolor': - plotitem = plotaxes.new_plotitem(name='land', plot_type='2d_pcolor') + if plot_type == "pcolor": + plotitem = plotaxes.new_plotitem(name="land", plot_type="2d_pcolor") plotitem.show = True plotitem.plot_var = geoplot.land plotitem.pcolor_cmap = land_cmap @@ -408,33 +429,41 @@ def add_land(plotaxes, plot_type='pcolor', bounds=None): plotitem.amr_celledges_show = [0] * 10 plotitem.amr_patchedges_show = [1, 1, 1, 1, 1, 0, 0] - elif plot_type == 'contour': - plotitem = plotaxes.new_plotitem(name="land", plot_type='2d_contour') + elif plot_type == "contour": + plotitem = plotaxes.new_plotitem(name="land", plot_type="2d_contour") plotitem.plot_var = geoplot.land plotitem.contour_nlevels = 40 plotitem.contour_min = bounds[0] plotitem.contour_max = bounds[1] - plotitem.amr_contour_colors = ['g'] # color on each level - plotitem.amr_patch_bgcolor = ['#ffeeee', '#eeeeff', '#eeffee'] + plotitem.amr_contour_colors = ["g"] # color on each level + plotitem.amr_patch_bgcolor = ["#ffeeee", "#eeeeff", "#eeffee"] plotitem.celledges_show = 0 plotitem.patchedges_show = 0 -def add_bathy_contours(plotaxes, contour_levels=None, color='k'): +def add_bathy_contours(plotaxes, contour_levels=None, color="k"): """Add plotitem to plot contours of the topography""" - plotitem = plotaxes.new_plotitem(name='bathy', plot_type='2d_contour') + plotitem = plotaxes.new_plotitem(name="bathy", plot_type="2d_contour") plotitem.plot_var = geoplot.topo if contour_levels is None: contour_levels = [0.0] plotitem.contour_levels = contour_levels plotitem.amr_contour_colors = [color] - plotitem.kwargs = {'linestyles': 'solid', 'linewidths': 2} + plotitem.kwargs = {"linestyles": "solid", "linewidths": 2} plotitem.amr_contour_show = [1] * 10 plotitem.celledges_show = 0 plotitem.patchedges_show = 0 +def add_storm_radii(plotaxes, track, radii=[100e3], color="r"): + """Add radii to plots based on storm position""" + plotitem = plotaxes.new_plotitem(name="storm radius", plot_type="2d_contour") + plotitem.plot_var = lambda cd: storm_radius(cd, track) + plotitem.contour_levels = radii + plotitem.contour_colors = color + + # ===== Storm related plotting ======= def sec2days(seconds): """Converst seconds to days.""" @@ -449,15 +478,15 @@ def plot_track(t, x, y, wind_radius, wind_speed, Pc, name=None): else: name = " - %s" % name - colors = ['r', 'b'] + colors = ["r", "b"] divide = (np.max(Pc) + np.min(Pc)) / 2.0 fig = plt.figure(1) axes = fig.add_subplot(111) indices = Pc < divide - axes.scatter(x[indices], y[indices], color='r', marker='o') + axes.scatter(x[indices], y[indices], color="r", marker="o") indices = Pc >= divide - axes.scatter(x[indices], y[indices], color='b', marker='o') + axes.scatter(x[indices], y[indices], color="b", marker="o") axes.set_title("Track%s" % name) fig 
= plt.figure(2, figsize=(24, 6)) @@ -471,7 +500,7 @@ def plot_track(t, x, y, wind_radius, wind_speed, Pc, name=None): axes = fig.add_subplot(133) axes.plot(sec2days(t), Pc) - axes.plot(sec2days(t), np.ones(t.shape) * divide, 'k--') + axes.plot(sec2days(t), np.ones(t.shape) * divide, "k--") axes.set_title("Central Pressure%s" % name) @@ -502,17 +531,27 @@ def plot_track(t, x, y, wind_radius, wind_speed, Pc, name=None): # ======================================================================== -def add_track(Storm, axes, plot_package=None, category_color=None, legend_loc='best', - intensity=False, categorization="NHC", limits=None, track_color='red'): - +def add_track( + Storm, + axes, + plot_package=None, + category_color=None, + legend_loc="best", + intensity=False, + categorization="NHC", + limits=None, + track_color="red", +): if category_color is None: - category_color = {5: 'red', - 4: 'orange', - 3: 'yellow', - 2: 'blue', # edit color - 1: 'violet', - 0: 'black', - -1: 'gray'} + category_color = { + 5: "red", + 4: "orange", + 3: "yellow", + 2: "blue", # edit color + 1: "violet", + 0: "black", + -1: "gray", + } category = Storm.category(categorization=categorization) # make it if intensity = true @@ -525,7 +564,7 @@ def add_track(Storm, axes, plot_package=None, category_color=None, legend_loc='b color = category_color[category[i]] else: color = track_color - axes.plot(longitude[i:i + 2], latitude[i:i + 2], color=color) + axes.plot(longitude[i : i + 2], latitude[i : i + 2], color=color) axes.set_xlabel("Longitude") axes.set_ylabel("Latitude") @@ -536,39 +575,56 @@ def add_track(Storm, axes, plot_package=None, category_color=None, legend_loc='b categories_legend = [] # plotitem = plotaxes.new_plotitem(name='category', plot_type='1d_plot') - if (-1 in category): + if -1 in category: negativeone = mlines.Line2D( - [], [], color=category_color[-1], marker='s', ls='', label="Tropical Depression") + [], + [], + color=category_color[-1], + marker="s", + ls="", + label="Tropical Depression", + ) categories_legend.append(negativeone) - if (0 in category): + if 0 in category: zero = mlines.Line2D( - [], [], color=category_color[0], marker='s', ls='', label="Tropical Storn") + [], + [], + color=category_color[0], + marker="s", + ls="", + label="Tropical Storn", + ) categories_legend.append(zero) - if (1 in category): - one = mlines.Line2D([], [], color=category_color[1], - marker='s', ls='', label="Category 1") + if 1 in category: + one = mlines.Line2D( + [], [], color=category_color[1], marker="s", ls="", label="Category 1" + ) categories_legend.append(one) - if (2 in category): - two = mlines.Line2D([], [], color=category_color[2], - marker='s', ls='', label="Category 2") + if 2 in category: + two = mlines.Line2D( + [], [], color=category_color[2], marker="s", ls="", label="Category 2" + ) categories_legend.append(two) - if (3 in category): + if 3 in category: three = mlines.Line2D( - [], [], color=category_color[3], marker='s', ls='', label="Category 3") + [], [], color=category_color[3], marker="s", ls="", label="Category 3" + ) categories_legend.append(three) - if (4 in category): + if 4 in category: four = mlines.Line2D( - [], [], color=category_color[4], marker='s', ls='', label="Category 4") + [], [], color=category_color[4], marker="s", ls="", label="Category 4" + ) categories_legend.append(four) - if (5 in category): + if 5 in category: five = mlines.Line2D( - [], [], color=category_color[5], marker='s', ls='', label="Category 5") + [], [], color=category_color[5], marker="s", ls="", 
label="Category 5" + ) categories_legend.append(five) plt.legend(handles=categories_legend, loc=legend_loc) diff --git a/src/python/geoclaw/surge/storm.py b/src/python/geoclaw/surge/storm.py index 2fe89971a..71d84d075 100644 --- a/src/python/geoclaw/surge/storm.py +++ b/src/python/geoclaw/surge/storm.py @@ -40,13 +40,12 @@ import warnings from pathlib import Path +import clawpack.geoclaw.units as units import numpy as np import pandas as pd import xarray as xr from fsspec import FSMap -import clawpack.geoclaw.units as units - # ============================================================================= # Common acronyms across formats @@ -314,20 +313,27 @@ def read_geoclaw(self, path, verbose=False): # Read header with open(path, "r") as data_file: num_casts = int(data_file.readline()) - self.time_offset = datetime.datetime.strptime( - data_file.readline()[:19], "%Y-%m-%dT%H:%M:%S" - ) + time = data_file.readline()[:19] + try: + self.time_offset = datetime.datetime.strptime(time, "%Y-%m-%dT%H:%M:%S") + except ValueError: + self.time_offset = float(time) # Read rest of data data = np.loadtxt(path, skiprows=3) num_forecasts = data.shape[0] - self.eye_location = np.empty((2, num_forecasts)) + self.eye_location = np.empty((num_forecasts, 2)) assert num_casts == num_forecasts - self.t = [ - self.time_offset + datetime.timedelta(seconds=data[i, 0]) - for i in range(num_forecasts) - ] - self.eye_location[0, :] = data[:, 1] - self.eye_location[1, :] = data[:, 2] + if isinstance(self.time_offset, datetime.datetime): + self.t = np.array( + [ + self.time_offset + datetime.timedelta(seconds=data[i, 0]) + for i in range(num_forecasts) + ] + ) + else: + self.t = data[:, 0] + self.eye_location[:, 0] = data[:, 1] + self.eye_location[:, 1] = data[:, 2] self.max_wind_speed = data[:, 3] self.max_wind_radius = data[:, 4] self.central_pressure = data[:, 5] @@ -806,14 +812,12 @@ def func(x): if (self.event == "L").any(): # if landfall, use last landfall self.time_offset = np.array(self.t)[self.event == "L"][-1] - else: - # if no landfall, use last time of storm - self.time_offset = self.t[-1] - + if (self.event == "L").any(): + # if landfall, use last landfall + self.time_offset = np.array(self.t)[self.event == "L"][-1] # Classification, note that this is not the category of the storm self.classification = ds.usa_status.values self.eye_location = np.array([ds.lon, ds.lat]).T - # Intensity information - for now, including only common, basic intensity # info. 
# TODO: add more detailed info for storms that have it @@ -1546,6 +1550,147 @@ def write_tcvitals(self, path, verbose=False): ) ) + # ================ + # Track Plotting + # ================ + def plot( + self, + ax, + radius=None, + t_range=None, + coordinate_system=2, + track_style="ko--", + categorization="NHC", + fill_alpha=0.25, + fill_color="red", + ): + """TO DO: Write doc-string""" + + import matplotlib.pyplot as plt + + if isinstance(track_style, str): + style = track_style + + # Extract information for plotting the track/swath + t = self.t + x = self.eye_location[:, 0] + y = self.eye_location[:, 1] + if t_range is not None: + t = np.ma.masked_outside(t, t_range[0], t_range[1]) + x = np.ma.array(x, mask=t.mask).compressed() + y = np.ma.array(y, mask=t.mask).compressed() + t = t.compressed() + + # Plot track + if isinstance(track_style, str): + # Plot the track as a simple line with the given style + ax.plot(x, y, track_style) + elif isinstance(track_style, dict): + if self.max_wind_speed is None: + raise ValueError( + "Maximum wind speed not available so " + "plotting catgories is not available." + ) + + # Plot the track using the colors provided in the dictionary + cat_color_defaults = { + 5: "red", + 4: "yellow", + 3: "orange", + 2: "green", + 1: "blue", + 0: "gray", + -1: "lightgray", + } + colors = [ + track_style.get(category, cat_color_defaults[category]) + for category in self.category(categorization=categorization) + ] + for i in range(t.shape[0] - 1): + ax.plot(x[i : i + 2], y[i : i + 2], color=colors[i], marker="o") + + else: + raise ValueError("The `track_style` should be a string or dict.") + + # Plot swath + if ( + isinstance(radius, float) + or isinstance(radius, np.ndarray) + or radius is None + ): + if radius is None: + # Default behavior + if self.storm_radius is None: + raise ValueError( + "Cannot use storm radius for plotting " + "the swath as the data is not available." + ) + else: + if coordinate_system == 1: + _radius = self.storm_radius + elif coordinate_system == 2: + _radius = units.convert(self.storm_radius, "m", "lat-long") + else: + raise ValueError( + f"Unknown coordinate system " + f"{coordinate_system} provided." + ) + + elif isinstance(radius, float): + # Only one value for the radius was given, replicate + _radius = np.ones(self.t.shape) * radius + elif isinstance(radius, np.ndarray): + # The array passed is the array to use + _radius = radius + else: + raise ValueError( + "Invalid input argument for radius. 
Should " "be a float or None" + ) + + # Draw first and last points + ax.add_patch(plt.Circle((x[0], y[0]), _radius[0], color=fill_color)) + if t.shape[0] > 1: + ax.add_patch(plt.Circle((x[-1], y[-1]), _radius[-1], color=fill_color)) + + # Draw path around inner points + if t.shape[0] > 2: + for i in range(t.shape[0] - 1): + p = np.array([(x[i], y[i]), (x[i + 1], y[i + 1])]) + v = p[1] - p[0] + if abs(v[1]) > 1e-16: + n = np.array([1, -v[0] / v[1]], dtype=float) + elif abs(v[0]) > 1e-16: + n = np.array([-v[1] / v[0], 1], dtype=float) + else: + raise Exception("Zero-vector given") + n /= np.linalg.norm(n) + n *= _radius[i] + + ax.fill( + ( + p[0, 0] + n[0], + p[0, 0] - n[0], + p[1, 0] - n[0], + p[1, 0] + n[0], + ), + ( + p[0, 1] + n[1], + p[0, 1] - n[1], + p[1, 1] - n[1], + p[1, 1] + n[1], + ), + facecolor=fill_color, + alpha=fill_alpha, + ) + ax.add_patch( + plt.Circle( + (p[1][0], p[1, 1]), + _radius[i], + color=fill_color, + alpha=fill_alpha, + ) + ) + # ========================================================================= # Other Useful Routines diff --git a/src/python/geoclaw/topotools.py b/src/python/geoclaw/topotools.py index 1743e1ae6..db6b6410c 100644 --- a/src/python/geoclaw/topotools.py +++ b/src/python/geoclaw/topotools.py @@ -414,7 +414,7 @@ def delta(self): def __init__(self, path=None, topo_type=None, topo_func=None, - unstructured=False): + unstructured=False, **kwargs): r"""Topography initialization routine. See :class:`Topography` for more info. @@ -444,7 +444,7 @@ def __init__(self, path=None, topo_type=None, topo_func=None, if path: self.read(path=path, topo_type=topo_type, - unstructured=unstructured) + unstructured=unstructured, **kwargs) def set_xyZ(self, X, Y, Z): r""" diff --git a/src/python/geoclaw/xarray_backends.py b/src/python/geoclaw/xarray_backends.py new file mode 100644 index 000000000..326d0d908 --- /dev/null +++ b/src/python/geoclaw/xarray_backends.py @@ -0,0 +1,583 @@ +r""" +xarray backends module: $CLAW/geoclaw/src/python/geoclaw/xarray_backends.py + +Xarray backends for GeoClaw fixed grids and fgmax grids. + +These only work for point_style = 2 (uniform regular) and have a dependency on +xarray and rioxarray. Xarray provides the core datastructure and rioxarray +provides an interface to rasterio, used to assign geospatial projection +information. + +- https://docs.xarray.dev/en/stable/index.html +- https://corteva.github.io/rioxarray/stable/readme.html +- https://rasterio.readthedocs.io/en/latest/index.html + +The expectation is that you run commands that use these backends from the same +directory you run "make output". + +Includes: + +- class FGMaxBackend: Xarray backend for fgmax grids. +- class FGOutBackend: Xarray backend for fgout grids. + +Usage: + +.. code-block:: python + import rioxarray # activate the rio accessor + import xarray as xr + from clawpack.geoclaw.xarray_backends import FGOutBackend, FGMaxBackend + + # An example of a fgout grid. + + filename = '_output/fgout0001.b0001 + # provide the .bxxx file if binary format is used or the + # .qxxx file if ascii format is used. + # the format, fg number, and frame number are inferred from the filename. + + ds = xr.open_dataset( + filename, + engine=FGOutBackend, + backend_kwargs={ + 'qmap': 'geoclaw', + 'epsg':epsg_code + }) + # ds is now an xarray object. It can be interacted with directly or written to netcdf using + ds.write_netcdf('filename.nc') + + # A 'qmap' backend_kwargs is required as it indicates the qmap used for + # selecting elements of q to include in fgout. 
See + # https://www.clawpack.org/dev/fgout.html#specifying-q-out-vars + + # Optionally, provide an epsg code to assign the associated coordinate system to the file. + # default behavior assigns no coordinate system. + + # An example of a fgmax grid. + filename = "_output/fgmax0001.txt" + ds = xr.open_dataset(filename, engine=FGMaxBackend, backend_kwargs={'epsg':epsg_code}) + + +Dimensions: + +Files opened with FGOutBackend will have dimensions (time, y, x). +Files opened with FGMaxBackend will have dimensions (y, x). + +Variable naming: + +For fixed grid files, the dataset will have the elements of q specified +by qmap and q_out_vars. This may be as many as the full list described below or +as few as specified by the user. + +For fixed grid geoclaw files, the full set of variables is: +- h +- hu +- hv +- eta +- B + +For fixed grid geoclaw-bouss files, the full set of variables is: +- h +- hu +- hv +- huc +- hvc +- eta +- B + +Fixed grid dclaw files, the full set of variables is: +- h +- hu +- hv +- hm +- pb +- hchi +- bdif +- eta +- B + +Depending on the number of variables specified in the setrun.py fgmax files will +have a portion of the following variables: + +If rundata.fgmax_data.num_fgmax_val == 1 + +- arrival_time, Wave arrival time (based on eta>sea_level + fg.arrival_tol) +- h_max, Maximum water depth +- eta_max, Maximum water surface elevation +- h_max_time, Time of maximum water depth +- B, Basal topography at the first time fgmax first monitored maximum amr level +- level, Maximum amr level + +If rundata.fgmax_data.num_fgmax_val == 2: + +- s_max, Maximum velocity +- s_max_time, Time of maximum velocity + +If rundata.fgmax_data.num_fgmax_val == 5: + +- hs_max, Maximum momentum +- hs_max_time, Time of maximum momentum +- hss_max, Maximum momentum flux +- hss_max_time, Time of maximum momentum flux +- h_min, Minimum depth +- h_min_time, Time of minimum depth + + +See the following links for additional information about xarray Backends. + +- https://docs.xarray.dev/en/stable/generated/xarray.backends.BackendEntrypoint.html#xarray.backends.BackendEntrypoint +- https://docs.xarray.dev/en/stable/generated/xarray.open_dataset.html +""" + +import os + +import numpy as np + +try: + import rioxarray # activate the rio accessor + import xarray as xr +except ImportError: + raise ImportError( + "rioxarray and xarray are required to use the FGOutBackend and FGMaxBackend" + ) + +from clawpack.geoclaw import fgmax_tools, fgout_tools +from xarray.backends import BackendEntrypoint + +_qunits = { + "h": "meters", + "hu": "meters squared per second", + "hv": "meters squared per second", + "hm": "meters", + "pb": "newton per meter squared", + "hchi": "meters", + "bdif": "meters", + "eta": "meters", + "B": "meters", + "huc": "varies", + "hvc": "varies", +} + +time_unit = "seconds" +reference_time = "model start" +space_unit = "meters" +nodata = np.nan + + +def _prepare_var(data, mask, dims, units, nodata, long_name): + "Reorient array into the format xarray expects and generate the mapping." + if mask is not None: + data[mask] = nodata + data = data.T + data = np.flipud(data) + mapping = ( + dims, + data, + {"units": units, "_FillValue": nodata, "long_name": long_name}, + ) + return mapping + + +class FGOutBackend(BackendEntrypoint): + "Xarray Backend for Clawpack fixed grid format." + + def open_dataset( + self, + filename, # path to fgout file. 
+ qmap="geoclaw", # qmap value for FGoutGrid ('geoclaw', 'dclaw', or 'geoclaw-bouss') + epsg=None, # epsg code + dry_tolerance=0.001, + # dry tolerance used for for masking elements of q that are not + # eta or B. Default behavior is to mask all elements of q except for + # eta and B where h>0.001. + # used only if h or eta-B is available based on q_out_vars. + # if dry_tolerance = None, no masking is applied to any variable. + drop_variables=None, # name of any elements of q to drop. + ): + + if drop_variables is None: + drop_variables = [] + + full_path = os.path.abspath(filename) + filename = os.path.basename(full_path) + outdir = os.path.basename(os.path.dirname(full_path)) + + # filename has the format fgoutXXXX.qYYYY (ascii) + # or fgoutXXXX.bYYYY (binary) + # where XXXX is the fixed grid number and YYYY is the frame + # number. + type_code = filename.split(".")[-1][0] + fgno = int(filename.split(".")[0][-4:]) + frameno = int(filename.split(".")[-1][-4:]) + if type_code == "q": # TODO, is this correct? + output_format = "ascii" + elif type_code == "b": + output_format = "binary32" # format of fgout grid output + else: + raise ValueError("Invalid FGout output format. Must be ascii or binary.") + + fgout_grid = fgout_tools.FGoutGrid( + fgno=fgno, outdir=outdir, output_format=output_format, qmap=qmap + ) + + if fgout_grid.point_style != 2: + raise ValueError("FGOutBackend only works with fg.point_style=2") + + fgout_grid.read_fgout_grids_data() + fgout = fgout_grid.read_frame(frameno) + + time = fgout.t + # both come in ascending. flip to give expected order. + x = fgout.x + y = np.flipud(fgout.y) + nj = len(x) + ni = len(y) + + # mask based on dry tolerance + if dry_tolerance is not None: + try: + mask = fgout.h < dry_tolerance + # internally fgout_tools will try fgou.eta-fgout.B if h is not present. + except AttributeError: + print("FGOutBackend: No h, eta, or B. No mask applied.") + mask = np.ones((nj, ni), dtype=bool) + + # create data_vars dictionary + data_vars = {} + for i, i_var in enumerate(fgout_grid.q_out_vars): + + # construct variable + + # Find the varname in fgout.qmap associated with + # q_out_vars[i] + varname = None + for name, index in fgout_grid.qmap.items(): + if i_var == index: + varname = name + + # construct xarray dataset if varname not in drop vars. + if varname not in drop_variables: + + Q = fgout.q[i, :, :] + + # mask all but eta based on h presence. + if (varname not in ("B", "eta")) and dry_tolerance is not None: + Q[mask] = nodata + + # to keep xarray happy, need to transpose and flip ud. + Q = Q.T + Q = np.flipud(Q) + Q = Q.reshape((1, ni, nj)) # reshape to add a time dimension. 
+ + data_array_attrs = {"units": _qunits[varname], "_FillValue": nodata} + + data_vars[varname] = ( + [ + "time", + "y", + "x", + ], + Q, + data_array_attrs, + ) + + ds_attrs = {"description": "Clawpack model output"} + + ds = xr.Dataset( + data_vars=data_vars, + coords=dict( + x=(["x"], x, {"units": space_unit}), + y=(["y"], y, {"units": space_unit}), + time=("time", [time], {"units": "seconds"}), + reference_time=reference_time, + ), + attrs=ds_attrs, + ) + + if epsg is not None: + ds.rio.write_crs( + epsg, + inplace=True, + ).rio.set_spatial_dims( + x_dim="x", + y_dim="y", + inplace=True, + ).rio.write_coordinate_system(inplace=True) + # https://corteva.github.io/rioxarray/stable/getting_started/crs_management.html#Spatial-dimensions + # https://gis.stackexchange.com/questions/470207/how-to-write-crs-info-to-netcdf-in-a-way-qgis-can-read-python-xarray + + return ds + + open_dataset_parameters = ["filename", "drop_variables"] + + description = "Use Clawpack fixed grid output files in Xarray" + url = "https://www.clawpack.org/fgout.html" + + +class FGMaxBackend(BackendEntrypoint): + "Xarray Backend for Clawpack fgmax grid format." + + def open_dataset( + self, + filename, + epsg=None, + drop_variables=None, + clip=False, # if True, clip the entire array to the extent where arrival_time is defined. + ): + + if drop_variables is None: + drop_variables = [] + + # expectation is for standard clawpack organization, + # e.g., output and 'fgmax_grids.data' in _output + fgno = int(os.path.basename(filename).split(".")[0][-4:]) + outdir = os.path.dirname(filename) + data_file = os.path.join(outdir, "fgmax_grids.data") + + fg = fgmax_tools.FGmaxGrid() + fg.read_fgmax_grids_data(fgno=fgno, data_file=data_file) + if fg.point_style != 2: + raise ValueError("FGMaxBackend only works with fg.point_style=2") + + fg.read_output(outdir=outdir) + + # Construct the x and y coordinates + # Both come in ascending, therefore flip y so that it is ordered as expected. + x = fg.x + y = np.flipud(fg.y) + + # Construct the data_vars array. To organize the + # data in the way expected by netcdf standards, need + # to both transpose and flipud the array. 
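+        # Each entry below is built with _prepare_var, which fills masked cells with
+        # nodata, reorients the array (transpose then flipud), and returns the
+        # (dims, data, attrs) tuple form that xarray.Dataset accepts in data_vars.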
+ + data_vars = {} + + data_vars["arrival_time"] = _prepare_var( + fg.arrival_time.data, + fg.arrival_time.mask, + [ + "y", + "x", + ], + "seconds", + nodata, + "Wave arrival time", + ) + + data_vars["h_max"] = _prepare_var( + fg.h.data, + fg.h.mask, + [ + "y", + "x", + ], + "meters", + nodata, + "Maximum water depth", + ) + + data_vars["eta_max"] = _prepare_var( + fg.h.data + fg.B.data, + fg.h.mask, + [ + "y", + "x", + ], + "meters", + nodata, + "Maximum water surface elevation", + ) + + data_vars["h_max_time"] = _prepare_var( + fg.h_time.data, + fg.h_time.mask, + [ + "y", + "x", + ], + "seconds", + nodata, + "Time of maximum water depth", + ) + + data_vars["B"] = _prepare_var( + fg.B, + None, + [ + "y", + "x", + ], + "meters", + nodata, + "Basal topography at the first time fgmax first monitored maximum amr level", + ) + + data_vars["level"] = _prepare_var( + fg.level, + None, + [ + "y", + "x", + ], + "(no units)", + -1, + "Maximum amr level", + ) + + if hasattr(fg, "s"): + if hasattr(fg.s, "data"): + data_vars["s_max"] = _prepare_var( + fg.s.data, + fg.s.mask, + [ + "y", + "x", + ], + "meters per second", + nodata, + "Maximum velocity", + ) + data_vars["s_max_time"] = _prepare_var( + fg.s_time.data, + fg.s_time.mask, + [ + "y", + "x", + ], + "seconds", + nodata, + "Time of maximum velocity", + ) + if hasattr(fg, "hs"): + if hasattr(fg.hs, "data"): + data_vars["hs_max"] = _prepare_var( + fg.hs.data, + fg.hs.mask, + [ + "y", + "x", + ], + "meters squared per second", + nodata, + "Maximum momentum", + ) + data_vars["hs_max_time"] = _prepare_var( + fg.hs_time.data, + fg.hs_time.mask, + [ + "y", + "x", + ], + "seconds", + nodata, + "Time of maximum momentum", + ) + + data_vars["hss_max"] = _prepare_var( + fg.hss.data, + fg.hss.mask, + [ + "y", + "x", + ], + "meters cubed per second squared", + nodata, + "Maximum momentum flux", + ) + data_vars["hss_max_time"] = _prepare_var( + fg.hss_time.data, + fg.hss_time.mask, + [ + "y", + "x", + ], + "seconds", + nodata, + "Time of maximum momentum flux", + ) + + data_vars["h_min"] = _prepare_var( + fg.hmin.data, + fg.hmin.mask, + [ + "y", + "x", + ], + "meters", + nodata, + "Minimum depth", + ) + data_vars["h_min_time"] = _prepare_var( + fg.hmin_time.data, + fg.hmin_time.mask, + [ + "y", + "x", + ], + "seconds", + nodata, + "Time of minimum depth", + ) + + # drop requested variables + for var in drop_variables: + if var in data_vars.keys(): + del data_vars[var] + + # Construct the values from + ds_attrs = {"description": "D-Claw model output"} + + ds = xr.Dataset( + data_vars=data_vars, + coords=dict( + x=(["x"], x, {"units": "meters"}), + y=(["y"], y, {"units": "meters"}), + ), + attrs=ds_attrs, + ) + + if epsg is not None: + + ds.rio.write_crs( + epsg, + inplace=True, + ).rio.set_spatial_dims( + x_dim="x", + y_dim="y", + inplace=True, + ).rio.write_coordinate_system(inplace=True) + # https://corteva.github.io/rioxarray/stable/getting_started/crs_management.html#Spatial-dimensions + # https://gis.stackexchange.com/questions/470207/how-to-write-crs-info-to-netcdf-in-a-way-qgis-can-read-python-xarray + + # clip + if clip: + clip_data = fg.arrival_time.data >= 0 + clip_data = clip_data.T + clip_data = np.flipud(clip_data) + ds = _clip(ds, clip_data) + + return ds + + open_dataset_parameters = ["filename", "drop_variables", "clip"] + + description = "Use Clawpack fix grid monitoring files in Xarray" + url = "https://www.clawpack.org/fgmax.html#fgmax" + + +def _clip(ds, clip_data): + # clip DS to the extent where clip_data is true. 
+ # useful for when there are large areas where nothing happened. + + nrow, ncol = clip_data.shape + + valid_col = np.sum(clip_data, axis=0) > 0 + valid_row = np.sum(clip_data, axis=1) > 0 + + sel_rows = np.nonzero(valid_row)[0] + sel_cols = np.nonzero(valid_col)[0] + + first_row = sel_rows[0] + last_row = sel_rows[-1] + first_col = sel_cols[0] + last_col = sel_cols[-1] + + ds = ds.isel(y=np.arange(first_row, last_row), x=np.arange(first_col, last_col)) + return ds
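
For reference, a minimal usage sketch of the new xarray backends, following the
module docstring above; the file names, the choice of binary fgout output, and
the EPSG code are illustrative assumptions, not part of this changeset:

    import xarray as xr
    import rioxarray  # noqa: F401 -- activates the .rio accessor
    from clawpack.geoclaw.xarray_backends import FGOutBackend, FGMaxBackend

    # One fgout frame (binary fgout output assumed); 'qmap' must match the
    # q_out_vars mapping used in setrun.py.
    ds_fgout = xr.open_dataset(
        "_output/fgout0001.b0001",
        engine=FGOutBackend,
        backend_kwargs={"qmap": "geoclaw", "epsg": 32610},
    )
    ds_fgout.to_netcdf("fgout0001_0001.nc")  # standard xarray netCDF writer

    # An fgmax grid, clipped to the region where an arrival time was recorded.
    ds_fgmax = xr.open_dataset(
        "_output/fgmax0001.txt",
        engine=FGMaxBackend,
        backend_kwargs={"epsg": 32610, "clip": True},
    )
    ds_fgmax.to_netcdf("fgmax0001.nc")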