From afd8f5ad8c31a6e3063c0fa5597e5e682ea9e381 Mon Sep 17 00:00:00 2001
From: Ken MacDonald <68608562+kmacdonald-stsci@users.noreply.github.com>
Date: Wed, 3 Mar 2021 15:15:57 -0500
Subject: [PATCH] JP-1887: ramp_fit PEP8 cleanup (#5803)

---
 jwst/ramp_fitting/ramp_fit.py | 720 +++++++++++++++++-----------------
 1 file changed, 361 insertions(+), 359 deletions(-)

diff --git a/jwst/ramp_fitting/ramp_fit.py b/jwst/ramp_fitting/ramp_fit.py
index 8f4cf0eab4..d5d3c886c1 100755
--- a/jwst/ramp_fitting/ramp_fit.py
+++ b/jwst/ramp_fitting/ramp_fit.py
@@ -32,6 +32,8 @@
 DO_NOT_USE = dqflags.group['DO_NOT_USE']
 JUMP_DET = dqflags.group['JUMP_DET']
+SATURATED = dqflags.group['SATURATED']
+UNRELIABLE_SLOPE = dqflags.pixel['UNRELIABLE_SLOPE']

 BUFSIZE = 1024 * 300000  # 300Mb cache size for data section

@@ -72,9 +74,11 @@ def ramp_fit(model, buffsize, save_opt, readnoise_model, gain_model,
         currently the only weighting supported.

     max_cores : string
-        Number of cores to use for multiprocessing. If set to 'none' (the default), then no multiprocessing will be done.
-        The other allowable values are 'quarter', 'half', and 'all'. This is the fraction of cores to use for multi-proc.
-        The total number of cores includes the SMT cores (Hyper Threading for Intel).
+        Number of cores to use for multiprocessing. If set to 'none' (the
+        default), then no multiprocessing will be done. The other allowable
+        values are 'quarter', 'half', and 'all'. This is the fraction of cores
+        to use for multi-proc. The total number of cores includes the SMT cores
+        (Hyper Threading for Intel).

     Returns
     -------
@@ -94,20 +98,19 @@ def ramp_fit(model, buffsize, save_opt, readnoise_model, gain_model,
         exposure
     """
     if algorithm.upper() == "GLS":
-        new_model, int_model, gls_opt_model = gls_ramp_fit(model,
-                                                           buffsize, save_opt,
-                                                           readnoise_model, gain_model, max_cores)
+        new_model, int_model, gls_opt_model = \
+            gls_ramp_fit(model, buffsize, save_opt, readnoise_model, gain_model, max_cores)
         opt_model = None
     else:
         # Get readnoise array for calculation of variance of noiseless ramps, and
         #   gain array in case optimal weighting is to be done
         frames_per_group = model.meta.exposure.nframes
-        readnoise_2d, gain_2d = utils.get_ref_subs(model, readnoise_model,
-                                                   gain_model, frames_per_group)
+        readnoise_2d, gain_2d = \
+            utils.get_ref_subs(model, readnoise_model, gain_model, frames_per_group)

         new_model, int_model, opt_model = \
             ols_ramp_fit_multi(model, buffsize, save_opt, readnoise_2d,
-                gain_2d, weighting, max_cores)
+                               gain_2d, weighting, max_cores)
         gls_opt_model = None

     # Update data units in output models
@@ -121,12 +124,13 @@ def ramp_fit(model, buffsize, save_opt, readnoise_model, gain_model,
     return new_model, int_model, opt_model, gls_opt_model


-def ols_ramp_fit_multi(input_model, buffsize, save_opt, readnoise_2d, gain_2d,
-                       weighting, max_cores):
+def ols_ramp_fit_multi(
+        input_model, buffsize, save_opt, readnoise_2d, gain_2d, weighting, max_cores):
     """
-    Setup the inputs to ols_ramp_fit with and without multiprocessing. The inputs will be sliced into the
-    number of cores that are being used for multiprocessing. Because the data models cannot be pickled, only
-    numpy arrays are passed and returned as parameters to ols_ramp_fit.
+    Set up the inputs to ols_ramp_fit with and without multiprocessing. The
+    inputs will be sliced into the number of cores that are being used for
+    multiprocessing. Because the data models cannot be pickled, only numpy
+    arrays are passed and returned as parameters to ols_ramp_fit.
Parameters ---------- @@ -139,10 +143,10 @@ def ols_ramp_fit_multi(input_model, buffsize, save_opt, readnoise_2d, gain_2d, save_opt : boolean calculate optional fitting results - readnoise_model : instance of data Model + readnoise_2d : instance of data Model readnoise for all pixels - gain_model : instance of gain model + gain_2d : instance of gain model gain for all pixels algorithm : string @@ -154,9 +158,10 @@ def ols_ramp_fit_multi(input_model, buffsize, save_opt, readnoise_2d, gain_2d, currently the only weighting supported. max_cores : string - Number of cores to use for multiprocessing. If set to 'none' (the default), then no multiprocessing will be done. - The other allowable values are 'quarter', 'half', and 'all'. This is the fraction of cores to use for multi-proc. - The total number of cores includes the SMT cores (Hyper Threading for Intel). + Number of cores to use for multiprocessing. If set to 'none' (the default), + then no multiprocessing will be done. The other allowable values are 'quarter', + 'half', and 'all'. This is the fraction of cores to use for multi-proc. The + total number of cores includes the SMT cores (Hyper Threading for Intel). Returns ------- @@ -205,9 +210,9 @@ def ols_ramp_fit_multi(input_model, buffsize, save_opt, readnoise_2d, gain_2d, if number_slices == 1: max_segments, max_CRs = calc_num_seg(input_model.groupdq, number_of_integrations) log.debug(f"Max segments={max_segments}") - int_model, opt_model, out_model = create_output_models(input_model, - number_of_integrations, save_opt, total_cols, total_rows, - max_segments, max_CRs) + int_model, opt_model, out_model = \ + create_output_models(input_model, number_of_integrations, save_opt, + total_cols, total_rows, max_segments, max_CRs) out_model.data, out_model.dq, out_model.var_poisson, out_model.var_rnoise, out_model.err,\ int_data, int_dq, int_var_poisson, int_var_rnoise, int_err,\ @@ -256,17 +261,19 @@ def ols_ramp_fit_multi(input_model, buffsize, save_opt, readnoise_2d, gain_2d, stop_row = (i + 1) * rows_per_slice readnoise_slice = readnoise_2d[start_row: stop_row, :] gain_slice = gain_2d[start_row: stop_row, :] - data_slice = input_model.data[:,:,start_row: stop_row, :].copy() + data_slice = input_model.data[:, :, start_row: stop_row, :].copy() err_slice = input_model.err[:, :, start_row: stop_row, :].copy() - groupdq_slice = input_model.groupdq[:, :, start_row: stop_row, :].copy() - pixeldq_slice = input_model.pixeldq[ start_row: stop_row, :].copy() - - slices.insert(i, (data_slice, err_slice, groupdq_slice, pixeldq_slice, buffsize, save_opt, readnoise_slice, - gain_slice, weighting, - input_model.meta.instrument.name, input_model.meta.exposure.frame_time, - input_model.meta.exposure.ngroups, input_model.meta.exposure.group_time, - input_model.meta.exposure.groupgap, input_model.meta.exposure.nframes, - input_model.meta.exposure.drop_frames1, int_times)) + groupdq_slice = input_model.groupdq[:, :, start_row:stop_row, :].copy() + pixeldq_slice = input_model.pixeldq[start_row:stop_row, :].copy() + + slices.insert( + i, + (data_slice, err_slice, groupdq_slice, pixeldq_slice, buffsize, save_opt, + readnoise_slice, gain_slice, weighting, + input_model.meta.instrument.name, input_model.meta.exposure.frame_time, + input_model.meta.exposure.ngroups, input_model.meta.exposure.group_time, + input_model.meta.exposure.groupgap, input_model.meta.exposure.nframes, + input_model.meta.exposure.drop_frames1, int_times)) # last slice gets the rest start_row = (number_slices - 1) * rows_per_slice @@ -276,12 
+283,13 @@ def ols_ramp_fit_multi(input_model, buffsize, save_opt, readnoise_2d, gain_2d, err_slice = input_model.err[:, :, start_row: total_rows, :].copy() groupdq_slice = input_model.groupdq[:, :, start_row: total_rows, :].copy() pixeldq_slice = input_model.pixeldq[start_row: total_rows, :].copy() - slices.insert(number_slices - 1, (data_slice, err_slice, groupdq_slice, pixeldq_slice, buffsize, save_opt, - readnoise_slice, gain_slice, weighting, - input_model.meta.instrument.name, input_model.meta.exposure.frame_time, - input_model.meta.exposure.ngroups, input_model.meta.exposure.group_time, - input_model.meta.exposure.groupgap, input_model.meta.exposure.nframes, - input_model.meta.exposure.drop_frames1, int_times)) + slices.insert(number_slices - 1, + (data_slice, err_slice, groupdq_slice, pixeldq_slice, buffsize, save_opt, + readnoise_slice, gain_slice, weighting, + input_model.meta.instrument.name, input_model.meta.exposure.frame_time, + input_model.meta.exposure.ngroups, input_model.meta.exposure.group_time, + input_model.meta.exposure.groupgap, input_model.meta.exposure.nframes, + input_model.meta.exposure.drop_frames1, int_times)) # Start up the processes for each slice log.debug("Creating %d processes for ramp fitting " % number_slices) @@ -294,9 +302,9 @@ def ols_ramp_fit_multi(input_model, buffsize, save_opt, readnoise_2d, gain_2d, # Create new model for the primary output. actual_segments = real_results[0][20] actual_CRs = real_results[0][21] - int_model, opt_model, out_model = create_output_models(input_model, - number_of_integrations, save_opt, total_cols, total_rows, - actual_segments, actual_CRs) + int_model, opt_model, out_model = \ + create_output_models(input_model, number_of_integrations, save_opt, + total_cols, total_rows, actual_segments, actual_CRs) int_model.int_times = int_times # iterate over the number of slices and place the results into the output models @@ -309,7 +317,7 @@ def ols_ramp_fit_multi(input_model, buffsize, save_opt, readnoise_2d, gain_2d, out_model.var_rnoise[start_row:total_rows, :] = resultslice[3] out_model.err[start_row:total_rows, :] = resultslice[4] if resultslice[5] is not None: #Integration results exist - int_model.data[: , start_row:total_rows, :] = resultslice[5] + int_model.data[:, start_row:total_rows, :] = resultslice[5] int_model.dq[:, start_row:total_rows, :] = resultslice[6] int_model.var_poisson[:, start_row:total_rows, :] = resultslice[7] int_model.var_rnoise[:, start_row:total_rows, :] = resultslice[8] @@ -317,7 +325,7 @@ def ols_ramp_fit_multi(input_model, buffsize, save_opt, readnoise_2d, gain_2d, if resultslice[11] is not None: #Optional results exist opt_model.slope[:, :, start_row:total_rows, :] = resultslice[11] opt_model.sigslope[:, :, start_row:total_rows, :] = resultslice[12] - opt_model.var_poisson[:,:, start_row:total_rows, :] = resultslice[13] + opt_model.var_poisson[:, :, start_row:total_rows, :] = resultslice[13] opt_model.var_rnoise[:, :, start_row:total_rows, :] = resultslice[14] opt_model.yint[:, :, start_row:total_rows, :] = resultslice[15] opt_model.sigyint[:, :, start_row:total_rows, :] = resultslice[16] @@ -328,8 +336,8 @@ def ols_ramp_fit_multi(input_model, buffsize, save_opt, readnoise_2d, gain_2d, stop_row = (k + 1) * rows_per_slice out_model.data[start_row: stop_row, :] = resultslice[0] out_model.dq[start_row: stop_row, :] = resultslice[1] - out_model.var_poisson[ start_row: stop_row, :] = resultslice[2] - out_model.var_rnoise[ start_row: stop_row, :] = resultslice[3] + out_model.var_poisson[start_row: 
stop_row, :] = resultslice[2] + out_model.var_rnoise[start_row: stop_row, :] = resultslice[3] out_model.err[start_row: stop_row, :] = resultslice[4] if resultslice[5] is not None: #Multiple integration results exist int_model.data[:, start_row: stop_row, :] = resultslice[5] @@ -338,15 +346,15 @@ def ols_ramp_fit_multi(input_model, buffsize, save_opt, readnoise_2d, gain_2d, int_model.var_rnoise[:, start_row: stop_row, :] = resultslice[8] int_model.err[:, start_row: stop_row, :] = resultslice[9] if resultslice[11] is not None: #Optional Results exist - opt_model.slope[:, :, start_row: (k + 1) *rows_per_slice, :] = resultslice[11] - opt_model.sigslope[:, :, start_row: (k + 1) *rows_per_slice, :] = resultslice[12] - opt_model.var_poisson[:, :, start_row: (k + 1) *rows_per_slice, :] = resultslice[13] - opt_model.var_rnoise[:, :, start_row: (k + 1) *rows_per_slice, :] = resultslice[14] - opt_model.yint[:, :, start_row: (k + 1) *rows_per_slice, :] = resultslice[15] - opt_model.sigyint[:, :, start_row: (k + 1) *rows_per_slice, :] = resultslice[16] - opt_model.pedestal[:, start_row: (k + 1) *rows_per_slice, :] = resultslice[17] - opt_model.weights[:, :, start_row: (k + 1) *rows_per_slice, :] = resultslice[18] - opt_model.crmag[:, :, start_row: (k + 1) *rows_per_slice, :] = resultslice[19] + opt_model.slope[:, :, start_row: (k + 1) * rows_per_slice, :] = resultslice[11] + opt_model.sigslope[:, :, start_row: (k + 1) * rows_per_slice, :] = resultslice[12] + opt_model.var_poisson[:, :, start_row: (k + 1) * rows_per_slice, :] = resultslice[13] + opt_model.var_rnoise[:, :, start_row: (k + 1) * rows_per_slice, :] = resultslice[14] + opt_model.yint[:, :, start_row: (k + 1) * rows_per_slice, :] = resultslice[15] + opt_model.sigyint[:, :, start_row: (k + 1) * rows_per_slice, :] = resultslice[16] + opt_model.pedestal[:, start_row: (k + 1) * rows_per_slice, :] = resultslice[17] + opt_model.weights[:, :, start_row: (k + 1) * rows_per_slice, :] = resultslice[18] + opt_model.crmag[:, :, start_row: (k + 1) * rows_per_slice, :] = resultslice[19] k = k + 1 return out_model, int_model, opt_model @@ -528,7 +536,7 @@ def ols_ramp_fit(data, err, groupdq, inpixeldq, buffsize, save_opt, readnoise_2d # Save original shapes for writing to log file, as these may change for MIRI orig_nreads = nreads orig_cubeshape = cubeshape - if (dropframes1 is None): # set to default if missing + if dropframes1 is None: # set to default if missing dropframes1 = 0 log.debug('Missing keyword DRPFRMS1, so setting to default value of 0') @@ -539,40 +547,38 @@ def ols_ramp_fit(data, err, groupdq, inpixeldq, buffsize, save_opt, readnoise_2d # the input model arrays will be resized appropriately. If all pixels in # all groups are flagged, return None for the models. - if (instrume == 'MIRI' and nreads > 1): + if instrume == 'MIRI' and nreads > 1: - first_gdq = groupdq[:,0,:,:] + first_gdq = groupdq[:, 0, :, :] num_bad_slices = 0 # number of initial groups that are all DO_NOT_USE - while (np.all(np.bitwise_and( first_gdq, dqflags.group['DO_NOT_USE']))): + while np.all(np.bitwise_and(first_gdq, DO_NOT_USE)): num_bad_slices += 1 nreads -= 1 ngroups -= 1 # Check if there are remaining groups before accessing data - if ngroups < 1 : # no usable data + if ngroups < 1: # no usable data log.error('1. 
All groups have all pixels flagged as DO_NOT_USE,') log.error(' so will not process this dataset.') return None, None, None - data = data[:,1:,:,:] - err = err[:,1:,:,:] - groupdq = groupdq[:,1:,:,:] + data = data[:, 1:, :, :] + err = err[:, 1:, :, :] + groupdq = groupdq[:, 1:, :, :] cubeshape = (nreads,) + imshape # Where the initial group of the just-truncated data is a cosmic ray, # remove the JUMP_DET flag from the group dq for those pixels so # that those groups will be included in the fit. - wh_cr = np.where( np.bitwise_and(groupdq[:,0,:,:], - dqflags.group['JUMP_DET']) != 0 ) + wh_cr = np.where(np.bitwise_and(groupdq[:, 0, :, :], JUMP_DET)) num_cr_1st = len(wh_cr[0]) for ii in range(num_cr_1st): - groupdq[ wh_cr[0][ii], 0, wh_cr[1][ii], - wh_cr[2][ii]] -= dqflags.group['JUMP_DET'] + groupdq[wh_cr[0][ii], 0, wh_cr[1][ii], wh_cr[2][ii]] -= JUMP_DET - first_gdq = groupdq[:,0,:,:] + first_gdq = groupdq[:, 0, :, :] log.info('Number of leading groups that are flagged as DO_NOT_USE: %s', num_bad_slices) @@ -580,32 +586,32 @@ def ols_ramp_fit(data, err, groupdq, inpixeldq, buffsize, save_opt, readnoise_2d # in the while loop above, ngroups would have been set to 0, and Nones # would have been returned. If execution has gotten here, there must # be at least 1 remaining group that is not all flagged. - last_gdq = groupdq[:,-1,:,:] - if np.all(np.bitwise_and( last_gdq, dqflags.group['DO_NOT_USE'] )): + last_gdq = groupdq[:, -1, :, :] + if np.all(np.bitwise_and(last_gdq, DO_NOT_USE)): nreads -= 1 ngroups -= 1 # Check if there are remaining groups before accessing data - if ngroups < 1 : # no usable data + if ngroups < 1: # no usable data log.error('2. All groups have all pixels flagged as DO_NOT_USE,') log.error(' so will not process this dataset.') return None, None, None - data = data[:,:-1,:,:] - err = err[:,:-1,:,:] - groupdq = groupdq[:,:-1,:,:] - cubeshape = (nreads,)+imshape + data = data[:, :-1, :, :] + err = err[:, :-1, :, :] + groupdq = groupdq[:, :-1, :, :] + cubeshape = (nreads,) + imshape log.info('MIRI dataset has all pixels in the final group flagged as DO_NOT_USE.') # Next block is to satisfy github issue 1681: # "MIRI FirstFrame and LastFrame minimum number of groups" - if (ngroups < 2): + if ngroups < 2: log.warning('MIRI datasets require at least 2 groups/integration') log.warning('(NGROUPS), so will not process this dataset.') return None, None, None - if (ngroups == 1): + if ngroups == 1: log.warning('Dataset has NGROUPS=1, so count rates for each integration') log.warning('will be calculated as the value of that 1 group divided by') log.warning('the group exposure time.') @@ -628,17 +634,20 @@ def ols_ramp_fit(data, err, groupdq, inpixeldq, buffsize, save_opt, readnoise_2d # the other arrays in all output products are populated with zeros, and # the output products are returned to ramp_fit(). If the initial group of # a ramp is saturated, it is assumed that all groups are saturated. 
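In miniature, the saturation short-circuit described above (and implemented just below) looks like the following sketch; the array shapes and the SATURATED bit value are illustrative, not taken from dqflags:

import numpy as np

SATURATED = 2  # assumed group-DQ bit value, for illustration only
gdq = np.zeros((2, 5, 4, 4), dtype=np.uint8)  # (integrations, groups, y, x)
gdq[:, 0, :, :] |= SATURATED                  # saturate every pixel's 0th group

first_gdq = gdq[:, 0, :, :]
print(np.all(np.bitwise_and(first_gdq, SATURATED)))  # True -> return trivial output
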
- first_gdq = groupdq[:,0,:,:] - if np.all(np.bitwise_and( first_gdq, dqflags.group['SATURATED'] )): - new_model, int_model, opt_model = utils.do_all_sat(inpixeldq, groupdq, imshape, n_int, save_opt ) - if (save_opt): + first_gdq = groupdq[:, 0, :, :] + if np.all(np.bitwise_and(first_gdq, SATURATED)): + new_model, int_model, opt_model = \ + utils.do_all_sat(inpixeldq, groupdq, imshape, n_int, save_opt) + if save_opt: actual_segments = 0 actual_CRs = 0 return new_model.data, new_model.dq, new_model.var_poisson, new_model.var_rnoise, new_model.err, \ - int_model.data, int_model.dq, int_model.var_poisson, int_model.var_rnoise, int_model.err, int_model.int_times, \ - opt_model.slope, opt_model.sigslope, opt_model.var_poisson, opt_model.var_rnoise, opt_model.yint, opt_model.sigyint, \ - opt_model.pedestal, opt_model.weights, opt_model.crmag, actual_segments, actual_CRs + int_model.data, int_model.dq, int_model.var_poisson, int_model.var_rnoise, \ + int_model.err, int_model.int_times, \ + opt_model.slope, opt_model.sigslope, opt_model.var_poisson, opt_model.var_rnoise, \ + opt_model.yint, opt_model.sigyint, \ + opt_model.pedestal, opt_model.weights, opt_model.crmag, actual_segments, actual_CRs # Get max number of segments fit in all integrations max_seg, num_CRs = calc_num_seg(gdq_cube, n_int) @@ -646,7 +655,7 @@ def ols_ramp_fit(data, err, groupdq, inpixeldq, buffsize, save_opt, readnoise_2d f_max_seg = 0 # final number to use, usually overwritten by actual value - (dq_int, median_diffs_2d, num_seg_per_int, sat_0th_group_int) =\ + dq_int, median_diffs_2d, num_seg_per_int, sat_0th_group_int =\ utils.alloc_arrays_1(n_int, imshape) opt_res = utils.OptRes(n_int, imshape, max_seg, nreads, save_opt) @@ -659,7 +668,7 @@ def ols_ramp_fit(data, err, groupdq, inpixeldq, buffsize, save_opt, readnoise_2d # require this local variable to be uint16, and it will be used as the # (uint16) PIXELDQ in the outgoing ImageModel. 
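A hedged sketch of what the utils.reset_bad_gain call that follows is described as doing: flag pixels whose gain is non-positive or NaN so they can be excluded later. The helper body and the bit values here are assumptions for illustration, not the library implementation:

import numpy as np

NO_GAIN_VALUE, DO_NOT_USE = 8, 1  # hypothetical pixel-DQ bit values

def reset_bad_gain_sketch(pixeldq, gain):
    bad = np.logical_or(gain <= 0., np.isnan(gain))  # unusable gain entries
    pixeldq[bad] |= (NO_GAIN_VALUE | DO_NOT_USE)
    return pixeldq

pixeldq = np.zeros((2, 3), dtype=np.uint16)
gain = np.array([[1.0, 0.0, 2.0], [np.nan, 1.5, -1.0]])
print(reset_bad_gain_sketch(pixeldq, gain))  # bad entries get both bits set
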
pixeldq = inpixeldq.copy() - pixeldq = utils.reset_bad_gain( pixeldq, gain_2d ) # Flag bad pixels in gain + pixeldq = utils.reset_bad_gain(pixeldq, gain_2d) # Flag bad pixels in gain # In this 'First Pass' over the data, loop over integrations and data # sections to calculate the estimated median slopes, which will be used @@ -677,25 +686,25 @@ def ols_ramp_fit(data, err, groupdq, inpixeldq, buffsize, save_opt, readnoise_2d if rhi > cubeshape[1]: rhi = cubeshape[1] - data_sect = np.float32(data[num_int,: , :, :]) #dt = np.dtype(data_sect) # Skip data section if it is all NaNs - if np.all(np.isnan( data_sect)): + data_sect = np.float32(data[num_int, :, :, :]) + if np.all(np.isnan(data_sect)): log.error('Current data section is all nans, so not processing the section.') continue # first frame section for 1st group of current integration - ff_sect = data[ num_int, 0, rlo:rhi, :] + ff_sect = data[num_int, 0, rlo:rhi, :] + # Get appropriate sections - gdq_sect = groupdq[num_int,:,:,:] + gdq_sect = groupdq[num_int, :, :, :] rn_sect = readnoise_2d[rlo:rhi, :] gain_sect = gain_2d[rlo:rhi, :] # Reset all saturated groups in the input data array to NaN - where_sat = np.where( np.bitwise_and(gdq_sect, - dqflags.group['SATURATED']) != 0) + where_sat = np.where(np.bitwise_and(gdq_sect, SATURATED)) - data_sect[ where_sat ] = np.NaN + data_sect[where_sat] = np.NaN del where_sat # Compute the first differences of all groups @@ -703,26 +712,24 @@ def ols_ramp_fit(data, err, groupdq, inpixeldq, buffsize, save_opt, readnoise_2d # If the dataset has only 1 group/integ, assume the 'previous group' # is all zeros, so just use data as the difference - if (first_diffs_sect.shape[0] == 0): + if first_diffs_sect.shape[0] == 0: first_diffs_sect = data_sect.copy() else: # Similarly, for datasets having >1 group/integ and having # single-group segments, just use the data as the difference - wh_nan = np.where( np.isnan( first_diffs_sect[0,:,:]) ) + wh_nan = np.where(np.isnan(first_diffs_sect[0, :, :])) - if (len(wh_nan[0]) > 0): - first_diffs_sect[0,:,:][wh_nan] = data_sect[0,:,:][wh_nan] + if len(wh_nan[0]) > 0: + first_diffs_sect[0, :, :][wh_nan] = data_sect[0, :, :][wh_nan] del wh_nan - i_group,i_yy,i_xx, = np.where( np.bitwise_and(gdq_sect[1:,:,:], - dqflags.group['JUMP_DET']) != 0) - # Mask all the first differences that are affected by a CR, # starting at group 1. The purpose of starting at index 1 is # to shift all the indices down by 1, so they line up with the # indices in first_diffs. - first_diffs_sect[ i_group-1, i_yy, i_xx ] = np.NaN + i_group, i_yy, i_xx, = np.where(np.bitwise_and(gdq_sect[1:, :, :], JUMP_DET)) + first_diffs_sect[i_group - 1, i_yy, i_xx] = np.NaN del i_group, i_yy, i_xx @@ -731,10 +738,10 @@ def ols_ramp_fit(data, err, groupdq, inpixeldq, buffsize, save_opt, readnoise_2d # few good groups past the 0th. Due to the shortage of good # data, the first_diffs will be set here equal to the data in # the 0th group. 
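The "first pass" estimate described above reduces to a few array operations: first differences along the group axis, differences affected by a jump set to NaN, then a per-pixel nanmedian divided by the group time. A sketch with synthetic data (the flag value and the 3 s group time are illustrative):

import numpy as np

JUMP_DET = 4
rng = np.random.default_rng(0)
data_sect = np.cumsum(rng.random((6, 4, 4)), axis=0)  # (groups, y, x) ramps
gdq_sect = np.zeros(data_sect.shape, dtype=np.uint8)
gdq_sect[3, 1, 1] = JUMP_DET                          # one cosmic-ray hit

first_diffs = np.diff(data_sect, axis=0)
g, y, x = np.where(np.bitwise_and(gdq_sect[1:], JUMP_DET))
first_diffs[g, y, x] = np.nan                         # mask CR-affected diffs

med_rates = np.nanmedian(first_diffs, axis=0) / 3.0   # DN/s for group_time = 3 s
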
- wh_min = np.where( np.logical_and(np.isnan(first_diffs_sect) - .all(axis=0), np.isfinite( data_sect[0,:,:]))) + wh_min = np.where(np.logical_and( + np.isnan(first_diffs_sect).all(axis=0), np.isfinite(data_sect[0, :, :]))) if len(wh_min[0] > 0): - first_diffs_sect[0,:,:][wh_min] = data_sect[0,:,:][wh_min] + first_diffs_sect[0, :, :][wh_min] = data_sect[0, :, :][wh_min] del wh_min @@ -744,7 +751,7 @@ def ols_ramp_fit(data, err, groupdq, inpixeldq, buffsize, save_opt, readnoise_2d warnings.filterwarnings("ignore", "All-NaN.*", RuntimeWarning) nan_med = np.nanmedian(first_diffs_sect, axis=0) nan_med[np.isnan(nan_med)] = 0. # if all first_diffs_sect are nans - median_diffs_2d[ rlo:rhi, : ] += nan_med + median_diffs_2d[rlo:rhi, :] += nan_med # Calculate the slope of each segment # note that the name "opt_res", which stands for "optional results", @@ -752,9 +759,8 @@ def ols_ramp_fit(data, err, groupdq, inpixeldq, buffsize, save_opt, readnoise_2d # per-segment results that will eventually be used to compute the # final slopes, sigmas, etc. for the main (non-optional) products t_dq_cube, inv_var, opt_res, f_max_seg, num_seg = \ - calc_slope(data_sect, gdq_sect, frame_time, opt_res, save_opt, - rn_sect, gain_sect, max_seg, ngroups, weighting, - f_max_seg) + calc_slope(data_sect, gdq_sect, frame_time, opt_res, save_opt, rn_sect, + gain_sect, max_seg, ngroups, weighting, f_max_seg) del gain_sect @@ -764,16 +770,14 @@ def ols_ramp_fit(data, err, groupdq, inpixeldq, buffsize, save_opt, readnoise_2d num_seg_per_int[num_int, rlo:rhi, :] = num_seg.reshape(sect_shape) # Populate integ-spec slice which is set if 0th group has SAT - wh_sat0 = np.where( np.bitwise_and(gdq_sect[0,:,:], - dqflags.group['SATURATED'])) - if ( len(wh_sat0[0] ) > 0): - sat_0th_group_int[num_int, rlo:rhi, :][ wh_sat0 ] = 1 + wh_sat0 = np.where(np.bitwise_and(gdq_sect[0, :, :], SATURATED)) + if len(wh_sat0[0]) > 0: + sat_0th_group_int[num_int, rlo:rhi, :][wh_sat0] = 1 del wh_sat0 pixeldq_sect = pixeldq[rlo:rhi, :].copy() - dq_int[num_int, rlo:rhi, :] = \ - dq_compress_sect(t_dq_cube, pixeldq_sect).copy() + dq_int[num_int, rlo:rhi, :] = dq_compress_sect(t_dq_cube, pixeldq_sect).copy() del t_dq_cube @@ -786,10 +790,9 @@ def ols_ramp_fit(data, err, groupdq, inpixeldq, buffsize, save_opt, readnoise_2d # as approximation to cosmic ray amplitude for those pixels # having their DQ set for cosmic rays data_diff = data_sect - utils.shift_z(data_sect, -1) - dq_cr = np.bitwise_and(dqflags.group['JUMP_DET'], gdq_sect) + dq_cr = np.bitwise_and(JUMP_DET, gdq_sect) - opt_res.cr_mag_seg[num_int, :, rlo:rhi, :] = \ - data_diff * (dq_cr != 0) + opt_res.cr_mag_seg[num_int, :, rlo:rhi, :] = data_diff * (dq_cr != 0) del data_diff @@ -802,14 +805,14 @@ def ols_ramp_fit(data, err, groupdq, inpixeldq, buffsize, save_opt, readnoise_2d # Compute the final 2D array of differences; create rate array median_diffs_2d /= n_int - med_rates = median_diffs_2d/group_time + med_rates = median_diffs_2d / group_time del median_diffs_2d del first_diffs_sect - (var_p3, var_r3, var_p4, var_r4, var_both4, var_both3, - inv_var_both4, s_inv_var_p3, s_inv_var_r3, s_inv_var_both3, - segs_4) = utils.alloc_arrays_2(n_int, imshape, max_seg) + var_p3, var_r3, var_p4, var_r4, var_both4, var_both3, \ + inv_var_both4, s_inv_var_p3, s_inv_var_r3, s_inv_var_both3, segs_4 = \ + utils.alloc_arrays_2(n_int, imshape, max_seg) # In this 'Second Pass' over the data, loop over integrations and data # sections to calculate the variances of the slope using the estimated @@ -842,20 +845,17 @@ 
def ols_ramp_fit(data, err, groupdq, inpixeldq, buffsize, save_opt, readnoise_2d # Calculate results needed to compute the variance arrays den_r3, den_p3, num_r3, segs_beg_3 = \ - utils.calc_slope_vars( rn_sect, gain_sect, gdq_sect, - group_time, max_seg ) + utils.calc_slope_vars(rn_sect, gain_sect, gdq_sect, group_time, max_seg) segs_4[num_int, :, rlo:rhi, :] = segs_beg_3 # Suppress harmless arithmetic warnings for now - warnings.filterwarnings("ignore", ".*invalid value.*", - RuntimeWarning) - warnings.filterwarnings("ignore", ".*divide by zero.*", - RuntimeWarning) - var_p4[num_int, :, rlo:rhi, :] = den_p3 * med_rates[ rlo:rhi, :] + warnings.filterwarnings("ignore", ".*invalid value.*", RuntimeWarning) + warnings.filterwarnings("ignore", ".*divide by zero.*", RuntimeWarning) + var_p4[num_int, :, rlo:rhi, :] = den_p3 * med_rates[rlo:rhi, :] # Find the segment variance due to read noise and convert back to DN - var_r4[num_int, :, rlo:rhi, :] = num_r3 * den_r3/gain_sect**2 + var_r4[num_int, :, rlo:rhi, :] = num_r3 * den_r3 / gain_sect**2 # Reset the warnings filter to its original state warnings.resetwarnings() @@ -869,53 +869,53 @@ def ols_ramp_fit(data, err, groupdq, inpixeldq, buffsize, save_opt, readnoise_2d # variance is proportional to the median estimated slope) to # outrageously large values so that they will have negligible # contributions. - var_p4[num_int,:,:,:] *= ( segs_4[num_int,:,:,:] > 0) + var_p4[num_int, :, :, :] *= (segs_4[num_int, :, :, :] > 0) # Suppress, then re-enable harmless arithmetic warnings warnings.filterwarnings("ignore", ".*invalid value.*", RuntimeWarning) warnings.filterwarnings("ignore", ".*divide by zero.*", RuntimeWarning) var_p4[var_p4 <= 0.] = utils.LARGE_VARIANCE - var_r4[num_int,:,:,:] *= ( segs_4[num_int,:,:,:] > 0) + var_r4[num_int, :, :, :] *= (segs_4[num_int, :, :, :] > 0) var_r4[var_r4 <= 0.] = utils.LARGE_VARIANCE # The sums of inverses of the variances are needed for later # variance calculations. - s_inv_var_p3[num_int, :, :] = (1./var_p4[num_int, :, :, :]).sum(axis=0) - var_p3[num_int, :, :] = 1./ s_inv_var_p3[num_int, :, :] - s_inv_var_r3[num_int, :, :] = (1./var_r4[num_int, :, :, :]).sum(axis=0) - var_r3[num_int, :, :] = 1./ s_inv_var_r3[num_int, :, :] + s_inv_var_p3[num_int, :, :] = (1. / var_p4[num_int, :, :, :]).sum(axis=0) + var_p3[num_int, :, :] = 1. / s_inv_var_p3[num_int, :, :] + s_inv_var_r3[num_int, :, :] = (1. / var_r4[num_int, :, :, :]).sum(axis=0) + var_r3[num_int, :, :] = 1. / s_inv_var_r3[num_int, :, :] # Huge variances correspond to non-existing segments, so are reset to 0 # to nullify their contribution. var_p3[var_p3 > 0.1 * utils.LARGE_VARIANCE] = 0. warnings.resetwarnings() - var_both4[num_int,:,:,:] = var_r4[num_int,:,:,:] + var_p4[num_int,:,:,:] - inv_var_both4[num_int, :, :, :] = 1./var_both4[num_int, :, :, :] + var_both4[num_int, :, :, :] = var_r4[num_int, :, :, :] + var_p4[num_int, :, :, :] + inv_var_both4[num_int, :, :, :] = 1. / var_both4[num_int, :, :, :] # Want to retain values in the 4D arrays only for the segments that each # pixel has, so will zero out values for the higher indices. Creating # and manipulating intermediate arrays (views, such as var_p4_int # will zero out the appropriate indices in var_p4 and var_r4.) 
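In miniature, the variance bookkeeping above works like this: slots for segments a pixel does not have are pushed to a huge variance so they contribute essentially nothing, and the combined per-pixel variance is the reciprocal of the summed inverse variances (values synthetic):

import numpy as np

LARGE_VARIANCE = 1.e8
var_p4 = np.array([[0.04, 0.10], [0.02, 0.00]])  # Poisson var [segment, pixel]
var_r4 = np.array([[0.01, 0.05], [0.03, 0.00]])  # read-noise var
segs_4 = np.array([[3, 4], [5, 0]])              # group counts; 0 = no segment

var_p4 = np.where(segs_4 > 0, var_p4, LARGE_VARIANCE)
var_r4 = np.where(segs_4 > 0, var_r4, LARGE_VARIANCE)
var_both4 = var_p4 + var_r4
var_both3 = 1. / (1. / var_both4).sum(axis=0)    # combined per-pixel variance
print(var_both3)
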
# Extract the slice of 4D arrays for the current integration - var_p4_int = var_p4[num_int,:,:,:] # [ segment, y, x ] - inv_var_both4_int = inv_var_both4[num_int,:,:,:] + var_p4_int = var_p4[num_int, :, :, :] # [ segment, y, x ] + inv_var_both4_int = inv_var_both4[num_int, :, :, :] # Zero out non-existing segments - var_p4_int *= ( segs_4[num_int,:,:,:] > 0) - inv_var_both4_int *= ( segs_4[num_int,:,:,:] > 0) + var_p4_int *= (segs_4[num_int, :, :, :] > 0) + inv_var_both4_int *= (segs_4[num_int, :, :, :] > 0) # reshape these arrays to simplify masking [ segment, 1D pixel ] - var_p4_int2 = var_p4_int.reshape(( var_p4_int.shape[0], - var_p4_int.shape[1]*var_p4_int.shape[2])) + var_p4_int2 = var_p4_int.reshape( + (var_p4_int.shape[0], var_p4_int.shape[1] * var_p4_int.shape[2])) - s_inv_var_both3[num_int,:,:] = (inv_var_both4[num_int,:,:,:]).sum(axis=0) + s_inv_var_both3[num_int, :, :] = (inv_var_both4[num_int, :, :, :]).sum(axis=0) # Suppress, then re-enable harmless arithmetic warnings warnings.filterwarnings("ignore", ".*invalid value.*", RuntimeWarning) warnings.filterwarnings("ignore", ".*divide by zero.*", RuntimeWarning) - var_both3[num_int, :, :] = 1./s_inv_var_both3[num_int, :, :] + var_both3[num_int, :, :] = 1. / s_inv_var_both3[num_int, :, :] warnings.resetwarnings() del var_p4_int @@ -923,8 +923,8 @@ def ols_ramp_fit(data, err, groupdq, inpixeldq, buffsize, save_opt, readnoise_2d del gain_2d - var_p4 *= ( segs_4[:,:,:,:] > 0) # Zero out non-existing segments - var_r4 *= ( segs_4[:,:,:,:] > 0) + var_p4 *= (segs_4[:, :, :, :] > 0) # Zero out non-existing segments + var_r4 *= (segs_4[:, :, :, :] > 0) # Delete lots of arrays no longer needed if inv_var_both4_int is not None: @@ -954,7 +954,7 @@ def ols_ramp_fit(data, err, groupdq, inpixeldq, buffsize, save_opt, readnoise_2d # all integrations: # slope = sum_over_integs_and_segs(slope_seg/var_seg)/ # sum_over_integs_and_segs(1/var_seg) - slope_by_var4 = opt_res.slope_seg.copy()/var_both4 + slope_by_var4 = opt_res.slope_seg.copy() / var_both4 del var_both4 @@ -966,11 +966,11 @@ def ols_ramp_fit(data, err, groupdq, inpixeldq, buffsize, save_opt, readnoise_2d # Suppress, then re-enable harmless arithmetic warnings warnings.filterwarnings("ignore", ".*invalid value.*", RuntimeWarning) warnings.filterwarnings("ignore", ".*divide by zero.*", RuntimeWarning) - slope_dataset2 = s_slope_by_var2/s_inv_var_both2 + slope_dataset2 = s_slope_by_var2 / s_inv_var_both2 warnings.resetwarnings() - del s_inv_var_both2, s_slope_by_var2, s_slope_by_var3, slope_by_var4 - del s_inv_var_both3 + del s_slope_by_var2, s_slope_by_var3, slope_by_var4 + del s_inv_var_both2, s_inv_var_both3 # Replace nans in slope_dataset2 with 0 (for non-existing segments) slope_dataset2[np.isnan(slope_dataset2)] = 0. 
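The formula quoted above -- slope = sum_over_integs_and_segs(slope_seg/var_seg) / sum_over_integs_and_segs(1/var_seg) -- is an inverse-variance-weighted mean. A one-dimensional sketch with made-up numbers:

import numpy as np

slope_seg = np.array([1.02, 0.97, 1.10])  # per-segment slopes, DN/s
var_seg = np.array([0.04, 0.02, 0.09])    # their variances

slope = np.sum(slope_seg / var_seg) / np.sum(1. / var_seg)
sigma = np.sqrt(1. / np.sum(1. / var_seg))  # error of the weighted mean
print(slope, sigma)
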
@@ -983,7 +983,7 @@ def ols_ramp_fit(data, err, groupdq, inpixeldq, buffsize, save_opt, readnoise_2d # Suppress, then re-enable harmless arithmetic warnings warnings.filterwarnings("ignore", ".*invalid value.*", RuntimeWarning) warnings.filterwarnings("ignore", ".*divide by zero.*", RuntimeWarning) - slope_int = the_num/the_den + slope_int = the_num / the_den warnings.resetwarnings() del the_num, the_den @@ -998,14 +998,13 @@ def ols_ramp_fit(data, err, groupdq, inpixeldq, buffsize, save_opt, readnoise_2d # Loop over data integrations to calculate integration-specific pedestal if save_opt: - dq_slice = np.zeros((gdq_cube_shape[2],gdq_cube_shape[3]), - dtype=np.uint32) + dq_slice = np.zeros((gdq_cube_shape[2], gdq_cube_shape[3]), dtype=np.uint32) for num_int in range(0, n_int): dq_slice = groupdq[num_int, 0, :, :] - opt_res.ped_int[ num_int, :, : ] = \ + opt_res.ped_int[num_int, :, :] = \ utils.calc_pedestal(num_int, slope_int, opt_res.firstf_int, - dq_slice, nframes, groupgap, dropframes1) + dq_slice, nframes, groupgap, dropframes1) del dq_slice @@ -1017,17 +1016,17 @@ def ols_ramp_fit(data, err, groupdq, inpixeldq, buffsize, save_opt, readnoise_2d # Some contributions to these vars may be NaN as they are from ramps # having PIXELDQ=DO_NOT_USE - var_p4[ np.isnan( var_p4 )] = 0. - var_r4[ np.isnan( var_r4 )] = 0. + var_p4[np.isnan(var_p4)] = 0. + var_r4[np.isnan(var_r4)] = 0. # Truncate results at the maximum number of segments found - opt_res.slope_seg = opt_res.slope_seg[:,:f_max_seg,:,:] - opt_res.sigslope_seg = opt_res.sigslope_seg[:,:f_max_seg,:,:] - opt_res.yint_seg = opt_res.yint_seg[:,:f_max_seg,:,:] - opt_res.sigyint_seg = opt_res.sigyint_seg[:,:f_max_seg,:,:] - opt_res.weights = (inv_var_both4[:,:f_max_seg,:,:])**2. - opt_res.var_p_seg = var_p4[:,:f_max_seg,:,:] - opt_res.var_r_seg = var_r4[:,:f_max_seg,:,:] + opt_res.slope_seg = opt_res.slope_seg[:, :f_max_seg, :, :] + opt_res.sigslope_seg = opt_res.sigslope_seg[:, :f_max_seg, :, :] + opt_res.yint_seg = opt_res.yint_seg[:, :f_max_seg, :, :] + opt_res.sigyint_seg = opt_res.sigyint_seg[:, :f_max_seg, :, :] + opt_res.weights = (inv_var_both4[:, :f_max_seg, :, :])**2. + opt_res.var_p_seg = var_p4[:, :f_max_seg, :, :] + opt_res.var_r_seg = var_r4[:, :f_max_seg, :, :] opt_model = opt_res.output_optional(effintim) else: @@ -1079,8 +1078,8 @@ def ols_ramp_fit(data, err, groupdq, inpixeldq, buffsize, save_opt, readnoise_2d log.debug('Instrument: %s', instrume) log.debug('Number of pixels in 2D array: %d', nrows * ncols) - log.debug('Shape of 2D image: (%d, %d)' %(imshape)) - log.debug('Shape of data cube: (%d, %d, %d)' %(orig_cubeshape)) + log.debug('Shape of 2D image: (%d, %d)' % (imshape)) + log.debug('Shape of data cube: (%d, %d, %d)' % (orig_cubeshape)) log.debug('Buffer size (bytes): %d', buffsize) log.debug('Number of rows per buffer: %d', nrows) log.info('Number of groups per integration: %d', orig_nreads) @@ -1088,8 +1087,8 @@ def ols_ramp_fit(data, err, groupdq, inpixeldq, buffsize, save_opt, readnoise_2d log.debug('The execution time in seconds: %f', tstop - tstart) # Compute the 2D variances due to Poisson and read noise - var_p2 = 1/(s_inv_var_p3.sum(axis=0)) - var_r2 = 1/(s_inv_var_r3.sum(axis=0)) + var_p2 = 1 / (s_inv_var_p3.sum(axis=0)) + var_r2 = 1 / (s_inv_var_r3.sum(axis=0)) # Huge variances correspond to non-existing segments, so are reset to 0 # to nullify their contribution. 
@@ -1100,8 +1099,8 @@ def ols_ramp_fit(data, err, groupdq, inpixeldq, buffsize, save_opt, readnoise_2d

    # Some contributions to these vars may be NaN as they are from ramps
    # having PIXELDQ=DO_NOT_USE
-    var_p2[ np.isnan( var_p2 )] = 0.
-    var_r2[ np.isnan( var_r2 )] = 0.
+    var_p2[np.isnan(var_p2)] = 0.
+    var_r2[np.isnan(var_r2)] = 0.

    # Suppress, then re-enable, harmless arithmetic warning
    warnings.filterwarnings("ignore", ".*invalid value.*", RuntimeWarning)
@@ -1112,11 +1111,11 @@ def ols_ramp_fit(data, err, groupdq, inpixeldq, buffsize, save_opt, readnoise_2d
    del s_inv_var_r3

    # Create new model for the primary output.
-    new_model = datamodels.ImageModel(data=c_rates.astype(np.float32),
-            dq=final_pixeldq.astype(np.uint32),
-            var_poisson=var_p2.astype(np.float32),
-            var_rnoise=var_r2.astype(np.float32),
-            err=err_tot.astype(np.float32))
+    new_model = datamodels.ImageModel(data=c_rates.astype(np.float32),
+                                      dq=final_pixeldq.astype(np.uint32),
+                                      var_poisson=var_p2.astype(np.float32),
+                                      var_rnoise=var_r2.astype(np.float32),
+                                      err=err_tot.astype(np.float32))

    if int_model is not None:
        int_data = int_model.data.copy()
@@ -1163,8 +1162,7 @@ def ols_ramp_fit(data, err, groupdq, inpixeldq, buffsize, save_opt, readnoise_2d
        opt_pedestal, opt_weights, opt_crmag, actual_segments, actual_CRs


-def gls_ramp_fit(input_model, buffsize, save_opt,
-                 readnoise_model, gain_model, max_cores):
+def gls_ramp_fit(input_model, buffsize, save_opt, readnoise_model, gain_model, max_cores):
    """Fit a ramp using generalized least squares.

    Extended Summary
@@ -1244,7 +1242,7 @@ def gls_ramp_fit(input_model, buffsize, save_opt,
    # Determine the maximum number of cosmic ray hits for any pixel.
    max_num_cr = -1  # invalid initial value
    for num_int in range(n_int):
-        i_max_num_cr = utils.get_max_num_cr(input_model.groupdq[num_int,:,:,:], jump_flag)
+        i_max_num_cr = utils.get_max_num_cr(input_model.groupdq[num_int, :, :, :], jump_flag)
        max_num_cr = max(max_num_cr, i_max_num_cr)

    # Calculate effective integration time (once EFFINTIM has been populated
    #   and accessible, will use that instead), and other keywords that will
    #   be needed if the pedestal calculation is requested. Note 'nframes'
    #   is the number given by the NFRAMES keyword, and is the number of
    #   frames averaged on-board for a group, i.e., it does not include the
    #   groupgap.
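For orientation, the effective integration time in the comment above is sketched here under the assumption that, when no EFFINTIM keyword is available, it is the group-to-group cadence (nframes + groupgap) * frame_time; the helper name and the numbers are illustrative rather than the actual utils.get_efftim_ped implementation:

def effintim_sketch(nframes, groupgap, tframe):
    # frames averaged per group, plus frames dropped between groups,
    # times the single-frame readout time
    return (nframes + groupgap) * tframe

print(effintim_sketch(4, 1, 10.737))  # -> 53.685 s (illustrative values)
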
-    effintim, nframes, groupgap, dropframes1= utils.get_efftim_ped(input_model)
+    effintim, nframes, groupgap, dropframes1 = utils.get_efftim_ped(input_model)

-    if number_slices ==1:
+    if number_slices == 1:
        rows_per_slice = total_rows
        slopes, slope_int, slope_err_int, pixeldq_sect, dq_int, sum_weight, \
-            intercept_int, intercept_err_int, pedestal_int, ampl_int, ampl_err_int = \
-            gls_fit_all_integrations(frame_time, gain_2d, input_model.groupdq,
-                group_time, jump_flag, max_num_cr, input_model.data, \
-                input_model.err, nframes, pixeldq, readnoise_2d, \
-                saturated_flag, save_opt)
+            intercept_int, intercept_err_int, pedestal_int, ampl_int, ampl_err_int = \
+            gls_fit_all_integrations(frame_time, gain_2d, input_model.groupdq,
+                                     group_time, jump_flag, max_num_cr, input_model.data,
+                                     input_model.err, nframes, pixeldq, readnoise_2d,
+                                     saturated_flag, save_opt)
    else:
        rows_per_slice = round(total_rows / number_slices)
        pool = Pool(processes=number_slices)
@@ -1297,13 +1295,14 @@ def gls_ramp_fit(input_model, buffsize, save_opt,
            stop_row = (i + 1) * rows_per_slice
            readnoise_slice = readnoise_2d[start_row: stop_row, :]
            gain_slice = gain_2d[start_row: stop_row, :]
-            data_slice = input_model.data[:,:,start_row: stop_row, :].copy()
+            data_slice = input_model.data[:, :, start_row:stop_row, :].copy()
            err_slice = input_model.err[:, :, start_row: stop_row, :].copy()
            groupdq_slice = input_model.groupdq[:, :, start_row: stop_row, :].copy()
-            pixeldq_slice = pixeldq[ start_row: stop_row, :].copy()
-            slices.insert(i, (frame_time, gain_slice, groupdq_slice, group_time,
-                jump_flag, max_num_cr, data_slice, err_slice, frames_per_group, pixeldq_slice,
-                readnoise_slice, saturated_flag, save_opt))
+            pixeldq_slice = pixeldq[start_row: stop_row, :].copy()
+            slices.insert(i,
+                          (frame_time, gain_slice, groupdq_slice, group_time,
+                           jump_flag, max_num_cr, data_slice, err_slice, frames_per_group, pixeldq_slice,
+                           readnoise_slice, saturated_flag, save_opt))

        #The last slice takes the remainder of the rows
        start_row = (number_slices - 1) * rows_per_slice
        readnoise_slice = readnoise_2d[start_row: total_rows, :]
@@ -1312,9 +1311,10 @@ def gls_ramp_fit(input_model, buffsize, save_opt,
        err_slice = input_model.err[:, :, start_row: total_rows, :].copy()
        groupdq_slice = input_model.groupdq[:, :, start_row: total_rows, :].copy()
        pixeldq_slice = input_model.pixeldq[start_row: total_rows, :].copy()
-        slices.insert(number_slices - 1, (frame_time, gain_slice, groupdq_slice, group_time,
-            jump_flag, max_num_cr, data_slice, err_slice, frames_per_group, pixeldq_slice,
-            readnoise_slice, saturated_flag, save_opt))
+        slices.insert(number_slices - 1,
+                      (frame_time, gain_slice, groupdq_slice, group_time,
+                       jump_flag, max_num_cr, data_slice, err_slice, frames_per_group, pixeldq_slice,
+                       readnoise_slice, saturated_flag, save_opt))

        log.debug("Creating %d processes for ramp fitting " % number_slices)
        real_results = pool.starmap(gls_fit_all_integrations, slices)
@@ -1387,10 +1387,8 @@ def gls_ramp_fit(input_model, buffsize, save_opt,
    if save_opt:  # collect optional results for output
        # Get the zero-point intercepts and the cosmic-ray amplitudes for
        #   each integration (even if there's only one integration).
- gls_opt_model = utils.gls_output_optional(input_model, - intercept_int, intercept_err_int, - pedestal_int, - ampl_int, ampl_err_int) + gls_opt_model = utils.gls_output_optional( + input_model, intercept_int, intercept_err_int, pedestal_int, ampl_int, ampl_err_int) else: gls_opt_model = None @@ -1413,19 +1411,19 @@ def gls_ramp_fit(input_model, buffsize, save_opt, # Create new model... if n_int > 1: - new_model = datamodels.ImageModel(data=slopes.astype(np.float32), - dq=final_pixeldq, err=gls_err.astype(np.float32)) + new_model = datamodels.ImageModel( + data=slopes.astype(np.float32), dq=final_pixeldq, err=gls_err.astype(np.float32)) else: - new_model = datamodels.ImageModel(data=slope_int[0], dq=final_pixeldq, - err=slope_err_int[0]) + new_model = datamodels.ImageModel( + data=slope_int[0], dq=final_pixeldq, err=slope_err_int[0]) new_model.update(input_model) # ... and add all keys from input return new_model, int_model, gls_opt_model -def gls_fit_all_integrations(frame_time, gain_2d, gdq_cube, - group_time, jump_flag, max_num_cr, data_sect, input_var_sect, - nframes_used, pixeldq, readnoise_2d, saturated_flag, save_opt): +def gls_fit_all_integrations( + frame_time, gain_2d, gdq_cube, group_time, jump_flag, max_num_cr, data_sect, + input_var_sect, nframes_used, pixeldq, readnoise_2d, saturated_flag, save_opt): """ This method will fit the rate for all pixels and all integrations using the Generalized Least Squares (GLS) method. @@ -1529,14 +1527,11 @@ def gls_fit_all_integrations(frame_time, gain_2d, gdq_cube, if save_opt: first_group[:, :] = data_sect[num_int, 0, :, :].copy() - (intercept_sect, intercept_var_sect, - slope_sect, slope_var_sect, - cr_sect, cr_var_sect) = \ - gls_fit.determine_slope(data_sect[num_int,:,:,:], input_var_sect[num_int,:,:,:], - gdq_cube[num_int,:,:,:], readnoise_2d, gain_2d, - frame_time, group_time, - nframes_used, max_num_cr, - saturated_flag, jump_flag) + intercept_sect, intercept_var_sect, slope_sect, slope_var_sect, cr_sect, cr_var_sect = \ + gls_fit.determine_slope( + data_sect[num_int, :, :, :], input_var_sect[num_int, :, :, :], + gdq_cube[num_int, :, :, :], readnoise_2d, gain_2d, frame_time, group_time, + nframes_used, max_num_cr, saturated_flag, jump_flag) slope_int[num_int, :, :] = slope_sect.copy() v_mask = (slope_var_sect <= 0.) @@ -1544,14 +1539,14 @@ def gls_fit_all_integrations(frame_time, gain_2d, gdq_cube, # Replace negative or zero variances with a large value. slope_var_sect[v_mask] = utils.LARGE_VARIANCE # Also set a flag in the pixel dq array. - temp_dq[:, :][v_mask] = dqflags.pixel['UNRELIABLE_SLOPE'] + temp_dq[:, :][v_mask] = UNRELIABLE_SLOPE del v_mask # If a pixel was flagged (by an earlier step) as saturated in # the first group, flag the pixel as bad. # Note: save s_mask until after the call to utils.gls_pedestal. s_mask = (gdq_cube[0] == saturated_flag) if s_mask.any(): - temp_dq[:, :][s_mask] = dqflags.pixel['UNRELIABLE_SLOPE'] + temp_dq[:, :][s_mask] = UNRELIABLE_SLOPE slope_err_int[num_int, :, :] = np.sqrt(slope_var_sect) # We need to take a weighted average if (and only if) number_ints > 1. @@ -1565,23 +1560,20 @@ def gls_fit_all_integrations(frame_time, gain_2d, gdq_cube, # Save the intercepts and cosmic-ray amplitudes for the # current integration. 
intercept_int[num_int, :, :] = intercept_sect.copy() - intercept_err_int[num_int, :, :] = \ - np.sqrt(np.abs(intercept_var_sect)) - pedestal_int[num_int, :, :] = \ - utils.gls_pedestal(first_group[:, :], - slope_int[num_int, :, :], - s_mask, - frame_time, nframes_used) + intercept_err_int[num_int, :, :] = np.sqrt(np.abs(intercept_var_sect)) + pedestal_int[num_int, :, :] = utils.gls_pedestal(first_group[:, :], + slope_int[num_int, :, :], + s_mask, + frame_time, nframes_used) ampl_int[num_int, :, :, :] = cr_sect.copy() - ampl_err_int[num_int, :, :, :] = \ - np.sqrt(np.abs(cr_var_sect)) + ampl_err_int[num_int, :, :, :] = np.sqrt(np.abs(cr_var_sect)) # Compress 4D->2D dq arrays for saturated and jump-detected # pixels pixeldq_sect = pixeldq[:, :].copy() dq_int[num_int, :, :] = \ - dq_compress_sect(gdq_cube[num_int,:,:,:], pixeldq_sect).copy() + dq_compress_sect(gdq_cube[num_int, :, :, :], pixeldq_sect).copy() dq_int[num_int, :, :] |= temp_dq temp_dq[:, :] = 0 # initialize for next integration @@ -1616,10 +1608,10 @@ def calc_power(snr): def interpolate_power(snr): pow_wt = snr.copy() * 0.0 - pow_wt[np.where(snr > 5.)] = ((snr[snr>5]-5)/(10 - 5)) * 0.6 + 0.4 - pow_wt[np.where(snr > 10.)] = ((snr[snr>10]-10)/(20 - 10)) * 2.0 + 1.0 - pow_wt[np.where(snr > 20.)] = ((snr[snr>20]-20))/(50 - 20) * 3.0 + 3.0 - pow_wt[np.where(snr > 50.)] = ((snr[snr>50] - 50))/(100 - 50) * 4.0 + 6.0 + pow_wt[np.where(snr > 5.)] = ((snr[snr > 5] - 5) / (10 - 5)) * 0.6 + 0.4 + pow_wt[np.where(snr > 10.)] = ((snr[snr > 10] - 10) / (20 - 10)) * 2.0 + 1.0 + pow_wt[np.where(snr > 20.)] = ((snr[snr > 20] - 20)) / (50 - 20) * 3.0 + 3.0 + pow_wt[np.where(snr > 50.)] = ((snr[snr > 50] - 50)) / (100 - 50) * 4.0 + 6.0 pow_wt[np.where(snr > 100.)] = 10.0 return pow_wt.ravel() @@ -1677,15 +1669,13 @@ def dq_compress_sect(gdq_sect, pixeldq_sect): dq array of data section updated with saturated and jump-detected flags """ - sat_loc_r = np.bitwise_and(gdq_sect, dqflags.group['SATURATED']) + sat_loc_r = np.bitwise_and(gdq_sect, SATURATED) sat_loc_im = np.where(sat_loc_r.sum(axis=0) > 0) - pixeldq_sect[sat_loc_im] = np.bitwise_or(pixeldq_sect[sat_loc_im], - dqflags.pixel['SATURATED']) + pixeldq_sect[sat_loc_im] = np.bitwise_or(pixeldq_sect[sat_loc_im], SATURATED) - cr_loc_r = np.bitwise_and(gdq_sect, dqflags.group['JUMP_DET']) + cr_loc_r = np.bitwise_and(gdq_sect, JUMP_DET) cr_loc_im = np.where(cr_loc_r.sum(axis=0) > 0) - pixeldq_sect[cr_loc_im] = np.bitwise_or(pixeldq_sect[cr_loc_im], - dqflags.pixel['JUMP_DET']) + pixeldq_sect[cr_loc_im] = np.bitwise_or(pixeldq_sect[cr_loc_im], JUMP_DET) return pixeldq_sect @@ -1863,7 +1853,7 @@ def calc_slope(data_sect, gdq_sect, frame_time, opt_res, save_opt, rn_sect, # of intervals of length 1 will later be flagged as too short # to fit well. 
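dq_compress_sect, patched above, collapses the group-DQ cube into the 2D pixel DQ: if any group in a pixel's ramp carries SATURATED or JUMP_DET, that bit is propagated to the pixel. A self-contained sketch (bit values assumed for illustration):

import numpy as np

SATURATED, JUMP_DET = 2, 4                       # assumed bit values
gdq_sect = np.zeros((4, 2, 2), dtype=np.uint32)  # [group, y, x]
gdq_sect[1, 0, 0] = JUMP_DET
gdq_sect[3, 1, 1] = SATURATED
pixeldq_sect = np.zeros((2, 2), dtype=np.uint32)

pixeldq_sect[np.bitwise_and(gdq_sect, SATURATED).sum(axis=0) > 0] |= SATURATED
pixeldq_sect[np.bitwise_and(gdq_sect, JUMP_DET).sum(axis=0) > 0] |= JUMP_DET
print(pixeldq_sect)
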
for ii, val in enumerate(these_p): - if (these_r[ii] != (nreads - 1)): + if these_r[ii] != (nreads - 1): end_st[end_heads[these_p[ii]], these_p[ii]] = these_r[ii] end_heads[these_p[ii]] += 1 @@ -1886,17 +1876,17 @@ def calc_slope(data_sect, gdq_sect, frame_time, opt_res, save_opt, rn_sect, break # frames >= start and <= end_st will be included in fit - mask_2d = ((arange_nreads_col >= start) & - (arange_nreads_col < - (end_st[end_heads[all_pix] - 1, all_pix] + 1))) + mask_2d = \ + ((arange_nreads_col >= start) + & (arange_nreads_col < (end_st[end_heads[all_pix] - 1, all_pix] + 1))) mask_2d[gdq_sect_r != 0] = False # RE-exclude bad group dq values # for all pixels, update arrays, summing slope and variance f_max_seg, num_seg = \ - fit_next_segment(start, end_st, end_heads, pixel_done, data_sect, - mask_2d, mask_2d_init, inv_var, num_seg, opt_res, save_opt, - rn_sect, gain_sect, ngroups, weighting, f_max_seg) + fit_next_segment(start, end_st, end_heads, pixel_done, data_sect, mask_2d, + mask_2d_init, inv_var, num_seg, opt_res, save_opt, rn_sect, + gain_sect, ngroups, weighting, f_max_seg) if f_max_seg is None: f_max_seg = 1 @@ -1992,8 +1982,8 @@ def fit_next_segment(start, end_st, end_heads, pixel_done, data_sect, mask_2d, # Compute fit quantities for the next segment of all pixels # Each returned array below is 1D, for all npix pixels for current segment - slope, intercept, variance, sig_intercept, sig_slope = fit_lines(data_sect, - mask_2d, rn_sect, gain_sect, ngroups, weighting) + slope, intercept, variance, sig_intercept, sig_slope = \ + fit_lines(data_sect, mask_2d, rn_sect, gain_sect, ngroups, weighting) end_locs = end_st[end_heads[all_pix] - 1, all_pix] @@ -2006,7 +1996,8 @@ def fit_next_segment(start, end_st, end_heads, pixel_done, data_sect, mask_2d, # Create array to set when each good pixel is classified for the current # semiramp (to enable unclassified pixels to have their arrays updated) - got_case = np.zeros((asize1*asize2), dtype=bool) + got_case = np.zeros((asize1 * asize2), dtype=bool) + # CASE A) Long enough (semiramp has >2 groups), at end of ramp # - set start to -1 to designate all fitting done @@ -2016,24 +2007,24 @@ def fit_next_segment(start, end_st, end_heads, pixel_done, data_sect, mask_2d, # For segments of this type, the final good group is the final group in the # ramp, and the variable `l_interval` used below is equal to the number of # the segment's groups minus 1. - wh_check = np.where((l_interval>1) & (end_locs==nreads-1) & (~pixel_done)) - if(len(wh_check[0]) > 0): + wh_check = np.where((l_interval > 1) & (end_locs == nreads - 1) & (~pixel_done)) + if len(wh_check[0]) > 0: these_pix = wh_check[0] start[these_pix] = -1 # all processing for this pixel is completed end_st[end_heads[these_pix] - 1, these_pix] = 0 end_heads[these_pix] = 0 pixel_done[these_pix] = True # all processing for pixel is completed - got_case[ these_pix ] = True + got_case[these_pix] = True with warnings.catch_warnings(): warnings.filterwarnings("ignore", "invalid value.*", RuntimeWarning) g_pix = these_pix[variance[these_pix] > 0.] 
# good pixels - if (len(g_pix) > 0): + if len(g_pix) > 0: inv_var[g_pix] += 1.0 / variance[g_pix] # Append results to arrays - opt_res.append_arr(num_seg, g_pix, intercept, slope, - sig_intercept, sig_slope, inv_var, save_opt) + opt_res.append_arr(num_seg, g_pix, intercept, slope, sig_intercept, + sig_slope, inv_var, save_opt) num_seg[g_pix] += 1 f_max_seg = max(f_max_seg, num_seg.max()) @@ -2048,9 +2039,9 @@ def fit_next_segment(start, end_st, end_heads, pixel_done, data_sect, mask_2d, # and/or SAT and is not the final group in the ramp, and the variable # `l_interval` used below is equal to the number of the segment's groups. wh_check = np.where((l_interval > 2) & (end_locs != nreads - 1) & ~pixel_done) - if(len(wh_check[0]) > 0): + if len(wh_check[0]) > 0: these_pix = wh_check[0] - got_case[ these_pix ] = True + got_case[these_pix] = True start[these_pix] = end_locs[these_pix] end_st[end_heads[these_pix] - 1, these_pix] = 0 @@ -2059,12 +2050,12 @@ def fit_next_segment(start, end_st, end_heads, pixel_done, data_sect, mask_2d, g_pix = these_pix[variance[these_pix] > 0.] # good pixels - if (len(g_pix) > 0): + if len(g_pix) > 0: inv_var[g_pix] += 1.0 / variance[g_pix] # Append results to arrays - opt_res.append_arr(num_seg, g_pix, intercept, slope, - sig_intercept, sig_slope, inv_var, save_opt) + opt_res.append_arr(num_seg, g_pix, intercept, slope, sig_intercept, + sig_slope, inv_var, save_opt) num_seg[g_pix] += 1 f_max_seg = max(f_max_seg, num_seg.max()) @@ -2074,22 +2065,23 @@ def fit_next_segment(start, end_st, end_heads, pixel_done, data_sect, mask_2d, c_mask_2d_init = mask_2d_init.copy() # create array: 0...nreads-1 in a column for each pixel - arr_ind_all = np.array( [np.arange(nreads),] * - c_mask_2d_init.shape[1]).transpose() - wh_c_start_all = np.zeros( c_mask_2d_init.shape[1], dtype=np.uint8) - wh_c_start_all[ g_pix ] = start[ g_pix ] + arr_ind_all = np.array( + [np.arange(nreads),] * c_mask_2d_init.shape[1]).transpose() + + wh_c_start_all = np.zeros(c_mask_2d_init.shape[1], dtype=np.uint8) + wh_c_start_all[g_pix] = start[g_pix] # set to False all groups before start group - c_mask_2d_init[ arr_ind_all < wh_c_start_all ] = False + c_mask_2d_init[arr_ind_all < wh_c_start_all] = False # select pixels having all groups False from start to ramp end - wh_rest_false = np.where( c_mask_2d_init.sum(axis=0) == 0) - if(len(wh_rest_false[0]) > 0): + wh_rest_false = np.where(c_mask_2d_init.sum(axis=0) == 0) + if len(wh_rest_false[0]) > 0: pix_rest_false = wh_rest_false[0] - start[ pix_rest_false ] = -1 - end_st[ end_heads[ pix_rest_false ] - 1, pix_rest_false ] = 0 - end_heads[ pix_rest_false ] = 0 - pixel_done[ pix_rest_false ] = True # all processing is complete + start[pix_rest_false] = -1 + end_st[end_heads[pix_rest_false] - 1, pix_rest_false] = 0 + end_heads[pix_rest_false] = 0 + pixel_done[pix_rest_false] = True # all processing is complete # CASE C) - dataset has NGROUPS=1 ; so special fitting is done for all pixels @@ -2099,23 +2091,23 @@ def fit_next_segment(start, end_st, end_heads, pixel_done, data_sect, mask_2d, # - set number of ends to 0 # - add slopes and variances to running sums # - set pixel_done to True to designate all fitting done - if (ngroups == 1): + if ngroups == 1: start[all_pix] = -1 end_st[end_heads[all_pix] - 1, all_pix] = 0 end_heads[all_pix] = 0 pixel_done[all_pix] = True wh_check = np.where(mask_2d_init[0, :] & (ramp_mask_sum == 1)) - if(len(wh_check[0]) > 0): + if len(wh_check[0]) > 0: g_pix = wh_check[0] # Ignore all pixels having no good groups (so the 
single group is bad) - if (len(g_pix) > 0): + if len(g_pix) > 0: inv_var[g_pix] += 1.0 / variance[g_pix] # Append results to arrays - opt_res.append_arr(num_seg, g_pix, intercept, slope, - sig_intercept, sig_slope, inv_var, save_opt) + opt_res.append_arr(num_seg, g_pix, intercept, slope, sig_intercept, + sig_slope, inv_var, save_opt) num_seg[g_pix] = 1 @@ -2129,18 +2121,18 @@ def fit_next_segment(start, end_st, end_heads, pixel_done, data_sect, mask_2d, # - set number of ends to 0 # - add slopes and variances to running sums # - set pixel_done to True to designate all fitting done - if (ngroups == 2): + if ngroups == 2: start[all_pix] = -1 end_st[end_heads[all_pix] - 1, all_pix] = 0 end_heads[all_pix] = 0 pixel_done[all_pix] = True g_pix = all_pix[variance[all_pix] > 0.] - if (len(g_pix) > 0): + if len(g_pix) > 0: inv_var[g_pix] += 1.0 / variance[g_pix] - opt_res.append_arr(num_seg, g_pix, intercept, slope, - sig_intercept, sig_slope, inv_var, save_opt) + opt_res.append_arr(num_seg, g_pix, intercept, slope, sig_intercept, + sig_slope, inv_var, save_opt) num_seg[g_pix] = 1 @@ -2158,26 +2150,29 @@ def fit_next_segment(start, end_st, end_heads, pixel_done, data_sect, mask_2d, # For segments of this type, the final good group is the final group in the # ramp, and the variable `l_interval` used below = 1, and the number of # groups in the segment = 2 - wh_check = np.where((l_interval == 1) & (end_locs == nreads - 1) & - (nreads > 1) & (ngroups != 2) & (~pixel_done)) + wh_check = np.where((l_interval == 1) + & (end_locs == nreads - 1) + & (nreads > 1) + & (ngroups != 2) + & (~pixel_done)) # Require that pixels to be processed here have at least 1 good group out # of the final 2 groups (these ramps have 2 groups and are at the end of # the array). wh_list = [] - if(len(wh_check[0]) > 0): + if len(wh_check[0]) > 0: num_wh = len(wh_check[0]) - for ii in range( num_wh ): # locate pixels with at least 1 good group + for ii in range(num_wh): # locate pixels with at least 1 good group this_pix = wh_check[0][ii] sum_final_2 = mask_2d_init[start[this_pix]:, this_pix].sum() if sum_final_2 > 0: - wh_list.append( wh_check[0][ii] ) # add to list to be fit + wh_list.append(wh_check[0][ii]) # add to list to be fit if len(wh_list) > 0: - these_pix = np.asarray( wh_list ) - got_case[ these_pix ] = True + these_pix = np.asarray(wh_list) + got_case[these_pix] = True start[these_pix] = -1 end_st[end_heads[these_pix] - 1, these_pix] = 0 @@ -2186,7 +2181,7 @@ def fit_next_segment(start, end_st, end_heads, pixel_done, data_sect, mask_2d, g_pix = these_pix[variance[these_pix] > 0.] 
# good pixels - if (len(g_pix) > 0): + if len(g_pix) > 0: inv_var[g_pix] += 1.0 / variance[g_pix] # Append results to arrays @@ -2212,12 +2207,14 @@ def fit_next_segment(start, end_st, end_heads, pixel_done, data_sect, mask_2d, # Copy mask, as will modify when calculating the number of later good groups c_mask_2d_init = mask_2d_init.copy() - wh_check = np.where((l_interval == 2) & ( ngroups >2 ) & - (end_locs != nreads - 1) & ~pixel_done) + wh_check = np.where((l_interval == 2) + & (ngroups > 2) + & (end_locs != nreads - 1) + & ~pixel_done) - if(len(wh_check[0]) > 0): + if len(wh_check[0]) > 0: these_pix = wh_check[0] - got_case[ these_pix ] = True + got_case[these_pix] = True # Suppress, then re-enable, harmless arithmetic warnings warnings.filterwarnings("ignore", ".*invalid value.*", RuntimeWarning) @@ -2226,31 +2223,31 @@ def fit_next_segment(start, end_st, end_heads, pixel_done, data_sect, mask_2d, warnings.resetwarnings() # create array: 0...nreads-1 in a column for each pixel - arr_ind_all = np.array([np.arange(nreads),] * - c_mask_2d_init.shape[1]).transpose() - wh_c_start_all = np.zeros( mask_2d_init.shape[1], dtype=np.uint8) - wh_c_start_all[ these_pix ] = start[ these_pix] + arr_ind_all = np.array( + [np.arange(nreads),] * c_mask_2d_init.shape[1]).transpose() + wh_c_start_all = np.zeros(mask_2d_init.shape[1], dtype=np.uint8) + wh_c_start_all[these_pix] = start[these_pix] # set to False all groups before start group - c_mask_2d_init[ arr_ind_all < wh_c_start_all ] = 0 - tot_good_groups = c_mask_2d_init.sum(axis=0 ) + c_mask_2d_init[arr_ind_all < wh_c_start_all] = 0 + tot_good_groups = c_mask_2d_init.sum(axis=0) # Select pixels having at least 2 later good groups (these later good # groups are a segment whose slope will be calculated) - wh_more = np.where( tot_good_groups[these_pix] > 1 ) - pix_more = these_pix[ wh_more ] - start[ pix_more ] = end_locs[ pix_more ] - end_st[ end_heads[ pix_more ] - 1, pix_more ] = 0 - end_heads[ pix_more ] -= 1 + wh_more = np.where(tot_good_groups[these_pix] > 1) + pix_more = these_pix[wh_more] + start[pix_more] = end_locs[pix_more] + end_st[end_heads[pix_more] - 1, pix_more] = 0 + end_heads[pix_more] -= 1 # Select pixels having less than 2 later good groups (these later good # groups will not be used) - wh_only = np.where( tot_good_groups[these_pix] <= 1 ) - pix_only = these_pix[ wh_only ] - start[ pix_only ] = -1 - end_st[ end_heads[ pix_only ] - 1, pix_only ] = 0 - end_heads[ pix_only ] = 0 - pixel_done[ pix_only ] = True # all processing for pixel is completed + wh_only = np.where(tot_good_groups[these_pix] <= 1) + pix_only = these_pix[wh_only] + start[pix_only] = -1 + end_st[end_heads[pix_only] - 1, pix_only] = 0 + end_heads[pix_only] = 0 + pixel_done[pix_only] = True # all processing for pixel is completed end_heads[(end_heads < 0.)] = 0. 
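Across all of these cases the bookkeeping reduces to a per-pixel [start, end] window of groups, maintained through the start, end_st, and end_heads arrays; the next fit then uses only groups inside the window, via the mask built near the top of calc_slope. A sketch of that mask construction with synthetic windows:

import numpy as np

nreads, npix = 6, 4
arange_nreads_col = np.repeat(np.arange(nreads)[:, None], npix, axis=1)
start = np.array([0, 2, 1, 0])  # first group of each pixel's current segment
end = np.array([5, 4, 2, 3])    # last good group of that segment

mask_2d = (arange_nreads_col >= start) & (arange_nreads_col <= end)
print(mask_2d.astype(int))      # per-pixel groups selected for the next fit
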
# Append results to arrays @@ -2268,12 +2265,12 @@ def fit_next_segment(start, end_st, end_heads, pixel_done, data_sect, mask_2d, # - set number of end to 0 # - add slopes and variances to running sums # - set pixel_done to True to designate all fitting done - wh_check = np.where(mask_2d_init[0, :] & ~mask_2d_init[1, :] & - (ramp_mask_sum == 1) & ~pixel_done) + wh_check = np.where( + mask_2d_init[0, :] & ~mask_2d_init[1, :] & (ramp_mask_sum == 1) & ~pixel_done) - if(len(wh_check[0]) > 0): + if len(wh_check[0]) > 0: these_pix = wh_check[0] - got_case[ these_pix ] = True + got_case[these_pix] = True start[these_pix] = -1 end_st[end_heads[these_pix] - 1, these_pix] = 0 @@ -2306,17 +2303,20 @@ def fit_next_segment(start, end_st, end_heads, pixel_done, data_sect, mask_2d, # - increment start array # - remove current end from end stack # - decrement number of ends - wh_check = np.where(mask_2d_init[0, :] & ~mask_2d_init[1, :] & ~pixel_done & - (end_locs==1) & (start==0)) + wh_check = np.where(mask_2d_init[0, :] + & ~mask_2d_init[1, :] + & ~pixel_done + & (end_locs == 1) + & (start == 0)) - if(len(wh_check[0]) > 0): + if len(wh_check[0]) > 0: these_pix = wh_check[0] - got_case[ these_pix ] = True - start[ these_pix ] += 1 - start[ start > nreads-1 ] = nreads - 1 # to keep at max level - end_st[ end_heads[ these_pix ] - 1, these_pix ] = 0 - end_heads[ these_pix ] -= 1 - end_heads [end_heads < 0. ] = 0. + got_case[these_pix] = True + start[these_pix] += 1 + start[start > nreads - 1] = nreads - 1 # to keep at max level + end_st[end_heads[these_pix] - 1, these_pix] = 0 + end_heads[these_pix] -= 1 + end_heads[end_heads < 0.] = 0. # CASE OTHER) - all other types of segments not covered earlier. No segments @@ -2324,11 +2324,11 @@ def fit_next_segment(start, end_st, end_heads, pixel_done, data_sect, mask_2d, # - increment start array # - remove current end from end stack # - decrement number of ends - wh_check = np.asarray( np.where( ~pixel_done & ~got_case )) - if(len(wh_check[0]) > 0): + wh_check = np.asarray(np.where(~pixel_done & ~got_case)) + if len(wh_check[0]) > 0: these_pix = wh_check[0] - start[ these_pix ] += 1 - start[ start > nreads-1 ] = nreads -1 # to keep at max level + start[these_pix] += 1 + start[start > nreads - 1] = nreads - 1 # to keep at max level end_st[end_heads[these_pix] - 1, these_pix] = 0 end_heads[these_pix] -= 1 end_heads[end_heads < 0.] = 0. @@ -2408,14 +2408,14 @@ def fit_lines(data, mask_2d, rn_sect, gain_sect, ngroups, weighting): # Calculate slopes etc. for datasets having either 1 or 2 groups per # integration, and return - if (ngroups == 1): # process all pixels in 1 group/integration dataset + if ngroups == 1: # process all pixels in 1 group/integration dataset slope_s, intercept_s, variance_s, sig_intercept_s, sig_slope_s = \ fit_1_group(slope_s, intercept_s, variance_s, sig_intercept_s, sig_slope_s, npix, data, c_mask_2d) return slope_s, intercept_s, variance_s, sig_intercept_s, sig_slope_s - if (ngroups == 2): # process all pixels in 2 group/integration dataset + if ngroups == 2: # process all pixels in 2 group/integration dataset rn_sect_1d = rn_sect.reshape(npix) slope_s, intercept_s, variance_s, sig_intercept_s, sig_slope_s = \ fit_2_group(slope_s, intercept_s, variance_s, sig_intercept_s, @@ -2430,21 +2430,22 @@ def fit_lines(data, mask_2d, rn_sect, gain_sect, ngroups, weighting): # For datasets having >2 groups/integration, for any semiramp in which the # 0th group is good and the 1st group is bad, determine whether or not to # use the 0th group. 
- wh_pix_1r = np.where(c_mask_2d[0,:] & (np.logical_not(c_mask_2d[1,:]))) + wh_pix_1r = np.where(c_mask_2d[0, :] & (np.logical_not(c_mask_2d[1, :]))) - if (len(wh_pix_1r[0]) > 0 ): + if len(wh_pix_1r[0]) > 0: slope_s, intercept_s, variance_s, sig_intercept_s, \ - sig_slope_s = fit_single_read(slope_s, intercept_s, - variance_s, sig_intercept_s, sig_slope_s, npix, data, wh_pix_1r) + sig_slope_s = fit_single_read(slope_s, intercept_s, variance_s, + sig_intercept_s, sig_slope_s, npix, + data, wh_pix_1r) del wh_pix_1r # For datasets having >2 groups/integrations, for any semiramp in which only # the 0th and 1st group are good, set slope, etc - wh_pix_2r = np.where( c_mask_2d.sum(axis=0) ==2) # ramps with 2 good groups + wh_pix_2r = np.where(c_mask_2d.sum(axis=0) == 2) # ramps with 2 good groups slope_s, intercept_s, variance_s, sig_slope_s, sig_intercept_s = \ - fit_double_read( c_mask_2d, wh_pix_2r, data_masked, slope_s, intercept_s, - variance_s, sig_slope_s, sig_intercept_s, rn_sect) + fit_double_read(c_mask_2d, wh_pix_2r, data_masked, slope_s, intercept_s, + variance_s, sig_slope_s, sig_intercept_s, rn_sect) del wh_pix_2r @@ -2464,18 +2465,17 @@ def fit_lines(data, mask_2d, rn_sect, gain_sect, ngroups, weighting): if weighting.lower() == 'optimal': # fit using optimal weighting # get sums from optimal weighting - sumx, sumxx, sumxy, sumy, nreads_wtd, xvalues = calc_opt_sums( rn_sect, - gain_sect, data_masked, c_mask_2d, xvalues, good_pix ) + sumx, sumxx, sumxy, sumy, nreads_wtd, xvalues = \ + calc_opt_sums(rn_sect, gain_sect, data_masked, c_mask_2d, xvalues, good_pix) - slope, intercept, sig_slope, sig_intercept = calc_opt_fit( nreads_wtd, - sumxx, sumx, sumxy, sumy) + slope, intercept, sig_slope, sig_intercept = \ + calc_opt_fit(nreads_wtd, sumxx, sumx, sumxy, sumy) variance = sig_slope**2. # variance due to fit values elif weighting.lower() == 'unweighted': # fit using unweighted weighting # get sums from unweighted weighting - sumx, sumxx, sumxy, sumy =\ - calc_unwtd_sums(data_masked, xvalues) + sumx, sumxx, sumxy, sumy = calc_unwtd_sums(data_masked, xvalues) slope, intercept, sig_slope, sig_intercept, line_fit =\ calc_unwtd_fit(xvalues, nreads_1d, sumxx, sumx, sumxy, sumy) @@ -2624,18 +2624,18 @@ def fit_double_read(mask_2d, wh_pix_2r, data_masked, slope_s, intercept_s, rn = rn_sect_flattened[pixel_ff] # read noise for this pixel - read_nums = np.where( mask_2d[:,pixel_ff]) + read_nums = np.where(mask_2d[:, pixel_ff]) second_read = read_nums[0][1] data_ramp = data_masked[:, pixel_ff] * mask_2d[:, pixel_ff] - data_semi = data_ramp[ mask_2d[:, pixel_ff]] # picks only the 2 + data_semi = data_ramp[mask_2d[:, pixel_ff]] # picks only the 2 diff_data = data_semi[1] - data_semi[0] - slope_s[ pixel_ff ] = diff_data - intercept_s[ pixel_ff ] = data_semi[1]*(1.- second_read) + \ - data_semi[0]*second_read # by geometry - variance_s[ pixel_ff ] = 2.0 * rn * rn + slope_s[pixel_ff] = diff_data + intercept_s[pixel_ff] = \ + data_semi[1] * (1. - second_read) + data_semi[0] * second_read # by geometry + variance_s[pixel_ff] = 2.0 * rn * rn sig_slope_s[pixel_ff] = np.sqrt(2) * rn - sig_intercept_s[ pixel_ff ] = np.sqrt(2) * rn + sig_intercept_s[pixel_ff] = np.sqrt(2) * rn return slope_s, intercept_s, variance_s, sig_slope_s, sig_intercept_s @@ -2820,7 +2820,7 @@ def fit_1_group(slope_s, intercept_s, variance_s, sig_intercept_s, # For saturated pixels, overwrite slope with benign values. 
     wh_sat0 = np.where(np.logical_not(mask_2d[0, :]))
 
-    if (len(wh_sat0[0]) > 0):
+    if len(wh_sat0[0]) > 0:
         sat_pix = wh_sat0[0]
         slope_s[sat_pix] = 0.
@@ -2883,7 +2883,7 @@ def fit_2_group(slope_s, intercept_s, variance_s, sig_intercept_s,
     # benign values to be recalculated later.
     wh_sat0 = np.where(np.logical_not(mask_2d[0, :]))
 
-    if (len(wh_sat0[0]) > 0):
+    if len(wh_sat0[0]) > 0:
         sat_pix = wh_sat0[0]
         slope_s[sat_pix] = 0.
         variance_s[sat_pix] = 0.
@@ -2902,7 +2902,7 @@ def fit_2_group(slope_s, intercept_s, variance_s, sig_intercept_s,
     # later consider refactoring.
     wh_sat1 = np.where((mask_2d[:, :].sum(axis=0) == 1) & mask_2d[0, :])
 
-    if (len(wh_sat1[0]) > 0):
+    if len(wh_sat1[0]) > 0:
         data0_slice = data[0, :, :].reshape(npix)
         slope_s[wh_sat1] = data0_slice[wh_sat1]
         # set variance non-zero because calling function uses variance=0 to
@@ -2919,7 +2919,7 @@ def fit_2_group(slope_s, intercept_s, variance_s, sig_intercept_s,
     # rate, and recalculate other fit quantities to be benign.
     wh_sat_no = np.where(mask_2d[:, :].sum(axis=0) == 2)
 
-    if (len(wh_sat_no[0]) > 0):
+    if len(wh_sat_no[0]) > 0:
         data0_slice = data[0, :, :].reshape(npix)
         data1_slice = data[1, :, :].reshape(npix)
         slope_s[wh_sat_no] = data1_slice[wh_sat_no] - data0_slice[wh_sat_no]
@@ -2936,7 +2936,7 @@ def fit_2_group(slope_s, intercept_s, variance_s, sig_intercept_s,
 
 def calc_num_seg(gdq, n_int):
     """
-    Calculate the maximum number of segments that will be be fit within an
+    Calculate the maximum number of segments that will be fit within an
     integration, calculated over all pixels and all integrations.  This value
     is based on the locations of cosmic ray-affected pixels in all of the ramps,
     and will be used to allocate arrays used for the optional output product.
@@ -2946,13 +2946,15 @@ def calc_num_seg(gdq, n_int):
     gdq : float, 3D array
         cube of GROUPDQ array for a data
 
-    n_int : int
+    n_int : int (unused)
         total number of integrations in data set
 
     Return:
     -------
-    int(max_cr) +1 : int
-        maxmimum number of segments; n CRS implies n+1 segments
+    max_num_seg : int
+        The maximum number of segments within an integration
+    max_cr : int
+        The maximum number of cosmic rays within an integration
     """
     max_cr = 0  # max number of CRS for all integrations
@@ -2966,7 +2968,7 @@ def calc_num_seg(gdq, n_int):
     # this is a MIRI dataset in which the first or last group was flagged as
     # DO_NOT_USE and also flagged as a jump.
max_num_seg = int(max_cr) + 1 # n CRS implies n+1 segments - if (max_num_seg > gdq.shape[1]): + if max_num_seg > gdq.shape[1]: max_num_seg = gdq.shape[1] return max_num_seg, max_cr @@ -3062,7 +3064,7 @@ def calc_opt_sums(rn_sect, gain_sect, data_masked, mask_2d, xvalues, good_pix): c_mask_2d = mask_2d.copy() # copy the mask to prevent propagation rn_sect = np.float32(rn_sect) # Return 'empty' sums if there is no more data to fit - if (data_masked.size == 0): + if data_masked.size == 0: return np.array([]), np.array([]), np.array([]), np.array([]),\ np.array([]), np.array([]) @@ -3087,10 +3089,10 @@ def calc_opt_sums(rn_sect, gain_sect, data_masked, mask_2d, xvalues, good_pix): ind_lastnz = 0 # Use the readnoise and gain for good pixels only - rn_sect_rav = rn_sect.flatten()[ good_pix ] + rn_sect_rav = rn_sect.flatten()[good_pix] rn_2_r = rn_sect_rav * rn_sect_rav - gain_sect_r = gain_sect.flatten()[ good_pix ] + gain_sect_r = gain_sect.flatten()[good_pix] # Calculate the sigma for nonzero gain values sigma_ir = data_final.copy() * 0.0 @@ -3103,8 +3105,8 @@ def calc_opt_sums(rn_sect, gain_sect, data_masked, mask_2d, xvalues, good_pix): with warnings.catch_warnings(): warnings.filterwarnings("ignore", "invalid value.*", RuntimeWarning) wh_pos = np.where((sqrt_arg >= 0.) & (gain_sect_r != 0.)) - numer_ir[wh_pos] = np.sqrt(rn_2_r[wh_pos] + \ - data_diff[wh_pos] * gain_sect_r[wh_pos]) + numer_ir[wh_pos] = \ + np.sqrt(rn_2_r[wh_pos] + data_diff[wh_pos] * gain_sect_r[wh_pos]) sigma_ir[wh_pos] = numer_ir[wh_pos] / gain_sect_r[wh_pos] snr = data_diff * 0. snr[wh_pos] = data_diff[wh_pos] / sigma_ir[wh_pos] @@ -3130,7 +3132,7 @@ def calc_opt_sums(rn_sect, gain_sect, data_masked, mask_2d, xvalues, good_pix): # Calculate inverse read noise^2 for use in weights # Suppress, then re-enable, harmless arithmetic warning warnings.filterwarnings("ignore", ".*divide by zero.*", RuntimeWarning) - invrdns2_r = 1./rn_2_r + invrdns2_r = 1. / rn_2_r warnings.resetwarnings() rn_sect = 0 @@ -3150,7 +3152,7 @@ def calc_opt_sums(rn_sect, gain_sect, data_masked, mask_2d, xvalues, good_pix): # For all pixels, 'roll' up the leading zeros such that the 0th group of # each pixel is the lowest nonzero group for that pixel wh_m2d_f = np.logical_not(c_mask_2d[0, :]) # ramps with initial group False - while (wh_m2d_f.sum() > 0): + while wh_m2d_f.sum() > 0: data_masked[:, wh_m2d_f] = np.roll(data_masked[:, wh_m2d_f], -1, axis=0) c_mask_2d[:, wh_m2d_f] = np.roll(c_mask_2d[:, wh_m2d_f], -1, axis=0) xvalues[:, wh_m2d_f] = np.roll(xvalues[:, wh_m2d_f], -1, axis=0)
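
A note on the arr_ind_all idiom reworked in fit_next_segment above: the
loop-free way the code blanks out every group before a pixel's start group is
to broadcast a column of group indices against the per-pixel start array. A
minimal sketch, with shapes and values invented here for illustration:

    import numpy as np

    nreads, npix = 5, 3
    c_mask_2d_init = np.ones((nreads, npix), dtype=bool)  # all groups good
    start = np.array([0, 2, 4])                  # first good group per pixel

    # column of 0..nreads-1 replicated for each pixel, as in arr_ind_all
    arr_ind_all = np.array([np.arange(nreads)] * npix).transpose()

    # one broadcast comparison blanks all groups before each pixel's start
    c_mask_2d_init[arr_ind_all < start] = False

    print(c_mask_2d_init.sum(axis=0))            # -> [5 3 1] later good groups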
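The 'unweighted' branch of fit_lines reduces to the textbook closed-form
least-squares line computed from the four accumulated sums. The pipeline's
calc_unwtd_sums and calc_unwtd_fit helpers carry extra bookkeeping, so treat
this self-contained sketch as illustrative algebra only:

    import numpy as np

    xvalues = np.arange(5, dtype=np.float64)  # group indices
    data = 2.0 * xvalues + 1.0                # noiseless ramp: slope 2, intercept 1

    n = len(xvalues)
    sumx, sumxx = xvalues.sum(), (xvalues ** 2).sum()
    sumy, sumxy = data.sum(), (xvalues * data).sum()

    denominator = n * sumxx - sumx ** 2
    slope = (n * sumxy - sumx * sumy) / denominator          # -> 2.0
    intercept = (sumxx * sumy - sumx * sumxy) / denominator  # -> 1.0
    print(slope, intercept)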
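The rule in the corrected calc_num_seg docstring — n cosmic-ray hits split a
ramp into n + 1 slope segments, capped at the number of groups — in toy form;
the JUMP_DET value and the flattened 3-D gdq shape below are assumptions made
for the sketch, not taken from the pipeline:

    import numpy as np

    JUMP_DET = 4                            # assumed example group DQ bit
    n_int, ngroups, npix = 2, 6, 4
    gdq = np.zeros((n_int, ngroups, npix), dtype=np.uint8)
    gdq[0, 2, 1] = JUMP_DET                 # 1 CR hit in pixel 1
    gdq[1, 1, 3] = gdq[1, 4, 3] = JUMP_DET  # 2 CR hits in pixel 3

    max_cr = 0
    for ii in range(n_int):                 # max CR count over all ramps
        cr_per_pix = np.count_nonzero(np.bitwise_and(gdq[ii], JUMP_DET), axis=0)
        max_cr = max(max_cr, int(cr_per_pix.max()))

    max_num_seg = min(max_cr + 1, ngroups)  # n CRs imply n + 1 segments
    print(max_num_seg, max_cr)              # -> 3 2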
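Finally, the np.roll loop at the end of calc_opt_sums 'rolls up' leading bad
groups so that each pixel's 0th group becomes its first good group. A
stripped-down sketch of the same loop (the pipeline also rolls xvalues in
step; it is assumed here that every pixel keeps at least one good group, or
the loop would not terminate):

    import numpy as np

    data = np.array([[0., 10.], [1., 11.], [2., 12.]])  # (ngroups, npix)
    mask = np.array([[False, True], [False, True], [True, True]])

    wh_f = np.logical_not(mask[0, :])       # pixels whose 0th group is bad
    while wh_f.sum() > 0:
        data[:, wh_f] = np.roll(data[:, wh_f], -1, axis=0)
        mask[:, wh_f] = np.roll(mask[:, wh_f], -1, axis=0)
        wh_f = np.logical_not(mask[0, :])

    print(data[0])                          # -> [ 2. 10.]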