From 47c4dfd2a00c1e1fd0aba57e2d7b2724968b9ce1 Mon Sep 17 00:00:00 2001
From: Tibeuleu
Date: Fri, 27 Jan 2023 17:52:52 +0100
Subject: [PATCH] add gaussian fitting for better background estimation

---
 src/FOC_reduction.py        |  13 +--
 src/comparison_Kishimoto.py |   2 +-
 src/lib/background.py       | 177 +++++++++++++++++++++++++++++++++---
 src/lib/plots.py            |  14 +--
 src/lib/reduction.py        |  35 ++++---
 5 files changed, 197 insertions(+), 44 deletions(-)

diff --git a/src/FOC_reduction.py b/src/FOC_reduction.py
index 39bc220..6476085 100755
--- a/src/FOC_reduction.py
+++ b/src/FOC_reduction.py
@@ -131,28 +131,29 @@ def main():
     display_crop = False
     # Error estimation
     error_sub_type = 'freedman-diaconis' #sqrt, sturges, rice, scott, freedman-diaconis (default) or shape (example (15,15))
+    subtract_error = False
     display_error = False
     # Data binning
     rebin = True
-    pxsize = 0.10
-    px_scale = 'arcsec' #pixel, arcsec or full
+    pxsize = 10
+    px_scale = 'pixel' #pixel, arcsec or full
     rebin_operation = 'sum' #sum or average
     # Alignement
     align_center = 'image' #If None will align image to image center
     display_data = False
     # Smoothing
     smoothing_function = 'combine' #gaussian_after, weighted_gaussian_after, gaussian, weighted_gaussian or combine
-    smoothing_FWHM = 0.20 #If None, no smoothing is done
+    smoothing_FWHM = None #If None, no smoothing is done
     smoothing_scale = 'arcsec' #pixel or arcsec
     # Rotation
     rotate_stokes = True
     rotate_data = False #rotation to North convention can give erroneous results
     # Final crop
     crop = False #Crop to desired ROI
-    final_display = False
+    final_display = True
     # Polarization map output
     figname = 'NGC1068_FOC' #target/intrument name
-    figtype = '_c_FWHM020' #additionnal informations
+    figtype = '_bin10px' #additional information
     SNRp_cut = 5. #P measurments with SNR>3
     SNRi_cut = 50. #I measurments with SNR>30, which implies an uncertainty in P of 4.7%.
     step_vec = 1 #plot all vectors in the array. if step_vec = 2, then every other vector will be plotted
@@ -173,7 +174,7 @@ def main():
     # Estimate error from data background, estimated from sub-image of desired sub_shape.
     background = None
     if px_scale.lower() not in ['full','integrate']:
-        data_array, error_array, headers, background = proj_red.get_error(data_array, headers, error_array, sub_type=error_sub_type, display=display_error, savename=figname+"_errors", plots_folder=plots_folder, return_background=True)
+        data_array, error_array, headers, background = proj_red.get_error(data_array, headers, error_array, sub_type=error_sub_type, subtract_error=subtract_error, display=display_error, savename=figname+"_errors", plots_folder=plots_folder, return_background=True)
 
     # Align and rescale images with oversampling.
     data_array, error_array, headers, data_mask = proj_red.align_data(data_array, headers, error_array=error_array, background=background, upsample_factor=10, ref_center=align_center, return_shifts=False)

diff --git a/src/comparison_Kishimoto.py b/src/comparison_Kishimoto.py
index b3dc519..b06cb1c 100755
--- a/src/comparison_Kishimoto.py
+++ b/src/comparison_Kishimoto.py
@@ -70,7 +70,7 @@
 im0 = ax.imshow(data_S['I']*convert_flux,norm=LogNorm(data_S['I'][data_S['I']>0]
 #im0 = ax.imshow(data_S['P']*100.,vmin=0.,vmax=100.,origin='lower',cmap='inferno',label=r"$P$ through this pipeline")
 #im0 = ax.imshow(data_K['PA'],vmin=0.,vmax=360.,origin='lower',cmap='inferno',label=r"$\theta_P$ through Kishimoto's pipeline")
 #im0 = ax.imshow(data_S['PA'],vmin=0.,vmax=360.,origin='lower',cmap='inferno',label=r"$\theta_P$ through this pipeline")
-quiv0 = ax.quiver(data_S['X'],data_S['Y'],data_S['xy_U'],data_S['xy_V'],units='xy',angles='uv',scale=0.5,scale_units='xy',pivot='mid',headwidth=0.,headlength=0.,headaxislength=0.,width=0.1,color='b',alpha=0.75, label="PA through this pipeline")
+quiv0 = ax.quiver(data_S['X'],data_S['Y'],data_S['xy_U'],data_S['xy_V'],units='xy',angles='uv',scale=0.5,scale_units='xy',pivot='mid',headwidth=0.,headlength=0.,headaxislength=0.,width=0.2,color='b',alpha=0.75, label="PA through this pipeline")
 quiv1 = ax.quiver(data_K['X'],data_K['Y'],data_K['xy_U'],data_K['xy_V'],units='xy',angles='uv',scale=0.5,scale_units='xy',pivot='mid',headwidth=0.,headlength=0.,headaxislength=0.,width=0.1,color='r',alpha=0.75, label="PA through Kishimoto's pipeline")
 ax.set_title(r"$SNR_P \geq 5 \; & \; SNR_I \geq 30$")

diff --git a/src/lib/background.py b/src/lib/background.py
index 1c7b98f..436ffdc 100644
--- a/src/lib/background.py
+++ b/src/lib/background.py
@@ -2,7 +2,7 @@
 Library function for background estimation steps of the reduction pipeline.
 
 prototypes :
-    - display_bkg(data, background, std_bkg, headers, histograms, bin_centers, rectangle, savename, plots_folder)
+    - display_bkg(data, background, std_bkg, headers, histograms, binning, rectangle, savename, plots_folder)
         Display and save how the background noise is computed.
     - bkg_hist(data, error, mask, headers, sub_shape, display, savename, plots_folder) -> n_data_array, n_error_array, headers, background)
         Compute the error (noise) of the input array by computing the base mode of each image.
@@ -18,8 +18,20 @@
 from matplotlib.colors import LogNorm
 from matplotlib.patches import Rectangle
 from datetime import datetime
 from lib.plots import plot_obs
+from scipy.optimize import curve_fit
 
-def display_bkg(data, background, std_bkg, headers, histograms=None, bin_centers=None, rectangle=None, savename=None, plots_folder="./"):
+def gauss(x, *p):
+    N, mu, sigma = p
+    return N*np.exp(-(x-mu)**2/(2.*sigma**2))
+
+def gausspol(x, *p):
+    N, mu, sigma, a, b, c, d = p
+    return N*np.exp(-(x-mu)**2/(2.*sigma**2)) + a*np.log(x) + b/x + c*x + d
+
+def bin_centers(edges):
+    return (edges[1:]+edges[:-1])/2.
+
+def display_bkg(data, background, std_bkg, headers, histograms=None, binning=None, coeff=None, rectangle=None, savename=None, plots_folder="./"):
     plt.rcParams.update({'font.size': 15})
     convert_flux = np.array([head['photflam'] for head in headers])
     date_time = np.array([headers[i]['date-obs']+';'+headers[i]['time-obs']
@@ -52,12 +64,15 @@
     if not(histograms is None):
         filt_obs = {"POL0":0, "POL60":0, "POL120":0}
         fig_h, ax_h = plt.subplots(figsize=(10,6), constrained_layout=True)
-        for i, (hist, bins) in enumerate(zip(histograms, bin_centers)):
+        for i, (hist, bins) in enumerate(zip(histograms, binning)):
             filt_obs[headers[i]['filtnam1']] += 1
             ax_h.plot(bins,hist,'+',color="C{0:d}".format(i),alpha=0.8,label=headers[i]['filtnam1']+' (Obs '+str(filt_obs[headers[i]['filtnam1']])+')')
             ax_h.plot([background[i],background[i]],[hist.min(), hist.max()],'x--',color="C{0:d}".format(i),alpha=0.8)
+            if not(coeff is None):
+                ax_h.plot(bins,gausspol(bins,*coeff[i]),'--',color="C{0:d}".format(i),alpha=0.8)
         ax_h.set_xscale('log')
-        ax_h.set_xlim([background.mean()*1e-2,background.mean()*1e2])
+        ax_h.set_ylim([0.,np.max([hist.max() for hist in histograms])])
+        ax_h.set_xlim([np.min(background)*1e-2,np.max(background)*1e2])
         ax_h.set_xlabel(r"Count rate [$s^{-1}$]")
         ax_h.set_ylabel(r"Number of pixels in bin")
         ax_h.set_title("Histogram for each observation")
@@ -97,8 +112,133 @@
     plt.show()
 
 
+def sky_part(img):
+    rand_ind = np.unique((np.random.rand(np.floor(img.size/4).astype(int))*2*img.size).astype(int)%img.size)
+    rand_pix = img.flatten()[rand_ind]
+    # Intensity range
+    sky_med = np.median(rand_pix)
+    sig = np.min([img[img<sky_med].std(), img[img>sky_med].std()])
+    sky_range = [sky_med-2.*sig, sky_med+sig]
-def bkg_hist(data, error, mask, headers, sub_type=None, display=False, savename=None, plots_folder=""):
+    sky = img[np.logical_and(img>=sky_range[0],img<=sky_range[1])]
+    return sky, sky_range
+
+def bkg_estimate(img, bins=None, chi2=None, coeff=None):
+    if bins is None or chi2 is None or coeff is None:
+        bins, chi2, coeff = [8], [], []
+    else:
+        try:
+            bins.append(int(3./2.*bins[-1]))
+        except IndexError:
+            bins, chi2, coeff = [8], [], []
+    hist, bin_edges = np.histogram(img[img>0], bins=bins[-1])
+    binning = bin_centers(bin_edges)
+    peak = binning[np.argmax(hist)]
+    bins_fwhm = binning[hist>hist.max()/2.]
+    fwhm = bins_fwhm[-1]-bins_fwhm[0]
+    p0 = [hist.max(), peak, fwhm, 1e-3, 1e-3, 1e-3, 1e-3]
+    try:
+        popt, pcov = curve_fit(gausspol, binning, hist, p0=p0)
+    except RuntimeError:
+        popt = p0
+    chi2.append(np.sum((hist - gausspol(binning,*popt))**2)/hist.size)
+    coeff.append(popt)
+    return bins, chi2, coeff
+
+def bkg_fit(data, error, mask, headers, subtract_error=True, display=False, savename=None, plots_folder=""):
+    """
+    ----------
+    Inputs:
+    data : numpy.ndarray
+        Array containing the data to study (2D float arrays).
+    error : numpy.ndarray
+        Array of images (2D floats, aligned and of the same shape) containing
+        the error in each pixel of the observation images in data_array.
+    mask : numpy.ndarray
+        2D boolean array delimiting the data to work on.
+    headers : header list
+        Headers associated with the images in data_array.
+    subtract_error : boolean, optional
+        If True, the estimated background is subtracted from the data.
+        Defaults to True.
+    display : boolean, optional
+        If True, data_array will be displayed with a rectangle around the
+        sub-image selected for background computation.
+        Defaults to False.
+    savename : str, optional
+        Name of the figure the map should be saved to. If None, the map won't
+        be saved (only displayed). Only used if display is True.
+        Defaults to None.
+    plots_folder : str, optional
+        Relative (or absolute) filepath to the folder in which the map will
+        be saved. Not used if savename is None.
+        Defaults to current folder.
+    ----------
+    Returns:
+    data_array : numpy.ndarray
+        Array containing the data to study, minus the background when subtract_error is True.
+    error_array : numpy.ndarray
+        Array containing the updated errors on the images in data_array,
+        including the background contribution.
+    headers : header list
+        Updated headers associated with the images in data_array.
+    background : numpy.ndarray
+        Array containing the pixel background value for each image in
+        data_array.
+    """
+    n_data_array, n_error_array = deepcopy(data), deepcopy(error)
+    error_bkg = np.ones(n_data_array.shape)
+    std_bkg = np.zeros((data.shape[0]))
+    background = np.zeros((data.shape[0]))
+    histograms, binning, coeffs = [], [], []
+
+    for i, image in enumerate(data):
+        #Compute the Count-rate histogram for the image
+        sky, sky_range = sky_part(image[image>0.])
+
+        bins, chi2, coeff = bkg_estimate(sky)
+        while bins[-1]<256:
+            bins, chi2, coeff = bkg_estimate(sky, bins, chi2, coeff)
+        hist, bin_edges = np.histogram(sky, bins=bins[-1])
+        histograms.append(hist)
+        binning.append(bin_centers(bin_edges))
+        #Keep one fit per observation (the one at the finest binning, matching
+        #the stored histogram) so display_bkg can overlay it
+        coeffs.append(coeff[-1])
+        chi2, coeff = np.array(chi2), np.array(coeff)
+        weights = 1/chi2**2
+        weights /= weights.sum()
+
+        bkg = np.sum(weights*coeff[:,1])
+
+        error_bkg[i] *= bkg
+
+        # Quadratically add uncertainties in the "correction factors" (see Kishimoto 1999)
+        #wavelength dependence of the polariser filters
+        #estimated to less than 1%
+        err_wav = data[i]*0.01
+        #difference in PSFs through each polarizer
+        #estimated to less than 3%
+        err_psf = data[i]*0.03
+        #flatfielding uncertainties
+        #estimated to less than 3%
+        err_flat = data[i]*0.03
+
+        n_error_array[i] = np.sqrt(n_error_array[i]**2 + error_bkg[i]**2 + err_wav**2 + err_psf**2 + err_flat**2)
+
+        #Subtract background
+        if subtract_error:
+            n_data_array[i][mask] = n_data_array[i][mask] - bkg
+            n_data_array[i][np.logical_and(mask,n_data_array[i] <= 0.01*bkg)] = 0.01*bkg
+
+        std_bkg[i] = image[np.abs(image-bkg)/bkg<1.].std()
+        background[i] = bkg
+
+    if display:
+        display_bkg(data, background, std_bkg, headers, histograms=histograms, binning=binning, coeff=coeffs, savename=savename, plots_folder=plots_folder)
+    return n_data_array, n_error_array, headers, background
+
+
+def bkg_hist(data, error, mask, headers, sub_type=None, subtract_error=True, display=False, savename=None, plots_folder=""):
     """
     ----------
     Inputs:
     data : numpy.ndarray
@@ -144,7 +284,7 @@
     error_bkg = np.ones(n_data_array.shape)
     std_bkg = np.zeros((data.shape[0]))
     background = np.zeros((data.shape[0]))
-    histograms, bin_centers = [], []
+    histograms, binning, coeff = [], [], []
 
     for i, image in enumerate(data):
         #Compute the Count-rate histogram for the image
@@ -167,10 +307,20 @@
         hist, bin_edges = np.histogram(np.log(image[n_mask]),bins=n_bins)
         histograms.append(hist)
-        bin_centers.append(np.exp((bin_edges[:-1]+bin_edges[1:])/2))
+        binning.append(np.exp(bin_centers(bin_edges)))
+
         #Take the background as the count-rate with the maximum number of pixels
-        hist_max = bin_centers[-1][np.argmax(hist)]
-        bkg = np.sqrt(np.sum(image[np.abs(image-hist_max)/hist_max<0.5]**2)/image[np.abs(image-hist_max)/hist_max<0.5].size)
+        #hist_max = binning[-1][np.argmax(hist)]
+        #bkg = np.sqrt(np.sum(image[np.abs(image-hist_max)/hist_max<0.5]**2)/image[np.abs(image-hist_max)/hist_max<0.5].size)
+
+        #Fit a gaussian to the log-intensity histogram
+        bins_fwhm = binning[-1][hist>hist.max()/2.]
+        fwhm = bins_fwhm[-1]-bins_fwhm[0]
+        p0 = [hist.max(), binning[-1][np.argmax(hist)], fwhm, 1e-3, 1e-3, 1e-3, 1e-3]
+        popt, pcov = curve_fit(gausspol, binning[-1], hist, p0=p0)
+        coeff.append(popt)
+        bkg = popt[1]
+
         error_bkg[i] *= bkg
 
@@ -187,18 +337,19 @@
         n_error_array[i] = np.sqrt(n_error_array[i]**2 + error_bkg[i]**2 + err_wav**2 + err_psf**2 + err_flat**2)
 
         #Substract background
-        n_data_array[i][mask] = n_data_array[i][mask] - bkg
-        n_data_array[i][np.logical_and(mask,n_data_array[i] <= 0.01*bkg)] = 0.01*bkg
+        if subtract_error:
+            n_data_array[i][mask] = n_data_array[i][mask] - bkg
+            n_data_array[i][np.logical_and(mask,n_data_array[i] <= 0.01*bkg)] = 0.01*bkg
 
         std_bkg[i] = image[np.abs(image-bkg)/bkg<1.].std()
         background[i] = bkg
 
     if display:
-        display_bkg(data, background, std_bkg, headers, histograms=histograms, bin_centers=bin_centers, savename=savename, plots_folder=plots_folder)
+        display_bkg(data, background, std_bkg, headers, histograms=histograms, binning=binning, coeff=coeff, savename=savename, plots_folder=plots_folder)
     return n_data_array, n_error_array, headers, background
 
 
-def bkg_mini(data, error, mask, headers, sub_shape=(15,15), display=False, savename=None, plots_folder=""):
+def bkg_mini(data, error, mask, headers, sub_shape=(15,15), subtract_error=True, display=False, savename=None, plots_folder=""):
     """
     Look for sub-image of shape sub_shape that have the smallest integrated
     flux (no source assumption) and define the background on the image by the
@@ -290,8 +441,9 @@
         n_error_array[i] = np.sqrt(n_error_array[i]**2 + error_bkg[i]**2 + err_wav**2 + err_psf**2 + err_flat**2)
 
         #Substract background
-        n_data_array[i][mask] = n_data_array[i][mask] - bkg
-        n_data_array[i][np.logical_and(mask,n_data_array[i] <= 0.01*bkg)] = 0.01*bkg
+        if subtract_error:
+            n_data_array[i][mask] = n_data_array[i][mask] - bkg
+            n_data_array[i][np.logical_and(mask,n_data_array[i] <= 0.01*bkg)] = 0.01*bkg
 
         std_bkg[i] = image[np.abs(image-bkg)/bkg<1.].std()
         background[i] = bkg

diff --git a/src/lib/plots.py b/src/lib/plots.py
index 36f9e90..edfaf47 100755
--- a/src/lib/plots.py
+++ b/src/lib/plots.py
@@ -309,7 +309,7 @@ def polarization_map(Stokes, data_mask=None, rectangle=None, SNRp_cut=3., SNRi_c
     if display.lower() in ['intensity']:
         # If no display selected, show intensity map
         display='i'
-        vmin, vmax = np.min(stkI.data[mask]*convert_flux)/5., np.max(stkI.data[stkI.data > 0.]*convert_flux)
+        vmin, vmax = np.max(np.sqrt(stk_cov.data[0,0][mask])*convert_flux), np.max(stkI.data[stkI.data > 0.]*convert_flux)
         im = ax.imshow(stkI.data*convert_flux, norm=LogNorm(vmin,vmax), aspect='equal', cmap='inferno', alpha=1.)
         cbar = plt.colorbar(im, cax=cbar_ax, label=r"$F_{\lambda}$ [$ergs \cdot cm^{-2} \cdot s^{-1} \cdot \AA^{-1}$]")
         levelsI = np.linspace(vmax*0.01, vmax*0.99, 10)
@@ -320,7 +320,7 @@ def polarization_map(Stokes, data_mask=None, rectangle=None, SNRp_cut=3., SNRi_c
         # Display polarisation flux
         display='pf'
         pf_mask = (stkI.data > 0.) * (pol.data > 0.)
-        vmin, vmax = np.min(stkI.data[mask]*convert_flux)/5., np.max(stkI.data[stkI.data > 0.]*convert_flux)
+        vmin, vmax = np.max(np.sqrt(stk_cov.data[0,0][mask])*convert_flux), np.max(stkI.data[stkI.data > 0.]*convert_flux)
         im = ax.imshow(stkI.data*convert_flux*pol.data, norm=LogNorm(vmin,vmax), aspect='equal', cmap='inferno', alpha=1.)
         cbar = plt.colorbar(im, cax=cbar_ax, label=r"$F_{\lambda} \cdot P$ [$ergs \cdot cm^{-2} \cdot s^{-1} \cdot \AA^{-1}$]")
         levelsPf = np.linspace(vmax*0.01, vmax*0.99, 10)
@@ -1736,21 +1736,17 @@ class pol_map(object):
             self.display_selection = "total_flux"
         if self.display_selection.lower() in ['total_flux']:
             self.data = self.I*self.convert_flux
-            try:
-                vmin, vmax = np.min(self.data[self.cut])/5., np.max(self.data[self.data > 0.])
-            except ValueError:
-                vmax = np.max(self.data[self.data > 0.])
-                vmin = vmax*1e-3
+            vmin, vmax = np.max(np.sqrt(self.IQU_cov[0,0][self.cut])*self.convert_flux), np.max(self.data[self.data > 0.])
             norm = LogNorm(vmin, vmax)
             label = r"$F_{\lambda}$ [$ergs \cdot cm^{-2} \cdot s^{-1} \cdot \AA^{-1}$]"
         elif self.display_selection.lower() in ['pol_flux']:
             self.data = self.I*self.convert_flux*self.P
-            vmin, vmax = np.min(self.I[self.cut]*self.convert_flux)/5., np.max(self.I[self.data > 0.]*self.convert_flux)
+            vmin, vmax = np.max(np.sqrt(self.IQU_cov[0,0][self.cut])*self.convert_flux), np.max(self.I[self.data > 0.]*self.convert_flux)
             norm = LogNorm(vmin, vmax)
             label = r"$F_{\lambda} \cdot P$ [$ergs \cdot cm^{-2} \cdot s^{-1} \cdot \AA^{-1}$]"
         elif self.display_selection.lower() in ['pol_deg']:
             self.data = self.P*100.
-            vmin, vmax = 0., np.max(self.data[self.data > 0.])
+            vmin, vmax = 0., 100. #np.max(self.data[self.data > 0.])
             label = r"$P$ [%]"
         elif self.display_selection.lower() in ['pol_ang']:
             self.data = princ_angle(self.PA)

diff --git a/src/lib/reduction.py b/src/lib/reduction.py
index 1c3f1b5..c3c6215 100755
--- a/src/lib/reduction.py
+++ b/src/lib/reduction.py
@@ -53,7 +53,7 @@ log.setLevel('ERROR')
 import warnings
 from lib.deconvolve import deconvolve_im, gaussian_psf, gaussian2d, zeropad
 from lib.convex_hull import image_hull, clean_ROI
-from lib.background import bkg_hist, bkg_mini
+from lib.background import bkg_fit, bkg_hist, bkg_mini
 from lib.plots import plot_obs
 from lib.cross_correlation import phase_cross_correlation
 
@@ -409,8 +409,8 @@ def deconvolve_array(data_array, headers, psf='gaussian', FWHM=1., scale='px',
 
 def get_error(data_array, headers, error_array=None, data_mask=None,
-              sub_type=None, display=False, savename=None, plots_folder="",
-              return_background=False):
+              sub_type=None, subtract_error=True, display=False, savename=None,
+              plots_folder="", return_background=False):
     """
     Look for sub-image of shape sub_shape that have the smallest integrated
     flux (no source assumption) and define the background on the image by the
@@ -431,10 +431,11 @@ def get_error(data_array, headers, error_array=None, data_mask=None,
         If None, will be initialized with a full true mask.
         Defaults to None.
     sub_type : str or int or tuple, optional
-        If str, statistic rule to be used for the number of bins in counts/s.
+        If 'auto', search for an optimal binning and fit the intensity histogram with a gaussian.
+        If str or None, statistic rule to be used for the number of bins in counts/s.
         If int, number of bins for the counts/s histogram.
         If tuple, shape of the sub-image to look for. Must be odd.
-        Defaults to "Freedman-Diaconis".
+        Defaults to None.
     display : boolean, optional
         If True, data_array will be displayed with a rectangle around the
         sub-image selected for background computation.
@@ -468,18 +469,26 @@
     # Crop out any null edges
     if error_array is None:
         error_array = np.zeros(data_array.shape)
-    n_data_array, n_error_array = deepcopy(data_array), deepcopy(error_array)
+    data, error = deepcopy(data_array), deepcopy(error_array)
     if not data_mask is None:
-        data, error, mask = n_data_array, n_error_array, deepcopy(data_mask)
+        mask = deepcopy(data_mask)
     else:
-        data, error, _ = crop_array(n_data_array, headers, n_error_array, step=5, null_val=0., inside=False)
-        mask = np.ones(data[0].shape, dtype=bool)
+        data_c, error_c, _ = crop_array(data, headers, error, step=5, null_val=0., inside=False)
+        mask_c = np.ones(data_c[0].shape, dtype=bool)
+        for i,(data_ci, error_ci) in enumerate(zip(data_c, error_c)):
+            data[i], error[i] = zeropad(data_ci, data[i].shape), zeropad(error_ci, error[i].shape)
+        mask = zeropad(mask_c, data[0].shape).astype(bool)
 
     background = np.zeros((data.shape[0]))
 
-    if (sub_type is None) or (type(sub_type)==str):
-        n_data_array, n_error_array, headers, background = bkg_hist(data, error, mask, headers, sub_type=sub_type, display=display, savename=savename, plots_folder=plots_folder)
+    if (sub_type is None):
+        n_data_array, n_error_array, headers, background = bkg_hist(data, error, mask, headers, subtract_error=subtract_error, display=display, savename=savename, plots_folder=plots_folder)
+    elif type(sub_type)==str:
+        if sub_type.lower() in ['auto']:
+            n_data_array, n_error_array, headers, background = bkg_fit(data, error, mask, headers, subtract_error=subtract_error, display=display, savename=savename, plots_folder=plots_folder)
+        else:
+            n_data_array, n_error_array, headers, background = bkg_hist(data, error, mask, headers, sub_type=sub_type, subtract_error=subtract_error, display=display, savename=savename, plots_folder=plots_folder)
     elif type(sub_type)==tuple:
-        n_data_array, n_error_array, headers, background = bkg_mini(data, error, mask, headers, sub_shape=sub_type, display=display, savename=savename, plots_folder=plots_folder)
+        n_data_array, n_error_array, headers, background = bkg_mini(data, error, mask, headers, sub_shape=sub_type, subtract_error=subtract_error, display=display, savename=savename, plots_folder=plots_folder)
     else:
         print("Warning: Invalid subtype.")
@@ -674,7 +683,7 @@ def align_data(data_array, headers, error_array=None, background=None,
         raise ValueError("All images in data_array must have same shape as\
 ref_data")
    if (error_array is None) or (background is None):
-        _, error_array, headers, background = get_error_hist(data_array, headers, return_background=True)
+        _, error_array, headers, background = get_error(data_array, headers, sub_type=(10,10), return_background=True)
 
     # Crop out any null edges
     #(ref_data must be cropped as well)
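
Note on the estimator this patch introduces: the new code histograms the sky pixels, fits the gausspol model (a Gaussian peak plus a smooth a*log(x) + b/x + c*x + d pedestal for residual source flux) and reads the background level off the fitted Gaussian mean mu, as bkg_hist and bkg_fit above do. The following standalone sketch reproduces that idea outside the pipeline; it assumes only numpy and scipy, and the synthetic pixel distribution and variable names are illustrative, not taken from the repository.

import numpy as np
from scipy.optimize import curve_fit

def gausspol(x, *p):
    # Gaussian peak plus a smooth pedestal, same model as in src/lib/background.py
    N, mu, sigma, a, b, c, d = p
    return N*np.exp(-(x-mu)**2/(2.*sigma**2)) + a*np.log(x) + b/x + c*x + d

rng = np.random.default_rng(0)
# Synthetic count rates: a dominant sky population near 1.0 plus a faint source tail
pixels = np.concatenate([rng.normal(1.0, 0.05, 20000), 1.0 + rng.exponential(0.5, 500)])
pixels = pixels[pixels > 0.]

hist, edges = np.histogram(pixels, bins=64)
centers = (edges[1:] + edges[:-1])/2.

# Initial guess from the histogram peak and its FWHM, mirroring bkg_estimate()
peak = centers[np.argmax(hist)]
above_half = centers[hist > hist.max()/2.]
p0 = [hist.max(), peak, max(above_half[-1]-above_half[0], edges[1]-edges[0]), 1e-3, 1e-3, 1e-3, 1e-3]

try:
    popt, _ = curve_fit(gausspol, centers, hist, p0=p0)
except RuntimeError:
    popt = p0  # fall back on the initial guess if the fit does not converge

print("background estimate (fitted Gaussian mean):", popt[1])

In the patch itself, bkg_estimate() repeats this fit over a ladder of bin counts (growing by 3/2 from 8 until 256) and bkg_fit() then combines the fitted means with 1/chi2**2 weights, so the final background value does not hinge on a single binning choice.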