add gaussian fitting for better background estimation

This commit is contained in:
Tibeuleu
2023-01-27 17:52:52 +01:00
parent c0083c97b6
commit 47c4dfd2a0
5 changed files with 197 additions and 44 deletions

View File

@@ -131,28 +131,29 @@ def main():
     display_crop = False
     # Error estimation
     error_sub_type = 'freedman-diaconis' #sqrt, sturges, rice, scott, freedman-diaconis (default) or shape (example (15,15))
+    subtract_error = False
     display_error = False
     # Data binning
     rebin = True
-    pxsize = 0.10
-    px_scale = 'arcsec' #pixel, arcsec or full
+    pxsize = 10
+    px_scale = 'pixel' #pixel, arcsec or full
     rebin_operation = 'sum' #sum or average
     # Alignment
     align_center = 'image' #If None, will align images to the image center
     display_data = False
     # Smoothing
     smoothing_function = 'combine' #gaussian_after, weighted_gaussian_after, gaussian, weighted_gaussian or combine
-    smoothing_FWHM = 0.20 #If None, no smoothing is done
+    smoothing_FWHM = None #If None, no smoothing is done
     smoothing_scale = 'arcsec' #pixel or arcsec
     # Rotation
     rotate_stokes = True
     rotate_data = False #rotation to North convention can give erroneous results
     # Final crop
     crop = False #Crop to desired ROI
-    final_display = False
+    final_display = True
     # Polarization map output
     figname = 'NGC1068_FOC' #target/instrument name
-    figtype = '_c_FWHM020' #additional information
+    figtype = '_bin10px' #additional information
     SNRp_cut = 5. #P measurements with SNR > 5
     SNRi_cut = 50. #I measurements with SNR > 50, which implies an uncertainty in P of about 2.8%.
     step_vec = 1 #plot all vectors in the array; if step_vec = 2, every other vector is plotted
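For reference, the percentage quoted in the SNRi_cut comment follows from the rough photon-noise relation sigma_P ≈ sqrt(2)/SNR_I — an assumption on our part, but it reproduces the 4.7% figure previously quoted for SNR_I > 30:

import numpy as np
for snr_i in (30, 50):
    # rough 1-sigma uncertainty on P from the intensity SNR alone
    print(snr_i, np.sqrt(2)/snr_i)   # -> 0.047 (4.7%) and 0.028 (2.8%)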
@@ -173,7 +174,7 @@ def main():
     # Estimate error from data background, estimated from sub-image of desired sub_shape.
     background = None
     if px_scale.lower() not in ['full','integrate']:
-        data_array, error_array, headers, background = proj_red.get_error(data_array, headers, error_array, sub_type=error_sub_type, display=display_error, savename=figname+"_errors", plots_folder=plots_folder, return_background=True)
+        data_array, error_array, headers, background = proj_red.get_error(data_array, headers, error_array, sub_type=error_sub_type, subtract_error=subtract_error, display=display_error, savename=figname+"_errors", plots_folder=plots_folder, return_background=True)

     # Align and rescale images with oversampling.
     data_array, error_array, headers, data_mask = proj_red.align_data(data_array, headers, error_array=error_array, background=background, upsample_factor=10, ref_center=align_center, return_shifts=False)

View File

@@ -70,7 +70,7 @@ im0 = ax.imshow(data_S['I']*convert_flux,norm=LogNorm(data_S['I'][data_S['I']>0]
 #im0 = ax.imshow(data_S['P']*100.,vmin=0.,vmax=100.,origin='lower',cmap='inferno',label=r"$P$ through this pipeline")
 #im0 = ax.imshow(data_K['PA'],vmin=0.,vmax=360.,origin='lower',cmap='inferno',label=r"$\theta_P$ through Kishimoto's pipeline")
 #im0 = ax.imshow(data_S['PA'],vmin=0.,vmax=360.,origin='lower',cmap='inferno',label=r"$\theta_P$ through this pipeline")
-quiv0 = ax.quiver(data_S['X'],data_S['Y'],data_S['xy_U'],data_S['xy_V'],units='xy',angles='uv',scale=0.5,scale_units='xy',pivot='mid',headwidth=0.,headlength=0.,headaxislength=0.,width=0.1,color='b',alpha=0.75, label="PA through this pipeline")
+quiv0 = ax.quiver(data_S['X'],data_S['Y'],data_S['xy_U'],data_S['xy_V'],units='xy',angles='uv',scale=0.5,scale_units='xy',pivot='mid',headwidth=0.,headlength=0.,headaxislength=0.,width=0.2,color='b',alpha=0.75, label="PA through this pipeline")
 quiv1 = ax.quiver(data_K['X'],data_K['Y'],data_K['xy_U'],data_K['xy_V'],units='xy',angles='uv',scale=0.5,scale_units='xy',pivot='mid',headwidth=0.,headlength=0.,headaxislength=0.,width=0.1,color='r',alpha=0.75, label="PA through Kishimoto's pipeline")
 ax.set_title(r"$SNR_P \geq 5 \; & \; SNR_I \geq 30$")

View File

@@ -2,7 +2,7 @@
 Library functions for the background estimation steps of the reduction pipeline.
 prototypes :
-    - display_bkg(data, background, std_bkg, headers, histograms, bin_centers, rectangle, savename, plots_folder)
+    - display_bkg(data, background, std_bkg, headers, histograms, binning, rectangle, savename, plots_folder)
         Display and save how the background noise is computed.
     - bkg_hist(data, error, mask, headers, sub_shape, display, savename, plots_folder) -> n_data_array, n_error_array, headers, background
         Compute the error (noise) of the input array by computing the base mode of each image.
@@ -18,8 +18,20 @@ from matplotlib.colors import LogNorm
 from matplotlib.patches import Rectangle
 from datetime import datetime
 from lib.plots import plot_obs
+from scipy.optimize import curve_fit
+
+
+def gauss(x, *p):
+    N, mu, sigma = p
+    return N*np.exp(-(x-mu)**2/(2.*sigma**2))
+
+
+def gausspol(x, *p):
+    N, mu, sigma, a, b, c, d = p
+    return N*np.exp(-(x-mu)**2/(2.*sigma**2)) + a*np.log(x) + b/x + c*x + d
+
+
+def bin_centers(edges):
+    return (edges[1:]+edges[:-1])/2.
+
+
-def display_bkg(data, background, std_bkg, headers, histograms=None, bin_centers=None, rectangle=None, savename=None, plots_folder="./"):
+def display_bkg(data, background, std_bkg, headers, histograms=None, binning=None, coeff=None, rectangle=None, savename=None, plots_folder="./"):
     plt.rcParams.update({'font.size': 15})
     convert_flux = np.array([head['photflam'] for head in headers])
     date_time = np.array([headers[i]['date-obs']+';'+headers[i]['time-obs']
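As a hedged aside (not part of the commit itself): the gausspol model above is a gaussian sitting on a smooth a*log(x) + b/x + c*x + d baseline, fit to a count-rate histogram with scipy's curve_fit; the background estimate is then the gaussian mean popt[1]. A minimal, self-contained sketch on synthetic data, restating gausspol from the hunk above:

import numpy as np
from scipy.optimize import curve_fit

def gausspol(x, *p):
    N, mu, sigma, a, b, c, d = p
    return N*np.exp(-(x-mu)**2/(2.*sigma**2)) + a*np.log(x) + b/x + c*x + d

rng = np.random.default_rng(0)
counts = rng.normal(1.0, 0.1, 10000)               # synthetic sky count rates
hist, edges = np.histogram(counts[counts > 0], bins=64)
centers = (edges[1:] + edges[:-1])/2.

# initial guess: peak position and FWHM read off the histogram, as in the commit
peak = centers[np.argmax(hist)]
above_half = centers[hist > hist.max()/2.]
p0 = [hist.max(), peak, above_half[-1]-above_half[0], 1e-3, 1e-3, 1e-3, 1e-3]
popt, pcov = curve_fit(gausspol, centers, hist, p0=p0)
print("background estimate:", popt[1])             # ~1.0 for this sample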
@@ -52,12 +64,15 @@ def display_bkg(data, background, std_bkg, headers, histograms=None, bin_centers
     if not(histograms is None):
         filt_obs = {"POL0":0, "POL60":0, "POL120":0}
         fig_h, ax_h = plt.subplots(figsize=(10,6), constrained_layout=True)
-        for i, (hist, bins) in enumerate(zip(histograms, bin_centers)):
+        for i, (hist, bins) in enumerate(zip(histograms, binning)):
             filt_obs[headers[i]['filtnam1']] += 1
             ax_h.plot(bins,hist,'+',color="C{0:d}".format(i),alpha=0.8,label=headers[i]['filtnam1']+' (Obs '+str(filt_obs[headers[i]['filtnam1']])+')')
             ax_h.plot([background[i],background[i]],[hist.min(), hist.max()],'x--',color="C{0:d}".format(i),alpha=0.8)
+            if not(coeff is None):
+                ax_h.plot(bins,gausspol(bins,*coeff[i]),'--',color="C{0:d}".format(i),alpha=0.8)
         ax_h.set_xscale('log')
-        ax_h.set_xlim([background.mean()*1e-2,background.mean()*1e2])
+        ax_h.set_ylim([0.,np.max([hist.max() for hist in histograms])])
+        ax_h.set_xlim([np.min(background)*1e-2,np.max(background)*1e2])
         ax_h.set_xlabel(r"Count rate [$s^{-1}$]")
         ax_h.set_ylabel(r"Number of pixels in bin")
         ax_h.set_title("Histogram for each observation")
@@ -97,8 +112,128 @@ def display_bkg(data, background, std_bkg, headers, histograms=None, bin_centers
     plt.show()
 
 
+def sky_part(img):
+    rand_ind = np.unique((np.random.rand(np.floor(img.size/4).astype(int))*2*img.size).astype(int)%img.size)
+    rand_pix = img.flatten()[rand_ind]
+    # Intensity range
+    sky_med = np.median(rand_pix)
+    sig = np.min([img[img<sky_med].std(),img[img>sky_med].std()])
+    sky_range = [sky_med-2.*sig, sky_med+sig]
+    sky = img[np.logical_and(img>=sky_range[0],img<=sky_range[1])]
+    return sky, sky_range
+
+
+def bkg_estimate(img, bins=None, chi2=None, coeff=None):
+    if bins is None or chi2 is None or coeff is None:
+        bins, chi2, coeff = [8], [], []
+    else:
+        try:
+            bins.append(int(3./2.*bins[-1]))
+        except IndexError:
+            bins, chi2, coeff = [8], [], []
+    hist, bin_edges = np.histogram(img[img>0], bins=bins[-1])
+    binning = bin_centers(bin_edges)
+    peak = binning[np.argmax(hist)]
+    bins_fwhm = binning[hist>hist.max()/2.]
+    fwhm = bins_fwhm[-1]-bins_fwhm[0]
+    p0 = [hist.max(), peak, fwhm, 1e-3, 1e-3, 1e-3, 1e-3]
+    try:
+        popt, pcov = curve_fit(gausspol, binning, hist, p0=p0)
+    except RuntimeError:
+        popt = p0
+    chi2.append(np.sum((hist - gausspol(binning,*popt))**2)/hist.size)
+    coeff.append(popt)
+    return bins, chi2, coeff
+
+
+def bkg_fit(data, error, mask, headers, subtract_error=True, display=False, savename=None, plots_folder=""):
+    """
+    Estimate the background of each image by fitting a gaussian (plus a
+    smooth polynomial term) to the sky count-rate histogram at increasingly
+    fine binnings, and combine the fits weighted by their chi2.
+    ----------
+    Inputs:
+    data : numpy.ndarray
+        Array containing the data to study (2D float arrays).
+    error : numpy.ndarray
+        Array of images (2D floats, aligned and of the same shape) containing
+        the error in each pixel of the observation images in data_array.
+    mask : numpy.ndarray
+        2D boolean array delimiting the data to work on.
+    headers : header list
+        Headers associated with the images in data_array.
+    subtract_error : boolean, optional
+        If True, the estimated background is subtracted from the data.
+        Defaults to True.
+    display : boolean, optional
+        If True, the sky histogram and fitted model of each image will be
+        displayed.
+        Defaults to False.
+    savename : str, optional
+        Name of the figure the map should be saved to. If None, the map won't
+        be saved (only displayed). Only used if display is True.
+        Defaults to None.
+    plots_folder : str, optional
+        Relative (or absolute) filepath to the folder in which the map will
+        be saved. Not used if savename is None.
+        Defaults to current folder.
+    ----------
+    Returns:
+    n_data_array : numpy.ndarray
+        Array containing the data to study, minus the background if
+        subtract_error is True.
+    n_error_array : numpy.ndarray
+        Array containing the updated errors, including the background
+        contribution, for the images in data_array.
+    headers : header list
+        Updated headers associated with the images in data_array.
+    background : numpy.ndarray
+        Array containing the pixel background value for each image in
+        data_array.
+    """
+    n_data_array, n_error_array = deepcopy(data), deepcopy(error)
+    error_bkg = np.ones(n_data_array.shape)
+    std_bkg = np.zeros((data.shape[0]))
+    background = np.zeros((data.shape[0]))
+    histograms, binning = [], []
+
+    for i, image in enumerate(data):
+        # Compute the count-rate histogram for the image
+        sky, sky_range = sky_part(image[image>0.])
+
+        bins, chi2, coeff = bkg_estimate(sky)
+        while bins[-1]<256:
+            bins, chi2, coeff = bkg_estimate(sky, bins, chi2, coeff)
+
+        hist, bin_edges = np.histogram(sky, bins=bins[-1])
+        histograms.append(hist)
+        binning.append(bin_centers(bin_edges))
+
+        chi2, coeff = np.array(chi2), np.array(coeff)
+        weights = 1/chi2**2
+        weights /= weights.sum()
+        bkg = np.sum(weights*coeff[:,1])
+        error_bkg[i] *= bkg
+
+        # Quadratically add uncertainties in the "correction factors" (see Kishimoto 1999)
+        # wavelength dependence of the polarizer filters: estimated to less than 1%
+        err_wav = data[i]*0.01
+        # difference in PSFs through each polarizer: estimated to less than 3%
+        err_psf = data[i]*0.03
+        # flatfielding uncertainties: estimated to less than 3%
+        err_flat = data[i]*0.03
+        n_error_array[i] = np.sqrt(n_error_array[i]**2 + error_bkg[i]**2 + err_wav**2 + err_psf**2 + err_flat**2)
+
+        # Subtract background
+        if subtract_error:
+            n_data_array[i][mask] = n_data_array[i][mask] - bkg
+            n_data_array[i][np.logical_and(mask,n_data_array[i] <= 0.01*bkg)] = 0.01*bkg
+
+        std_bkg[i] = image[np.abs(image-bkg)/bkg<1.].std()
+        background[i] = bkg
+
+    if display:
+        display_bkg(data, background, std_bkg, headers, histograms=histograms, binning=binning, coeff=coeff, savename=savename, plots_folder=plots_folder)
+    return n_data_array, n_error_array, headers, background
 
 
-def bkg_hist(data, error, mask, headers, sub_type=None, display=False, savename=None, plots_folder=""):
+def bkg_hist(data, error, mask, headers, sub_type=None, subtract_error=True, display=False, savename=None, plots_folder=""):
     """
     ----------
     Inputs:
@@ -144,7 +279,7 @@ def bkg_hist(data, error, mask, headers, sub_type=None, display=False, savename=
     error_bkg = np.ones(n_data_array.shape)
     std_bkg = np.zeros((data.shape[0]))
     background = np.zeros((data.shape[0]))
-    histograms, bin_centers = [], []
+    histograms, binning, coeff = [], [], []
 
     for i, image in enumerate(data):
         #Compute the Count-rate histogram for the image
@@ -167,10 +302,20 @@ def bkg_hist(data, error, mask, headers, sub_type=None, display=False, savename=
             hist, bin_edges = np.histogram(np.log(image[n_mask]),bins=n_bins)
             histograms.append(hist)
-            bin_centers.append(np.exp((bin_edges[:-1]+bin_edges[1:])/2))
+            binning.append(np.exp(bin_centers(bin_edges)))
             #Take the background as the count-rate with the maximum number of pixels
-            hist_max = bin_centers[-1][np.argmax(hist)]
-            bkg = np.sqrt(np.sum(image[np.abs(image-hist_max)/hist_max<0.5]**2)/image[np.abs(image-hist_max)/hist_max<0.5].size)
+            #hist_max = binning[-1][np.argmax(hist)]
+            #bkg = np.sqrt(np.sum(image[np.abs(image-hist_max)/hist_max<0.5]**2)/image[np.abs(image-hist_max)/hist_max<0.5].size)
+            #Fit a gaussian to the log-intensity histogram
+            bins_fwhm = binning[-1][hist>hist.max()/2.]
+            fwhm = bins_fwhm[-1]-bins_fwhm[0]
+            p0 = [hist.max(), binning[-1][np.argmax(hist)], fwhm, 1e-3, 1e-3, 1e-3, 1e-3]
+            popt, pcov = curve_fit(gausspol, binning[-1], hist, p0=p0)
+            coeff.append(popt)
+            bkg = popt[1]
             error_bkg[i] *= bkg
 
             # Quadratically add uncertainties in the "correction factors" (see Kishimoto 1999)
@@ -187,6 +332,7 @@ def bkg_hist(data, error, mask, headers, sub_type=None, display=False, savename=
             n_error_array[i] = np.sqrt(n_error_array[i]**2 + error_bkg[i]**2 + err_wav**2 + err_psf**2 + err_flat**2)
 
             #Subtract background
+            if subtract_error:
                 n_data_array[i][mask] = n_data_array[i][mask] - bkg
                 n_data_array[i][np.logical_and(mask,n_data_array[i] <= 0.01*bkg)] = 0.01*bkg
@@ -194,11 +340,11 @@ def bkg_hist(data, error, mask, headers, sub_type=None, display=False, savename=
         background[i] = bkg
 
     if display:
-        display_bkg(data, background, std_bkg, headers, histograms=histograms, bin_centers=bin_centers, savename=savename, plots_folder=plots_folder)
+        display_bkg(data, background, std_bkg, headers, histograms=histograms, binning=binning, coeff=coeff, savename=savename, plots_folder=plots_folder)
     return n_data_array, n_error_array, headers, background
 
 
-def bkg_mini(data, error, mask, headers, sub_shape=(15,15), display=False, savename=None, plots_folder=""):
+def bkg_mini(data, error, mask, headers, sub_shape=(15,15), subtract_error=True, display=False, savename=None, plots_folder=""):
     """
     Look for the sub-image of shape sub_shape that has the smallest integrated
     flux (no source assumption) and define the background on the image by the
@@ -290,6 +436,7 @@ def bkg_mini(data, error, mask, headers, sub_shape=(15,15), display=False, saven
             n_error_array[i] = np.sqrt(n_error_array[i]**2 + error_bkg[i]**2 + err_wav**2 + err_psf**2 + err_flat**2)
 
             #Subtract background
+            if subtract_error:
                 n_data_array[i][mask] = n_data_array[i][mask] - bkg
                 n_data_array[i][np.logical_and(mask,n_data_array[i] <= 0.01*bkg)] = 0.01*bkg
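Taken together, the new entry point in this file works roughly as follows — a condensed restatement of the bkg_fit loop, where 'image' is assumed to be one positive-valued calibrated frame from the pipeline: sky_part draws a random pixel sample to bracket the sky level, bkg_estimate is re-run with 1.5x more bins each pass, and the per-binning gaussian means are combined with chi2-derived weights:

import numpy as np
from lib.background import sky_part, bkg_estimate

sky, sky_range = sky_part(image[image > 0.])   # sky-dominated pixel sample ('image' assumed)
bins, chi2, coeff = bkg_estimate(sky)          # first pass: 8 bins
while bins[-1] < 256:                          # refine: 8, 12, 18, 27, ... bins
    bins, chi2, coeff = bkg_estimate(sky, bins, chi2, coeff)
chi2, coeff = np.array(chi2), np.array(coeff)
weights = 1/chi2**2                            # favour the binnings that fit best
weights /= weights.sum()
bkg = np.sum(weights*coeff[:, 1])              # weighted mean of the gaussian centres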

View File

@@ -309,7 +309,7 @@ def polarization_map(Stokes, data_mask=None, rectangle=None, SNRp_cut=3., SNRi_c
     if display.lower() in ['intensity']:
         # If no display selected, show intensity map
         display='i'
-        vmin, vmax = np.min(stkI.data[mask]*convert_flux)/5., np.max(stkI.data[stkI.data > 0.]*convert_flux)
+        vmin, vmax = np.max(np.sqrt(stk_cov.data[0,0][mask])*convert_flux), np.max(stkI.data[stkI.data > 0.]*convert_flux)
         im = ax.imshow(stkI.data*convert_flux, norm=LogNorm(vmin,vmax), aspect='equal', cmap='inferno', alpha=1.)
         cbar = plt.colorbar(im, cax=cbar_ax, label=r"$F_{\lambda}$ [$ergs \cdot cm^{-2} \cdot s^{-1} \cdot \AA^{-1}$]")
         levelsI = np.linspace(vmax*0.01, vmax*0.99, 10)
@@ -320,7 +320,7 @@ def polarization_map(Stokes, data_mask=None, rectangle=None, SNRp_cut=3., SNRi_c
         # Display polarisation flux
         display='pf'
         pf_mask = (stkI.data > 0.) * (pol.data > 0.)
-        vmin, vmax = np.min(stkI.data[mask]*convert_flux)/5., np.max(stkI.data[stkI.data > 0.]*convert_flux)
+        vmin, vmax = np.max(np.sqrt(stk_cov.data[0,0][mask])*convert_flux), np.max(stkI.data[stkI.data > 0.]*convert_flux)
         im = ax.imshow(stkI.data*convert_flux*pol.data, norm=LogNorm(vmin,vmax), aspect='equal', cmap='inferno', alpha=1.)
         cbar = plt.colorbar(im, cax=cbar_ax, label=r"$F_{\lambda} \cdot P$ [$ergs \cdot cm^{-2} \cdot s^{-1} \cdot \AA^{-1}$]")
         levelsPf = np.linspace(vmax*0.01, vmax*0.99, 10)
@@ -1736,21 +1736,17 @@ class pol_map(object):
                 self.display_selection = "total_flux"
             if self.display_selection.lower() in ['total_flux']:
                 self.data = self.I*self.convert_flux
-                try:
-                    vmin, vmax = np.min(self.data[self.cut])/5., np.max(self.data[self.data > 0.])
-                except ValueError:
-                    vmax = np.max(self.data[self.data > 0.])
-                    vmin = vmax*1e-3
+                vmin, vmax = np.max(np.sqrt(self.IQU_cov[0,0][self.cut])*self.convert_flux), np.max(self.data[self.data > 0.])
                 norm = LogNorm(vmin, vmax)
                 label = r"$F_{\lambda}$ [$ergs \cdot cm^{-2} \cdot s^{-1} \cdot \AA^{-1}$]"
             elif self.display_selection.lower() in ['pol_flux']:
                 self.data = self.I*self.convert_flux*self.P
-                vmin, vmax = np.min(self.I[self.cut]*self.convert_flux)/5., np.max(self.I[self.data > 0.]*self.convert_flux)
+                vmin, vmax = np.max(np.sqrt(self.IQU_cov[0,0][self.cut])*self.convert_flux), np.max(self.I[self.data > 0.]*self.convert_flux)
                 norm = LogNorm(vmin, vmax)
                 label = r"$F_{\lambda} \cdot P$ [$ergs \cdot cm^{-2} \cdot s^{-1} \cdot \AA^{-1}$]"
             elif self.display_selection.lower() in ['pol_deg']:
                 self.data = self.P*100.
-                vmin, vmax = 0., np.max(self.data[self.data > 0.])
+                vmin, vmax = 0., 100. #np.max(self.data[self.data > 0.])
                 label = r"$P$ [%]"
             elif self.display_selection.lower() in ['pol_ang']:
                 self.data = princ_angle(self.PA)
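The display changes in this file share one idea: instead of an arbitrary np.min(...)/5. floor, the LogNorm lower bound is pinned to the largest 1-sigma intensity error inside the SNR cut, so anything below the noise level saturates at the bottom of the colormap. Schematically, with variable names assumed to mirror the class above:

import numpy as np
from matplotlib.colors import LogNorm

sigma_I = np.sqrt(IQU_cov[0, 0])           # per-pixel 1-sigma error on Stokes I (assumed available)
vmin = np.max(sigma_I[cut])*convert_flux   # noise floor, in flux units
vmax = np.max(data[data > 0.])             # brightest valid pixel
norm = LogNorm(vmin, vmax)                 # display floor pinned at the noise level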

View File

@@ -53,7 +53,7 @@ log.setLevel('ERROR')
 import warnings
 from lib.deconvolve import deconvolve_im, gaussian_psf, gaussian2d, zeropad
 from lib.convex_hull import image_hull, clean_ROI
-from lib.background import bkg_hist, bkg_mini
+from lib.background import bkg_fit, bkg_hist, bkg_mini
 from lib.plots import plot_obs
 from lib.cross_correlation import phase_cross_correlation
@@ -409,8 +409,8 @@ def deconvolve_array(data_array, headers, psf='gaussian', FWHM=1., scale='px',
 def get_error(data_array, headers, error_array=None, data_mask=None,
-              sub_type=None, display=False, savename=None, plots_folder="",
-              return_background=False):
+              sub_type=None, subtract_error=True, display=False, savename=None,
+              plots_folder="", return_background=False):
     """
     Look for the sub-image of shape sub_shape that has the smallest integrated
     flux (no source assumption) and define the background on the image by the
@@ -431,10 +431,11 @@ def get_error(data_array, headers, error_array=None, data_mask=None,
         If None, will be initialized with a full True mask.
         Defaults to None.
     sub_type : str or int or tuple, optional
-        If str, statistic rule to be used for the number of bins in counts/s.
+        If 'auto', look for the optimal binning and fit the intensity
+        histogram with a gaussian.
+        If any other str or None, statistic rule to be used for the number
+        of bins in counts/s.
         If int, number of bins for the counts/s histogram.
         If tuple, shape of the sub-image to look for. Must be odd.
-        Defaults to "Freedman-Diaconis".
+        Defaults to None.
     display : boolean, optional
         If True, data_array will be displayed with a rectangle around the
         sub-image selected for background computation.
@@ -468,18 +469,26 @@ def get_error(data_array, headers, error_array=None, data_mask=None,
     # Crop out any null edges
     if error_array is None:
         error_array = np.zeros(data_array.shape)
-    n_data_array, n_error_array = deepcopy(data_array), deepcopy(error_array)
+    data, error = deepcopy(data_array), deepcopy(error_array)
     if not data_mask is None:
-        data, error, mask = n_data_array, n_error_array, deepcopy(data_mask)
+        mask = deepcopy(data_mask)
     else:
-        data, error, _ = crop_array(n_data_array, headers, n_error_array, step=5, null_val=0., inside=False)
-        mask = np.ones(data[0].shape, dtype=bool)
+        data_c, error_c, _ = crop_array(data, headers, error, step=5, null_val=0., inside=False)
+        mask_c = np.ones(data_c[0].shape, dtype=bool)
+        for i,(data_ci, error_ci) in enumerate(zip(data_c, error_c)):
+            data[i], error[i] = zeropad(data_ci, data[i].shape), zeropad(error_ci, error[i].shape)
+        mask = zeropad(mask_c, data[0].shape).astype(bool)
 
     background = np.zeros((data.shape[0]))
 
-    if (sub_type is None) or (type(sub_type)==str):
-        n_data_array, n_error_array, headers, background = bkg_hist(data, error, mask, headers, sub_type=sub_type, display=display, savename=savename, plots_folder=plots_folder)
+    if (sub_type is None):
+        n_data_array, n_error_array, headers, background = bkg_hist(data, error, mask, headers, subtract_error=subtract_error, display=display, savename=savename, plots_folder=plots_folder)
+    elif type(sub_type)==str:
+        if sub_type.lower() in ['auto']:
+            n_data_array, n_error_array, headers, background = bkg_fit(data, error, mask, headers, subtract_error=subtract_error, display=display, savename=savename, plots_folder=plots_folder)
+        else:
+            n_data_array, n_error_array, headers, background = bkg_hist(data, error, mask, headers, sub_type=sub_type, subtract_error=subtract_error, display=display, savename=savename, plots_folder=plots_folder)
     elif type(sub_type)==tuple:
-        n_data_array, n_error_array, headers, background = bkg_mini(data, error, mask, headers, sub_shape=sub_type, display=display, savename=savename, plots_folder=plots_folder)
+        n_data_array, n_error_array, headers, background = bkg_mini(data, error, mask, headers, sub_shape=sub_type, subtract_error=subtract_error, display=display, savename=savename, plots_folder=plots_folder)
     else:
         print("Warning: Invalid subtype.")
@@ -674,7 +683,7 @@ def align_data(data_array, headers, error_array=None, background=None,
raise ValueError("All images in data_array must have same shape as\ raise ValueError("All images in data_array must have same shape as\
ref_data") ref_data")
if (error_array is None) or (background is None): if (error_array is None) or (background is None):
_, error_array, headers, background = get_error_hist(data_array, headers, return_background=True) _, error_array, headers, background = get_error(data_array, headers, sub_type=(10,10), return_background=True)
# Crop out any null edges # Crop out any null edges
#(ref_data must be cropped as well) #(ref_data must be cropped as well)