Allow playing with background estimation
@@ -25,10 +25,10 @@ globals()['infiles'] = ['x274020at_c0f.fits','x274020bt_c0f.fits','x274020ct_c0f
 #psf_file = 'NGC1068_f253m00.fits'
 globals()['plots_folder'] = "../plots/NGC1068_x274020/"

-#globals()['data_folder'] = "../data/IC5063_x3nl030/"
-#globals()['infiles'] = ['x3nl0301r_c0f.fits','x3nl0302r_c0f.fits','x3nl0303r_c0f.fits']
-##psf_file = 'IC5063_f502m00.fits'
-#globals()['plots_folder'] = "../plots/IC5063_x3nl030/"
+globals()['data_folder'] = "../data/IC5063_x3nl030/"
+globals()['infiles'] = ['x3nl0301r_c0f.fits','x3nl0302r_c0f.fits','x3nl0303r_c0f.fits']
+#psf_file = 'IC5063_f502m00.fits'
+globals()['plots_folder'] = "../plots/IC5063_x3nl030/"

 #globals()['data_folder'] = "../data/NGC1068_x14w010/"
 #globals()['infiles'] = ['x14w0101t_c0f.fits','x14w0102t_c0f.fits','x14w0103t_c0f.fits',
@@ -131,8 +131,8 @@ def main():
 display_crop = False
 # Error estimation
 error_sub_type = 'freedman-diaconis' #sqrt, sturges, rice, scott, freedman-diaconis (default) or shape (example (15,15))
-subtract_error = True
-display_error = False
+subtract_error = 1.25
+display_error = True
 # Data binning
 rebin = True
 pxsize = 0.10
@@ -152,10 +152,10 @@ def main():
 crop = False #Crop to desired ROI
 final_display = True
 # Polarization map output
-figname = 'NGC1068_FOC' #target/intrument name
+figname = 'IC5063_FOC' #target/intrument name
 figtype = '_c_020' #additionnal informations
-SNRp_cut = 5. #P measurments with SNR>3
-SNRi_cut = 50. #I measurments with SNR>30, which implies an uncertainty in P of 4.7%.
+SNRp_cut = 3. #P measurments with SNR>3
+SNRi_cut = 30. #I measurments with SNR>30, which implies an uncertainty in P of 4.7%.
 step_vec = 1 #plot all vectors in the array. if step_vec = 2, then every other vector will be plotted
 # if step_vec = 0 then all vectors are displayed at full length

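Note on the new cuts: the 4.7% quoted in the SNRi_cut comment follows from first-order error propagation on the polarization degree, assuming the uncertainties on Q and U are roughly \sqrt{2} times that on I (as is typical when Stokes parameters are formed from differences of polarizer images):

    \sigma_P \simeq \frac{\sqrt{2}\,\sigma_I}{I} = \frac{\sqrt{2}}{\mathrm{SNR}_I} = \frac{\sqrt{2}}{30} \approx 0.047

so the relaxed cut SNRi_cut = 30 (down from 50) still bounds the uncertainty on P at roughly 4.7 percentage points, and SNRp_cut = 3 now matches its own comment.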
@@ -181,8 +181,8 @@ def main():
 # Rebin data to desired pixel size.
 if rebin:
     data_array, error_array, headers, Dxy, data_mask = proj_red.rebin_array(data_array, error_array, headers, pxsize=pxsize, scale=px_scale, operation=rebin_operation, data_mask=data_mask)
-
-# Rotate data to have North up
+
+# Rotate data to have North up
 if rotate_data:
     data_mask = np.ones(data_array.shape[1:]).astype(bool)
     alpha = headers[0]['orientat']
@@ -194,7 +194,10 @@ def main():
 shape = np.array([vertex[1]-vertex[0],vertex[3]-vertex[2]])
 rectangle = [vertex[2], vertex[0], shape[1], shape[0], 0., 'g']

-proj_plots.plot_obs(data_array, headers, vmin=data_array.min(), vmax=data_array.max(), rectangle =[rectangle,]*data_array.shape[0], savename=figname+"_center_"+align_center, plots_folder=plots_folder)
+proj_plots.plot_obs(data_array, headers, vmin=data_array[data_array>0.].min(), vmax=data_array[data_array>0.].max(), rectangle =[rectangle,]*data_array.shape[0], savename=figname+"_center_"+align_center, plots_folder=plots_folder)
+
+background = np.array([np.array(bkg).reshape(1,1) for bkg in background])
+background_error = np.array([np.array(np.sqrt((bkg-background[np.array([h['filtnam1']==head['filtnam1'] for h in headers],dtype=bool)].mean())**2/np.sum([h['filtnam1']==head['filtnam1'] for h in headers]))).reshape(1,1) for bkg,head in zip(background,headers)])

 ## Step 2:
 # Compute Stokes I, Q, U with smoothed polarized images
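The background_error one-liner above is dense; here is a minimal unrolled sketch of the same computation (assuming numpy and the 'filtnam1' header keyword used throughout the diff): each exposure's background uncertainty is the deviation of its background from the mean background of exposures sharing its filter, scaled by 1/sqrt(N) for the N exposures in that filter.

    import numpy as np

    def per_filter_background_error(background, headers):
        # background: per-exposure background values, shape (N, 1, 1)
        # headers: FITS headers carrying the 'filtnam1' keyword
        errors = []
        for bkg, head in zip(background, headers):
            same = np.array([h['filtnam1'] == head['filtnam1'] for h in headers], dtype=bool)
            mean_bkg = background[same].mean()
            # |bkg - mean| / sqrt(N) over the N exposures sharing this filter
            errors.append(np.sqrt((bkg - mean_bkg)**2 / same.sum()).reshape(1, 1))
        return np.array(errors)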
@@ -202,8 +205,8 @@ def main():
 # FWHM of FOC have been estimated at about 0.03" across 1500-5000 Angstrom band, which is about 2 detector pixels wide
 # see Jedrzejewski, R.; Nota, A.; Hack, W. J., A Comparison Between FOC and WFPC2
 # Bibcode : 1995chst.conf...10J
-I_stokes, Q_stokes, U_stokes, Stokes_cov = proj_red.compute_Stokes(data_array, error_array, data_mask, headers, FWHM=smoothing_FWHM, scale=smoothing_scale, smoothing=smoothing_function,transmitcorr=True)
-I_bkg, Q_bkg, U_bkg, S_cov_bkg = proj_red.compute_Stokes(background, background_error, np.array(True).reshape(1,1), headers, FWHM=None, scale=smoothing_scale, smoothing=smoothing_function,transmitcorr=True)
+I_stokes, Q_stokes, U_stokes, Stokes_cov = proj_red.compute_Stokes(data_array, error_array, data_mask, headers, FWHM=smoothing_FWHM, scale=smoothing_scale, smoothing=smoothing_function,transmitcorr=False)
+I_bkg, Q_bkg, U_bkg, S_cov_bkg = proj_red.compute_Stokes(background, background_error, np.array(True).reshape(1,1), headers, FWHM=None, scale=smoothing_scale, smoothing=smoothing_function,transmitcorr=False)

 ## Step 3:
 # Rotate images to have North up
@@ -15,7 +15,7 @@ root_dir = path_join('/home/t.barnouin/Documents/Thesis/HST')
 root_dir_K = path_join(root_dir,'Kishimoto','output')
 root_dir_S = path_join(root_dir,'FOC_Reduction','output')
 root_dir_data_S = path_join(root_dir,'FOC_Reduction','data','NGC1068_x274020')
-filename_S = "NGC1068_FOC_bg_b_10px.fits"
+filename_S = "NGC1068_FOC_b_10px.fits"

 data_K = {}
 data_S = {}
@@ -31,13 +31,16 @@ for d,i in zip(['I','Q','U','P','PA','sI','sQ','sU','sP','sPA'],[0,1,2,5,8,(3,0,
 wcs = WCS(header)
 convert_flux = header['photflam']

+bkg_S = np.median(data_S['I'])/3
+bkg_K = np.median(data_K['I'])/3
+
 #zeropad data to get same size of array
 shape = data_S['I'].shape
 for d in data_K:
     data_K[d] = zeropad(data_K[d],shape)

 #shift array to get same information in same pixel
-data_arr, error_ar, heads, data_msk, shifts, shifts_err = align_data(np.array([data_S['I'],data_K['I']]), [header, header], upsample_factor=10., return_shifts=True)
+data_arr, error_ar, heads, data_msk, shifts, shifts_err = align_data(np.array([data_S['I'],data_K['I']]), [header, header], error_array=np.array([data_S['sI'],data_K['sI']]), background=np.array([bkg_S,bkg_K]), upsample_factor=10., return_shifts=True)
 for d in data_K:
     data_K[d] = shift(data_K[d],shifts[1],order=1,cval=0.)

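zeropad itself is not shown in this commit; a plausible minimal implementation consistent with the call above (the choice to embed the smaller map centred in a zero array of the target shape is our assumption — the project's own helper may anchor at a corner instead):

    import numpy as np

    def zeropad(data, shape):
        # Embed `data` centred in a zero array of the target shape.
        # Assumes the target shape is at least as large as data.shape.
        out = np.zeros(shape, dtype=data.dtype)
        y0 = (shape[0] - data.shape[0]) // 2
        x0 = (shape[1] - data.shape[1]) // 2
        out[y0:y0 + data.shape[0], x0:x0 + data.shape[1]] = data
        return out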
@@ -158,6 +158,10 @@ def bkg_fit(data, error, mask, headers, subtract_error=True, display=False, save
         2D boolean array delimiting the data to work on.
     headers : header list
         Headers associated with the images in data_array.
+    subtract_error : float or bool, optional
+        If float, factor to which the estimated background should be multiplied
+        If False the background is not subtracted.
+        Defaults to True (factor = 1.).
     display : boolean, optional
         If True, data_array will be displayed with a rectangle around the
         sub-image selected for background computation.
@@ -203,7 +207,7 @@ def bkg_fit(data, error, mask, headers, subtract_error=True, display=False, save
         weights = 1/chi2**2
         weights /= weights.sum()

-        bkg = np.sum(weights*coeff[:,1])
+        bkg = np.sum(weights*coeff[:,1])*subtract_error if subtract_error>0 else np.sum(weights*coeff[:,1])

         error_bkg[i] *= bkg

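The one-liner pattern above (repeated in bkg_hist and bkg_mini below) leans on Python's bool/int coercion: True compares as > 0 and multiplies as 1, so subtract_error=True reproduces the old behaviour while a float such as 1.25 inflates the estimated background. A minimal sketch of the semantics:

    def scale_bkg(raw_bkg, subtract_error):
        # float > 0 scales the estimate; True acts as a factor of 1.;
        # False (or 0) leaves it untouched (subtraction is skipped elsewhere).
        return raw_bkg * subtract_error if subtract_error > 0 else raw_bkg

    assert scale_bkg(2.0, True) == 2.0    # legacy boolean behaviour
    assert scale_bkg(2.0, 1.25) == 2.5    # new: background inflated by 25%
    assert scale_bkg(2.0, False) == 2.0   # estimate kept, no scaling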
@@ -221,7 +225,7 @@ def bkg_fit(data, error, mask, headers, subtract_error=True, display=False, save
         n_error_array[i] = np.sqrt(n_error_array[i]**2 + error_bkg[i]**2 + err_wav**2 + err_psf**2 + err_flat**2)

         #Substract background
-        if subtract_error:
+        if subtract_error>0:
             n_data_array[i][mask] = n_data_array[i][mask] - bkg
             n_data_array[i][np.logical_and(mask,n_data_array[i] <= 0.01*bkg)] = 0.01*bkg

@@ -250,6 +254,10 @@ def bkg_hist(data, error, mask, headers, sub_type=None, subtract_error=True, dis
         If str, statistic rule to be used for the number of bins in counts/s.
         If int, number of bins for the counts/s histogram.
         Defaults to "Freedman-Diaconis".
+    subtract_error : float or bool, optional
+        If float, factor to which the estimated background should be multiplied
+        If False the background is not subtracted.
+        Defaults to True (factor = 1.).
     display : boolean, optional
         If True, data_array will be displayed with a rectangle around the
         sub-image selected for background computation.
@@ -314,7 +322,7 @@ def bkg_hist(data, error, mask, headers, sub_type=None, subtract_error=True, dis
         p0 = [hist.max(), binning[-1][np.argmax(hist)], fwhm, 1e-3, 1e-3, 1e-3, 1e-3]
         popt, pcov = curve_fit(gausspol, binning[-1], hist, p0=p0)
         coeff.append(popt)
-        bkg = popt[1]
+        bkg = popt[1]*subtract_error if subtract_error>0 else popt[1]

         error_bkg[i] *= bkg

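gausspol is not part of the diff; here is a sketch of a model matching the seven-element p0 above (amplitude, centre, width, then four polynomial coefficients). The actual gausspol in this codebase may parametrise the width as a FWHM rather than a sigma:

    import numpy as np

    def gausspol(x, a, mu, w, c0, c1, c2, c3):
        # Gaussian sky-background peak on top of a cubic baseline
        # accounting for source counts in the count-rate histogram.
        gauss = a * np.exp(-0.5 * ((x - mu) / w)**2)
        poly = c0 + c1*x + c2*x**2 + c3*x**3
        return gauss + poly

The fitted Gaussian centre popt[1] is then taken as the background level, which the new line scales by subtract_error.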
@@ -332,9 +340,9 @@ def bkg_hist(data, error, mask, headers, sub_type=None, subtract_error=True, dis
         n_error_array[i] = np.sqrt(n_error_array[i]**2 + error_bkg[i]**2 + err_wav**2 + err_psf**2 + err_flat**2)

         #Substract background
-        if subtract_error:
+        if subtract_error > 0:
             n_data_array[i][mask] = n_data_array[i][mask] - bkg
-            n_data_array[i][np.logical_and(mask,n_data_array[i] < 0.)] = 0.
+            n_data_array[i][np.logical_and(mask,n_data_array[i] <= 0.01*bkg)] = 0.01*bkg

         std_bkg[i] = image[np.abs(image-bkg)/bkg<1.].std()
         background[i] = bkg
@@ -363,6 +371,10 @@ def bkg_mini(data, error, mask, headers, sub_shape=(15,15), subtract_error=True,
     sub_shape : tuple, optional
         Shape of the sub-image to look for. Must be odd.
         Defaults to 10% of input array.
+    subtract_error : float or bool, optional
+        If float, factor to which the estimated background should be multiplied
+        If False the background is not subtracted.
+        Defaults to True (factor = 1.).
     display : boolean, optional
         If True, data_array will be displayed with a rectangle around the
         sub-image selected for background computation.
@@ -419,7 +431,7 @@ def bkg_mini(data, error, mask, headers, sub_shape=(15,15), subtract_error=True,
         # Compute error : root mean square of the background
         sub_image = image[minima[0]:minima[0]+sub_shape[0],minima[1]:minima[1]+sub_shape[1]]
         #bkg = np.std(sub_image) # Previously computed using standard deviation over the background
-        bkg = np.sqrt(np.sum(sub_image**2)/sub_image.size)
+        bkg = np.sqrt(np.sum(sub_image**2)/sub_image.size)*subtract_error if subtract_error>0 else np.sqrt(np.sum(sub_image**2)/sub_image.size)
         error_bkg[i] *= bkg

         # Quadratically add uncertainties in the "correction factors" (see Kishimoto 1999)
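For context, minima in bkg_mini is the corner of the sub_shape window selected for background computation. A hypothetical sketch of that search and of the RMS estimate used above (a brute-force scan under the assumption of a lowest-total-counts criterion; the project may use a faster filter):

    import numpy as np

    def min_window_rms(image, sub_shape=(15, 15)):
        # Slide a sub_shape window over the image and keep the corner
        # with the lowest total counts, then return its RMS as background.
        best, minima = np.inf, (0, 0)
        for y in range(image.shape[0] - sub_shape[0] + 1):
            for x in range(image.shape[1] - sub_shape[1] + 1):
                total = image[y:y + sub_shape[0], x:x + sub_shape[1]].sum()
                if total < best:
                    best, minima = total, (y, x)
        sub = image[minima[0]:minima[0] + sub_shape[0], minima[1]:minima[1] + sub_shape[1]]
        return minima, np.sqrt(np.sum(sub**2) / sub.size)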
@@ -436,7 +448,7 @@ def bkg_mini(data, error, mask, headers, sub_shape=(15,15), subtract_error=True,
         n_error_array[i] = np.sqrt(n_error_array[i]**2 + error_bkg[i]**2 + err_wav**2 + err_psf**2 + err_flat**2)

         #Substract background
-        if subtract_error:
+        if subtract_error>0.:
             n_data_array[i][mask] = n_data_array[i][mask] - bkg
             n_data_array[i][np.logical_and(mask,n_data_array[i] <= 0.01*bkg)] = 0.01*bkg

@@ -316,7 +316,7 @@ def polarization_map(Stokes, data_mask=None, rectangle=None, SNRp_cut=3., SNRi_c
     if display.lower() in ['intensity']:
         # If no display selected, show intensity map
         display='i'
-        vmin, vmax = 1/10*np.median(stkI.data[stkI.data > 0.]*convert_flux), np.max(stkI.data[stkI.data > 0.]*convert_flux)
+        vmin, vmax = 3.*np.mean(np.sqrt(stk_cov.data[0,0][mask])*convert_flux), np.max(stkI.data[stkI.data > 0.]*convert_flux)
         im = ax.imshow(stkI.data*convert_flux, norm=LogNorm(vmin,vmax), aspect='equal', cmap='inferno', alpha=1.)
         cbar = plt.colorbar(im, cax=cbar_ax, label=r"$F_{\lambda}$ [$ergs \cdot cm^{-2} \cdot s^{-1} \cdot \AA^{-1}$]")
         levelsI = np.linspace(vmax*0.01, vmax*0.99, 10)
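The change above (repeated in the polarized-flux and default branches below) anchors the lower bound of the log stretch at three times the mean per-pixel noise on Stokes I instead of a tenth of the median flux, so the colour scale starts near the detection floor. A self-contained sketch of the new limits, with names mirroring the diff:

    import numpy as np

    def display_limits(stkI, stk_cov, mask, convert_flux, snr_floor=3.):
        # vmin: snr_floor times the mean 1-sigma uncertainty on I,
        # taken as the sqrt of the (I,I) covariance term over the mask;
        # vmax: brightest positive pixel, both in flux units.
        sigma_I = np.sqrt(stk_cov[0, 0][mask]) * convert_flux
        return snr_floor * np.mean(sigma_I), np.max(stkI[stkI > 0.] * convert_flux)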
@@ -327,7 +327,7 @@ def polarization_map(Stokes, data_mask=None, rectangle=None, SNRp_cut=3., SNRi_c
         # Display polarisation flux
         display='pf'
         pf_mask = (stkI.data > 0.) * (pol.data > 0.)
-        vmin, vmax = 1/10*np.median(stkI.data[stkI.data > 0.]*convert_flux), np.max(stkI.data[stkI.data > 0.]*convert_flux)
+        vmin, vmax = 3.*np.mean(np.sqrt(stk_cov.data[0,0][mask])*convert_flux), np.max(stkI.data[stkI.data > 0.]*convert_flux)
         im = ax.imshow(stkI.data*convert_flux*pol.data, norm=LogNorm(vmin,vmax), aspect='equal', cmap='inferno', alpha=1.)
         cbar = plt.colorbar(im, cax=cbar_ax, label=r"$F_{\lambda} \cdot P$ [$ergs \cdot cm^{-2} \cdot s^{-1} \cdot \AA^{-1}$]")
         levelsPf = np.linspace(vmax*0.01, vmax*0.99, 10)
@@ -382,7 +382,7 @@ def polarization_map(Stokes, data_mask=None, rectangle=None, SNRp_cut=3., SNRi_c
         #ax.clabel(cont,inline=True,fontsize=6)
     else:
         # Defaults to intensity map
-        vmin, vmax = 1/10*np.median(stkI.data[stkI.data > 0.]*convert_flux), np.max(stkI.data[stkI.data > 0.]*convert_flux)
+        vmin, vmax = 3.*np.mean(np.sqrt(stk_cov.data[0,0][mask])*convert_flux), np.max(stkI.data[stkI.data > 0.]*convert_flux)
         #im = ax.imshow(stkI.data*convert_flux, vmin=vmin, vmax=vmax, aspect='equal', cmap='inferno', alpha=1.)
         #cbar = plt.colorbar(im, cax=cbar_ax, label=r"$F_{\lambda}$ [$ergs \cdot cm^{-2} \cdot s^{-1} \cdot \AA$]")
         im = ax.imshow(stkI.data*convert_flux, norm=LogNorm(vmin,vmax), aspect='equal', cmap='inferno', alpha=1.)
@@ -1745,12 +1745,12 @@ class pol_map(object):
             self.display_selection = "total_flux"
         if self.display_selection.lower() in ['total_flux']:
             self.data = self.I*self.convert_flux
-            vmin, vmax = 1/10.*np.median(self.data[self.data > 0.]), np.max(self.data[self.data > 0.])
+            vmin, vmax = 1/2.0*np.median(self.data[self.data > 0.]), np.max(self.data[self.data > 0.])
             norm = LogNorm(vmin, vmax)
             label = r"$F_{\lambda}$ [$ergs \cdot cm^{-2} \cdot s^{-1} \cdot \AA^{-1}$]"
         elif self.display_selection.lower() in ['pol_flux']:
             self.data = self.I*self.convert_flux*self.P
-            vmin, vmax = 1/10.*np.median(self.I[self.I > 0.]*self.convert_flux), np.max(self.I[self.I > 0.]*self.convert_flux)
+            vmin, vmax = 1/2.0*np.median(self.I[self.I > 0.]*self.convert_flux), np.max(self.I[self.I > 0.]*self.convert_flux)
             norm = LogNorm(vmin, vmax)
             label = r"$F_{\lambda} \cdot P$ [$ergs \cdot cm^{-2} \cdot s^{-1} \cdot \AA^{-1}$]"
         elif self.display_selection.lower() in ['pol_deg']:
@@ -327,8 +327,8 @@ def crop_array(data_array, headers, error_array=None, data_mask=None, step=5,
             #fig.suptitle(savename+'_'+filt+'_crop_region')
             fig.savefig(plots_folder+savename+'_'+filt+'_crop_region.png',
                         bbox_inches='tight')
-        plot_obs(data_array, headers, vmin=data_array.min(),
-                 vmax=data_array.max(), rectangle=[rectangle,]*len(headers),
+        plot_obs(data_array, headers, vmin=data_array[data_array>0.].min(),
+                 vmax=data_array[data_array>0.].max(), rectangle=[rectangle,]*len(headers),
                  savename=savename+'_crop_region',plots_folder=plots_folder)
         plt.show()

@@ -436,6 +436,10 @@ def get_error(data_array, headers, error_array=None, data_mask=None,
         If int, number of bins for the counts/s histogram.
         If tuple, shape of the sub-image to look for. Must be odd.
         Defaults to None.
+    subtract_error : float or bool, optional
+        If float, factor to which the estimated background should be multiplied
+        If False the background is not subtracted.
+        Defaults to True (factor = 1.).
     display : boolean, optional
         If True, data_array will be displayed with a rectangle around the
         sub-image selected for background computation.
@@ -7,11 +7,11 @@ from lib.plots import overplot_radio, overplot_pol, align_pol
 from matplotlib.colors import LogNorm

 Stokes_UV = fits.open("../data/IC5063_x3nl030/IC5063_FOC_c_020.fits")
-Stokes_18GHz = fits.open("../data/IC5063_x3nl030/radio/IC5063.18GHz.fits")
-Stokes_24GHz = fits.open("../data/IC5063_x3nl030/radio/IC5063.24GHz.fits")
-Stokes_103GHz = fits.open("../data/IC5063_x3nl030/radio/I5063_103GHz.fits")
-Stokes_229GHz = fits.open("../data/IC5063_x3nl030/radio/I5063_229GHz.fits")
-Stokes_357GHz = fits.open("../data/IC5063_x3nl030/radio/I5063_357GHz.fits")
+Stokes_18GHz = fits.open("../data/IC5063_x3nl030/radio/IC5063_18GHz.fits")
+Stokes_24GHz = fits.open("../data/IC5063_x3nl030/radio/IC5063_24GHz.fits")
+Stokes_103GHz = fits.open("../data/IC5063_x3nl030/radio/IC5063_103GHz.fits")
+Stokes_229GHz = fits.open("../data/IC5063_x3nl030/radio/IC5063_229GHz.fits")
+Stokes_357GHz = fits.open("../data/IC5063_x3nl030/radio/IC5063_357GHz.fits")
 #Stokes_S2 = fits.open("../data/IC5063_x3nl030/POLARIZATION_COMPARISON/S2_rot_crop.fits")
 Stokes_IR = fits.open("../data/IC5063_x3nl030/IR/u2e65g01t_c0f_rot.fits")