fix background subtraction and clean repo

This commit is contained in:
Tibeuleu
2022-11-04 15:39:19 +01:00
parent da805b71f1
commit 4035ca1455
231 changed files with 76 additions and 34 deletions

View File

@@ -19,26 +19,25 @@ from astropy.wcs import WCS
##### User inputs
## Input and output locations
#globals()['data_folder'] = "../data/NGC1068_x274020/"
##globals()['infiles'] = ['xn1c400.fits','xn2c400.fits','xn3c400.fits']
#globals()['infiles'] = ['x274020at_c0f.fits','x274020bt_c0f.fits','x274020ct_c0f.fits',
# 'x274020dt_c0f.fits','x274020et_c0f.fits','x274020ft_c0f.fits',
# 'x274020gt_c0f.fits','x274020ht_c0f.fits','x274020it_c0f.fits']
##psf_file = 'NGC1068_f253m00.fits'
#globals()['plots_folder'] = "../plots/NGC1068_x274020/"
globals()['data_folder'] = "../data/IC5063_x3nl030/"
globals()['infiles'] = ['x3nl0301r_c0f.fits','x3nl0302r_c0f.fits','x3nl0303r_c0f.fits']
#psf_file = 'IC5063_f502m00.fits'
globals()['plots_folder'] = "../plots/IC5063_x3nl030/"
#globals()['data_folder'] = "../data/IC5063_x3nl030/"
#globals()['infiles'] = ['x3nl0301r_c0f.fits','x3nl0302r_c0f.fits','x3nl0303r_c0f.fits']
##psf_file = 'IC5063_f502m00.fits'
#globals()['plots_folder'] = "../plots/IC5063_x3nl030/"
#globals()['data_folder'] = "../data/NGC1068_x14w010/"
#globals()['infiles'] = ['x14w0101t_c0f.fits','x14w0102t_c0f.fits','x14w0103t_c0f.fits',
# 'x14w0104t_c0f.fits','x14w0105p_c0f.fits','x14w0106t_c0f.fits']
#globals()['plots_folder'] = "../plots/NGC1068_x14w010/"
#globals()['data_folder'] = "../data/3C405_x136060/"
#globals()['infiles'] = ['x1360601t_c0f.fits','x1360602t_c0f.fits','x1360603t_c0f.fits']
#globals()['plots_folder'] = "../plots/3C405_x136060/"
globals()['data_folder'] = "../data/3C405_x136060/"
globals()['infiles'] = ['x1360601t_c0f.fits','x1360602t_c0f.fits','x1360603t_c0f.fits']
globals()['plots_folder'] = "../plots/3C405_x136060/"
#globals()['data_folder'] = "../data/CygnusA_x43w0/"
#globals()['infiles'] = ['x43w0101r_c0f.fits', 'x43w0102r_c0f.fits', 'x43w0103r_c0f.fits',
@@ -146,10 +145,10 @@ def main():
crop = False #Crop to desired ROI
final_display = True
# Polarization map output
figname = 'IC5063_FOC' #target/intrument name
figname = '3C405_FOC' #target/intrument name
figtype = '_combine_FWHM020' #additionnal informations
SNRp_cut = 3. #P measurments with SNR>3
SNRi_cut = 30. #I measurments with SNR>30, which implies an uncertainty in P of 4.7%.
SNRp_cut = 5. #P measurments with SNR>3
SNRi_cut = 50. #I measurments with SNR>30, which implies an uncertainty in P of 4.7%.
step_vec = 1 #plot all vectors in the array. if step_vec = 2, then every other vector will be plotted
# if step_vec = 0 then all vectors are displayed at full length

Binary file not shown.

Before

Width:  |  Height:  |  Size: 91 KiB

Binary file not shown.

Before

Width:  |  Height:  |  Size: 76 KiB

View File

@@ -68,9 +68,9 @@ for d in [data_S, data_K]:
d['I_dil'] = np.sum(d['I'][d['mask']])
d['sI_dil'] = np.sqrt(np.sum(d['sI'][d['mask']]**2))
d['Q_dil'] = np.sum(d['Q'][d['mask']])
d['sQ_dil'] = np.sqrt(np.sum(d['sQ'][d['mask']])**2)
d['sQ_dil'] = np.sqrt(np.sum(d['sQ'][d['mask']]**2))
d['U_dil'] = np.sum(d['U'][d['mask']])
d['sU_dil'] = np.sqrt(np.sum(d['sU'][d['mask']])**2)
d['sU_dil'] = np.sqrt(np.sum(d['sU'][d['mask']]**2))
d['P_dil'] = np.sqrt(d['Q_dil']**2+d['U_dil']**2)/d['I_dil']
d['sP_dil'] = np.sqrt((d['Q_dil']**2*d['sQ_dil']**2+d['U_dil']**2*d['sU_dil']**2)/(d['Q_dil']**2+d['U_dil']**2)+((d['Q_dil']/d['I_dil'])**2+(d['U_dil']/d['I_dil'])**2)*d['sI_dil']**2)/d['I_dil']

View File

@@ -51,11 +51,11 @@ from astropy.io import fits
def princ_angle(ang):
"""
Return the principal angle in the -180° to 180° quadrant.
Return the principal angle in the 0° to 360° quadrant.
"""
while ang <= -180.:
while ang <= 0.:
ang += 360.
while ang > 180.:
while ang > 360.:
ang -= 360.
return ang
@@ -1349,11 +1349,13 @@ class pol_map(object):
def update_snri(val):
self.SNRi = val
self.pol_vector()
self.pol_int()
self.fig.canvas.draw_idle()
def update_snrp(val):
self.SNRp = val
self.pol_vector()
self.pol_int()
self.fig.canvas.draw_idle()
def reset_snr(event):
@@ -1786,6 +1788,30 @@ class pol_map(object):
P_reg_err = self.Stokes[0].header['P_int_err']
PA_reg = self.Stokes[0].header['PA_int']
PA_reg_err = self.Stokes[0].header['PA_int_err']
s_I = np.sqrt(self.IQU_cov[0,0])
s_Q = np.sqrt(self.IQU_cov[1,1])
s_U = np.sqrt(self.IQU_cov[2,2])
s_IQ = self.IQU_cov[0,1]
s_IU = self.IQU_cov[0,2]
s_QU = self.IQU_cov[1,2]
I_cut = self.I[self.cut].sum()
Q_cut = self.Q[self.cut].sum()
U_cut = self.U[self.cut].sum()
I_cut_err = np.sqrt(np.sum(s_I[self.cut]**2))
Q_cut_err = np.sqrt(np.sum(s_Q[self.cut]**2))
U_cut_err = np.sqrt(np.sum(s_U[self.cut]**2))
IQ_cut_err = np.sqrt(np.sum(s_IQ[self.cut]**2))
IU_cut_err = np.sqrt(np.sum(s_IU[self.cut]**2))
QU_cut_err = np.sqrt(np.sum(s_QU[self.cut]**2))
P_cut = np.sqrt(Q_cut**2+U_cut**2)/I_cut
P_cut_err = np.sqrt((Q_cut**2*Q_cut_err**2 + U_cut**2*U_cut_err**2 + 2.*Q_cut*U_cut*QU_cut_err)/(Q_cut**2 + U_cut**2) + ((Q_cut/I_cut)**2 + (U_cut/I_cut)**2)*I_cut_err**2 - 2.*(Q_cut/I_cut)*IQ_cut_err - 2.*(U_cut/I_cut)*IU_cut_err)/I_cut
PA_cut = princ_angle(np.degrees((1./2.)*np.arctan2(U_cut,Q_cut)))
PA_cut_err = princ_angle(np.degrees((1./(2.*(Q_cut**2+U_cut**2)))*np.sqrt(U_cut**2*Q_cut_err**2 + Q_cut**2*U_cut_err**2 - 2.*Q_cut*U_cut*QU_cut_err)))
else:
n_pix = self.I[self.region].size
s_I = np.sqrt(self.IQU_cov[0,0])
@@ -1808,9 +1834,26 @@ class pol_map(object):
P_reg = np.sqrt(Q_reg**2+U_reg**2)/I_reg
P_reg_err = np.sqrt((Q_reg**2*Q_reg_err**2 + U_reg**2*U_reg_err**2 + 2.*Q_reg*U_reg*QU_reg_err)/(Q_reg**2 + U_reg**2) + ((Q_reg/I_reg)**2 + (U_reg/I_reg)**2)*I_reg_err**2 - 2.*(Q_reg/I_reg)*IQ_reg_err - 2.*(U_reg/I_reg)*IU_reg_err)/I_reg
PA_reg = np.degrees((1./2.)*np.arctan2(U_reg,Q_reg))
PA_reg = princ_angle(np.degrees((1./2.)*np.arctan2(U_reg,Q_reg)))
PA_reg_err = princ_angle(np.degrees((1./(2.*(Q_reg**2+U_reg**2)))*np.sqrt(U_reg**2*Q_reg_err**2 + Q_reg**2*U_reg_err**2 - 2.*Q_reg*U_reg*QU_reg_err)))
new_cut = np.logical_and(self.region, self.cut)
I_cut = self.I[new_cut].sum()
Q_cut = self.Q[new_cut].sum()
U_cut = self.U[new_cut].sum()
I_cut_err = np.sqrt(np.sum(s_I[new_cut]**2))
Q_cut_err = np.sqrt(np.sum(s_Q[new_cut]**2))
U_cut_err = np.sqrt(np.sum(s_U[new_cut]**2))
IQ_cut_err = np.sqrt(np.sum(s_IQ[new_cut]**2))
IU_cut_err = np.sqrt(np.sum(s_IU[new_cut]**2))
QU_cut_err = np.sqrt(np.sum(s_QU[new_cut]**2))
P_cut = np.sqrt(Q_cut**2+U_cut**2)/I_cut
P_cut_err = np.sqrt((Q_cut**2*Q_cut_err**2 + U_cut**2*U_cut_err**2 + 2.*Q_cut*U_cut*QU_cut_err)/(Q_cut**2 + U_cut**2) + ((Q_cut/I_cut)**2 + (U_cut/I_cut)**2)*I_cut_err**2 - 2.*(Q_cut/I_cut)*IQ_cut_err - 2.*(U_cut/I_cut)*IU_cut_err)/I_cut
PA_cut = princ_angle(np.degrees((1./2.)*np.arctan2(U_cut,Q_cut)))
PA_cut_err = princ_angle(np.degrees((1./(2.*(Q_cut**2+U_cut**2)))*np.sqrt(U_cut**2*Q_cut_err**2 + Q_cut**2*U_cut_err**2 - 2.*Q_cut*U_cut*QU_cut_err)))
if hasattr(self, 'cont'):
for coll in self.cont.collections:
try:
@@ -1824,13 +1867,13 @@ class pol_map(object):
ax = self.ax
if hasattr(self, 'an_int'):
self.an_int.remove()
self.an_int = ax.annotate(r"$F_{{\lambda}}^{{int}}$({0:.0f} $\AA$) = {1} $ergs \cdot cm^{{-2}} \cdot s^{{-1}} \cdot \AA^{{-1}}$".format(self.pivot_wav,sci_not(I_reg*self.convert_flux,I_reg_err*self.convert_flux,2))+"\n"+r"$P^{{int}}$ = {0:.1f} $\pm$ {1:.1f} %".format(P_reg*100.,P_reg_err*100.)+"\n"+r"$\theta_{{P}}^{{int}}$ = {0:.1f} $\pm$ {1:.1f} °".format(PA_reg,PA_reg_err), color='white', fontsize=12, xy=(0.01, 0.90), xycoords='axes fraction')
self.an_int = ax.annotate(r"$F_{{\lambda}}^{{int}}$({0:.0f} $\AA$) = {1} $ergs \cdot cm^{{-2}} \cdot s^{{-1}} \cdot \AA^{{-1}}$".format(self.pivot_wav,sci_not(I_reg*self.convert_flux,I_reg_err*self.convert_flux,2))+"\n"+r"$P^{{int}}$ = {0:.1f} $\pm$ {1:.1f} %".format(P_reg*100.,np.ceil(P_reg_err*1000.)/10.)+"\n"+r"$\theta_{{P}}^{{int}}$ = {0:.1f} $\pm$ {1:.1f} °".format(PA_reg,np.ceil(PA_reg_err*10.)/10.)+"\n"+r"$P^{{cut}}$ = {0:.1f} $\pm$ {1:.1f} %".format(P_cut*100.,np.ceil(P_cut_err*1000.)/10.)+"\n"+r"$\theta_{{P}}^{{cut}}$ = {0:.1f} $\pm$ {1:.1f} °".format(PA_cut,np.ceil(PA_cut_err*10.)/10.), color='white', fontsize=12, xy=(0.01, 0.85), xycoords='axes fraction')
if not self.region is None:
self.cont = ax.contour(self.region.astype(float),levels=[0.5], colors='white', linewidths=0.8)
fig.canvas.draw_idle()
return self.an_int
else:
ax.annotate(r"$F_{{\lambda}}^{{int}}$({0:.0f} $\AA$) = {1} $ergs \cdot cm^{{-2}} \cdot s^{{-1}} \cdot \AA^{{-1}}$".format(self.pivot_wav,sci_not(I_reg*self.convert_flux,I_reg_err*self.convert_flux,2))+"\n"+r"$P^{{int}}$ = {0:.1f} $\pm$ {1:.1f} %".format(P_reg*100.,P_reg_err*100.)+"\n"+r"$\theta_{{P}}^{{int}}$ = {0:.1f} $\pm$ {1:.1f} °".format(PA_reg,PA_reg_err), color='white', fontsize=12, xy=(0.01, 0.94), xycoords='axes fraction')
ax.annotate(r"$F_{{\lambda}}^{{int}}$({0:.0f} $\AA$) = {1} $ergs \cdot cm^{{-2}} \cdot s^{{-1}} \cdot \AA^{{-1}}$".format(self.pivot_wav,sci_not(I_reg*self.convert_flux,I_reg_err*self.convert_flux,2))+"\n"+r"$P^{{int}}$ = {0:.1f} $\pm$ {1:.1f} %".format(P_reg*100.,np.ceil(P_reg_err*1000.)/10.)+"\n"+r"$\theta_{{P}}^{{int}}$ = {0:.1f} $\pm$ {1:.1f} °".format(PA_reg,np.ceil(PA_reg_err*10.)/10.)+"\n"+r"$P^{{cut}}$ = {0:.1f} $\pm$ {1:.1f} %".format(P_cut*100.,np.ceil(P_cut_err*1000.)/10.)+"\n"+r"$\theta_{{P}}^{{cut}}$ = {0:.1f} $\pm$ {1:.1f} °".format(PA_cut,np.ceil(PA_cut_err*10.)/10.), color='white', fontsize=12, xy=(0.01, 0.90), xycoords='axes fraction')
if not self.region is None:
ax.contour(self.region.astype(float),levels=[0.5], colors='white', linewidths=0.8)
fig.canvas.draw_idle()

View File

@@ -491,10 +491,7 @@ def get_error(data_array, headers, error_array=None, data_mask=None,
#bkg = np.std(sub_image) # Previously computed using standard deviation over the background
bkg = np.sqrt(np.sum((sub_image-sub_image.mean())**2)/sub_image.size)
error_bkg[i] *= bkg
#Substract background
data_array[i] = np.abs(data_array[i] - sub_image.mean())
# Quadratically add uncertainties in the "correction factors" (see Kishimoto 1999)
#wavelength dependence of the polariser filters
#estimated to less than 1%
@@ -507,15 +504,18 @@ def get_error(data_array, headers, error_array=None, data_mask=None,
err_flat = data_array[i]*0.03
error_array[i] = np.sqrt(error_array[i]**2 + error_bkg[i]**2 + err_wav**2 + err_psf**2 + err_flat**2)
#Substract background
data_array[i] = np.abs(data_array[i] - sub_image.mean())
background[i] = sub_image.sum()
if (data_array[i] < 0.).any():
print(data_array[i])
#if i==0:
#np.savetext("output/s_bg.txt",error_bkg[i])
#np.savetext("output/s_wav.txt",err_wav)
#np.savetext("output/s_psf.txt",err_psf)
#np.savetext("output/s_flat.txt",err_flat)
#np.savetxt("output/s_bg.txt",error_bkg[i])
#np.savetxt("output/s_wav.txt",err_wav)
#np.savetxt("output/s_psf.txt",err_psf)
#np.savetxt("output/s_flat.txt",err_flat)
if display:
plt.rcParams.update({'font.size': 10})
@@ -846,7 +846,7 @@ def align_data(data_array, headers, error_array=None, upsample_factor=1.,
rescaled_error[i] = np.sqrt(rescaled_error[i]**2 + error_shift**2)
#if i==1:
#np.savetext("output/s_shift.txt",error_shift)
#np.savetxt("output/s_shift.txt",error_shift)
shifts.append(shift)
errors.append(error)
@@ -1259,9 +1259,9 @@ def compute_Stokes(data_array, error_array, data_mask, headers,
s_I2_axis = np.sum([dI_dtheta[i]**2 * sigma_theta[i]**2 for i in range(len(sigma_theta))],axis=0)
s_Q2_axis = np.sum([dQ_dtheta[i]**2 * sigma_theta[i]**2 for i in range(len(sigma_theta))],axis=0)
s_U2_axis = np.sum([dU_dtheta[i]**2 * sigma_theta[i]**2 for i in range(len(sigma_theta))],axis=0)
#np.savetext("output/sI_dir.txt", np.sqrt(s_I2_axis))
#np.savetext("output/sQ_dir.txt", np.sqrt(s_Q2_axis))
#np.savetext("output/sU_dir.txt", np.sqrt(s_U2_axis))
#np.savetxt("output/sI_dir.txt", np.sqrt(s_I2_axis))
#np.savetxt("output/sQ_dir.txt", np.sqrt(s_Q2_axis))
#np.savetxt("output/sU_dir.txt", np.sqrt(s_U2_axis))
# Add quadratically the uncertainty to the Stokes covariance matrix
Stokes_cov[0,0] += s_I2_axis
@@ -1296,7 +1296,7 @@ def compute_Stokes(data_array, error_array, data_mask, headers,
P_diluted = np.sqrt(Q_diluted**2+U_diluted**2)/I_diluted
P_diluted_err = (1./I_diluted)*np.sqrt((Q_diluted**2*Q_diluted_err**2 + U_diluted**2*U_diluted_err**2 + 2.*Q_diluted*U_diluted*QU_diluted_err)/(Q_diluted**2 + U_diluted**2) + ((Q_diluted/I_diluted)**2 + (U_diluted/I_diluted)**2)*I_diluted_err**2 - 2.*(Q_diluted/I_diluted)*IQ_diluted_err - 2.*(U_diluted/I_diluted)*IU_diluted_err)
PA_diluted = np.degrees((1./2.)*np.arctan2(U_diluted,Q_diluted))
PA_diluted = princ_angle(np.degrees((1./2.)*np.arctan2(U_diluted,Q_diluted)))
PA_diluted_err = princ_angle(np.degrees((1./(2.*(Q_diluted**2 + U_diluted**2)))*np.sqrt(U_diluted**2*Q_diluted_err**2 + Q_diluted**2*U_diluted_err**2 - 2.*Q_diluted*U_diluted*QU_diluted_err)))
for header in headers:
@@ -1555,7 +1555,7 @@ def rotate_Stokes(I_stokes, Q_stokes, U_stokes, Stokes_cov, data_mask, headers,
P_diluted = np.sqrt(Q_diluted**2+U_diluted**2)/I_diluted
P_diluted_err = (1./I_diluted)*np.sqrt((Q_diluted**2*Q_diluted_err**2 + U_diluted**2*U_diluted_err**2 + 2.*Q_diluted*U_diluted*QU_diluted_err)/(Q_diluted**2 + U_diluted**2) + ((Q_diluted/I_diluted)**2 + (U_diluted/I_diluted)**2)*I_diluted_err**2 - 2.*(Q_diluted/I_diluted)*IQ_diluted_err - 2.*(U_diluted/I_diluted)*IU_diluted_err)
PA_diluted = np.degrees((1./2.)*np.arctan2(U_diluted,Q_diluted))
PA_diluted = princ_angle(np.degrees((1./2.)*np.arctan2(U_diluted,Q_diluted)))
PA_diluted_err = princ_angle(np.degrees((1./(1.*(Q_diluted**2 + U_diluted**2)))*np.sqrt(U_diluted**2*Q_diluted_err**2 + Q_diluted**2*U_diluted_err**2 - 2.*Q_diluted*U_diluted*QU_diluted_err)))
for header in new_headers: