add a correction factor so that all polarizer filters have the same transmission efficiency
BIN  plots/NGC1068_x274020/NGC1068_FOC_aper1arcsec.png (new file)
BIN  (nine further plot images, modified)
BIN  plots/NGC1068_x274020/NGC1068_FOC_combine_FWHM010_cropped.png (new file)
BIN  src/Figure_1.png (modified)
BIN  src/Figure_2.png (new file)
@@ -49,7 +49,9 @@ for d in [data_S, data_K]:
     d['SNRi'] = np.zeros(d['I'].shape)
     d['SNRi'][d['sI']>0.] = d['I'][d['sI']>0.]/d['sI'][d['sI']>0.]
     d['mask'] = np.logical_and(d['SNRi']>30,d['SNRp']>5)
 
+data_S['mask'], data_K['mask'] = np.logical_and(data_S['mask'],data_K['mask']), np.logical_and(data_S['mask'],data_K['mask'])
+for d in [data_S, data_K]:
     d['X'], d['Y'] = np.meshgrid(np.arange(d['I'].shape[1]), np.arange(d['I'].shape[0]))
     d['xy_U'], d['xy_V'] = np.where(d['mask'],d['P']*np.cos(np.pi/2.+d['PA']*np.pi/180.), np.nan), np.where(d['mask'],d['P']*np.sin(np.pi/2.+d['PA']*np.pi/180.), np.nan)
 
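Note: the intersection added above (np.logical_and(data_S['mask'], data_K['mask'])) forces the two pipelines to be compared on exactly the same pixels. A minimal sketch of the per-pipeline cut, with illustrative arrays in place of the script's data_S/data_K:

    import numpy as np

    # Illustrative stand-ins for one pipeline's maps: intensity I, its error sI,
    # and a precomputed SNR on the polarization degree
    I = np.random.rand(10, 10)
    sI = np.full((10, 10), 0.01)
    SNRp = 10. * np.random.rand(10, 10)

    SNRi = np.zeros(I.shape)
    good = sI > 0.                      # guard against division by a null error
    SNRi[good] = I[good] / sI[good]
    mask = np.logical_and(SNRi > 30, SNRp > 5)

Intersecting the two pipelines' masks afterwards guarantees the quiver plots below draw vectors for a common set of pixels.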
@@ -60,7 +62,6 @@ quiv0 = ax.quiver(data_S['X'],data_S['Y'],data_S['xy_U'],data_S['xy_V'],units='x
 quiv1 = ax.quiver(data_K['X'],data_K['Y'],data_K['xy_U'],data_K['xy_V'],units='xy',angles='uv',scale=0.5,scale_units='xy',pivot='mid',headwidth=0.,headlength=0.,headaxislength=0.,width=0.1,color='r',alpha=0.75, label="PA through Kishimoto's pipeline")
 ax.set_title(r"$SNR_P \geq 5 \; & \; SNR_I \geq 30$")
 fig.legend()
-plt.show()
 
 #compute integrated polarization parameters on a specific cut
 for d in [data_S, data_K]:
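Note: in the quiver calls above, headwidth=headlength=headaxislength=0 with pivot='mid' draws headless segments centred on each pixel, the usual rendering for polarization pseudo-vectors, which carry an orientation modulo 180° rather than a direction; the np.pi/2 offset in xy_U/xy_V shifts the position angle from its North reference to matplotlib's x-axis convention. A standalone sketch with made-up P and PA maps:

    import numpy as np
    import matplotlib.pyplot as plt

    # Made-up polarization degree P and position angle PA (degrees) on a small grid
    P = np.random.rand(8, 8)
    PA = 180. * np.random.rand(8, 8)
    X, Y = np.meshgrid(np.arange(8), np.arange(8))
    U = P * np.cos(np.pi/2. + PA * np.pi/180.)
    V = P * np.sin(np.pi/2. + PA * np.pi/180.)

    fig, ax = plt.subplots()
    # Zero-size heads turn the arrows into plain segments; pivot='mid' centres them
    ax.quiver(X, Y, U, V, units='xy', angles='uv', scale=0.5, scale_units='xy',
              pivot='mid', headwidth=0., headlength=0., headaxislength=0., width=0.1)
    plt.show()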
@@ -79,8 +80,12 @@ print('From my pipeline :\n', "P = {0:.2f} ± {1:.2f} %\n".format(data_S['P_dil'
 print("From Kishimoto's pipeline :\n", "P = {0:.2f} ± {1:.2f} %\n".format(data_K['P_dil']*100.,data_K['sP_dil']*100.), "PA = {0:.2f} ± {1:.2f} °".format(data_K['PA_dil'],data_K['sPA_dil']))
 
 #compare different types of error
-print("My pipeline : average sI/I={0:.2f} ; sQ/Q={1:.2f} ; sU/U={2:.2f} ; sP/P={3:.2f}".format(np.mean(data_S['sI'][data_S['mask']]/data_S['I'][data_S['mask']]),np.mean(np.abs(data_S['sQ'][data_S['mask']]/data_S['Q'][data_S['mask']])),np.mean(np.abs(data_S['sU'][data_S['mask']]/data_S['U'][data_S['mask']])),np.mean(data_S['sP'][data_S['mask']]/data_S['P'][data_S['mask']])))
-print("Kishimoto's pipeline : average sI/I={0:.2f} ; sQ/Q={1:.2f} ; sU/U={2:.2f} ; sP/P={3:.2f}".format(np.mean(data_K['sI'][data_K['mask']]/data_K['I'][data_K['mask']]),np.mean(np.abs(data_K['sQ'][data_K['mask']]/data_K['Q'][data_K['mask']])),np.mean(np.abs(data_K['sU'][data_K['mask']]/data_K['U'][data_K['mask']])),np.mean(data_K['sP'][data_K['mask']]/data_K['P'][data_K['mask']])))
+yy, xx = np.indices(data_S['mask'].shape)
+mask_ind = np.array([[y,x] for y,x in zip(yy[data_S['mask']],xx[data_S['mask']])])
+index = mask_ind[np.random.randint(len(mask_ind))]
+print("My pipeline : sI/I={0:.2f} ; sQ/Q={1:.2f} ; sU/U={2:.2f} ; sP/P={3:.2f}".format(np.mean(data_S['sI'][index[0],index[1]]/data_S['I'][index[0],index[1]]),np.mean(data_S['sQ'][index[0],index[1]]/data_S['Q'][index[0],index[1]]),np.mean(data_S['sU'][index[0],index[1]]/data_S['U'][index[0],index[1]]),np.mean(data_S['sP'][index[0],index[1]]/data_S['P'][index[0],index[1]])))
+print("Kishimoto's pipeline : sI/I={0:.2f} ; sQ/Q={1:.2f} ; sU/U={2:.2f} ; sP/P={3:.2f}".format(np.mean(data_K['sI'][index[0],index[1]]/data_K['I'][index[0],index[1]]),np.mean(data_K['sQ'][index[0],index[1]]/data_K['Q'][index[0],index[1]]),np.mean(data_K['sU'][index[0],index[1]]/data_K['U'][index[0],index[1]]),np.mean(data_K['sP'][index[0],index[1]]/data_K['P'][index[0],index[1]])))
+print("For random pixel in cut at {}".format(index))
 for d,i in zip(['I','Q','U','P','PA','sI','sQ','sU','sP','sPA'],[0,1,2,5,8,(3,0,0),(3,1,1),(3,2,2),6,9]):
     data_K[d] = np.loadtxt(path_join(root_dir_K,d+'.txt'))
 with fits.open(path_join(root_dir_data_S,'NGC1068_K_FOC_bin10px.fits')) as f:
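Note: the replacement above trades the mask-averaged error ratios for a spot check of a single randomly drawn masked pixel (the unpacking is written yy, xx to match the row-then-column order np.indices actually returns, so index is a valid (row, col) pair). The pixel-picking idiom in isolation, using a more compact form of the mask_ind construction:

    import numpy as np

    mask = np.random.rand(10, 10) > 0.5                  # any boolean image
    yy, xx = np.indices(mask.shape)                      # per-pixel row/column coordinates
    mask_ind = np.column_stack((yy[mask], xx[mask]))     # (N, 2) array of masked pixels
    index = mask_ind[np.random.randint(len(mask_ind))]   # one random (y, x) pair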
@@ -94,3 +99,5 @@ for d,i in zip(['I','Q','U','P','PA','sI','sQ','sU','sP','sPA'],[0,1,2,5,8,(3,0,
 #from Kishimoto's pipeline : IQU_dir, IQU_shift, IQU_stat, IQU_trans
 #from my pipeline : raw_bg, raw_flat, raw_psf, raw_shift, raw_wav, IQU_dir
 # but errors from my pipeline are propagated all along, so how to compare them?
+
+plt.show()
@@ -509,11 +509,11 @@ def get_error(data_array, headers, error_array=None, data_mask=None,
         background[i] = sub_image.sum()
         if (data_array[i] < 0.).any():
             print(data_array[i])
-        if i==0:
-            np.savetxt("output/s_bg.txt",error_bkg[i])
-            np.savetxt("output/s_wav.txt",err_wav)
-            np.savetxt("output/s_psf.txt",err_psf)
-            np.savetxt("output/s_flat.txt",err_flat)
+        #if i==0:
+        #    np.savetxt("output/s_bg.txt",error_bkg[i])
+        #    np.savetxt("output/s_wav.txt",err_wav)
+        #    np.savetxt("output/s_psf.txt",err_psf)
+        #    np.savetxt("output/s_flat.txt",err_flat)
 
     if display:
         plt.rcParams.update({'font.size': 10})
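Note: rather than toggling these dumps by commenting them in and out, as this diff does, the same effect can come from a module-level switch; one possible pattern, where DEBUG_DUMPS and maybe_dump are names introduced here for illustration and not part of the pipeline:

    import numpy as np

    DEBUG_DUMPS = False   # flip to True to write intermediate error maps to output/

    def maybe_dump(name, arr):
        """Write arr to output/<name>.txt only when debug dumps are enabled."""
        if DEBUG_DUMPS:
            np.savetxt("output/{}.txt".format(name), arr)

    # Inside get_error's loop, the commented block would then read, e.g.:
    # if i == 0:
    #     maybe_dump("s_bg", error_bkg[i])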
@@ -843,8 +843,8 @@ def align_data(data_array, headers, error_array=None, upsample_factor=1.,
         #sum quadratically the errors
         rescaled_error[i] = np.sqrt(rescaled_error[i]**2 + error_shift**2)
 
-        if i==1:
-            np.savetxt("output/s_shift.txt",error_shift)
+        #if i==1:
+        #    np.savetxt("output/s_shift.txt",error_shift)
 
         shifts.append(shift)
         errors.append(error)
@@ -1204,12 +1204,14 @@ def compute_Stokes(data_array, error_array, data_mask, headers,
     transmit3 = np.min([trans3[header['filtnam3'].lower()] for header in headers])
     transmit4 = np.min([trans4[header['filtnam4'].lower()] for header in headers])
     transmit *= transmit2*transmit3*transmit4
 
     pol_eff = np.array([pol_efficiency['pol0'], pol_efficiency['pol60'], pol_efficiency['pol120']])
 
+    # Correction factor bringing all polarizer filters to the same transmission efficiency
+    corr = np.array([1.0*h['photflam']/h['exptime'] for h in pol_headers])*pol_headers[0]['exptime']/pol_headers[0]['photflam']
 
     # Orientation and error for each polarizer
     fmax = np.finfo(np.float64).max
-    pol_flux = np.array([pol0, pol60, pol120])
+    pol_flux = np.array([corr[0]*pol0, corr[1]*pol60, corr[2]*pol120])
 
     coeff_stokes = np.zeros((3,3))
     # Coefficients linking each polarizer flux to each Stokes parameter
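Note: the new corr array is the commit's headline change. Each polarizer image is in counts, so equal counts do not mean equal flux when the three polarizer filters have different throughputs; multiplying by photflam/exptime converts counts to flux, and dividing by the first polarizer's photflam/exptime re-expresses that flux on the POL0 count scale, i.e. corr[i] = (photflam_i/exptime_i) / (photflam_0/exptime_0), with corr[0] = 1 by construction. A worked sketch with made-up header values:

    import numpy as np

    # Made-up (photflam, exptime) pairs for POL0, POL60, POL120: illustrative only
    pol_headers = [{'photflam': 2.0e-18, 'exptime': 600.},
                   {'photflam': 2.2e-18, 'exptime': 600.},
                   {'photflam': 2.1e-18, 'exptime': 450.}]

    # Same expression as in compute_Stokes above
    corr = np.array([1.0*h['photflam']/h['exptime'] for h in pol_headers]) \
           * pol_headers[0]['exptime'] / pol_headers[0]['photflam']

    print(corr)   # -> [1.  1.1  1.4] : POL60 and POL120 counts are rescaled to the
                  #    effective transmission of POL0 before deriving Stokes I, Q, U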
@@ -1255,9 +1257,9 @@ def compute_Stokes(data_array, error_array, data_mask, headers,
     s_I2_axis = np.sum([dI_dtheta[i]**2 * sigma_theta[i]**2 for i in range(len(sigma_theta))],axis=0)
     s_Q2_axis = np.sum([dQ_dtheta[i]**2 * sigma_theta[i]**2 for i in range(len(sigma_theta))],axis=0)
     s_U2_axis = np.sum([dU_dtheta[i]**2 * sigma_theta[i]**2 for i in range(len(sigma_theta))],axis=0)
-    np.savetxt("output/sI_dir.txt", np.sqrt(s_I2_axis))
-    np.savetxt("output/sQ_dir.txt", np.sqrt(s_Q2_axis))
-    np.savetxt("output/sU_dir.txt", np.sqrt(s_U2_axis))
+    #np.savetxt("output/sI_dir.txt", np.sqrt(s_I2_axis))
+    #np.savetxt("output/sQ_dir.txt", np.sqrt(s_Q2_axis))
+    #np.savetxt("output/sU_dir.txt", np.sqrt(s_U2_axis))
 
     # Add quadratically the uncertainty to the Stokes covariance matrix
     Stokes_cov[0,0] += s_I2_axis
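Note: the three sums in this last hunk are the first-order propagation of the polarizer-axis angle uncertainties into the Stokes maps,

    \sigma_X^2 = \sum_i \left(\frac{\partial X}{\partial \theta_i}\right)^2 \sigma_{\theta_i}^2,
    \qquad X \in \{I, Q, U\},

each term then being added in quadrature to the corresponding diagonal entry of the Stokes covariance matrix, as done for Stokes_cov[0,0] above.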