add code to compare to Kishimoto's pipeline output
@@ -148,7 +148,7 @@ def main():
     # Polarization map output
     figname = 'NGC1068_K_FOC'  # target/instrument name
     figtype = '_bin10px'  # additional information
-    SNRp_cut = 3.  # P measurements with SNR > 3
+    SNRp_cut = 5.  # P measurements with SNR > 5
     SNRi_cut = 30.  # I measurements with SNR > 30, which implies an uncertainty in P of 4.7%.
     step_vec = 1  # plot all vectors in the array; if step_vec = 2, every other vector is plotted
     # if step_vec = 0 then all vectors are displayed at full length
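The 4.7% figure follows from first-order error propagation on P = sqrt(Q² + U²)/I: for an ideal 0°/60°/120° polarizer triplet with equal per-image noise, sigma_Q ≈ sigma_U ≈ sqrt(2)·sigma_I, so for small P, sigma_P ≈ sqrt(2)/SNR_I. A minimal numerical check under those assumptions:

    import numpy as np

    # sigma_P ~ sigma_Q / I = sqrt(2) / SNR_I, assuming sigma_Q ~ sqrt(2)*sigma_I and P << 1
    snr_i = 30.0
    print("sigma_P ~ {0:.1%}".format(np.sqrt(2.0) / snr_i))  # -> 4.7%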
@@ -164,7 +164,6 @@ def main():
     # Deconvolve data using Richardson-Lucy iterative algorithm with a gaussian PSF of given FWHM.
     if deconvolve:
         data_array = proj_red.deconvolve_array(data_array, headers, psf=psf, FWHM=psf_FWHM, scale=psf_scale, shape=psf_shape, iterations=iterations, algo=algo)

     # Rotate data to have North up
     if rotate_data:
         data_mask = np.ones(data_array.shape[1:]).astype(bool)
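proj_red.deconvolve_array is defined elsewhere in the repository; as a hedged stand-in, the same Richardson-Lucy step with a gaussian PSF can be sketched with scikit-image (gaussian_psf, fwhm_px and size are illustrative names, not the pipeline's API):

    import numpy as np
    from skimage.restoration import richardson_lucy  # num_iter is named iterations in older releases

    def gaussian_psf(fwhm_px, size=25):
        """Normalized 2-D gaussian PSF with the given FWHM in pixels."""
        sigma = fwhm_px / (2.0 * np.sqrt(2.0 * np.log(2.0)))
        y, x = np.indices((size, size)) - size // 2
        psf = np.exp(-(x**2 + y**2) / (2.0 * sigma**2))
        return psf / psf.sum()

    # deconvolved = richardson_lucy(image, gaussian_psf(fwhm_px=3.0), num_iter=20)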
BIN src/Figure_1.png (new binary file, 91 KiB; not shown)
src/comparison_Kishimoto.py (new executable file, 96 lines)
@@ -0,0 +1,96 @@
#!/usr/bin/env python
from lib.reduction import align_data, princ_angle
from lib.deconvolve import zeropad
from matplotlib.colors import LogNorm
from os.path import join as path_join
from os import walk as path_walk
from astropy.io import fits
from re import compile as regcompile, IGNORECASE
from scipy.ndimage import shift
import numpy as np
import matplotlib.pyplot as plt

root_dir = path_join('/home/t.barnouin/Thesis/HST')
root_dir_K = path_join(root_dir, 'Kishimoto', 'output')
root_dir_S = path_join(root_dir, 'FOC_Reduction', 'output')
root_dir_data_S = path_join(root_dir, 'FOC_Reduction', 'data', 'NGC1068_x274020')

# Load Kishimoto's text maps and the corresponding HDUs from the pipeline's FITS output.
data_K = {}
data_S = {}
for d, i in zip(['I', 'Q', 'U', 'P', 'PA', 'sI', 'sQ', 'sU', 'sP', 'sPA'], [0, 1, 2, 5, 8, (3, 0, 0), (3, 1, 1), (3, 2, 2), 6, 9]):
    data_K[d] = np.loadtxt(path_join(root_dir_K, d + '.txt'))
    with fits.open(path_join(root_dir_data_S, 'NGC1068_K_FOC_bin10px.fits')) as f:
        if not isinstance(i, int):
            # Tuple indices point into the covariance HDU: take the sqrt of the diagonal.
            data_S[d] = np.sqrt(f[i[0]].data[i[1], i[2]])
        else:
            data_S[d] = f[i].data
            if i == 0:
                header = f[i].header

# Zero-pad Kishimoto's arrays so both datasets share the same shape.
shape = data_S['I'].shape
for d in data_K:
    data_K[d] = zeropad(data_K[d], shape)

# Shift the arrays so the same information sits in the same pixel.
data_arr, error_ar, heads, data_msk, shifts, shifts_err = align_data(np.array([data_S['I'], data_K['I']]), [header, header], upsample_factor=10., return_shifts=True)
for d in data_K:
    data_K[d] = shift(data_K[d], shifts[1], order=1, cval=0.)

# Compute the polarization components from the shifted arrays.
for d in [data_S, data_K]:
    for i in d:
        d[i][np.isnan(d[i])] = 0.
    d['P'] = np.where(d['I'] > 0., np.sqrt(d['Q']**2 + d['U']**2)/d['I'], 0.)
    d['sP'] = np.where(d['I'] > 0., np.sqrt((d['Q']**2*d['sQ']**2 + d['U']**2*d['sU']**2)/(d['Q']**2 + d['U']**2) + ((d['Q']/d['I'])**2 + (d['U']/d['I'])**2)*d['sI']**2)/d['I'], 0.)
    d['PA'] = (90./np.pi)*np.arctan2(d['U'], d['Q'])
    d['SNRp'] = np.zeros(d['P'].shape)
    d['SNRp'][d['sP'] > 0.] = d['P'][d['sP'] > 0.]/d['sP'][d['sP'] > 0.]
    d['SNRi'] = np.zeros(d['I'].shape)
    d['SNRi'][d['sI'] > 0.] = d['I'][d['sI'] > 0.]/d['sI'][d['sI'] > 0.]
    d['mask'] = np.logical_and(d['SNRi'] > 30, d['SNRp'] > 5)

    d['X'], d['Y'] = np.meshgrid(np.arange(d['I'].shape[1]), np.arange(d['I'].shape[0]))
    d['xy_U'], d['xy_V'] = np.where(d['mask'], d['P']*np.cos(np.pi/2. + d['PA']*np.pi/180.), np.nan), np.where(d['mask'], d['P']*np.sin(np.pi/2. + d['PA']*np.pi/180.), np.nan)

# Display both polarization maps to check their consistency.
fig, ax = plt.subplots()
im0 = ax.imshow(data_S['I'], norm=LogNorm(data_S['I'][data_S['I'] > 0].min(), data_S['I'][data_S['I'] > 0].max()), origin='lower', cmap='gray', label=r"$I_{STOKES}$ through my pipeline")
quiv0 = ax.quiver(data_S['X'], data_S['Y'], data_S['xy_U'], data_S['xy_V'], units='xy', angles='uv', scale=0.5, scale_units='xy', pivot='mid', headwidth=0., headlength=0., headaxislength=0., width=0.1, color='b', alpha=0.75, label="PA through my pipeline")
quiv1 = ax.quiver(data_K['X'], data_K['Y'], data_K['xy_U'], data_K['xy_V'], units='xy', angles='uv', scale=0.5, scale_units='xy', pivot='mid', headwidth=0., headlength=0., headaxislength=0., width=0.1, color='r', alpha=0.75, label="PA through Kishimoto's pipeline")
ax.set_title(r"$SNR_P \geq 5 \; & \; SNR_I \geq 30$")
fig.legend()
plt.show()

# Compute the integrated polarization parameters on a given cut.
for d in [data_S, data_K]:
    d['I_dil'] = np.sum(d['I'][d['mask']])
    d['sI_dil'] = np.sqrt(np.sum(d['sI'][d['mask']]**2))
    d['Q_dil'] = np.sum(d['Q'][d['mask']])
    d['sQ_dil'] = np.sqrt(np.sum(d['sQ'][d['mask']]**2))
    d['U_dil'] = np.sum(d['U'][d['mask']])
    d['sU_dil'] = np.sqrt(np.sum(d['sU'][d['mask']]**2))

    d['P_dil'] = np.sqrt(d['Q_dil']**2 + d['U_dil']**2)/d['I_dil']
    d['sP_dil'] = np.sqrt((d['Q_dil']**2*d['sQ_dil']**2 + d['U_dil']**2*d['sU_dil']**2)/(d['Q_dil']**2 + d['U_dil']**2) + ((d['Q_dil']/d['I_dil'])**2 + (d['U_dil']/d['I_dil'])**2)*d['sI_dil']**2)/d['I_dil']
    d['PA_dil'] = princ_angle((90./np.pi)*np.arctan2(d['U_dil'], d['Q_dil']))
    d['sPA_dil'] = princ_angle((90./(np.pi*(d['Q_dil']**2 + d['U_dil']**2)))*np.sqrt(d['Q_dil']**2*d['sU_dil']**2 + d['U_dil']**2*d['sQ_dil']**2))
print("From my pipeline:\n", "P = {0:.2f} ± {1:.2f} %\n".format(data_S['P_dil']*100., data_S['sP_dil']*100.), "PA = {0:.2f} ± {1:.2f} °".format(data_S['PA_dil'], data_S['sPA_dil']))
print("From Kishimoto's pipeline:\n", "P = {0:.2f} ± {1:.2f} %\n".format(data_K['P_dil']*100., data_K['sP_dil']*100.), "PA = {0:.2f} ± {1:.2f} °".format(data_K['PA_dil'], data_K['sPA_dil']))

# Compare the different types of error.
print("My pipeline: average sI/I={0:.2f} ; sQ/Q={1:.2f} ; sU/U={2:.2f} ; sP/P={3:.2f}".format(np.mean(data_S['sI'][data_S['mask']]/data_S['I'][data_S['mask']]), np.mean(np.abs(data_S['sQ'][data_S['mask']]/data_S['Q'][data_S['mask']])), np.mean(np.abs(data_S['sU'][data_S['mask']]/data_S['U'][data_S['mask']])), np.mean(data_S['sP'][data_S['mask']]/data_S['P'][data_S['mask']])))
print("Kishimoto's pipeline: average sI/I={0:.2f} ; sQ/Q={1:.2f} ; sU/U={2:.2f} ; sP/P={3:.2f}".format(np.mean(data_K['sI'][data_K['mask']]/data_K['I'][data_K['mask']]), np.mean(np.abs(data_K['sQ'][data_K['mask']]/data_K['Q'][data_K['mask']])), np.mean(np.abs(data_K['sU'][data_K['mask']]/data_K['U'][data_K['mask']])), np.mean(data_K['sP'][data_K['mask']]/data_K['P'][data_K['mask']])))

# Reload the original maps (the arrays above were zeroed and shifted in place).
for d, i in zip(['I', 'Q', 'U', 'P', 'PA', 'sI', 'sQ', 'sU', 'sP', 'sPA'], [0, 1, 2, 5, 8, (3, 0, 0), (3, 1, 1), (3, 2, 2), 6, 9]):
    data_K[d] = np.loadtxt(path_join(root_dir_K, d + '.txt'))
    with fits.open(path_join(root_dir_data_S, 'NGC1068_K_FOC_bin10px.fits')) as f:
        if not isinstance(i, int):
            data_S[d] = np.sqrt(f[i[0]].data[i[1], i[2]])
        else:
            data_S[d] = f[i].data
            if i == 0:
                header = f[i].header

# From Kishimoto's pipeline: IQU_dir, IQU_shift, IQU_stat, IQU_trans
# From my pipeline: raw_bg, raw_flat, raw_psf, raw_shift, raw_wav, IQU_dir
# But errors from my pipeline are propagated all along; how to compare them then?
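align_data and zeropad above come from the repository's lib package. For readers without it, the sub-pixel alignment step can be sketched with scikit-image's phase cross-correlation (a hedged stand-in, not the pipeline's actual implementation; align_to is an illustrative name):

    import numpy as np
    from scipy.ndimage import shift as nd_shift
    from skimage.registration import phase_cross_correlation

    def align_to(reference, moving, upsample_factor=10):
        """Estimate the sub-pixel offset of `moving` and resample it onto `reference`."""
        offset, error, _ = phase_cross_correlation(reference, moving, upsample_factor=upsample_factor)
        return nd_shift(moving, offset, order=1, cval=0.0), offset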
@@ -318,6 +318,7 @@ def crop_array(data_array, headers, error_array=None, data_mask=None, step=5,
         curr_wcs = deepcopy(WCS(crop_headers[i]))
         curr_wcs.wcs.crpix = curr_wcs.wcs.crpix - np.array([v_array[2], v_array[0]])
         crop_headers[i].update(curr_wcs.to_header())
+        crop_headers[i]['naxis1'], crop_headers[i]['naxis2'] = crop_array[i].shape
     if not data_mask is None:
         crop_mask = data_mask[v_array[0]:v_array[1], v_array[2]:v_array[3]]
     return crop_array, crop_error_array, crop_mask, crop_headers
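The added line keeps the cropped headers' NAXIS keywords consistent with the CRPIX shift above. As a hedged alternative with the same goal, astropy's Cutout2D does the WCS bookkeeping itself (crop_with_wcs is an illustrative helper, not the repository's crop_array):

    from astropy.nddata import Cutout2D
    from astropy.wcs import WCS

    def crop_with_wcs(data, header, position, size):
        """Crop `data` and return a header whose WCS matches the cutout."""
        cut = Cutout2D(data, position=position, size=size, wcs=WCS(header))
        new_header = header.copy()
        new_header.update(cut.wcs.to_header())
        new_header['naxis2'], new_header['naxis1'] = cut.data.shape  # numpy shape is (y, x)
        return cut.data, new_header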
@@ -508,6 +509,11 @@ def get_error(data_array, headers, error_array=None, data_mask=None,
         background[i] = sub_image.sum()
         if (data_array[i] < 0.).any():
             print(data_array[i])
+        if i==0:
+            np.savetxt("output/s_bg.txt", error_bkg[i])
+            np.savetxt("output/s_wav.txt", err_wav)
+            np.savetxt("output/s_psf.txt", err_psf)
+            np.savetxt("output/s_flat.txt", err_flat)

     if display:
         plt.rcParams.update({'font.size': 10})
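These dumps expose the individual error components (background, wavelength, PSF, flat) that the pipeline otherwise folds into a single propagated error map. A hedged sketch of how they could be recombined in quadrature for the comparison with Kishimoto's per-component outputs (assuming the saved maps share one shape):

    import numpy as np

    components = ["s_bg", "s_wav", "s_psf", "s_flat"]
    maps = [np.loadtxt("output/{0}.txt".format(name)) for name in components]
    total_error = np.sqrt(np.sum(np.array(maps)**2, axis=0))  # quadrature sum of independent terms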
@@ -770,7 +776,7 @@ def align_data(data_array, headers, error_array=None, upsample_factor=1.,
     if error_array is None:
         _, error_array, headers, background = get_error(data_array, headers, return_background=True)
     else:
-        _, _, headers, background = get_error(data_array, headers, return_background=True)
+        _, _, headers, background = get_error(data_array, headers, error_array=error_array, return_background=True)

     # Crop out any null edges
     # (ref_data must be cropped as well)
@@ -837,6 +843,9 @@ def align_data(data_array, headers, error_array=None, upsample_factor=1.,
         # Sum the errors quadratically
         rescaled_error[i] = np.sqrt(rescaled_error[i]**2 + error_shift**2)

+        if i==1:
+            np.savetxt("output/s_shift.txt", error_shift)
+
         shifts.append(shift)
         errors.append(error)

@@ -1075,17 +1084,6 @@ def polarizer_avg(data_array, error_array, data_mask, headers, FWHM=None,
     err120 = np.sqrt(np.sum(err120_array**2, axis=0))
     polerr_array = np.array([err0, err60, err120])

-    # Update headers
-    for header in headers:
-        if header['filtnam1']=='POL0':
-            list_head = headers0
-        elif header['filtnam1']=='POL60':
-            list_head = headers60
-        else:
-            list_head = headers120
-        header['exptime'] = np.sum([head['exptime'] for head in list_head])/len(list_head)
-    headers_array = [headers0[0], headers60[0], headers120[0]]
-
     if not(FWHM is None) and (smoothing.lower() in ['gaussian', 'gauss']):
         # Smooth each polX image by convolving with a gaussian.
         pol_array, polerr_array = smooth_data(pol_array, polerr_array,
@@ -1093,6 +1091,17 @@ def polarizer_avg(data_array, error_array, data_mask, headers, FWHM=None,
     pol0, pol60, pol120 = pol_array
     err0, err60, err120 = polerr_array

+    # Update headers
+    for header in headers:
+        if header['filtnam1']=='POL0':
+            list_head = headers0
+        elif header['filtnam1']=='POL60':
+            list_head = headers60
+        elif header['filtnam1']=='POL120':
+            list_head = headers120
+        header['exptime'] = np.sum([head['exptime'] for head in list_head])  #/len(list_head)
+    pol_headers = [headers0[0], headers60[0], headers120[0]]
+
     # Get image shape
     shape = pol0.shape

@@ -1109,7 +1118,7 @@ def polarizer_avg(data_array, error_array, data_mask, headers, FWHM=None,
     polarizer_cov[1,1] = err60**2
     polarizer_cov[2,2] = err120**2

-    return polarizer_array, polarizer_cov
+    return polarizer_array, polarizer_cov, pol_headers


 def compute_Stokes(data_array, error_array, data_mask, headers,
@@ -1172,7 +1181,7 @@ def compute_Stokes(data_array, error_array, data_mask, headers,
     # Routine for the FOC instrument
     if instr == 'FOC':
         # Get image from each polarizer and covariance matrix
-        pol_array, pol_cov = polarizer_avg(data_array, error_array, data_mask,
+        pol_array, pol_cov, pol_headers = polarizer_avg(data_array, error_array, data_mask,
                                            headers, FWHM=FWHM, scale=scale, smoothing=smoothing)
         pol0, pol60, pol120 = pol_array

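For context, with ideal polarizers at 0°, 60° and 120°, each image measures F_theta = (I + Q·cos 2θ + U·sin 2θ)/2, which inverts to closed-form Stokes maps. A minimal sketch of that inversion (the pipeline's actual FOC routine, with its instrument corrections and covariances, is not shown in this commit):

    import numpy as np

    def stokes_from_pol3(pol0, pol60, pol120):
        """Ideal 0/60/120-degree polarizer triplet -> Stokes I, Q, U."""
        I = (2.0/3.0) * (pol0 + pol60 + pol120)
        Q = (2.0/3.0) * (2.0*pol0 - pol60 - pol120)
        U = (2.0/np.sqrt(3.0)) * (pol60 - pol120)
        return I, Q, U

This inversion is also what makes sigma_Q ≈ sigma_U ≈ sqrt(2)·sigma_I for equal per-image noise, the assumption behind the 4.7% note above.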
@@ -1246,6 +1255,9 @@ def compute_Stokes(data_array, error_array, data_mask, headers,
         s_I2_axis = np.sum([dI_dtheta[i]**2 * sigma_theta[i]**2 for i in range(len(sigma_theta))], axis=0)
         s_Q2_axis = np.sum([dQ_dtheta[i]**2 * sigma_theta[i]**2 for i in range(len(sigma_theta))], axis=0)
         s_U2_axis = np.sum([dU_dtheta[i]**2 * sigma_theta[i]**2 for i in range(len(sigma_theta))], axis=0)
+        np.savetxt("output/sI_dir.txt", np.sqrt(s_I2_axis))
+        np.savetxt("output/sQ_dir.txt", np.sqrt(s_Q2_axis))
+        np.savetxt("output/sU_dir.txt", np.sqrt(s_U2_axis))

         # Add quadratically the uncertainty to the Stokes covariance matrix
         Stokes_cov[0,0] += s_I2_axis
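The s_*2_axis terms are first-order propagation of the polarizer-axis angle uncertainties, sigma_X,axis² = Σ_i (∂X/∂θ_i)² σ_θi² for X in {I, Q, U}; the new savetxt lines dump their square roots so this single contribution can be set against the IQU_dir component of Kishimoto's error budget, presumably its counterpart.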