Change reduction to take into account transmittance, and try plotting FOC's FOV

Thibault Barnouin
2021-06-21 20:22:45 +02:00
parent d9c45870e6
commit 5493120a56
22 changed files with 124 additions and 49 deletions

18 binary image files added (plot outputs, 61 KiB to 530 KiB each); 1 binary image file changed (116 KiB to 118 KiB). Binary file contents not shown.
View File

@@ -8,20 +8,20 @@ Main script where are progressively added the steps for the FOC pipeline reducti
 import sys
 import numpy as np
 import copy
+import matplotlib.pyplot as plt
 import lib.fits as proj_fits        #Functions to handle fits files
 import lib.reduction as proj_red    #Functions used in reduction pipeline
 import lib.plots as proj_plots      #Functions for plotting data
+from lib.convex_hull import image_hull

 def main():
     ##### User inputs
     ## Input and output locations
-    # globals()['data_folder'] = "../data/NGC1068_x274020/"
-    # infiles = ['x274020at.c0f.fits','x274020bt.c0f.fits','x274020ct.c0f.fits',
-    #            'x274020dt.c0f.fits','x274020et.c0f.fits','x274020ft.c0f.fits',
-    #            'x274020gt.c0f.fits','x274020ht.c0f.fits','x274020it.c0f.fits']
-    # globals()['plots_folder'] = "../plots/NGC1068_x274020/"
+    globals()['data_folder'] = "../data/NGC1068_x274020/"
+    infiles = ['x274020at.c0f.fits','x274020bt.c0f.fits','x274020ct.c0f.fits',
+               'x274020dt.c0f.fits','x274020et.c0f.fits','x274020ft.c0f.fits',
+               'x274020gt.c0f.fits','x274020ht.c0f.fits','x274020it.c0f.fits']
+    globals()['plots_folder'] = "../plots/NGC1068_x274020/"

     # globals()['data_folder'] = "../data/NGC1068_x14w010/"
     # infiles = ['x14w0101t_c0f.fits','x14w0102t_c0f.fits','x14w0103t_c0f.fits',
@@ -44,13 +44,13 @@ def main():
     # infiles = ['x3mc0101m_c0f.fits','x3mc0102m_c0f.fits','x3mc0103m_c0f.fits']
     # globals()['plots_folder'] = "../plots/3C109_x3mc010/"

-    globals()['data_folder'] = "../data/MKN463_x2rp030/"
-    infiles = ['x2rp0201t_c0f.fits', 'x2rp0202t_c0f.fits', 'x2rp0203t_c0f.fits',
-               'x2rp0204t_c0f.fits', 'x2rp0205t_c0f.fits', 'x2rp0206t_c0f.fits',
-               'x2rp0207t_c0f.fits', 'x2rp0301t_c0f.fits', 'x2rp0302t_c0f.fits',
-               'x2rp0303t_c0f.fits', 'x2rp0304t_c0f.fits', 'x2rp0305t_c0f.fits',
-               'x2rp0306t_c0f.fits', 'x2rp0307t_c0f.fits']
-    globals()['plots_folder'] = "../plots/MKN463_x2rp030/"
+    # globals()['data_folder'] = "../data/MKN463_x2rp030/"
+    # infiles = ['x2rp0201t_c0f.fits', 'x2rp0202t_c0f.fits', 'x2rp0203t_c0f.fits',
+    #            'x2rp0204t_c0f.fits', 'x2rp0205t_c0f.fits', 'x2rp0206t_c0f.fits',
+    #            'x2rp0207t_c0f.fits', 'x2rp0301t_c0f.fits', 'x2rp0302t_c0f.fits',
+    #            'x2rp0303t_c0f.fits', 'x2rp0304t_c0f.fits', 'x2rp0305t_c0f.fits',
+    #            'x2rp0306t_c0f.fits', 'x2rp0307t_c0f.fits']
+    # globals()['plots_folder'] = "../plots/MKN463_x2rp030/"

     # globals()['data_folder'] = "../data/PG1630+377_x39510/"
     # infiles = ['x3990201m_c0f.fits', 'x3990205m_c0f.fits', 'x3995101r_c0f.fits',
@@ -108,10 +108,10 @@ def main():
     rotate_stokes = True    #rotation to North convention can give erroneous results
     rotate_data = False     #rotation to North convention can give erroneous results

     # Polarization map output
-    figname = 'MKN463_FOC'  #target/intrument name
-    figtype = '_combine_FWHM020_rot'    #additionnal informations
-    SNRp_cut = 3    #P measurments with SNR>3
-    SNRi_cut = 30   #I measurments with SNR>30, which implies an uncertainty in P of 4.7%.
+    figname = 'NGC1068_FOC' #target/intrument name
+    figtype = '_3_combine_FWHM020'  #additionnal informations
+    SNRp_cut = 30   #P measurments with SNR>3
+    SNRi_cut = 300  #I measurments with SNR>30, which implies an uncertainty in P of 4.7%.
     step_vec = 1    #plot all vectors in the array. if step_vec = 2, then every other vector will be plotted

     ##### Pipeline start
@@ -145,16 +145,26 @@ def main():
     for data in data_array:
         if (data < 0.).any():
             print("ETAPE 5 : ", data)

+    vertex = image_hull((1.-data_mask),step=5,null_val=0.,inside=True)
+    shape = np.array([vertex[1]-vertex[0],vertex[3]-vertex[2]])
+    rectangle = [vertex[2], vertex[0], shape[1], shape[0], 0., 'w']
+
     # Rotate data to have North up
     ref_header = copy.deepcopy(headers[0])
     if rotate_data:
+        alpha = ref_header['orientat']
+        mrot = np.array([[np.cos(-alpha), -np.sin(-alpha)],
+                         [np.sin(-alpha), np.cos(-alpha)]])
+        rectangle[0:2] = np.dot(mrot, np.asarray(rectangle[0:2]))
+        rectangle[4] = alpha
         data_array, error_array, data_mask, headers = proj_red.rotate_data(data_array, error_array, data_mask, headers, -ref_header['orientat'])

     for data in data_array:
         if (data < 0.).any():
             print("ETAPE 6 : ", data)

     #Plot array for checking output
     if display_data:
-        proj_plots.plot_obs(data_array, headers, vmin=data_array.min(), vmax=data_array.max(), savename=figname+"_center_"+align_center, plots_folder=plots_folder)
+        proj_plots.plot_obs(data_array, headers, vmin=data_array.min(), vmax=data_array.max(), rectangle=[rectangle,]*data_array.shape[0], savename=figname+"_center_"+align_center, plots_folder=plots_folder)

     ## Step 2:
     # Compute Stokes I, Q, U with smoothed polarized images
@@ -166,8 +176,13 @@ def main():
     ## Step 3:
     # Rotate images to have North up
-    if rotate_stokes:
-        ref_header = copy.deepcopy(headers[0])
+    ref_header = copy.deepcopy(headers[0])
+    if rotate_stokes:
+        alpha = ref_header['orientat']
+        mrot = np.array([[np.cos(-alpha), -np.sin(-alpha)],
+                         [np.sin(-alpha), np.cos(-alpha)]])
+        rectangle[0:2] = np.dot(mrot, np.asarray(rectangle[0:2]))
+        rectangle[4] = alpha
         I_stokes, Q_stokes, U_stokes, Stokes_cov, data_mask, headers = proj_red.rotate_Stokes(I_stokes, Q_stokes, U_stokes, Stokes_cov, data_mask, headers, -ref_header['orientat'], SNRi_cut=None)

     # Compute polarimetric parameters (polarization degree and angle).
     P, debiased_P, s_P, s_P_P, PA, s_PA, s_PA_P = proj_red.compute_pol(I_stokes, Q_stokes, U_stokes, Stokes_cov, headers)
@@ -178,11 +193,11 @@ def main():
     ## Step 5:
     # Plot polarization map (Background is either total Flux, Polarization degree or Polarization degree error).
-    proj_plots.polarization_map(copy.deepcopy(Stokes_test), data_mask, SNRp_cut=SNRp_cut, SNRi_cut=SNRi_cut, step_vec=step_vec, savename=figname+figtype, plots_folder=plots_folder, display=None)
-    proj_plots.polarization_map(copy.deepcopy(Stokes_test), data_mask, SNRp_cut=SNRp_cut, SNRi_cut=SNRi_cut, step_vec=step_vec, savename=figname+figtype+"_P", plots_folder=plots_folder, display='Pol_deg')
-    proj_plots.polarization_map(copy.deepcopy(Stokes_test), data_mask, SNRp_cut=SNRp_cut, SNRi_cut=SNRi_cut, step_vec=step_vec, savename=figname+figtype+"_P_err", plots_folder=plots_folder, display='Pol_deg_err')
-    proj_plots.polarization_map(copy.deepcopy(Stokes_test), data_mask, SNRp_cut=SNRp_cut, SNRi_cut=SNRi_cut, step_vec=step_vec, savename=figname+figtype+"_SNRi", plots_folder=plots_folder, display='SNRi')
-    proj_plots.polarization_map(copy.deepcopy(Stokes_test), data_mask, SNRp_cut=SNRp_cut, SNRi_cut=SNRi_cut, step_vec=step_vec, savename=figname+figtype+"_SNRp", plots_folder=plots_folder, display='SNRp')
+    proj_plots.polarization_map(copy.deepcopy(Stokes_test), rectangle, SNRp_cut=SNRp_cut, SNRi_cut=SNRi_cut, step_vec=step_vec, savename=figname+figtype, plots_folder=plots_folder, display=None)
+    proj_plots.polarization_map(copy.deepcopy(Stokes_test), rectangle, SNRp_cut=SNRp_cut, SNRi_cut=SNRi_cut, step_vec=step_vec, savename=figname+figtype+"_P", plots_folder=plots_folder, display='Pol_deg')
+    proj_plots.polarization_map(copy.deepcopy(Stokes_test), rectangle, SNRp_cut=SNRp_cut, SNRi_cut=SNRi_cut, step_vec=step_vec, savename=figname+figtype+"_P_err", plots_folder=plots_folder, display='Pol_deg_err')
+    proj_plots.polarization_map(copy.deepcopy(Stokes_test), rectangle, SNRp_cut=SNRp_cut, SNRi_cut=SNRi_cut, step_vec=step_vec, savename=figname+figtype+"_SNRi", plots_folder=plots_folder, display='SNRi')
+    proj_plots.polarization_map(copy.deepcopy(Stokes_test), rectangle, SNRp_cut=SNRp_cut, SNRi_cut=SNRi_cut, step_vec=step_vec, savename=figname+figtype+"_SNRp", plots_folder=plots_folder, display='SNRp')

     return 0
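
As a side note on the FOV-rectangle bookkeeping added above: the sketch below is a minimal, standalone illustration (not part of the commit) of how a [x, y, width, height, angle, color] rectangle can be built from the (ymin, ymax, xmin, xmax) vertices returned by image_hull and carried through a rotation to the North-up frame. The helper names and the explicit degrees-to-radians conversion are assumptions for the example, and the vertex values are made up.

import numpy as np

def rectangle_from_vertex(vertex, color='w'):
    # vertex = (ymin, ymax, xmin, xmax), the convention used with image_hull in the diff
    ymin, ymax, xmin, xmax = vertex
    height, width = ymax - ymin, xmax - xmin
    # [x, y, width, height, angle, color] as expected by plot_obs / polarization_map
    return [xmin, ymin, width, height, 0., color]

def rotate_rectangle(rectangle, alpha_deg):
    # Rotate the rectangle anchor and record the angle, mirroring the blocks under
    # rotate_data / rotate_stokes above; the angle is converted to radians for the
    # trigonometric calls (an assumption about the unit of 'orientat')
    a = np.deg2rad(alpha_deg)
    mrot = np.array([[np.cos(-a), -np.sin(-a)],
                     [np.sin(-a),  np.cos(-a)]])
    rectangle = list(rectangle)
    rectangle[0:2] = np.dot(mrot, np.asarray(rectangle[0:2]))
    rectangle[4] = alpha_deg
    return rectangle

vertex = (12, 500, 20, 480)                    # hypothetical hull of the FOC chip
rect = rectangle_from_vertex(vertex)           # [20, 12, 460, 488, 0.0, 'w']
rect = rotate_rectangle(rect, alpha_deg=35.0)  # anchor rotated, angle stored for the patch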

View File

@@ -73,16 +73,20 @@ def plot_obs(data_array, headers, shape=None, vmin=0., vmax=6., rectangle=None,
         #plots
         im = ax.imshow(data, vmin=vmin, vmax=vmax, origin='lower')
         if not(rectangle is None):
-            x, y, width, height, color = rectangle[i]
-            ax.add_patch(Rectangle((x, y), width, height, edgecolor=color, fill=False))
+            x, y, width, height, angle, color = rectangle[i]
+            ax.add_patch(Rectangle((x, y), width, height, angle=angle,
+                edgecolor=color, fill=False))
         #position of centroid
         ax.plot([data.shape[1]/2, data.shape[1]/2], [0,data.shape[0]-1], lw=1,
                 color='black')
         ax.plot([0,data.shape[1]-1], [data.shape[1]/2, data.shape[1]/2], lw=1,
                 color='black')
-        ax.annotate(instr+":"+rootname, color='white', fontsize=5, xy=(0.02, 0.95), xycoords='axes fraction')
-        ax.annotate(filt, color='white', fontsize=10, xy=(0.02, 0.02), xycoords='axes fraction')
-        ax.annotate(exptime, color='white', fontsize=5, xy=(0.80, 0.02), xycoords='axes fraction')
+        ax.annotate(instr+":"+rootname,color='white',fontsize=5,xy=(0.02, 0.95),
+                xycoords='axes fraction')
+        ax.annotate(filt,color='white',fontsize=10,xy=(0.02, 0.02),
+                xycoords='axes fraction')
+        ax.annotate(exptime,color='white',fontsize=5,xy=(0.80, 0.02),
+                xycoords='axes fraction')

     fig.subplots_adjust(hspace=0, wspace=0, right=0.85)
     cbar_ax = fig.add_axes([0.9, 0.12, 0.02, 0.75])
@@ -144,7 +148,7 @@ def plot_Stokes(Stokes, savename=None, plots_folder=""):
     return 0


-def polarization_map(Stokes, data_mask, SNRp_cut=3., SNRi_cut=30., step_vec=1,
+def polarization_map(Stokes, rectangle=None, SNRp_cut=3., SNRi_cut=30., step_vec=1,
                      savename=None, plots_folder="", display=None):
     """
     Plots polarization map from Stokes HDUList.
@@ -153,6 +157,10 @@ def polarization_map(Stokes, data_mask, SNRp_cut=3., SNRi_cut=30., step_vec=1,
     Stokes : astropy.io.fits.hdu.hdulist.HDUList
         HDUList containing I, Q, U, P, s_P, PA, s_PA (in this particular order)
         for one observation.
+    rectangle : numpy.ndarray, optional
+        Array of parameters for matplotlib.patches.Rectangle objects that will
+        be displayed on each output image. If None, no rectangle displayed.
+        Defaults to None.
     SNRp_cut : float, optional
         Cut that should be applied to the signal-to-noise ratio on P.
         Any SNR < SNRp_cut won't be displayed.
@@ -279,6 +287,12 @@ def polarization_map(Stokes, data_mask, SNRp_cut=3., SNRi_cut=30., step_vec=1,
     north_dir = AnchoredDirectionArrows(ax.transAxes, "E", "N", length=-0.08, fontsize=0.03, loc=1, aspect_ratio=-1, sep_y=0.01, sep_x=0.01, angle=-Stokes[0].header['orientat'], color='w', arrow_props={'ec': 'w', 'fc': 'w', 'alpha': 1,'lw': 2})
     ax.add_artist(north_dir)

+    # Display instrument FOV
+    if not(rectangle is None):
+        x, y, width, height, angle, color = rectangle
+        ax.add_patch(Rectangle((x, y), width, height, angle=angle,
+            edgecolor=color, fill=False))
+
     # Compute integrated parameters and associated errors for pixels in the cut
     n_pix = mask.size
     I_int = stkI.data[mask].sum()
@@ -294,7 +308,7 @@ def polarization_map(Stokes, data_mask, SNRp_cut=3., SNRi_cut=30., step_vec=1,
     P_int = np.sqrt(Q_int**2+U_int**2)/I_int*100.
     P_int_err = (100./I_int)*np.sqrt((Q_int**2*Q_int_err**2 + U_int**2*U_int_err**2 + 2.*Q_int*U_int*QU_int_err)/(Q_int**2 + U_int**2) + ((Q_int/I_int)**2 + (U_int/I_int)**2)*I_int_err**2 - 2.*(Q_int/I_int)*IQ_int_err - 2.*(U_int/I_int)*IU_int_err)
-    PA_int = (90./np.pi)*np.arctan2(U_int,Q_int)+90.
+    PA_int = (90./np.pi)*np.arctan2(U_int,Q_int)+90.*2
     PA_int_err = (90./(np.pi*(Q_int**2 + U_int**2)))*np.sqrt(U_int**2*Q_int_err**2 + Q_int**2*U_int_err**2 - 2.*Q_int*U_int*QU_int_err)

     # Compute integrated parameters and associated errors for all pixels
@@ -313,7 +327,7 @@ def polarization_map(Stokes, data_mask, SNRp_cut=3., SNRi_cut=30., step_vec=1,
     P_diluted_err = (100./I_diluted)*np.sqrt((Q_diluted**2*Q_diluted_err**2 + U_diluted**2*U_diluted_err**2 + 2.*Q_diluted*U_diluted*QU_diluted_err)/(Q_diluted**2 + U_diluted**2) + ((Q_diluted/I_diluted)**2 + (U_diluted/I_diluted)**2)*I_diluted_err**2 - 2.*(Q_diluted/I_diluted)*IQ_diluted_err - 2.*(U_diluted/I_diluted)*IU_diluted_err)
     P_diluted_err = np.sqrt(2/n_pix)*100.
-    PA_diluted = (90./np.pi)*np.arctan2(U_diluted,Q_diluted)+90.
+    PA_diluted = (90./np.pi)*np.arctan2(U_diluted,Q_diluted)+90.*2
     PA_diluted_err = (90./(np.pi*(Q_diluted**2 + U_diluted**2)))*np.sqrt(U_diluted**2*Q_diluted_err**2 + Q_diluted**2*U_diluted_err**2 - 2.*Q_diluted*U_diluted*QU_diluted_err)
     PA_diluted_err = P_diluted_err/(2.*P_diluted)*180./np.pi

View File

@@ -215,7 +215,7 @@ def crop_array(data_array, headers, error_array=None, step=5, null_val=None,
     if null_val is None:
         null_val = [1.00*error.mean() for error in error_array]
     elif type(null_val) is float:
-        null_val = [null_val,]*len(error_array)
+        null_val = [null_val,]*error_array.shape[0]

     vertex = np.zeros((data_array.shape[0],4),dtype=int)
     for i,image in enumerate(data_array):
@@ -233,7 +233,7 @@
     v_array[3] = np.max(vertex[:,3]).astype(int)

     new_shape = np.array([v_array[1]-v_array[0],v_array[3]-v_array[2]])
-    rectangle = [v_array[2], v_array[0], new_shape[1], new_shape[0], 'b']
+    rectangle = [v_array[2], v_array[0], new_shape[1], new_shape[0], 0., 'b']

     if display:
         fig, ax = plt.subplots()
         data = data_array[0]
@@ -243,7 +243,7 @@
         filt = headers[0]['filtnam1']
         #plots
         im = ax.imshow(data, vmin=data.min(), vmax=data.max(), origin='lower')
-        x, y, width, height, color = rectangle
+        x, y, width, height, angle, color = rectangle
         ax.add_patch(Rectangle((x, y),width,height,edgecolor=color,fill=False))
         #position of centroid
         ax.plot([data.shape[1]/2, data.shape[1]/2], [0,data.shape[0]-1], lw=1,
@@ -432,7 +432,7 @@ def get_error(data_array, sub_shape=(15,15), display=False, headers=None,
     minima = np.unravel_index(np.argmin(temp.sum(axis=0)),temp.shape[1:])

     for i, image in enumerate(data):
-        rectangle.append([minima[1], minima[0], sub_shape[1], sub_shape[0], 'r'])
+        rectangle.append([minima[1], minima[0], sub_shape[1], sub_shape[0], 0., 'r'])
         # Compute error : root mean square of the background
         sub_image = image[minima[0]:minima[0]+sub_shape[0],minima[1]:minima[1]+sub_shape[1]]
         #error = np.std(sub_image) # Previously computed using standard deviation over the background
@@ -819,7 +819,7 @@ def smooth_data(data_array, error_array, data_mask, headers, FWHM=1.,
         for c in range(smoothed.shape[1]):
             # Compute distance from current pixel
             dist_rc = np.where(data_mask, fmax, np.sqrt((r-xx)**2+(c-yy)**2))
-            g_rc = np.array([np.exp(-0.5*(dist_rc/stdev)**2),]*len(data_array))
+            g_rc = np.array([np.exp(-0.5*(dist_rc/stdev)**2),]*data_array.shape[0])
             # Apply weighted combination
             smoothed[r,c] = (1.-data_mask[r,c])*np.sum(data_array*weight*g_rc)/np.sum(weight*g_rc)
             error[r,c] = np.sqrt(np.sum(weight*g_rc**2))/np.sum(weight*g_rc)
@@ -1064,9 +1064,55 @@ def compute_Stokes(data_array, error_array, data_mask, headers,
         print("WARNING : Negative value in polarizer array.")

     # Stokes parameters
-    I_stokes = (2./3.)*(pol0 + pol60 + pol120)
-    Q_stokes = (2./3.)*(2*pol0 - pol60 - pol120)
-    U_stokes = (2./np.sqrt(3.))*(pol60 - pol120)
+    #default
+    #I_stokes = (2./3.)*(pol0 + pol60 + pol120)
+    #Q_stokes = (2./3.)*(2*pol0 - pol60 - pol120)
+    #U_stokes = (2./np.sqrt(3.))*(pol60 - pol120)
+
+    #transmittance corrected
+    trans2 = {'f140w' : 0.21, 'f175w' : 0.24, 'f220w' : 0.39, 'f275w' : 0.40, 'f320w' : 0.89, 'f342w' : 0.81, 'f430w' : 0.74, 'f370lp' : 0.83, 'f486n' : 0.63, 'f501n' : 0.68, 'f480lp' : 0.82, 'clear2' : 1.0}
+    trans3 = {'f120m' : 0.10, 'f130m' : 0.10, 'f140m' : 0.08, 'f152m' : 0.08, 'f165w' : 0.28, 'f170m' : 0.18, 'f195w' : 0.42, 'f190m' : 0.15, 'f210m' : 0.18, 'f231m' : 0.18, 'clear3' : 1.0}
+    trans4 = {'f253m' : 0.18, 'f278m' : 0.26, 'f307m' : 0.26, 'f130lp' : 0.92, 'f346m' : 0.58, 'f372m' : 0.73, 'f410m' : 0.58, 'f437m' : 0.71, 'f470m' : 0.79, 'f502m' : 0.82, 'f550m' : 0.77, 'clear4' : 1.0}
+    transmit = np.ones((3,))    #will be filter dependant
+    filt2 = headers[0]['filtnam2']
+    filt3 = headers[0]['filtnam3']
+    filt4 = headers[0]['filtnam4']
+    same_filt2 = np.array([filt2 == header['filtnam2'] for header in headers]).all()
+    same_filt3 = np.array([filt3 == header['filtnam3'] for header in headers]).all()
+    same_filt4 = np.array([filt4 == header['filtnam4'] for header in headers]).all()
+    if not (same_filt2 and same_filt3 and same_filt4):
+        print("WARNING : All images in data_array are not from the same \
+band filter, the limiting transmittance will be taken.")
+        transmit2 = np.min([trans2[header['filtnam2'].lower()] for header in headers])
+        transmit3 = np.min([trans3[header['filtnam3'].lower()] for header in headers])
+        transmit4 = np.min([trans4[header['filtnam4'].lower()] for header in headers])
+    else :
+        transmit2 = trans2[filt2.lower()]
+        transmit3 = trans3[filt3.lower()]
+        transmit4 = trans4[filt4.lower()]
+    transmit *= transmit2*transmit3*transmit4
+
+    pol_efficiency = {'pol0' : 0.92, 'pol60' : 0.92, 'pol120' : 0.91}
+    pol_eff = np.ones((3,))
+    pol_eff[0] = pol_efficiency['pol0']
+    pol_eff[1] = pol_efficiency['pol60']
+    pol_eff[2] = pol_efficiency['pol120']
+
+    theta = np.array([np.pi, np.pi/3., 2.*np.pi/3.])
+    flux = 2.*np.array([pol0/transmit[0], pol60/transmit[1], pol120/transmit[2]])
+    norm = pol_eff[1]*pol_eff[2]*np.sin(-2.*theta[1]+2.*theta[2]) \
+            + pol_eff[2]*pol_eff[0]*np.sin(-2.*theta[2]+2.*theta[0]) \
+            + pol_eff[0]*pol_eff[1]*np.sin(-2.*theta[0]+2.*theta[1])
+    coeff = np.zeros((3,3))
+    for i in range(3):
+        coeff[0,i] = pol_eff[(i+1)%3]*pol_eff[(i+2)%3]*np.sin(-2.*theta[(i+1)%3]+2.*theta[(i+2)%3])/norm
+        coeff[1,i] = (-pol_eff[(i+1)%3]*np.sin(2.*theta[(i+1)%3]) + pol_eff[(i+2)%3]*np.sin(2.*theta[(i+2)%3]))/norm
+        coeff[2,i] = (pol_eff[(i+1)%3]*np.cos(2.*theta[(i+1)%3]) - pol_eff[(i+2)%3]*np.cos(2.*theta[(i+2)%3]))/norm
+
+    I_stokes = np.sum([coeff[0,i]*flux[i] for i in range(3)], axis=0)
+    Q_stokes = np.sum([coeff[1,i]*flux[i] for i in range(3)], axis=0)
+    U_stokes = np.sum([coeff[2,i]*flux[i] for i in range(3)], axis=0)

     # Remove nan
     I_stokes[np.isnan(I_stokes)]=0.
@@ -1077,7 +1123,7 @@
     mask = (Q_stokes**2 + U_stokes**2) > I_stokes**2
     if mask.any():
-        print("WARNING : I_pol > I_stokes : ", len(I_stokes[mask]))
+        print("WARNING : I_pol > I_stokes : ", I_stokes[mask].size)
         #plt.imshow(np.sqrt(Q_stokes**2+U_stokes**2)/I_stokes*mask, origin='lower')
         #plt.colorbar()
@@ -1156,10 +1202,10 @@ def compute_pol(I_stokes, Q_stokes, U_stokes, Stokes_cov, headers):
     I_pol = np.sqrt(Q_stokes**2 + U_stokes**2)
     P = I_pol/I_stokes*100.
     P[I_stokes <= 0.] = 0.
-    PA = (90./np.pi)*np.arctan2(U_stokes,Q_stokes)+90
+    PA = (90./np.pi)*np.arctan2(U_stokes,Q_stokes)+90.*2.
     if (P>100.).any():
-        print("WARNING : found pixels for which P > 100%", len(P[P>100.]))
+        print("WARNING : found pixels for which P > 100%", P[P>100.].size)

     #Associated errors
     s_P = (100./I_stokes)*np.sqrt((Q_stokes**2*Stokes_cov[1,1] + U_stokes**2*Stokes_cov[2,2] + 2.*Q_stokes*U_stokes*Stokes_cov[1,2])/(Q_stokes**2 + U_stokes**2) + ((Q_stokes/I_stokes)**2 + (U_stokes/I_stokes)**2)*Stokes_cov[0,0] - 2.*(Q_stokes/I_stokes)*Stokes_cov[0,1] - 2.*(U_stokes/I_stokes)*Stokes_cov[0,2])
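
A small numerical cross-check (not part of the commit) of how the new coefficient-based combination in compute_Stokes relates to the previous closed-form one when all transmittances and polarizer efficiencies are set to 1. The polarizer count rates below are made-up scalars. With the angle convention theta = (180°, 60°, 120°) used above, the total intensity is unchanged while Q and U come out with the opposite sign, i.e. the same polarization degree and a 90° offset in angle; reading this as the reason for the +90 -> +90*2 offset change in compute_pol is an inference from the formulas, not a statement from the commit.

import numpy as np

pol0, pol60, pol120 = 3.2, 1.7, 2.4   # made-up polarizer count rates
transmit = np.ones(3)                 # unit transmittances for the check
pol_eff = np.ones(3)                  # unit polarizer efficiencies for the check

theta = np.array([np.pi, np.pi/3., 2.*np.pi/3.])
flux = 2.*np.array([pol0, pol60, pol120])/transmit
norm = pol_eff[1]*pol_eff[2]*np.sin(2.*(theta[2]-theta[1])) \
     + pol_eff[2]*pol_eff[0]*np.sin(2.*(theta[0]-theta[2])) \
     + pol_eff[0]*pol_eff[1]*np.sin(2.*(theta[1]-theta[0]))
coeff = np.zeros((3, 3))
for i in range(3):
    j, k = (i+1) % 3, (i+2) % 3
    coeff[0, i] = pol_eff[j]*pol_eff[k]*np.sin(2.*(theta[k]-theta[j]))/norm
    coeff[1, i] = (-pol_eff[j]*np.sin(2.*theta[j]) + pol_eff[k]*np.sin(2.*theta[k]))/norm
    coeff[2, i] = (pol_eff[j]*np.cos(2.*theta[j]) - pol_eff[k]*np.cos(2.*theta[k]))/norm
I_new, Q_new, U_new = coeff @ flux

# Previous closed-form combination
I_old = (2./3.)*(pol0 + pol60 + pol120)
Q_old = (2./3.)*(2*pol0 - pol60 - pol120)
U_old = (2./np.sqrt(3.))*(pol60 - pol120)

print(np.isclose(I_new, I_old))                              # True
print(np.isclose(Q_new, -Q_old), np.isclose(U_new, -U_old))  # True True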
@@ -1169,7 +1215,7 @@ def compute_pol(I_stokes, Q_stokes, U_stokes, Stokes_cov, headers):
     debiased_P = np.sqrt(P**2 - s_P**2)
     if (debiased_P>100.).any():
-        print("WARNING : found pixels for which debiased_P > 100%", len(debiased_P[debiased_P>100.]))
+        print("WARNING : found pixels for which debiased_P > 100%", debiased_P[debiased_P>100.].size)

     #Compute the total exposure time so that
     #I_stokes*exp_tot = N_tot the total number of events
@@ -1226,7 +1272,7 @@ def rotate_data(data_array, error_array, data_mask, headers, ang):
     #Rotate original images using scipy.ndimage.rotate
     new_data_array = []
     new_error_array = []
-    for i in range(len(data_array)):
+    for i in range(data_array.shape[0]):
         new_data_array.append(sc_rotate(data_array[i], ang, reshape=False,
             cval=0.))
         new_error_array.append(sc_rotate(error_array[i], ang, reshape=False,
@@ -1235,7 +1281,7 @@ def rotate_data(data_array, error_array, data_mask, headers, ang):
     new_data_mask = sc_rotate(data_mask, ang, reshape=False, cval=True)
     new_error_array = np.array(new_error_array)
-    for i in range(len(new_data_array)):
+    for i in range(new_data_array.shape[0]):
         new_data_array[i][new_data_array[i] < 0.] = 0.

     #Update headers to new angle