Add comments pinpointing polarizers' orientation uncertainty computation

Thibault Barnouin
2022-01-30 15:44:42 +01:00
parent d03ae5ffc5
commit d133450b82
12 changed files with 36 additions and 285 deletions


@@ -1,256 +0,0 @@
import numpy as np
import matplotlib.pyplot as plt
from astropy.io import fits
from astropy.wcs import WCS
from aplpy import FITSFigure
import scipy.ndimage
import os
plt.close('all')
def bin_ndarray(ndarray, new_shape, operation='sum'):
    """
    Bins an ndarray in all axes based on the target shape, by summing or
    averaging.
    Number of output dimensions must match number of input dimensions.
    Example
    -------
    >>> m = np.arange(0,100,1).reshape((10,10))
    >>> n = bin_ndarray(m, new_shape=(5,5), operation='sum')
    >>> print(n)
    [[ 22  30  38  46  54]
     [102 110 118 126 134]
     [182 190 198 206 214]
     [262 270 278 286 294]
     [342 350 358 366 374]]
    """
    if operation.lower() not in ['sum', 'mean', 'average', 'avg']:
        raise ValueError("Operation not supported.")
    if ndarray.ndim != len(new_shape):
        raise ValueError("Shape mismatch: {} -> {}".format(ndarray.shape, new_shape))
    compression_pairs = [(d, c//d) for d, c in zip(new_shape, ndarray.shape)]
    flattened = [l for p in compression_pairs for l in p]
    ndarray = ndarray.reshape(flattened)
    for i in range(len(new_shape)):
        if operation.lower() == "sum":
            ndarray = ndarray.sum(-1*(i+1))
        elif operation.lower() in ["mean", "average", "avg"]:
            ndarray = ndarray.mean(-1*(i+1))
    return ndarray
def plots(ax, data, vmax, vmin):
    ax.imshow(data, vmax=vmax, vmin=vmin, origin='lower')
### User input
infiles = ['x274020at.c0f.fits','x274020bt.c0f.fits','x274020ct.c0f.fits','x274020dt.c0f.fits',
'x274020et.c0f.fits','x274020ft.c0f.fits','x274020gt.c0f.fits','x274020ht.c0f.fits',
'x274020it.c0f.fits']
#Centroids
#Ideally the centroids would be estimated by cross-correlating the images;
#here the position of the central source in each file is used as the reference pixel position.
ycen_0 = [304,306,303,296,295,295,294,305,304]
xcen_0 = [273,274,273,276,274,274,274,272,271]
#size, in pixels, of the FOV centered on (xcen_0, ycen_0)
Dx = 200
Dy = 200
#set the new image size (Dxy x Dxy pixels binning)
Dxy = 5
new_shape = (Dx//Dxy,Dy//Dxy)
#figures
#test alignment
vmin = 0
vmax = 6
font_size=40.0
label_size=20.
lw = 3.
#pol. map
SNRp_cut = 3 #keep P measurements with SNR > 3
SNRi_cut = 30 #keep I measurements with SNR > 30, which implies an uncertainty in P of about sqrt(2)/SNR_I ~ 4.7%
scalevec = 0.05 #length of vectors in pol. map
step_vec = 1 #plot all vectors in the array. if step_vec = 2, then every other vector will be plotted
vec_legend = 10 #% pol for legend
figname = 'NGC1068_FOC.png'
### SCRIPT ###
### Step 1. Check input images before data reduction
#This step is very simplistic: the position of the central source in each file
#is used as the reference pixel position. The centroids should instead be
#estimated by cross-correlating the images and compared against this
#peak-pixel approach (a minimal sketch of such a cross-correlation alignment
#is given after this listing).
fig, axs = plt.subplots(3, 3, figsize=(30,30), dpi=200, sharex=True, sharey=True)
for jj, a in enumerate(axs.flatten()):
    img = fits.open(infiles[jj])
    ima = img[0].data
    ima = ima[ycen_0[jj]-Dy:ycen_0[jj]+Dy, xcen_0[jj]-Dx:xcen_0[jj]+Dx]
    ima = bin_ndarray(ima, new_shape=new_shape, operation='sum') #binning
    exptime = img[0].header['EXPTIME']
    fil = img[0].header['FILTNAM1']
    ima = ima/exptime
    globals()['ima_%s' % jj] = ima #store each binned image as ima_0 ... ima_8
    #plots
    plots(a, ima, vmax=vmax, vmin=vmin)
    #crosshair at the image center (position of the centroid)
    a.plot([ima.shape[1]/2, ima.shape[1]/2], [0, ima.shape[0]-1], lw=1, color='black')
    a.plot([0, ima.shape[1]-1], [ima.shape[0]/2, ima.shape[0]/2], lw=1, color='black')
    a.text(2, 2, infiles[jj][0:8], color='white', fontsize=10)
    a.text(2, 5, fil, color='white', fontsize=30)
    a.text(ima.shape[1]-20, 1, exptime, color='white', fontsize=20)
fig.subplots_adjust(hspace=0,wspace=0)
fig.savefig('test_alignment.png',dpi=300)
os.system('open test_alignment.png')
### Step 2. Average the images taken through each polarizer (all frames share the same units, e/s, after the EXPTIME normalization above).
pol0 = (ima_0 + ima_1 + ima_2)/3.
pol60 = (ima_3 + ima_4 + ima_5 + ima_6)/4.
pol120 = (ima_7 + ima_8)/2.
fig1,(ax1,ax2,ax3) = plt.subplots(1,3,figsize=(26,8),dpi=200)
CF = ax1.imshow(pol0,vmin=vmin,vmax=vmax,origin='lower')
cbar = plt.colorbar(CF,ax=ax1)
cbar.ax.tick_params(labelsize=20)
ax1.tick_params(axis='both', which='major', labelsize=20)
ax1.text(2,2,'POL0',color='white',fontsize=10)
CF = ax2.imshow(pol60,vmin=vmin,vmax=vmax,origin='lower')
cbar = plt.colorbar(CF,ax=ax2)
cbar.ax.tick_params(labelsize=20)
ax2.tick_params(axis='both', which='major', labelsize=20)
ax2.text(2,2,'POL60',color='white',fontsize=10)
CF = ax3.imshow(pol120,vmin=vmin,vmax=vmax,origin='lower')
cbar = plt.colorbar(CF,ax=ax3)
cbar.ax.tick_params(labelsize=20)
ax3.tick_params(axis='both', which='major', labelsize=20)
ax3.text(2,2,'POL120',color='white',fontsize=10)
fig1.savefig('test_combinePol.png',dpi=300)
os.system('open test_combinePol.png')
### Step 3. Compute Stokes IQU, P, PA, PI
#Stokes parameters
I_stokes = (2./3.)*(pol0 + pol60 + pol120)
Q_stokes = (2./3.)*(2*pol0 - pol60 - pol120)
U_stokes = (2./np.sqrt(3.))*(pol60 - pol120)
#Remove nan
I_stokes[np.isnan(I_stokes)]=0.
Q_stokes[np.isnan(Q_stokes)]=0.
U_stokes[np.isnan(U_stokes)]=0.
#Polarimetry
PI = np.sqrt(Q_stokes*Q_stokes + U_stokes*U_stokes)
P = PI/I_stokes*100
PA = 0.5*np.arctan2(U_stokes, Q_stokes)*180./np.pi + 90
s_P = np.sqrt(2.)*(I_stokes)**(-0.5)
s_PA = s_P/(P/100.)*180./np.pi
fig2,((ax1,ax2,ax3),(ax4,ax5,ax6)) = plt.subplots(2,3,figsize=(40,20),dpi=200)
CF = ax1.imshow(I_stokes,origin='lower')
cbar = plt.colorbar(CF,ax=ax1)
cbar.ax.tick_params(labelsize=20)
ax1.tick_params(axis='both', which='major', labelsize=20)
ax1.text(2,2,'I',color='white',fontsize=10)
CF = ax2.imshow(Q_stokes,origin='lower')
cbar = plt.colorbar(CF,ax=ax2)
cbar.ax.tick_params(labelsize=20)
ax2.tick_params(axis='both', which='major', labelsize=20)
ax2.text(2,2,'Q',color='white',fontsize=10)
CF = ax3.imshow(U_stokes,origin='lower')
cbar = plt.colorbar(CF,ax=ax3)
cbar.ax.tick_params(labelsize=20)
ax3.tick_params(axis='both', which='major', labelsize=20)
ax3.text(2,2,'U',color='white',fontsize=10)
v = np.linspace(0,40,50)
CF = ax4.imshow(P,origin='lower',vmin=0,vmax=40)
cbar = plt.colorbar(CF,ax=ax4)
cbar.ax.tick_params(labelsize=20)
ax4.tick_params(axis='both', which='major', labelsize=20)
ax4.text(2,2,'P',color='white',fontsize=10)
CF = ax5.imshow(PA,origin='lower',vmin=0,vmax=180)
cbar = plt.colorbar(CF,ax=ax5)
cbar.ax.tick_params(labelsize=20)
ax5.tick_params(axis='both', which='major', labelsize=20)
ax5.text(2,2,'PA',color='white',fontsize=10)
CF = ax6.imshow(PI,origin='lower')
cbar = plt.colorbar(CF,ax=ax6)
cbar.ax.tick_params(labelsize=20)
ax6.tick_params(axis='both', which='major', labelsize=20)
ax6.text(2,2,'PI',color='white',fontsize=10)
fig2.savefig('test_Stokes.png',dpi=300)
os.system('open test_Stokes.png')
### Step 4. Binning and smoothing
#Images can be binned and smoothed to improve the SNR. This step can also be
#done on the PolX images instead of the Stokes maps.
### Step 5. Rotate images to have North up
#Images need to be reprojected so that North is up; this requires rotating the
#Stokes Q and U maps with the appropriate rotation matrix.
### Step 6. Write the images to FITS with an updated WCS
new_wcs = WCS(naxis=2)
new_wcs.wcs.crpix = [I_stokes.shape[0]/2, I_stokes.shape[1]/2]
new_wcs.wcs.crval = [img[0].header['CRVAL1'], img[0].header['CRVAL2']]
new_wcs.wcs.cunit = ["deg", "deg"]
new_wcs.wcs.ctype = ["RA---TAN", "DEC--TAN"]
#scale the pixel size by the Dxy rebinning factor; rotation/skew terms of the CD matrix are ignored here
new_wcs.wcs.cdelt = [img[0].header['CD1_1']*Dxy, img[0].header['CD2_2']*Dxy]
#hdu_ori = WCS(img[0])
stkI=fits.PrimaryHDU(data=I_stokes,header=new_wcs.to_header())
pol=fits.PrimaryHDU(data=P,header=new_wcs.to_header())
pang=fits.PrimaryHDU(data=PA,header=new_wcs.to_header())
pol_err=fits.PrimaryHDU(data=s_P,header=new_wcs.to_header())
pang_err=fits.PrimaryHDU(data=s_PA,header=new_wcs.to_header())
### Step 7. Polarization map
#quality cuts
pxscale = stkI.header['CDELT1']
#apply quality cuts
SNRp = pol.data/pol_err.data
pol.data[SNRp < SNRp_cut] = np.nan
SNRi = stkI.data/np.std(stkI.data[0:10,0:10])
pol.data[SNRi < SNRi_cut] = np.nan
fig = plt.figure(figsize=(11,10))
gc = FITSFigure(stkI,figure=fig)
gc.show_contour(np.log10(SNRi),levels=np.linspace(np.log10(SNRi_cut),np.max(np.log10(SNRi)),20),\
filled=True,cmap='magma')
gc.show_vectors(pol,pang,scale=scalevec,step=step_vec,color='white',linewidth=1.0)
fig.savefig(figname,dpi=300)
os.system('open '+figname)
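
Note on the alignment step: as the Step 1 comments say, the hard-coded (xcen_0, ycen_0) centroids are only a stopgap and the frames should really be registered by cross-correlation. Below is a minimal, stand-alone sketch of that idea; the helper names (cc_offset, align_to_reference) are illustrative and not part of this repository, it assumes all frames share the same shape, and it only solves for whole-pixel offsets.

import numpy as np
from astropy.io import fits
from scipy.ndimage import shift as translate

def cc_offset(reference, image):
    # (dy, dx) integer translation to apply to `image` so it lines up with `reference`,
    # taken from the peak of the FFT-based circular cross-correlation
    corr = np.fft.ifft2(np.fft.fft2(reference) * np.conj(np.fft.fft2(image))).real
    dy, dx = np.unravel_index(np.argmax(corr), corr.shape)
    if dy > corr.shape[0] // 2:
        dy -= corr.shape[0]
    if dx > corr.shape[1] // 2:
        dx -= corr.shape[1]
    return float(dy), float(dx)

def align_to_reference(filenames, ref_index=0):
    # register every frame onto the chosen reference frame
    reference = fits.getdata(filenames[ref_index]).astype(float)
    aligned = []
    for name in filenames:
        image = fits.getdata(name).astype(float)
        aligned.append(translate(image, cc_offset(reference, image), order=1, cval=0.0))
    return np.array(aligned)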

Binary file not shown (removed; was 396 KiB)

Binary file not shown (removed; was 526 KiB)

Binary file not shown (removed; was 594 KiB)

Binary file not shown (removed; was 151 KiB)

Binary file not shown (added; 393 KiB)

Binary file not shown (modified; 423 KiB -> 426 KiB)

Binary file not shown (modified; 518 KiB -> 711 KiB)


@@ -7,7 +7,7 @@ Main script where are progressively added the steps for the FOC pipeline reducti
 #Project libraries
 import sys
 import numpy as np
-import copy
+from copy import deepcopy
 import lib.fits as proj_fits #Functions to handle fits files
 import lib.reduction as proj_red #Functions used in reduction pipeline
 import lib.plots as proj_plots #Functions for plotting data
@@ -155,11 +155,10 @@ def main():
     rectangle = [vertex[2], vertex[0], shape[1], shape[0], 0., 'w']
     # Rotate data to have North up
-    ref_header = copy.deepcopy(headers[0])
+    ref_header = deepcopy(headers[0])
     if rotate_data:
         alpha = ref_header['orientat']
-        mrot = np.array([[np.cos(-alpha), -np.sin(-alpha)],
-                         [np.sin(-alpha), np.cos(-alpha)]])
+        mrot = np.array([[np.cos(-alpha), -np.sin(-alpha)], [np.sin(-alpha), np.cos(-alpha)]])
         rectangle[0:2] = np.dot(mrot, np.asarray(rectangle[0:2]))+np.array(data_array.shape[1:])/2
         rectangle[4] = alpha
         data_array, error_array, data_mask, headers = proj_red.rotate_data(data_array, error_array, data_mask, headers, -ref_header['orientat'])
@@ -180,7 +179,7 @@ def main():
     ## Step 3:
     # Rotate images to have North up
-    ref_header = copy.deepcopy(headers[0])
+    ref_header = deepcopy(headers[0])
     if rotate_stokes:
         alpha = ref_header['orientat']
         mrot = np.array([[np.cos(-alpha), -np.sin(-alpha)],
@@ -193,7 +192,7 @@ def main():
     ## Step 4:
     # crop to desired region of interest (roi)
-    # stokescrop = proj_plots.crop_map(copy.deepcopy(stokes_test), copy.deepcopy(data_mask), snrp_cut=snrp_cut, snri_cut=snri_cut)
+    # stokescrop = proj_plots.crop_map(deepcopy(stokes_test), deepcopy(data_mask), snrp_cut=snrp_cut, snri_cut=snri_cut)
     # stokescrop.run()
     # stokes_crop, data_mask = stokescrop.crop()
@@ -202,13 +201,13 @@ def main():
     Stokes_test = proj_fits.save_Stokes(I_stokes, Q_stokes, U_stokes, Stokes_cov, P, debiased_P, s_P, s_P_P, PA, s_PA, s_PA_P, headers, figname+figtype, data_folder=data_folder, return_hdul=True)
     # Plot polarization map (Background is either total Flux, Polarization degree or Polarization degree error).
-    proj_plots.polarization_map(copy.deepcopy(Stokes_test), data_mask, rectangle=None, SNRp_cut=SNRp_cut, SNRi_cut=SNRi_cut, step_vec=step_vec, savename=figname+figtype, plots_folder=plots_folder, display=None)
-    proj_plots.polarization_map(copy.deepcopy(Stokes_test), data_mask, rectangle=None, SNRp_cut=SNRp_cut, SNRi_cut=SNRi_cut, step_vec=step_vec, savename=figname+figtype+"_P_flux", plots_folder=plots_folder, display='Pol_Flux')
-    proj_plots.polarization_map(copy.deepcopy(Stokes_test), data_mask, rectangle=None, SNRp_cut=SNRp_cut, SNRi_cut=SNRi_cut, step_vec=step_vec, savename=figname+figtype+"_P", plots_folder=plots_folder, display='Pol_deg')
-    proj_plots.polarization_map(copy.deepcopy(Stokes_test), data_mask, rectangle=None, SNRp_cut=SNRp_cut, SNRi_cut=SNRi_cut, step_vec=step_vec, savename=figname+figtype+"_I_err", plots_folder=plots_folder, display='I_err')
-    proj_plots.polarization_map(copy.deepcopy(Stokes_test), data_mask, rectangle=None, SNRp_cut=SNRp_cut, SNRi_cut=SNRi_cut, step_vec=step_vec, savename=figname+figtype+"_P_err", plots_folder=plots_folder, display='Pol_deg_err')
-    proj_plots.polarization_map(copy.deepcopy(Stokes_test), data_mask, rectangle=None, SNRp_cut=SNRp_cut, SNRi_cut=SNRi_cut, step_vec=step_vec, savename=figname+figtype+"_SNRi", plots_folder=plots_folder, display='SNRi')
-    proj_plots.polarization_map(copy.deepcopy(Stokes_test), data_mask, rectangle=None, SNRp_cut=SNRp_cut, SNRi_cut=SNRi_cut, step_vec=step_vec, savename=figname+figtype+"_SNRp", plots_folder=plots_folder, display='SNRp')
+    proj_plots.polarization_map(deepcopy(Stokes_test), data_mask, rectangle=None, SNRp_cut=SNRp_cut, SNRi_cut=SNRi_cut, step_vec=step_vec, savename=figname+figtype, plots_folder=plots_folder, display=None)
+    proj_plots.polarization_map(deepcopy(Stokes_test), data_mask, rectangle=None, SNRp_cut=SNRp_cut, SNRi_cut=SNRi_cut, step_vec=step_vec, savename=figname+figtype+"_P_flux", plots_folder=plots_folder, display='Pol_Flux')
+    proj_plots.polarization_map(deepcopy(Stokes_test), data_mask, rectangle=None, SNRp_cut=SNRp_cut, SNRi_cut=SNRi_cut, step_vec=step_vec, savename=figname+figtype+"_P", plots_folder=plots_folder, display='Pol_deg')
+    proj_plots.polarization_map(deepcopy(Stokes_test), data_mask, rectangle=None, SNRp_cut=SNRp_cut, SNRi_cut=SNRi_cut, step_vec=step_vec, savename=figname+figtype+"_I_err", plots_folder=plots_folder, display='I_err')
+    proj_plots.polarization_map(deepcopy(Stokes_test), data_mask, rectangle=None, SNRp_cut=SNRp_cut, SNRi_cut=SNRi_cut, step_vec=step_vec, savename=figname+figtype+"_P_err", plots_folder=plots_folder, display='Pol_deg_err')
+    proj_plots.polarization_map(deepcopy(Stokes_test), data_mask, rectangle=None, SNRp_cut=SNRp_cut, SNRi_cut=SNRi_cut, step_vec=step_vec, savename=figname+figtype+"_SNRi", plots_folder=plots_folder, display='SNRi')
+    proj_plots.polarization_map(deepcopy(Stokes_test), data_mask, rectangle=None, SNRp_cut=SNRp_cut, SNRi_cut=SNRi_cut, step_vec=step_vec, savename=figname+figtype+"_SNRp", plots_folder=plots_folder, display='SNRp')
     return 0


@@ -3,7 +3,7 @@ Library functions for graham algorithm implementation (find the convex hull
 of a given list of points).
 """
-import copy
+from copy import deepcopy
 import numpy as np
@@ -141,12 +141,12 @@ def partition(s, l, r, order):
     for j in range(l, r):
         if order(s[j], s[r]):
             i = i + 1
-            temp = copy.deepcopy(s[i])
-            s[i] = copy.deepcopy(s[j])
-            s[j] = copy.deepcopy(temp)
-    temp = copy.deepcopy(s[i+1])
-    s[i+1] = copy.deepcopy(s[r])
-    s[r] = copy.deepcopy(temp)
+            temp = deepcopy(s[i])
+            s[i] = deepcopy(s[j])
+            s[j] = deepcopy(temp)
+    temp = deepcopy(s[i+1])
+    s[i+1] = deepcopy(s[r])
+    s[r] = deepcopy(temp)
     return i + 1


@@ -37,7 +37,7 @@ prototypes :
     Rotate I, Q, U given an angle in degrees using scipy functions.
 """
-import copy
+from copy import deepcopy
 import numpy as np
 import matplotlib.pyplot as plt
 import matplotlib.dates as mdates
@@ -222,15 +222,15 @@ def crop_array(data_array, headers, error_array=None, step=5, null_val=None,
         null_val = [null_val,]*error_array.shape[0]
     vertex = np.zeros((data_array.shape[0],4),dtype=int)
-    for i,image in enumerate(data_array):
+    for i,image in enumerate(data_array): # Get vertex of the rectangular convex hull of each image
         vertex[i] = image_hull(image,step=step,null_val=null_val[i],inside=inside)
     v_array = np.zeros(4,dtype=int)
-    if inside:
+    if inside: # Get vertex of the maximum convex hull for all images
         v_array[0] = np.max(vertex[:,0]).astype(int)
         v_array[1] = np.min(vertex[:,1]).astype(int)
         v_array[2] = np.max(vertex[:,2]).astype(int)
         v_array[3] = np.min(vertex[:,3]).astype(int)
-    else:
+    else: # Get vertex of the minimum convex hull for all images
         v_array[0] = np.min(vertex[:,0]).astype(int)
         v_array[1] = np.max(vertex[:,1]).astype(int)
         v_array[2] = np.min(vertex[:,2]).astype(int)
@@ -279,7 +279,7 @@ def crop_array(data_array, headers, error_array=None, step=5, null_val=None,
     crop_array = np.zeros((data_array.shape[0],new_shape[0],new_shape[1]))
     crop_error_array = np.zeros((data_array.shape[0],new_shape[0],new_shape[1]))
-    for i,image in enumerate(data_array):
+    for i,image in enumerate(data_array): #Put the image data in the cropped array
         crop_array[i] = image[v_array[0]:v_array[1],v_array[2]:v_array[3]]
         crop_error_array[i] = error_array[i][v_array[0]:v_array[1],v_array[2]:v_array[3]]
@@ -732,9 +732,9 @@ def align_data(data_array, headers, error_array=None, upsample_factor=1.,
         center = np.fix(ref_center-shift).astype(int)
         res_shift = res_center-ref_center
         rescaled_image[i,res_shift[0]:res_shift[0]+shape[1],
-                res_shift[1]:res_shift[1]+shape[2]] = copy.deepcopy(image)
+                res_shift[1]:res_shift[1]+shape[2]] = deepcopy(image)
         rescaled_error[i,res_shift[0]:res_shift[0]+shape[1],
-                res_shift[1]:res_shift[1]+shape[2]] = copy.deepcopy(error_array[i])
+                res_shift[1]:res_shift[1]+shape[2]] = deepcopy(error_array[i])
         rescaled_mask[i,res_shift[0]:res_shift[0]+shape[1],
                 res_shift[1]:res_shift[1]+shape[2]] = False
         # Shift images to align
@@ -1106,14 +1106,19 @@ def compute_Stokes(data_array, error_array, data_mask, headers,
     pol_eff[1] = pol_efficiency['pol60']
     pol_eff[2] = pol_efficiency['pol120']
+    # Orientation and error for each polarizer ## THIS IS WHERE WE IMPLEMENT THE ERROR THAT IS GOING WRONG
+    # POL0 = 0deg, POL60 = 60deg, POL120=120deg
     theta = np.array([180.*np.pi/180., 60.*np.pi/180., 120.*np.pi/180.])
+    # Uncertainties on the orientation of the polarizers' axes taken to be 3deg (see Nota et. al 1996, p36; Robinson & Thomson 1995)
     sigma_theta = np.array([3.*np.pi/180., 3.*np.pi/180., 3.*np.pi/180.])
     pol_flux = 2.*np.array([pol0/transmit[0], pol60/transmit[1], pol120/transmit[2]])
+    # Normalization parameter for Stokes parameters computation
     A = pol_eff[1]*pol_eff[2]*np.sin(-2.*theta[1]+2.*theta[2]) \
         + pol_eff[2]*pol_eff[0]*np.sin(-2.*theta[2]+2.*theta[0]) \
         + pol_eff[0]*pol_eff[1]*np.sin(-2.*theta[0]+2.*theta[1])
     coeff_stokes = np.zeros((3,3))
+    # Coefficients linking each polarizer flux to each Stokes parameter
     for i in range(3):
         coeff_stokes[0,i] = pol_eff[(i+1)%3]*pol_eff[(i+2)%3]*np.sin(-2.*theta[(i+1)%3]+2.*theta[(i+2)%3])/A
         coeff_stokes[1,i] = (-pol_eff[(i+1)%3]*np.sin(2.*theta[(i+1)%3]) + pol_eff[(i+2)%3]*np.sin(2.*theta[(i+2)%3]))/A
@@ -1143,6 +1148,7 @@ def compute_Stokes(data_array, error_array, data_mask, headers,
     Stokes_cov[0,2] = Stokes_cov[2,0] = coeff_stokes[0,0]*coeff_stokes[2,0]*pol_cov[0,0]+coeff_stokes[0,1]*coeff_stokes[2,1]*pol_cov[1,1]+coeff_stokes[0,2]*coeff_stokes[2,2]*pol_cov[2,2]+(coeff_stokes[0,0]*coeff_stokes[2,1]+coeff_stokes[2,0]*coeff_stokes[0,1])*pol_cov[0,1]+(coeff_stokes[0,0]*coeff_stokes[2,2]+coeff_stokes[2,0]*coeff_stokes[0,2])*pol_cov[0,2]+(coeff_stokes[0,1]*coeff_stokes[2,2]+coeff_stokes[2,1]*coeff_stokes[0,2])*pol_cov[1,2]
     Stokes_cov[1,2] = Stokes_cov[2,1] = coeff_stokes[1,0]*coeff_stokes[2,0]*pol_cov[0,0]+coeff_stokes[1,1]*coeff_stokes[2,1]*pol_cov[1,1]+coeff_stokes[1,2]*coeff_stokes[2,2]*pol_cov[2,2]+(coeff_stokes[1,0]*coeff_stokes[2,1]+coeff_stokes[2,0]*coeff_stokes[1,1])*pol_cov[0,1]+(coeff_stokes[1,0]*coeff_stokes[2,2]+coeff_stokes[2,0]*coeff_stokes[1,2])*pol_cov[0,2]+(coeff_stokes[1,1]*coeff_stokes[2,2]+coeff_stokes[2,1]*coeff_stokes[1,2])*pol_cov[1,2]
+    # Compute the derivative of each Stokes parameter with respect to the polarizer orientation
     dI_dtheta1 = 2.*pol_eff[0]/A*(pol_eff[2]*np.cos(-2.*theta[2]+2.*theta[0])*(pol_flux[1]-I_stokes) - pol_eff[1]*np.cos(-2.*theta[0]+2.*theta[1])*(pol_flux[2]-I_stokes))
     dI_dtheta2 = 2.*pol_eff[1]/A*(pol_eff[0]*np.cos(-2.*theta[0]+2.*theta[1])*(pol_flux[2]-I_stokes) - pol_eff[2]*np.cos(-2.*theta[1]+2.*theta[2])*(pol_flux[0]-I_stokes))
     dI_dtheta3 = 2.*pol_eff[2]/A*(pol_eff[1]*np.cos(-2.*theta[1]+2.*theta[2])*(pol_flux[0]-I_stokes) - pol_eff[0]*np.cos(-2.*theta[2]+2.*theta[0])*(pol_flux[1]-I_stokes))
@@ -1153,10 +1159,12 @@ def compute_Stokes(data_array, error_array, data_mask, headers,
     dU_dtheta2 = 2.*pol_eff[1]/A*(np.sin(2.*theta[1])*(pol_flux[2]-pol_flux[0]) - (pol_eff[0]*np.cos(-2.*theta[0]+2.*theta[1]) - pol_eff[2]*np.cos(-2.*theta[1]+2.*theta[2]))*U_stokes)
     dU_dtheta3 = 2.*pol_eff[2]/A*(np.sin(2.*theta[2])*(pol_flux[0]-pol_flux[1]) - (pol_eff[1]*np.cos(-2.*theta[1]+2.*theta[2]) - pol_eff[0]*np.cos(-2.*theta[2]+2.*theta[0]))*U_stokes)
+    # Compute the uncertainty associated with the polarizers' orientation (see Kishimoto 1999)
     s_I2_axis = (dI_dtheta1**2*sigma_theta[0]**2 + dI_dtheta2**2*sigma_theta[1]**2 + dI_dtheta3**2*sigma_theta[2]**2)
     s_Q2_axis = (dQ_dtheta1**2*sigma_theta[0]**2 + dQ_dtheta2**2*sigma_theta[1]**2 + dQ_dtheta3**2*sigma_theta[2]**2)
     s_U2_axis = (dU_dtheta1**2*sigma_theta[0]**2 + dU_dtheta2**2*sigma_theta[1]**2 + dU_dtheta3**2*sigma_theta[2]**2)
+    # Add quadratically the uncertainty to the Stokes covariance matrix ## THIS IS WHERE THE PROBLEMATIC UNCERTAINTY IS ADDED TO THE PIPELINE
     Stokes_cov[0,0] += s_I2_axis
     Stokes_cov[1,1] += s_Q2_axis
     Stokes_cov[2,2] += s_U2_axis
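
The comments added above pinpoint the propagation of the 3 deg polarizer-axis uncertainty: for each Stokes parameter X, the extra variance is sum_k (dX/dtheta_k)^2 * sigma_theta_k^2, which is then added to the corresponding diagonal term of Stokes_cov. A tiny stand-alone sketch of that quadrature sum, using hypothetical numbers rather than the pipeline's own derivative maps:

import numpy as np

def axis_orientation_variance(derivatives, sigma_theta):
    # quadrature propagation: sum_k (dX/dtheta_k)**2 * sigma_theta_k**2
    return sum(d**2 * s**2 for d, s in zip(derivatives, sigma_theta))

sigma_theta = np.full(3, 3.*np.pi/180.)   # 3 deg on each polarizer axis, in radians
dI_dtheta = [0.8, -0.3, 0.1]              # hypothetical dI/dtheta_k values (could equally be 2D maps)
s_I2_axis = axis_orientation_variance(dI_dtheta, sigma_theta)
# in the pipeline this variance is then added to the I-I term: Stokes_cov[0,0] += s_I2_axis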
@@ -1361,7 +1369,7 @@ def rotate_Stokes(I_stokes, Q_stokes, U_stokes, Stokes_cov, data_mask, headers,
     #Compute new covariance matrix on rotated parameters
-    new_Stokes_cov = copy.deepcopy(Stokes_cov)
+    new_Stokes_cov = deepcopy(Stokes_cov)
     new_Stokes_cov[1,1] = np.cos(2.*alpha)**2*Stokes_cov[1,1] + np.sin(2.*alpha)**2*Stokes_cov[2,2] + 2.*np.cos(2.*alpha)*np.sin(2.*alpha)*Stokes_cov[1,2]
     new_Stokes_cov[2,2] = np.sin(2.*alpha)**2*Stokes_cov[1,1] + np.cos(2.*alpha)**2*Stokes_cov[2,2] - 2.*np.cos(2.*alpha)*np.sin(2.*alpha)*Stokes_cov[1,2]
     new_Stokes_cov[0,1] = new_Stokes_cov[1,0] = np.cos(2.*alpha)*Stokes_cov[0,1] + np.sin(2.*alpha)*Stokes_cov[0,2]
@@ -1383,7 +1391,7 @@ def rotate_Stokes(I_stokes, Q_stokes, U_stokes, Stokes_cov, data_mask, headers,
     mrot = np.array([[np.cos(-alpha), -np.sin(-alpha)],
                      [np.sin(-alpha), np.cos(-alpha)]])
     for header in headers:
-        new_header = copy.deepcopy(header)
+        new_header = deepcopy(header)
         new_header['orientat'] = header['orientat'] + ang
         new_wcs = WCS(header).deepcopy()
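
For reference, the element-wise covariance updates in rotate_Stokes above are what comes out of rotating the (I, Q, U) vector by 2*alpha and transforming the covariance as R C R^T. The sketch below (illustration only, not pipeline code) checks that one rotation matrix consistent with the three updates shown reproduces them on a random symmetric matrix; the full function may handle the remaining terms and sign conventions elsewhere.

import numpy as np

alpha = np.deg2rad(35.)                      # arbitrary test angle
c, s = np.cos(2.*alpha), np.sin(2.*alpha)
R = np.array([[1., 0., 0.],
              [0.,  c,  s],
              [0., -s,  c]])                 # rotation acting on (I, Q, U)

rng = np.random.default_rng(0)
A = rng.normal(size=(3, 3))
C = A @ A.T                                  # random symmetric, covariance-like matrix
C_rot = R @ C @ R.T                          # matrix form of the transformation

# element-wise form, as written in rotate_Stokes
assert np.isclose(C_rot[1, 1], c**2*C[1, 1] + s**2*C[2, 2] + 2.*c*s*C[1, 2])
assert np.isclose(C_rot[2, 2], s**2*C[1, 1] + c**2*C[2, 2] - 2.*c*s*C[1, 2])
assert np.isclose(C_rot[0, 1], c*C[0, 1] + s*C[0, 2])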