Test reduction and error from ELR propositions

Author: Thibault Barnouin
Date:   2021-06-03 16:19:55 +02:00
Parent: c1b0fead8a
Commit: 26fc1eb668
46 changed files with 1965 additions and 20 deletions

(Binary image diffs not shown: this commit adds, removes and updates a number of PNG plot files; the web view only listed their before/after previews and file sizes.)
Modified file: main FOC reduction script (def main())
@@ -73,28 +73,28 @@ def main():
iterations = 10
# Error estimation
error_sub_shape = (75,75)
- display_error = True
+ display_error = False
# Data binning
rebin = True
if rebin:
- pxsize = 8
- px_scale = 'px' #pixel or arcsec
+ pxsize = 0.1
+ px_scale = 'arcsec' #pixel or arcsec
rebin_operation = 'sum' #sum or average
# Alignement
align_center = 'image' #If None will align image to image center
- display_data = True
+ display_data = False
# Smoothing
- smoothing_function = 'gaussian' #gaussian or combine
- smoothing_FWHM = 1 #If None, no smoothing is done
- smoothing_scale = 'pixel' #pixel or arcsec
+ smoothing_function = 'combine' #gaussian or combine
+ smoothing_FWHM = 0.1 #If None, no smoothing is done
+ smoothing_scale = 'arcsec' #pixel or arcsec
# Rotation
rotate = False #rotation to North convention can give erroneous results
rotate_library = 'scipy' #scipy or pillow
# Polarization map output
figname = 'NGC1068_FOC' #target/intrument name
- figtype = '_ELR_same_params' #additionnal informations
+ figtype = '_combine_FWHM010' #additionnal informations
SNRp_cut = 3 #P measurments with SNR>3
- SNRi_cut = 4 #I measurments with SNR>30, which implies an uncertainty in P of 4.7%.
+ SNRi_cut = 30 #I measurments with SNR>30, which implies an uncertainty in P of 4.7%.
step_vec = 1 #plot all vectors in the array. if step_vec = 2, then every other vector will be plotted
##### Pipeline start
@@ -112,7 +112,7 @@ def main():
if rebin:
data_array, error_array, headers, Dxy = proj_red.rebin_array(data_array, error_array, headers, pxsize=pxsize, scale=px_scale, operation=rebin_operation)
#Align and rescale images with oversampling.
- data_array, error_array = proj_red.align_data(data_array, error_array, upsample_factor=np.min(Dxy).astype(int), ref_center=align_center, return_shifts=False)
+ data_array, error_array = proj_red.align_data(data_array, error_array, upsample_factor=int(Dxy.min()), ref_center=align_center, return_shifts=False)
#Plot array for checking output
if display_data:
@@ -139,13 +139,15 @@ def main():
## Step 4:
# Save image to FITS.
- Stokes_test = proj_fits.save_Stokes(I_stokes, Q_stokes, U_stokes, Stokes_cov, P, debiased_P, s_P, s_P_P, PA, s_PA, s_PA_P, headers[0], figname+figtype, data_folder=data_folder, return_hdul=True)
+ Stokes_test = proj_fits.save_Stokes(I_stokes, Q_stokes, U_stokes, Stokes_cov, P, debiased_P, s_P, s_P_P, PA, s_PA, s_PA_P, headers, figname+figtype, data_folder=data_folder, return_hdul=True)
## Step 5:
# Plot polarization map (Background is either total Flux, Polarization degree or Polarization degree error).
proj_plots.polarization_map(copy.deepcopy(Stokes_test), SNRp_cut=SNRp_cut, SNRi_cut=SNRi_cut, step_vec=step_vec, savename=figname+figtype, plots_folder=plots_folder, display=None)
- proj_plots.polarization_map(copy.deepcopy(Stokes_test), SNRp_cut=SNRp_cut, SNRi_cut=SNRi_cut, step_vec=step_vec, savename=figname+figtype+"_P", plots_folder=plots_folder, display='Pol_deg')
- proj_plots.polarization_map(copy.deepcopy(Stokes_test), SNRp_cut=SNRp_cut, SNRi_cut=SNRi_cut, step_vec=step_vec, savename=figname+figtype+"_P_err", plots_folder=plots_folder, display='Pol_deg_err')
+ #proj_plots.polarization_map(copy.deepcopy(Stokes_test), SNRp_cut=SNRp_cut, SNRi_cut=SNRi_cut, step_vec=step_vec, savename=figname+figtype+"_P", plots_folder=plots_folder, display='Pol_deg')
+ #proj_plots.polarization_map(copy.deepcopy(Stokes_test), SNRp_cut=SNRp_cut, SNRi_cut=SNRi_cut, step_vec=step_vec, savename=figname+figtype+"_P_err", plots_folder=plots_folder, display='Pol_deg_err')
+ proj_plots.polarization_map(copy.deepcopy(Stokes_test), SNRp_cut=SNRp_cut, SNRi_cut=SNRi_cut, step_vec=step_vec, savename=figname+figtype+"_SNRi", plots_folder=plots_folder, display='SNRi')
+ proj_plots.polarization_map(copy.deepcopy(Stokes_test), SNRp_cut=SNRp_cut, SNRi_cut=SNRi_cut, step_vec=step_vec, savename=figname+figtype+"_SNRp", plots_folder=plots_folder, display='SNRp')
return 0
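Aside (not part of the commit): the figure "SNR>30 implies an uncertainty in P of 4.7%" in the SNRi_cut comment follows from the Poisson-limited relation sigma_P ~ sqrt(2)/SNR_I. A minimal numeric check in Python:

    import numpy as np
    snr_i = 30.                               # chosen Stokes I signal-to-noise cut
    sigma_P = np.sqrt(2.) / snr_i * 100.      # corresponding uncertainty on P, in percent
    print(round(sigma_P, 1))                  # 4.7, the value quoted in the comment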

New file: src/FOC_reduction2_ELR.py (executable, 156 lines)
@@ -0,0 +1,156 @@
#!/usr/bin/python
#-*- coding:utf-8 -*-
"""
Main script where are progressively added the steps for the FOC pipeline reduction.
"""
#Project libraries
import sys
import numpy as np
import copy
import lib.fits as proj_fits #Functions to handle fits files
import lib.reduction_ELR as proj_red #Functions used in reduction pipeline
import lib.plots_ELR as proj_plots #Functions for plotting data
def main():
##### User inputs
## Input and output locations
globals()['data_folder'] = "../data/NGC1068_x274020/"
infiles = ['x274020at.c0f.fits','x274020bt.c0f.fits','x274020ct.c0f.fits',
'x274020dt.c0f.fits','x274020et.c0f.fits','x274020ft.c0f.fits',
'x274020gt.c0f.fits','x274020ht.c0f.fits','x274020it.c0f.fits']
globals()['plots_folder'] = "../plots/NGC1068_x274020/"
# globals()['data_folder'] = "../data/NGC1068_x14w010/"
# infiles = ['x14w0101t_c0f.fits','x14w0102t_c0f.fits','x14w0103t_c0f.fits',
# 'x14w0104t_c0f.fits','x14w0105p_c0f.fits','x14w0106t_c0f.fits']
# infiles = ['x14w0101t_c1f.fits','x14w0102t_c1f.fits','x14w0103t_c1f.fits',
# 'x14w0104t_c1f.fits','x14w0105p_c1f.fits','x14w0106t_c1f.fits']
# globals()['plots_folder'] = "../plots/NGC1068_x14w010/"
# globals()['data_folder'] = "../data/3C405_x136060/"
# infiles = ['x1360601t_c0f.fits','x1360602t_c0f.fits','x1360603t_c0f.fits']
# infiles = ['x1360601t_c1f.fits','x1360602t_c1f.fits','x1360603t_c1f.fits']
# globals()['plots_folder'] = "../plots/3C405_x136060/"
# globals()['data_folder'] = "../data/CygnusA_x43w0/"
# infiles = ['x43w0101r_c0f.fits', 'x43w0104r_c0f.fits', 'x43w0107r_c0f.fits',
# 'x43w0201r_c0f.fits', 'x43w0204r_c0f.fits', 'x43w0102r_c0f.fits',
# 'x43w0105r_c0f.fits', 'x43w0108r_c0f.fits', 'x43w0202r_c0f.fits',
# 'x43w0205r_c0f.fits', 'x43w0103r_c0f.fits', 'x43w0106r_c0f.fits',
# 'x43w0109r_c0f.fits', 'x43w0203r_c0f.fits', 'x43w0206r_c0f.fits']
# globals()['plots_folder'] = "../plots/CygnusA_x43w0/"
# globals()['data_folder'] = "../data/3C109_x3mc010/"
# infiles = ['x3mc0101m_c0f.fits','x3mc0102m_c0f.fits','x3mc0103m_c0f.fits']
# globals()['plots_folder'] = "../plots/3C109_x3mc010/"
# globals()['data_folder'] = "../data/MKN463_x2rp030/"
# infiles = ['x2rp0201t_c0f.fits', 'x2rp0203t_c0f.fits', 'x2rp0205t_c0f.fits',
# 'x2rp0207t_c0f.fits', 'x2rp0302t_c0f.fits', 'x2rp0304t_c0f.fits',
# 'x2rp0306t_c0f.fits', 'x2rp0202t_c0f.fits', 'x2rp0204t_c0f.fits',
# 'x2rp0206t_c0f.fits', 'x2rp0301t_c0f.fits', 'x2rp0303t_c0f.fits',
# 'x2rp0305t_c0f.fits', 'x2rp0307t_c0f.fits']
# globals()['plots_folder'] = "../plots/MKN463_x2rp030/"
# globals()['data_folder'] = "../data/PG1630+377_x39510/"
# infiles = ['x3990201m_c0f.fits', 'x3990205m_c0f.fits', 'x3995101r_c0f.fits',
# 'x3995105r_c0f.fits', 'x3995109r_c0f.fits', 'x3995201r_c0f.fits',
# 'x3995205r_c0f.fits', 'x3990202m_c0f.fits', 'x3990206m_c0f.fits',
# 'x3995102r_c0f.fits', 'x3995106r_c0f.fits', 'x399510ar_c0f.fits',
# 'x3995202r_c0f.fits','x3995206r_c0f.fits']
# globals()['plots_folder'] = "../plots/PG1630+377_x39510/"
## Reduction parameters
# Deconvolution
deconvolve = False
if deconvolve:
psf = 'gaussian' #Can be user-defined as well
psf_FWHM = 0.10
psf_scale = 'arcsec'
psf_shape=(9,9)
iterations = 10
# Error estimation
error_sub_shape = (75,75)
display_error = False
# Data binning
rebin = True
if rebin:
pxsize = 8
px_scale = 'pixel' #pixel or arcsec
rebin_operation = 'average' #sum or average
# Alignement
align_center = 'image' #If None will align image to image center
display_data = False
# Smoothing
smoothing_function = 'gaussian' #gaussian or combine
smoothing_FWHM = 1 #If None, no smoothing is done
smoothing_scale = 'pixel' #pixel or arcsec
# Rotation
rotate = False #rotation to North convention can give erroneous results
rotate_library = 'scipy' #scipy or pillow
# Polarization map output
figname = 'NGC1068_FOC' #target/intrument name
figtype = '_ELR_same_param_same_op_sP100' #additionnal informations
SNRp_cut = 3 #P measurments with SNR>3
SNRi_cut = 4 #I measurments with SNR>30, which implies an uncertainty in P of 4.7%.
step_vec = 1 #plot all vectors in the array. if step_vec = 2, then every other vector will be plotted
##### Pipeline start
## Step 1:
# Get data from fits files and translate to flux in erg/cm²/s/Angstrom.
data_array, headers = proj_fits.get_obs_data(infiles, data_folder=data_folder, compute_flux=False)
# Crop data to remove outside blank margins.
#data_array, error_array = proj_red.crop_array(data_array, step=5, null_val=0., inside=True)
# Deconvolve data using Richardson-Lucy iterative algorithm with a gaussian PSF of given FWHM.
if deconvolve:
data_array = proj_red.deconvolve_array(data_array, headers, psf=psf, FWHM=psf_FWHM, scale=psf_scale, shape=psf_shape, iterations=iterations)
# Estimate error from data background, estimated from sub-image of desired sub_shape.
data_array, error_array = proj_red.get_error(data_array, sub_shape=error_sub_shape, display=display_error, headers=headers, savename=figname+"_errors", plots_folder=plots_folder)
# Rebin data to desired pixel size.
if rebin:
data_array, error_array, headers, Dxy = proj_red.rebin_array(data_array, error_array, headers, pxsize=pxsize, scale=px_scale, operation=rebin_operation)
#Align and rescale images with oversampling.
data_array, error_array = proj_red.align_data(data_array, error_array, upsample_factor=int(Dxy.min()), ref_center=align_center, return_shifts=False)
#Plot array for checking output
if display_data:
proj_plots.plot_obs(data_array, headers, vmin=data_array.min(), vmax=data_array.max(), savename=figname+"_center_"+align_center, plots_folder=plots_folder)
## Step 2:
# Compute Stokes I, Q, U with smoothed polarized images
# SMOOTHING DISCUSSION :
# FWHM of FOC have been estimated at about 0.03" across 1500-5000 Angstrom band, which is about 2 detector pixels wide
# see Jedrzejewski, R.; Nota, A.; Hack, W. J., A Comparison Between FOC and WFPC2
# Bibcode : 1995chst.conf...10J
I_stokes, Q_stokes, U_stokes, Stokes_cov = proj_red.compute_Stokes(data_array, error_array, headers, FWHM=smoothing_FWHM, scale=smoothing_scale, smoothing=smoothing_function)
## Step 3:
# Rotate images to have North up
if rotate:
ref_header = copy.deepcopy(headers[0])
if rotate_library.lower() in ['scipy','scipy.ndimage']:
I_stokes, Q_stokes, U_stokes, Stokes_cov, headers = proj_red.rotate_sc_Stokes(I_stokes, Q_stokes, U_stokes, Stokes_cov, headers, -ref_header['orientat'])
if rotate_library.lower() in ['pillow','pil']:
I_stokes, Q_stokes, U_stokes, Stokes_cov, headers = proj_red.rotate_PIL_Stokes(I_stokes, Q_stokes, U_stokes, Stokes_cov, headers, -ref_header['orientat'])
# Compute polarimetric parameters (polarization degree and angle).
P, debiased_P, s_P, s_P_P, PA, s_PA, s_PA_P = proj_red.compute_pol(I_stokes, Q_stokes, U_stokes, Stokes_cov, headers)
## Step 4:
# Save image to FITS.
Stokes_test = proj_fits.save_Stokes(I_stokes, Q_stokes, U_stokes, Stokes_cov, P, debiased_P, s_P, s_P_P, PA, s_PA, s_PA_P, headers, figname+figtype, data_folder=data_folder, return_hdul=True)
## Step 5:
# Plot polarization map (Background is either total Flux, Polarization degree or Polarization degree error).
proj_plots.polarization_map(copy.deepcopy(Stokes_test), SNRp_cut=SNRp_cut, SNRi_cut=SNRi_cut, step_vec=step_vec, savename=figname+figtype, plots_folder=plots_folder, display=None)
#proj_plots.polarization_map(copy.deepcopy(Stokes_test), SNRp_cut=SNRp_cut, SNRi_cut=SNRi_cut, step_vec=step_vec, savename=figname+figtype+"_P", plots_folder=plots_folder, display='Pol_deg')
#proj_plots.polarization_map(copy.deepcopy(Stokes_test), SNRp_cut=SNRp_cut, SNRi_cut=SNRi_cut, step_vec=step_vec, savename=figname+figtype+"_P_err", plots_folder=plots_folder, display='Pol_deg_err')
proj_plots.polarization_map(copy.deepcopy(Stokes_test), SNRp_cut=SNRp_cut, SNRi_cut=SNRi_cut, step_vec=step_vec, savename=figname+figtype+"_SNRi", plots_folder=plots_folder, display='SNRi')
proj_plots.polarization_map(copy.deepcopy(Stokes_test), SNRp_cut=SNRp_cut, SNRi_cut=SNRi_cut, step_vec=step_vec, savename=figname+figtype+"_SNRp", plots_folder=plots_folder, display='SNRp')
return 0
if __name__ == "__main__":
sys.exit(main())
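The SMOOTHING DISCUSSION comment in the listing above quotes a FOC PSF FWHM of about 0.03", roughly 2 detector pixels. As a quick sanity check (a sketch with a hypothetical plate scale; the real value would come from the CDELT keywords of the FOC headers):

    cdelt_deg = 4.0e-6                  # hypothetical plate scale in deg/pixel, for illustration only
    px_arcsec = cdelt_deg * 3600.       # about 0.0144 arcsec/pixel
    fwhm_arcsec = 0.03                  # PSF FWHM quoted in the SMOOTHING DISCUSSION comment
    fwhm_px = fwhm_arcsec / px_arcsec   # about 2 detector pixels, consistent with the comment
    print(round(fwhm_px, 1))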

Modified file: FITS I/O library (get_obs_data, save_Stokes)
@@ -50,7 +50,7 @@ def get_obs_data(infiles, data_folder="", compute_flux=False):
def save_Stokes(I_stokes, Q_stokes, U_stokes, Stokes_cov, P, debiased_P, s_P,
- s_P_P, PA, s_PA, s_PA_P, ref_header, filename, data_folder="",
+ s_P_P, PA, s_PA, s_PA_P, headers, filename, data_folder="",
return_hdul=False):
"""
Save computed polarimetry parameters to a single fits file,
@@ -64,9 +64,9 @@ def save_Stokes(I_stokes, Q_stokes, U_stokes, Stokes_cov, P, debiased_P, s_P,
its error propagated and assuming Poisson noise.
Stokes_cov : numpy.ndarray
Covariance matrix of the Stokes parameters I, Q, U.
- ref_header : astropy.io.fits.header.Header
+ headers : header list
Header of reference some keywords will be copied from (CRVAL, CDELT,
- INSTRUME, PROPOSID, TARGNAME, ORIENTAT).
+ INSTRUME, PROPOSID, TARGNAME, ORIENTAT, EXPTOT).
filename : str
Name that will be given to the file on writing (will appear in header).
data_folder : str, optional
@@ -85,6 +85,8 @@ def save_Stokes(I_stokes, Q_stokes, U_stokes, Stokes_cov, P, debiased_P, s_P,
Only returned if return_hdul is True.
"""
#Create new WCS object given the modified images
+ ref_header = headers[0]
+ exp_tot = np.array([header['exptime'] for header in headers]).sum()
new_wcs = wcs.WCS(ref_header).deepcopy()
if new_wcs.wcs.has_cd():
del new_wcs.wcs.cd
@@ -106,6 +108,8 @@ def save_Stokes(I_stokes, Q_stokes, U_stokes, Stokes_cov, P, debiased_P, s_P,
header = new_wcs.to_header()
header['instrume'] = (ref_header['instrume'], 'Instrument from which data is reduced')
header['photplam'] = (ref_header['photplam'], 'Pivot Wavelength')
+ header['photflam'] = (ref_header['photflam'], 'Inverse Sensitivity in DN/sec/cm**2/Angst')
+ header['exptot'] = (exp_tot, 'Total exposure time in sec')
header['proposid'] = (ref_header['proposid'], 'PEP proposal identifier for observation')
header['targname'] = (ref_header['targname'], 'Target name')
header['orientat'] = (ref_header['orientat'], 'Angle between North and the y-axis of the image')
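The new EXPTOT and PHOTFLAM keywords are what allow plots_ELR.py to convert the stored counts back to flux (convert_flux = photflam/exptot). A self-contained sketch of summing EXPTIME and writing the new keywords, using standard astropy calls but made-up header values (not taken from the commit):

    import numpy as np
    from astropy.io import fits

    headers = []
    for exptime in (600.0, 520.0):            # hypothetical exposure times of two polarizer images
        h = fits.Header()
        h['EXPTIME'] = exptime
        h['PHOTFLAM'] = 2.5e-18               # made-up inverse sensitivity, illustration only
        headers.append(h)

    exp_tot = np.array([h['EXPTIME'] for h in headers]).sum()

    out = fits.Header()
    out['PHOTFLAM'] = (headers[0]['PHOTFLAM'], 'Inverse Sensitivity in DN/sec/cm**2/Angst')
    out['EXPTOT'] = (exp_tot, 'Total exposure time in sec')
    print(out['EXPTOT'])                      # 1120.0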

Modified file: plotting library (polarization_map)
@@ -129,6 +129,7 @@ def polarization_map(Stokes, SNRp_cut=3., SNRi_cut=30., step_vec=1,
stk_cov = Stokes[np.argmax([Stokes[i].header['datatype']=='IQU_cov_matrix' for i in range(len(Stokes))])]
pol = Stokes[np.argmax([Stokes[i].header['datatype']=='Pol_deg' for i in range(len(Stokes))])]
pol_err = Stokes[np.argmax([Stokes[i].header['datatype']=='Pol_deg_err' for i in range(len(Stokes))])]
+ #pol_err_Poisson = Stokes[np.argmax([Stokes[i].header['datatype']=='Pol_deg_err_Poisson_noise' for i in range(len(Stokes))])]
pang = Stokes[np.argmax([Stokes[i].header['datatype']=='Pol_ang' for i in range(len(Stokes))])]
pang_err = Stokes[np.argmax([Stokes[i].header['datatype']=='Pol_ang_err' for i in range(len(Stokes))])]
@@ -138,6 +139,7 @@ def polarization_map(Stokes, SNRp_cut=3., SNRi_cut=30., step_vec=1,
#Compute SNR and apply cuts
pol.data[pol.data == 0.] = np.nan
SNRp = pol.data/pol_err.data
+ #SNRp = pol.data/pol_err_Poisson.data
pol.data[SNRp < SNRp_cut] = np.nan
SNRi = stkI.data/np.sqrt(stk_cov.data[0,0])
pol.data[SNRi < SNRi_cut] = np.nan
@@ -163,6 +165,8 @@ def polarization_map(Stokes, SNRp_cut=3., SNRi_cut=30., step_vec=1,
vmin, vmax = 0., np.max(stkI.data[stkI.data > 0.])
im = ax.imshow(stkI.data,extent=[-stkI.data.shape[1]/2.,stkI.data.shape[1]/2.,-stkI.data.shape[0]/2.,stkI.data.shape[0]/2.], vmin=vmin, vmax=vmax, aspect='auto', cmap='inferno', alpha=1.)
cbar = plt.colorbar(im, cax=cbar_ax, label=r"$F_{\lambda}$ [$ergs \cdot cm^{-2} \cdot s^{-1} \cdot \AA^{-1}$]")
+ levelsI = np.linspace(SNRi_cut, np.max(SNRi[SNRi > 0.]), 10)
+ cont = ax.contour(SNRi, extent=[-SNRi.shape[1]/2.,SNRi.shape[1]/2.,-SNRi.shape[0]/2.,SNRi.shape[0]/2.], levels=levelsI, colors='grey', linewidths=0.5)
elif display.lower() in ['p','pol','pol_deg']:
# Display polarization degree map
vmin, vmax = 0., 100.
@@ -173,10 +177,26 @@ def polarization_map(Stokes, SNRp_cut=3., SNRi_cut=30., step_vec=1,
vmin, vmax = 0., 5.
im = ax.imshow(pol_err.data,extent=[-pol_err.data.shape[1]/2.,pol_err.data.shape[1]/2.,-pol_err.data.shape[0]/2.,pol_err.data.shape[0]/2.], vmin=vmin, vmax=vmax, aspect='auto', cmap='inferno', alpha=1.)
cbar = plt.colorbar(im, cax=cbar_ax, label=r"$\sigma_P$ [%]")
+ elif display.lower() in ['snr','snri']:
+ # Display I_stokes signal-to-noise map
+ vmin, vmax = 0., np.max(SNRi[SNRi > 0.])
+ im = ax.imshow(SNRi, extent=[-SNRi.shape[1]/2.,SNRi.shape[1]/2.,-SNRi.shape[0]/2.,SNRi.shape[0]/2.], vmin=vmin, vmax=vmax, aspect='auto', cmap='inferno', alpha=1.)
+ cbar = plt.colorbar(im, cax=cbar_ax, label=r"$I_{Stokes}/\sigma_{I}$")
+ levelsI = np.linspace(SNRi_cut, np.max(SNRi[SNRi > 0.]), 10)
+ cont = ax.contour(SNRi, extent=[-SNRi.shape[1]/2.,SNRi.shape[1]/2.,-SNRi.shape[0]/2.,SNRi.shape[0]/2.], levels=levelsI, colors='grey', linewidths=0.5)
+ elif display.lower() in ['snrp']:
+ # Display polarization degree signal-to-noise map
+ vmin, vmax = SNRp_cut, np.max(SNRp[SNRp > 0.])
+ im = ax.imshow(SNRp, extent=[-SNRp.shape[1]/2.,SNRp.shape[1]/2.,-SNRp.shape[0]/2.,SNRp.shape[0]/2.], vmin=vmin, vmax=vmax, aspect='auto', cmap='inferno', alpha=1.)
+ cbar = plt.colorbar(im, cax=cbar_ax, label=r"$P/\sigma_{P}$")
+ levelsP = np.linspace(SNRp_cut, np.max(SNRp[SNRp > 0.]), 10)
+ cont = ax.contour(SNRp, extent=[-SNRp.shape[1]/2.,SNRp.shape[1]/2.,-SNRp.shape[0]/2.,SNRp.shape[0]/2.], levels=levelsP, colors='grey', linewidths=0.5)
else:
# Defaults to intensity map
vmin, vmax = 0., np.max(stkI.data[stkI.data > 0.])
im = ax.imshow(stkI.data,extent=[-stkI.data.shape[1]/2.,stkI.data.shape[1]/2.,-stkI.data.shape[0]/2.,stkI.data.shape[0]/2.], vmin=vmin, vmax=vmax, aspect='auto', cmap='inferno', alpha=1.)
+ levelsI = np.linspace(SNRi_cut, SNRi.max(), 10)
+ cont = ax.contour(SNRi, extent=[-SNRi.shape[1]/2.,SNRi.shape[1]/2.,-SNRi.shape[0]/2.,SNRi.shape[0]/2.], levels=levelsI, colors='grey', linewidths=0.5)
cbar = plt.colorbar(im, cax=cbar_ax, label=r"$F_{\lambda}$ [$ergs \cdot cm^{-2} \cdot s^{-1} \cdot \AA$]")
px_size = wcs.wcs.get_cdelt()[0]
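As a reading aid (not from the commit), the new 'snri'/'snrp' display modes simply image the same quantities already used for the cuts, SNRi = I/sqrt(Cov[0,0]) and SNRp = P/sigma_P. A minimal numpy sketch with made-up arrays:

    import numpy as np

    rng = np.random.default_rng(0)
    I = rng.uniform(1., 100., (5, 5))         # Stokes I map, arbitrary units
    cov_II = rng.uniform(0.5, 2., (5, 5))     # Var(I) from the I,Q,U covariance matrix
    P = rng.uniform(0., 30., (5, 5))          # polarization degree in percent
    sigma_P = rng.uniform(0.5, 5., (5, 5))    # its uncertainty in percent

    SNRi = I / np.sqrt(cov_II)
    SNRp = P / sigma_P
    P_shown = np.where((SNRi >= 30.) & (SNRp >= 3.), P, np.nan)   # same cuts as SNRi_cut/SNRp_cut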

New file: src/lib/plots_ELR.py (executable, 247 lines)
@@ -0,0 +1,247 @@
"""
Library functions for displaying informations using matplotlib
prototypes :
- plot_obs(data_array, headers, shape, vmin, vmax, savename, plots_folder)
Plots whole observation raw data in given display shape
- polarization_map(Stokes_hdul, SNRp_cut, SNRi_cut, step_vec, savename, plots_folder, display)
Plots polarization map of polarimetric parameters saved in an HDUList
"""
import numpy as np
import matplotlib.pyplot as plt
from matplotlib.patches import Rectangle
from mpl_toolkits.axes_grid1.anchored_artists import AnchoredSizeBar
from astropy.wcs import WCS
def plot_obs(data_array, headers, shape=None, vmin=0., vmax=6., rectangle=None,
savename=None, plots_folder=""):
"""
Plots raw observation imagery with some information on the instrument and
filters.
----------
Inputs:
data_array : numpy.ndarray
Array of images (2D floats, aligned and of the same shape) of a
single observation with multiple polarizers of an instrument
headers : header list
List of headers corresponding to the images in data_array
shape : array-like of length 2, optional
Shape of the display, with shape = [#row, #columns]. If None, defaults
to the optimal square.
Defaults to None.
vmin : float, optional
Min pixel value that should be displayed.
Defaults to 0.
vmax : float, optional
Max pixel value that should be displayed.
Defaults to 6.
rectangle : numpy.ndarray, optional
Array of parameters for matplotlib.patches.Rectangle objects that will
be displayed on each output image. If None, no rectangle displayed.
Defaults to None.
savename : str, optional
Name of the figure the map should be saved to. If None, the map won't
be saved (only displayed).
Defaults to None.
plots_folder : str, optional
Relative (or absolute) filepath to the folder in wich the map will
be saved. Not used if savename is None.
Defaults to current folder.
"""
if shape is None:
shape = np.array([np.ceil(np.sqrt(data_array.shape[0])).astype(int),]*2)
fig, ax = plt.subplots(shape[0], shape[1], figsize=(10,10), dpi=200,
sharex=True, sharey=True)
for i, enum in enumerate(list(zip(ax.flatten(),data_array))):
ax = enum[0]
data = enum[1]
instr = headers[i]['instrume']
rootname = headers[i]['rootname']
exptime = headers[i]['exptime']
filt = headers[i]['filtnam1']
#plots
im = ax.imshow(data, vmin=vmin, vmax=vmax, origin='lower')
if not(rectangle is None):
x, y, width, height = rectangle[i]
ax.add_patch(Rectangle((x, y), width, height, edgecolor='r', fill=False))
#position of centroid
ax.plot([data.shape[1]/2, data.shape[1]/2], [0,data.shape[0]-1], lw=1,
color='black')
ax.plot([0,data.shape[1]-1], [data.shape[1]/2, data.shape[1]/2], lw=1,
color='black')
ax.annotate(instr+":"+rootname, color='white', fontsize=5, xy=(0.02, 0.95), xycoords='axes fraction')
ax.annotate(filt, color='white', fontsize=10, xy=(0.02, 0.02), xycoords='axes fraction')
ax.annotate(exptime, color='white', fontsize=5, xy=(0.80, 0.02), xycoords='axes fraction')
fig.subplots_adjust(hspace=0, wspace=0, right=0.85)
cbar_ax = fig.add_axes([0.9, 0.12, 0.02, 0.75])
fig.colorbar(im, cax=cbar_ax)
if not (savename is None):
fig.suptitle(savename)
fig.savefig(plots_folder+savename+".png",bbox_inches='tight')
plt.show()
return 0
def polarization_map(Stokes, SNRp_cut=3., SNRi_cut=30., step_vec=1,
savename=None, plots_folder="", display=None):
"""
Plots polarization map from Stokes HDUList.
----------
Inputs:
Stokes : astropy.io.fits.hdu.hdulist.HDUList
HDUList containing I, Q, U, P, s_P, PA, s_PA (in this particular order)
for one observation.
SNRp_cut : float, optional
Cut that should be applied to the signal-to-noise ratio on P.
Any SNR < SNRp_cut won't be displayed.
Defaults to 3.
SNRi_cut : float, optional
Cut that should be applied to the signal-to-noise ratio on I.
Any SNR < SNRi_cut won't be displayed.
Defaults to 30. This value implies an uncertainty in P of 4.7%
step_vec : int, optional
Number of steps between each displayed polarization vector.
If step_vec = 2, every other vector will be displayed.
Defaults to 1
savename : str, optional
Name of the figure the map should be saved to. If None, the map won't
be saved (only displayed).
Defaults to None.
plots_folder : str, optional
Relative (or absolute) filepath to the folder in wich the map will
be saved. Not used if savename is None.
Defaults to current folder.
display : str, optional
Choose the map to display between intensity (default), polarization
degree ('p','pol','pol_deg') or polarization degree error ('s_p',
'pol_err','pol_deg_err').
Defaults to None (intensity).
"""
#Get data
stkI = Stokes[np.argmax([Stokes[i].header['datatype']=='I_stokes' for i in range(len(Stokes))])]
stkQ = Stokes[np.argmax([Stokes[i].header['datatype']=='Q_stokes' for i in range(len(Stokes))])]
stkU = Stokes[np.argmax([Stokes[i].header['datatype']=='U_stokes' for i in range(len(Stokes))])]
stk_cov = Stokes[np.argmax([Stokes[i].header['datatype']=='IQU_cov_matrix' for i in range(len(Stokes))])]
pol = Stokes[np.argmax([Stokes[i].header['datatype']=='Pol_deg' for i in range(len(Stokes))])]
pol_err = Stokes[np.argmax([Stokes[i].header['datatype']=='Pol_deg_err' for i in range(len(Stokes))])]
pol_err_Poisson = Stokes[np.argmax([Stokes[i].header['datatype']=='Pol_deg_err_Poisson_noise' for i in range(len(Stokes))])]
pang = Stokes[np.argmax([Stokes[i].header['datatype']=='Pol_ang' for i in range(len(Stokes))])]
pang_err = Stokes[np.argmax([Stokes[i].header['datatype']=='Pol_ang_err' for i in range(len(Stokes))])]
pivot_wav = Stokes[0].header['photplam']
convert_flux = Stokes[0].header['photflam']/Stokes[0].header['exptot']
wcs = WCS(Stokes[0]).deepcopy()
#Compute SNR and apply cuts
pol.data[pol.data == 0.] = np.nan
#SNRp = pol.data/pol_err.data
SNRp = pol.data/pol_err_Poisson.data
pol.data[SNRp < SNRp_cut] = np.nan
#SNRi = stkI.data/np.sqrt(stk_cov.data[0,0])
SNRi = stkI.data/np.std(stkI.data[0:10,0:10])
pol.data[SNRi < SNRi_cut] = np.nan
mask = (SNRp < SNRp_cut) * (SNRi < SNRi_cut)
# Look for pixel of max polarization
if np.isfinite(pol.data).any():
p_max = np.max(pol.data[np.isfinite(pol.data)])
x_max, y_max = np.unravel_index(np.argmax(pol.data==p_max),pol.data.shape)
else:
print("No pixel with polarization information above requested SNR.")
#Plot the map
fig = plt.figure(figsize=(15,15))
ax = fig.add_subplot(111, projection=wcs)
ax.set_facecolor('k')
fig.subplots_adjust(hspace=0, wspace=0, right=0.95)
cbar_ax = fig.add_axes([0.98, 0.12, 0.01, 0.75])
if display is None:
# If no display selected, show intensity map
vmin, vmax = 0., np.max(stkI.data[stkI.data > 0.]*convert_flux)
im = ax.imshow(stkI.data*convert_flux,extent=[-stkI.data.shape[1]/2.,stkI.data.shape[1]/2.,-stkI.data.shape[0]/2.,stkI.data.shape[0]/2.], vmin=vmin, vmax=vmax, aspect='auto', cmap='inferno', alpha=1.)
cbar = plt.colorbar(im, cax=cbar_ax, label=r"$F_{\lambda}$ [$ergs \cdot cm^{-2} \cdot s^{-1} \cdot \AA^{-1}$]")
levelsI = np.linspace(SNRi_cut, SNRi.max(), 10)
cont = ax.contour(SNRi, extent=[-SNRi.shape[1]/2.,SNRi.shape[1]/2.,-SNRi.shape[0]/2.,SNRi.shape[0]/2.], levels=levelsI, colors='grey', linewidths=0.5)
elif display.lower() in ['p','pol','pol_deg']:
# Display polarization degree map
vmin, vmax = 0., 100.
im = ax.imshow(pol.data,extent=[-pol.data.shape[1]/2.,pol.data.shape[1]/2.,-pol.data.shape[0]/2.,pol.data.shape[0]/2.], vmin=vmin, vmax=vmax, aspect='auto', cmap='inferno', alpha=1.)
cbar = plt.colorbar(im, cax=cbar_ax, label=r"$P$ [%]")
elif display.lower() in ['s_p','pol_err','pol_deg_err']:
# Display polarization degree error map
vmin, vmax = 0., 5.
im = ax.imshow(pol_err.data,extent=[-pol_err.data.shape[1]/2.,pol_err.data.shape[1]/2.,-pol_err.data.shape[0]/2.,pol_err.data.shape[0]/2.], vmin=vmin, vmax=vmax, aspect='auto', cmap='inferno', alpha=1.)
cbar = plt.colorbar(im, cax=cbar_ax, label=r"$\sigma_P$ [%]")
elif display.lower() in ['snr','snri']:
# Display I_stokes signal-to-noise map
vmin, vmax = 0., SNRi.max()
im = ax.imshow(SNRi, extent=[-SNRi.shape[1]/2.,SNRi.shape[1]/2.,-SNRi.shape[0]/2.,SNRi.shape[0]/2.], vmin=vmin, vmax=vmax, aspect='auto', cmap='inferno', alpha=1.)
cbar = plt.colorbar(im, cax=cbar_ax, label=r"$I_{Stokes}/\sigma_{I}$")
levelsI = np.linspace(SNRi_cut, SNRi.max(), 20)
cont = ax.contour(SNRi, extent=[-SNRi.shape[1]/2.,SNRi.shape[1]/2.,-SNRi.shape[0]/2.,SNRi.shape[0]/2.], levels=levelsI, colors='grey', linewidths=0.5)
elif display.lower() in ['snrp']:
# Display polarization degree signal-to-noise map
vmin, vmax = SNRp_cut, np.max(SNRp[SNRp > 0.])
im = ax.imshow(SNRp, extent=[-SNRp.shape[1]/2.,SNRp.shape[1]/2.,-SNRp.shape[0]/2.,SNRp.shape[0]/2.], vmin=vmin, vmax=vmax, aspect='auto', cmap='inferno', alpha=1.)
cbar = plt.colorbar(im, cax=cbar_ax, label=r"$P/\sigma_{P}$")
levelsP = np.linspace(SNRp_cut, np.max(SNRp[SNRp > 0.]), 10)
cont = ax.contour(SNRp, extent=[-SNRp.shape[1]/2.,SNRp.shape[1]/2.,-SNRp.shape[0]/2.,SNRp.shape[0]/2.], levels=levelsP, colors='grey', linewidths=0.5)
else:
# Defaults to intensity map
vmin, vmax = 0., np.max(stkI.data[stkI.data > 0.]*convert_flux)
im = ax.imshow(stkI.data,extent=[-stkI.data.shape[1]/2.,stkI.data.shape[1]/2.,-stkI.data.shape[0]/2.,stkI.data.shape[0]/2.], vmin=vmin, vmax=vmax, aspect='auto', cmap='inferno', alpha=1.)
cbar = plt.colorbar(im, cax=cbar_ax, label=r"$F_{\lambda}$ [$ergs \cdot cm^{-2} \cdot s^{-1} \cdot \AA$]")
levelsI = np.linspace(SNRi_cut, SNRi.max(), 10)
cont = ax.contour(SNRi, extent=[-SNRi.shape[1]/2.,SNRi.shape[1]/2.,-SNRi.shape[0]/2.,SNRi.shape[0]/2.], levels=levelsI, colors='grey', linewidths=0.5)
px_size = wcs.wcs.get_cdelt()[0]
px_sc = AnchoredSizeBar(ax.transData, 1./px_size, '1 arcsec', 3, pad=0.5, sep=5, borderpad=0.5, frameon=False, size_vertical=0.005, color='w')
ax.add_artist(px_sc)
X, Y = np.meshgrid(np.linspace(-stkI.data.shape[0]/2.,stkI.data.shape[0]/2.,stkI.data.shape[0]), np.linspace(-stkI.data.shape[1]/2.,stkI.data.shape[1]/2.,stkI.data.shape[1]))
U, V = pol.data*np.cos(np.pi/2.+pang.data*np.pi/180.), pol.data*np.sin(np.pi/2.+pang.data*np.pi/180.)
Q = ax.quiver(X[::step_vec,::step_vec],Y[::step_vec,::step_vec],U[::step_vec,::step_vec],V[::step_vec,::step_vec],units='xy',angles='uv',scale=50.,scale_units='xy',pivot='mid',headwidth=0.,headlength=0.,headaxislength=0.,width=0.1,color='w')
pol_sc = AnchoredSizeBar(ax.transData, 2., r"$P$= 100 %", 4, pad=0.5, sep=5, borderpad=0.5, frameon=False, size_vertical=0.005, color='w')
ax.add_artist(pol_sc)
# Compute integrated parameters and associated errors
I_int = stkI.data[mask].sum()
Q_int = stkQ.data[mask].sum()
U_int = stkU.data[mask].sum()
I_int_err = np.sqrt(np.sum(stk_cov.data[0,0][mask]))
Q_int_err = np.sqrt(np.sum(stk_cov.data[1,1][mask]))
U_int_err = np.sqrt(np.sum(stk_cov.data[2,2][mask]))
IQ_int_err = np.sqrt(np.sum(stk_cov.data[0,1][mask]**2))
IU_int_err = np.sqrt(np.sum(stk_cov.data[0,2][mask]**2))
QU_int_err = np.sqrt(np.sum(stk_cov.data[1,2][mask]**2))
P_int = np.sqrt(Q_int**2+U_int**2)/I_int*100.
P_int_err = np.sqrt(2.)*(I_int)**(-0.5)*100.#(100./I_int)*np.sqrt((Q_int**2*Q_int_err**2 + U_int**2*U_int_err**2 + 2.*Q_int*U_int*QU_int_err)/(Q_int**2 + U_int**2) + ((Q_int/I_int)**2 + (U_int/I_int)**2)*I_int_err**2 - 2.*(Q_int/I_int)*IQ_int_err - 2.*(U_int/I_int)*IU_int_err)
PA_int = (90./np.pi)*np.arctan2(U_int,Q_int)+90.
PA_int_err = P_int_err/(P_int)*180./np.pi#(90./(np.pi*(Q_int**2 + U_int**2)))*np.sqrt(U_int**2*Q_int_err**2 + Q_int**2*U_int_err**2 - 2.*Q_int*U_int*QU_int_err)
ax.annotate(r"$F_{{\lambda}}^{{int}}$({0:.0f} $\AA$) = {1:.1e} $\pm$ {2:.1e} $ergs \cdot cm^{{-2}} \cdot s^{{-1}} \cdot \AA^{{-1}}$".format(pivot_wav,I_int*convert_flux,I_int_err*convert_flux)+"\n"+r"$P^{{int}}$ = {0:.2f} $\pm$ {1:.2f} %".format(P_int,P_int_err)+"\n"+r"$\theta_{{P}}^{{int}}$ = {0:.2f} $\pm$ {1:.2f} °".format(PA_int,PA_int_err), color='white', fontsize=11, xy=(0.01, 0.94), xycoords='axes fraction')
ax.coords.grid(True, color='white', ls='dotted', alpha=0.5)
ax.coords[0].set_axislabel('Right Ascension (J2000)')
ax.coords[0].set_axislabel_position('t')
ax.coords[0].set_ticklabel_position('t')
ax.coords[1].set_axislabel('Declination (J2000)')
ax.coords[1].set_axislabel_position('l')
ax.coords[1].set_ticklabel_position('l')
ax.axis('equal')
if not savename is None:
fig.suptitle(savename)
fig.savefig(plots_folder+savename+".png",bbox_inches='tight',dpi=200)
plt.show()
return 0
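For orientation (not part of the commit), the integrated quantities annotated at the top of the ELR map follow from the summed Stokes fluxes over the cut mask, with the Poisson-limited error on P used by this variant. A toy recomputation with made-up totals:

    import numpy as np

    I_int, Q_int, U_int = 1.0e5, 3.0e3, -1.5e3                   # made-up integrated Stokes fluxes
    P_int = np.sqrt(Q_int**2 + U_int**2) / I_int * 100.          # integrated polarization degree [%]
    PA_int = (90. / np.pi) * np.arctan2(U_int, Q_int) + 90.      # polarization angle [deg], same convention as above
    s_P_int = np.sqrt(2.) / np.sqrt(I_int) * 100.                # Poisson-limited error on P used in this ELR variant [%]
    print(round(P_int, 2), round(PA_int, 2), round(s_P_int, 2))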

Modified file: reduction library (align_data, compute_pol)
@@ -660,8 +660,8 @@ def align_data(data_array, error_array=None, upsample_factor=1., ref_data=None,
full_array = np.concatenate((data_array,[ref_data]),axis=0)
err_array = np.concatenate((error_array,[np.zeros(ref_data.shape)]),axis=0)
- full_array, err_array = crop_array(full_array, err_array, step=5,
- inside=False)
+ #full_array, err_array = crop_array(full_array, err_array, step=5,
+ # inside=False)
data_array, ref_data = full_array[:-1], full_array[-1]
error_array = err_array[:-1]
@@ -1083,9 +1083,10 @@ def compute_pol(I_stokes, Q_stokes, U_stokes, Stokes_cov, headers):
#Compute the total exposure time so that
#I_stokes*exp_tot = N_tot the total number of events
exp_tot = np.array([header['exptime'] for header in headers]).sum()
+ N_obs = I_stokes/np.array([header['photflam'] for header in headers]).mean()*exp_tot
#Errors on P, PA supposing Poisson noise
- s_P_P = np.sqrt(2.)*(I_stokes*exp_tot)**(-0.5)
+ s_P_P = np.sqrt(2.)*(N_obs)**(-0.5)*100.
s_PA_P = s_P_P/(2.*P/100.)*180./np.pi
return P, debiased_P, s_P, s_P_P, PA, s_PA, s_PA_P
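For context, this hunk converts the intensity to a count estimate N_obs before applying the standard Poisson relations sigma_P ~ sqrt(2/N) and sigma_theta ~ sigma_P/(2P) (same units for sigma_P and P, angle in radians). A quick numeric check with hypothetical values, keeping P as a fraction to avoid unit mix-ups:

    import numpy as np
    N_obs = 1.0e4                                   # hypothetical number of detected counts in a pixel
    P = 0.10                                        # polarization degree as a fraction (10 %)
    sigma_P = np.sqrt(2. / N_obs)                   # about 0.014, i.e. 1.4 %
    sigma_PA = sigma_P / (2. * P) * 180. / np.pi    # about 4.1 degrees
    print(round(sigma_P * 100., 2), round(sigma_PA, 2))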

New file: src/lib/reduction_ELR.py (executable, 1515 lines); diff not shown because the file is too large.