Change display, margin handling and redo all reductions
@@ -8,6 +8,7 @@ Main script where are progressively added the steps for the FOC pipeline reducti
import sys
import numpy as np
import copy
import matplotlib.pyplot as plt
import lib.fits as proj_fits #Functions to handle fits files
import lib.reduction as proj_red #Functions used in reduction pipeline
import lib.plots as proj_plots #Functions for plotting data
@@ -85,6 +86,8 @@ def main():
psf_scale = 'arcsec'
psf_shape=(9,9)
iterations = 10
# Cropping
display_crop = False
# Error estimation
error_sub_shape = (75,75)
display_error = False
@@ -98,17 +101,17 @@ def main():
align_center = 'image' #If None, will align image to image center
display_data = False
# Smoothing
smoothing_function = 'gaussian' #gaussian_after, gaussian or combine
smoothing_FWHM = 0.10 #If None, no smoothing is done
smoothing_function = 'combine' #gaussian_after, gaussian or combine
smoothing_FWHM = 0.20 #If None, no smoothing is done
smoothing_scale = 'arcsec' #pixel or arcsec
# Rotation
rotate_stokes = True #rotation to North convention can give erroneous results
rotate_data = False #rotation to North convention can give erroneous results
# Polarization map output
figname = 'NGC1068_FOC' #target/instrument name
figtype = '_gaussian_FWHM010_rot' #additional information
SNRp_cut = 3 #P measurements with SNR>3
SNRi_cut = 30 #I measurements with SNR>30, which implies an uncertainty in P of 4.7%.
figtype = '_combine_FWHM020_rot' #additional information
SNRp_cut = 20 #P measurements with SNR>20
SNRi_cut = 130 #I measurements with SNR>130, which implies an uncertainty in P of about 1.1%.
step_vec = 1 #plot all vectors in the array. If step_vec = 2, every other vector is plotted

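# Editor's note — a sketch, not part of the commit: for photon-noise-limited
# data sigma_P ~ sqrt(2)/SNR_I, so the SNRi cuts above map onto P uncertainties as
# >>> import numpy as np
# >>> 100.*np.sqrt(2.)/np.array([30., 130.])
# array([4.71..., 1.09...])  # percent: ~4.7% for SNR_I>30, ~1.1% for SNR_I>130
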
##### Pipeline start
@@ -119,7 +122,7 @@ def main():
if (data < 0.).any():
print("STEP 1 : ", data)
# Crop data to remove outside blank margins.
data_array, error_array = proj_red.crop_array(data_array, step=5, null_val=0., inside=True)
data_array, error_array = proj_red.crop_array(data_array, headers, step=5, null_val=0., inside=True, display=display_crop, savename=figname, plots_folder=plots_folder)
for data in data_array:
if (data < 0.).any():
print("STEP 2 : ", data)
@@ -138,14 +141,14 @@ def main():
if (data < 0.).any():
print("STEP 4 : ", data)
#Align and rescale images with oversampling.
data_array, error_array = proj_red.align_data(data_array, error_array, upsample_factor=int(Dxy.min()), ref_center=align_center, return_shifts=False)
data_array, error_array, data_mask = proj_red.align_data(data_array, headers, error_array, upsample_factor=int(Dxy.min()), ref_center=align_center, return_shifts=False)
for data in data_array:
if (data < 0.).any():
print("STEP 5 : ", data)
# Rotate data to have North up
ref_header = copy.deepcopy(headers[0])
if rotate_data:
data_array, error_array, headers = proj_red.rotate_data(data_array, error_array, headers, -ref_header['orientat'])
data_array, error_array, data_mask, headers = proj_red.rotate_data(data_array, error_array, data_mask, headers, -ref_header['orientat'])
for data in data_array:
if (data < 0.).any():
print("STEP 6 : ", data)
@@ -159,13 +162,13 @@ def main():
# The FWHM of the FOC has been estimated at about 0.03" across the 1500-5000 Angstrom band, which is about 2 detector pixels wide;
# see Jedrzejewski, R.; Nota, A.; Hack, W. J., A Comparison Between FOC and WFPC2
# Bibcode : 1995chst.conf...10J
I_stokes, Q_stokes, U_stokes, Stokes_cov = proj_red.compute_Stokes(data_array, error_array, headers, FWHM=smoothing_FWHM, scale=smoothing_scale, smoothing=smoothing_function)
I_stokes, Q_stokes, U_stokes, Stokes_cov = proj_red.compute_Stokes(data_array, error_array, data_mask, headers, FWHM=smoothing_FWHM, scale=smoothing_scale, smoothing=smoothing_function)

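# Editor's note — a minimal sketch of the Gaussian smoothing step, assuming a
# known pixel scale in arcsec/pixel (`pixel_scale` is a hypothetical input;
# compute_Stokes presumably does the equivalent internally). The FWHM converts
# to a kernel sigma as sigma = FWHM/(2*sqrt(2*ln 2)):
# import numpy as np
# from astropy.convolution import Gaussian2DKernel, convolve
# def smooth_image(image, fwhm_arcsec, pixel_scale):
#     sigma_pix = fwhm_arcsec/(2.*np.sqrt(2.*np.log(2.)))/pixel_scale
#     return convolve(image, Gaussian2DKernel(x_stddev=sigma_pix))
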
## Step 3:
# Rotate images to have North up
if rotate_stokes:
ref_header = copy.deepcopy(headers[0])
I_stokes, Q_stokes, U_stokes, Stokes_cov, headers = proj_red.rotate_Stokes(I_stokes, Q_stokes, U_stokes, Stokes_cov, headers, -ref_header['orientat'], SNRi_cut=None)
I_stokes, Q_stokes, U_stokes, Stokes_cov, data_mask, headers = proj_red.rotate_Stokes(I_stokes, Q_stokes, U_stokes, Stokes_cov, data_mask, headers, -ref_header['orientat'], SNRi_cut=None)
# Compute polarimetric parameters (polarization degree and angle).
P, debiased_P, s_P, s_P_P, PA, s_PA, s_PA_P = proj_red.compute_pol(I_stokes, Q_stokes, U_stokes, Stokes_cov, headers)

@@ -175,11 +178,11 @@ def main():

## Step 5:
# Plot polarization map (Background is either total Flux, Polarization degree or Polarization degree error).
proj_plots.polarization_map(copy.deepcopy(Stokes_test), SNRp_cut=SNRp_cut, SNRi_cut=SNRi_cut, step_vec=step_vec, savename=figname+figtype, plots_folder=plots_folder, display=None)
proj_plots.polarization_map(copy.deepcopy(Stokes_test), SNRp_cut=SNRp_cut, SNRi_cut=SNRi_cut, step_vec=step_vec, savename=figname+figtype+"_P", plots_folder=plots_folder, display='Pol_deg')
proj_plots.polarization_map(copy.deepcopy(Stokes_test), SNRp_cut=SNRp_cut, SNRi_cut=SNRi_cut, step_vec=step_vec, savename=figname+figtype+"_P_err", plots_folder=plots_folder, display='Pol_deg_err')
proj_plots.polarization_map(copy.deepcopy(Stokes_test), SNRp_cut=SNRp_cut, SNRi_cut=SNRi_cut, step_vec=step_vec, savename=figname+figtype+"_SNRi", plots_folder=plots_folder, display='SNRi')
proj_plots.polarization_map(copy.deepcopy(Stokes_test), SNRp_cut=SNRp_cut, SNRi_cut=SNRi_cut, step_vec=step_vec, savename=figname+figtype+"_SNRp", plots_folder=plots_folder, display='SNRp')
proj_plots.polarization_map(copy.deepcopy(Stokes_test), data_mask, SNRp_cut=SNRp_cut, SNRi_cut=SNRi_cut, step_vec=step_vec, savename=figname+figtype, plots_folder=plots_folder, display=None)
proj_plots.polarization_map(copy.deepcopy(Stokes_test), data_mask, SNRp_cut=SNRp_cut, SNRi_cut=SNRi_cut, step_vec=step_vec, savename=figname+figtype+"_P", plots_folder=plots_folder, display='Pol_deg')
proj_plots.polarization_map(copy.deepcopy(Stokes_test), data_mask, SNRp_cut=SNRp_cut, SNRi_cut=SNRi_cut, step_vec=step_vec, savename=figname+figtype+"_P_err", plots_folder=plots_folder, display='Pol_deg_err')
proj_plots.polarization_map(copy.deepcopy(Stokes_test), data_mask, SNRp_cut=SNRp_cut, SNRi_cut=SNRi_cut, step_vec=step_vec, savename=figname+figtype+"_SNRi", plots_folder=plots_folder, display='SNRi')
proj_plots.polarization_map(copy.deepcopy(Stokes_test), data_mask, SNRp_cut=SNRp_cut, SNRi_cut=SNRi_cut, step_vec=step_vec, savename=figname+figtype+"_SNRp", plots_folder=plots_folder, display='SNRp')

return 0

@@ -1,156 +0,0 @@
#!/usr/bin/python
#-*- coding:utf-8 -*-
"""
Main script where the steps for the FOC pipeline reduction are progressively added.
"""

#Project libraries
import sys
import numpy as np
import copy
import lib.fits as proj_fits #Functions to handle fits files
import lib.reduction_ELR as proj_red #Functions used in reduction pipeline
import lib.plots_ELR as proj_plots #Functions for plotting data

def main():
##### User inputs
## Input and output locations
# globals()['data_folder'] = "../data/NGC1068_x274020/"
# infiles = ['x274020at.c0f.fits','x274020bt.c0f.fits','x274020ct.c0f.fits',
#            'x274020dt.c0f.fits','x274020et.c0f.fits','x274020ft.c0f.fits',
#            'x274020gt.c0f.fits','x274020ht.c0f.fits','x274020it.c0f.fits']
# globals()['plots_folder'] = "../plots/NGC1068_x274020/"

# globals()['data_folder'] = "../data/NGC1068_x14w010/"
# infiles = ['x14w0101t_c0f.fits','x14w0102t_c0f.fits','x14w0103t_c0f.fits',
#            'x14w0104t_c0f.fits','x14w0105p_c0f.fits','x14w0106t_c0f.fits']
# infiles = ['x14w0101t_c1f.fits','x14w0102t_c1f.fits','x14w0103t_c1f.fits',
#            'x14w0104t_c1f.fits','x14w0105p_c1f.fits','x14w0106t_c1f.fits']
# globals()['plots_folder'] = "../plots/NGC1068_x14w010/"

globals()['data_folder'] = "../data/3C405_x136060/"
infiles = ['x1360601t_c0f.fits','x1360602t_c0f.fits','x1360603t_c0f.fits']
# infiles = ['x1360601t_c1f.fits','x1360602t_c1f.fits','x1360603t_c1f.fits']
globals()['plots_folder'] = "../plots/3C405_x136060/"

# globals()['data_folder'] = "../data/CygnusA_x43w0/"
# infiles = ['x43w0101r_c0f.fits', 'x43w0104r_c0f.fits', 'x43w0107r_c0f.fits',
#            'x43w0201r_c0f.fits', 'x43w0204r_c0f.fits', 'x43w0102r_c0f.fits',
#            'x43w0105r_c0f.fits', 'x43w0108r_c0f.fits', 'x43w0202r_c0f.fits',
#            'x43w0205r_c0f.fits', 'x43w0103r_c0f.fits', 'x43w0106r_c0f.fits',
#            'x43w0109r_c0f.fits', 'x43w0203r_c0f.fits', 'x43w0206r_c0f.fits']
# globals()['plots_folder'] = "../plots/CygnusA_x43w0/"

# globals()['data_folder'] = "../data/3C109_x3mc010/"
# infiles = ['x3mc0101m_c0f.fits','x3mc0102m_c0f.fits','x3mc0103m_c0f.fits']
# globals()['plots_folder'] = "../plots/3C109_x3mc010/"

# globals()['data_folder'] = "../data/MKN463_x2rp030/"
# infiles = ['x2rp0201t_c0f.fits', 'x2rp0203t_c0f.fits', 'x2rp0205t_c0f.fits',
#            'x2rp0207t_c0f.fits', 'x2rp0302t_c0f.fits', 'x2rp0304t_c0f.fits',
#            'x2rp0306t_c0f.fits', 'x2rp0202t_c0f.fits', 'x2rp0204t_c0f.fits',
#            'x2rp0206t_c0f.fits', 'x2rp0301t_c0f.fits', 'x2rp0303t_c0f.fits',
#            'x2rp0305t_c0f.fits', 'x2rp0307t_c0f.fits']
# globals()['plots_folder'] = "../plots/MKN463_x2rp030/"

# globals()['data_folder'] = "../data/PG1630+377_x39510/"
# infiles = ['x3990201m_c0f.fits', 'x3990205m_c0f.fits', 'x3995101r_c0f.fits',
#            'x3995105r_c0f.fits', 'x3995109r_c0f.fits', 'x3995201r_c0f.fits',
#            'x3995205r_c0f.fits', 'x3990202m_c0f.fits', 'x3990206m_c0f.fits',
#            'x3995102r_c0f.fits', 'x3995106r_c0f.fits', 'x399510ar_c0f.fits',
#            'x3995202r_c0f.fits','x3995206r_c0f.fits']
# globals()['plots_folder'] = "../plots/PG1630+377_x39510/"

## Reduction parameters
# Deconvolution
deconvolve = False
if deconvolve:
psf = 'gaussian' #Can be user-defined as well
psf_FWHM = 0.10
psf_scale = 'arcsec'
psf_shape=(9,9)
iterations = 10
# Error estimation
error_sub_shape = (150,150)
display_error = True
# Data binning
rebin = True
if rebin:
pxsize = 15
px_scale = 'pixel' #pixel or arcsec
rebin_operation = 'average' #sum or average
# Alignment
align_center = 'image' #If None, will align image to image center
display_data = True
# Smoothing
smoothing_function = 'gaussian' #gaussian or combine
smoothing_FWHM = 1 #If None, no smoothing is done
smoothing_scale = 'pixel' #pixel or arcsec
# Rotation
rotate = False #rotation to North convention can give erroneous results
rotate_library = 'scipy' #scipy or pillow
# Polarization map output
figname = '3C405_FOC' #target/instrument name
figtype = '' #additional information
SNRp_cut = 3 #P measurements with SNR>3
SNRi_cut = 4 #I measurements with SNR>4
step_vec = 1 #plot all vectors in the array. If step_vec = 2, every other vector is plotted

##### Pipeline start
## Step 1:
# Get data from fits files and translate to flux in erg/cm²/s/Angstrom.
data_array, headers = proj_fits.get_obs_data(infiles, data_folder=data_folder, compute_flux=False)
# Crop data to remove outside blank margins.
#data_array, error_array = proj_red.crop_array(data_array, step=5, null_val=0., inside=True)
# Deconvolve data using Richardson-Lucy iterative algorithm with a gaussian PSF of given FWHM.
if deconvolve:
data_array = proj_red.deconvolve_array(data_array, headers, psf=psf, FWHM=psf_FWHM, scale=psf_scale, shape=psf_shape, iterations=iterations)
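# Editor's note — a sketch of Richardson-Lucy deconvolution with a Gaussian PSF
# using scikit-image (deconvolve_array presumably wraps something equivalent;
# the num_iter keyword is named `iterations` in older skimage releases):
# import numpy as np
# from astropy.convolution import Gaussian2DKernel
# from skimage.restoration import richardson_lucy
# def deconvolve_gaussian(image, fwhm_pix, shape=(9, 9), iterations=10):
#     sigma = fwhm_pix/(2.*np.sqrt(2.*np.log(2.)))
#     psf = Gaussian2DKernel(x_stddev=sigma, x_size=shape[0], y_size=shape[1]).array
#     psf /= psf.sum()
#     scale = image.max()  # richardson_lucy expects data in [-1, 1] when clip=True
#     return richardson_lucy(image/scale, psf, num_iter=iterations)*scale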
# Estimate error from data background, estimated from sub-image of desired sub_shape.
data_array, error_array = proj_red.get_error(data_array, sub_shape=error_sub_shape, display=display_error, headers=headers, savename=figname+"_errors", plots_folder=plots_folder)
# Rebin data to desired pixel size.
if rebin:
data_array, error_array, headers, Dxy = proj_red.rebin_array(data_array, error_array, headers, pxsize=pxsize, scale=px_scale, operation=rebin_operation)
#Align and rescale images with oversampling.
data_array, error_array = proj_red.align_data(data_array, error_array, upsample_factor=int(Dxy.min()), ref_center=align_center, return_shifts=False)

#Plot array for checking output
if display_data:
proj_plots.plot_obs(data_array, headers, vmin=data_array.min(), vmax=data_array.max(), savename=figname+"_center_"+align_center, plots_folder=plots_folder)

## Step 2:
# Compute Stokes I, Q, U with smoothed polarized images
# SMOOTHING DISCUSSION :
# The FWHM of the FOC has been estimated at about 0.03" across the 1500-5000 Angstrom band, which is about 2 detector pixels wide;
# see Jedrzejewski, R.; Nota, A.; Hack, W. J., A Comparison Between FOC and WFPC2
# Bibcode : 1995chst.conf...10J
I_stokes, Q_stokes, U_stokes, Stokes_cov = proj_red.compute_Stokes(data_array, error_array, headers, FWHM=smoothing_FWHM, scale=smoothing_scale, smoothing=smoothing_function)

## Step 3:
# Rotate images to have North up
if rotate:
ref_header = copy.deepcopy(headers[0])
if rotate_library.lower() in ['scipy','scipy.ndimage']:
I_stokes, Q_stokes, U_stokes, Stokes_cov, headers = proj_red.rotate_sc_Stokes(I_stokes, Q_stokes, U_stokes, Stokes_cov, headers, -ref_header['orientat'])
if rotate_library.lower() in ['pillow','pil']:
I_stokes, Q_stokes, U_stokes, Stokes_cov, headers = proj_red.rotate_PIL_Stokes(I_stokes, Q_stokes, U_stokes, Stokes_cov, headers, -ref_header['orientat'])
# Compute polarimetric parameters (polarization degree and angle).
P, debiased_P, s_P, s_P_P, PA, s_PA, s_PA_P = proj_red.compute_pol(I_stokes, Q_stokes, U_stokes, Stokes_cov, headers)

## Step 4:
# Save image to FITS.
Stokes_test = proj_fits.save_Stokes(I_stokes, Q_stokes, U_stokes, Stokes_cov, P, debiased_P, s_P, s_P_P, PA, s_PA, s_PA_P, headers, figname+figtype, data_folder=data_folder, return_hdul=True)

## Step 5:
# Plot polarization map (Background is either total Flux, Polarization degree or Polarization degree error).
proj_plots.polarization_map(copy.deepcopy(Stokes_test), SNRp_cut=SNRp_cut, SNRi_cut=SNRi_cut, step_vec=step_vec, savename=figname+figtype, plots_folder=plots_folder, display=None)
#proj_plots.polarization_map(copy.deepcopy(Stokes_test), SNRp_cut=SNRp_cut, SNRi_cut=SNRi_cut, step_vec=step_vec, savename=figname+figtype+"_P", plots_folder=plots_folder, display='Pol_deg')
#proj_plots.polarization_map(copy.deepcopy(Stokes_test), SNRp_cut=SNRp_cut, SNRi_cut=SNRi_cut, step_vec=step_vec, savename=figname+figtype+"_P_err", plots_folder=plots_folder, display='Pol_deg_err')
proj_plots.polarization_map(copy.deepcopy(Stokes_test), SNRp_cut=SNRp_cut, SNRi_cut=SNRi_cut, step_vec=step_vec, savename=figname+figtype+"_SNRi", plots_folder=plots_folder, display='SNRi')
proj_plots.polarization_map(copy.deepcopy(Stokes_test), SNRp_cut=SNRp_cut, SNRi_cut=SNRi_cut, step_vec=step_vec, savename=figname+figtype+"_SNRp", plots_folder=plots_folder, display='SNRp')

return 0

if __name__ == "__main__":
sys.exit(main())
@@ -1,312 +0,0 @@
#!/usr/bin/python
#-*- coding:utf-8 -*-

from pylab import *
import numpy as np
import matplotlib.pyplot as plt
from astropy.io import fits
from astropy.wcs import WCS
from aplpy import FITSFigure
from scipy import ndimage
from scipy import signal
from skimage.feature import register_translation
from astropy.convolution import convolve
from astropy.convolution import Gaussian2DKernel
import os as os

import lib.fits as proj_fits #Functions to handle fits files
import lib.reduction as proj_red #Functions used in reduction pipeline
import lib.plots as proj_plots #Functions for plotting data

def bin_ndarray(ndarray, new_shape, operation='sum'):
"""
Bins an ndarray in all axes based on the target shape, by summing,
taking the median or the standard deviation.

Number of output dimensions must match number of input dimensions.

Example
-------
>>> m = np.arange(0,100,1).reshape((10,10))
>>> n = bin_ndarray(m, new_shape=(5,5), operation='sum')
>>> print(n)

[[ 22  30  38  46  54]
 [102 110 118 126 134]
 [182 190 198 206 214]
 [262 270 278 286 294]
 [342 350 358 366 374]]

"""
if not operation.lower() in ['sum', 'median', 'std']:
raise ValueError("Operation not supported.")
if ndarray.ndim != len(new_shape):
raise ValueError("Shape mismatch: {} -> {}".format(ndarray.shape, new_shape))
compression_pairs = [(d, c//d) for d,c in zip(new_shape, ndarray.shape)]
flattened = [l for p in compression_pairs for l in p]
ndarray = ndarray.reshape(flattened)
for i in range(len(new_shape)):
if operation.lower() == "sum":
ndarray = ndarray.sum(-1*(i+1))
if operation.lower() == "median":
ndarray = np.median(ndarray, axis=-1*(i+1))
if operation.lower() == "std":
ndarray = ndarray.std(-1*(i+1))
return ndarray

#load files
def load_files(infiles):
data_array = []
for name in infiles['filenames']:
with fits.open(infiles['data_folder']+name) as f:
data_array.append(f[0].data)
data_array = np.array(data_array)
return data_array

def rebin_array(data,config):
data_m = []
data_s = []
for ii in range(len(data)):
new_shape = (data[ii].shape[0]//config['downsample_factor'],\
data[ii].shape[1]//config['downsample_factor'])
data_m.append(bin_ndarray(data[ii], new_shape, operation='median'))
data_s.append(bin_ndarray(data[ii], new_shape, operation='std'))
return data_m, data_s

def cross_corr(data):
shifts = []
data_array_c = []
for ii in range(len(data)):
shift, error, diffphase = register_translation(data[0], data[ii])
print(' -',infiles['filenames'][ii],shift)
shifts.append(shift)
ima_shifted = np.roll(data[ii],round(shifts[ii][0]),axis=0)
ima_shifted = np.roll(ima_shifted,round(shifts[ii][1]),axis=1)
data_array_c.append(ima_shifted)
return data_array_c
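# Editor's note — register_translation was removed from recent scikit-image
# releases; the equivalent call (assuming skimage >= 0.19) is:
# from skimage.registration import phase_cross_correlation
# shift, error, diffphase = phase_cross_correlation(reference_image, moving_image)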

plt.close('all')

### User input ####
#Create dictionaries
infiles = {'data_folder':'../data/NGC1068_x274020/',\
'filenames':['x274020at.c0f.fits','x274020bt.c0f.fits','x274020ct.c0f.fits',\
'x274020dt.c0f.fits','x274020et.c0f.fits','x274020ft.c0f.fits','x274020gt.c0f.fits',\
'x274020ht.c0f.fits','x274020it.c0f.fits']}

config = {'conserve_flux':True,'upsample_factor':1,\
'downsample_factor':8,\
'rotate':True,'reshape':True}

fig = {'cmap':'magma','vmin':-20,'vmax':200,'font_size':40.0,'label_size':20,'lw':3,\
'SNRp_cut':1,'SNRi_cut':4,'scalevec':0.05,'step_vec':1,\
'vec_legend':10,'figname':'NGC1068_FOC_ELR'}
#####################

##### SCRIPT #####
#load files
print('--- Load files')
print(' -',infiles['filenames'])
data_array = load_files(infiles)
#cross-correlation
print('--- Cross-correlate HWP PAs')
data_array_c = cross_corr(data_array)
#resample image
print('--- Downsample image')
data_array_cr_m, data_array_cr_s = rebin_array(data_array_c,config)
#compute Stokes
print('--- Combine HWP PAs')
#combine HWP PAs
pol0 = (data_array_cr_m[0]+data_array_cr_m[1]+data_array_cr_m[2])
pol60 = (data_array_cr_m[3]+data_array_cr_m[4]+data_array_cr_m[5]+data_array_cr_m[6])
pol120 = (data_array_cr_m[7]+data_array_cr_m[8])
#Stokes
print('--- Compute Stokes')
I_stokes = (2./3.)*(pol0 + pol60 + pol120)
Q_stokes = (2./3.)*(2*pol0 - pol60 - pol120)
U_stokes = (2./np.sqrt(3.))*(pol60 - pol120)
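# Editor's note — these combinations invert the ideal-polarizer response
# f(theta) = 0.5*(I + Q*cos(2*theta) + U*sin(2*theta)) sampled at
# theta = 0, 60, 120 deg. A quick numerical round-trip check (a sketch,
# assuming ideal polarizers and equal effective exposure):
_I, _Q, _U = 10.0, 1.5, -0.8
_f = lambda th: 0.5*(_I + _Q*np.cos(2.*th) + _U*np.sin(2.*th))
_p0, _p60, _p120 = _f(0.), _f(np.deg2rad(60.)), _f(np.deg2rad(120.))
print((2./3.)*(_p0 + _p60 + _p120))      # -> recovers I
print((2./3.)*(2.*_p0 - _p60 - _p120))   # -> recovers Q
print((2./np.sqrt(3.))*(_p60 - _p120))   # -> recovers U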
#Rotate Stokes
if config['rotate']:
print('--- Rotate Stokes')
ima = fits.open(infiles['data_folder']+infiles['filenames'][0])
PA = ima[0].header['ORIENTAT']
#rotate images
I_stokes = ndimage.rotate(I_stokes,-PA,reshape=config['reshape'])
Q_stokes = ndimage.rotate(Q_stokes,-PA,reshape=config['reshape'])
U_stokes = ndimage.rotate(U_stokes,-PA,reshape=config['reshape'])
#rotate Stokes vectors (use temporaries so Q is not overwritten before U is computed)
Q_new = Q_stokes*np.cos(np.deg2rad(PA)) - U_stokes*np.sin(np.deg2rad(PA))
U_new = Q_stokes*np.sin(np.deg2rad(PA)) + U_stokes*np.cos(np.deg2rad(PA))
Q_stokes, U_stokes = Q_new, U_new
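# Editor's note — under a rotation of the reference frame by phi, the linear
# Stokes parameters mix with twice the angle; a sketch of the usual convention
# (exact signs depend on the angle convention adopted, so treat as indicative):
def rotate_QU(Q, U, phi_deg):
    a = np.deg2rad(2.*phi_deg)
    return Q*np.cos(a) + U*np.sin(a), -Q*np.sin(a) + U*np.cos(a)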
#smoothing
print('--- Smoothing')
kernel = Gaussian2DKernel(x_stddev=1)
I_stokes = convolve(I_stokes,kernel)
Q_stokes = convolve(Q_stokes,kernel)
U_stokes = convolve(U_stokes,kernel)
#remove annoying pixels due to rotation
I_stokes[I_stokes == 0] = np.nan
Q_stokes[Q_stokes == 0] = np.nan
U_stokes[U_stokes == 0] = np.nan

#Polarimetry
PI = np.sqrt(Q_stokes*Q_stokes + U_stokes*U_stokes)
P = PI/I_stokes*100
PA = 0.5*arctan2(U_stokes,Q_stokes)*180./np.pi+90
s_P = (np.sqrt(2.)*(I_stokes)**(-0.5)) ### Correct this line!!!
s_PA = s_P/(P/100.)*180./np.pi
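# Editor's note — the flagged s_P line assumes pure photon noise. A hedged
# first-order propagation for uncorrelated I, Q, U errors (a sketch, with
# the returned s_PA in degrees) would look like:
def pol_errors(I, Q, U, s_I, s_Q, s_U):
    PI2 = Q**2 + U**2
    s_P = np.sqrt((Q**2*s_Q**2 + U**2*s_U**2)/PI2 + (PI2/I**2)*s_I**2)/I
    s_PA = np.sqrt(Q**2*s_U**2 + U**2*s_Q**2)/(2.*PI2)*180./np.pi
    return s_P, s_PA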

### STEP 6. image to FITS with updated WCS
img = fits.open(infiles['data_folder']+infiles['filenames'][0])
new_wcs = WCS(naxis=2)
new_wcs.wcs.crpix = [I_stokes.shape[0]/2, I_stokes.shape[1]/2]
new_wcs.wcs.crval = [img[0].header['CRVAL1'], img[0].header['CRVAL2']]
new_wcs.wcs.cunit = ["deg", "deg"]
new_wcs.wcs.ctype = ["RA---TAN", "DEC--TAN"]
new_wcs.wcs.cdelt = [img[0].header['CD1_1']*config['downsample_factor'],\
img[0].header['CD1_2']*config['downsample_factor']]

stkI=fits.PrimaryHDU(data=I_stokes,header=new_wcs.to_header())
pol=fits.PrimaryHDU(data=P,header=new_wcs.to_header())
pang=fits.PrimaryHDU(data=PA,header=new_wcs.to_header())
pol_err=fits.PrimaryHDU(data=s_P,header=new_wcs.to_header())
pang_err=fits.PrimaryHDU(data=s_PA,header=new_wcs.to_header())

### STEP 7. polarization map
#quality cuts
pxscale = stkI.header['CDELT2']

#apply quality cuts
pol.data[stkI.data < 10] = np.nan

#levelsI = 2**(np.arange(2,20,0.5))
levelsI = 2**(np.arange(3,20,0.5))

fig1 = plt.figure(figsize=(11,10))
gc = FITSFigure(stkI,figure=fig1)
gc.show_colorscale(cmap=fig['cmap'],vmin=fig['vmin'],vmax=fig['vmax'])
gc.show_contour(levels=levelsI,colors='grey',linewidths=0.5)
gc.show_vectors(pol,pang,scale=fig['scalevec'],\
step=fig['step_vec'],color='black',linewidth=2.0)
gc.show_vectors(pol,pang,scale=fig['scalevec'],\
step=fig['step_vec'],color='white',linewidth=1.0)
#legend vector
vecscale = fig['scalevec'] * new_wcs.wcs.cdelt[0]
gc.add_scalebar(fig['vec_legend']*vecscale,'P ='+str(fig['vec_legend'])+'%',\
corner='bottom right',frame=True,color='black',facecolor='blue')
figname = fig['figname']+'_test_polmap.pdf'
fig1.savefig(figname,dpi=300)
os.system('open '+figname)

sys.exit()

fig2,((ax1,ax2,ax3),(ax4,ax5,ax6)) = plt.subplots(2,3,figsize=(40,20),dpi=200)
CF = ax1.imshow(I_stokes,origin='lower')
cbar = plt.colorbar(CF,ax=ax1)
cbar.ax.tick_params(labelsize=20)
ax1.tick_params(axis='both', which='major', labelsize=20)
ax1.text(2,2,'I',color='white',fontsize=10)

CF = ax2.imshow(Q_stokes,origin='lower')
cbar = plt.colorbar(CF,ax=ax2)
cbar.ax.tick_params(labelsize=20)
ax2.tick_params(axis='both', which='major', labelsize=20)
ax2.text(2,2,'Q',color='white',fontsize=10)

CF = ax3.imshow(U_stokes,origin='lower')
cbar = plt.colorbar(CF,ax=ax3)
cbar.ax.tick_params(labelsize=20)
ax3.tick_params(axis='both', which='major', labelsize=20)
ax3.text(2,2,'U',color='white',fontsize=10)

v = np.linspace(0,40,50)
CF = ax4.imshow(P,origin='lower',vmin=0,vmax=40)
cbar = plt.colorbar(CF,ax=ax4)
cbar.ax.tick_params(labelsize=20)
ax4.tick_params(axis='both', which='major', labelsize=20)
ax4.text(2,2,'P',color='white',fontsize=10)

CF = ax5.imshow(PA,origin='lower',vmin=0,vmax=180)
cbar = plt.colorbar(CF,ax=ax5)
cbar.ax.tick_params(labelsize=20)
ax5.tick_params(axis='both', which='major', labelsize=20)
ax5.text(2,2,'PA',color='white',fontsize=10)

CF = ax6.imshow(PI,origin='lower')
cbar = plt.colorbar(CF,ax=ax6)
cbar.ax.tick_params(labelsize=20)
ax6.tick_params(axis='both', which='major', labelsize=20)
ax6.text(2,2,'PI',color='white',fontsize=10)

figname = fig['figname']+'_test_Stokes.pdf'
fig2.savefig(figname,dpi=300)
#os.system('open '+figname)

### Step 4. Binning and smoothing
#Images can be binned and smoothed to improve SNR. This step can also be done
#using the PolX images.

### Step 5. Rotate images to have North up
#Images need to be reprojected to have North up.
#This requires rotating the Stokes QU using a rotation matrix.

### STEP 6. image to FITS with updated WCS
img = fits.open(infiles['data_folder']+infiles['filenames'][0])
new_wcs = WCS(naxis=2)
new_wcs.wcs.crpix = [I_stokes.shape[0]/2, I_stokes.shape[1]/2]
new_wcs.wcs.crval = [img[0].header['CRVAL1'], img[0].header['CRVAL2']]
new_wcs.wcs.cunit = ["deg", "deg"]
new_wcs.wcs.ctype = ["RA---TAN", "DEC--TAN"]
new_wcs.wcs.cdelt = [img[0].header['CD1_1']*config['downsample_factor'],\
img[0].header['CD1_2']*config['downsample_factor']]

#hdu_ori = WCS(img[0])
stkI=fits.PrimaryHDU(data=I_stokes,header=new_wcs.to_header())
pol=fits.PrimaryHDU(data=P,header=new_wcs.to_header())
pang=fits.PrimaryHDU(data=PA,header=new_wcs.to_header())
pol_err=fits.PrimaryHDU(data=s_P,header=new_wcs.to_header())
pang_err=fits.PrimaryHDU(data=s_PA,header=new_wcs.to_header())

### STEP 7. polarization map
#quality cuts
pxscale = stkI.header['CDELT1']

#apply quality cuts
SNRp = pol.data/pol_err.data
pol.data[SNRp < fig['SNRp_cut']] = np.nan

SNRi = stkI.data/np.std(stkI.data[0:10,0:10])
SNRi = fits.PrimaryHDU(SNRi,header=stkI.header)
pol.data[SNRi.data < fig['SNRi_cut']] = np.nan
print(np.max(SNRi.data))
fig1 = plt.figure(figsize=(11,10))
gc = FITSFigure(stkI,figure=fig1)
gc.show_colorscale(cmap=fig['cmap'])
#gc.show_contour(np.log10(SNRi.data),levels=np.linspace(np.log10(fig['SNRi_cut']),np.nanmax(np.log10(SNRi.data)),20),\
#                filled=True,cmap='magma')
gc.show_vectors(pol,pang,scale=fig['scalevec'],step=fig['step_vec'],color='white',linewidth=1.0)

figname = fig['figname']+'_test_polmap.pdf'
fig1.savefig(figname,dpi=300)
os.system('open '+figname)
@@ -1,281 +0,0 @@
#!/usr/bin/python
#-*- coding:utf-8 -*-

from pylab import *
import numpy as np
import matplotlib.pyplot as plt
from astropy.io import fits
from astropy.wcs import WCS
from aplpy import FITSFigure
import scipy.ndimage
import os as os

import lib.fits as proj_fits #Functions to handle fits files
import lib.reduction as proj_red #Functions used in reduction pipeline
import lib.plots as proj_plots #Functions for plotting data

plt.close('all')

def bin_ndarray(ndarray, new_shape, operation='sum'):
"""
Bins an ndarray in all axes based on the target shape, by summing or
averaging.

Number of output dimensions must match number of input dimensions.

Example
-------
>>> m = np.arange(0,100,1).reshape((10,10))
>>> n = bin_ndarray(m, new_shape=(5,5), operation='sum')
>>> print(n)

[[ 22  30  38  46  54]
 [102 110 118 126 134]
 [182 190 198 206 214]
 [262 270 278 286 294]
 [342 350 358 366 374]]

"""
if not operation.lower() in ['sum', 'mean', 'average', 'avg']:
raise ValueError("Operation not supported.")
if ndarray.ndim != len(new_shape):
raise ValueError("Shape mismatch: {} -> {}".format(ndarray.shape, new_shape))
compression_pairs = [(d, c//d) for d,c in zip(new_shape, ndarray.shape)]
flattened = [l for p in compression_pairs for l in p]
ndarray = ndarray.reshape(flattened)
for i in range(len(new_shape)):
if operation.lower() == "sum":
ndarray = ndarray.sum(-1*(i+1))
elif operation.lower() in ["mean", "average", "avg"]:
ndarray = ndarray.mean(-1*(i+1))
return ndarray

def plots(ax,data,vmax,vmin):
ax.imshow(data,vmax=vmax,vmin=vmin,origin='lower')

### User input

#infiles = ['x274020at.c0f.fits','x274020bt.c0f.fits','x274020ct.c0f.fits','x274020dt.c0f.fits',
#           'x274020et.c0f.fits','x274020ft.c0f.fits','x274020gt.c0f.fits','x274020ht.c0f.fits',
#           'x274020it.c0f.fits']

globals()['data_folder'] = "../data/3C405_x136060/"
infiles = ['x1360601t_c0f.fits','x1360602t_c0f.fits','x1360603t_c0f.fits']
#infiles = ['x1360601t_c1f.fits','x1360602t_c1f.fits','x1360603t_c1f.fits']
globals()['plots_folder'] = "../plots/3C405_x136060/"

#Centroids
#The centroids should be estimated by cross-correlating the images.
#Here I used the position of the central source for each file as the reference pixel position.
#ycen_0 = [304,306,303,296,295,295,294,305,304]
#xcen_0 = [273,274,273,276,274,274,274,272,271]

data_array = []
for name in infiles:
with fits.open(data_folder+name) as f:
data_array.append(f[0].data)
data_array = np.array(data_array)
shape = data_array.shape
data_array, error_array = proj_red.crop_array(data_array, step=5, null_val=0., inside=True)
data_array, error_array, shifts, errors = proj_red.align_data(data_array, error_array)
center = np.array([(np.array(shape[1:])/2).astype(int),]*len(infiles))-shifts
xcen_0 = center[:,0].astype(int)
ycen_0 = center[:,1].astype(int)

#size, in pixels, of the FOV centered on x_cen_0, y_cen_0
Dx = 500
Dy = 500

#set the new image size (Dxy x Dxy pixel binning)
Dxy = 10
new_shape = (Dx//Dxy,Dy//Dxy)

#figures
#test alignment
vmin = 0
vmax = 6
font_size=40.0
label_size=20.
lw = 3.

#pol. map
SNRp_cut = 3 #P measurements with SNR>3
SNRi_cut = 5 #I measurements with SNR>5
scalevec = 0.05 #length of vectors in pol. map
step_vec = 1 #plot all vectors in the array. If step_vec = 2, every other vector is plotted
vec_legend = 10 #% pol for legend
#figname = 'NGC1068_FOC.png'
figname = '3C405_FOC_Enrique'

### SCRIPT ###
### Step 1. Check input images before data reduction
#this step is very simplistic.
#Here I used the position of the central source for each file as the
#reference pixel position.
#The centroids should be estimated by cross-correlating the images,
#and compared with the simplistic approach of using the peak pixel of the
#object as the reference pixel.

fig,axs = plt.subplots(3,3,figsize=(30,30),dpi=200,sharex=True,sharey=True)

for jj, enum in enumerate(list(zip(axs.flatten(),data_array))):
a = enum[0]
img = fits.open(data_folder+infiles[jj])
ima = img[0].data
ima = ima[ycen_0[jj]-Dy:ycen_0[jj]+Dy,xcen_0[jj]-Dx:xcen_0[jj]+Dx]
ima = bin_ndarray(ima,new_shape=new_shape,operation='sum') #binning
exptime = img[0].header['EXPTIME']
fil = img[0].header['FILTNAM1']
ima = ima/exptime
globals()['ima_%s' % jj] = ima
#plots
plots(a,ima,vmax=vmax,vmin=vmin)
#position of centroid
a.plot([ima.shape[1]/2,ima.shape[1]/2],[0,ima.shape[0]-1],lw=1,color='black')
a.plot([0,ima.shape[1]-1],[ima.shape[1]/2,ima.shape[1]/2],lw=1,color='black')
a.text(2,2,infiles[jj][0:8],color='white',fontsize=10)
a.text(2,5,fil,color='white',fontsize=30)
a.text(ima.shape[1]-20,1,exptime,color='white',fontsize=20)
fig.subplots_adjust(hspace=0,wspace=0)
fig.savefig(plots_folder+figname+'_test_alignment.png',dpi=300)
#os.system('open test_alignment.png')

### Step 2. Average all images for a single polarizer to have them in the same units (e/s).
pol0 = ima_0 #(ima_0 + ima_1 + ima_2)/3.
pol60 = ima_1 #(ima_3 + ima_4 + ima_5 + ima_6)/4.
pol120 = ima_2 #(ima_7 + ima_8)/2.

fig1,(ax1,ax2,ax3) = plt.subplots(1,3,figsize=(26,8),dpi=200)
CF = ax1.imshow(pol0,vmin=vmin,vmax=vmax,origin='lower')
cbar = plt.colorbar(CF,ax=ax1)
cbar.ax.tick_params(labelsize=20)
ax1.tick_params(axis='both', which='major', labelsize=20)
ax1.text(2,2,'POL0',color='white',fontsize=10)

CF = ax2.imshow(pol60,vmin=vmin,vmax=vmax,origin='lower')
cbar = plt.colorbar(CF,ax=ax2)
cbar.ax.tick_params(labelsize=20)
ax2.tick_params(axis='both', which='major', labelsize=20)
ax2.text(2,2,'POL60',color='white',fontsize=10)

CF = ax3.imshow(pol120,vmin=vmin,vmax=vmax,origin='lower')
cbar = plt.colorbar(CF,ax=ax3)
cbar.ax.tick_params(labelsize=20)
ax3.tick_params(axis='both', which='major', labelsize=20)
ax3.text(2,2,'POL120',color='white',fontsize=10)
fig1.savefig(plots_folder+figname+'_test_combinePol.png',dpi=300)
#os.system('open test_combinePol.png')

### Step 3. Compute Stokes IQU, P, PA, PI
#Stokes parameters
I_stokes = (2./3.)*(pol0 + pol60 + pol120)
Q_stokes = (2./3.)*(2*pol0 - pol60 - pol120)
U_stokes = (2./np.sqrt(3.))*(pol60 - pol120)

#Remove nan
I_stokes[np.isnan(I_stokes)]=0.
Q_stokes[np.isnan(Q_stokes)]=0.
U_stokes[np.isnan(U_stokes)]=0.

#Polarimetry
PI = np.sqrt(Q_stokes*Q_stokes + U_stokes*U_stokes)
P = PI/I_stokes*100
PA = 0.5*arctan2(U_stokes,Q_stokes)*180./np.pi+90
s_P = np.sqrt(2.)*(I_stokes)**(-0.5)
s_PA = s_P/(P/100.)*180./np.pi

fig2,((ax1,ax2,ax3),(ax4,ax5,ax6)) = plt.subplots(2,3,figsize=(40,20),dpi=200)
CF = ax1.imshow(I_stokes,origin='lower')
cbar = plt.colorbar(CF,ax=ax1)
cbar.ax.tick_params(labelsize=20)
ax1.tick_params(axis='both', which='major', labelsize=20)
ax1.text(2,2,'I',color='white',fontsize=10)

CF = ax2.imshow(Q_stokes,origin='lower')
cbar = plt.colorbar(CF,ax=ax2)
cbar.ax.tick_params(labelsize=20)
ax2.tick_params(axis='both', which='major', labelsize=20)
ax2.text(2,2,'Q',color='white',fontsize=10)

CF = ax3.imshow(U_stokes,origin='lower')
cbar = plt.colorbar(CF,ax=ax3)
cbar.ax.tick_params(labelsize=20)
ax3.tick_params(axis='both', which='major', labelsize=20)
ax3.text(2,2,'U',color='white',fontsize=10)

v = np.linspace(0,40,50)
CF = ax4.imshow(P,origin='lower',vmin=0,vmax=40)
cbar = plt.colorbar(CF,ax=ax4)
cbar.ax.tick_params(labelsize=20)
ax4.tick_params(axis='both', which='major', labelsize=20)
ax4.text(2,2,'P',color='white',fontsize=10)

CF = ax5.imshow(PA,origin='lower',vmin=0,vmax=180)
cbar = plt.colorbar(CF,ax=ax5)
cbar.ax.tick_params(labelsize=20)
ax5.tick_params(axis='both', which='major', labelsize=20)
ax5.text(2,2,'PA',color='white',fontsize=10)

CF = ax6.imshow(PI,origin='lower')
cbar = plt.colorbar(CF,ax=ax6)
cbar.ax.tick_params(labelsize=20)
ax6.tick_params(axis='both', which='major', labelsize=20)
ax6.text(2,2,'PI',color='white',fontsize=10)

fig2.savefig(plots_folder+figname+'_test_Stokes.png',dpi=300)
#os.system('open test_Stokes.png')

### Step 4. Binning and smoothing
#Images can be binned and smoothed to improve SNR. This step can also be done
#using the PolX images.

### Step 5. Rotate images to have North up
#Images need to be reprojected to have North up.
#This requires rotating the Stokes QU using a rotation matrix.

### STEP 6. image to FITS with updated WCS
new_wcs = WCS(naxis=2)
new_wcs.wcs.crpix = [I_stokes.shape[0]/2, I_stokes.shape[1]/2]
new_wcs.wcs.crval = [img[0].header['CRVAL1'], img[0].header['CRVAL2']]
new_wcs.wcs.cunit = ["deg", "deg"]
new_wcs.wcs.ctype = ["RA---TAN", "DEC--TAN"]
new_wcs.wcs.cdelt = [img[0].header['CD1_1']*Dxy, img[0].header['CD1_2']*Dxy]

#hdu_ori = WCS(img[0])
stkI=fits.PrimaryHDU(data=I_stokes,header=new_wcs.to_header())
pol=fits.PrimaryHDU(data=P,header=new_wcs.to_header())
pang=fits.PrimaryHDU(data=PA,header=new_wcs.to_header())
pol_err=fits.PrimaryHDU(data=s_P,header=new_wcs.to_header())
pang_err=fits.PrimaryHDU(data=s_PA,header=new_wcs.to_header())

### STEP 7. polarization map
#quality cuts
pxscale = stkI.header['CDELT1']

#apply quality cuts
SNRp = pol.data/pol_err.data
pol.data[SNRp < SNRp_cut] = np.nan

SNRi = stkI.data/np.std(stkI.data[0:10,0:10])
pol.data[SNRi < SNRi_cut] = np.nan
print(np.max(SNRi))
fig = plt.figure(figsize=(11,10))
gc = FITSFigure(stkI,figure=fig)
gc.show_contour(np.log10(SNRi),levels=np.linspace(np.log10(SNRi_cut),np.max(np.log10(SNRi)),20),\
filled=True,cmap='magma')
gc.show_vectors(pol,pang,scale=scalevec,step=step_vec,color='white',linewidth=1.0)

fig.savefig(plots_folder+figname,dpi=300)
#os.system('open '+figname)
@@ -12,10 +12,17 @@ prototypes :
import numpy as np
import matplotlib.pyplot as plt
from matplotlib.patches import Rectangle
from mpl_toolkits.axes_grid1.anchored_artists import AnchoredSizeBar
import matplotlib.font_manager as fm
from mpl_toolkits.axes_grid1.anchored_artists import AnchoredSizeBar, AnchoredDirectionArrows
from astropy.wcs import WCS

def sci_not(v,err,rnd=1):
power = - int(('%E' % v)[-3:])+1
return r"({0} $\pm$ {1})e{2}".format(
round(v*10**power,rnd),round(err*10**power,rnd),-power)
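# Editor's note — sci_not renders a value/error pair with a shared exponent,
# e.g. sci_not(1.23e-15, 4.0e-17, 2) -> "(12.3 $\pm$ 0.4)e-16".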

def plot_obs(data_array, headers, shape=None, vmin=0., vmax=6., rectangle=None,
savename=None, plots_folder=""):
"""
@@ -66,8 +73,8 @@ def plot_obs(data_array, headers, shape=None, vmin=0., vmax=6., rectangle=None,
#plots
im = ax.imshow(data, vmin=vmin, vmax=vmax, origin='lower')
if not(rectangle is None):
x, y, width, height = rectangle[i]
ax.add_patch(Rectangle((x, y), width, height, edgecolor='r', fill=False))
x, y, width, height, color = rectangle[i]
ax.add_patch(Rectangle((x, y), width, height, edgecolor=color, fill=False))
#position of centroid
ax.plot([data.shape[1]/2, data.shape[1]/2], [0,data.shape[0]-1], lw=1,
color='black')
@@ -137,7 +144,7 @@ def plot_Stokes(Stokes, savename=None, plots_folder=""):
return 0

def polarization_map(Stokes, SNRp_cut=3., SNRi_cut=30., step_vec=1,
def polarization_map(Stokes, data_mask, SNRp_cut=3., SNRi_cut=30., step_vec=1,
savename=None, plots_folder="", display=None):
"""
Plots polarization map from Stokes HDUList.
@@ -177,7 +184,8 @@ def polarization_map(Stokes, SNRp_cut=3., SNRi_cut=30., step_vec=1,
stkQ = Stokes[np.argmax([Stokes[i].header['datatype']=='Q_stokes' for i in range(len(Stokes))])]
stkU = Stokes[np.argmax([Stokes[i].header['datatype']=='U_stokes' for i in range(len(Stokes))])]
stk_cov = Stokes[np.argmax([Stokes[i].header['datatype']=='IQU_cov_matrix' for i in range(len(Stokes))])]
pol = Stokes[np.argmax([Stokes[i].header['datatype']=='Pol_deg' for i in range(len(Stokes))])]
#pol = Stokes[np.argmax([Stokes[i].header['datatype']=='Pol_deg' for i in range(len(Stokes))])]
pol = Stokes[np.argmax([Stokes[i].header['datatype']=='Pol_deg_debiased' for i in range(len(Stokes))])]
pol_err = Stokes[np.argmax([Stokes[i].header['datatype']=='Pol_deg_err' for i in range(len(Stokes))])]
#pol_err_Poisson = Stokes[np.argmax([Stokes[i].header['datatype']=='Pol_deg_err_Poisson_noise' for i in range(len(Stokes))])]
pang = Stokes[np.argmax([Stokes[i].header['datatype']=='Pol_ang' for i in range(len(Stokes))])]
@@ -210,11 +218,12 @@ def polarization_map(Stokes, SNRp_cut=3., SNRi_cut=30., step_vec=1,
print("No pixel with polarization information above requested SNR.")

#Plot the map
plt.rcParams.update({'font.size': 16})
fig = plt.figure(figsize=(15,15))
ax = fig.add_subplot(111, projection=wcs)
ax.set_facecolor('k')
fig.subplots_adjust(hspace=0, wspace=0, right=0.95)
cbar_ax = fig.add_axes([0.98, 0.12, 0.01, 0.75])
fig.subplots_adjust(hspace=0, wspace=0, right=0.9)
cbar_ax = fig.add_axes([0.95, 0.12, 0.01, 0.75])

if display is None:
# If no display selected, show intensity map
@@ -239,6 +248,7 @@ def polarization_map(Stokes, SNRp_cut=3., SNRi_cut=30., step_vec=1,
im = ax.imshow(SNRi, extent=[-SNRi.shape[1]/2.,SNRi.shape[1]/2.,-SNRi.shape[0]/2.,SNRi.shape[0]/2.], vmin=vmin, vmax=vmax, aspect='auto', cmap='inferno', alpha=1.)
cbar = plt.colorbar(im, cax=cbar_ax, label=r"$I_{Stokes}/\sigma_{I}$")
levelsI = np.linspace(SNRi_cut, np.max(SNRi[SNRi > 0.]), 10)
#print(levelsI)
cont = ax.contour(SNRi, extent=[-SNRi.shape[1]/2.,SNRi.shape[1]/2.,-SNRi.shape[0]/2.,SNRi.shape[0]/2.], levels=levelsI, colors='grey', linewidths=0.5)
elif display.lower() in ['snrp']:
# Display polarization degree signal-to-noise map
@@ -255,26 +265,31 @@ def polarization_map(Stokes, SNRp_cut=3., SNRi_cut=30., step_vec=1,
levelsI = np.linspace(SNRi_cut, SNRi.max(), 10)
cont = ax.contour(SNRi, extent=[-SNRi.shape[1]/2.,SNRi.shape[1]/2.,-SNRi.shape[0]/2.,SNRi.shape[0]/2.], levels=levelsI, colors='grey', linewidths=0.5)

fontprops = fm.FontProperties(size=16)
px_size = wcs.wcs.get_cdelt()[0]
px_sc = AnchoredSizeBar(ax.transData, 1./px_size, '1 arcsec', 3, pad=0.5, sep=5, borderpad=0.5, frameon=False, size_vertical=0.005, color='w')
px_sc = AnchoredSizeBar(ax.transData, 1./px_size, '1 arcsec', 3, pad=0.5, sep=5, borderpad=0.5, frameon=False, size_vertical=0.005, color='w', fontproperties=fontprops)
ax.add_artist(px_sc)

X, Y = np.meshgrid(np.linspace(-stkI.data.shape[0]/2.,stkI.data.shape[0]/2.,stkI.data.shape[0]), np.linspace(-stkI.data.shape[1]/2.,stkI.data.shape[1]/2.,stkI.data.shape[1]))
U, V = pol.data*np.cos(np.pi/2.+pang.data*np.pi/180.), pol.data*np.sin(np.pi/2.+pang.data*np.pi/180.)
Q = ax.quiver(X[::step_vec,::step_vec],Y[::step_vec,::step_vec],U[::step_vec,::step_vec],V[::step_vec,::step_vec],units='xy',angles='uv',scale=50.,scale_units='xy',pivot='mid',headwidth=0.,headlength=0.,headaxislength=0.,width=0.1,color='w')
pol_sc = AnchoredSizeBar(ax.transData, 2., r"$P$= 100 %", 4, pad=0.5, sep=5, borderpad=0.5, frameon=False, size_vertical=0.005, color='w')
pol_sc = AnchoredSizeBar(ax.transData, 2., r"$P$= 100 %", 4, pad=0.5, sep=5, borderpad=0.5, frameon=False, size_vertical=0.005, color='w', fontproperties=fontprops)
ax.add_artist(pol_sc)

north_dir = AnchoredDirectionArrows(ax.transAxes, "E", "N", length=-0.08, fontsize=0.03, loc=1, aspect_ratio=-1, sep_y=0.01, sep_x=0.01, angle=-Stokes[0].header['orientat'], color='w', arrow_props={'ec': 'w', 'fc': 'w', 'alpha': 1,'lw': 2})
ax.add_artist(north_dir)

# Compute integrated parameters and associated errors for pixels in the cut
n_pix = mask.size
I_int = stkI.data[mask].sum()
Q_int = stkQ.data[mask].sum()
U_int = stkU.data[mask].sum()
I_int_err = np.sqrt(np.sum(stk_cov.data[0,0][mask]))
Q_int_err = np.sqrt(np.sum(stk_cov.data[1,1][mask]))
U_int_err = np.sqrt(np.sum(stk_cov.data[2,2][mask]))
IQ_int_err = np.sqrt(np.sum(stk_cov.data[0,1][mask]**2))
IU_int_err = np.sqrt(np.sum(stk_cov.data[0,2][mask]**2))
QU_int_err = np.sqrt(np.sum(stk_cov.data[1,2][mask]**2))
I_int_err = np.sqrt(n_pix)*np.sqrt(np.sum(stk_cov.data[0,0][mask]))
Q_int_err = np.sqrt(n_pix)*np.sqrt(np.sum(stk_cov.data[1,1][mask]))
U_int_err = np.sqrt(n_pix)*np.sqrt(np.sum(stk_cov.data[2,2][mask]))
IQ_int_err = np.sqrt(n_pix)*np.sqrt(np.sum(stk_cov.data[0,1][mask]**2))
IU_int_err = np.sqrt(n_pix)*np.sqrt(np.sum(stk_cov.data[0,2][mask]**2))
QU_int_err = np.sqrt(n_pix)*np.sqrt(np.sum(stk_cov.data[1,2][mask]**2))

P_int = np.sqrt(Q_int**2+U_int**2)/I_int*100.
P_int_err = (100./I_int)*np.sqrt((Q_int**2*Q_int_err**2 + U_int**2*U_int_err**2 + 2.*Q_int*U_int*QU_int_err)/(Q_int**2 + U_int**2) + ((Q_int/I_int)**2 + (U_int/I_int)**2)*I_int_err**2 - 2.*(Q_int/I_int)*IQ_int_err - 2.*(U_int/I_int)*IU_int_err)
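# Editor's note — the sqrt(n_pix) factors added by this commit reproduce the
# fully correlated limit: for n_pix pixels with comparable per-pixel error
# sigma, sqrt(n_pix)*sqrt(sum(sigma**2)) = n_pix*sigma = sum(sigma), whereas
# the previous lines assumed independent pixels. A one-line check (sketch):
# >>> import numpy as np; n, s = 100, 0.3
# >>> np.isclose(np.sqrt(n)*np.sqrt(n*s**2), n*s)  # -> True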
@@ -283,23 +298,26 @@ def polarization_map(Stokes, SNRp_cut=3., SNRi_cut=30., step_vec=1,
PA_int_err = (90./(np.pi*(Q_int**2 + U_int**2)))*np.sqrt(U_int**2*Q_int_err**2 + Q_int**2*U_int_err**2 - 2.*Q_int*U_int*QU_int_err)

# Compute integrated parameters and associated errors for all pixels
n_pix = stkI.data.size
I_diluted = stkI.data.sum()
Q_diluted = stkQ.data.sum()
U_diluted = stkU.data.sum()
I_diluted_err = np.sqrt(np.sum(stk_cov.data[0,0]))
Q_diluted_err = np.sqrt(np.sum(stk_cov.data[1,1]))
U_diluted_err = np.sqrt(np.sum(stk_cov.data[2,2]))
IQ_diluted_err = np.sqrt(np.sum(stk_cov.data[0,1]**2))
IU_diluted_err = np.sqrt(np.sum(stk_cov.data[0,2]**2))
QU_diluted_err = np.sqrt(np.sum(stk_cov.data[1,2]**2))
I_diluted_err = np.sqrt(n_pix)*np.sqrt(np.sum(stk_cov.data[0,0]))
Q_diluted_err = np.sqrt(n_pix)*np.sqrt(np.sum(stk_cov.data[1,1]))
U_diluted_err = np.sqrt(n_pix)*np.sqrt(np.sum(stk_cov.data[2,2]))
IQ_diluted_err = np.sqrt(n_pix)*np.sqrt(np.sum(stk_cov.data[0,1]**2))
IU_diluted_err = np.sqrt(n_pix)*np.sqrt(np.sum(stk_cov.data[0,2]**2))
QU_diluted_err = np.sqrt(n_pix)*np.sqrt(np.sum(stk_cov.data[1,2]**2))

P_diluted = np.sqrt(Q_diluted**2+U_diluted**2)/I_diluted*100.
P_diluted_err = (100./I_diluted)*np.sqrt((Q_diluted**2*Q_diluted_err**2 + U_diluted**2*U_diluted_err**2 + 2.*Q_diluted*U_diluted*QU_diluted_err)/(Q_diluted**2 + U_diluted**2) + ((Q_diluted/I_diluted)**2 + (U_diluted/I_diluted)**2)*I_diluted_err**2 - 2.*(Q_diluted/I_diluted)*IQ_diluted_err - 2.*(U_diluted/I_diluted)*IU_diluted_err)
P_diluted_err = np.sqrt(2/n_pix)*100.

PA_diluted = (90./np.pi)*np.arctan2(U_diluted,Q_diluted)+90.
PA_diluted_err = (90./(np.pi*(Q_diluted**2 + U_diluted**2)))*np.sqrt(U_diluted**2*Q_diluted_err**2 + Q_diluted**2*U_diluted_err**2 - 2.*Q_diluted*U_diluted*QU_diluted_err)
PA_diluted_err = P_diluted_err/(2.*P_diluted)*180./np.pi

ax.annotate(r"$F_{{\lambda}}^{{int}}$({0:.0f} $\AA$) = {1:.1e} $\pm$ {2:.1e} $ergs \cdot cm^{{-2}} \cdot s^{{-1}} \cdot \AA^{{-1}}$".format(pivot_wav,I_int*convert_flux,I_int_err*convert_flux)+"\n"+r"$P^{{int}}$ = {0:.2f} $\pm$ {1:.2f} %".format(P_int,P_int_err)+"\n"+r"$\theta_{{P}}^{{int}}$ = {0:.2f} $\pm$ {1:.2f} °".format(PA_int,PA_int_err)+"\n"+r"$P^{{diluted}}$ = {0:.2f} $\pm$ {1:.2f} %".format(P_diluted,P_diluted_err)+"\n"+r"$\theta_{{P}}^{{diluted}}$ = {0:.2f} $\pm$ {1:.2f} °".format(PA_diluted,PA_diluted_err), color='white', fontsize=11, xy=(0.01, 0.90), xycoords='axes fraction')
ax.annotate(r"$F_{{\lambda}}^{{int}}$({0:.0f} $\AA$) = {1} $ergs \cdot cm^{{-2}} \cdot s^{{-1}} \cdot \AA^{{-1}}$".format(pivot_wav,sci_not(I_diluted*convert_flux,I_diluted_err*convert_flux,2))+"\n"+r"$P^{{int}}$ = {0:.1f} $\pm$ {1:.1f} %".format(P_diluted,P_diluted_err)+"\n"+r"$\theta_{{P}}^{{int}}$ = {0:.1f} $\pm$ {1:.1f} °".format(PA_diluted,PA_diluted_err), color='white', fontsize=16, xy=(0.01, 0.92), xycoords='axes fraction')

ax.coords.grid(True, color='white', ls='dotted', alpha=0.5)
ax.coords[0].set_axislabel('Right Ascension (J2000)')
@@ -311,7 +329,7 @@ def polarization_map(Stokes, SNRp_cut=3., SNRi_cut=30., step_vec=1,
ax.axis('equal')

if not savename is None:
fig.suptitle(savename)
#fig.suptitle(savename)
fig.savefig(plots_folder+savename+".png",bbox_inches='tight',dpi=200)

plt.show()

@@ -1,247 +0,0 @@
"""
Library functions for displaying information using matplotlib

prototypes :
- plot_obs(data_array, headers, shape, vmin, vmax, savename, plots_folder)
    Plots whole observation raw data in given display shape

- polarization_map(Stokes_hdul, SNRp_cut, SNRi_cut, step_vec, savename, plots_folder, display)
    Plots polarization map of polarimetric parameters saved in an HDUList
"""

import numpy as np
import matplotlib.pyplot as plt
from matplotlib.patches import Rectangle
from mpl_toolkits.axes_grid1.anchored_artists import AnchoredSizeBar
from astropy.wcs import WCS

def plot_obs(data_array, headers, shape=None, vmin=0., vmax=6., rectangle=None,
savename=None, plots_folder=""):
"""
Plots raw observation imagery with some information on the instrument and
filters.
----------
Inputs:
data_array : numpy.ndarray
Array of images (2D floats, aligned and of the same shape) of a
single observation with multiple polarizers of an instrument
headers : header list
List of headers corresponding to the images in data_array
shape : array-like of length 2, optional
Shape of the display, with shape = [#row, #columns]. If None, defaults
to the optimal square.
Defaults to None.
vmin : float, optional
Min pixel value that should be displayed.
Defaults to 0.
vmax : float, optional
Max pixel value that should be displayed.
Defaults to 6.
rectangle : numpy.ndarray, optional
Array of parameters for matplotlib.patches.Rectangle objects that will
be displayed on each output image. If None, no rectangle displayed.
Defaults to None.
savename : str, optional
Name of the figure the map should be saved to. If None, the map won't
be saved (only displayed).
Defaults to None.
plots_folder : str, optional
Relative (or absolute) filepath to the folder in which the map will
be saved. Not used if savename is None.
Defaults to current folder.
"""
if shape is None:
shape = np.array([np.ceil(np.sqrt(data_array.shape[0])).astype(int),]*2)
fig, ax = plt.subplots(shape[0], shape[1], figsize=(10,10), dpi=200,
sharex=True, sharey=True)

for i, enum in enumerate(list(zip(ax.flatten(),data_array))):
ax = enum[0]
data = enum[1]
instr = headers[i]['instrume']
rootname = headers[i]['rootname']
exptime = headers[i]['exptime']
filt = headers[i]['filtnam1']
#plots
im = ax.imshow(data, vmin=vmin, vmax=vmax, origin='lower')
if not(rectangle is None):
x, y, width, height = rectangle[i]
ax.add_patch(Rectangle((x, y), width, height, edgecolor='r', fill=False))
#position of centroid
ax.plot([data.shape[1]/2, data.shape[1]/2], [0,data.shape[0]-1], lw=1,
color='black')
ax.plot([0,data.shape[1]-1], [data.shape[1]/2, data.shape[1]/2], lw=1,
color='black')
ax.annotate(instr+":"+rootname, color='white', fontsize=5, xy=(0.02, 0.95), xycoords='axes fraction')
ax.annotate(filt, color='white', fontsize=10, xy=(0.02, 0.02), xycoords='axes fraction')
ax.annotate(exptime, color='white', fontsize=5, xy=(0.80, 0.02), xycoords='axes fraction')

fig.subplots_adjust(hspace=0, wspace=0, right=0.85)
cbar_ax = fig.add_axes([0.9, 0.12, 0.02, 0.75])
fig.colorbar(im, cax=cbar_ax)

if not (savename is None):
fig.suptitle(savename)
fig.savefig(plots_folder+savename+".png",bbox_inches='tight')
plt.show()
return 0

def polarization_map(Stokes, SNRp_cut=3., SNRi_cut=30., step_vec=1,
savename=None, plots_folder="", display=None):
"""
Plots polarization map from Stokes HDUList.
----------
Inputs:
Stokes : astropy.io.fits.hdu.hdulist.HDUList
HDUList containing I, Q, U, P, s_P, PA, s_PA (in this particular order)
for one observation.
SNRp_cut : float, optional
Cut that should be applied to the signal-to-noise ratio on P.
Any SNR < SNRp_cut won't be displayed.
Defaults to 3.
SNRi_cut : float, optional
Cut that should be applied to the signal-to-noise ratio on I.
Any SNR < SNRi_cut won't be displayed.
Defaults to 30. This value implies an uncertainty in P of 4.7%
step_vec : int, optional
Number of steps between each displayed polarization vector.
If step_vec = 2, every other vector will be displayed.
Defaults to 1
savename : str, optional
Name of the figure the map should be saved to. If None, the map won't
be saved (only displayed).
Defaults to None.
plots_folder : str, optional
Relative (or absolute) filepath to the folder in which the map will
be saved. Not used if savename is None.
Defaults to current folder.
display : str, optional
Choose the map to display between intensity (default), polarization
degree ('p','pol','pol_deg') or polarization degree error ('s_p',
'pol_err','pol_deg_err').
Defaults to None (intensity).
"""
#Get data
|
||||
stkI = Stokes[np.argmax([Stokes[i].header['datatype']=='I_stokes' for i in range(len(Stokes))])]
|
||||
stkQ = Stokes[np.argmax([Stokes[i].header['datatype']=='Q_stokes' for i in range(len(Stokes))])]
|
||||
stkU = Stokes[np.argmax([Stokes[i].header['datatype']=='U_stokes' for i in range(len(Stokes))])]
|
||||
stk_cov = Stokes[np.argmax([Stokes[i].header['datatype']=='IQU_cov_matrix' for i in range(len(Stokes))])]
|
||||
pol = Stokes[np.argmax([Stokes[i].header['datatype']=='Pol_deg' for i in range(len(Stokes))])]
|
||||
pol_err = Stokes[np.argmax([Stokes[i].header['datatype']=='Pol_deg_err' for i in range(len(Stokes))])]
|
||||
pol_err_Poisson = Stokes[np.argmax([Stokes[i].header['datatype']=='Pol_deg_err_Poisson_noise' for i in range(len(Stokes))])]
|
||||
pang = Stokes[np.argmax([Stokes[i].header['datatype']=='Pol_ang' for i in range(len(Stokes))])]
|
||||
pang_err = Stokes[np.argmax([Stokes[i].header['datatype']=='Pol_ang_err' for i in range(len(Stokes))])]
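The nine argmax lookups above could be factored into a small helper; a sketch (get_hdu is a hypothetical name, not part of the pipeline):

# Hypothetical helper factoring out the repeated lookup by 'datatype'.
def get_hdu(Stokes, datatype):
    # Return the first HDU whose header matches the requested datatype.
    for hdu in Stokes:
        if hdu.header.get('datatype') == datatype:
            return hdu
    raise KeyError("No HDU with datatype '{0:s}'".format(datatype))

# e.g. stkI = get_hdu(Stokes, 'I_stokes'); pol = get_hdu(Stokes, 'Pol_deg')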

    pivot_wav = Stokes[0].header['photplam']
    convert_flux = Stokes[0].header['photflam']/Stokes[0].header['exptot']
    wcs = WCS(Stokes[0]).deepcopy()

    #Compute SNR and apply cuts
    pol.data[pol.data == 0.] = np.nan
    #SNRp = pol.data/pol_err.data
    SNRp = pol.data/pol_err_Poisson.data
    pol.data[SNRp < SNRp_cut] = np.nan
    #SNRi = stkI.data/np.sqrt(stk_cov.data[0,0])
    SNRi = stkI.data/np.std(stkI.data[0:10,0:10])
    pol.data[SNRi < SNRi_cut] = np.nan

    mask = (SNRp < SNRp_cut) * (SNRi < SNRi_cut)

    # Look for pixel of max polarization
    if np.isfinite(pol.data).any():
        p_max = np.max(pol.data[np.isfinite(pol.data)])
        x_max, y_max = np.unravel_index(np.argmax(pol.data==p_max), pol.data.shape)
    else:
        print("No pixel with polarization information above requested SNR.")

    #Plot the map
    fig = plt.figure(figsize=(15,15))
    ax = fig.add_subplot(111, projection=wcs)
    ax.set_facecolor('k')
    fig.subplots_adjust(hspace=0, wspace=0, right=0.95)
    cbar_ax = fig.add_axes([0.98, 0.12, 0.01, 0.75])

    if display is None:
        # If no display selected, show intensity map
        vmin, vmax = 0., np.max(stkI.data[stkI.data > 0.]*convert_flux)
        im = ax.imshow(stkI.data*convert_flux, extent=[-stkI.data.shape[1]/2.,stkI.data.shape[1]/2.,-stkI.data.shape[0]/2.,stkI.data.shape[0]/2.], vmin=vmin, vmax=vmax, aspect='auto', cmap='inferno', alpha=1.)
        cbar = plt.colorbar(im, cax=cbar_ax, label=r"$F_{\lambda}$ [$ergs \cdot cm^{-2} \cdot s^{-1} \cdot \AA^{-1}$]")
        levelsI = np.linspace(SNRi_cut, SNRi.max(), 10)
        cont = ax.contour(SNRi, extent=[-SNRi.shape[1]/2.,SNRi.shape[1]/2.,-SNRi.shape[0]/2.,SNRi.shape[0]/2.], levels=levelsI, colors='grey', linewidths=0.5)
    elif display.lower() in ['p','pol','pol_deg']:
        # Display polarization degree map
        vmin, vmax = 0., 100.
        im = ax.imshow(pol.data, extent=[-pol.data.shape[1]/2.,pol.data.shape[1]/2.,-pol.data.shape[0]/2.,pol.data.shape[0]/2.], vmin=vmin, vmax=vmax, aspect='auto', cmap='inferno', alpha=1.)
        cbar = plt.colorbar(im, cax=cbar_ax, label=r"$P$ [%]")
    elif display.lower() in ['s_p','pol_err','pol_deg_err']:
        # Display polarization degree error map
        vmin, vmax = 0., 5.
        im = ax.imshow(pol_err.data, extent=[-pol_err.data.shape[1]/2.,pol_err.data.shape[1]/2.,-pol_err.data.shape[0]/2.,pol_err.data.shape[0]/2.], vmin=vmin, vmax=vmax, aspect='auto', cmap='inferno', alpha=1.)
        cbar = plt.colorbar(im, cax=cbar_ax, label=r"$\sigma_P$ [%]")
    elif display.lower() in ['snr','snri']:
        # Display I_stokes signal-to-noise map
        vmin, vmax = 0., SNRi.max()
        im = ax.imshow(SNRi, extent=[-SNRi.shape[1]/2.,SNRi.shape[1]/2.,-SNRi.shape[0]/2.,SNRi.shape[0]/2.], vmin=vmin, vmax=vmax, aspect='auto', cmap='inferno', alpha=1.)
        cbar = plt.colorbar(im, cax=cbar_ax, label=r"$I_{Stokes}/\sigma_{I}$")
        levelsI = np.linspace(SNRi_cut, SNRi.max(), 20)
        cont = ax.contour(SNRi, extent=[-SNRi.shape[1]/2.,SNRi.shape[1]/2.,-SNRi.shape[0]/2.,SNRi.shape[0]/2.], levels=levelsI, colors='grey', linewidths=0.5)
    elif display.lower() in ['snrp']:
        # Display polarization degree signal-to-noise map
        vmin, vmax = SNRp_cut, np.max(SNRp[SNRp > 0.])
        im = ax.imshow(SNRp, extent=[-SNRp.shape[1]/2.,SNRp.shape[1]/2.,-SNRp.shape[0]/2.,SNRp.shape[0]/2.], vmin=vmin, vmax=vmax, aspect='auto', cmap='inferno', alpha=1.)
        cbar = plt.colorbar(im, cax=cbar_ax, label=r"$P/\sigma_{P}$")
        levelsP = np.linspace(SNRp_cut, np.max(SNRp[SNRp > 0.]), 10)
        cont = ax.contour(SNRp, extent=[-SNRp.shape[1]/2.,SNRp.shape[1]/2.,-SNRp.shape[0]/2.,SNRp.shape[0]/2.], levels=levelsP, colors='grey', linewidths=0.5)
    else:
        # Defaults to intensity map
        vmin, vmax = 0., np.max(stkI.data[stkI.data > 0.]*convert_flux)
        im = ax.imshow(stkI.data*convert_flux, extent=[-stkI.data.shape[1]/2.,stkI.data.shape[1]/2.,-stkI.data.shape[0]/2.,stkI.data.shape[0]/2.], vmin=vmin, vmax=vmax, aspect='auto', cmap='inferno', alpha=1.)
        cbar = plt.colorbar(im, cax=cbar_ax, label=r"$F_{\lambda}$ [$ergs \cdot cm^{-2} \cdot s^{-1} \cdot \AA^{-1}$]")
        levelsI = np.linspace(SNRi_cut, SNRi.max(), 10)
        cont = ax.contour(SNRi, extent=[-SNRi.shape[1]/2.,SNRi.shape[1]/2.,-SNRi.shape[0]/2.,SNRi.shape[0]/2.], levels=levelsI, colors='grey', linewidths=0.5)

    px_size = wcs.wcs.get_cdelt()[0]
    px_sc = AnchoredSizeBar(ax.transData, 1./px_size, '1 arcsec', 3, pad=0.5, sep=5, borderpad=0.5, frameon=False, size_vertical=0.005, color='w')
    ax.add_artist(px_sc)

    X, Y = np.meshgrid(np.linspace(-stkI.data.shape[0]/2.,stkI.data.shape[0]/2.,stkI.data.shape[0]), np.linspace(-stkI.data.shape[1]/2.,stkI.data.shape[1]/2.,stkI.data.shape[1]))
    U, V = pol.data*np.cos(np.pi/2.+pang.data*np.pi/180.), pol.data*np.sin(np.pi/2.+pang.data*np.pi/180.)
    Q = ax.quiver(X[::step_vec,::step_vec],Y[::step_vec,::step_vec],U[::step_vec,::step_vec],V[::step_vec,::step_vec],units='xy',angles='uv',scale=50.,scale_units='xy',pivot='mid',headwidth=0.,headlength=0.,headaxislength=0.,width=0.1,color='w')
    pol_sc = AnchoredSizeBar(ax.transData, 2., r"$P$= 100 %", 4, pad=0.5, sep=5, borderpad=0.5, frameon=False, size_vertical=0.005, color='w')
    ax.add_artist(pol_sc)

    # Compute integrated parameters and associated errors
    I_int = stkI.data[mask].sum()
    Q_int = stkQ.data[mask].sum()
    U_int = stkU.data[mask].sum()
    I_int_err = np.sqrt(np.sum(stk_cov.data[0,0][mask]))
    Q_int_err = np.sqrt(np.sum(stk_cov.data[1,1][mask]))
    U_int_err = np.sqrt(np.sum(stk_cov.data[2,2][mask]))
    IQ_int_err = np.sqrt(np.sum(stk_cov.data[0,1][mask]**2))
    IU_int_err = np.sqrt(np.sum(stk_cov.data[0,2][mask]**2))
    QU_int_err = np.sqrt(np.sum(stk_cov.data[1,2][mask]**2))

    P_int = np.sqrt(Q_int**2+U_int**2)/I_int*100.
    P_int_err = np.sqrt(2.)*(I_int)**(-0.5)*100. #(100./I_int)*np.sqrt((Q_int**2*Q_int_err**2 + U_int**2*U_int_err**2 + 2.*Q_int*U_int*QU_int_err)/(Q_int**2 + U_int**2) + ((Q_int/I_int)**2 + (U_int/I_int)**2)*I_int_err**2 - 2.*(Q_int/I_int)*IQ_int_err - 2.*(U_int/I_int)*IU_int_err)

    PA_int = (90./np.pi)*np.arctan2(U_int,Q_int)+90.
    PA_int_err = P_int_err/P_int*180./np.pi #(90./(np.pi*(Q_int**2 + U_int**2)))*np.sqrt(U_int**2*Q_int_err**2 + Q_int**2*U_int_err**2 - 2.*Q_int*U_int*QU_int_err)

    ax.annotate(r"$F_{{\lambda}}^{{int}}$({0:.0f} $\AA$) = {1:.1e} $\pm$ {2:.1e} $ergs \cdot cm^{{-2}} \cdot s^{{-1}} \cdot \AA^{{-1}}$".format(pivot_wav,I_int*convert_flux,I_int_err*convert_flux)+"\n"+r"$P^{{int}}$ = {0:.2f} $\pm$ {1:.2f} %".format(P_int,P_int_err)+"\n"+r"$\theta_{{P}}^{{int}}$ = {0:.2f} $\pm$ {1:.2f} °".format(PA_int,PA_int_err), color='white', fontsize=11, xy=(0.01, 0.94), xycoords='axes fraction')

    ax.coords.grid(True, color='white', ls='dotted', alpha=0.5)
    ax.coords[0].set_axislabel('Right Ascension (J2000)')
    ax.coords[0].set_axislabel_position('t')
    ax.coords[0].set_ticklabel_position('t')
    ax.coords[1].set_axislabel('Declination (J2000)')
    ax.coords[1].set_axislabel_position('l')
    ax.coords[1].set_ticklabel_position('l')
    ax.axis('equal')

    if not savename is None:
        fig.suptitle(savename)
        fig.savefig(plots_folder+savename+".png", bbox_inches='tight', dpi=200)

    plt.show()
    return 0
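A minimal call mirroring the pipeline defaults (the Stokes product file name is a hypothetical placeholder):

from astropy.io import fits

Stokes = fits.open("NGC1068_FOC_stokes.fits")  # hypothetical pipeline product
polarization_map(Stokes, SNRp_cut=3., SNRi_cut=30., step_vec=1,
        savename="NGC1068_FOC", plots_folder="plots/", display='snri')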

@@ -45,6 +45,7 @@ import copy
import numpy as np
import matplotlib.pyplot as plt
import matplotlib.dates as mdates
from matplotlib.patches import Rectangle
from datetime import datetime
from scipy.ndimage import rotate as sc_rotate
from scipy.ndimage import shift as sc_shift

@@ -166,7 +167,8 @@ def bin_ndarray(ndarray, new_shape, operation='sum'):
    return ndarray


def crop_array(data_array, error_array=None, step=5, null_val=None, inside=False):
def crop_array(data_array, headers, error_array=None, step=5, null_val=None,
        inside=False, display=False, savename=None, plots_folder=""):
    """
    Homogeneously crop an array: all contained images will have the same shape.
    The 'inside' parameter decides how much should be cropped.
@@ -175,6 +177,8 @@ def crop_array(data_array, error_array=None, step=5, null_val=None, inside=False
    data_array : numpy.ndarray
        Array containing the observation data (2D float arrays) to
        homogeneously crop.
    headers : header list
        Headers associated with the images in data_array.
    error_array : numpy.ndarray, optional
        Array of images (2D floats, aligned and of the same shape) containing
        the error in each pixel of the observation images in data_array.
@@ -197,6 +201,10 @@ def crop_array(data_array, error_array=None, step=5, null_val=None, inside=False
        inside the image. If False, the cropped image will be the minimum
        rectangle in which the whole image is included.
        Defaults to False.
    display : boolean, optional
        If True, data_array will be displayed with a rectangle around the
        sub-image selected as the region of interest.
        Defaults to False.
    ----------
    Returns:
    cropped_array : numpy.ndarray

@@ -225,6 +233,46 @@ def crop_array(data_array, error_array=None, step=5, null_val=None, inside=False
    v_array[3] = np.max(vertex[:,3]).astype(int)

    new_shape = np.array([v_array[1]-v_array[0], v_array[3]-v_array[2]])
    rectangle = [v_array[2], v_array[0], new_shape[1], new_shape[0], 'b']
    if display:
        fig, ax = plt.subplots()
        data = data_array[0]
        instr = headers[0]['instrume']
        rootname = headers[0]['rootname']
        exptime = headers[0]['exptime']
        filt = headers[0]['filtnam1']
        #plots
        im = ax.imshow(data, vmin=data.min(), vmax=data.max(), origin='lower')
        x, y, width, height, color = rectangle
        ax.add_patch(Rectangle((x, y), width, height, edgecolor=color, fill=False))
        #position of centroid
        ax.plot([data.shape[1]/2, data.shape[1]/2], [0, data.shape[0]-1], lw=1,
                color='black')
        ax.plot([0, data.shape[1]-1], [data.shape[0]/2, data.shape[0]/2], lw=1,
                color='black')
        ax.annotate(instr+":"+rootname, color='white', fontsize=5,
                xy=(0.02, 0.95), xycoords='axes fraction')
        ax.annotate(filt, color='white', fontsize=10, xy=(0.02, 0.02),
                xycoords='axes fraction')
        ax.annotate(exptime, color='white', fontsize=5, xy=(0.80, 0.02),
                xycoords='axes fraction')
        ax.set(title="Location of cropped image.",
                xlabel='pixel offset',
                ylabel='pixel offset')

        fig.subplots_adjust(hspace=0, wspace=0, right=0.85)
        cbar_ax = fig.add_axes([0.9, 0.12, 0.02, 0.75])
        fig.colorbar(im, cax=cbar_ax)

        if not(savename is None):
            fig.suptitle(savename+'_'+filt+'_crop_region')
            fig.savefig(plots_folder+savename+'_'+filt+'_crop_region.png',
                    bbox_inches='tight')
            plot_obs(data_array, headers, vmin=data_array.min(),
                    vmax=data_array.max(), rectangle=[rectangle,]*len(headers),
                    savename=savename+'_crop_region', plots_folder=plots_folder)
        plt.show()

    crop_array = np.zeros((data_array.shape[0], new_shape[0], new_shape[1]))
    crop_error_array = np.zeros((data_array.shape[0], new_shape[0], new_shape[1]))
    for i,image in enumerate(data_array):
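(The loop body continues in the part of the diff not shown here.) For context, the vertex search feeding v_array amounts to a null-margin bounding box; a toy sketch of the idea, ignoring the step and inside options:

import numpy as np

def null_margin_bbox(image, null_val=0.):
    # Smallest rectangle containing all pixels above the blank-margin value
    # (toy version of the vertex search; assumes at least one such pixel).
    rows = np.any(image > null_val, axis=1)
    cols = np.any(image > null_val, axis=0)
    rmin, rmax = np.where(rows)[0][[0, -1]]
    cmin, cmax = np.where(cols)[0][[0, -1]]
    return rmin, rmax, cmin, cmax   # same ordering as v_array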

@@ -360,7 +408,7 @@ def get_error(data_array, sub_shape=(15,15), display=False, headers=None,
        Only returned if return_background is True.
    """
    # Crop out any null edges
    data, error = crop_array(data_array, step=5, null_val=0., inside=False)
    data, error = crop_array(data_array, headers, step=5, null_val=0., inside=False)

    sub_shape = np.array(sub_shape)
    # Make sub_shape of odd values
@@ -371,7 +419,7 @@ def get_error(data_array, sub_shape=(15,15), display=False, headers=None,
    diff = (sub_shape-1).astype(int)
    temp = np.zeros((shape[0], shape[1]-diff[0], shape[2]-diff[1]))
    error_array = np.ones(data_array.shape)
    rectangle = np.zeros((shape[0],4))
    rectangle = []
    background = np.zeros((shape[0]))

    for i,image in enumerate(data):
@@ -384,7 +432,7 @@ def get_error(data_array, sub_shape=(15,15), display=False, headers=None,
    minima = np.unravel_index(np.argmin(temp.sum(axis=0)), temp.shape[1:])

    for i, image in enumerate(data):
        rectangle[i] = minima[1], minima[0], sub_shape[1], sub_shape[0]
        rectangle.append([minima[1], minima[0], sub_shape[1], sub_shape[0], 'r'])
        # Compute error: root mean square of the background
        sub_image = image[minima[0]:minima[0]+sub_shape[0], minima[1]:minima[1]+sub_shape[1]]
        #error = np.std(sub_image) # Previously computed using standard deviation over the background
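The comment trail above records the switch from standard deviation to root mean square over the background patch; the two differ by the patch's mean level, as a toy comparison shows:

import numpy as np

rng = np.random.default_rng(0)
sub_image = rng.normal(loc=0.3, scale=0.1, size=(15, 15))  # toy background patch
std = np.std(sub_image)                # spread around the patch mean
rms = np.sqrt(np.mean(sub_image**2))   # includes the mean level itself
print(std, rms)   # rms > std whenever the background level is non-zero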

@@ -398,6 +446,7 @@

    if display:

        convert_flux = headers[0]['photflam']
        date_time = np.array([headers[i]['date-obs']+';'+headers[i]['time-obs']
                for i in range(len(headers))])
        date_time = np.array([datetime.strptime(d,'%Y-%m-%d;%H:%M:%S')

@@ -406,12 +455,13 @@
        dict_filt = {"POL0":'r', "POL60":'g', "POL120":'b'}
        c_filt = np.array([dict_filt[f] for f in filt])

        fig,ax = plt.subplots(figsize=(10,6),constrained_layout=True)
        fig, ax = plt.subplots(figsize=(10,6), constrained_layout=True)
        for f in np.unique(filt):
            mask = [fil==f for fil in filt]
            ax.scatter(date_time[mask], background[mask], color=dict_filt[f],
                label="Filter : {0:s}".format(f))
        ax.errorbar(date_time, background, yerr=error_array[:,0,0], fmt='+k',
            ax.scatter(date_time[mask], background[mask]*convert_flux,
                color=dict_filt[f], label="Filter : {0:s}".format(f))
        ax.errorbar(date_time, background*convert_flux,
            yerr=error_array[:,0,0]*convert_flux, fmt='+k',
            markersize=0, ecolor=c_filt)
        # Date handling
        locator = mdates.AutoDateLocator()

@@ -424,12 +474,12 @@
        plt.legend()

        if not(savename is None):
            fig.suptitle(savename+"_background_flux.png")
            fig.suptitle(savename+"_background_flux")
            fig.savefig(plots_folder+savename+"_background_flux.png",
                bbox_inches='tight')
        vmin = np.min(np.log10(data[data > 0.]))
        vmax = np.max(np.log10(data[data > 0.]))
        plot_obs(np.log10(data), headers, vmin=vmin, vmax=vmax,
        plot_obs(data, headers, vmin=data.min(), vmax=data.max(),
            rectangle=rectangle,
            savename=savename+"_background_location",
            plots_folder=plots_folder)

@@ -555,8 +605,8 @@ def rebin_array(data_array, error_array, headers, pxsize, scale,
    return rebinned_data, rebinned_error, rebinned_headers, Dxy


def align_data(data_array, error_array=None, upsample_factor=1., ref_data=None,
        ref_center=None, return_shifts=True):
def align_data(data_array, headers, error_array=None, upsample_factor=1.,
        ref_data=None, ref_center=None, return_shifts=True):
    """
    Align images in data_array using cross correlation, and rescale them to
    wider images able to contain any rotation of the reference image.
@@ -565,6 +615,8 @@ def align_data(data_array, error_array=None, upsample_factor=1., ref_data=None,
    Inputs:
    data_array : numpy.ndarray
        Array containing the data to align (2D float arrays).
    headers : header list
        List of headers corresponding to the images in data_array.
    error_array : numpy.ndarray, optional
        Array of images (2D floats, aligned and of the same shape) containing
        the error in each pixel of the observation images in data_array.
@@ -626,7 +678,7 @@ def align_data(data_array, error_array=None, upsample_factor=1., ref_data=None,
    full_array = np.concatenate((data_array,[ref_data]), axis=0)
    err_array = np.concatenate((error_array,[np.zeros(ref_data.shape)]), axis=0)

    full_array, err_array = crop_array(full_array, err_array, step=5,
    full_array, err_array = crop_array(full_array, headers, err_array, step=5,
        inside=False)

    data_array, ref_data = full_array[:-1], full_array[-1]

@@ -649,6 +701,7 @@
    res_shape = int(np.ceil(np.sqrt(2.5)*np.max(shape[1:])))
    rescaled_image = np.zeros((shape[0], res_shape, res_shape))
    rescaled_error = np.ones((shape[0], res_shape, res_shape))
    rescaled_mask = np.ones((shape[0], res_shape, res_shape), dtype=bool)
    res_center = (np.array(rescaled_image.shape[1:])/2).astype(int)

    shifts, errors = [], []

@@ -665,9 +718,12 @@
            res_shift[1]:res_shift[1]+shape[2]] = copy.deepcopy(image)
        rescaled_error[i, res_shift[0]:res_shift[0]+shape[1],
            res_shift[1]:res_shift[1]+shape[2]] = copy.deepcopy(error_array[i])
        rescaled_mask[i, res_shift[0]:res_shift[0]+shape[1],
            res_shift[1]:res_shift[1]+shape[2]] = False
        # Shift images to align
        rescaled_image[i] = sc_shift(rescaled_image[i], shift, cval=0.)
        rescaled_error[i] = sc_shift(rescaled_error[i], shift, cval=background[i])
        rescaled_mask[i] = sc_shift(rescaled_mask[i], shift, cval=True)

        rescaled_image[i][rescaled_image[i] < 0.] = 0.

@@ -678,13 +734,13 @@
    errors = np.array(errors)

    if return_shifts:
        return rescaled_image, rescaled_error, shifts, errors
        return rescaled_image, rescaled_error, rescaled_mask.any(axis=0), shifts, errors
    else:
        return rescaled_image, rescaled_error
        return rescaled_image, rescaled_error, rescaled_mask.any(axis=0)
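rescaled_mask starts out True, is set False where real data lands, and is shifted with cval=True, so rescaled_mask.any(axis=0) flags every pixel that is padding in at least one aligned image. A toy check, reduced to 1D:

import numpy as np

# Two 1D "images" placed on a wider canvas; True marks padding.
mask = np.ones((2, 8), dtype=bool)
mask[0, 1:5] = False   # image 0 covers pixels 1..4
mask[1, 3:7] = False   # image 1 covers pixels 3..6
combined = mask.any(axis=0)
print(combined)   # True everywhere except pixels 3..4, covered by both images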


def smooth_data(data_array, error_array, headers, FWHM=1., scale='pixel',
        smoothing='gaussian'):
def smooth_data(data_array, error_array, data_mask, headers, FWHM=1.,
        scale='pixel', smoothing='gaussian'):
    """
    Smooth a data_array using the selected function.
    ----------
@@ -746,6 +802,7 @@ def smooth_data(data_array, error_array, headers, FWHM=1., scale='pixel',

    #Define gaussian stdev
    stdev = FWHM/(2.*np.sqrt(2.*np.log(2)))
    fmax = np.finfo(np.float64).max

    if smoothing.lower() in ['combine','combining']:
        # Smooth using N images combination algorithm
@@ -761,11 +818,15 @@ def smooth_data(data_array, error_array, headers, FWHM=1., scale='pixel',
        for r in range(smoothed.shape[0]):
            for c in range(smoothed.shape[1]):
                # Compute distance from current pixel
                dist_rc = np.sqrt((r-xx)**2+(c-yy)**2)
                dist_rc = np.where(data_mask, fmax, np.sqrt((r-xx)**2+(c-yy)**2))
                g_rc = np.array([np.exp(-0.5*(dist_rc/stdev)**2),]*len(data_array))
                # Apply weighted combination
                smoothed[r,c] = np.sum(data_array*weight*g_rc)/np.sum(weight*g_rc)
                error[r,c] = np.sqrt(np.sum(weight*g_rc**2))/np.sum(weight*g_rc)
                if data_mask[r,c]:
                    smoothed[r,c] = 0.
                    error[r,c] = 1.
                else:
                    smoothed[r,c] = np.sum(data_array*weight*g_rc)/np.sum(weight*g_rc)
                    error[r,c] = np.sqrt(np.sum(weight*g_rc**2))/np.sum(weight*g_rc)

    elif smoothing.lower() in ['gauss','gaussian']:
        #Convolution with gaussian function
@@ -775,10 +836,14 @@ def smooth_data(data_array, error_array, headers, FWHM=1., scale='pixel',
        xx, yy = np.indices(image.shape)
        for r in range(image.shape[0]):
            for c in range(image.shape[1]):
                dist_rc = np.sqrt((r-xx)**2+(c-yy)**2)
                dist_rc = np.where(data_mask, fmax, np.sqrt((r-xx)**2+(c-yy)**2))
                g_rc = np.exp(-0.5*(dist_rc/stdev)**2)/(2.*np.pi*stdev**2)
                smoothed[i][r,c] = np.sum(image*g_rc)
                error[i][r,c] = np.sum(error_array*g_rc**2)
                if data_mask[r,c]:
                    smoothed[i][r,c] = 0.
                    error[i][r,c] = 1.
                else:
                    smoothed[i][r,c] = np.sum(image*g_rc)
                    error[i][r,c] = np.sum(error_array[i]*g_rc**2)

    else:
        raise ValueError("{} is not a valid smoothing option".format(smoothing))
@@ -786,8 +851,8 @@ def smooth_data(data_array, error_array, headers, FWHM=1., scale='pixel',
    return smoothed, error
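In both branches, masked pixels are pushed to dist_rc = fmax so their gaussian weight underflows to exactly zero and they drop out of the sums; a quick demonstration:

import numpy as np

# Masked samples get dist = float64 max, so their weight becomes exactly 0.
fmax = np.finfo(np.float64).max
stdev = 1.5
with np.errstate(over='ignore'):
    w = np.exp(-0.5*(np.array([0., 2., fmax])/stdev)**2)
print(w)   # [1.0, 0.41..., 0.0] -> the masked sample contributes nothing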


def polarizer_avg(data_array, error_array, headers, FWHM=None, scale='pixel',
        smoothing='gaussian'):
def polarizer_avg(data_array, error_array, data_mask, headers, FWHM=None,
        scale='pixel', smoothing='gaussian'):
    """
    Make the average image from a single polarizer for a given instrument.
    -----------
@@ -862,11 +927,11 @@ def polarizer_avg(data_array, error_array, headers, FWHM=None, scale='pixel',

    if not(FWHM is None) and (smoothing.lower() in ['combine','combining']):
        # Smooth by combining each polarizer's images
        pol0, err0 = smooth_data(pol0_array, err0_array, headers0,
        pol0, err0 = smooth_data(pol0_array, err0_array, data_mask, headers0,
            FWHM=FWHM, scale=scale, smoothing=smoothing)
        pol60, err60 = smooth_data(pol60_array, err60_array, headers60,
        pol60, err60 = smooth_data(pol60_array, err60_array, data_mask, headers60,
            FWHM=FWHM, scale=scale, smoothing=smoothing)
        pol120, err120 = smooth_data(pol120_array, err120_array, headers120,
        pol120, err120 = smooth_data(pol120_array, err120_array, data_mask, headers120,
            FWHM=FWHM, scale=scale, smoothing=smoothing)

    else:
@@ -908,7 +973,7 @@ def polarizer_avg(data_array, error_array, headers, FWHM=None, scale='pixel',
    if not(FWHM is None) and (smoothing.lower() in ['gaussian','gauss']):
        # Smooth by convolving each polX image with a gaussian.
        pol_array, polerr_array = smooth_data(pol_array, polerr_array,
            headers_array, FWHM=FWHM, scale=scale)
            data_mask, headers_array, FWHM=FWHM, scale=scale)
        pol0, pol60, pol120 = pol_array
        err0, err60, err120 = polerr_array

@@ -931,8 +996,8 @@ def polarizer_avg(data_array, error_array, headers, FWHM=None, scale='pixel',
    return polarizer_array, polarizer_cov


def compute_Stokes(data_array, error_array, headers, FWHM=None,
        scale='pixel', smoothing='gaussian_after'):
def compute_Stokes(data_array, error_array, data_mask, headers,
        FWHM=None, scale='pixel', smoothing='gaussian_after'):
    """
    Compute the Stokes parameters I, Q and U for a given data set.
    ----------
@@ -989,8 +1054,8 @@ def compute_Stokes(data_array, error_array, headers, FWHM=None,
    # Routine for the FOC instrument
    if instr == 'FOC':
        # Get image from each polarizer and covariance matrix
        pol_array, pol_cov = polarizer_avg(data_array, error_array, headers,
            FWHM=FWHM, scale=scale, smoothing=smoothing)
        pol_array, pol_cov = polarizer_avg(data_array, error_array, data_mask,
            headers, FWHM=FWHM, scale=scale, smoothing=smoothing)
        pol0, pol60, pol120 = pol_array

        if (pol0 < 0.).any() or (pol60 < 0.).any() or (pol120 < 0.).any():
@@ -1012,10 +1077,10 @@ def compute_Stokes(data_array, error_array, headers, FWHM=None,
        if mask.any():
            print("WARNING : I_pol > I_stokes : ", len(I_stokes[mask]))

        plt.imshow(np.sqrt(Q_stokes**2+U_stokes**2)/I_stokes*mask, origin='lower')
        plt.colorbar()
        plt.title(r"$I_{pol}/I_{tot}$")
        plt.show()
        #plt.imshow(np.sqrt(Q_stokes**2+U_stokes**2)/I_stokes*mask, origin='lower')
        #plt.colorbar()
        #plt.title(r"$I_{pol}/I_{tot}$")
        #plt.show()

        #I_stokes[mask]=0.
        #Q_stokes[mask]=0.
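For reference, the ideal three-polarizer (0°/60°/120°) combination behind pol0/pol60/pol120 is the standard one below; the pipeline's actual prefactors may differ once instrument throughputs are corrected for, so treat this as a sketch:

import numpy as np

def stokes_from_triplet(pol0, pol60, pol120):
    # Ideal 0/60/120-degree polarizer combination (no throughput corrections).
    I = 2./3.*(pol0 + pol60 + pol120)
    Q = 2./3.*(2.*pol0 - pol60 - pol120)
    U = 2./np.sqrt(3.)*(pol60 - pol120)
    return I, Q, U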

@@ -1107,11 +1172,12 @@ def compute_pol(I_stokes, Q_stokes, U_stokes, Stokes_cov, headers):
    #Compute the total exposure time so that
    #I_stokes*exp_tot = N_tot the total number of events
    exp_tot = np.array([header['exptime'] for header in headers]).sum()
    print("Total exposure time : {} sec".format(exp_tot))
    N_obs = I_stokes*exp_tot

    #Errors on P, PA supposing Poisson noise
    s_P_P = np.sqrt(2.)/np.sqrt(N_obs)*100.
    s_PA_P = s_P_P/(2.*P/100.)*180./np.pi
    s_PA_P = s_P_P/(2.*P)*180./np.pi

    # Nan handling :
    fmax = np.finfo(np.float64).max
@@ -1126,7 +1192,7 @@ def compute_pol(I_stokes, Q_stokes, U_stokes, Stokes_cov, headers):
    return P, debiased_P, s_P, s_P_P, PA, s_PA, s_PA_P
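compute_pol also returns debiased_P; the conventional quadrature debiasing looks like the sketch below (an assumption for illustration — the estimator actually used lives in a part of the file this diff does not show):

import numpy as np

def debias_P(P, s_P):
    # Conventional quadrature debiasing of the polarization degree:
    # sqrt(P^2 - s_P^2) where P exceeds its error, zero otherwise.
    return np.sqrt(np.clip(np.asarray(P)**2 - np.asarray(s_P)**2, 0., None))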


def rotate_data(data_array, error_array, headers, ang):
def rotate_data(data_array, error_array, data_mask, headers, ang):
    """
    Use scipy.ndimage.rotate to rotate I_stokes by a given angle in degrees,
    and a rotation matrix to rotate Q, U by the same angle, updating the headers
@@ -1164,6 +1230,7 @@ def rotate_data(data_array, error_array, headers, ang):
        new_error_array.append(sc_rotate(error_array[i], ang, reshape=False,
            cval=error_array.mean()))
    new_data_array = np.array(new_data_array)
    new_data_mask = sc_rotate(data_mask, ang, reshape=False, cval=True)
    new_error_array = np.array(new_error_array)

    for i in range(len(new_data_array)):
@@ -1192,10 +1259,10 @@ def rotate_data(data_array, error_array, headers, ang):

        new_headers.append(new_header)

    return new_data_array, new_error_array, new_headers
    return new_data_array, new_error_array, new_data_mask, new_headers


def rotate_Stokes(I_stokes, Q_stokes, U_stokes, Stokes_cov, headers, ang, SNRi_cut=None):
def rotate_Stokes(I_stokes, Q_stokes, U_stokes, Stokes_cov, data_mask, headers, ang, SNRi_cut=None):
    """
    Use scipy.ndimage.rotate to rotate I_stokes by a given angle in degrees,
    and a rotation matrix to rotate Q, U by the same angle, updating the headers
@@ -1266,16 +1333,14 @@ def rotate_Stokes(I_stokes, Q_stokes, U_stokes, Stokes_cov, headers, ang, SNRi_c
    new_Stokes_cov[1,2] = new_Stokes_cov[2,1] = np.cos(2.*alpha)*np.sin(2.*alpha)*(Stokes_cov[2,2] - Stokes_cov[1,1]) + (np.cos(2.*alpha)**2 - np.sin(2.*alpha)**2)*Stokes_cov[1,2]

    #Rotate original images using scipy.ndimage.rotate
    new_I_stokes = sc_rotate(new_I_stokes, ang, reshape=False,
        cval=0.0*np.sqrt(new_Stokes_cov[0,0][0,0]))
    new_Q_stokes = sc_rotate(new_Q_stokes, ang, reshape=False,
        cval=0.0*np.sqrt(new_Stokes_cov[1,1][0,0]))
    new_U_stokes = sc_rotate(new_U_stokes, ang, reshape=False,
        cval=0.0*np.sqrt(new_Stokes_cov[2,2][0,0]))
    new_I_stokes = sc_rotate(new_I_stokes, ang, reshape=False, cval=0.)
    new_Q_stokes = sc_rotate(new_Q_stokes, ang, reshape=False, cval=0.)
    new_U_stokes = sc_rotate(new_U_stokes, ang, reshape=False, cval=0.)
    new_data_mask = sc_rotate(data_mask, ang, reshape=False, cval=True)
    for i in range(3):
        for j in range(3):
            new_Stokes_cov[i,j] = sc_rotate(new_Stokes_cov[i,j], ang, reshape=False,
                cval=0.0*new_Stokes_cov[i,j].mean())
            new_Stokes_cov[i,j] = sc_rotate(new_Stokes_cov[i,j], ang,
                reshape=False, cval=0.)

    #Update headers to new angle
    new_headers = []
@@ -1310,10 +1375,10 @@ def rotate_Stokes(I_stokes, Q_stokes, U_stokes, Stokes_cov, headers, ang, SNRi_c
    new_U_stokes[np.isnan(new_U_stokes)] = 0.
    new_Stokes_cov[np.isnan(new_Stokes_cov)] = fmax

    return new_I_stokes, new_Q_stokes, new_U_stokes, new_Stokes_cov, new_headers
    return new_I_stokes, new_Q_stokes, new_U_stokes, new_Stokes_cov, new_data_mask, new_headers
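For reference, the 2-alpha rotation applied to Q and U ahead of the image-plane rotation (sign convention may differ from the pipeline's; I is invariant under a frame rotation):

import numpy as np

def rotate_QU(Q, U, ang_deg):
    # Frame rotation of the linear Stokes parameters: polarization
    # transforms with twice the rotation angle, I is unchanged.
    alpha = np.deg2rad(ang_deg)
    Q_new = Q*np.cos(2.*alpha) + U*np.sin(2.*alpha)
    U_new = -Q*np.sin(2.*alpha) + U*np.cos(2.*alpha)
    return Q_new, U_new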


def rotate2_Stokes(I_stokes, Q_stokes, U_stokes, Stokes_cov, headers, ang):
def rotate2_Stokes(I_stokes, Q_stokes, U_stokes, Stokes_cov, data_mask, headers, ang):
    """
    Use scipy.ndimage.rotate to rotate I_stokes by a given angle in degrees,
    and a rotation matrix to rotate Q, U by the same angle, updating the headers
@@ -1443,4 +1508,4 @@ def rotate2_Stokes(I_stokes, Q_stokes, U_stokes, Stokes_cov, headers, ang):
    s_P_P[np.isnan(s_P_P)] = fmax
    s_PA_P[np.isnan(s_PA_P)] = fmax

    return new_I_stokes, new_Q_stokes, new_U_stokes, new_Stokes_cov, P, debiased_P, s_P, s_P_P, PA, s_PA, s_PA_P, new_headers
    return new_I_stokes, new_Q_stokes, new_U_stokes, new_Stokes_cov, data_mask, P, debiased_P, s_P, s_P_P, PA, s_PA, s_PA_P, new_headers
File diff suppressed because it is too large