clean and complete docstrings

Thibault Barnouin
2022-07-05 12:49:01 +02:00
parent c75852e1ea
commit cc4625ffd5
6 changed files with 234 additions and 400 deletions

View File

@@ -13,8 +13,8 @@ except ImportError:
import numpy as np
def _upsampled_dft(data, upsampled_region_size, upsample_factor=1,
axis_offsets=None):
"""
Upsampled DFT by matrix multiplication.
This code is intended to provide the same result as if the following
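For reference, a minimal 2D sketch of the matrix-multiplication idea behind _upsampled_dft (the name, arguments and restriction to two dimensions are illustrative, not the module's actual implementation):

import numpy as np

def upsampled_dft_2d(F, region, upsample_factor=1, offsets=(0., 0.)):
    # Evaluate the DFT of the frequency-domain array F on an upsampled grid
    # of size `region` centered near `offsets`, using two matrix products
    # instead of zero-padding, inverse FFT and windowing.
    n_row, n_col = F.shape
    row_kernel = np.exp(-2j * np.pi * np.outer(
        np.arange(region[0]) - offsets[0], np.fft.fftfreq(n_row, upsample_factor)))
    col_kernel = np.exp(-2j * np.pi * np.outer(
        np.arange(region[1]) - offsets[1], np.fft.fftfreq(n_col, upsample_factor)))
    return row_kernel @ F @ col_kernel.T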

View File

@@ -1,5 +1,30 @@
""" """
Library functions for the implementation of various deconvolution algorithms. Library functions for the implementation of various deconvolution algorithms.
prototypes :
- gaussian_psf(FWHM, shape) -> kernel
Return the normalized gaussian point spread function over some kernel shape.
- from_file_psf(filename) -> kernel
Get the point spread function from an external FITS file.
- wiener(image, psf, alpha, clip) -> im_deconv
Implement the simplified Wiener filtering.
- van_cittert(image, psf, alpha, iterations, clip, filter_epsilon) -> im_deconv
Implement the Van-Cittert iterative algorithm.
- richardson_lucy(image, psf, iterations, clip, filter_epsilon) -> im_deconv
Implement the Richardson-Lucy iterative algorithm.
- one_step_gradient(image, psf, iterations, clip, filter_epsilon) -> im_deconv
Implement the One-step gradient iterative algorithm.
- conjgrad(image, psf, alpha, error, iterations) -> im_deconv
Implement the Conjugate Gradient algorithm.
- deconvolve_im(image, psf, alpha, error, iterations, clip, filter_epsilon, algo) -> im_deconv
Prepare data for deconvolution using specified algorithm.
""" """
import numpy as np import numpy as np
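The prototype list above only names the algorithms; as a point of reference, a minimal FFT-based sketch of the Richardson-Lucy update (assuming a normalized PSF; not necessarily the body of richardson_lucy in this module):

import numpy as np
from scipy.signal import fftconvolve

def richardson_lucy_sketch(image, psf, iterations=20, filter_epsilon=None):
    # Multiplicative updates x <- x * H^T(y / Hx), starting from a flat image.
    im_deconv = np.full(image.shape, 0.5, dtype=float)
    psf_mirror = np.flip(psf)
    for _ in range(iterations):
        conv = fftconvolve(im_deconv, psf, mode='same')
        if filter_epsilon:
            relative_blur = np.where(conv < filter_epsilon, 0., image / conv)
        else:
            relative_blur = image / conv
        im_deconv *= fftconvolve(relative_blur, psf_mirror, mode='same')
    return im_deconv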
@@ -18,9 +43,10 @@ def abs2(x):
def zeropad(arr, shape):
"""
Zero-pad array ARR to given shape.
The contents of ARR are approximately centered in the result.
"""
rank = arr.ndim
if len(shape) != rank:
raise ValueError("bad number of dimensions")
@@ -67,6 +93,60 @@ def zeropad(arr, shape):
return z
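The hunk above only shows the dimension check; one possible way to finish the padding with approximate centering (a hypothetical sketch, not necessarily the function's actual body):

import numpy as np

def zeropad_sketch(arr, shape):
    # Place arr approximately at the center of a zero array of the target shape
    # (assumes every target dimension is at least as large as the input).
    if len(shape) != arr.ndim:
        raise ValueError("bad number of dimensions")
    z = np.zeros(shape, dtype=arr.dtype)
    offsets = tuple((n_out - n_in) // 2 for n_out, n_in in zip(shape, arr.shape))
    z[tuple(slice(off, off + n) for off, n in zip(offsets, arr.shape))] = arr
    return z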
def gaussian2d(x, y, sigma):
return np.exp(-(x**2+y**2)/(2*sigma**2))/(2*np.pi*sigma**2)
def gaussian_psf(FWHM=1., shape=(5,5)):
"""
Define the gaussian Point-Spread-Function of chosen shape and FWHM.
----------
Inputs:
FWHM : float, optional
The Full Width at Half Maximum of the desired gaussian function for the
PSF in pixel increments.
Defaults to 1.
shape : tuple, optional
The shape of the PSF kernel. Must be of dimension 2.
Defaults to (5,5).
----------
Returns:
kernel : numpy.ndarray
Kernel containing the weights of the desired gaussian PSF.
"""
# Compute standard deviation from FWHM
stdev = FWHM/(2.*np.sqrt(2.*np.log(2.)))
# Create kernel of desired shape
x, y = np.meshgrid(np.arange(-shape[0]/2,shape[0]/2),np.arange(-shape[1]/2,shape[1]/2))
kernel = gaussian2d(x, y, stdev)
return kernel/kernel.sum()
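A quick usage check of gaussian_psf as defined above:

import numpy as np

kernel = gaussian_psf(FWHM=2., shape=(9, 9))
print(kernel.shape)                   # (9, 9)
print(np.isclose(kernel.sum(), 1.))   # True: the weights sum to one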
def from_file_psf(filename):
"""
Get the Point-Spread-Function from an external FITS file.
Such PSF can be generated using the TinyTim standalone program by STSCI.
See:
[1] https://www.stsci.edu/hst/instrumentation/focus-and-pointing/focus/tiny-tim-hst-psf-modeling
[2] https://doi.org/10.1117/12.892762
----------
Inputs:
filename : str
----------
Returns:
kernel : numpy.ndarray
Kernel containing the weights of the PSF read from the file.
"""
with fits.open(filename) as f:
psf = f[0].data
if not isinstance(psf, np.ndarray) or psf.ndim != 2:
raise ValueError("Invalid PSF image in PrimaryHDU at {0:s}".format(filename))
#Return the normalized Point Spread Function
kernel = psf/psf.max()
return kernel
def wiener(image, psf, alpha=0.1, clip=True):
"""
Implement the simplified Wiener filtering.
@@ -88,9 +168,6 @@ def wiener(image, psf, alpha=0.1, clip=True):
Returns:
im_deconv : ndarray
The deconvolved image.
----------
References:
[1]
""" """
float_type = np.promote_types(image.dtype, np.float32) float_type = np.promote_types(image.dtype, np.float32)
image = image.astype(float_type, copy=False) image = image.astype(float_type, copy=False)
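For context, a minimal sketch of the simplified Wiener filter named above, with alpha standing in for the noise-to-signal power ratio (illustrative only; the clipping and dtype handling of the module's wiener are omitted):

import numpy as np

def wiener_sketch(image, psf, alpha=0.1):
    # Fourier-domain filter: X = conj(H) * Y / (|H|**2 + alpha).
    # The PSF is zero-padded to the image shape and recentred on the origin
    # so that the filtering does not translate the image.
    psf_pad = np.zeros(image.shape)
    psf_pad[:psf.shape[0], :psf.shape[1]] = psf
    psf_pad = np.roll(psf_pad, (-(psf.shape[0] // 2), -(psf.shape[1] // 2)), axis=(0, 1))
    H = np.fft.fft2(psf_pad)
    Y = np.fft.fft2(image)
    return np.real(np.fft.ifft2(np.conj(H) * Y / (np.abs(H)**2 + alpha)))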
@@ -135,9 +212,6 @@ def van_cittert(image, psf, alpha=0.1, iterations=20, clip=True, filter_epsilon=
Returns:
im_deconv : ndarray
The deconvolved image.
----------
References
[1]
""" """
float_type = np.promote_types(image.dtype, np.float32) float_type = np.promote_types(image.dtype, np.float32)
image = image.astype(float_type, copy=False) image = image.astype(float_type, copy=False)
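Likewise, a minimal sketch of the Van-Cittert additive iteration (clipping and filter_epsilon handling omitted; the one_step_gradient variant is similar but applies the mirrored PSF to the residual first):

import numpy as np
from scipy.signal import fftconvolve

def van_cittert_sketch(image, psf, alpha=0.1, iterations=20):
    # Additive updates x <- x + alpha * (y - Hx), starting from the data.
    im_deconv = image.astype(float)
    for _ in range(iterations):
        residual = image - fftconvolve(im_deconv, psf, mode='same')
        im_deconv += alpha * residual
    return im_deconv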
@@ -231,9 +305,6 @@ def one_step_gradient(image, psf, iterations=20, clip=True, filter_epsilon=None)
Returns:
im_deconv : ndarray
The deconvolved image.
----------
References
[1]
""" """
float_type = np.promote_types(image.dtype, np.float32) float_type = np.promote_types(image.dtype, np.float32)
image = image.astype(float_type, copy=False) image = image.astype(float_type, copy=False)
@@ -282,9 +353,6 @@ def conjgrad(image, psf, alpha=0.1, error=None, iterations=20):
Returns:
im_deconv : ndarray
The deconvolved image.
----------
References:
[1]
""" """
float_type = np.promote_types(image.dtype, np.float32) float_type = np.promote_types(image.dtype, np.float32)
image = image.astype(float_type, copy=False) image = image.astype(float_type, copy=False)
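A sketch of the conjugate-gradient approach named above, solving the regularized normal equations (H^T H + alpha I) x = H^T y with H implemented as convolution by the PSF (the error default and the exact regularization term are assumptions):

import numpy as np
from scipy.signal import fftconvolve

def conjgrad_sketch(image, psf, alpha=0.1, error=1e-6, iterations=20):
    psf_mirror = np.flip(psf)
    def A(x):
        # Apply H^T H + alpha I through two convolutions.
        return fftconvolve(fftconvolve(x, psf, mode='same'),
                           psf_mirror, mode='same') + alpha * x
    b = fftconvolve(image.astype(float), psf_mirror, mode='same')
    x = np.zeros_like(b)
    r = b - A(x)
    p = r.copy()
    rs_old = np.sum(r * r)
    for _ in range(iterations):
        Ap = A(p)
        a = rs_old / np.sum(p * Ap)
        x += a * p
        r -= a * Ap
        rs_new = np.sum(r * r)
        if np.sqrt(rs_new) < error:
            break
        p = r + (rs_new / rs_old) * p
        rs_old = rs_new
    return x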
@@ -402,8 +470,8 @@ def conjgrad(image, psf, alpha=0.1, error=None, iterations=20):
def deconvolve_im(image, psf, alpha=0.1, error=None, iterations=20, clip=True,
filter_epsilon=None, algo='richardson'):
"""
Prepare an image for deconvolution using a chosen algorithm and return
results.
----------
Inputs:
image : numpy.ndarray
@@ -466,54 +534,3 @@ def deconvolve_im(image, psf, alpha=0.1, error=None, iterations=20, clip=True,
im_deconv = pxmax*norm_deconv
return im_deconv
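A hypothetical end-to-end call of the function above, assuming image is an already-loaded 2D numpy array:

psf = gaussian_psf(FWHM=2., shape=(9, 9))
im_deconv = deconvolve_im(image, psf, alpha=0.1, iterations=20, clip=True,
                          filter_epsilon=1e-7, algo='richardson')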
def gaussian2d(x,y,sigma):
return np.exp(-(x**2+y**2)/(2*sigma**2))/(2*np.pi*sigma**2)
def gaussian_psf(FWHM=1., shape=(5,5)):
"""
Define the gaussian Point-Spread-Function of chosen shape and FWHM.
----------
Inputs:
FWHM : float, optional
The Full Width at Half Maximum of the desired gaussian function for the
PSF in pixel increments.
Defaults to 1.
shape : tuple, optional
The shape of the PSF kernel. Must be of dimension 2.
Defaults to (5,5).
----------
Returns:
kernel : numpy.ndarray
Kernel containing the weights of the desired gaussian PSF.
"""
# Compute standard deviation from FWHM
stdev = FWHM/(2.*np.sqrt(2.*np.log(2.)))
# Create kernel of desired shape
x, y = np.meshgrid(np.arange(-shape[0]/2,shape[0]/2),np.arange(-shape[1]/2,shape[1]/2))
kernel = gaussian2d(x, y, stdev)
return kernel/kernel.sum()
def from_file_psf(filename):
"""
Get the Point-Spread-Function from an external FITS file.
Such PSF can be generated using the TinyTim standalone program by STSCI.
See:
[1] https://www.stsci.edu/hst/instrumentation/focus-and-pointing/focus/tiny-tim-hst-psf-modeling
[2] https://doi.org/10.1117/12.892762
----------
Inputs:
filename : str
----------
kernel : numpy.ndarray
Kernel containing the weights of the desired gaussian PSF.
"""
with fits.open(filename) as f:
psf = f[0].data
if (type(psf) != np.ndarray) or len(psf) != 2:
raise ValueError("Invalid PSF image in PrimaryHDU at {0:s}".format(filename))
#Return the normalized Point Spread Function
kernel = psf/psf.max()
return kernel

View File

@@ -5,7 +5,7 @@ prototypes :
- get_obs_data(infiles, data_folder) -> data_array, headers
Extract the observational data from FITS files.
- save_Stokes(I, Q, U, Stokes_cov, P, debiased_P, s_P, s_P_P, PA, s_PA, s_PA_P, headers, data_mask, filename, data_folder, return_hdul) -> ( HDUL_data )
Save computed polarimetry parameters to a single FITS file (and return HDUList).
"""
@@ -97,6 +97,8 @@ def save_Stokes(I_stokes, Q_stokes, U_stokes, Stokes_cov, P, debiased_P, s_P,
headers : header list
Headers of reference from which some keywords will be copied (CRVAL, CDELT,
INSTRUME, PROPOSID, TARGNAME, ORIENTAT, EXPTOT).
data_mask : numpy.ndarray
2D boolean array delimiting the data to work on.
filename : str
Name that will be given to the file on writing (will appear in header).
data_folder : str, optional

View File

@@ -2,11 +2,38 @@
Library functions for displaying information using matplotlib
prototypes :
- plot_obs(data_array, headers, shape, vmin, vmax, rectangle, savename, plots_folder)
Plots whole observation raw data in given display shape.
- plot_Stokes(Stokes, savename, plots_folder)
Plot the I/Q/U maps from the Stokes HDUList.
- polarization_map(Stokes, data_mask, rectangle, SNRp_cut, SNRi_cut, step_vec, savename, plots_folder, display) -> fig, ax
Plots polarization map of polarimetric parameters saved in an HDUList.
class align_maps(map, other_map, **kwargs)
Class to interactively align maps with different WCS.
class overplot_radio(align_maps)
Class inherited from align_maps to overplot radio data as contours.
class overplot_pol(align_maps)
Class inherited from align_maps to overplot UV polarization vectors on other maps.
class crop_map(hdul, fig, ax)
Class to interactively crop a region of interest of a HDUList.
class crop_Stokes(crop_map)
Class inherited from crop_map to work on polarization maps.
class image_lasso_selector(img, fig, ax)
Class to interactively select part of a map to work on.
class aperture(img, cdelt, radius, fig, ax)
Class to interactively simulate aperture integration.
class pol_map(Stokes, SNRp_cut, SNRi_cut, selection)
Class to interactively study polarization maps making use of the cropping and selecting tools.
""" """
from copy import deepcopy from copy import deepcopy
@@ -946,7 +973,8 @@ class crop_Stokes(crop_map):
def data_mask(self):
return self.hdul_crop[-1].data
class image_lasso_selector:
class image_lasso_selector(object):
def __init__(self, img, fig=None, ax=None):
"""
img must have shape (X, Y)
@@ -1012,7 +1040,7 @@ class image_lasso_selector:
self.on_close()
class aperture(object):
def __init__(self, img, cdelt=np.array([1.,1.]), radius=1., fig=None, ax=None):
"""
img must have shape (X, Y)
@@ -1194,6 +1222,7 @@ class pol_map(object):
self.data = self.Stokes[0].data
if self.selected:
self.selected = False
self.select_instance.update_mask()
self.region = deepcopy(self.select_instance.mask.astype(bool))
self.select_instance.displayed.remove()
for coll in self.select_instance.cont.collections[:]:
@@ -1206,11 +1235,6 @@ class pol_map(object):
self.region = None
self.select_instance = aperture(self.data, fig=self.fig, ax=self.ax, cdelt=self.wcs.wcs.cdelt, radius=s_aper_radius.val)
self.select_instance.circ.set_visible(True)
#k = 0
#while not self.select_instance.selected and k<60:
# self.fig.canvas.start_event_loop(timeout=1)
# k+=1
#select_aperture(event)
self.fig.canvas.draw_idle()
@@ -1617,7 +1641,10 @@ class pol_map(object):
if hasattr(self, 'cont'):
for coll in self.cont.collections:
try:
coll.remove()
except:
return
del self.cont
if fig is None:
fig = self.fig

View File

@@ -5,36 +5,38 @@ prototypes :
- bin_ndarray(ndarray, new_shape, operation) -> ndarray
Bins an ndarray to new_shape.
- crop_array(data_array, error_array, data_mask, step, null_val, inside, display, savename, plots_folder) -> crop_data_array, crop_error_array (, crop_mask), crop_headers
Homogeneously crop out null edges off a data array.
- deconvolve_array(data_array, headers, psf, FWHM, scale, shape, iterations, algo) -> deconvolved_data_array
Homogeneously deconvolve a data array using a chosen deconvolution algorithm.
- get_error(data_array, headers, error_array, data_mask, sub_shape, display, savename, plots_folder, return_background) -> data_array, error_array, headers (, background)
Compute the error (noise) on each image of the input array.
- rebin_array(data_array, error_array, headers, pxsize, scale, operation, data_mask) -> rebinned_data, rebinned_error, rebinned_headers, Dxy (, data_mask)
Homogeneously rebin a data array given a target pixel size in scale units.
- align_data(data_array, error_array, upsample_factor, ref_data, ref_center, return_shifts) -> data_array, error_array (, shifts, errors)
Align data_array on ref_data by cross-correlation.
- smooth_data(data_array, error_array, data_mask, headers, FWHM, scale, smoothing) -> smoothed_array, smoothed_error
Smooth data by convolving with a gaussian or by combining weighted images.
- polarizer_avg(data_array, error_array, data_mask, headers, FWHM, scale, smoothing) -> polarizer_array, polarizer_cov
Average images in data_array on each used polarizer filter and compute correlated errors.
- compute_Stokes(data_array, error_array, data_mask, headers, FWHM, scale, smoothing) -> I_stokes, Q_stokes, U_stokes, Stokes_cov
Compute Stokes parameters I, Q and U and their respective correlated errors from data_array.
- compute_pol(I_stokes, Q_stokes, U_stokes, Stokes_cov, headers) -> P, debiased_P, s_P, s_P_P, PA, s_PA, s_PA_P
Compute polarization degree (in %) and angle (in degree) and their respective errors.
- rotate_Stokes(I_stokes, Q_stokes, U_stokes, Stokes_cov, data_mask, headers, ang, SNRi_cut) -> I_stokes, Q_stokes, U_stokes, Stokes_cov, data_mask, headers
Rotate I, Q, U given an angle in degrees using scipy functions.
- rotate_data(data_array, error_array, data_mask, headers, ang) -> data_array, error_array, data_mask, headers
Rotate data before reduction given an angle in degrees using scipy functions.
"""
from copy import deepcopy
@@ -206,6 +208,10 @@ def crop_array(data_array, headers, error_array=None, data_mask=None, step=5, nu
the error in each pixel of the observation images in data_array.
If None, will be initialized to zeros.
Defaults to None.
data_mask : numpy.ndarray, optional
2D boolean array delimiting the data to work on.
If None, will be initialized with a full true mask.
Defaults to None.
step : int, optional
For images with straight edges, not all lines and columns need to be
browsed in order to have a good convex hull. Step value determines
@@ -345,9 +351,14 @@ def deconvolve_array(data_array, headers, psf='gaussian', FWHM=1., scale='px',
relevant values of 'psf' variable.
Defaults to (9,9).
iterations : int, optional
Number of iterations for iterative deconvolution algorithms. Acts as
a regularization of the process.
Defaults to 20.
algo : str, optional
Name of the deconvolution algorithm that will be used. Implemented
algorithms are the following : 'Wiener', 'Van-Cittert',
'One Step Gradient', 'Conjugate Gradient' and 'Richardson-Lucy'.
Defaults to 'Richardson-Lucy'.
----------
Returns:
deconv_array : numpy.ndarray
@@ -394,6 +405,15 @@ def get_error(data_array, headers, error_array=None, data_mask=None, sub_shape=N
Array containing the data to study (2D float arrays).
headers : header list
Headers associated with the images in data_array.
error_array : numpy.ndarray, optional
Array of images (2D floats, aligned and of the same shape) containing
the error in each pixel of the observation images in data_array.
If None, will be initialized to zeros.
Defaults to None.
data_mask : numpy.ndarray, optional
2D boolean array delimiting the data to work on.
If None, will be initialized with a full true mask.
Defaults to None.
sub_shape : tuple, optional
Shape of the sub-image to look for. Must be odd.
Defaults to 10% of input array.
@@ -587,6 +607,10 @@ def rebin_array(data_array, error_array, headers, pxsize, scale,
Set the way original components of the matrix are put together
between summing ('sum') and averaging ('average', 'avg', 'mean') them.
Defaults to 'sum'.
data_mask : numpy.ndarray, optional
2D boolean array delimiting the data to work on.
If None, will be initialized with a full true mask.
Defaults to None.
----------
Returns:
rebinned_data, rebinned_error : numpy.ndarray
@@ -595,6 +619,9 @@ def rebin_array(data_array, error_array, headers, pxsize, scale,
Updated headers corresponding to the images in rebinned_data.
Dxy : numpy.ndarray
Array containing the rebinning factor in each direction of the image.
data_mask : numpy.ndarray, optional
Updated 2D boolean array delimiting the data to work on.
Only returned if the input data_mask is not None.
""" """
# Check that all images are from the same instrument # Check that all images are from the same instrument
ref_header = headers[0] ref_header = headers[0]
@@ -716,6 +743,8 @@ def align_data(data_array, headers, error_array=None, upsample_factor=1.,
image with margins of value 0.
rescaled_error : numpy.ndarray
Array containing the errors on the aligned images in the rescaled array.
data_mask : numpy.ndarray
2D boolean array delimiting the data to work on.
shifts : numpy.ndarray
Array containing the pixel shifts on the x and y directions from
the reference image.
@@ -839,6 +868,8 @@ def smooth_data(data_array, error_array, data_mask, headers, FWHM=1.,
error_array : numpy.ndarray
Array of images (2D floats, aligned and of the same shape) containing
the error in each pixel of the observation images in data_array.
data_mask : numpy.ndarray
2D boolean array delimiting the data to work on.
headers : header list
List of headers corresponding to the images in data_array.
FWHM : float, optional
@@ -942,6 +973,8 @@ def polarizer_avg(data_array, error_array, data_mask, headers, FWHM=None,
error_array : numpy.ndarray
Array of images (2D floats, aligned and of the same shape) containing
the error in each pixel of the observation images in data_array.
data_mask : numpy.ndarray
2D boolean array delimiting the data to work on.
headers : header list
List of headers corresponding to the images in data_array.
FWHM : float, optional
@@ -1089,6 +1122,8 @@ def compute_Stokes(data_array, error_array, data_mask, headers,
error_array : numpy.ndarray
Array of images (2D floats, aligned and of the same shape) containing
the error in each pixel of the observation images in data_array.
data_mask : numpy.ndarray
2D boolean array delimiting the data to work on.
headers : header list
List of headers corresponding to the images in data_array.
FWHM : float, optional
@@ -1376,6 +1411,8 @@ def rotate_Stokes(I_stokes, Q_stokes, U_stokes, Stokes_cov, data_mask, headers,
+45/-45deg linear polarization intensity
Stokes_cov : numpy.ndarray
Covariance matrix of the Stokes parameters I, Q, U.
data_mask : numpy.ndarray
2D boolean array delimiting the data to work on.
headers : header list
List of headers corresponding to the reduced images.
ang : float
@@ -1401,6 +1438,8 @@ def rotate_Stokes(I_stokes, Q_stokes, U_stokes, Stokes_cov, data_mask, headers,
new_headers : header list
Updated list of headers corresponding to the reduced images accounting
for the new orientation angle.
new_data_mask : numpy.ndarray
Updated 2D boolean array delimiting the data to work on.
""" """
#Apply cuts #Apply cuts
if not(SNRi_cut is None): if not(SNRi_cut is None):
@@ -1520,7 +1559,8 @@ def rotate_Stokes(I_stokes, Q_stokes, U_stokes, Stokes_cov, data_mask, headers,
header['PA_int_err'] = (PA_diluted_err, 'Integrated polarization angle error')
return new_I_stokes, new_Q_stokes, new_U_stokes, new_Stokes_cov, new_data_mask, new_headers
def rotate_data(data_array, error_array, data_mask, headers, ang):
"""
@@ -1533,6 +1573,8 @@ def rotate_data(data_array, error_array, data_mask, headers, ang):
Array of images (2D floats) to be rotated by angle ang.
error_array : numpy.ndarray
Array of errors associated with the images in data_array.
data_mask : numpy.ndarray
2D boolean array delimiting the data to work on.
headers : header list
List of headers corresponding to the reduced images.
ang : float
@@ -1547,6 +1589,8 @@ def rotate_data(data_array, error_array, data_mask, headers, ang):
new_headers : header list
Updated list of headers corresponding to the reduced images accounting
for the new orientation angle.
new_data_mask : numpy.ndarray
Updated 2D boolean array delimiting the data to work on.
""" """
#Rotate I_stokes, Q_stokes, U_stokes using rotation matrix #Rotate I_stokes, Q_stokes, U_stokes using rotation matrix
alpha = ang*np.pi/180. alpha = ang*np.pi/180.
@@ -1609,4 +1653,4 @@ def rotate_data(data_array, error_array, data_mask, headers, ang):
new_headers.append(new_header)
globals()['theta'] = theta - alpha
return new_data_array, new_error_array, new_data_mask, new_headers

View File

@@ -1,256 +0,0 @@
from pylab import *
import numpy as np
import matplotlib.pyplot as plt
from astropy.io import fits
from astropy.wcs import WCS
from aplpy import FITSFigure
import scipy.ndimage
import os as os
plt.close('all')
def bin_ndarray(ndarray, new_shape, operation='sum'):
"""
Bins an ndarray in all axes based on the target shape, by summing or
averaging.
Number of output dimensions must match number of input dimensions.
Example
-------
>>> m = np.arange(0,100,1).reshape((10,10))
>>> n = bin_ndarray(m, new_shape=(5,5), operation='sum')
>>> print(n)
[[ 22 30 38 46 54]
[102 110 118 126 134]
[182 190 198 206 214]
[262 270 278 286 294]
[342 350 358 366 374]]
"""
if not operation.lower() in ['sum', 'mean', 'average', 'avg']:
raise ValueError("Operation not supported.")
if ndarray.ndim != len(new_shape):
raise ValueError("Shape mismatch: {} -> {}".format(ndarray.shape,
new_shape))
compression_pairs = [(d, c//d) for d,c in zip(new_shape,
ndarray.shape)]
flattened = [l for p in compression_pairs for l in p]
ndarray = ndarray.reshape(flattened)
for i in range(len(new_shape)):
if operation.lower() == "sum":
ndarray = ndarray.sum(-1*(i+1))
elif operation.lower() in ["mean", "average", "avg"]:
ndarray = ndarray.mean(-1*(i+1))
return ndarray
def plots(ax,data,vmax,vmin):
ax.imshow(data,vmax=vmax,vmin=vmin,origin='lower')
### User input
infiles = ['x274020at.c0f.fits','x274020bt.c0f.fits','x274020ct.c0f.fits','x274020dt.c0f.fits',
'x274020et.c0f.fits','x274020ft.c0f.fits','x274020gt.c0f.fits','x274020ht.c0f.fits',
'x274020it.c0f.fits']
#Centroids
#The centroids should be estimated by cross-correlating the images.
#Here I used the position of the central source for each file as the reference pixel position.
ycen_0 = [304,306,303,296,295,295,294,305,304]
xcen_0 = [273,274,273,276,274,274,274,272,271]
#size, in pixels, of the FOV centered in x_cen_0,y_cen_0
Dx = 200
Dy = 200
#set the new image size (Dxy x Dxy pixels binning)
Dxy = 5
new_shape = (Dx//Dxy,Dy//Dxy)
#figures
#test alignment
vmin = 0
vmax = 6
font_size=40.0
label_size=20.
lw = 3.
#pol. map
SNRp_cut = 3 #P measurements with SNR>3
SNRi_cut = 30 #I measurements with SNR>30, which implies an uncertainty in P of 4.7%.
scalevec = 0.05 #length of vectors in pol. map
step_vec = 1 #plot all vectors in the array. if step_vec = 2, then every other vector will be plotted
vec_legend = 10 #% pol for legend
figname = 'NGC1068_FOC.png'
### SCRIPT ###
### Step 1. Check input images before data reduction
#this step is very simplistic.
#Here I used the position of the central source for each file as the
#reference pixel position.
#The centroids should be estimated by cross-correlating the images,
#and compared with the simplistic approach of using the peak pixel of the
#object as the reference pixel.
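One way to do that cross-correlation (an illustrative possibility using scikit-image, with sub-pixel accuracy through upsample_factor):

from astropy.io import fits
from skimage.registration import phase_cross_correlation

ref = fits.getdata(infiles[0])
for name in infiles[1:]:
    # Sub-pixel (dy, dx) shift of each exposure relative to the first one.
    shift, _, _ = phase_cross_correlation(ref, fits.getdata(name), upsample_factor=10)
    print(name, shift)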
fig,axs = plt.subplots(3,3,figsize=(30,30),dpi=200,sharex=True,sharey=True)
for jj, a in enumerate(axs.flatten()):
img = fits.open(infiles[jj])
ima = img[0].data
ima = ima[ycen_0[jj]-Dy:ycen_0[jj]+Dy,xcen_0[jj]-Dx:xcen_0[jj]+Dx]
ima = bin_ndarray(ima,new_shape=new_shape,operation='sum') #binning
exptime = img[0].header['EXPTIME']
fil = img[0].header['FILTNAM1']
ima = ima/exptime
globals()['ima_%s' % jj] = ima
#plots
plots(a,ima,vmax=vmax,vmin=vmin)
#position of centroid
a.plot([ima.shape[1]/2,ima.shape[1]/2],[0,ima.shape[0]-1],lw=1,color='black')
a.plot([0,ima.shape[1]-1],[ima.shape[1]/2,ima.shape[1]/2],lw=1,color='black')
a.text(2,2,infiles[jj][0:8],color='white',fontsize=10)
a.text(2,5,fil,color='white',fontsize=30)
a.text(ima.shape[1]-20,1,exptime,color='white',fontsize=20)
fig.subplots_adjust(hspace=0,wspace=0)
fig.savefig('test_alignment.png',dpi=300)
os.system('open test_alignment.png')
### Step 2. Average all images for a single polarizer to have them in the same units (e/s).
pol0 = (ima_0 + ima_1 + ima_2)/3.
pol60 = (ima_3 + ima_4 + ima_5 + ima_6)/4.
pol120 = (ima_7 + ima_8)/2.
fig1,(ax1,ax2,ax3) = plt.subplots(1,3,figsize=(26,8),dpi=200)
CF = ax1.imshow(pol0,vmin=vmin,vmax=vmax,origin='lower')
cbar = plt.colorbar(CF,ax=ax1)
cbar.ax.tick_params(labelsize=20)
ax1.tick_params(axis='both', which='major', labelsize=20)
ax1.text(2,2,'POL0',color='white',fontsize=10)
CF = ax2.imshow(pol60,vmin=vmin,vmax=vmax,origin='lower')
cbar = plt.colorbar(CF,ax=ax2)
cbar.ax.tick_params(labelsize=20)
ax2.tick_params(axis='both', which='major', labelsize=20)
ax2.text(2,2,'POL60',color='white',fontsize=10)
CF = ax3.imshow(pol120,vmin=vmin,vmax=vmax,origin='lower')
cbar = plt.colorbar(CF,ax=ax3)
cbar.ax.tick_params(labelsize=20)
ax3.tick_params(axis='both', which='major', labelsize=20)
ax3.text(2,2,'POL120',color='white',fontsize=10)
fig1.savefig('test_combinePol.png',dpi=300)
os.system('open test_combinePol.png')
### Step 3. Compute Stokes IQU, P, PA, PI
#Stokes parameters
I_stokes = (2./3.)*(pol0 + pol60 + pol120)
Q_stokes = (2./3.)*(2*pol0 - pol60 - pol120)
U_stokes = (2./np.sqrt(3.))*(pol60 - pol120)
#Remove nan
I_stokes[np.isnan(I_stokes)]=0.
Q_stokes[np.isnan(Q_stokes)]=0.
U_stokes[np.isnan(U_stokes)]=0.
#Polarimetry
PI = np.sqrt(Q_stokes*Q_stokes + U_stokes*U_stokes)
P = PI/I_stokes*100
PA = 0.5*arctan2(U_stokes,Q_stokes)*180./np.pi+90
s_P = np.sqrt(2.)*(I_stokes)**(-0.5)
s_PA = s_P/(P/100.)*180./np.pi
fig2,((ax1,ax2,ax3),(ax4,ax5,ax6)) = plt.subplots(2,3,figsize=(40,20),dpi=200)
CF = ax1.imshow(I_stokes,origin='lower')
cbar = plt.colorbar(CF,ax=ax1)
cbar.ax.tick_params(labelsize=20)
ax1.tick_params(axis='both', which='major', labelsize=20)
ax1.text(2,2,'I',color='white',fontsize=10)
CF = ax2.imshow(Q_stokes,origin='lower')
cbar = plt.colorbar(CF,ax=ax2)
cbar.ax.tick_params(labelsize=20)
ax2.tick_params(axis='both', which='major', labelsize=20)
ax2.text(2,2,'Q',color='white',fontsize=10)
CF = ax3.imshow(U_stokes,origin='lower')
cbar = plt.colorbar(CF,ax=ax3)
cbar.ax.tick_params(labelsize=20)
ax3.tick_params(axis='both', which='major', labelsize=20)
ax3.text(2,2,'U',color='white',fontsize=10)
v = np.linspace(0,40,50)
CF = ax4.imshow(P,origin='lower',vmin=0,vmax=40)
cbar = plt.colorbar(CF,ax=ax4)
cbar.ax.tick_params(labelsize=20)
ax4.tick_params(axis='both', which='major', labelsize=20)
ax4.text(2,2,'P',color='white',fontsize=10)
CF = ax5.imshow(PA,origin='lower',vmin=0,vmax=180)
cbar = plt.colorbar(CF,ax=ax5)
cbar.ax.tick_params(labelsize=20)
ax5.tick_params(axis='both', which='major', labelsize=20)
ax5.text(2,2,'PA',color='white',fontsize=10)
CF = ax6.imshow(PI,origin='lower')
cbar = plt.colorbar(CF,ax=ax6)
cbar.ax.tick_params(labelsize=20)
ax6.tick_params(axis='both', which='major', labelsize=20)
ax6.text(2,2,'PI',color='white',fontsize=10)
fig2.savefig('test_Stokes.png',dpi=300)
os.system('open test_Stokes.png')
### Step 4. Binning and smoothing
#Images can be binned and smoothed to improve SNR. This step can also be done
#using the PolX images.
### Step 5. Rotate images to have North up
#Images need to be reprojected to have North up.
#this procedure implies rotating the Stokes Q and U using a rotation matrix
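A short sketch of that step (the sign convention of the Q/U mixing and the interpolation order are assumptions):

import numpy as np
from scipy.ndimage import rotate

ang = 30.                      # hypothetical rotation angle in degrees
alpha = 2. * np.deg2rad(ang)   # Q and U mix at twice the rotation angle
I_rot = rotate(I_stokes, ang, reshape=False, order=1)
Q_spat = rotate(Q_stokes, ang, reshape=False, order=1)
U_spat = rotate(U_stokes, ang, reshape=False, order=1)
Q_rot = np.cos(alpha) * Q_spat + np.sin(alpha) * U_spat
U_rot = -np.sin(alpha) * Q_spat + np.cos(alpha) * U_spat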
### STEP 6. image to FITS with updated WCS
new_wcs = WCS(naxis=2)
new_wcs.wcs.crpix = [I_stokes.shape[0]/2, I_stokes.shape[1]/2]
new_wcs.wcs.crval = [img[0].header['CRVAL1'], img[0].header['CRVAL2']]
new_wcs.wcs.cunit = ["deg", "deg"]
new_wcs.wcs.ctype = ["RA---TAN", "DEC--TAN"]
new_wcs.wcs.cdelt = [img[0].header['CD1_1']*Dxy, img[0].header['CD1_2']*Dxy]
#hdu_ori = WCS(img[0])
stkI=fits.PrimaryHDU(data=I_stokes,header=new_wcs.to_header())
pol=fits.PrimaryHDU(data=P,header=new_wcs.to_header())
pang=fits.PrimaryHDU(data=PA,header=new_wcs.to_header())
pol_err=fits.PrimaryHDU(data=s_P,header=new_wcs.to_header())
pang_err=fits.PrimaryHDU(data=s_PA,header=new_wcs.to_header())
### STEP 7. polarization map
#quality cuts
pxscale = stkI.header['CDELT1']
#apply quality cuts
SNRp = pol.data/pol_err.data
pol.data[SNRp < SNRp_cut] = np.nan
SNRi = stkI.data/np.std(stkI.data[0:10,0:10])
pol.data[SNRi < SNRi_cut] = np.nan
fig = plt.figure(figsize=(11,10))
gc = FITSFigure(stkI,figure=fig)
gc.show_contour(np.log10(SNRi),levels=np.linspace(np.log10(SNRi_cut),np.max(np.log10(SNRi)),20),\
filled=True,cmap='magma')
gc.show_vectors(pol,pang,scale=scalevec,step=step_vec,color='white',linewidth=1.0)
fig.savefig(figname,dpi=300)
os.system('open '+figname)