clean and complete docstrings
@@ -13,8 +13,8 @@ except ImportError:
import numpy as np


-def _upsampled_dft(data, upsampled_region_size,
-                   upsample_factor=1, axis_offsets=None):
+def _upsampled_dft(data, upsampled_region_size, upsample_factor=1,
+                   axis_offsets=None):
    """
    Upsampled DFT by matrix multiplication.
    This code is intended to provide the same result as if the following
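Note: this helper mirrors the matrix-multiplication DFT upsampling that scikit-image uses for subpixel image registration. A minimal sketch of the workflow such a helper supports, assuming scikit-image is installed (the image variables are invented for illustration):

import numpy as np
from skimage.registration import phase_cross_correlation

rng = np.random.default_rng(0)
reference = rng.normal(size=(64, 64))             # stand-in reference image
moving = np.roll(reference, (3, 5), axis=(0, 1))  # shifted copy

# upsample_factor triggers the upsampled-DFT refinement implemented by
# helpers like _upsampled_dft above.
shift, error, diffphase = phase_cross_correlation(reference, moving,
                                                  upsample_factor=10)
print(shift)  # approximately [-3., -5.]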
@@ -1,5 +1,30 @@
"""
Library functions for the implementation of various deconvolution algorithms.

prototypes :
    - gaussian_psf(FWHM, shape) -> kernel
        Return the normalized gaussian point spread function over a given kernel shape.

    - from_file_psf(filename) -> kernel
        Get the point spread function from an external FITS file.

    - wiener(image, psf, alpha, clip) -> im_deconv
        Implement simplified Wiener filtering.

    - van_cittert(image, psf, alpha, iterations, clip, filter_epsilon) -> im_deconv
        Implement the Van Cittert iterative algorithm.

    - richardson_lucy(image, psf, iterations, clip, filter_epsilon) -> im_deconv
        Implement the Richardson-Lucy iterative algorithm.

    - one_step_gradient(image, psf, iterations, clip, filter_epsilon) -> im_deconv
        Implement the one-step gradient iterative algorithm.

    - conjgrad(image, psf, alpha, error, iterations) -> im_deconv
        Implement the Conjugate Gradient algorithm.

    - deconvolve_im(image, psf, alpha, error, iterations, clip, filter_epsilon, algo) -> im_deconv
        Prepare data for deconvolution using the specified algorithm.
"""

import numpy as np
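For orientation, the prototypes above compose into a short workflow. A hypothetical sketch, using only the function names and defaults listed in the docstring (the FITS filename is invented for illustration):

from astropy.io import fits

# Read an observation, build a 9x9 gaussian PSF with a 2-pixel FWHM,
# and deconvolve with the default Richardson-Lucy algorithm.
image = fits.getdata("observation.fits")  # hypothetical input file
psf = gaussian_psf(FWHM=2., shape=(9, 9))
im_deconv = deconvolve_im(image, psf, iterations=20, clip=True,
                          algo='richardson')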
@@ -18,53 +43,108 @@ def abs2(x):

def zeropad(arr, shape):
-    """Zero-pad array ARR to given shape.
-
-    The contents of ARR is approximately centered in the result."""
+    """
+    Zero-pad array ARR to given shape.
+    The contents of ARR are approximately centered in the result.
+    """
    rank = arr.ndim
    if len(shape) != rank:
        raise ValueError("bad number of dimensions")
    diff = np.asarray(shape) - np.asarray(arr.shape)
    if diff.min() < 0:
        raise ValueError("output dimensions must be larger or equal input dimensions")
    offset = diff//2
    z = np.zeros(shape, dtype=arr.dtype)
    if rank == 1:
        i0 = offset[0]; n0 = i0 + arr.shape[0]
        z[i0:n0] = arr
    elif rank == 2:
        i0 = offset[0]; n0 = i0 + arr.shape[0]
        i1 = offset[1]; n1 = i1 + arr.shape[1]
        z[i0:n0, i1:n1] = arr
    elif rank == 3:
        i0 = offset[0]; n0 = i0 + arr.shape[0]
        i1 = offset[1]; n1 = i1 + arr.shape[1]
        i2 = offset[2]; n2 = i2 + arr.shape[2]
        z[i0:n0, i1:n1, i2:n2] = arr
    elif rank == 4:
        i0 = offset[0]; n0 = i0 + arr.shape[0]
        i1 = offset[1]; n1 = i1 + arr.shape[1]
        i2 = offset[2]; n2 = i2 + arr.shape[2]
        i3 = offset[3]; n3 = i3 + arr.shape[3]
        z[i0:n0, i1:n1, i2:n2, i3:n3] = arr
    elif rank == 5:
        i0 = offset[0]; n0 = i0 + arr.shape[0]
        i1 = offset[1]; n1 = i1 + arr.shape[1]
        i2 = offset[2]; n2 = i2 + arr.shape[2]
        i3 = offset[3]; n3 = i3 + arr.shape[3]
        i4 = offset[4]; n4 = i4 + arr.shape[4]
        z[i0:n0, i1:n1, i2:n2, i3:n3, i4:n4] = arr
    elif rank == 6:
        i0 = offset[0]; n0 = i0 + arr.shape[0]
        i1 = offset[1]; n1 = i1 + arr.shape[1]
        i2 = offset[2]; n2 = i2 + arr.shape[2]
        i3 = offset[3]; n3 = i3 + arr.shape[3]
        i4 = offset[4]; n4 = i4 + arr.shape[4]
        i5 = offset[5]; n5 = i5 + arr.shape[5]
        z[i0:n0, i1:n1, i2:n2, i3:n3, i4:n4, i5:n5] = arr
    else:
        raise ValueError("too many dimensions")
    return z
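The rank-by-rank ladder above only handles up to six dimensions; the same centering logic collapses into a tuple of slices for arbitrary rank. A minimal sketch of that equivalent alternative (not the module's code):

import numpy as np

def zeropad_nd(arr, shape):
    """Zero-pad arr to `shape`, approximately centered, for any rank."""
    if len(shape) != arr.ndim:
        raise ValueError("bad number of dimensions")
    diff = np.asarray(shape) - np.asarray(arr.shape)
    if diff.min() < 0:
        raise ValueError("output dimensions must be larger or equal input dimensions")
    offset = diff // 2
    z = np.zeros(shape, dtype=arr.dtype)
    # One slice per axis replaces the per-rank branches.
    z[tuple(slice(o, o + n) for o, n in zip(offset, arr.shape))] = arr
    return z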

def gaussian2d(x, y, sigma):
    return np.exp(-(x**2+y**2)/(2*sigma**2))/(2*np.pi*sigma**2)


def gaussian_psf(FWHM=1., shape=(5,5)):
    """
    Define the gaussian Point-Spread-Function of chosen shape and FWHM.
    ----------
    Inputs:
    FWHM : float, optional
        The Full Width at Half Maximum of the desired gaussian function for
        the PSF, in pixel increments.
        Defaults to 1.
    shape : tuple, optional
        The shape of the PSF kernel. Must be of dimension 2.
        Defaults to (5,5).
    ----------
    Returns:
    kernel : numpy.ndarray
        Kernel containing the weights of the desired gaussian PSF.
    """
    # Compute the standard deviation from the FWHM
    stdev = FWHM/(2.*np.sqrt(2.*np.log(2.)))

    # Create a kernel of the desired shape
    x, y = np.meshgrid(np.arange(-shape[0]/2, shape[0]/2),
                       np.arange(-shape[1]/2, shape[1]/2))
    kernel = gaussian2d(x, y, stdev)

    return kernel/kernel.sum()
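The conversion above uses the standard Gaussian relation FWHM = 2*sqrt(2*ln 2)*sigma (about 2.3548*sigma). A quick numerical sanity check:

import numpy as np

FWHM = 2.5
stdev = FWHM / (2. * np.sqrt(2. * np.log(2.)))
# The Gaussian falls to half its peak value at a radius of FWHM/2:
half_height = np.exp(-(FWHM / 2.)**2 / (2. * stdev**2))
print(half_height)  # 0.5 up to floating-point error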

def from_file_psf(filename):
    """
    Get the Point-Spread-Function from an external FITS file.
    Such a PSF can be generated using the TinyTim standalone program by STScI.
    See:
    [1] https://www.stsci.edu/hst/instrumentation/focus-and-pointing/focus/tiny-tim-hst-psf-modeling
    [2] https://doi.org/10.1117/12.892762
    ----------
    Inputs:
    filename : str
        Path to the FITS file containing the PSF in its PrimaryHDU.
    ----------
    Returns:
    kernel : numpy.ndarray
        Kernel containing the weights of the PSF read from the file.
    """
    with fits.open(filename) as f:
        psf = f[0].data
        if not isinstance(psf, np.ndarray) or psf.ndim != 2:
            raise ValueError("Invalid PSF image in PrimaryHDU at {0:s}".format(filename))
    # Return the PSF normalized to unit peak
    kernel = psf/psf.max()
    return kernel


def wiener(image, psf, alpha=0.1, clip=True):
@@ -88,9 +168,6 @@ def wiener(image, psf, alpha=0.1, clip=True):
    Returns:
    im_deconv : ndarray
        The deconvolved image.
-    ----------
-    References:
-    [1]
    """
    float_type = np.promote_types(image.dtype, np.float32)
    image = image.astype(float_type, copy=False)
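The removed reference entry was empty; for context, simplified Wiener filtering is commonly written as a regularized inverse filter in the Fourier domain. A minimal sketch of that textbook form, assuming the PSF has been zero-padded to the image shape (an illustration, not necessarily the module's exact formulation):

import numpy as np

def wiener_sketch(image, psf, alpha=0.1):
    """Regularized inverse filter: H* G / (|H|^2 + alpha) in Fourier space."""
    H = np.fft.fft2(np.fft.ifftshift(psf))       # transfer function of the PSF
    G = np.fft.fft2(image)
    F = np.conj(H) * G / (np.abs(H)**2 + alpha)  # alpha regularizes noise amplification
    return np.real(np.fft.ifft2(F))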
@@ -135,9 +212,6 @@ def van_cittert(image, psf, alpha=0.1, iterations=20, clip=True, filter_epsilon=
    Returns:
    im_deconv : ndarray
        The deconvolved image.
-    ----------
-    References
-    [1]
    """
    float_type = np.promote_types(image.dtype, np.float32)
    image = image.astype(float_type, copy=False)
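Likewise for Van Cittert: the classic iteration adds a fraction alpha of the current residual at each step. A minimal sketch under the same padding assumption:

import numpy as np
from scipy.signal import fftconvolve

def van_cittert_sketch(image, psf, alpha=0.1, iterations=20):
    """Classic Van Cittert iteration: f_{k+1} = f_k + alpha * (g - h*f_k)."""
    f = image.copy()
    for _ in range(iterations):
        residual = image - fftconvolve(f, psf, mode='same')
        f = f + alpha * residual
    return f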
@@ -231,9 +305,6 @@ def one_step_gradient(image, psf, iterations=20, clip=True, filter_epsilon=None)
    Returns:
    im_deconv : ndarray
        The deconvolved image.
-    ----------
-    References
-    [1]
    """
    float_type = np.promote_types(image.dtype, np.float32)
    image = image.astype(float_type, copy=False)
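The one-step gradient (Landweber) iteration differs from Van Cittert by correlating the residual with the flipped PSF. A minimal sketch; note the module's signature carries no explicit step size, so the alpha here is purely illustrative:

import numpy as np
from scipy.signal import fftconvolve

def one_step_gradient_sketch(image, psf, alpha=0.1, iterations=20):
    """Landweber iteration: f_{k+1} = f_k + alpha * h^T (g - h*f_k)."""
    psf_T = psf[::-1, ::-1]  # flipped PSF implements the adjoint of the blur
    f = image.copy()
    for _ in range(iterations):
        residual = image - fftconvolve(f, psf, mode='same')
        f = f + alpha * fftconvolve(residual, psf_T, mode='same')
    return f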
@@ -282,9 +353,6 @@ def conjgrad(image, psf, alpha=0.1, error=None, iterations=20):
    Returns:
    im_deconv : ndarray
        The deconvolved image.
-    ----------
-    References:
-    [1]
    """
    float_type = np.promote_types(image.dtype, np.float32)
    image = image.astype(float_type, copy=False)
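Conjugate Gradient deconvolution typically solves the Tikhonov-regularized normal equations (H^T H + alpha I) f = H^T g. A compact sketch with the blur applied by FFT convolution (illustrative, not the module's exact implementation):

import numpy as np
from scipy.signal import fftconvolve

def conjgrad_sketch(image, psf, alpha=0.1, iterations=20):
    """CG on (H^T H + alpha I) f = H^T g for the blur operator H."""
    psf_T = psf[::-1, ::-1]

    def A(f):  # symmetric positive-definite normal-equations operator
        return (fftconvolve(fftconvolve(f, psf, mode='same'), psf_T, mode='same')
                + alpha * f)

    b = fftconvolve(image, psf_T, mode='same')
    f = np.zeros_like(image)
    r = b - A(f)
    p = r.copy()
    rs = np.sum(r * r)
    for _ in range(iterations):
        Ap = A(p)
        a = rs / np.sum(p * Ap)
        f = f + a * p
        r = r - a * Ap
        rs_new = np.sum(r * r)
        p = r + (rs_new / rs) * p
        rs = rs_new
    return f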
@@ -402,8 +470,8 @@ def conjgrad(image, psf, alpha=0.1, error=None, iterations=20):
def deconvolve_im(image, psf, alpha=0.1, error=None, iterations=20, clip=True,
                  filter_epsilon=None, algo='richardson'):
    """
-    Prepare an image for deconvolution using Richardson-Lucy algorithm and
-    return results.
+    Prepare an image for deconvolution using a chosen algorithm and return
+    results.
    ----------
    Inputs:
    image : numpy.ndarray

@@ -466,54 +534,3 @@ def deconvolve_im(image, psf, alpha=0.1, error=None, iterations=20, clip=True,
    im_deconv = pxmax*norm_deconv

    return im_deconv
-
-
-def gaussian2d(x,y,sigma):
-    return np.exp(-(x**2+y**2)/(2*sigma**2))/(2*np.pi*sigma**2)
-
-def gaussian_psf(FWHM=1., shape=(5,5)):
-    """
-    Define the gaussian Point-Spread-Function of chosen shape and FWHM.
-    ----------
-    Inputs:
-    FWHM : float, optional
-        The Full Width at Half Maximum of the desired gaussian function for the
-        PSF in pixel increments.
-        Defaults to 1.
-    shape : tuple, optional
-        The shape of the PSF kernel. Must be of dimension 2.
-        Defaults to (5,5).
-    ----------
-    Returns:
-    kernel : numpy.ndarray
-        Kernel containing the weights of the desired gaussian PSF.
-    """
-    # Compute standard deviation from FWHM
-    stdev = FWHM/(2.*np.sqrt(2.*np.log(2.)))
-
-    # Create kernel of desired shape
-    x, y = np.meshgrid(np.arange(-shape[0]/2,shape[0]/2),np.arange(-shape[1]/2,shape[1]/2))
-    kernel = gaussian2d(x, y, stdev)
-
-    return kernel/kernel.sum()
-
-def from_file_psf(filename):
-    """
-    Get the Point-Spread-Function from an external FITS file.
-    Such PSF can be generated using the TinyTim standalone program by STSCI.
-    See:
-    [1] https://www.stsci.edu/hst/instrumentation/focus-and-pointing/focus/tiny-tim-hst-psf-modeling
-    [2] https://doi.org/10.1117/12.892762
-    ----------
-    Inputs:
-    filename : str
-    ----------
-    kernel : numpy.ndarray
-        Kernel containing the weights of the desired gaussian PSF.
-    """
-    with fits.open(filename) as f:
-        psf = f[0].data
-        if (type(psf) != np.ndarray) or len(psf) != 2:
-            raise ValueError("Invalid PSF image in PrimaryHDU at {0:s}".format(filename))
-    #Return the normalized Point Spread Function
-    kernel = psf/psf.max()
-    return kernel
@@ -5,7 +5,7 @@ prototypes :
    - get_obs_data(infiles, data_folder) -> data_array, headers
        Extract the observational data from FITS files.

-    - save_Stokes(I, Q, U, Stokes_cov, P, debiased_P, s_P, s_P_P, PA, s_PA, s_PA_P, ref_header, filename, data_folder, return_hdul) -> ( HDUL_data )
+    - save_Stokes(I, Q, U, Stokes_cov, P, debiased_P, s_P, s_P_P, PA, s_PA, s_PA_P, headers, data_mask, filename, data_folder, return_hdul) -> ( HDUL_data )
        Save computed polarimetry parameters to a single FITS file (and return the HDUList).
"""

@@ -97,6 +97,8 @@ def save_Stokes(I_stokes, Q_stokes, U_stokes, Stokes_cov, P, debiased_P, s_P,
    headers : header list
        Headers of reference from which some keywords will be copied (CRVAL,
        CDELT, INSTRUME, PROPOSID, TARGNAME, ORIENTAT, EXPTOT).
+    data_mask : numpy.ndarray
+        2D boolean array delimiting the data to work on.
    filename : str
        Name that will be given to the file on writing (will appear in header).
    data_folder : str, optional
@@ -2,11 +2,38 @@
Library functions for displaying information using matplotlib.

prototypes :
-    - plot_obs(data_array, headers, shape, vmin, vmax, savename, plots_folder)
-        Plots whole observation raw data in given display shape
+    - plot_obs(data_array, headers, shape, vmin, vmax, rectangle, savename, plots_folder)
+        Plot the whole observation raw data in a given display shape.

    - plot_Stokes(Stokes, savename, plots_folder)
        Plot the I/Q/U maps from the Stokes HDUList.

-    - polarization_map(Stokes_hdul, SNRp_cut, SNRi_cut, step_vec, savename, plots_folder, display)
-        Plots polarization map of polarimetric parameters saved in an HDUList
+    - polarization_map(Stokes, data_mask, rectangle, SNRp_cut, SNRi_cut, step_vec, savename, plots_folder, display) -> fig, ax
+        Plot the polarization map of the polarimetric parameters saved in an HDUList.

    class align_maps(map, other_map, **kwargs)
        Class to interactively align maps with different WCS.

    class overplot_radio(align_maps)
        Class inherited from align_maps to overplot radio data as contours.

    class overplot_pol(align_maps)
        Class inherited from align_maps to overplot UV polarization vectors on other maps.

    class crop_map(hdul, fig, ax)
        Class to interactively crop a region of interest of an HDUList.

    class crop_Stokes(crop_map)
        Class inherited from crop_map to work on polarization maps.

    class image_lasso_selector(img, fig, ax)
        Class to interactively select part of a map to work on.

    class aperture(img, cdelt, radius, fig, ax)
        Class to interactively simulate aperture integration.

    class pol_map(Stokes, SNRp_cut, SNRi_cut, selection)
        Class to interactively study polarization maps using the cropping and selection tools.
"""
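A hypothetical invocation of the two plotting prototypes above (the filename and argument values are invented for illustration, and optional arguments such as data_mask and rectangle are left at their defaults):

from astropy.io import fits

Stokes = fits.open("target_stokes.fits")  # hypothetical reduced product
plot_Stokes(Stokes, savename="target_IQU", plots_folder="plots/")
fig, ax = polarization_map(Stokes, SNRp_cut=3., SNRi_cut=30., step_vec=1,
                           savename="target_polmap", plots_folder="plots/")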
from copy import deepcopy

@@ -946,7 +973,8 @@ class crop_Stokes(crop_map):
    def data_mask(self):
        return self.hdul_crop[-1].data

-class image_lasso_selector:
+class image_lasso_selector(object):
    def __init__(self, img, fig=None, ax=None):
        """
        img must have shape (X, Y)

@@ -1012,7 +1040,7 @@ class image_lasso_selector:
        self.on_close()


-class aperture:
+class aperture(object):
    def __init__(self, img, cdelt=np.array([1.,1.]), radius=1., fig=None, ax=None):
        """
        img must have shape (X, Y)
@@ -1119,7 +1147,7 @@ class pol_map(object):
    """
    Class to interactively study polarization maps.
    """
-    def __init__(self,Stokes, SNRp_cut=3., SNRi_cut=30., selection=None):
+    def __init__(self, Stokes, SNRp_cut=3., SNRi_cut=30., selection=None):

        if isinstance(Stokes, str):
            Stokes = fits.open(Stokes)

@@ -1194,6 +1222,7 @@ class pol_map(object):
        self.data = self.Stokes[0].data
        if self.selected:
            self.selected = False
+            self.select_instance.update_mask()
            self.region = deepcopy(self.select_instance.mask.astype(bool))
            self.select_instance.displayed.remove()
            for coll in self.select_instance.cont.collections[:]:

@@ -1206,11 +1235,6 @@ class pol_map(object):
            self.region = None
        self.select_instance = aperture(self.data, fig=self.fig, ax=self.ax, cdelt=self.wcs.wcs.cdelt, radius=s_aper_radius.val)
        self.select_instance.circ.set_visible(True)
-        #k = 0
-        #while not self.select_instance.selected and k<60:
-        #    self.fig.canvas.start_event_loop(timeout=1)
-        #    k+=1
-        #select_aperture(event)

        self.fig.canvas.draw_idle()

@@ -1617,7 +1641,10 @@ class pol_map(object):

        if hasattr(self, 'cont'):
            for coll in self.cont.collections:
-                coll.remove()
+                try:
+                    coll.remove()
+                except:
+                    return
            del self.cont
        if fig is None:
            fig = self.fig
@@ -5,36 +5,38 @@ prototypes :
    - bin_ndarray(ndarray, new_shape, operation) -> ndarray
        Bins an ndarray to new_shape.

-    - crop_array(data_array, error_array, step, null_val, inside) -> crop_data_array, crop_error_array
+    - crop_array(data_array, error_array, data_mask, step, null_val, inside, display, savename, plots_folder) -> crop_data_array, crop_error_array (, crop_mask), crop_headers
        Homogeneously crop out null edges from a data array.

-    - deconvolve_array(data_array, psf, FWHM, iterations) -> deconvolved_data_array
-        Homogeneously deconvolve a data array using Richardson-Lucy iterative algorithm
+    - deconvolve_array(data_array, headers, psf, FWHM, scale, shape, iterations, algo) -> deconvolved_data_array
+        Homogeneously deconvolve a data array using a chosen deconvolution algorithm.

-    - get_error(data_array, sub_shape, display, headers, savename, plots_folder) -> data_array, error_array
+    - get_error(data_array, headers, error_array, data_mask, sub_shape, display, savename, plots_folder, return_background) -> data_array, error_array, headers (, background)
        Compute the error (noise) on each image of the input array.

-    - rebin_array(data_array, error_array, headers, pxsize, scale, operation) -> rebinned_data, rebinned_error, rebinned_headers, Dxy
+    - rebin_array(data_array, error_array, headers, pxsize, scale, operation, data_mask) -> rebinned_data, rebinned_error, rebinned_headers, Dxy (, data_mask)
        Homogeneously rebin a data array given a target pixel size in scale units.

    - align_data(data_array, error_array, upsample_factor, ref_data, ref_center, return_shifts) -> data_array, error_array (, shifts, errors)
        Align data_array on ref_data by cross-correlation.

-    - smooth_data(data_array, error_array, FWHM, scale, smoothing) -> smoothed_array
+    - smooth_data(data_array, error_array, data_mask, headers, FWHM, scale, smoothing) -> smoothed_array, smoothed_error
        Smooth data by convolving with a gaussian kernel or by combining weighted images.

-    - polarizer_avg(data_array, error_array, headers, FWHM, scale, smoothing) -> polarizer_array, pol_error_array
-        Average images in data_array on each used polarizer filter.
+    - polarizer_avg(data_array, error_array, data_mask, headers, FWHM, scale, smoothing) -> polarizer_array, polarizer_cov
+        Average images in data_array over each used polarizer filter and compute correlated errors.

-    - compute_Stokes(data_array, error_array, headers, FWHM, scale, smoothing) -> I_stokes, Q_stokes, U_stokes, Stokes_cov, pol_flux
-        Compute Stokes parameters I, Q and U and their respective errors from data_array.
+    - compute_Stokes(data_array, error_array, data_mask, headers, FWHM, scale, smoothing) -> I_stokes, Q_stokes, U_stokes, Stokes_cov
+        Compute Stokes parameters I, Q and U and their respective correlated errors from data_array.

-    - compute_pol(I_stokes, Q_stokes, U_stokes, Stokes_cov, pol_flux, headers) -> P, debiased_P, s_P, s_P_P, PA, s_PA, s_PA_P
-        Compute polarization degree (in %) and angle (in degree) and their
-        respective errors
+    - compute_pol(I_stokes, Q_stokes, U_stokes, Stokes_cov, headers) -> P, debiased_P, s_P, s_P_P, PA, s_PA, s_PA_P
+        Compute polarization degree (in %) and angle (in degrees) and their respective errors
+        (see the sketch after this list).

-    - rotate_Stokes(I_stokes, Q_stokes, U_stokes, Stokes_cov, pol_flux, headers, ang) -> I_stokes, Q_stokes, U_stokes, Stokes_cov, pol_flux, headers
+    - rotate_Stokes(I_stokes, Q_stokes, U_stokes, Stokes_cov, data_mask, headers, ang, SNRi_cut) -> I_stokes, Q_stokes, U_stokes, Stokes_cov, data_mask, headers
        Rotate I, Q, U given an angle in degrees using scipy functions.

    - rotate_data(data_array, error_array, data_mask, headers, ang) -> data_array, error_array, data_mask, headers
        Rotate data before reduction given an angle in degrees using scipy functions.
"""
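The degree and angle of linear polarization returned by compute_pol follow from the Stokes parameters in the standard way. A minimal sketch of the central formulas (the debiasing step and the covariance-propagated errors are omitted):

import numpy as np

def compute_pol_sketch(I_stokes, Q_stokes, U_stokes):
    """P in %, PA in degrees, from Stokes I, Q, U (assumes I > 0)."""
    P = 100. * np.sqrt(Q_stokes**2 + U_stokes**2) / I_stokes
    PA = 0.5 * np.degrees(np.arctan2(U_stokes, Q_stokes))
    return P, PA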
from copy import deepcopy

@@ -206,6 +208,10 @@ def crop_array(data_array, headers, error_array=None, data_mask=None, step=5, nu
        the error in each pixel of the observation images in data_array.
        If None, will be initialized to zeros.
        Defaults to None.
+    data_mask : numpy.ndarray, optional
+        2D boolean array delimiting the data to work on.
+        If None, will be initialized with a full True mask.
+        Defaults to None.
    step : int, optional
        For images with straight edges, not all rows and columns need to be
        browsed in order to obtain a good convex hull. The step value determines
@@ -345,9 +351,14 @@ def deconvolve_array(data_array, headers, psf='gaussian', FWHM=1., scale='px',
        relevant values of the 'psf' variable.
        Defaults to (9,9).
    iterations : int, optional
-        Number of iterations of Richardson-Lucy deconvolution algorithm. Act as
+        Number of iterations for iterative deconvolution algorithms. Acts
        as a regularization of the process.
        Defaults to 20.
+    algo : str, optional
+        Name of the deconvolution algorithm that will be used. Implemented
+        algorithms are the following : 'Wiener', 'Van-Cittert',
+        'One Step Gradient', 'Conjugate Gradient' and 'Richardson-Lucy'.
+        Defaults to 'Richardson-Lucy'.
    ----------
    Returns:
    deconv_array : numpy.ndarray
@@ -394,6 +405,15 @@ def get_error(data_array, headers, error_array=None, data_mask=None, sub_shape=N
        Array containing the data to study (2D float arrays).
    headers : header list
        Headers associated with the images in data_array.
+    error_array : numpy.ndarray, optional
+        Array of images (2D floats, aligned and of the same shape) containing
+        the error in each pixel of the observation images in data_array.
+        If None, will be initialized to zeros.
+        Defaults to None.
+    data_mask : numpy.ndarray, optional
+        2D boolean array delimiting the data to work on.
+        If None, will be initialized with a full True mask.
+        Defaults to None.
    sub_shape : tuple, optional
        Shape of the sub-image to look for. Must be odd.
        Defaults to 10% of the input array.

@@ -587,6 +607,10 @@ def rebin_array(data_array, error_array, headers, pxsize, scale,
        Set the way original components of the matrix are put together,
        either summing ('sum') or averaging ('average', 'avg', 'mean') them.
        Defaults to 'sum'.
+    data_mask : numpy.ndarray, optional
+        2D boolean array delimiting the data to work on.
+        If None, will be initialized with a full True mask.
+        Defaults to None.
    ----------
    Returns:
    rebinned_data, rebinned_error : numpy.ndarray

@@ -595,6 +619,9 @@ def rebin_array(data_array, error_array, headers, pxsize, scale,
        Updated headers corresponding to the images in rebinned_data.
    Dxy : numpy.ndarray
        Array containing the rebinning factor in each direction of the image.
+    data_mask : numpy.ndarray, optional
+        Updated 2D boolean array delimiting the data to work on.
+        Only returned if the input data_mask is not None.
    """
    # Check that all images are from the same instrument
    ref_header = headers[0]
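Rebinning of the kind bin_ndarray documents is usually implemented with a reshape followed by a reduction over the new axes. A minimal 2D sketch for shapes that are exact multiples of the target (illustrative, not the module's code):

import numpy as np

def bin2d_sketch(arr, new_shape, operation='sum'):
    """Rebin a 2D array to new_shape by summing or averaging blocks."""
    ny, nx = new_shape
    reshaped = arr.reshape(ny, arr.shape[0] // ny, nx, arr.shape[1] // nx)
    op = np.sum if operation == 'sum' else np.mean
    return op(op(reshaped, axis=3), axis=1)

coarse = bin2d_sketch(np.ones((100, 100)), (50, 50))  # each output pixel sums a 2x2 block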
@@ -716,6 +743,8 @@ def align_data(data_array, headers, error_array=None, upsample_factor=1.,
        image with margins of value 0.
    rescaled_error : numpy.ndarray
        Array containing the errors on the aligned images in the rescaled array.
+    data_mask : numpy.ndarray
+        2D boolean array delimiting the data to work on.
    shifts : numpy.ndarray
        Array containing the pixel shifts in the x and y directions from
        the reference image.
@@ -839,6 +868,8 @@ def smooth_data(data_array, error_array, data_mask, headers, FWHM=1.,
    error_array : numpy.ndarray
        Array of images (2D floats, aligned and of the same shape) containing
        the error in each pixel of the observation images in data_array.
+    data_mask : numpy.ndarray
+        2D boolean array delimiting the data to work on.
    headers : header list
        List of headers corresponding to the images in data_array.
    FWHM : float, optional
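Gaussian smoothing at a given FWHM can be delegated to scipy with the same FWHM-to-sigma conversion used for the PSF above; a minimal sketch (the module may instead convolve with its own kernel or combine weighted images):

import numpy as np
from scipy.ndimage import gaussian_filter

FWHM = 1.5  # in pixels
sigma = FWHM / (2. * np.sqrt(2. * np.log(2.)))
smoothed = gaussian_filter(np.random.default_rng(0).normal(size=(64, 64)), sigma)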
@@ -942,6 +973,8 @@ def polarizer_avg(data_array, error_array, data_mask, headers, FWHM=None,
    error_array : numpy.ndarray
        Array of images (2D floats, aligned and of the same shape) containing
        the error in each pixel of the observation images in data_array.
+    data_mask : numpy.ndarray
+        2D boolean array delimiting the data to work on.
    headers : header list
        List of headers corresponding to the images in data_array.
    FWHM : float, optional

@@ -1089,6 +1122,8 @@ def compute_Stokes(data_array, error_array, data_mask, headers,
    error_array : numpy.ndarray
        Array of images (2D floats, aligned and of the same shape) containing
        the error in each pixel of the observation images in data_array.
+    data_mask : numpy.ndarray
+        2D boolean array delimiting the data to work on.
    headers : header list
        List of headers corresponding to the images in data_array.
    FWHM : float, optional
@@ -1376,6 +1411,8 @@ def rotate_Stokes(I_stokes, Q_stokes, U_stokes, Stokes_cov, data_mask, headers,
        +45/-45 deg linear polarization intensity.
    Stokes_cov : numpy.ndarray
        Covariance matrix of the Stokes parameters I, Q, U.
+    data_mask : numpy.ndarray
+        2D boolean array delimiting the data to work on.
    headers : header list
        List of headers corresponding to the reduced images.
    ang : float

@@ -1401,6 +1438,8 @@ def rotate_Stokes(I_stokes, Q_stokes, U_stokes, Stokes_cov, data_mask, headers,
    new_headers : header list
        Updated list of headers corresponding to the reduced images accounting
        for the new orientation angle.
+    new_data_mask : numpy.ndarray
+        Updated 2D boolean array delimiting the data to work on.
    """
    # Apply cuts
    if SNRi_cut is not None:

@@ -1520,7 +1559,8 @@ def rotate_Stokes(I_stokes, Q_stokes, U_stokes, Stokes_cov, data_mask, headers,
    header['PA_int_err'] = (PA_diluted_err, 'Integrated polarization angle error')

-    return new_I_stokes, new_Q_stokes, new_U_stokes, new_Stokes_cov, new_headers, new_data_mask
+    return new_I_stokes, new_Q_stokes, new_U_stokes, new_Stokes_cov, new_data_mask, new_headers


def rotate_data(data_array, error_array, data_mask, headers, ang):
    """
@@ -1533,6 +1573,8 @@ def rotate_data(data_array, error_array, data_mask, headers, ang):
        Array of images (2D floats) to be rotated by angle ang.
    error_array : numpy.ndarray
        Array of errors associated with the images in data_array.
+    data_mask : numpy.ndarray
+        2D boolean array delimiting the data to work on.
    headers : header list
        List of headers corresponding to the reduced images.
    ang : float

@@ -1547,6 +1589,8 @@ def rotate_data(data_array, error_array, data_mask, headers, ang):
    new_headers : header list
        Updated list of headers corresponding to the reduced images accounting
        for the new orientation angle.
+    new_data_mask : numpy.ndarray
+        Updated 2D boolean array delimiting the data to work on.
    """
    # Rotate data_array, error_array and data_mask using the rotation matrix
    alpha = ang*np.pi/180.

@@ -1609,4 +1653,4 @@ def rotate_data(data_array, error_array, data_mask, headers, ang):
        new_headers.append(new_header)
    globals()['theta'] = theta - alpha

-    return new_data_array, new_error_array, new_headers, new_data_mask
+    return new_data_array, new_error_array, new_data_mask, new_headers
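For rotate_Stokes, a frame rotation by ang degrees leaves I untouched and mixes Q and U through twice the angle (the sign depends on the adopted angle convention). A minimal sketch of that mixing, which the module applies alongside the scipy resampling and header updates:

import numpy as np

def rotate_QU_sketch(Q_stokes, U_stokes, ang):
    """Mix Q and U for a reference frame rotated by `ang` degrees."""
    alpha = np.radians(ang)
    # Polarization pseudo-vectors rotate through 2*alpha.
    new_Q = Q_stokes * np.cos(2. * alpha) + U_stokes * np.sin(2. * alpha)
    new_U = -Q_stokes * np.sin(2. * alpha) + U_stokes * np.cos(2. * alpha)
    return new_Q, new_U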