Put I,Q,U into single Stokes matrix

This commit is contained in:
2025-08-05 18:56:45 +02:00
parent 8b4c7c38f2
commit 20280e7226
4 changed files with 148 additions and 255 deletions

View File

@@ -42,11 +42,11 @@ def main(target=None, proposal_id=None, infiles=None, output_dir="./data", crop=
# Background estimation # Background estimation
error_sub_type = "scott" # sqrt, sturges, rice, scott, freedman-diaconis (default) or shape (example (51, 51)) error_sub_type = "scott" # sqrt, sturges, rice, scott, freedman-diaconis (default) or shape (example (51, 51))
subtract_error = 0.50 subtract_error = 0.50
display_bkg = True display_bkg = False
# Data binning # Data binning
pxsize = 4 pxsize = 0.10
pxscale = "px" # pixel, arcsec or full pxscale = "arcsec" # pixel, arcsec or full
rebin_operation = "sum" # sum or average rebin_operation = "sum" # sum or average
# Alignement # Alignement
@@ -59,17 +59,17 @@ def main(target=None, proposal_id=None, infiles=None, output_dir="./data", crop=
# Smoothing # Smoothing
smoothing_function = "combine" # gaussian_after, weighted_gaussian_after, gaussian, weighted_gaussian or combine smoothing_function = "combine" # gaussian_after, weighted_gaussian_after, gaussian, weighted_gaussian or combine
smoothing_FWHM = 1.5 # If None, no smoothing is done smoothing_FWHM = 0.15 # If None, no smoothing is done
smoothing_scale = "px" # pixel or arcsec smoothing_scale = "arcsec" # pixel or arcsec
# Rotation # Rotation
rotate_North = True rotate_North = True
# Polarization map output # Polarization map output
P_cut = 3 # if >=1.0 cut on the signal-to-noise else cut on the confidence level in Q, U P_cut = 3 # if >=1.0 cut on the signal-to-noise else cut on the confidence level in Q, U
SNRi_cut = 1.0 # I measurments with SNR>30, which implies an uncertainty in P of 4.7%. SNRi_cut = 10.0 # I measurments with SNR>30, which implies an uncertainty in P of 4.7%.
flux_lim = None # lowest and highest flux displayed on plot, defaults to bkg and maximum in cut if None flux_lim = None # lowest and highest flux displayed on plot, defaults to bkg and maximum in cut if None
scale_vec = 5 scale_vec = 3
step_vec = 1 # plot all vectors in the array. if step_vec = 2, then every other vector will be plotted if step_vec = 0 then all vectors are displayed at full length step_vec = 1 # plot all vectors in the array. if step_vec = 2, then every other vector will be plotted if step_vec = 0 then all vectors are displayed at full length
# Pipeline start # Pipeline start
@@ -197,68 +197,30 @@ def main(target=None, proposal_id=None, infiles=None, output_dir="./data", crop=
norm=LogNorm(vmin=data_array[data_array > 0.0].min() * headers[0]["photflam"], vmax=data_array[data_array > 0.0].max() * headers[0]["photflam"]), norm=LogNorm(vmin=data_array[data_array > 0.0].min() * headers[0]["photflam"], vmax=data_array[data_array > 0.0].max() * headers[0]["photflam"]),
) )
background = np.array([np.array(bkg).reshape(1, 1) for bkg in background])
background_error = np.array(
[
np.array(
np.sqrt(
(bkg - background[np.array([h["filtnam1"] == head["filtnam1"] for h in headers], dtype=bool)].mean()) ** 2
/ np.sum([h["filtnam1"] == head["filtnam1"] for h in headers])
)
).reshape(1, 1)
for bkg, head in zip(background, headers)
]
)
# Step 2: # Step 2:
# Compute Stokes I, Q, U with smoothed polarized images # Compute Stokes I, Q, U with smoothed polarized images
# SMOOTHING DISCUSSION : # SMOOTHING DISCUSSION :
# FWHM of FOC have been estimated at about 0.03" across 1500-5000 Angstrom band, which is about 2 detector pixels wide # FWHM of FOC have been estimated at about 0.03" across 1500-5000 Angstrom band, which is about 2 detector pixels wide
# see Jedrzejewski, R.; Nota, A.; Hack, W. J., A Comparison Between FOC and WFPC2 # see Jedrzejewski, R.; Nota, A.; Hack, W. J., A Comparison Between FOC and WFPC2
# Bibcode : 1995chst.conf...10J # Bibcode : 1995chst.conf...10J
I_stokes, Q_stokes, U_stokes, Stokes_cov, header_stokes, s_IQU_stat = proj_red.compute_Stokes( Stokes, Stokes_cov, header_stokes, s_IQU_stat = proj_red.compute_Stokes(
data_array, error_array, data_mask, headers, FWHM=smoothing_FWHM, scale=smoothing_scale, smoothing=smoothing_function, transmitcorr=transmitcorr data_array, error_array, data_mask, headers, FWHM=smoothing_FWHM, scale=smoothing_scale, smoothing=smoothing_function, transmitcorr=transmitcorr
) )
I_bkg, Q_bkg, U_bkg, S_cov_bkg, header_bkg, s_IQU_stat_bkg = proj_red.compute_Stokes(
background, background_error, np.array(True).reshape(1, 1), headers, FWHM=None, scale=smoothing_scale, smoothing=smoothing_function, transmitcorr=False
)
# Step 3: # Step 3:
# Rotate images to have North up # Rotate images to have North up
if rotate_North: if rotate_North:
I_stokes, Q_stokes, U_stokes, Stokes_cov, data_mask, header_stokes, s_IQU_stat = proj_red.rotate_Stokes( Stokes, Stokes_cov, data_mask, header_stokes, s_IQU_stat = proj_red.rotate_Stokes(
I_stokes, Q_stokes, U_stokes, Stokes_cov, data_mask, header_stokes, s_IQU_stat=s_IQU_stat, SNRi_cut=None Stokes, Stokes_cov, data_mask, header_stokes, s_IQU_stat=s_IQU_stat, SNRi_cut=None
)
I_bkg, Q_bkg, U_bkg, S_cov_bkg, data_mask_bkg, header_bkg, s_IQU_stat_bkg = proj_red.rotate_Stokes(
I_bkg, Q_bkg, U_bkg, S_cov_bkg, np.array(True).reshape(1, 1), header_bkg, s_IQU_stat=s_IQU_stat_bkg, SNRi_cut=None
) )
# Compute polarimetric parameters (polarization degree and angle). # Compute polarimetric parameters (polarization degree and angle).
P, debiased_P, s_P, s_P_P, PA, s_PA, s_PA_P = proj_red.compute_pol(I_stokes, Q_stokes, U_stokes, Stokes_cov, header_stokes, s_IQU_stat=s_IQU_stat) P, debiased_P, s_P, s_P_P, PA, s_PA, s_PA_P = proj_red.compute_pol(Stokes, Stokes_cov, header_stokes, s_IQU_stat=s_IQU_stat)
P_bkg, debiased_P_bkg, s_P_bkg, s_P_P_bkg, PA_bkg, s_PA_bkg, s_PA_P_bkg = proj_red.compute_pol(
I_bkg, Q_bkg, U_bkg, S_cov_bkg, header_bkg, s_IQU_stat=s_IQU_stat_bkg
)
# Step 4: # Step 4:
# Save image to FITS. # Save image to FITS.
figname = "_".join([figname, figtype]) if figtype != "" else figname figname = "_".join([figname, figtype]) if figtype != "" else figname
Stokes_hdul = proj_fits.save_Stokes( Stokes_hdul = proj_fits.save_Stokes(
I_stokes, Stokes, Stokes_cov, P, debiased_P, s_P, s_P_P, PA, s_PA, s_PA_P, header_stokes, data_mask, figname, data_folder=data_folder, return_hdul=True
Q_stokes,
U_stokes,
Stokes_cov,
P,
debiased_P,
s_P,
s_P_P,
PA,
s_PA,
s_PA_P,
header_stokes,
data_mask,
figname,
data_folder=data_folder,
return_hdul=True,
) )
outfiles.append("/".join([data_folder, Stokes_hdul[0].header["FILENAME"] + ".fits"])) outfiles.append("/".join([data_folder, Stokes_hdul[0].header["FILENAME"] + ".fits"]))
@@ -277,8 +239,8 @@ def main(target=None, proposal_id=None, infiles=None, output_dir="./data", crop=
"F_int({0:.0f} Angs) = ({1} ± {2})e{3} ergs.cm^-2.s^-1.Angs^-1".format( "F_int({0:.0f} Angs) = ({1} ± {2})e{3} ergs.cm^-2.s^-1.Angs^-1".format(
header_stokes["PHOTPLAM"], header_stokes["PHOTPLAM"],
*sci_not( *sci_not(
Stokes_hdul[0].data[data_mask].sum() * header_stokes["PHOTFLAM"], Stokes_hdul["STOKES"].data[0][data_mask].sum() * header_stokes["PHOTFLAM"],
np.sqrt(Stokes_hdul[3].data[0, 0][data_mask].sum()) * header_stokes["PHOTFLAM"], np.sqrt(Stokes_hdul["STOKES_COV"].data[0, 0][data_mask].sum()) * header_stokes["PHOTFLAM"],
2, 2,
out=int, out=int,
), ),
@@ -286,14 +248,6 @@ def main(target=None, proposal_id=None, infiles=None, output_dir="./data", crop=
) )
print("P_int = {0:.1f} ± {1:.1f} %".format(header_stokes["p_int"] * 100.0, np.ceil(header_stokes["sP_int"] * 1000.0) / 10.0)) print("P_int = {0:.1f} ± {1:.1f} %".format(header_stokes["p_int"] * 100.0, np.ceil(header_stokes["sP_int"] * 1000.0) / 10.0))
print("PA_int = {0:.1f} ± {1:.1f} °".format(princ_angle(header_stokes["pa_int"]), princ_angle(np.ceil(header_stokes["sPA_int"] * 10.0) / 10.0))) print("PA_int = {0:.1f} ± {1:.1f} °".format(princ_angle(header_stokes["pa_int"]), princ_angle(np.ceil(header_stokes["sPA_int"] * 10.0) / 10.0)))
# Background values
print(
"F_bkg({0:.0f} Angs) = ({1} ± {2})e{3} ergs.cm^-2.s^-1.Angs^-1".format(
header_stokes["photplam"], *sci_not(I_bkg[0, 0] * header_stokes["photflam"], np.sqrt(S_cov_bkg[0, 0][0, 0]) * header_stokes["photflam"], 2, out=int)
)
)
print("P_bkg = {0:.1f} ± {1:.1f} %".format(debiased_P_bkg[0, 0] * 100.0, np.ceil(s_P_bkg[0, 0] * 1000.0) / 10.0))
print("PA_bkg = {0:.1f} ± {1:.1f} °".format(princ_angle(PA_bkg[0, 0]), princ_angle(np.ceil(s_PA_bkg[0, 0] * 10.0) / 10.0)))
# Plot polarization map (Background is either total Flux, Polarization degree or Polarization degree error). # Plot polarization map (Background is either total Flux, Polarization degree or Polarization degree error).
if pxscale.lower() not in ["full", "integrate"] and not interactive: if pxscale.lower() not in ["full", "integrate"] and not interactive:
proj_plots.polarization_map( proj_plots.polarization_map(

View File

@@ -105,17 +105,15 @@ def get_obs_data(infiles, data_folder="", compute_flux=False):
return data_array, headers return data_array, headers
def save_Stokes( def save_Stokes(Stokes, Stokes_cov, P, debiased_P, s_P, s_P_P, PA, s_PA, s_PA_P, header_stokes, data_mask, filename, data_folder="", return_hdul=False):
I_stokes, Q_stokes, U_stokes, Stokes_cov, P, debiased_P, s_P, s_P_P, PA, s_PA, s_PA_P, header_stokes, data_mask, filename, data_folder="", return_hdul=False
):
""" """
Save computed polarimetry parameters to a single fits file, Save computed polarimetry parameters to a single fits file,
updating header accordingly. updating header accordingly.
---------- ----------
Inputs: Inputs:
I_stokes, Q_stokes, U_stokes, P, debiased_P, s_P, s_P_P, PA, s_PA, s_PA_P : numpy.ndarray Stokes, P, debiased_P, s_P, s_P_P, PA, s_PA, s_PA_P : numpy.ndarray
Images (2D float arrays) containing the computed polarimetric data : Stokes cube (3D float arrays) containing the computed polarimetric data :
Stokes parameters I, Q, U, Polarization degree and debieased, Stokes parameters I, Q, U, V, Polarization degree and debieased,
its error propagated and assuming Poisson noise, Polarization angle, its error propagated and assuming Poisson noise, Polarization angle,
its error propagated and assuming Poisson noise. its error propagated and assuming Poisson noise.
Stokes_cov : numpy.ndarray Stokes_cov : numpy.ndarray
@@ -137,7 +135,7 @@ def save_Stokes(
---------- ----------
Return: Return:
hdul : astropy.io.fits.hdu.hdulist.HDUList hdul : astropy.io.fits.hdu.hdulist.HDUList
HDUList containing I_stokes in the PrimaryHDU, then Q_stokes, U_stokes, HDUList containing the Stokes cube in the PrimaryHDU, then
P, s_P, PA, s_PA in this order. Headers have been updated to relevant P, s_P, PA, s_PA in this order. Headers have been updated to relevant
informations (WCS, orientation, data_type). informations (WCS, orientation, data_type).
Only returned if return_hdul is True. Only returned if return_hdul is True.
@@ -148,8 +146,8 @@ def save_Stokes(
if data_mask.shape != (1, 1): if data_mask.shape != (1, 1):
vertex = clean_ROI(data_mask) vertex = clean_ROI(data_mask)
shape = vertex[1::2] - vertex[0::2] shape = vertex[1::2] - vertex[0::2]
new_wcs.array_shape = shape new_wcs.array_shape = (4, *shape)
new_wcs.wcs.crpix = np.array(new_wcs.wcs.crpix) - vertex[0::-2] new_wcs.wcs.crpix[1:] = np.array(new_wcs.wcs.crpix[1:]) - vertex[0::-2]
header = new_wcs.to_header() header = new_wcs.to_header()
header["TELESCOP"] = (header_stokes["TELESCOP"] if "TELESCOP" in list(header_stokes.keys()) else "HST", "telescope used to acquire data") header["TELESCOP"] = (header_stokes["TELESCOP"] if "TELESCOP" in list(header_stokes.keys()) else "HST", "telescope used to acquire data")
@@ -161,6 +159,7 @@ def save_Stokes(
header["EXPTIME"] = (header_stokes["EXPTIME"], "Total exposure time in sec") header["EXPTIME"] = (header_stokes["EXPTIME"], "Total exposure time in sec")
header["PROPOSID"] = (header_stokes["PROPOSID"], "PEP proposal identifier for observation") header["PROPOSID"] = (header_stokes["PROPOSID"], "PEP proposal identifier for observation")
header["TARGNAME"] = (header_stokes["TARGNAME"], "Target name") header["TARGNAME"] = (header_stokes["TARGNAME"], "Target name")
header["TARGET_NAME"] = (header_stokes["TARGNAME"], "Target name")
header["ORIENTAT"] = (header_stokes["ORIENTAT"], "Angle between North and the y-axis of the image") header["ORIENTAT"] = (header_stokes["ORIENTAT"], "Angle between North and the y-axis of the image")
header["FILENAME"] = (filename, "ORIGINAL FILENAME") header["FILENAME"] = (filename, "ORIGINAL FILENAME")
header["BKG_TYPE"] = (header_stokes["BKG_TYPE"], "Bkg estimation method used during reduction") header["BKG_TYPE"] = (header_stokes["BKG_TYPE"], "Bkg estimation method used during reduction")
@@ -174,9 +173,8 @@ def save_Stokes(
# Crop Data to mask # Crop Data to mask
if data_mask.shape != (1, 1): if data_mask.shape != (1, 1):
I_stokes = I_stokes[vertex[2] : vertex[3], vertex[0] : vertex[1]] Stokes = Stokes[:, vertex[2] : vertex[3], vertex[0] : vertex[1]]
Q_stokes = Q_stokes[vertex[2] : vertex[3], vertex[0] : vertex[1]] Stokes_cov = Stokes_cov[:, :, vertex[2] : vertex[3], vertex[0] : vertex[1]]
U_stokes = U_stokes[vertex[2] : vertex[3], vertex[0] : vertex[1]]
P = P[vertex[2] : vertex[3], vertex[0] : vertex[1]] P = P[vertex[2] : vertex[3], vertex[0] : vertex[1]]
debiased_P = debiased_P[vertex[2] : vertex[3], vertex[0] : vertex[1]] debiased_P = debiased_P[vertex[2] : vertex[3], vertex[0] : vertex[1]]
s_P = s_P[vertex[2] : vertex[3], vertex[0] : vertex[1]] s_P = s_P[vertex[2] : vertex[3], vertex[0] : vertex[1]]
@@ -184,14 +182,6 @@ def save_Stokes(
PA = PA[vertex[2] : vertex[3], vertex[0] : vertex[1]] PA = PA[vertex[2] : vertex[3], vertex[0] : vertex[1]]
s_PA = s_PA[vertex[2] : vertex[3], vertex[0] : vertex[1]] s_PA = s_PA[vertex[2] : vertex[3], vertex[0] : vertex[1]]
s_PA_P = s_PA_P[vertex[2] : vertex[3], vertex[0] : vertex[1]] s_PA_P = s_PA_P[vertex[2] : vertex[3], vertex[0] : vertex[1]]
new_Stokes_cov = np.zeros((*Stokes_cov.shape[:-2], *shape[::-1]))
for i in range(3):
for j in range(3):
Stokes_cov[i, j][(1 - data_mask).astype(bool)] = 0.0
new_Stokes_cov[i, j] = Stokes_cov[i, j][vertex[2] : vertex[3], vertex[0] : vertex[1]]
Stokes_cov = new_Stokes_cov
data_mask = data_mask[vertex[2] : vertex[3], vertex[0] : vertex[1]] data_mask = data_mask[vertex[2] : vertex[3], vertex[0] : vertex[1]]
data_mask = data_mask.astype(float, copy=False) data_mask = data_mask.astype(float, copy=False)
@@ -199,17 +189,15 @@ def save_Stokes(
hdul = fits.HDUList([]) hdul = fits.HDUList([])
# Add I_stokes as PrimaryHDU # Add I_stokes as PrimaryHDU
header["datatype"] = ("I_stokes", "type of data stored in the HDU") header["datatype"] = ("Stokes", "type of data stored in the HDU")
I_stokes[(1 - data_mask).astype(bool)] = 0.0 Stokes[np.broadcast_to((1 - data_mask).astype(bool), Stokes.shape)] = 0.0
primary_hdu = fits.PrimaryHDU(data=I_stokes, header=header) primary_hdu = fits.PrimaryHDU(data=Stokes, header=header)
primary_hdu.name = "I_stokes" primary_hdu.name = "Stokes"
hdul.append(primary_hdu) hdul.append(primary_hdu)
# Add Q, U, Stokes_cov, P, s_P, PA, s_PA to the HDUList # Add Stokes_cov, P, s_P, PA, s_PA to the HDUList
for data, name in [ for data, name in [
[Q_stokes, "Q_stokes"], [Stokes_cov, "STOKES_COV"],
[U_stokes, "U_stokes"],
[Stokes_cov, "IQU_cov_matrix"],
[P, "Pol_deg"], [P, "Pol_deg"],
[debiased_P, "Pol_deg_debiased"], [debiased_P, "Pol_deg_debiased"],
[s_P, "Pol_deg_err"], [s_P, "Pol_deg_err"],
@@ -221,7 +209,9 @@ def save_Stokes(
]: ]:
hdu_header = header.copy() hdu_header = header.copy()
hdu_header["datatype"] = name hdu_header["datatype"] = name
if not name == "IQU_cov_matrix": if name == "STOKES_COV":
data[np.broadcast_to((1 - data_mask).astype(bool), data.shape)] = 0.0
else:
data[(1 - data_mask).astype(bool)] = 0.0 data[(1 - data_mask).astype(bool)] = 0.0
hdu = fits.ImageHDU(data=data, header=hdu_header) hdu = fits.ImageHDU(data=data, header=hdu_header)
hdu.name = name hdu.name = name

View File

@@ -2949,19 +2949,19 @@ class pol_map(object):
@property @property
def wcs(self): def wcs(self):
return WCS(self.Stokes[0].header).celestial.deepcopy() return WCS(self.Stokes[0].header).celestial
@property @property
def I(self): def I(self):
return self.Stokes["I_STOKES"].data return self.Stokes["STOKES"].data[0]
@property @property
def I_ERR(self): def I_ERR(self):
return np.sqrt(self.Stokes["IQU_COV_MATRIX"].data[0, 0]) return np.sqrt(self.Stokes["STOKES_COV"].data[0, 0])
@property @property
def Q(self): def Q(self):
return self.Stokes["Q_STOKES"].data return self.Stokes["STOKES"].data[1]
@property @property
def QN(self): def QN(self):
@@ -2969,7 +2969,7 @@ class pol_map(object):
@property @property
def Q_ERR(self): def Q_ERR(self):
return np.sqrt(self.Stokes["IQU_COV_MATRIX"].data[1, 1]) return np.sqrt(self.Stokes["STOKES_COV"].data[1, 1])
@property @property
def QN_ERR(self): def QN_ERR(self):
@@ -2977,7 +2977,7 @@ class pol_map(object):
@property @property
def U(self): def U(self):
return self.Stokes["U_STOKES"].data return self.Stokes["STOKES"].data[2]
@property @property
def UN(self): def UN(self):
@@ -2985,7 +2985,7 @@ class pol_map(object):
@property @property
def U_ERR(self): def U_ERR(self):
return np.sqrt(self.Stokes["IQU_COV_MATRIX"].data[2, 2]) return np.sqrt(self.Stokes["STOKES_COV"].data[2, 2])
@property @property
def UN_ERR(self): def UN_ERR(self):
@@ -2993,7 +2993,7 @@ class pol_map(object):
@property @property
def IQU_cov(self): def IQU_cov(self):
return self.Stokes["IQU_COV_MATRIX"].data return self.Stokes["STOKES_COV"].data
@property @property
def P(self): def P(self):

View File

@@ -46,6 +46,7 @@ import matplotlib.pyplot as plt
import numpy as np import numpy as np
from astropy import log from astropy import log
from astropy.wcs import WCS from astropy.wcs import WCS
from astropy.wcs.utils import add_stokes_axis_to_wcs
from matplotlib.colors import LogNorm from matplotlib.colors import LogNorm
from matplotlib.patches import Rectangle from matplotlib.patches import Rectangle
from scipy.ndimage import rotate as sc_rotate from scipy.ndimage import rotate as sc_rotate
@@ -1182,15 +1183,8 @@ def compute_Stokes(data_array, error_array, data_mask, headers, FWHM=None, scale
Defaults to True. Defaults to True.
---------- ----------
Returns: Returns:
I_stokes : numpy.ndarray Stokes : numpy.ndarray
Image (2D floats) containing the Stokes parameters accounting for Image (2D floats) containing the Stokes I,Q,U,V flux
total intensity
Q_stokes : numpy.ndarray
Image (2D floats) containing the Stokes parameters accounting for
vertical/horizontal linear polarization intensity
U_stokes : numpy.ndarray
Image (2D floats) containing the Stokes parameters accounting for
+45/-45deg linear polarization intensity
Stokes_cov : numpy.ndarray Stokes_cov : numpy.ndarray
Covariance matrix of the Stokes parameters I, Q, U. Covariance matrix of the Stokes parameters I, Q, U.
""" """
@@ -1269,28 +1263,26 @@ def compute_Stokes(data_array, error_array, data_mask, headers, FWHM=None, scale
N = (coeff_stokes[0, :] * transmit / 2.0).sum() N = (coeff_stokes[0, :] * transmit / 2.0).sum()
coeff_stokes = coeff_stokes / N coeff_stokes = coeff_stokes / N
coeff_stokes_corr = np.array([cs * t / 2.0 for (cs, t) in zip(coeff_stokes.T, transmit)]).T coeff_stokes_corr = np.array([cs * t / 2.0 for (cs, t) in zip(coeff_stokes.T, transmit)]).T
I_stokes = np.zeros(pol_array[0].shape) Stokes = np.zeros((4, pol_array[0].shape[0], pol_array[0].shape[1]))
Q_stokes = np.zeros(pol_array[0].shape) Stokes_cov = np.zeros((4, 4, Stokes.shape[1], Stokes.shape[2]))
U_stokes = np.zeros(pol_array[0].shape)
Stokes_cov = np.zeros((3, 3, I_stokes.shape[0], I_stokes.shape[1]))
for i in range(I_stokes.shape[0]): for i in range(Stokes.shape[1]):
for j in range(I_stokes.shape[1]): for j in range(Stokes.shape[2]):
I_stokes[i, j], Q_stokes[i, j], U_stokes[i, j] = np.dot(coeff_stokes, pol_flux[:, i, j]).T Stokes[:3, i, j] = np.dot(coeff_stokes, pol_flux[:, i, j]).T
Stokes_cov[:, :, i, j] = np.dot(coeff_stokes, np.dot(pol_cov[:, :, i, j], coeff_stokes.T)) Stokes_cov[:3, :3, i, j] = np.dot(coeff_stokes, np.dot(pol_cov[:, :, i, j], coeff_stokes.T))
if (FWHM is not None) and (smoothing.lower() in ["weighted_gaussian_after", "weight_gauss_after", "gaussian_after", "gauss_after"]): if (FWHM is not None) and (smoothing.lower() in ["weighted_gaussian_after", "weight_gauss_after", "gaussian_after", "gauss_after"]):
smoothing = smoothing.lower()[:-6] smoothing = smoothing.lower()[:-6]
Stokes_array = np.array([I_stokes, Q_stokes, U_stokes]) Stokes_array = deepcopy(Stokes[:3])
Stokes_error = np.array([np.sqrt(Stokes_cov[i, i]) for i in range(3)]) Stokes_error = np.array([np.sqrt(Stokes_cov[i, i]) for i in range(3)])
Stokes_headers = headers[0:3] Stokes_headers = headers[0:3]
Stokes_array, Stokes_error = smooth_data(Stokes_array, Stokes_error, data_mask, headers=Stokes_headers, FWHM=FWHM, scale=scale, smoothing=smoothing) Stokes_array, Stokes_error = smooth_data(Stokes_array, Stokes_error, data_mask, headers=Stokes_headers, FWHM=FWHM, scale=scale, smoothing=smoothing)
I_stokes, Q_stokes, U_stokes = Stokes_array Stokes[:3] = deepcopy(Stokes_array)
Stokes_cov[0, 0], Stokes_cov[1, 1], Stokes_cov[2, 2] = deepcopy(Stokes_error**2) Stokes_cov[0, 0], Stokes_cov[1, 1], Stokes_cov[2, 2] = deepcopy(Stokes_error**2)
sStokes_array = np.array([I_stokes * Q_stokes, I_stokes * U_stokes, Q_stokes * U_stokes]) sStokes_array = np.array([Stokes[0, 1], Stokes[0, 2], Stokes[1, 2]])
sStokes_error = np.array([Stokes_cov[0, 1], Stokes_cov[0, 2], Stokes_cov[1, 2]]) sStokes_error = np.array([Stokes_cov[0, 1], Stokes_cov[0, 2], Stokes_cov[1, 2]])
uStokes_error = np.array([Stokes_cov[1, 0], Stokes_cov[2, 0], Stokes_cov[2, 1]]) uStokes_error = np.array([Stokes_cov[1, 0], Stokes_cov[2, 0], Stokes_cov[2, 1]])
@@ -1304,14 +1296,14 @@ def compute_Stokes(data_array, error_array, data_mask, headers, FWHM=None, scale
Stokes_cov[0, 1], Stokes_cov[0, 2], Stokes_cov[1, 2] = deepcopy(sStokes_error) Stokes_cov[0, 1], Stokes_cov[0, 2], Stokes_cov[1, 2] = deepcopy(sStokes_error)
Stokes_cov[1, 0], Stokes_cov[2, 0], Stokes_cov[2, 1] = deepcopy(uStokes_error) Stokes_cov[1, 0], Stokes_cov[2, 0], Stokes_cov[2, 1] = deepcopy(uStokes_error)
mask = (Q_stokes**2 + U_stokes**2) > I_stokes**2 mask = (Stokes[1] ** 2 + Stokes[2] ** 2) > Stokes[0] ** 2
if mask.any(): if mask.any():
print("WARNING : found {0:d} pixels for which I_pol > I_stokes".format(I_stokes[mask].size)) print("WARNING : found {0:d} pixels for which I_pol > I_stokes".format(mask.sum()))
# Statistical error: Poisson noise is assumed # Statistical error: Poisson noise is assumed
sigma_flux = np.array([np.sqrt(flux / head["exptime"]) for flux, head in zip(pol_flux, pol_headers)]) sigma_flux = np.array([np.sqrt(flux / head["exptime"]) for flux, head in zip(pol_flux, pol_headers)])
s_IQU_stat = np.zeros(Stokes_cov.shape) s_IQU_stat = np.zeros(Stokes_cov.shape)
for i in range(Stokes_cov.shape[0]): for i in range(3):
s_IQU_stat[i, i] = np.sum([coeff_stokes[i, k] ** 2 * sigma_flux[k] ** 2 for k in range(len(sigma_flux))], axis=0) s_IQU_stat[i, i] = np.sum([coeff_stokes[i, k] ** 2 * sigma_flux[k] ** 2 for k in range(len(sigma_flux))], axis=0)
for j in [k for k in range(3) if k > i]: for j in [k for k in range(3) if k > i]:
s_IQU_stat[i, j] = np.sum([coeff_stokes[i, k] * coeff_stokes[j, k] * sigma_flux[k] ** 2 for k in range(len(sigma_flux))], axis=0) s_IQU_stat[i, j] = np.sum([coeff_stokes[i, k] * coeff_stokes[j, k] * sigma_flux[k] ** 2 for k in range(len(sigma_flux))], axis=0)
@@ -1327,13 +1319,13 @@ def compute_Stokes(data_array, error_array, data_mask, headers, FWHM=None, scale
* pol_eff[j] * pol_eff[j]
/ N / N
* ( * (
pol_eff[(j + 2) % 3] * np.cos(-2.0 * theta[(j + 2) % 3] + 2.0 * theta[j]) * (pol_flux_corr[(j + 1) % 3] - I_stokes) pol_eff[(j + 2) % 3] * np.cos(-2.0 * theta[(j + 2) % 3] + 2.0 * theta[j]) * (pol_flux_corr[(j + 1) % 3] - Stokes[0])
- pol_eff[(j + 1) % 3] * np.cos(-2.0 * theta[j] + 2.0 * theta[(j + 1) % 3]) * (pol_flux_corr[(j + 2) % 3] - I_stokes) - pol_eff[(j + 1) % 3] * np.cos(-2.0 * theta[j] + 2.0 * theta[(j + 1) % 3]) * (pol_flux_corr[(j + 2) % 3] - Stokes[0])
+ coeff_stokes_corr[0, j] * (np.sin(2.0 * theta[j]) * Q_stokes - np.cos(2 * theta[j]) * U_stokes) + coeff_stokes_corr[0, j] * (np.sin(2.0 * theta[j]) * Stokes[1] - np.cos(2 * theta[j]) * Stokes[2])
) )
) )
# Derivative of Q_stokes wrt theta_1, 2, 3 # Derivative of Stokes[1] wrt theta_1, 2, 3
for j in range(3): for j in range(3):
dIQU_dtheta[1, j] = ( dIQU_dtheta[1, j] = (
2.0 2.0
@@ -1345,12 +1337,12 @@ def compute_Stokes(data_array, error_array, data_mask, headers, FWHM=None, scale
pol_eff[(j + 2) % 3] * np.cos(-2.0 * theta[(j + 2) % 3] + 2.0 * theta[j]) pol_eff[(j + 2) % 3] * np.cos(-2.0 * theta[(j + 2) % 3] + 2.0 * theta[j])
- pol_eff[(j + 1) % 3] * np.cos(-2.0 * theta[j] + 2.0 * theta[(j + 1) % 3]) - pol_eff[(j + 1) % 3] * np.cos(-2.0 * theta[j] + 2.0 * theta[(j + 1) % 3])
) )
* Q_stokes * Stokes[1]
+ coeff_stokes_corr[1, j] * (np.sin(2.0 * theta[j]) * Q_stokes - np.cos(2 * theta[j]) * U_stokes) + coeff_stokes_corr[1, j] * (np.sin(2.0 * theta[j]) * Stokes[1] - np.cos(2 * theta[j]) * Stokes[2])
) )
) )
# Derivative of U_stokes wrt theta_1, 2, 3 # Derivative of Stokes[2] wrt theta_1, 2, 3
for j in range(3): for j in range(3):
dIQU_dtheta[2, j] = ( dIQU_dtheta[2, j] = (
2.0 2.0
@@ -1362,14 +1354,14 @@ def compute_Stokes(data_array, error_array, data_mask, headers, FWHM=None, scale
pol_eff[(j + 2) % 3] * np.cos(-2.0 * theta[(j + 2) % 3] + 2.0 * theta[j]) pol_eff[(j + 2) % 3] * np.cos(-2.0 * theta[(j + 2) % 3] + 2.0 * theta[j])
- pol_eff[(j + 1) % 3] * np.cos(-2.0 * theta[j] + 2.0 * theta[(j + 1) % 3]) - pol_eff[(j + 1) % 3] * np.cos(-2.0 * theta[j] + 2.0 * theta[(j + 1) % 3])
) )
* U_stokes * Stokes[2]
+ coeff_stokes_corr[2, j] * (np.sin(2.0 * theta[j]) * Q_stokes - np.cos(2 * theta[j]) * U_stokes) + coeff_stokes_corr[2, j] * (np.sin(2.0 * theta[j]) * Stokes[1] - np.cos(2 * theta[j]) * Stokes[2])
) )
) )
# Compute the uncertainty associated with the polarizers' orientation (see Kishimoto 1999) # Compute the uncertainty associated with the polarizers' orientation (see Kishimoto 1999)
s_IQU_axis = np.zeros(Stokes_cov.shape) s_IQU_axis = np.zeros(Stokes_cov.shape)
for i in range(Stokes_cov.shape[0]): for i in range(3):
s_IQU_axis[i, i] = np.sum([dIQU_dtheta[i, k] ** 2 * globals()["sigma_theta"][k] ** 2 for k in range(len(globals()["sigma_theta"]))], axis=0) s_IQU_axis[i, i] = np.sum([dIQU_dtheta[i, k] ** 2 * globals()["sigma_theta"][k] ** 2 for k in range(len(globals()["sigma_theta"]))], axis=0)
for j in [k for k in range(3) if k > i]: for j in [k for k in range(3) if k > i]:
s_IQU_axis[i, j] = np.sum( s_IQU_axis[i, j] = np.sum(
@@ -1386,15 +1378,13 @@ def compute_Stokes(data_array, error_array, data_mask, headers, FWHM=None, scale
header_stokes = pol_headers[0] header_stokes = pol_headers[0]
else: else:
all_I_stokes = np.zeros((np.unique(rotate).size, data_array.shape[1], data_array.shape[2])) all_Stokes = np.zeros((np.unique(rotate).size, 4, data_array.shape[1], data_array.shape[2]))
all_Q_stokes = np.zeros((np.unique(rotate).size, data_array.shape[1], data_array.shape[2])) all_Stokes_cov = np.zeros((np.unique(rotate).size, 4, 4, data_array.shape[1], data_array.shape[2]))
all_U_stokes = np.zeros((np.unique(rotate).size, data_array.shape[1], data_array.shape[2]))
all_Stokes_cov = np.zeros((np.unique(rotate).size, 3, 3, data_array.shape[1], data_array.shape[2]))
all_header_stokes = [{}] * np.unique(rotate).size all_header_stokes = [{}] * np.unique(rotate).size
for i, rot in enumerate(np.unique(rotate)): for i, rot in enumerate(np.unique(rotate)):
rot_mask = rotate == rot rot_mask = rotate == rot
all_I_stokes[i], all_Q_stokes[i], all_U_stokes[i], all_Stokes_cov[i], all_header_stokes[i] = compute_Stokes( all_Stokes[i], all_Stokes_cov[i], all_header_stokes[i] = compute_Stokes(
data_array[rot_mask], data_array[rot_mask],
error_array[rot_mask], error_array[rot_mask],
data_mask, data_mask,
@@ -1407,10 +1397,8 @@ def compute_Stokes(data_array, error_array, data_mask, headers, FWHM=None, scale
) )
all_exp = np.array([float(h["exptime"]) for h in all_header_stokes]) all_exp = np.array([float(h["exptime"]) for h in all_header_stokes])
I_stokes = np.sum([exp * I for exp, I in zip(all_exp, all_I_stokes)], axis=0) / all_exp.sum() Stokes = np.sum([exp * S for exp, S in zip(all_exp, all_Stokes)], axis=0) / all_exp.sum()
Q_stokes = np.sum([exp * Q for exp, Q in zip(all_exp, all_Q_stokes)], axis=0) / all_exp.sum() Stokes_cov = np.zeros((4, 4, Stokes.shape[1], Stokes.shape[2]))
U_stokes = np.sum([exp * U for exp, U in zip(all_exp, all_U_stokes)], axis=0) / all_exp.sum()
Stokes_cov = np.zeros((3, 3, I_stokes.shape[0], I_stokes.shape[1]))
for i in range(3): for i in range(3):
Stokes_cov[i, i] = np.sum([exp**2 * cov for exp, cov in zip(all_exp, all_Stokes_cov[:, i, i])], axis=0) / all_exp.sum() ** 2 Stokes_cov[i, i] = np.sum([exp**2 * cov for exp, cov in zip(all_exp, all_Stokes_cov[:, i, i])], axis=0) / all_exp.sum() ** 2
for j in [x for x in range(3) if x != i]: for j in [x for x in range(3) if x != i]:
@@ -1424,19 +1412,19 @@ def compute_Stokes(data_array, error_array, data_mask, headers, FWHM=None, scale
# Nan handling : # Nan handling :
fmax = np.finfo(np.float64).max fmax = np.finfo(np.float64).max
I_stokes[np.isnan(I_stokes)] = 0.0 Stokes[np.isnan(Stokes)] = 0.0
Q_stokes[I_stokes == 0.0] = 0.0 Stokes[1:][np.broadcast_to(Stokes[0] == 0.0, Stokes[1:].shape)] = 0.0
U_stokes[I_stokes == 0.0] = 0.0
Q_stokes[np.isnan(Q_stokes)] = 0.0
U_stokes[np.isnan(U_stokes)] = 0.0
Stokes_cov[np.isnan(Stokes_cov)] = fmax Stokes_cov[np.isnan(Stokes_cov)] = fmax
wcs_Stokes = add_stokes_axis_to_wcs(WCS(header_stokes), 0)
wcs_Stokes.array_shape = (4, *Stokes.shape[1:])[::-1]
header_stokes["NAXIS1"], header_stokes["NAXIS2"], header_stokes["NAXIS3"] = wcs_Stokes.array_shape[::-1]
for key, val in list(wcs_Stokes.to_header().items()) + list(zip(["PC1_1", "PC1_2", "PC1_3", "PC2_1", "PC3_1", "CUNIT1"], [1, 0, 0, 0, 0, "STOKES"])):
header_stokes[key] = val
if integrate: if integrate:
# Compute integrated values for P, PA before any rotation # Compute integrated values for P, PA before any rotation
mask = deepcopy(data_mask).astype(bool) mask = deepcopy(data_mask).astype(bool)
I_diluted = I_stokes[mask].sum() I_diluted, Q_diluted, U_diluted = (Stokes[:3] * np.broadcast_to(mask, Stokes[:3].shape)).sum(axis=(1, 2))
Q_diluted = Q_stokes[mask].sum()
U_diluted = U_stokes[mask].sum()
I_diluted_err = np.sqrt(np.sum(Stokes_cov[0, 0][mask])) I_diluted_err = np.sqrt(np.sum(Stokes_cov[0, 0][mask]))
Q_diluted_err = np.sqrt(np.sum(Stokes_cov[1, 1][mask])) Q_diluted_err = np.sqrt(np.sum(Stokes_cov[1, 1][mask]))
U_diluted_err = np.sqrt(np.sum(Stokes_cov[2, 2][mask])) U_diluted_err = np.sqrt(np.sum(Stokes_cov[2, 2][mask]))
@@ -1462,26 +1450,19 @@ def compute_Stokes(data_array, error_array, data_mask, headers, FWHM=None, scale
header_stokes["PA_int"] = (PA_diluted, "Integrated polarization angle") header_stokes["PA_int"] = (PA_diluted, "Integrated polarization angle")
header_stokes["sPA_int"] = (np.ceil(PA_diluted_err * 10.0) / 10.0, "Integrated polarization angle error") header_stokes["sPA_int"] = (np.ceil(PA_diluted_err * 10.0) / 10.0, "Integrated polarization angle error")
return I_stokes, Q_stokes, U_stokes, Stokes_cov, header_stokes, s_IQU_stat return Stokes, Stokes_cov, header_stokes, s_IQU_stat
def compute_pol(I_stokes, Q_stokes, U_stokes, Stokes_cov, header_stokes, s_IQU_stat=None): def compute_pol(Stokes, Stokes_cov, header_stokes, s_IQU_stat=None):
""" """
Compute the polarization degree (in %) and angle (in deg) and their Compute the polarization degree (in %) and angle (in deg) and their
respective errors from given Stokes parameters. respective errors from given Stokes parameters.
---------- ----------
Inputs: Inputs:
I_stokes : numpy.ndarray Stokes : numpy.ndarray
Image (2D floats) containing the Stokes parameters accounting for Stokes cube (3D floats) containing the Stokes I, Q, U, V fluxes
total intensity
Q_stokes : numpy.ndarray
Image (2D floats) containing the Stokes parameters accounting for
vertical/horizontal linear polarization intensity
U_stokes : numpy.ndarray
Image (2D floats) containing the Stokes parameters accounting for
+45/-45deg linear polarization intensity
Stokes_cov : numpy.ndarray Stokes_cov : numpy.ndarray
Covariance matrix of the Stokes parameters I, Q, U. Covariance matrix of the Stokes parameters I, Q, U, V.
header_stokes : astropy.fits.header.Header header_stokes : astropy.fits.header.Header
Header file associated with the Stokes fluxes. Header file associated with the Stokes fluxes.
---------- ----------
@@ -1504,49 +1485,49 @@ def compute_pol(I_stokes, Q_stokes, U_stokes, Stokes_cov, header_stokes, s_IQU_s
polarization angle. polarization angle.
""" """
# Polarization degree and angle computation # Polarization degree and angle computation
mask = I_stokes > 0.0 mask = Stokes[0] > 0.0
I_pol = np.zeros(I_stokes.shape) I_pol = np.zeros(Stokes[0].shape)
I_pol[mask] = np.sqrt(Q_stokes[mask] ** 2 + U_stokes[mask] ** 2) I_pol[mask] = np.sqrt(Stokes[1][mask] ** 2 + Stokes[2][mask] ** 2)
P = np.zeros(I_stokes.shape) P = np.zeros(Stokes[0].shape)
P[mask] = I_pol[mask] / I_stokes[mask] P[mask] = I_pol[mask] / Stokes[0][mask]
PA = np.zeros(I_stokes.shape) PA = np.zeros(Stokes[0].shape)
PA[mask] = (90.0 / np.pi) * np.arctan2(U_stokes[mask], Q_stokes[mask]) PA[mask] = (90.0 / np.pi) * np.arctan2(Stokes[2][mask], Stokes[1][mask])
if (P > 1).any(): if (P > 1).any():
print("WARNING : found {0:d} pixels for which P > 1".format(P[P > 1.0].size)) print("WARNING : found {0:d} pixels for which P > 1".format(P[P > 1.0].size))
# Associated errors # Associated errors
fmax = np.finfo(np.float64).max fmax = np.finfo(np.float64).max
s_P = np.ones(I_stokes.shape) * fmax s_P = np.ones(Stokes[0].shape) * fmax
s_PA = np.ones(I_stokes.shape) * fmax s_PA = np.ones(Stokes[0].shape) * fmax
# Propagate previously computed errors # Propagate previously computed errors
s_P[mask] = (1 / I_stokes[mask]) * np.sqrt( s_P[mask] = (1 / Stokes[0][mask]) * np.sqrt(
( (
Q_stokes[mask] ** 2 * Stokes_cov[1, 1][mask] Stokes[1][mask] ** 2 * Stokes_cov[1, 1][mask]
+ U_stokes[mask] ** 2 * Stokes_cov[2, 2][mask] + Stokes[2][mask] ** 2 * Stokes_cov[2, 2][mask]
+ 2.0 * Q_stokes[mask] * U_stokes[mask] * Stokes_cov[1, 2][mask] + 2.0 * Stokes[1][mask] * Stokes[2][mask] * Stokes_cov[1, 2][mask]
) )
/ (Q_stokes[mask] ** 2 + U_stokes[mask] ** 2) / (Stokes[1][mask] ** 2 + Stokes[2][mask] ** 2)
+ ((Q_stokes[mask] / I_stokes[mask]) ** 2 + (U_stokes[mask] / I_stokes[mask]) ** 2) * Stokes_cov[0, 0][mask] + ((Stokes[1][mask] / Stokes[0][mask]) ** 2 + (Stokes[2][mask] / Stokes[0][mask]) ** 2) * Stokes_cov[0, 0][mask]
- 2.0 * (Q_stokes[mask] / I_stokes[mask]) * Stokes_cov[0, 1][mask] - 2.0 * (Stokes[1][mask] / Stokes[0][mask]) * Stokes_cov[0, 1][mask]
- 2.0 * (U_stokes[mask] / I_stokes[mask]) * Stokes_cov[0, 2][mask] - 2.0 * (Stokes[2][mask] / Stokes[0][mask]) * Stokes_cov[0, 2][mask]
) )
s_PA[mask] = (90.0 / (np.pi * (Q_stokes[mask] ** 2 + U_stokes[mask] ** 2))) * np.sqrt( s_PA[mask] = (90.0 / (np.pi * (Stokes[1][mask] ** 2 + Stokes[2][mask] ** 2))) * np.sqrt(
U_stokes[mask] ** 2 * Stokes_cov[1, 1][mask] Stokes[2][mask] ** 2 * Stokes_cov[1, 1][mask]
+ Q_stokes[mask] ** 2 * Stokes_cov[2, 2][mask] + Stokes[1][mask] ** 2 * Stokes_cov[2, 2][mask]
- 2.0 * Q_stokes[mask] * U_stokes[mask] * Stokes_cov[1, 2][mask] - 2.0 * Stokes[1][mask] * Stokes[2][mask] * Stokes_cov[1, 2][mask]
) )
s_P[np.isnan(s_P)] = fmax s_P[np.isnan(s_P)] = fmax
s_PA[np.isnan(s_PA)] = fmax s_PA[np.isnan(s_PA)] = fmax
# Compute the total exposure time so that # Compute the total exposure time so that
# I_stokes*exp_tot = N_tot the total number of events # Stokes[0]*exp_tot = N_tot the total number of events
N_obs = I_stokes * float(header_stokes["exptime"]) N_obs = Stokes[0] * float(header_stokes["exptime"])
# Errors on P, PA supposing Poisson noise # Errors on P, PA supposing Poisson noise
s_P_P = np.ones(I_stokes.shape) * fmax s_P_P = np.ones(Stokes[0].shape) * fmax
s_PA_P = np.ones(I_stokes.shape) * fmax s_PA_P = np.ones(Stokes[0].shape) * fmax
maskP = np.logical_and(mask, P > 0.0) maskP = np.logical_and(mask, P > 0.0)
if s_IQU_stat is not None: if s_IQU_stat is not None:
# If IQU covariance matrix containing only statistical error is given propagate to P and PA # If IQU covariance matrix containing only statistical error is given propagate to P and PA
@@ -1554,25 +1535,25 @@ def compute_pol(I_stokes, Q_stokes, U_stokes, Stokes_cov, header_stokes, s_IQU_s
with warnings.catch_warnings(record=True) as _: with warnings.catch_warnings(record=True) as _:
s_P_P[maskP] = ( s_P_P[maskP] = (
P[maskP] P[maskP]
/ I_stokes[maskP] / Stokes[0][maskP]
* np.sqrt( * np.sqrt(
s_IQU_stat[0, 0][maskP] s_IQU_stat[0, 0][maskP]
- 2.0 / (I_stokes[maskP] * P[maskP] ** 2) * (Q_stokes[maskP] * s_IQU_stat[0, 1][maskP] + U_stokes[maskP] * s_IQU_stat[0, 2][maskP]) - 2.0 / (Stokes[0][maskP] * P[maskP] ** 2) * (Stokes[1][maskP] * s_IQU_stat[0, 1][maskP] + Stokes[2][maskP] * s_IQU_stat[0, 2][maskP])
+ 1.0 + 1.0
/ (I_stokes[maskP] ** 2 * P[maskP] ** 4) / (Stokes[0][maskP] ** 2 * P[maskP] ** 4)
* ( * (
Q_stokes[maskP] ** 2 * s_IQU_stat[1, 1][maskP] Stokes[1][maskP] ** 2 * s_IQU_stat[1, 1][maskP]
+ U_stokes[maskP] ** 2 * s_IQU_stat[2, 2][maskP] + 2.0 * Q_stokes[maskP] * U_stokes[maskP] * s_IQU_stat[1, 2][maskP] + Stokes[2][maskP] ** 2 * s_IQU_stat[2, 2][maskP] + 2.0 * Stokes[1][maskP] * Stokes[2][maskP] * s_IQU_stat[1, 2][maskP]
) )
) )
) )
s_PA_P[maskP] = ( s_PA_P[maskP] = (
90.0 90.0
/ (np.pi * I_stokes[maskP] ** 2 * P[maskP] ** 2) / (np.pi * Stokes[0][maskP] ** 2 * P[maskP] ** 2)
* ( * (
Q_stokes[maskP] ** 2 * s_IQU_stat[2, 2][maskP] Stokes[1][maskP] ** 2 * s_IQU_stat[2, 2][maskP]
+ U_stokes[maskP] ** 2 * s_IQU_stat[1, 1][maskP] + Stokes[2][maskP] ** 2 * s_IQU_stat[1, 1][maskP]
- 2.0 * Q_stokes[maskP] * U_stokes[maskP] * s_IQU_stat[1, 2][maskP] - 2.0 * Stokes[1][maskP] * Stokes[2][maskP] * s_IQU_stat[1, 2][maskP]
) )
) )
else: else:
@@ -1583,7 +1564,7 @@ def compute_pol(I_stokes, Q_stokes, U_stokes, Stokes_cov, header_stokes, s_IQU_s
# Catch expected "OverflowWarning" as wrong pixel have an overflowing error # Catch expected "OverflowWarning" as wrong pixel have an overflowing error
with warnings.catch_warnings(record=True) as _: with warnings.catch_warnings(record=True) as _:
mask2 = P**2 >= s_P_P**2 mask2 = P**2 >= s_P_P**2
debiased_P = np.zeros(I_stokes.shape) debiased_P = np.zeros(Stokes[0].shape)
debiased_P[mask2] = np.sqrt(P[mask2] ** 2 - s_P_P[mask2] ** 2) debiased_P[mask2] = np.sqrt(P[mask2] ** 2 - s_P_P[mask2] ** 2)
if (debiased_P > 1.0).any(): if (debiased_P > 1.0).any():
@@ -1600,24 +1581,17 @@ def compute_pol(I_stokes, Q_stokes, U_stokes, Stokes_cov, header_stokes, s_IQU_s
return P, debiased_P, s_P, s_P_P, PA, s_PA, s_PA_P return P, debiased_P, s_P, s_P_P, PA, s_PA, s_PA_P
def rotate_Stokes(I_stokes, Q_stokes, U_stokes, Stokes_cov, data_mask, header_stokes, s_IQU_stat=None, SNRi_cut=None): def rotate_Stokes(Stokes, Stokes_cov, data_mask, header_stokes, s_IQU_stat=None, SNRi_cut=None):
""" """
Use scipy.ndimage.rotate to rotate I_stokes to an angle, and a rotation Use scipy.ndimage.rotate to rotate the Stokes cube to an angle, and a rotation
matrix to rotate Q, U of a given angle in degrees and update header matrix to rotate Q, U of a given angle in degrees and update header
orientation keyword. orientation keyword.
---------- ----------
Inputs: Inputs:
I_stokes : numpy.ndarray Stokes : numpy.ndarray
Image (2D floats) containing the Stokes parameters accounting for Stokes cube (3D floats) containing the Stokes I, Q, U, V fluxes.
total intensity
Q_stokes : numpy.ndarray
Image (2D floats) containing the Stokes parameters accounting for
vertical/horizontal linear polarization intensity
U_stokes : numpy.ndarray
Image (2D floats) containing the Stokes parameters accounting for
+45/-45deg linear polarization intensity
Stokes_cov : numpy.ndarray Stokes_cov : numpy.ndarray
Covariance matrix of the Stokes parameters I, Q, U. Covariance matrix of the Stokes parameters I, Q, U, V.
data_mask : numpy.ndarray data_mask : numpy.ndarray
2D boolean array delimiting the data to work on. 2D boolean array delimiting the data to work on.
header_stokes : astropy.fits.header.Header header_stokes : astropy.fits.header.Header
@@ -1628,17 +1602,10 @@ def rotate_Stokes(I_stokes, Q_stokes, U_stokes, Stokes_cov, data_mask, header_st
Defaults to None. Defaults to None.
---------- ----------
Returns: Returns:
new_I_stokes : numpy.ndarray Stokes : numpy.ndarray
Rotated mage (2D floats) containing the rotated Stokes parameters Rotated Stokes cube (3D floats) containing the rotated Stokes I, Q, U, V fluxes.
accounting for total intensity
new_Q_stokes : numpy.ndarray
Rotated mage (2D floats) containing the rotated Stokes parameters
accounting for vertical/horizontal linear polarization intensity
new_U_stokes : numpy.ndarray
Rotated image (2D floats) containing the rotated Stokes parameters
accounting for +45/-45deg linear polarization intensity.
new_Stokes_cov : numpy.ndarray new_Stokes_cov : numpy.ndarray
Updated covariance matrix of the Stokes parameters I, Q, U. Updated covariance matrix of the Stokes parameters I, Q, U, V.
new_header_stokes : astropy.fits.header.Header new_header_stokes : astropy.fits.header.Header
Updated Header file associated with the Stokes fluxes accounting Updated Header file associated with the Stokes fluxes accounting
for the new orientation angle. for the new orientation angle.
@@ -1647,51 +1614,38 @@ def rotate_Stokes(I_stokes, Q_stokes, U_stokes, Stokes_cov, data_mask, header_st
""" """
# Apply cuts # Apply cuts
if SNRi_cut is not None: if SNRi_cut is not None:
SNRi = I_stokes / np.sqrt(Stokes_cov[0, 0]) SNRi = Stokes[0] / np.sqrt(Stokes_cov[0, 0])
mask = SNRi < SNRi_cut mask = SNRi < SNRi_cut
eps = 1e-5 eps = 1e-5
for i in range(I_stokes.shape[0]): for i in range(4):
for j in range(I_stokes.shape[1]): Stokes[i][mask] = eps * np.sqrt(Stokes_cov[i, i][mask])
if mask[i, j]:
I_stokes[i, j] = eps * np.sqrt(Stokes_cov[0, 0][i, j])
Q_stokes[i, j] = eps * np.sqrt(Stokes_cov[1, 1][i, j])
U_stokes[i, j] = eps * np.sqrt(Stokes_cov[2, 2][i, j])
# Rotate I_stokes, Q_stokes, U_stokes using rotation matrix # Rotate Stokes I, Q, U using rotation matrix
ang = -float(header_stokes["ORIENTAT"]) ang = -float(header_stokes["ORIENTAT"])
alpha = np.pi / 180.0 * ang alpha = np.pi / 180.0 * ang
mrot = np.array([[1.0, 0.0, 0.0], [0.0, np.cos(2.0 * alpha), np.sin(2.0 * alpha)], [0, -np.sin(2.0 * alpha), np.cos(2.0 * alpha)]]) mrot = np.array([[1.0, 0.0, 0.0], [0.0, np.cos(2.0 * alpha), np.sin(2.0 * alpha)], [0, -np.sin(2.0 * alpha), np.cos(2.0 * alpha)]])
old_center = np.array(I_stokes.shape) / 2 old_center = np.array(Stokes.shape[1:]) / 2
shape = np.fix(np.array(I_stokes.shape) * np.sqrt(2.5)).astype(int) shape = np.fix(np.array(Stokes.shape[1:]) * np.sqrt(2.5)).astype(int)
new_center = np.array(shape) / 2 new_center = np.array(shape) / 2
I_stokes = zeropad(I_stokes, shape) Stokes = zeropad(Stokes, (*Stokes.shape[:-2], *shape))
Q_stokes = zeropad(Q_stokes, shape)
U_stokes = zeropad(U_stokes, shape)
data_mask = zeropad(data_mask, shape) data_mask = zeropad(data_mask, shape)
Stokes_cov = zeropad(Stokes_cov, [*Stokes_cov.shape[:-2], *shape]) Stokes_cov = zeropad(Stokes_cov, (*Stokes_cov.shape[:-2], *shape))
new_I_stokes = np.zeros(shape) new_Stokes = np.zeros((*Stokes.shape[:-2], *shape))
new_Q_stokes = np.zeros(shape)
new_U_stokes = np.zeros(shape)
new_Stokes_cov = np.zeros((*Stokes_cov.shape[:-2], *shape)) new_Stokes_cov = np.zeros((*Stokes_cov.shape[:-2], *shape))
# Rotate original images using scipy.ndimage.rotate # Rotate original images using scipy.ndimage.rotate
new_I_stokes = sc_rotate(I_stokes, ang, order=1, reshape=False, cval=0.0) new_Stokes = sc_rotate(Stokes, ang, axes=(1, 2), order=1, reshape=False, cval=0.0)
new_Q_stokes = sc_rotate(Q_stokes, ang, order=1, reshape=False, cval=0.0)
new_U_stokes = sc_rotate(U_stokes, ang, order=1, reshape=False, cval=0.0)
new_data_mask = sc_rotate(data_mask.astype(float) * 10.0, ang, order=1, reshape=False, cval=0.0) new_data_mask = sc_rotate(data_mask.astype(float) * 10.0, ang, order=1, reshape=False, cval=0.0)
new_data_mask[new_data_mask < 1.0] = 0.0 new_data_mask[new_data_mask < 1.0] = 0.0
new_data_mask = new_data_mask.astype(bool) new_data_mask = new_data_mask.astype(bool)
for i in range(3): new_Stokes_cov = np.abs(sc_rotate(Stokes_cov, ang, axes=(2, 3), order=1, reshape=False, cval=0.0))
for j in range(3):
new_Stokes_cov[i, j] = sc_rotate(Stokes_cov[i, j], ang, order=1, reshape=False, cval=0.0)
new_Stokes_cov[i, i] = np.abs(new_Stokes_cov[i, i])
for i in range(shape[0]): for i in range(shape[0]):
for j in range(shape[1]): for j in range(shape[1]):
new_I_stokes[i, j], new_Q_stokes[i, j], new_U_stokes[i, j] = np.dot(mrot, np.array([new_I_stokes[i, j], new_Q_stokes[i, j], new_U_stokes[i, j]])).T new_Stokes[:3, i, j] = np.dot(mrot, new_Stokes[:3, i, j])
new_Stokes_cov[:, :, i, j] = np.dot(mrot, np.dot(new_Stokes_cov[:, :, i, j], mrot.T)) new_Stokes_cov[:3, :3, i, j] = np.dot(mrot, np.dot(new_Stokes_cov[:3, :3, i, j], mrot.T))
if s_IQU_stat is not None: if s_IQU_stat is not None:
s_IQU_stat = zeropad(s_IQU_stat, [*s_IQU_stat.shape[:-2], *shape]) s_IQU_stat = zeropad(s_IQU_stat, [*s_IQU_stat.shape[:-2], *shape])
@@ -1702,16 +1656,16 @@ def rotate_Stokes(I_stokes, Q_stokes, U_stokes, Stokes_cov, data_mask, header_st
new_s_IQU_stat[i, i] = np.abs(new_s_IQU_stat[i, i]) new_s_IQU_stat[i, i] = np.abs(new_s_IQU_stat[i, i])
for i in range(shape[0]): for i in range(shape[0]):
for j in range(shape[1]): for j in range(shape[1]):
new_s_IQU_stat[:, :, i, j] = np.dot(mrot, np.dot(new_s_IQU_stat[:, :, i, j], mrot.T)) new_s_IQU_stat[:3, :3, i, j] = np.dot(mrot, np.dot(new_s_IQU_stat[:3, :3, i, j], mrot.T))
# Update headers to new angle # Update headers to new angle
mrot = np.array([[np.cos(-alpha), -np.sin(-alpha)], [np.sin(-alpha), np.cos(-alpha)]]) mrot = np.array([[np.cos(-alpha), -np.sin(-alpha)], [np.sin(-alpha), np.cos(-alpha)]])
new_header_stokes = deepcopy(header_stokes) new_header_stokes = deepcopy(header_stokes)
new_wcs = WCS(header_stokes).celestial.deepcopy() new_wcs = WCS(header_stokes).deepcopy()
new_wcs.wcs.pc = np.dot(mrot, new_wcs.wcs.pc) new_wcs.wcs.pc[1:] = np.dot(mrot, new_wcs.wcs.pc[1:])
new_wcs.wcs.crpix = np.dot(mrot, new_wcs.wcs.crpix - old_center[::-1]) + new_center[::-1] new_wcs.wcs.crpix[1:] = np.dot(mrot, new_wcs.wcs.crpix[1:] - old_center[::-1]) + new_center[::-1]
new_wcs.wcs.set() new_wcs.wcs.set()
for key, val in new_wcs.to_header().items(): for key, val in new_wcs.to_header().items():
new_header_stokes.set(key, val) new_header_stokes.set(key, val)
@@ -1720,18 +1674,13 @@ def rotate_Stokes(I_stokes, Q_stokes, U_stokes, Stokes_cov, data_mask, header_st
# Nan handling : # Nan handling :
fmax = np.finfo(np.float64).max fmax = np.finfo(np.float64).max
new_I_stokes[np.isnan(new_I_stokes)] = 0.0 new_Stokes[np.isnan(new_Stokes)] = 0.0
new_Q_stokes[new_I_stokes == 0.0] = 0.0 new_Stokes[1:][np.broadcast_to(new_Stokes[0] == 0.0, new_Stokes[1:].shape)] = 0.0
new_U_stokes[new_I_stokes == 0.0] = 0.0
new_Q_stokes[np.isnan(new_Q_stokes)] = 0.0
new_U_stokes[np.isnan(new_U_stokes)] = 0.0
new_Stokes_cov[np.isnan(new_Stokes_cov)] = fmax new_Stokes_cov[np.isnan(new_Stokes_cov)] = fmax
# Compute updated integrated values for P, PA # Compute updated integrated values for P, PA
mask = deepcopy(new_data_mask).astype(bool) mask = deepcopy(new_data_mask).astype(bool)
I_diluted = new_I_stokes[mask].sum() I_diluted, Q_diluted, U_diluted = (new_Stokes[:3] * np.broadcast_to(mask, new_Stokes[:3].shape)).sum(axis=(1, 2))
Q_diluted = new_Q_stokes[mask].sum()
U_diluted = new_U_stokes[mask].sum()
I_diluted_err = np.sqrt(np.sum(new_Stokes_cov[0, 0][mask])) I_diluted_err = np.sqrt(np.sum(new_Stokes_cov[0, 0][mask]))
Q_diluted_err = np.sqrt(np.sum(new_Stokes_cov[1, 1][mask])) Q_diluted_err = np.sqrt(np.sum(new_Stokes_cov[1, 1][mask]))
U_diluted_err = np.sqrt(np.sum(new_Stokes_cov[2, 2][mask])) U_diluted_err = np.sqrt(np.sum(new_Stokes_cov[2, 2][mask]))
@@ -1758,9 +1707,9 @@ def rotate_Stokes(I_stokes, Q_stokes, U_stokes, Stokes_cov, data_mask, header_st
new_header_stokes["sPA_int"] = (np.ceil(PA_diluted_err * 10.0) / 10.0, "Integrated polarization angle error") new_header_stokes["sPA_int"] = (np.ceil(PA_diluted_err * 10.0) / 10.0, "Integrated polarization angle error")
if s_IQU_stat is not None: if s_IQU_stat is not None:
return new_I_stokes, new_Q_stokes, new_U_stokes, new_Stokes_cov, new_data_mask, new_header_stokes, new_s_IQU_stat return new_Stokes, new_Stokes_cov, new_data_mask, new_header_stokes, new_s_IQU_stat
else: else:
return new_I_stokes, new_Q_stokes, new_U_stokes, new_Stokes_cov, new_data_mask, new_header_stokes return new_Stokes, new_Stokes_cov, new_data_mask, new_header_stokes
def rotate_data(data_array, error_array, data_mask, headers): def rotate_data(data_array, error_array, data_mask, headers):