10 Commits

5 changed files with 391 additions and 268 deletions

View File

@@ -40,12 +40,12 @@ def main(target=None, proposal_id=None, infiles=None, output_dir="./data", crop=
     display_crop = False
     # Background estimation
-    error_sub_type = "scott"  # sqrt, sturges, rice, scott, freedman-diaconis (default) or shape (example (51, 51))
-    subtract_error = 1.50
+    error_sub_type = "freedman-diaconis"  # sqrt, sturges, rice, scott, freedman-diaconis (default) or shape (example (51, 51))
+    subtract_error = 1.33
     display_bkg = True
     # Data binning
-    pxsize = 0.10
+    pxsize = 0.05
     pxscale = "arcsec"  # pixel, arcsec or full
     rebin_operation = "sum"  # sum or average
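As a reference for this config change, here is a minimal numpy sketch contrasting the two histogram binning rules (`scott` vs `freedman-diaconis`) named in `error_sub_type`; the sample array is a stand-in, not pipeline data:

```python
import numpy as np

rng = np.random.default_rng(0)
sample = rng.normal(size=4096)  # stand-in for background pixel values
n = sample.size

# Scott's rule: bin width scales with the sample standard deviation.
width_scott = 3.5 * sample.std() / n ** (1 / 3)
# Freedman-Diaconis: scales with the interquartile range instead,
# which is more robust to outliers such as bright sources.
q75, q25 = np.percentile(sample, [75, 25])
width_fd = 2.0 * (q75 - q25) / n ** (1 / 3)

print(f"Scott: {width_scott:.4f}  Freedman-Diaconis: {width_fd:.4f}")
# numpy exposes both rules by name:
counts, edges = np.histogram(sample, bins="fd")
```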
@@ -59,7 +59,7 @@ def main(target=None, proposal_id=None, infiles=None, output_dir="./data", crop=
     # Smoothing
     smoothing_function = "combine"  # gaussian_after, weighted_gaussian_after, gaussian, weighted_gaussian or combine
-    smoothing_FWHM = 0.150  # If None, no smoothing is done
+    smoothing_FWHM = 0.075  # If None, no smoothing is done
     smoothing_scale = "arcsec"  # pixel or arcsec
     # Rotation
@@ -117,10 +117,9 @@ def main(target=None, proposal_id=None, infiles=None, output_dir="./data", crop=
     figtype = "_".join([figtype, "not_aligned"] if figtype != "" else ["not_aligned"])
     # Crop data to remove outside blank margins.
-    data_array, error_array, headers = proj_red.crop_array(
-        data_array, headers, step=5, null_val=0.0, inside=True, display=display_crop, savename=figname, plots_folder=plots_folder
+    data_array, error_array, data_mask, headers = proj_red.crop_array(
+        data_array, headers, step=5, null_val=0.0, crop=True, inside=True, display=display_crop, savename=figname, plots_folder=plots_folder
     )
-    data_mask = np.ones(data_array[0].shape, dtype=bool)

     # Deconvolve data using Richardson-Lucy iterative algorithm with a gaussian PSF of given FWHM.
     if deconvolve:
@@ -217,26 +216,26 @@ def main(target=None, proposal_id=None, infiles=None, output_dir="./data", crop=
     # FWHM of FOC have been estimated at about 0.03" across 1500-5000 Angstrom band, which is about 2 detector pixels wide
     # see Jedrzejewski, R.; Nota, A.; Hack, W. J., A Comparison Between FOC and WFPC2
     # Bibcode : 1995chst.conf...10J
-    I_stokes, Q_stokes, U_stokes, Stokes_cov, header_stokes = proj_red.compute_Stokes(
+    I_stokes, Q_stokes, U_stokes, Stokes_cov, Stokes_stat_cov, header_stokes = proj_red.compute_Stokes(
         data_array, error_array, data_mask, headers, FWHM=smoothing_FWHM, scale=smoothing_scale, smoothing=smoothing_function, transmitcorr=transmitcorr
     )
-    I_bkg, Q_bkg, U_bkg, S_cov_bkg, header_bkg = proj_red.compute_Stokes(
+    I_bkg, Q_bkg, U_bkg, S_cov_bkg, S_stat_cov_bkg, header_bkg = proj_red.compute_Stokes(
         background, background_error, np.array(True).reshape(1, 1), headers, FWHM=None, scale=smoothing_scale, smoothing=smoothing_function, transmitcorr=False
     )

     # Step 3:
     # Rotate images to have North up
     if rotate_North:
-        I_stokes, Q_stokes, U_stokes, Stokes_cov, data_mask, header_stokes = proj_red.rotate_Stokes(
-            I_stokes, Q_stokes, U_stokes, Stokes_cov, data_mask, header_stokes, SNRi_cut=None
+        I_stokes, Q_stokes, U_stokes, Stokes_cov, Stokes_stat_cov, data_mask, header_stokes = proj_red.rotate_Stokes(
+            I_stokes, Q_stokes, U_stokes, Stokes_cov, Stokes_stat_cov, data_mask, header_stokes, SNRi_cut=None
         )
-        I_bkg, Q_bkg, U_bkg, S_cov_bkg, data_mask_bkg, header_bkg = proj_red.rotate_Stokes(
-            I_bkg, Q_bkg, U_bkg, S_cov_bkg, np.array(True).reshape(1, 1), header_bkg, SNRi_cut=None
+        I_bkg, Q_bkg, U_bkg, S_cov_bkg, S_stat_cov_bkg, data_mask_bkg, header_bkg = proj_red.rotate_Stokes(
+            I_bkg, Q_bkg, U_bkg, S_cov_bkg, S_stat_cov_bkg, np.array(True).reshape(1, 1), header_bkg, SNRi_cut=None
         )

     # Compute polarimetric parameters (polarization degree and angle).
-    P, debiased_P, s_P, s_P_P, PA, s_PA, s_PA_P = proj_red.compute_pol(I_stokes, Q_stokes, U_stokes, Stokes_cov, header_stokes)
-    P_bkg, debiased_P_bkg, s_P_bkg, s_P_P_bkg, PA_bkg, s_PA_bkg, s_PA_P_bkg = proj_red.compute_pol(I_bkg, Q_bkg, U_bkg, S_cov_bkg, header_bkg)
+    P, debiased_P, s_P, s_P_P, PA, s_PA, s_PA_P = proj_red.compute_pol(I_stokes, Q_stokes, U_stokes, Stokes_cov, Stokes_stat_cov, header_stokes)
+    P_bkg, debiased_P_bkg, s_P_bkg, s_P_P_bkg, PA_bkg, s_PA_bkg, s_PA_P_bkg = proj_red.compute_pol(I_bkg, Q_bkg, U_bkg, S_cov_bkg, S_stat_cov_bkg, header_bkg)

     # Step 4:
     # Save image to FITS.
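The `compute_pol` call above derives the polarization degree and angle from the Stokes parameters. A worked sketch of the underlying formulas (the error and covariance propagation the pipeline also performs is omitted):

```python
import numpy as np

def pol_deg_ang(I, Q, U):
    """Polarization degree P and angle PA (deg) from Stokes I, Q, U."""
    P = np.sqrt(Q**2 + U**2) / I
    PA = 0.5 * np.degrees(np.arctan2(U, Q))  # same as (90 / pi) * arctan2(U, Q)
    return P, PA

print(pol_deg_ang(1.0, 0.02, 0.01))  # ~ (0.0224, 13.3)
```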
@@ -246,6 +245,7 @@ def main(target=None, proposal_id=None, infiles=None, output_dir="./data", crop=
         Q_stokes,
         U_stokes,
         Stokes_cov,
+        Stokes_stat_cov,
         P,
         debiased_P,
         s_P,

View File

@@ -106,7 +106,23 @@ def get_obs_data(infiles, data_folder="", compute_flux=False):
 def save_Stokes(
-    I_stokes, Q_stokes, U_stokes, Stokes_cov, P, debiased_P, s_P, s_P_P, PA, s_PA, s_PA_P, header_stokes, data_mask, filename, data_folder="", return_hdul=False
+    I_stokes,
+    Q_stokes,
+    U_stokes,
+    Stokes_cov,
+    Stokes_stat_cov,
+    P,
+    debiased_P,
+    s_P,
+    s_P_P,
+    PA,
+    s_PA,
+    s_PA_P,
+    header_stokes,
+    data_mask,
+    filename,
+    data_folder="",
+    return_hdul=False,
 ):
     """
     Save computed polarimetry parameters to a single fits file,
@@ -186,11 +202,15 @@ def save_Stokes(
         s_PA_P = s_PA_P[vertex[2] : vertex[3], vertex[0] : vertex[1]]
         new_Stokes_cov = np.zeros((*Stokes_cov.shape[:-2], *shape[::-1]))
+        new_Stokes_stat_cov = np.zeros((*Stokes_stat_cov.shape[:-2], *shape[::-1]))
         for i in range(3):
             for j in range(3):
                 Stokes_cov[i, j][(1 - data_mask).astype(bool)] = 0.0
                 new_Stokes_cov[i, j] = Stokes_cov[i, j][vertex[2] : vertex[3], vertex[0] : vertex[1]]
+                Stokes_stat_cov[i, j][(1 - data_mask).astype(bool)] = 0.0
+                new_Stokes_stat_cov[i, j] = Stokes_stat_cov[i, j][vertex[2] : vertex[3], vertex[0] : vertex[1]]
         Stokes_cov = new_Stokes_cov
+        Stokes_stat_cov = new_Stokes_stat_cov
         data_mask = data_mask[vertex[2] : vertex[3], vertex[0] : vertex[1]]
         data_mask = data_mask.astype(float, copy=False)
@@ -210,18 +230,19 @@ def save_Stokes(
         [Q_stokes, "Q_stokes"],
         [U_stokes, "U_stokes"],
         [Stokes_cov, "IQU_cov_matrix"],
+        [Stokes_stat_cov, "IQU_stat_cov_matrix"],
         [P, "Pol_deg"],
         [debiased_P, "Pol_deg_debiased"],
         [s_P, "Pol_deg_err"],
-        [s_P_P, "Pol_deg_err_Poisson_noise"],
+        [s_P_P, "Pol_deg_stat_err"],
         [PA, "Pol_ang"],
         [s_PA, "Pol_ang_err"],
-        [s_PA_P, "Pol_ang_err_Poisson_noise"],
+        [s_PA_P, "Pol_ang_stat_err"],
         [data_mask, "Data_mask"],
     ]:
         hdu_header = header.copy()
         hdu_header["datatype"] = name
-        if not name == "IQU_cov_matrix":
+        if not name[-10:] == "cov_matrix":
             data[(1 - data_mask).astype(bool)] = 0.0
         hdu = fits.ImageHDU(data=data, header=hdu_header)
         hdu.name = name
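The loop above tags every extension with a `datatype` card and a FITS extension name, so HDUs can later be fetched as `hdul["I_STOKES"]`; the relaxed `name[-10:] == "cov_matrix"` guard now exempts both covariance cubes from the zero-masking. A minimal astropy sketch of the same pattern (arrays and header cards here are placeholders):

```python
import numpy as np
from astropy.io import fits

data = np.zeros((8, 8))
header = fits.Header()
header["TELESCOP"] = "HST"

hdul = fits.HDUList([fits.PrimaryHDU(header=header)])
for arr, name in [(data, "I_stokes"), (data, "Q_stokes")]:
    hdu_header = header.copy()
    hdu_header["datatype"] = name   # same convention as save_Stokes
    hdu = fits.ImageHDU(data=arr, header=hdu_header)
    hdu.name = name                 # extension names are case-insensitive
    hdul.append(hdu)

print([h.name for h in hdul])       # ['PRIMARY', 'I_STOKES', 'Q_STOKES']
```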

View File

@@ -360,7 +360,7 @@ def polarization_map(
     if fig is None:
         ratiox = max(int(stkI.shape[1] / (stkI.shape[0])), 1)
         ratioy = max(int((stkI.shape[0]) / stkI.shape[1]), 1)
-        fig = plt.figure(figsize=(7 * ratiox, 7 * ratioy), layout="constrained")
+        fig = plt.figure(figsize=(8 * ratiox, 8 * ratioy), layout="constrained")
     if ax is None:
         ax = fig.add_subplot(111, projection=wcs)
         ax.set(aspect="equal", fc="k")  # , ylim=[-0.05 * stkI.shape[0], 1.05 * stkI.shape[0]])
@@ -435,33 +435,33 @@ def polarization_map(
         else:
             vmin, vmax = 1.0 / 2.0 * np.median(np.sqrt(stk_cov[0, 0][stkI > 0.0]) * convert_flux), np.max(stkI[stkI > 0.0] * convert_flux)
         pfmax = (stkI[stkI > 0.0] * pol[stkI > 0.0] * convert_flux).max()
-        im = ax.imshow(stkI * convert_flux * pol, norm=LogNorm(vmin, vmax), aspect="equal", cmap=kwargs["cmap"], alpha=1.0)
+        im = ax.imshow(stkI * convert_flux * pol, norm=LogNorm(vmin, vmax), aspect="equal", cmap=kwargs["cmap"], alpha=1.0 - 0.75 * (pol < pol_err))
         fig.colorbar(im, ax=ax, aspect=50, shrink=0.60, pad=0.025, label=r"$F_{\lambda} \cdot P$ [$ergs \cdot cm^{-2} \cdot s^{-1} \cdot \AA^{-1}$]")
         # levelsPf = np.linspace(0.060, 0.50, 5) * pfmax
-        levelsPf = np.array([1.73, 13.0, 33.0, 66.0]) / 100.0 * pfmax
+        levelsPf = np.array([13.0, 33.0, 66.0]) / 100.0 * pfmax
         print("Polarized flux density contour levels : ", levelsPf)
         ax.contour(stkI * convert_flux * pol, levels=levelsPf, colors="grey", linewidths=0.5)
     elif display.lower() in ["p", "pol", "pol_deg"]:
         # Display polarization degree map
         display = "p"
-        vmin, vmax = 0.0, min(pol[np.isfinite(pol)].max(), 1.0) * 100.0
-        im = ax.imshow(pol * 100.0, vmin=vmin, vmax=vmax, aspect="equal", cmap=kwargs["cmap"], alpha=1.0)
+        vmin, vmax = 0.0, min(pol[pol > pol_err].max(), 1.0) * 100.0
+        im = ax.imshow(pol * 100.0, vmin=vmin, vmax=vmax, aspect="equal", cmap=kwargs["cmap"], alpha=1.0 - 0.75 * (pol < pol_err))
         fig.colorbar(im, ax=ax, aspect=50, shrink=0.60, pad=0.025, label=r"$P$ [%]")
     elif display.lower() in ["pa", "pang", "pol_ang"]:
         # Display polarization angle map
         display = "pa"
         vmin, vmax = 0.0, 180.0
-        im = ax.imshow(princ_angle(pang), vmin=vmin, vmax=vmax, aspect="equal", cmap=kwargs["cmap"], alpha=1.0)
+        im = ax.imshow(princ_angle(pang), vmin=vmin, vmax=vmax, aspect="equal", cmap=kwargs["cmap"], alpha=1.0 - 0.75 * (pol < pol_err))
         fig.colorbar(im, ax=ax, aspect=50, shrink=0.60, pad=0.025, label=r"$\theta_P$ [°]")
     elif display.lower() in ["s_p", "pol_err", "pol_deg_err"]:
         # Display polarization degree error map
         display = "s_p"
         if (SNRp > P_cut).any():
             vmin, vmax = 0.0, np.max([pol_err[SNRp > P_cut].max(), 1.0]) * 100.0
-            im = ax.imshow(pol_err * 100.0, vmin=vmin, vmax=vmax, aspect="equal", cmap="inferno_r", alpha=1.0)
+            im = ax.imshow(pol_err * 100.0, vmin=vmin, vmax=vmax, aspect="equal", cmap="inferno_r", alpha=1.0 - 0.75 * (pol < pol_err))
         else:
             vmin, vmax = 0.0, 100.0
-            im = ax.imshow(pol_err * 100.0, vmin=vmin, vmax=vmax, aspect="equal", cmap="inferno_r", alpha=1.0)
+            im = ax.imshow(pol_err * 100.0, vmin=vmin, vmax=vmax, aspect="equal", cmap="inferno_r", alpha=1.0 - 0.75 * (pol < pol_err))
         fig.colorbar(im, ax=ax, aspect=50, shrink=0.60, pad=0.025, label=r"$\sigma_P$ [%]")
     elif display.lower() in ["s_i", "i_err"]:
         # Display intensity error map
@@ -471,39 +471,41 @@ def polarization_map(
                 1.0 / 2.0 * np.median(np.sqrt(stk_cov[0, 0][stk_cov[0, 0] > 0.0]) * convert_flux),
                 np.max(np.sqrt(stk_cov[0, 0][stk_cov[0, 0] > 0.0]) * convert_flux),
             )
-            im = ax.imshow(np.sqrt(stk_cov[0, 0]) * convert_flux, norm=LogNorm(vmin, vmax), aspect="equal", cmap="inferno_r", alpha=1.0)
+            im = ax.imshow(
+                np.sqrt(stk_cov[0, 0]) * convert_flux, norm=LogNorm(vmin, vmax), aspect="equal", cmap="inferno_r", alpha=1.0 - 0.75 * (pol < pol_err)
+            )
         else:
-            im = ax.imshow(np.sqrt(stk_cov[0, 0]) * convert_flux, aspect="equal", cmap=kwargs["cmap"], alpha=1.0)
+            im = ax.imshow(np.sqrt(stk_cov[0, 0]) * convert_flux, aspect="equal", cmap=kwargs["cmap"], alpha=1.0 - 0.75 * (pol < pol_err))
         fig.colorbar(im, ax=ax, aspect=50, shrink=0.60, pad=0.025, label=r"$\sigma_I$ [$ergs \cdot cm^{-2} \cdot s^{-1} \cdot \AA^{-1}$]")
     elif display.lower() in ["snri"]:
         # Display I_stokes signal-to-noise map
         display = "snri"
         vmin, vmax = 0.0, np.max(SNRi[np.isfinite(SNRi)])
-        if vmax * 0.99 > SNRi_cut:
-            im = ax.imshow(SNRi, vmin=vmin, vmax=vmax, aspect="equal", cmap=kwargs["cmap"], alpha=1.0)
-            levelsSNRi = np.linspace(SNRi_cut, vmax * 0.99, 5).astype(int)
+        if vmax * 0.99 > SNRi_cut + 3:
+            im = ax.imshow(SNRi, vmin=vmin, vmax=vmax, aspect="equal", cmap=kwargs["cmap"])
+            levelsSNRi = np.linspace(SNRi_cut, vmax * 0.99, 3).astype(int)
             print("SNRi contour levels : ", levelsSNRi)
             ax.contour(SNRi, levels=levelsSNRi, colors="grey", linewidths=0.5)
         else:
-            im = ax.imshow(SNRi, aspect="equal", cmap=kwargs["cmap"], alpha=1.0)
+            im = ax.imshow(SNRi, aspect="equal", cmap=kwargs["cmap"])
         fig.colorbar(im, ax=ax, aspect=50, shrink=0.60, pad=0.025, label=r"$I_{Stokes}/\sigma_{I}$")
     elif display.lower() in ["snr", "snrp"]:
         # Display polarization degree signal-to-noise map
         display = "snrp"
         vmin, vmax = 0.0, np.max(SNRp[np.isfinite(SNRp)])
-        if vmax * 0.99 > SNRp_cut:
-            im = ax.imshow(SNRp, vmin=vmin, vmax=vmax, aspect="equal", cmap=kwargs["cmap"], alpha=1.0)
-            levelsSNRp = np.linspace(P_cut, vmax * 0.99, 5).astype(int)
+        if vmax * 0.99 > SNRp_cut + 3:
+            im = ax.imshow(SNRp, vmin=vmin, vmax=vmax, aspect="equal", cmap=kwargs["cmap"])
+            levelsSNRp = np.linspace(SNRp_cut, vmax * 0.99, 3).astype(int)
             print("SNRp contour levels : ", levelsSNRp)
             ax.contour(SNRp, levels=levelsSNRp, colors="grey", linewidths=0.5)
         else:
-            im = ax.imshow(SNRp, aspect="equal", cmap=kwargs["cmap"], alpha=1.0)
+            im = ax.imshow(SNRp, aspect="equal", cmap=kwargs["cmap"])
         fig.colorbar(im, ax=ax, aspect=50, shrink=0.60, pad=0.025, label=r"$P/\sigma_{P}$")
     elif display.lower() in ["conf", "confp"]:
         # Display polarization degree confidence map
         display = "confp"
         vmin, vmax = 0.0, 1.0
-        im = ax.imshow(confP, vmin=vmin, vmax=vmax, aspect="equal", cmap=kwargs["cmap"], alpha=1.0)
+        im = ax.imshow(confP, vmin=vmin, vmax=vmax, aspect="equal", cmap=kwargs["cmap"])
         levelsconfp = np.array([0.500, 0.900, 0.990, 0.999])
         print("confp contour levels : ", levelsconfp)
         ax.contour(confP, levels=levelsconfp, colors="grey", linewidths=0.5)
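The recurring `alpha=1.0 - 0.75 * (pol < pol_err)` change relies on matplotlib's support for array-valued alpha in `imshow` (available in recent matplotlib): pixels whose polarization degree falls below its error are drawn mostly transparent instead of being hidden outright. A self-contained sketch with synthetic data:

```python
import numpy as np
import matplotlib.pyplot as plt

rng = np.random.default_rng(1)
pol = rng.uniform(0.0, 0.3, (32, 32))   # synthetic polarization degree map
pol_err = np.full_like(pol, 0.1)        # flat error map, for illustration

# Boolean array * float broadcasts to a per-pixel alpha map:
# 1.0 where P >= sigma_P, 0.25 where the pixel is noise-dominated.
alpha = 1.0 - 0.75 * (pol < pol_err)
plt.imshow(pol * 100.0, cmap="inferno", alpha=alpha)
plt.colorbar(label=r"$P$ [%]")
plt.show()
```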
@@ -1895,7 +1897,7 @@ class crop_map(object):
         else:
             self.ax = ax
             self.mask_alpha = 0.75
-            self.rect_selector = RectangleSelector(self.ax, self.onselect_crop, button=[1])
+            self.rect_selector = RectangleSelector(self.ax, self.onselect_crop, button=[1], spancoords="pixels", useblit=True)
             self.embedded = True
         self.ax.set(xlabel="Right Ascension (J2000)", ylabel="Declination (J2000)")
         self.display(self.data, self.wcs, self.map_convert, **self.kwargs)
@@ -1956,7 +1958,7 @@ class crop_map(object):
         self.display()
         if self.fig.canvas.manager.toolbar.mode == "":
-            self.rect_selector = RectangleSelector(self.ax, self.onselect_crop, button=[1])
+            self.rect_selector = RectangleSelector(self.ax, self.onselect_crop, button=[1], spancoords="pixels", useblit=True)
         self.RSextent = deepcopy(self.extent)
         self.RScenter = deepcopy(self.center)
@@ -2016,7 +2018,7 @@ class crop_map(object):
         self.ax.set_ylim(0, ylim)
         if self.fig.canvas.manager.toolbar.mode == "":
-            self.rect_selector = RectangleSelector(self.ax, self.onselect_crop, button=[1])
+            self.rect_selector = RectangleSelector(self.ax, self.onselect_crop, button=[1], spancoords="pixels", useblit=True)
         self.fig.canvas.draw_idle()
@@ -2028,7 +2030,7 @@ class crop_map(object):
     def crop(self) -> None:
         if self.fig.canvas.manager.toolbar.mode == "":
-            self.rect_selector = RectangleSelector(self.ax, self.onselect_crop, button=[1])
+            self.rect_selector = RectangleSelector(self.ax, self.onselect_crop, button=[1], spancoords="pixels", useblit=True)
         self.bapply.on_clicked(self.apply_crop)
         self.breset.on_clicked(self.reset_crop)
         self.fig.canvas.mpl_connect("close_event", self.on_close)
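Every `RectangleSelector` in `crop_map` now gets `spancoords="pixels"` and `useblit=True`. A minimal standalone example of the widget with these options (figure and callback are illustrative):

```python
import matplotlib.pyplot as plt
from matplotlib.widgets import RectangleSelector

fig, ax = plt.subplots()
ax.plot(range(10))

def onselect(eclick, erelease):
    # eclick/erelease are the mouse press/release events
    print((eclick.xdata, eclick.ydata), "->", (erelease.xdata, erelease.ydata))

# button=[1]: respond to the left mouse button only.
# useblit=True: blitting makes the rubber-band redraw much faster.
# spancoords="pixels": the minimum-span check is done in screen pixels.
selector = RectangleSelector(ax, onselect, button=[1], spancoords="pixels", useblit=True)
plt.show()
```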
@@ -2077,7 +2079,7 @@ class crop_Stokes(crop_map):
         # Crop dataset
         for dataset in self.hdul_crop:
-            if dataset.header["datatype"] == "IQU_cov_matrix":
+            if dataset.header["datatype"][-10:] == "cov_matrix":
                 stokes_cov = np.zeros((3, 3, shape[1], shape[0]))
                 for i in range(3):
                     for j in range(3):
@@ -2100,18 +2102,24 @@ class crop_Stokes(crop_map):
             self.on_close(event)
         if self.fig.canvas.manager.toolbar.mode == "":
-            self.rect_selector = RectangleSelector(self.ax, self.onselect_crop, button=[1])
+            self.rect_selector = RectangleSelector(self.ax, self.onselect_crop, button=[1], spancoords="pixels", useblit=True)

         # Update integrated values
-        mask = np.logical_and(self.hdul_crop["data_mask"].data.astype(bool), self.hdul_crop[0].data > 0)
-        I_diluted = self.hdul_crop["i_stokes"].data[mask].sum()
-        Q_diluted = self.hdul_crop["q_stokes"].data[mask].sum()
-        U_diluted = self.hdul_crop["u_stokes"].data[mask].sum()
-        I_diluted_err = np.sqrt(np.sum(self.hdul_crop["iqu_cov_matrix"].data[0, 0][mask]))
-        Q_diluted_err = np.sqrt(np.sum(self.hdul_crop["iqu_cov_matrix"].data[1, 1][mask]))
-        U_diluted_err = np.sqrt(np.sum(self.hdul_crop["iqu_cov_matrix"].data[2, 2][mask]))
-        IQ_diluted_err = np.sqrt(np.sum(self.hdul_crop["iqu_cov_matrix"].data[0, 1][mask] ** 2))
-        IU_diluted_err = np.sqrt(np.sum(self.hdul_crop["iqu_cov_matrix"].data[0, 2][mask] ** 2))
-        QU_diluted_err = np.sqrt(np.sum(self.hdul_crop["iqu_cov_matrix"].data[1, 2][mask] ** 2))
+        mask = np.logical_and(self.hdul_crop["DATA_MASK"].data.astype(bool), self.hdul_crop[0].data > 0)
+        I_diluted = self.hdul_crop["I_STOKES"].data[mask].sum()
+        Q_diluted = self.hdul_crop["Q_STOKES"].data[mask].sum()
+        U_diluted = self.hdul_crop["U_STOKES"].data[mask].sum()
+        I_diluted_err = np.sqrt(np.sum(self.hdul_crop["IQU_COV_MATRIX"].data[0, 0][mask]))
+        Q_diluted_err = np.sqrt(np.sum(self.hdul_crop["IQU_COV_MATRIX"].data[1, 1][mask]))
+        U_diluted_err = np.sqrt(np.sum(self.hdul_crop["IQU_COV_MATRIX"].data[2, 2][mask]))
+        IQ_diluted_err = np.sqrt(np.sum(self.hdul_crop["IQU_COV_MATRIX"].data[0, 1][mask] ** 2))
+        IU_diluted_err = np.sqrt(np.sum(self.hdul_crop["IQU_COV_MATRIX"].data[0, 2][mask] ** 2))
+        QU_diluted_err = np.sqrt(np.sum(self.hdul_crop["IQU_COV_MATRIX"].data[1, 2][mask] ** 2))
+        I_diluted_stat_err = np.sqrt(np.sum(self.hdul_crop["IQU_STAT_COV_MATRIX"].data[0, 0][mask]))
+        Q_diluted_stat_err = np.sqrt(np.sum(self.hdul_crop["IQU_STAT_COV_MATRIX"].data[1, 1][mask]))
+        U_diluted_stat_err = np.sqrt(np.sum(self.hdul_crop["IQU_STAT_COV_MATRIX"].data[2, 2][mask]))
+        IQ_diluted_stat_err = np.sqrt(np.sum(self.hdul_crop["IQU_STAT_COV_MATRIX"].data[0, 1][mask] ** 2))
+        IU_diluted_stat_err = np.sqrt(np.sum(self.hdul_crop["IQU_STAT_COV_MATRIX"].data[0, 2][mask] ** 2))
+        QU_diluted_stat_err = np.sqrt(np.sum(self.hdul_crop["IQU_STAT_COV_MATRIX"].data[1, 2][mask] ** 2))

         P_diluted = np.sqrt(Q_diluted**2 + U_diluted**2) / I_diluted
         P_diluted_err = (1.0 / I_diluted) * np.sqrt(
@@ -2120,6 +2128,18 @@ class crop_Stokes(crop_map):
             - 2.0 * (Q_diluted / I_diluted) * IQ_diluted_err
             - 2.0 * (U_diluted / I_diluted) * IU_diluted_err
         )
+        P_diluted_stat_err = (
+            P_diluted
+            / I_diluted
+            * np.sqrt(
+                I_diluted_stat_err
+                - 2.0 / (I_diluted * P_diluted**2) * (Q_diluted * IQ_diluted_stat_err + U_diluted * IU_diluted_stat_err)
+                + 1.0
+                / (I_diluted**2 * P_diluted**4)
+                * (Q_diluted**2 * Q_diluted_stat_err + U_diluted**2 * U_diluted_stat_err + 2.0 * Q_diluted * U_diluted * QU_diluted_stat_err)
+            )
+        )
+        debiased_P_diluted = np.sqrt(P_diluted**2 - P_diluted_stat_err**2) if P_diluted**2 > P_diluted_stat_err**2 else 0.0
         PA_diluted = princ_angle((90.0 / np.pi) * np.arctan2(U_diluted, Q_diluted))
         PA_diluted_err = (90.0 / (np.pi * (Q_diluted**2 + U_diluted**2))) * np.sqrt(
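`P_diluted_stat_err` feeds the new debiasing step: since P is the norm of noisy (Q, U), it is biased high, and the classic remedy is to subtract the statistical variance in quadrature, clamping at zero when noise dominates. A sketch of just that rule:

```python
import numpy as np

def debias_P(P, sigma_P_stat):
    """Quadratic debiasing of the polarization degree, clamped at zero
    when noise dominates (same rule as debiased_P_diluted above)."""
    return np.sqrt(P**2 - sigma_P_stat**2) if P**2 > sigma_P_stat**2 else 0.0

print(debias_P(0.05, 0.02))  # ~0.0458
print(debias_P(0.01, 0.02))  # 0.0, noise-dominated
```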
@@ -2129,7 +2149,7 @@ class crop_Stokes(crop_map):
         for dataset in self.hdul_crop:
             if dataset.header["FILENAME"][-4:] != "crop":
                 dataset.header["FILENAME"] += "_crop"
-            dataset.header["P_int"] = (P_diluted, "Integrated polarization degree")
+            dataset.header["P_int"] = (debiased_P_diluted, "Integrated polarization degree")
             dataset.header["sP_int"] = (np.ceil(P_diluted_err * 1000.0) / 1000.0, "Integrated polarization degree error")
             dataset.header["PA_int"] = (PA_diluted, "Integrated polarization angle")
             dataset.header["sPA_int"] = (np.ceil(PA_diluted_err * 10.0) / 10.0, "Integrated polarization angle error")
@@ -2463,9 +2483,11 @@ class pol_map(object):
         ax_snr_reset = self.fig.add_axes([0.060, 0.020, 0.05, 0.02])
         ax_snr_conf = self.fig.add_axes([0.115, 0.020, 0.05, 0.02])
         SNRi_max = np.max(self.I[self.IQU_cov[0, 0] > 0.0] / np.sqrt(self.IQU_cov[0, 0][self.IQU_cov[0, 0] > 0.0]))
-        SNRp_max = np.max(self.P[self.s_P > 0.0] / self.s_P[self.s_P > 0.0])
+        SNRp_max = np.max(self.P[self.P_ERR > 0.0] / self.P_ERR[self.P_ERR > 0.0])
         s_I_cut = Slider(ax_I_cut, r"$SNR^{I}_{cut}$", 1.0, int(SNRi_max * 0.95), valstep=1, valinit=self.SNRi_cut)
-        self.s_P_cut = Slider(self.ax_P_cut, r"$Conf^{P}_{cut}$", 0.50, 1.0, valstep=[0.500, 0.900, 0.990, 0.999], valinit=self.P_cut if P_cut <= 1.0 else 0.99)
+        self.P_ERR_cut = Slider(
+            self.ax_P_cut, r"$Conf^{P}_{cut}$", 0.50, 1.0, valstep=[0.500, 0.900, 0.990, 0.999], valinit=self.P_cut if P_cut <= 1.0 else 0.99
+        )
         s_vec_sc = Slider(ax_vec_sc, r"Vec scale", 0.0, 10.0, valstep=1, valinit=self.scale_vec)
         b_snr_reset = Button(ax_snr_reset, "Reset")
         b_snr_reset.label.set_fontsize(8)
@@ -2493,7 +2515,7 @@ class pol_map(object):
         def reset_snr(event):
             s_I_cut.reset()
-            self.s_P_cut.reset()
+            self.P_ERR_cut.reset()
             s_vec_sc.reset()

         def toggle_snr_conf(event=None):
@@ -2502,21 +2524,21 @@ class pol_map(object):
             if self.snr_conf:
                 self.snr_conf = 0
                 b_snr_conf.label.set_text("Conf")
-                self.s_P_cut = Slider(
+                self.P_ERR_cut = Slider(
                     self.ax_P_cut, r"$SNR^{P}_{cut}$", 1.0, max(int(SNRp_max * 0.95), 3), valstep=1, valinit=self.P_cut if P_cut >= 1.0 else 3
                 )
             else:
                 self.snr_conf = 1
                 b_snr_conf.label.set_text("SNR")
-                self.s_P_cut = Slider(
+                self.P_ERR_cut = Slider(
                     self.ax_P_cut, r"$Conf^{P}_{cut}$", 0.50, 1.0, valstep=[0.500, 0.900, 0.990, 0.999], valinit=self.P_cut if P_cut <= 1.0 else 0.99
                 )
-            self.s_P_cut.on_changed(update_snrp)
-            update_snrp(self.s_P_cut.val)
+            self.P_ERR_cut.on_changed(update_snrp)
+            update_snrp(self.P_ERR_cut.val)
             self.fig.canvas.draw_idle()

         s_I_cut.on_changed(update_snri)
-        self.s_P_cut.on_changed(update_snrp)
+        self.P_ERR_cut.on_changed(update_snrp)
         s_vec_sc.on_changed(update_vecsc)
         b_snr_reset.on_clicked(reset_snr)
         b_snr_conf.on_clicked(toggle_snr_conf)
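The `Conf`/`SNR` toggle rebuilds the slider with a different range and a discrete `valstep` list. A standalone sketch of a matplotlib `Slider` snapped to discrete values, as used for the confidence cut (recent matplotlib accepts a list for `valstep`):

```python
import matplotlib.pyplot as plt
from matplotlib.widgets import Slider

fig = plt.figure()
ax_cut = fig.add_axes([0.25, 0.05, 0.5, 0.03])

# valstep as a list: the slider snaps to these confidence levels only.
p_cut = Slider(ax_cut, r"$Conf^{P}_{cut}$", 0.50, 1.0, valstep=[0.500, 0.900, 0.990, 0.999], valinit=0.99)
p_cut.on_changed(lambda val: print("new cut:", val))
plt.show()
```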
@@ -2957,12 +2979,16 @@ class pol_map(object):
     def IQU_cov(self):
         return self.Stokes["IQU_COV_MATRIX"].data

+    @property
+    def IQU_stat_cov(self):
+        return self.Stokes["IQU_STAT_COV_MATRIX"].data
+
     @property
     def P(self):
         return self.Stokes["POL_DEG_DEBIASED"].data

     @property
-    def s_P(self):
+    def P_ERR(self):
         return self.Stokes["POL_DEG_ERR"].data

     @property
@@ -2970,7 +2996,7 @@ class pol_map(object):
return self.Stokes["POL_ANG"].data return self.Stokes["POL_ANG"].data
@property @property
def s_PA(self): def PA_ERR(self):
return self.Stokes["POL_ANG_ERR"].data return self.Stokes["POL_ANG_ERR"].data
@property @property
@@ -2978,7 +3004,7 @@ class pol_map(object):
return self.Stokes["DATA_MASK"].data return self.Stokes["DATA_MASK"].data
def set_data_mask(self, mask): def set_data_mask(self, mask):
self.Stokes[np.argmax([self.Stokes[i].header["datatype"] == "Data_mask" for i in range(len(self.Stokes))])].data = mask.astype(float) self.Stokes["DATA_MASK"].data = mask.astype(float)
@property @property
def cut(self): def cut(self):
@@ -2986,7 +3012,7 @@ class pol_map(object):
         SNRp_mask, SNRi_mask = (np.zeros(self.P.shape).astype(bool), np.zeros(self.I.shape).astype(bool))
         SNRi_mask[s_I > 0.0] = self.I[s_I > 0.0] / s_I[s_I > 0.0] > self.SNRi
         if self.SNRp >= 1.0:
-            SNRp_mask[self.s_P > 0.0] = self.P[self.s_P > 0.0] / self.s_P[self.s_P > 0.0] > self.SNRp
+            SNRp_mask[self.P_ERR > 0.0] = self.P[self.P_ERR > 0.0] / self.P_ERR[self.P_ERR > 0.0] > self.SNRp
         else:
             SNRp_mask = self.conf > self.SNRp
         return np.logical_and(SNRi_mask, SNRp_mask)
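The `cut` property combines an intensity SNR mask with a polarization SNR (or confidence) mask. A standalone numpy sketch of the SNR branch, with the same zero-error guards:

```python
import numpy as np

def snr_cut_mask(I, s_I, P, P_err, snri_cut=3.0, snrp_cut=3.0):
    """Pixels kept for display: SNR above the cut on both intensity and
    polarization degree, skipping pixels with non-positive errors."""
    snri_mask = np.zeros(I.shape, dtype=bool)
    snrp_mask = np.zeros(P.shape, dtype=bool)
    snri_mask[s_I > 0.0] = I[s_I > 0.0] / s_I[s_I > 0.0] > snri_cut
    snrp_mask[P_err > 0.0] = P[P_err > 0.0] / P_err[P_err > 0.0] > snrp_cut
    return np.logical_and(snri_mask, snrp_mask)
```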
@@ -3054,7 +3080,7 @@ class pol_map(object):
             ax.add_artist(self.north_dir)

     def display(self, fig=None, ax=None, flux_lim=None):
-        norm = None
+        kwargs = dict([])
         if self.display_selection is None:
             self.display_selection = "total_flux"
         if flux_lim is None:
@@ -3065,7 +3091,7 @@ class pol_map(object):
                 vmin, vmax = (1.0 / 2.0 * np.median(self.data[self.data > 0.0]), np.max(self.data[self.data > 0.0]))
             else:
                 vmin, vmax = flux_lim
-            norm = LogNorm(vmin, vmax)
+            kwargs["norm"] = LogNorm(vmin, vmax)
             label = r"$F_{\lambda}$ [$ergs \cdot cm^{-2} \cdot s^{-1} \cdot \AA^{-1}$]"
         elif self.display_selection.lower() in ["pol_flux"]:
             self.data = self.I * self.map_convert * self.P
@@ -3073,28 +3099,30 @@ class pol_map(object):
                 vmin, vmax = (1.0 / 2.0 * np.median(self.I[self.I > 0.0] * self.map_convert), np.max(self.I[self.I > 0.0] * self.map_convert))
             else:
                 vmin, vmax = flux_lim
-            norm = LogNorm(vmin, vmax)
+            kwargs["norm"] = LogNorm(vmin, vmax)
             label = r"$P \cdot F_{\lambda}$ [$ergs \cdot cm^{-2} \cdot s^{-1} \cdot \AA^{-1}$]"
         elif self.display_selection.lower() in ["pol_deg"]:
             self.data = self.P * 100.0
-            vmin, vmax = 0.0, np.max(self.data[self.P > self.s_P])
+            kwargs["vmin"], kwargs["vmax"] = 0.0, min(np.max(self.data[self.P > self.P_ERR]), 100.0)
+            kwargs["alpha"] = 1.0 - 0.75 * (self.P < self.P_ERR)
             label = r"$P$ [%]"
         elif self.display_selection.lower() in ["pol_ang"]:
             self.data = princ_angle(self.PA)
-            vmin, vmax = 0, 180.0
+            kwargs["vmin"], kwargs["vmax"] = 0, 180.0
+            kwargs["alpha"] = 1.0 - 0.75 * (self.P < self.P_ERR)
             label = r"$\theta_{P}$ [°]"
         elif self.display_selection.lower() in ["snri"]:
             s_I = np.sqrt(self.IQU_cov[0, 0])
             SNRi = np.zeros(self.I.shape)
             SNRi[s_I > 0.0] = self.I[s_I > 0.0] / s_I[s_I > 0.0]
             self.data = SNRi
-            vmin, vmax = 0.0, np.max(self.data[self.data > 0.0])
+            kwargs["vmin"], kwargs["vmax"] = 0.0, np.max(self.data[self.data > 0.0])
             label = r"$I_{Stokes}/\sigma_{I}$"
         elif self.display_selection.lower() in ["snrp"]:
             SNRp = np.zeros(self.P.shape)
-            SNRp[self.s_P > 0.0] = self.P[self.s_P > 0.0] / self.s_P[self.s_P > 0.0]
+            SNRp[self.P_ERR > 0.0] = self.P[self.P_ERR > 0.0] / self.P_ERR[self.P_ERR > 0.0]
             self.data = SNRp
-            vmin, vmax = 0.0, np.max(self.data[self.data > 0.0])
+            kwargs["vmin"], kwargs["vmax"] = 0.0, np.max(self.data[self.data > 0.0])
             label = r"$P/\sigma_{P}$"

         if fig is None:
@@ -3105,20 +3133,14 @@ class pol_map(object):
             self.cbar.remove()
             if hasattr(self, "im"):
                 self.im.remove()
-            if norm is not None:
-                self.im = ax.imshow(self.data, norm=norm, aspect="equal", cmap="inferno")
-            else:
-                self.im = ax.imshow(self.data, vmin=vmin, vmax=vmax, aspect="equal", cmap="inferno")
+            self.im = ax.imshow(self.data, aspect="equal", cmap="inferno", **kwargs)
             plt.rcParams.update({"font.size": 14})
             self.cbar = fig.colorbar(self.im, ax=ax, aspect=50, shrink=0.75, pad=0.025, label=label)
             plt.rcParams.update({"font.size": 10})
             fig.canvas.draw_idle()
             return self.im
         else:
-            if norm is not None:
-                im = ax.imshow(self.data, norm=norm, aspect="equal", cmap="inferno")
-            else:
-                im = ax.imshow(self.data, vmin=vmin, vmax=vmax, aspect="equal", cmap="inferno")
+            im = ax.imshow(self.data, aspect="equal", cmap="inferno", **kwargs)
             ax.set_xlim(0, self.data.shape[1])
             ax.set_ylim(0, self.data.shape[0])
             plt.rcParams.update({"font.size": 14})
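Collecting the display options in a `kwargs` dict and splatting them into a single `imshow` call is what removes the duplicated `if norm is not None` branches. The pattern in isolation (synthetic data; note that recent matplotlib rejects `norm` combined with `vmin`/`vmax`, which is why only one of the two ever ends up in the dict):

```python
import numpy as np
import matplotlib.pyplot as plt
from matplotlib.colors import LogNorm

data = np.abs(np.random.default_rng(2).normal(1.0, 0.5, (16, 16))) + 1e-3

kwargs = {}
log_scale = True
if log_scale:
    kwargs["norm"] = LogNorm(data.min(), data.max())  # vmin/vmax live inside the norm
else:
    kwargs["vmin"], kwargs["vmax"] = 0.0, data.max()

# One call covers both the log-scaled and linear cases.
plt.imshow(data, aspect="equal", cmap="inferno", **kwargs)
plt.colorbar()
plt.show()
```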
@@ -3163,12 +3185,12 @@ class pol_map(object):
             )
             if self.pa_err:
                 XY_U_err1, XY_V_err1 = (
-                    P_cut * np.cos(np.pi / 2.0 + (self.PA + 3.0 * self.s_PA) * np.pi / 180.0),
-                    P_cut * np.sin(np.pi / 2.0 + (self.PA + 3.0 * self.s_PA) * np.pi / 180.0),
+                    P_cut * np.cos(np.pi / 2.0 + (self.PA + 3.0 * self.PA_ERR) * np.pi / 180.0),
+                    P_cut * np.sin(np.pi / 2.0 + (self.PA + 3.0 * self.PA_ERR) * np.pi / 180.0),
                 )
                 XY_U_err2, XY_V_err2 = (
-                    P_cut * np.cos(np.pi / 2.0 + (self.PA - 3.0 * self.s_PA) * np.pi / 180.0),
-                    P_cut * np.sin(np.pi / 2.0 + (self.PA - 3.0 * self.s_PA) * np.pi / 180.0),
+                    P_cut * np.cos(np.pi / 2.0 + (self.PA - 3.0 * self.PA_ERR) * np.pi / 180.0),
+                    P_cut * np.sin(np.pi / 2.0 + (self.PA - 3.0 * self.PA_ERR) * np.pi / 180.0),
                 )
             if hasattr(self, "quiver_err1"):
                 self.quiver_err1.remove()
@@ -3232,12 +3254,12 @@ class pol_map(object):
             )
             if self.pa_err:
                 XY_U_err1, XY_V_err1 = (
-                    P_cut * np.cos(np.pi / 2.0 + (self.PA + 3.0 * self.s_PA) * np.pi / 180.0),
-                    P_cut * np.sin(np.pi / 2.0 + (self.PA + 3.0 * self.s_PA) * np.pi / 180.0),
+                    P_cut * np.cos(np.pi / 2.0 + (self.PA + 3.0 * self.PA_ERR) * np.pi / 180.0),
+                    P_cut * np.sin(np.pi / 2.0 + (self.PA + 3.0 * self.PA_ERR) * np.pi / 180.0),
                 )
                 XY_U_err2, XY_V_err2 = (
-                    P_cut * np.cos(np.pi / 2.0 + (self.PA - 3.0 * self.s_PA) * np.pi / 180.0),
-                    P_cut * np.sin(np.pi / 2.0 + (self.PA - 3.0 * self.s_PA) * np.pi / 180.0),
+                    P_cut * np.cos(np.pi / 2.0 + (self.PA - 3.0 * self.PA_ERR) * np.pi / 180.0),
+                    P_cut * np.sin(np.pi / 2.0 + (self.PA - 3.0 * self.PA_ERR) * np.pi / 180.0),
                 )
             ax.quiver(
                 X[:: self.step_vec, :: self.step_vec],
@@ -3283,27 +3305,26 @@ class pol_map(object):
             s_I = np.sqrt(self.IQU_cov[0, 0])
             I_reg = self.I.sum()
             I_reg_err = np.sqrt(np.sum(s_I**2))
-            P_reg = self.Stokes[0].header["P_int"]
+            debiased_P_reg = self.Stokes[0].header["P_int"]
             P_reg_err = self.Stokes[0].header["sP_int"]
             PA_reg = self.Stokes[0].header["PA_int"]
             PA_reg_err = self.Stokes[0].header["sPA_int"]
-            s_I = np.sqrt(self.IQU_cov[0, 0])
-            s_Q = np.sqrt(self.IQU_cov[1, 1])
-            s_U = np.sqrt(self.IQU_cov[2, 2])
-            s_IQ = self.IQU_cov[0, 1]
-            s_IU = self.IQU_cov[0, 2]
-            s_QU = self.IQU_cov[1, 2]
             I_cut = self.I[self.cut].sum()
             Q_cut = self.Q[self.cut].sum()
             U_cut = self.U[self.cut].sum()
-            I_cut_err = np.sqrt(np.sum(s_I[self.cut] ** 2))
-            Q_cut_err = np.sqrt(np.sum(s_Q[self.cut] ** 2))
-            U_cut_err = np.sqrt(np.sum(s_U[self.cut] ** 2))
-            IQ_cut_err = np.sqrt(np.sum(s_IQ[self.cut] ** 2))
-            IU_cut_err = np.sqrt(np.sum(s_IU[self.cut] ** 2))
-            QU_cut_err = np.sqrt(np.sum(s_QU[self.cut] ** 2))
+            I_cut_err = np.sqrt(np.sum(self.IQU_cov[0, 0][self.cut]))
+            Q_cut_err = np.sqrt(np.sum(self.IQU_cov[1, 1][self.cut]))
+            U_cut_err = np.sqrt(np.sum(self.IQU_cov[2, 2][self.cut]))
+            IQ_cut_err = np.sqrt(np.sum(self.IQU_cov[0, 1][self.cut] ** 2))
+            IU_cut_err = np.sqrt(np.sum(self.IQU_cov[0, 2][self.cut] ** 2))
+            QU_cut_err = np.sqrt(np.sum(self.IQU_cov[1, 2][self.cut] ** 2))
+            I_cut_stat_err = np.sqrt(np.sum(self.IQU_stat_cov[0, 0][self.cut]))
+            Q_cut_stat_err = np.sqrt(np.sum(self.IQU_stat_cov[1, 1][self.cut]))
+            U_cut_stat_err = np.sqrt(np.sum(self.IQU_stat_cov[2, 2][self.cut]))
+            IQ_cut_stat_err = np.sqrt(np.sum(self.IQU_stat_cov[0, 1][self.cut] ** 2))
+            IU_cut_stat_err = np.sqrt(np.sum(self.IQU_stat_cov[0, 2][self.cut] ** 2))
+            QU_cut_stat_err = np.sqrt(np.sum(self.IQU_stat_cov[1, 2][self.cut] ** 2))

             with np.errstate(divide="ignore", invalid="ignore"):
                 P_cut = np.sqrt(Q_cut**2 + U_cut**2) / I_cut
@@ -3316,6 +3337,16 @@ class pol_map(object):
                 )
                 / I_cut
             )
+            P_cut_stat_err = (
+                P_cut
+                / I_cut
+                * np.sqrt(
+                    I_cut_stat_err
+                    - 2.0 / (I_cut * P_cut**2) * (Q_cut * IQ_cut_stat_err + U_cut * IU_cut_stat_err)
+                    + 1.0 / (I_cut**2 * P_cut**4) * (Q_cut**2 * Q_cut_stat_err + U_cut**2 * U_cut_stat_err + 2.0 * Q_cut * U_cut * QU_cut_stat_err)
+                )
+            )
+            debiased_P_cut = np.sqrt(P_cut**2 - P_cut_stat_err**2) if P_cut**2 > P_cut_stat_err**2 else 0.0
             PA_cut = princ_angle((90.0 / np.pi) * np.arctan2(U_cut, Q_cut))
             PA_cut_err = (90.0 / (np.pi * (Q_cut**2 + U_cut**2))) * np.sqrt(
@@ -3323,22 +3354,21 @@ class pol_map(object):
             )
         else:
-            s_I = np.sqrt(self.IQU_cov[0, 0])
-            s_Q = np.sqrt(self.IQU_cov[1, 1])
-            s_U = np.sqrt(self.IQU_cov[2, 2])
-            s_IQ = self.IQU_cov[0, 1]
-            s_IU = self.IQU_cov[0, 2]
-            s_QU = self.IQU_cov[1, 2]
             I_reg = self.I[self.region].sum()
             Q_reg = self.Q[self.region].sum()
             U_reg = self.U[self.region].sum()
-            I_reg_err = np.sqrt(np.sum(s_I[self.region] ** 2))
-            Q_reg_err = np.sqrt(np.sum(s_Q[self.region] ** 2))
-            U_reg_err = np.sqrt(np.sum(s_U[self.region] ** 2))
-            IQ_reg_err = np.sqrt(np.sum(s_IQ[self.region] ** 2))
-            IU_reg_err = np.sqrt(np.sum(s_IU[self.region] ** 2))
-            QU_reg_err = np.sqrt(np.sum(s_QU[self.region] ** 2))
+            I_reg_err = np.sqrt(np.sum(self.IQU_cov[0, 0][self.region]))
+            Q_reg_err = np.sqrt(np.sum(self.IQU_cov[1, 1][self.region]))
+            U_reg_err = np.sqrt(np.sum(self.IQU_cov[2, 2][self.region]))
+            IQ_reg_err = np.sqrt(np.sum(self.IQU_cov[0, 1][self.region] ** 2))
+            IU_reg_err = np.sqrt(np.sum(self.IQU_cov[0, 2][self.region] ** 2))
+            QU_reg_err = np.sqrt(np.sum(self.IQU_cov[1, 2][self.region] ** 2))
+            I_reg_stat_err = np.sqrt(np.sum(self.IQU_stat_cov[0, 0][self.region]))
+            Q_reg_stat_err = np.sqrt(np.sum(self.IQU_stat_cov[1, 1][self.region]))
+            U_reg_stat_err = np.sqrt(np.sum(self.IQU_stat_cov[2, 2][self.region]))
+            IQ_reg_stat_err = np.sqrt(np.sum(self.IQU_stat_cov[0, 1][self.region] ** 2))
+            IU_reg_stat_err = np.sqrt(np.sum(self.IQU_stat_cov[0, 2][self.region] ** 2))
+            QU_reg_stat_err = np.sqrt(np.sum(self.IQU_stat_cov[1, 2][self.region] ** 2))
             conf = PCconf(QN=Q_reg / I_reg, QN_ERR=Q_reg_err / I_reg, UN=U_reg / I_reg, UN_ERR=U_reg_err / I_reg)

             if 1.0 - conf > 1e-3:
@@ -3355,6 +3385,16 @@ class pol_map(object):
                 )
                 / I_reg
             )
+            P_reg_stat_err = (
+                P_reg
+                / I_reg
+                * np.sqrt(
+                    I_reg_stat_err
+                    - 2.0 / (I_reg * P_reg**2) * (Q_reg * IQ_reg_stat_err + U_reg * IU_reg_stat_err)
+                    + 1.0 / (I_reg**2 * P_reg**4) * (Q_reg**2 * Q_reg_stat_err + U_reg**2 * U_reg_stat_err + 2.0 * Q_reg * U_reg * QU_reg_stat_err)
+                )
+            )
+            debiased_P_reg = np.sqrt(P_reg**2 - P_reg_stat_err**2) if P_reg**2 > P_reg_stat_err**2 else 0.0
             PA_reg = princ_angle((90.0 / np.pi) * np.arctan2(U_reg, Q_reg))
             PA_reg_err = (90.0 / (np.pi * (Q_reg**2 + U_reg**2))) * np.sqrt(
@@ -3365,12 +3405,18 @@ class pol_map(object):
             I_cut = self.I[new_cut].sum()
             Q_cut = self.Q[new_cut].sum()
             U_cut = self.U[new_cut].sum()
-            I_cut_err = np.sqrt(np.sum(s_I[new_cut] ** 2))
-            Q_cut_err = np.sqrt(np.sum(s_Q[new_cut] ** 2))
-            U_cut_err = np.sqrt(np.sum(s_U[new_cut] ** 2))
-            IQ_cut_err = np.sqrt(np.sum(s_IQ[new_cut] ** 2))
-            IU_cut_err = np.sqrt(np.sum(s_IU[new_cut] ** 2))
-            QU_cut_err = np.sqrt(np.sum(s_QU[new_cut] ** 2))
+            I_cut_err = np.sqrt(np.sum(self.IQU_cov[0, 0][new_cut]))
+            Q_cut_err = np.sqrt(np.sum(self.IQU_cov[1, 1][new_cut]))
+            U_cut_err = np.sqrt(np.sum(self.IQU_cov[2, 2][new_cut]))
+            IQ_cut_err = np.sqrt(np.sum(self.IQU_cov[0, 1][new_cut] ** 2))
+            IU_cut_err = np.sqrt(np.sum(self.IQU_cov[0, 2][new_cut] ** 2))
+            QU_cut_err = np.sqrt(np.sum(self.IQU_cov[1, 2][new_cut] ** 2))
+            I_cut_stat_err = np.sqrt(np.sum(self.IQU_stat_cov[0, 0][new_cut]))
+            Q_cut_stat_err = np.sqrt(np.sum(self.IQU_stat_cov[1, 1][new_cut]))
+            U_cut_stat_err = np.sqrt(np.sum(self.IQU_stat_cov[2, 2][new_cut]))
+            IQ_cut_stat_err = np.sqrt(np.sum(self.IQU_stat_cov[0, 1][new_cut] ** 2))
+            IU_cut_stat_err = np.sqrt(np.sum(self.IQU_stat_cov[0, 2][new_cut] ** 2))
+            QU_cut_stat_err = np.sqrt(np.sum(self.IQU_stat_cov[1, 2][new_cut] ** 2))

             with np.errstate(divide="ignore", invalid="ignore"):
                 P_cut = np.sqrt(Q_cut**2 + U_cut**2) / I_cut
@@ -3383,6 +3429,16 @@ class pol_map(object):
                 )
                 / I_cut
             )
+            P_cut_stat_err = (
+                P_cut
+                / I_cut
+                * np.sqrt(
+                    I_cut_stat_err
+                    - 2.0 / (I_cut * P_cut**2) * (Q_cut * IQ_cut_stat_err + U_cut * IU_cut_stat_err)
+                    + 1.0 / (I_cut**2 * P_cut**4) * (Q_cut**2 * Q_cut_stat_err + U_cut**2 * U_cut_stat_err + 2.0 * Q_cut * U_cut * QU_cut_stat_err)
+                )
+            )
+            debiased_P_cut = np.sqrt(P_cut**2 - P_cut_stat_err**2) if P_cut**2 > P_cut_stat_err**2 else 0.0
             PA_cut = princ_angle((90.0 / np.pi) * np.arctan2(U_cut, Q_cut))
             PA_cut_err = (90.0 / (np.pi * (Q_cut**2 + U_cut**2))) * np.sqrt(
@@ -3403,7 +3459,7 @@ class pol_map(object):
                 self.pivot_wav, sci_not(I_reg * self.map_convert, I_reg_err * self.map_convert, 2)
             )
             + "\n"
-            + r"$P^{{int}}$ = {0:.1f} $\pm$ {1:.1f} %".format(P_reg * 100.0, np.ceil(P_reg_err * 1000.0) / 10.0)
+            + r"$P^{{int}}$ = {0:.1f} $\pm$ {1:.1f} %".format(debiased_P_reg * 100.0, np.ceil(P_reg_err * 1000.0) / 10.0)
             + "\n"
             + r"$\theta_{{P}}^{{int}}$ = {0:.1f} $\pm$ {1:.1f} °".format(PA_reg, np.ceil(PA_reg_err * 10.0) / 10.0)
             + str_conf
@@ -3415,7 +3471,7 @@ class pol_map(object):
             # self.pivot_wav, sci_not(I_cut * self.map_convert, I_cut_err * self.map_convert, 2)
             # )
             # + "\n"
-            # + r"$P^{{cut}}$ = {0:.1f} $\pm$ {1:.1f} %".format(P_cut * 100.0, np.ceil(P_cut_err * 1000.0) / 10.0)
+            # + r"$P^{{cut}}$ = {0:.1f} $\pm$ {1:.1f} %".format(debiased_P_cut * 100.0, np.ceil(P_cut_err * 1000.0) / 10.0)
             # + "\n"
             # + r"$\theta_{{P}}^{{cut}}$ = {0:.1f} $\pm$ {1:.1f} °".format(PA_cut, np.ceil(PA_cut_err * 10.0) / 10.0)
             # )
@@ -3439,7 +3495,7 @@ class pol_map(object):
                 self.pivot_wav, sci_not(I_reg * self.map_convert, I_reg_err * self.map_convert, 2)
             )
             + "\n"
-            + r"$P^{{int}}$ = {0:.1f} $\pm$ {1:.1f} %".format(P_reg * 100.0, np.ceil(P_reg_err * 1000.0) / 10.0)
+            + r"$P^{{int}}$ = {0:.1f} $\pm$ {1:.1f} %".format(debiased_P_reg * 100.0, np.ceil(P_reg_err * 1000.0) / 10.0)
             + "\n"
             + r"$\theta_{{P}}^{{int}}$ = {0:.1f} $\pm$ {1:.1f} °".format(PA_reg, np.ceil(PA_reg_err * 10.0) / 10.0)
             + str_conf
@@ -3451,7 +3507,7 @@ class pol_map(object):
             # self.pivot_wav, sci_not(I_cut * self.map_convert, I_cut_err * self.map_convert, 2)
             # )
             # + "\n"
-            # + r"$P^{{cut}}$ = {0:.1f} $\pm$ {1:.1f} %".format(P_cut * 100.0, np.ceil(P_cut_err * 1000.0) / 10.0)
+            # + r"$P^{{cut}}$ = {0:.1f} $\pm$ {1:.1f} %".format(debiased_P_cut * 100.0, np.ceil(P_cut_err * 1000.0) / 10.0)
             # + "\n"
             # + r"$\theta_{{P}}^{{cut}}$ = {0:.1f} $\pm$ {1:.1f} °".format(PA_cut, np.ceil(PA_cut_err * 10.0) / 10.0)
             # )

View File

@@ -224,7 +224,9 @@ def bin_ndarray(ndarray, new_shape, operation="sum"):
     return ndarray


-def crop_array(data_array, headers, error_array=None, data_mask=None, step=5, null_val=None, inside=False, display=False, savename=None, plots_folder=""):
+def crop_array(
+    data_array, headers, error_array=None, data_mask=None, step=5, null_val=None, crop=True, inside=False, display=False, savename=None, plots_folder=""
+):
     """
     Homogeneously crop an array: all contained images will have the same shape.
     'inside' parameter will decide how much should be cropped.
@@ -256,6 +258,10 @@ def crop_array(data_array, headers, error_array=None, data_mask=None, step=5, nu
         If None, will be put to 75% of the mean value of the associated error
         array.
         Defaults to None.
+    crop : boolean, optional
+        If True, data_array will be cropped down to contain only relevant data.
+        If False, this information will be kept in the crop_mask output.
+        Defaults to True.
     inside : boolean, optional
         If True, the cropped image will be the maximum rectangle included
         inside the image. If False, the cropped image will be the minimum
@@ -295,6 +301,9 @@ def crop_array(data_array, headers, error_array=None, data_mask=None, step=5, nu
     v_array[1] = np.max(vertex[:, 1]).astype(int)
     v_array[2] = np.min(vertex[:, 2]).astype(int)
     v_array[3] = np.max(vertex[:, 3]).astype(int)
+    if data_mask is None:
+        data_mask = np.zeros(data_array[0].shape).astype(bool)
+        data_mask[v_array[0] : v_array[1], v_array[2] : v_array[3]] = True
     new_shape = np.array([v_array[1] - v_array[0], v_array[3] - v_array[2]])
     rectangle = [v_array[2], v_array[0], new_shape[1], new_shape[0], 0.0, "b"]
@@ -352,11 +361,11 @@ def crop_array(data_array, headers, error_array=None, data_mask=None, step=5, nu
         )
         plt.show()

-    if data_mask is not None:
+    if crop:
         crop_mask = data_mask[v_array[0] : v_array[1], v_array[2] : v_array[3]]
         return crop_array, crop_error_array, crop_mask, crop_headers
     else:
-        return crop_array, crop_error_array, crop_headers
+        return data_array, error_array, data_mask, headers


 def deconvolve_array(data_array, headers, psf="gaussian", FWHM=1.0, scale="px", shape=None, iterations=20, algo="richardson"):
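With the new `crop` flag, `crop_array` either trims the arrays (`crop=True`) or leaves them at full size and records the useful region in the returned `data_mask`. The two behaviours in plain numpy (vertex values are illustrative):

```python
import numpy as np

data = np.random.default_rng(3).random((10, 10))
v = [2, 8, 3, 9]  # [ymin, ymax, xmin, xmax], as in crop_array's v_array

# crop=True behaviour: physically trim the array.
cropped = data[v[0] : v[1], v[2] : v[3]]

# crop=False behaviour: keep the full array, record the region in a mask.
mask = np.zeros(data.shape, dtype=bool)
mask[v[0] : v[1], v[2] : v[3]] = True
assert np.allclose(data[mask].reshape(cropped.shape), cropped)
```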
@@ -1243,6 +1252,7 @@ def compute_Stokes(data_array, error_array, data_mask, headers, FWHM=None, scale
     # Orientation and error for each polarizer
     # fmax = np.finfo(np.float64).max
     pol_flux = np.array([corr[0] * pol0, corr[1] * pol60, corr[2] * pol120])
+    pol_flux_corr = np.array([pf * 2.0 / t for (pf, t) in zip(pol_flux, transmit)])
     coeff_stokes = np.zeros((3, 3))

     # Coefficients linking each polarizer flux to each Stokes parameter
@@ -1258,6 +1268,7 @@ def compute_Stokes(data_array, error_array, data_mask, headers, FWHM=None, scale
     # Normalization parameter for Stokes parameters computation
     N = (coeff_stokes[0, :] * transmit / 2.0).sum()
     coeff_stokes = coeff_stokes / N
+    coeff_stokes_corr = np.array([cs * t / 2.0 for (cs, t) in zip(coeff_stokes.T, transmit)]).T

     I_stokes = np.zeros(pol_array[0].shape)
     Q_stokes = np.zeros(pol_array[0].shape)
     U_stokes = np.zeros(pol_array[0].shape)
@@ -1299,121 +1310,81 @@ def compute_Stokes(data_array, error_array, data_mask, headers, FWHM=None, scale
# Statistical error: Poisson noise is assumed # Statistical error: Poisson noise is assumed
sigma_flux = np.array([np.sqrt(flux / head["exptime"]) for flux, head in zip(pol_flux, pol_headers)]) sigma_flux = np.array([np.sqrt(flux / head["exptime"]) for flux, head in zip(pol_flux, pol_headers)])
s_I2_stat = np.sum([coeff_stokes[0, i] ** 2 * sigma_flux[i] ** 2 for i in range(len(sigma_flux))], axis=0) Stokes_stat_cov = np.zeros(Stokes_cov.shape)
s_Q2_stat = np.sum([coeff_stokes[1, i] ** 2 * sigma_flux[i] ** 2 for i in range(len(sigma_flux))], axis=0) for i in range(Stokes_cov.shape[0]):
s_U2_stat = np.sum([coeff_stokes[2, i] ** 2 * sigma_flux[i] ** 2 for i in range(len(sigma_flux))], axis=0) Stokes_stat_cov[i, i] = np.sum([coeff_stokes[i, k] ** 2 * sigma_flux[k] ** 2 for k in range(len(sigma_flux))], axis=0)
for j in [k for k in range(3) if k > i]:
Stokes_stat_cov[i, j] = np.sum([abs(coeff_stokes[i, k] * coeff_stokes[j, k]) * sigma_flux[k] ** 2 for k in range(len(sigma_flux))], axis=0)
Stokes_stat_cov[j, i] = np.sum([abs(coeff_stokes[i, k] * coeff_stokes[j, k]) * sigma_flux[k] ** 2 for k in range(len(sigma_flux))], axis=0)
     pol_flux_corr = np.array([pf * 2.0 / t for (pf, t) in zip(pol_flux, transmit)])
     coeff_stokes_corr = np.array([cs * t / 2.0 for (cs, t) in zip(coeff_stokes.T, transmit)]).T
     # Compute the derivative of each Stokes parameter with respect to the polarizer orientation
-    dI_dtheta1 = (
-        2.0
-        * pol_eff[0]
-        / N
-        * (
-            pol_eff[2] * np.cos(-2.0 * theta[2] + 2.0 * theta[0]) * (pol_flux_corr[1] - I_stokes)
-            - pol_eff[1] * np.cos(-2.0 * theta[0] + 2.0 * theta[1]) * (pol_flux_corr[2] - I_stokes)
-            + coeff_stokes_corr[0, 0] * (np.sin(2.0 * theta[0]) * Q_stokes - np.cos(2 * theta[0]) * U_stokes)
-        )
-    )
-    dI_dtheta2 = (
-        2.0
-        * pol_eff[1]
-        / N
-        * (
-            pol_eff[0] * np.cos(-2.0 * theta[0] + 2.0 * theta[1]) * (pol_flux_corr[2] - I_stokes)
-            - pol_eff[2] * np.cos(-2.0 * theta[1] + 2.0 * theta[2]) * (pol_flux_corr[0] - I_stokes)
-            + coeff_stokes_corr[0, 1] * (np.sin(2.0 * theta[1]) * Q_stokes - np.cos(2 * theta[1]) * U_stokes)
-        )
-    )
-    dI_dtheta3 = (
-        2.0
-        * pol_eff[2]
-        / N
-        * (
-            pol_eff[1] * np.cos(-2.0 * theta[1] + 2.0 * theta[2]) * (pol_flux_corr[0] - I_stokes)
-            - pol_eff[0] * np.cos(-2.0 * theta[2] + 2.0 * theta[0]) * (pol_flux_corr[1] - I_stokes)
-            + coeff_stokes_corr[0, 2] * (np.sin(2.0 * theta[2]) * Q_stokes - np.cos(2 * theta[2]) * U_stokes)
-        )
-    )
-    dI_dtheta = np.array([dI_dtheta1, dI_dtheta2, dI_dtheta3])
-    dQ_dtheta1 = (
-        2.0
-        * pol_eff[0]
-        / N
-        * (
-            np.cos(2.0 * theta[0]) * (pol_flux_corr[1] - pol_flux_corr[2])
-            - (pol_eff[2] * np.cos(-2.0 * theta[2] + 2.0 * theta[0]) - pol_eff[1] * np.cos(-2.0 * theta[0] + 2.0 * theta[1])) * Q_stokes
-            + coeff_stokes_corr[1, 0] * (np.sin(2.0 * theta[0]) * Q_stokes - np.cos(2 * theta[0]) * U_stokes)
-        )
-    )
-    dQ_dtheta2 = (
-        2.0
-        * pol_eff[1]
-        / N
-        * (
-            np.cos(2.0 * theta[1]) * (pol_flux_corr[2] - pol_flux_corr[0])
-            - (pol_eff[0] * np.cos(-2.0 * theta[0] + 2.0 * theta[1]) - pol_eff[2] * np.cos(-2.0 * theta[1] + 2.0 * theta[2])) * Q_stokes
-            + coeff_stokes_corr[1, 1] * (np.sin(2.0 * theta[1]) * Q_stokes - np.cos(2 * theta[1]) * U_stokes)
-        )
-    )
-    dQ_dtheta3 = (
-        2.0
-        * pol_eff[2]
-        / N
-        * (
-            np.cos(2.0 * theta[2]) * (pol_flux_corr[0] - pol_flux_corr[1])
-            - (pol_eff[1] * np.cos(-2.0 * theta[1] + 2.0 * theta[2]) - pol_eff[0] * np.cos(-2.0 * theta[2] + 2.0 * theta[0])) * Q_stokes
-            + coeff_stokes_corr[1, 2] * (np.sin(2.0 * theta[2]) * Q_stokes - np.cos(2 * theta[2]) * U_stokes)
-        )
-    )
-    dQ_dtheta = np.array([dQ_dtheta1, dQ_dtheta2, dQ_dtheta3])
-    dU_dtheta1 = (
-        2.0
-        * pol_eff[0]
-        / N
-        * (
-            np.sin(2.0 * theta[0]) * (pol_flux_corr[1] - pol_flux_corr[2])
-            - (pol_eff[2] * np.cos(-2.0 * theta[2] + 2.0 * theta[0]) - pol_eff[1] * np.cos(-2.0 * theta[0] + 2.0 * theta[1])) * U_stokes
-            + coeff_stokes_corr[2, 0] * (np.sin(2.0 * theta[0]) * Q_stokes - np.cos(2 * theta[0]) * U_stokes)
-        )
-    )
-    dU_dtheta2 = (
-        2.0
-        * pol_eff[1]
-        / N
-        * (
-            np.sin(2.0 * theta[1]) * (pol_flux_corr[2] - pol_flux_corr[0])
-            - (pol_eff[0] * np.cos(-2.0 * theta[0] + 2.0 * theta[1]) - pol_eff[2] * np.cos(-2.0 * theta[1] + 2.0 * theta[2])) * U_stokes
-            + coeff_stokes_corr[2, 1] * (np.sin(2.0 * theta[1]) * Q_stokes - np.cos(2 * theta[1]) * U_stokes)
-        )
-    )
-    dU_dtheta3 = (
-        2.0
-        * pol_eff[2]
-        / N
-        * (
-            np.sin(2.0 * theta[2]) * (pol_flux_corr[0] - pol_flux_corr[1])
-            - (pol_eff[1] * np.cos(-2.0 * theta[1] + 2.0 * theta[2]) - pol_eff[0] * np.cos(-2.0 * theta[2] + 2.0 * theta[0])) * U_stokes
-            + coeff_stokes_corr[2, 2] * (np.sin(2.0 * theta[2]) * Q_stokes - np.cos(2 * theta[2]) * U_stokes)
-        )
-    )
-    dU_dtheta = np.array([dU_dtheta1, dU_dtheta2, dU_dtheta3])
+    dIQU_dtheta = np.zeros(Stokes_cov.shape)
+    # Derivative of I_stokes wrt theta_1, 2, 3
+    for j in range(3):
+        dIQU_dtheta[0, j] = (
+            2.0
+            * pol_eff[j]
+            / N
+            * (
+                pol_eff[(j + 2) % 3] * np.cos(-2.0 * theta[(j + 2) % 3] + 2.0 * theta[j]) * (pol_flux_corr[(j + 1) % 3] - I_stokes)
+                - pol_eff[(j + 1) % 3] * np.cos(-2.0 * theta[j] + 2.0 * theta[(j + 1) % 3]) * (pol_flux_corr[(j + 2) % 3] - I_stokes)
+                + coeff_stokes_corr[0, j] * (np.sin(2.0 * theta[j]) * Q_stokes - np.cos(2 * theta[j]) * U_stokes)
+            )
+        )
+    # Derivative of Q_stokes wrt theta_1, 2, 3
+    for j in range(3):
+        dIQU_dtheta[1, j] = (
+            2.0
+            * pol_eff[j]
+            / N
+            * (
+                np.cos(2.0 * theta[j]) * (pol_flux_corr[(j + 1) % 3] - pol_flux_corr[(j + 2) % 3])
+                - (
+                    pol_eff[(j + 2) % 3] * np.cos(-2.0 * theta[(j + 2) % 3] + 2.0 * theta[j])
+                    - pol_eff[(j + 1) % 3] * np.cos(-2.0 * theta[j] + 2.0 * theta[(j + 1) % 3])
+                )
+                * Q_stokes
+                + coeff_stokes_corr[1, j] * (np.sin(2.0 * theta[j]) * Q_stokes - np.cos(2 * theta[j]) * U_stokes)
+            )
+        )
+    # Derivative of U_stokes wrt theta_1, 2, 3
+    for j in range(3):
+        dIQU_dtheta[2, j] = (
+            2.0
+            * pol_eff[j]
+            / N
+            * (
+                np.sin(2.0 * theta[j]) * (pol_flux_corr[(j + 1) % 3] - pol_flux_corr[(j + 2) % 3])
+                - (
+                    pol_eff[(j + 2) % 3] * np.cos(-2.0 * theta[(j + 2) % 3] + 2.0 * theta[j])
+                    - pol_eff[(j + 1) % 3] * np.cos(-2.0 * theta[j] + 2.0 * theta[(j + 1) % 3])
+                )
+                * U_stokes
+                + coeff_stokes_corr[2, j] * (np.sin(2.0 * theta[j]) * Q_stokes - np.cos(2 * theta[j]) * U_stokes)
+            )
+        )
     # Compute the uncertainty associated with the polarizers' orientation (see Kishimoto 1999)
-    s_I2_axis = np.sum([dI_dtheta[i] ** 2 * globals()["sigma_theta"][i] ** 2 for i in range(len(globals()["sigma_theta"]))], axis=0)
-    s_Q2_axis = np.sum([dQ_dtheta[i] ** 2 * globals()["sigma_theta"][i] ** 2 for i in range(len(globals()["sigma_theta"]))], axis=0)
-    s_U2_axis = np.sum([dU_dtheta[i] ** 2 * globals()["sigma_theta"][i] ** 2 for i in range(len(globals()["sigma_theta"]))], axis=0)
-    # np.savetxt("output/sI_dir.txt", np.sqrt(s_I2_axis))
-    # np.savetxt("output/sQ_dir.txt", np.sqrt(s_Q2_axis))
-    # np.savetxt("output/sU_dir.txt", np.sqrt(s_U2_axis))
+    Stokes_axis_cov = np.zeros(Stokes_cov.shape)
+    for i in range(Stokes_cov.shape[0]):
+        Stokes_axis_cov[i, i] = np.sum([dIQU_dtheta[i, k] ** 2 * globals()["sigma_theta"][k] ** 2 for k in range(len(globals()["sigma_theta"]))], axis=0)
+        for j in [k for k in range(3) if k > i]:
+            Stokes_axis_cov[i, j] = np.sum(
+                [abs(dIQU_dtheta[i, k] * dIQU_dtheta[j, k]) * globals()["sigma_theta"][k] ** 2 for k in range(len(globals()["sigma_theta"]))], axis=0
+            )
+            Stokes_axis_cov[j, i] = np.sum(
+                [abs(dIQU_dtheta[i, k] * dIQU_dtheta[j, k]) * globals()["sigma_theta"][k] ** 2 for k in range(len(globals()["sigma_theta"]))], axis=0
+            )
     # Add quadratically the uncertainty to the Stokes covariance matrix
-    Stokes_cov[0, 0] += s_I2_axis + s_I2_stat
-    Stokes_cov[1, 1] += s_Q2_axis + s_Q2_stat
-    Stokes_cov[2, 2] += s_U2_axis + s_U2_stat
+    for i in range(Stokes_cov.shape[0]):
+        Stokes_cov[i, i] += Stokes_axis_cov[i, i] + Stokes_stat_cov[i, i]
+        for j in [k for k in range(Stokes_cov.shape[0]) if k > i]:
+            Stokes_cov[i, j] = np.sqrt(Stokes_cov[i, j] ** 2 + Stokes_axis_cov[i, j] ** 2 + Stokes_stat_cov[i, j] ** 2)
+            Stokes_cov[j, i] = np.sqrt(Stokes_cov[j, i] ** 2 + Stokes_axis_cov[j, i] ** 2 + Stokes_stat_cov[j, i] ** 2)
     # Save values to single header
     header_stokes = pol_headers[0]
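The new loop above is a first-order propagation of the polarizer orientation uncertainties sigma_theta into an extra Stokes covariance term, Stokes_axis_cov[i, j] = sum_k |dS_i/dtheta_k * dS_j/dtheta_k| * sigma_theta_k**2 (Kishimoto 1999). A minimal standalone sketch of that propagation, with made-up derivative values, a hypothetical helper name (axis_covariance), and one scalar per Stokes parameter instead of a full map:

    # Sketch only: per-parameter propagation of polarizer-angle uncertainties.
    import numpy as np

    def axis_covariance(d_stokes, sigma_theta):
        """d_stokes[i, k] = dS_i/dtheta_k; returns C[i, j] = sum_k |dS_i/dtheta_k * dS_j/dtheta_k| * sigma_theta_k**2."""
        n = d_stokes.shape[0]
        cov = np.zeros((n, n))
        for i in range(n):
            for j in range(n):
                cov[i, j] = np.sum(np.abs(d_stokes[i] * d_stokes[j]) * sigma_theta**2)
        return cov

    # Three polarizer angles with an assumed 0.5 deg orientation uncertainty each
    d_stokes = np.array([[0.2, -0.1, 0.05], [0.1, 0.0, -0.2], [0.05, 0.15, 0.1]])
    sigma_theta = np.full(3, np.deg2rad(0.5))
    print(axis_covariance(d_stokes, sigma_theta))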
@@ -1447,8 +1418,8 @@ def compute_Stokes(data_array, error_array, data_mask, headers, FWHM=None, scale
     for i in range(3):
         Stokes_cov[i, i] = np.sum([exp**2 * cov for exp, cov in zip(all_exp, all_Stokes_cov[:, i, i])], axis=0) / all_exp.sum() ** 2
         for j in [x for x in range(3) if x != i]:
-            Stokes_cov[i, j] = np.sqrt(np.sum([exp**2 * cov**2 for exp, cov in zip(all_exp, all_Stokes_cov[:, i, j])], axis=0) / all_exp.sum() ** 2)
-            Stokes_cov[j, i] = np.sqrt(np.sum([exp**2 * cov**2 for exp, cov in zip(all_exp, all_Stokes_cov[:, j, i])], axis=0) / all_exp.sum() ** 2)
+            Stokes_cov[i, j] = np.sum([exp**2 * cov**2 for exp, cov in zip(all_exp, all_Stokes_cov[:, i, j])], axis=0) / all_exp.sum() ** 2
+            Stokes_cov[j, i] = np.sum([exp**2 * cov**2 for exp, cov in zip(all_exp, all_Stokes_cov[:, j, i])], axis=0) / all_exp.sum() ** 2
     # Save values to single header
     header_stokes = all_header_stokes[0]
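The diagonal update in this hunk combines per-exposure variances with exposure-time weights, var_tot = sum_k t_k**2 * var_k / (sum_k t_k)**2. A minimal sketch with assumed exposure times and variances:

    # Sketch only: exposure-time weighted combination of per-exposure variances.
    import numpy as np

    all_exp = np.array([1200.0, 900.0, 1500.0])   # exposure times in seconds (made-up)
    all_var = np.array([2.1e-3, 3.4e-3, 1.8e-3])  # per-exposure variances (made-up)
    var_tot = np.sum(all_exp**2 * all_var) / all_exp.sum() ** 2
    print(var_tot)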
@@ -1463,6 +1434,7 @@ def compute_Stokes(data_array, error_array, data_mask, headers, FWHM=None, scale
     Q_stokes[np.isnan(Q_stokes)] = 0.0
     U_stokes[np.isnan(U_stokes)] = 0.0
     Stokes_cov[np.isnan(Stokes_cov)] = fmax
+    Stokes_stat_cov[np.isnan(Stokes_cov)] = fmax

     if integrate:
         # Compute integrated values for P, PA before any rotation
@@ -1476,29 +1448,47 @@ def compute_Stokes(data_array, error_array, data_mask, headers, FWHM=None, scale
         IQ_diluted_err = np.sqrt(np.sum(Stokes_cov[0, 1][mask] ** 2))
         IU_diluted_err = np.sqrt(np.sum(Stokes_cov[0, 2][mask] ** 2))
         QU_diluted_err = np.sqrt(np.sum(Stokes_cov[1, 2][mask] ** 2))
+        I_diluted_stat_err = np.sqrt(np.sum(Stokes_stat_cov[0, 0][mask]))
+        Q_diluted_stat_err = np.sqrt(np.sum(Stokes_stat_cov[1, 1][mask]))
+        U_diluted_stat_err = np.sqrt(np.sum(Stokes_stat_cov[2, 2][mask]))
+        IQ_diluted_stat_err = np.sqrt(np.sum(Stokes_stat_cov[0, 1][mask] ** 2))
+        IU_diluted_stat_err = np.sqrt(np.sum(Stokes_stat_cov[0, 2][mask] ** 2))
+        QU_diluted_stat_err = np.sqrt(np.sum(Stokes_stat_cov[1, 2][mask] ** 2))
         P_diluted = np.sqrt(Q_diluted**2 + U_diluted**2) / I_diluted
-        P_diluted_err = np.sqrt(
+        P_diluted_err = (1.0 / I_diluted) * np.sqrt(
             (Q_diluted**2 * Q_diluted_err**2 + U_diluted**2 * U_diluted_err**2 + 2.0 * Q_diluted * U_diluted * QU_diluted_err) / (Q_diluted**2 + U_diluted**2)
             + ((Q_diluted / I_diluted) ** 2 + (U_diluted / I_diluted) ** 2) * I_diluted_err**2
             - 2.0 * (Q_diluted / I_diluted) * IQ_diluted_err
             - 2.0 * (U_diluted / I_diluted) * IU_diluted_err
         )
+        P_diluted_stat_err = (
+            P_diluted
+            / I_diluted
+            * np.sqrt(
+                I_diluted_stat_err
+                - 2.0 / (I_diluted * P_diluted**2) * (Q_diluted * IQ_diluted_stat_err + U_diluted * IU_diluted_stat_err)
+                + 1.0
+                / (I_diluted**2 * P_diluted**4)
+                * (Q_diluted**2 * Q_diluted_stat_err + U_diluted**2 * U_diluted_stat_err + 2.0 * Q_diluted * U_diluted * QU_diluted_stat_err)
+            )
+        )
+        debiased_P_diluted = np.sqrt(P_diluted**2 - P_diluted_stat_err**2) if P_diluted**2 > P_diluted_stat_err**2 else 0.0
         PA_diluted = princ_angle((90.0 / np.pi) * np.arctan2(U_diluted, Q_diluted))
         PA_diluted_err = (90.0 / (np.pi * (Q_diluted**2 + U_diluted**2))) * np.sqrt(
             U_diluted**2 * Q_diluted_err**2 + Q_diluted**2 * U_diluted_err**2 - 2.0 * Q_diluted * U_diluted * QU_diluted_err
         )
-        header_stokes["P_int"] = (P_diluted, "Integrated polarization degree")
+        header_stokes["P_int"] = (debiased_P_diluted, "Integrated polarization degree")
         header_stokes["sP_int"] = (np.ceil(P_diluted_err * 1000.0) / 1000.0, "Integrated polarization degree error")
         header_stokes["PA_int"] = (PA_diluted, "Integrated polarization angle")
         header_stokes["sPA_int"] = (np.ceil(PA_diluted_err * 10.0) / 10.0, "Integrated polarization angle error")

-    return I_stokes, Q_stokes, U_stokes, Stokes_cov, header_stokes
+    return I_stokes, Q_stokes, U_stokes, Stokes_cov, Stokes_stat_cov, header_stokes
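The integrated degree written to P_int is now debiased before being stored: debiased_P = sqrt(P**2 - sigma_P,stat**2) when P exceeds its statistical error, and 0 otherwise, since P is positively biased at low signal-to-noise. A small numeric sketch with assumed values:

    # Sketch only: debiasing of the integrated polarization degree.
    import numpy as np

    P_diluted, P_diluted_stat_err = 0.042, 0.015   # made-up values
    debiased_P = np.sqrt(P_diluted**2 - P_diluted_stat_err**2) if P_diluted**2 > P_diluted_stat_err**2 else 0.0
    print(debiased_P)  # ~0.039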
-def compute_pol(I_stokes, Q_stokes, U_stokes, Stokes_cov, header_stokes):
+def compute_pol(I_stokes, Q_stokes, U_stokes, Stokes_cov, Stokes_stat_cov, header_stokes):
     """
     Compute the polarization degree (in %) and angle (in deg) and their
     respective errors from given Stokes parameters.
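For orientation, compute_pol derives the degree and angle from the Stokes maps as P = sqrt(Q**2 + U**2) / I and PA = 0.5 * arctan2(U, Q); the pipeline then maps PA onto a principal range with princ_angle. A standalone sketch with made-up values:

    # Sketch only: polarization degree and angle from one pixel's Stokes values.
    import numpy as np

    I, Q, U = 1.0, 0.03, -0.04
    P = np.sqrt(Q**2 + U**2) / I
    PA = np.degrees(0.5 * np.arctan2(U, Q))
    print(P, PA)  # 0.05, about -26.6 deg before wrapping to the principal range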
@@ -1573,27 +1563,44 @@ def compute_pol(I_stokes, Q_stokes, U_stokes, Stokes_cov, header_stokes):
     s_P[np.isnan(s_P)] = fmax
     s_PA[np.isnan(s_PA)] = fmax
+    # Errors on P, PA supposing Poisson noise
+    s_P_P = np.ones(I_stokes.shape) * fmax
+    s_PA_P = np.ones(I_stokes.shape) * fmax
+    maskP = np.logical_and(mask, P > 0.0)
+    s_P_P[maskP] = (
+        P[maskP]
+        / I_stokes[maskP]
+        * np.sqrt(
+            Stokes_stat_cov[0, 0][maskP]
+            - 2.0 / (I_stokes[maskP] * P[maskP] ** 2) * (Q_stokes[maskP] * Stokes_stat_cov[0, 1][maskP] + U_stokes[maskP] * Stokes_stat_cov[0, 2][maskP])
+            + 1.0
+            / (I_stokes[maskP] ** 2 * P[maskP] ** 4)
+            * (
+                Q_stokes[maskP] ** 2 * Stokes_stat_cov[1, 1][maskP]
+                + U_stokes[maskP] ** 2 * Stokes_stat_cov[2, 2][maskP]
+                + 2.0 * Q_stokes[maskP] * U_stokes[maskP] * Stokes_stat_cov[1, 2][maskP]
+            )
+        )
+    )
+    s_PA_P[maskP] = (
+        90.0
+        / (np.pi * I_stokes[maskP] ** 2 * P[maskP] ** 2)
+        * (
+            Q_stokes[maskP] ** 2 * Stokes_stat_cov[2, 2][maskP]
+            + U_stokes[maskP] * Stokes_stat_cov[1, 1][maskP]
+            - 2.0 * Q_stokes[maskP] * U_stokes[maskP] * Stokes_stat_cov[1, 2][maskP]
+        )
+    )
     # Catch expected "OverflowWarning" as wrong pixel have an overflowing error
     with warnings.catch_warnings(record=True) as _:
-        mask2 = P**2 >= s_P**2
+        mask2 = P**2 >= s_P_P**2
         debiased_P = np.zeros(I_stokes.shape)
-        debiased_P[mask2] = np.sqrt(P[mask2] ** 2 - s_P[mask2] ** 2)
+        debiased_P[mask2] = np.sqrt(P[mask2] ** 2 - s_P_P[mask2] ** 2)
         if (debiased_P > 1.0).any():
             print("WARNING : found {0:d} pixels for which debiased_P > 100%".format(debiased_P[debiased_P > 1.0].size))
-    # Compute the total exposure time so that
-    # I_stokes*exp_tot = N_tot the total number of events
-    exp_tot = header_stokes["exptime"]
-    # print("Total exposure time : {} sec".format(exp_tot))
-    N_obs = I_stokes * exp_tot
-    # Errors on P, PA supposing Poisson noise
-    s_P_P = np.ones(I_stokes.shape) * fmax
-    s_P_P[mask] = np.sqrt(2.0) / np.sqrt(N_obs[mask]) * 100.0
-    s_PA_P = np.ones(I_stokes.shape) * fmax
-    s_PA_P[mask2] = s_P_P[mask2] / (2.0 * P[mask2]) * 180.0 / np.pi
     # Nan handling :
     P[np.isnan(P)] = 0.0
     s_P[np.isnan(s_P)] = fmax
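The removed branch above estimated the Poisson error on P from the total event count alone, s_P ~ sqrt(2/N_obs); the new branch instead propagates the statistical covariance of I, Q, U pixel by pixel. A sketch of the old count-based estimate with an assumed exposure time and count rate:

    # Sketch only: the former count-based Poisson error on P, in percent.
    import numpy as np

    exptime, I_pixel = 2000.0, 1.5            # seconds, counts/s (made-up)
    N_obs = I_pixel * exptime                 # events accumulated in the pixel
    s_P_count = np.sqrt(2.0) / np.sqrt(N_obs) * 100.0
    print(s_P_count)  # ~2.6 % for 3000 events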
@@ -1605,7 +1612,7 @@ def compute_pol(I_stokes, Q_stokes, U_stokes, Stokes_cov, header_stokes):
     return P, debiased_P, s_P, s_P_P, PA, s_PA, s_PA_P


-def rotate_Stokes(I_stokes, Q_stokes, U_stokes, Stokes_cov, data_mask, header_stokes, SNRi_cut=None):
+def rotate_Stokes(I_stokes, Q_stokes, U_stokes, Stokes_cov, Stokes_stat_cov, data_mask, header_stokes, SNRi_cut=None):
     """
     Use scipy.ndimage.rotate to rotate I_stokes to an angle, and a rotation
     matrix to rotate Q, U of a given angle in degrees and update header
@@ -1622,7 +1629,11 @@ def rotate_Stokes(I_stokes, Q_stokes, U_stokes, Stokes_cov, data_mask, header_st
         Image (2D floats) containing the Stokes parameters accounting for
         +45/-45deg linear polarization intensity
     Stokes_cov : numpy.ndarray
-        Covariance matrix of the Stokes parameters I, Q, U.
+        Covariance matrix containing all uncertainties of the Stokes
+        parameters I, Q, U.
+    Stokes_stat_cov : numpy.ndarray
+        Covariance matrix containing statistical uncertainty of the Stokes
+        parameters I, Q, U.
     data_mask : numpy.ndarray
         2D boolean array delimiting the data to work on.
     header_stokes : astropy.fits.header.Header
@@ -1644,6 +1655,8 @@ def rotate_Stokes(I_stokes, Q_stokes, U_stokes, Stokes_cov, data_mask, header_st
         accounting for +45/-45deg linear polarization intensity.
     new_Stokes_cov : numpy.ndarray
         Updated covariance matrix of the Stokes parameters I, Q, U.
+    new_Stokes_stat_cov : numpy.ndarray
+        Updated statistical covariance matrix of the Stokes parameters I, Q, U.
     new_header_stokes : astropy.fits.header.Header
         Updated Header file associated with the Stokes fluxes accounting
         for the new orientation angle.
@@ -1675,11 +1688,9 @@ def rotate_Stokes(I_stokes, Q_stokes, U_stokes, Stokes_cov, data_mask, header_st
     Q_stokes = zeropad(Q_stokes, shape)
     U_stokes = zeropad(U_stokes, shape)
     data_mask = zeropad(data_mask, shape)
-    Stokes_cov = zeropad(Stokes_cov, [*Stokes_cov.shape[:-2], *shape])
     new_I_stokes = np.zeros(shape)
     new_Q_stokes = np.zeros(shape)
     new_U_stokes = np.zeros(shape)
-    new_Stokes_cov = np.zeros((*Stokes_cov.shape[:-2], *shape))

     # Rotate original images using scipy.ndimage.rotate
     new_I_stokes = sc_rotate(I_stokes, ang, order=1, reshape=False, cval=0.0)
@@ -1688,6 +1699,10 @@ def rotate_Stokes(I_stokes, Q_stokes, U_stokes, Stokes_cov, data_mask, header_st
     new_data_mask = sc_rotate(data_mask.astype(float) * 10.0, ang, order=1, reshape=False, cval=0.0)
     new_data_mask[new_data_mask < 1.0] = 0.0
     new_data_mask = new_data_mask.astype(bool)
+    # Rotate covariance matrix
+    Stokes_cov = zeropad(Stokes_cov, [*Stokes_cov.shape[:-2], *shape])
+    new_Stokes_cov = np.zeros((*Stokes_cov.shape[:-2], *shape))
     for i in range(3):
         for j in range(3):
             new_Stokes_cov[i, j] = sc_rotate(Stokes_cov[i, j], ang, order=1, reshape=False, cval=0.0)
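sc_rotate follows the scipy.ndimage.rotate call pattern used throughout this hunk (order=1 bilinear interpolation, reshape=False to keep the array shape, cval=0.0 outside the rotated footprint); the alias is assumed here from the comment above. A minimal sketch:

    # Sketch only: the rotation call pattern applied to each covariance plane.
    import numpy as np
    from scipy.ndimage import rotate

    img = np.zeros((5, 5))
    img[2, 3] = 1.0
    print(rotate(img, 45.0, order=1, reshape=False, cval=0.0).round(2))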
@@ -1698,6 +1713,17 @@ def rotate_Stokes(I_stokes, Q_stokes, U_stokes, Stokes_cov, data_mask, header_st
             new_I_stokes[i, j], new_Q_stokes[i, j], new_U_stokes[i, j] = np.dot(mrot, np.array([new_I_stokes[i, j], new_Q_stokes[i, j], new_U_stokes[i, j]])).T
             new_Stokes_cov[:, :, i, j] = np.dot(mrot, np.dot(new_Stokes_cov[:, :, i, j], mrot.T))
+    # Rotate statistical covariance matrix
+    Stokes_stat_cov = zeropad(Stokes_stat_cov, [*Stokes_stat_cov.shape[:-2], *shape])
+    new_Stokes_stat_cov = np.zeros((*Stokes_stat_cov.shape[:-2], *shape))
+    for i in range(3):
+        for j in range(3):
+            new_Stokes_stat_cov[i, j] = sc_rotate(Stokes_stat_cov[i, j], ang, order=1, reshape=False, cval=0.0)
+        new_Stokes_stat_cov[i, i] = np.abs(new_Stokes_stat_cov[i, i])
+    for i in range(shape[0]):
+        for j in range(shape[1]):
+            new_Stokes_stat_cov[:, :, i, j] = np.dot(mrot, np.dot(new_Stokes_stat_cov[:, :, i, j], mrot.T))

     # Update headers to new angle
     mrot = np.array([[np.cos(-alpha), -np.sin(-alpha)], [np.sin(-alpha), np.cos(-alpha)]])
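Both covariance rotations in this hunk apply the same congruence transform per pixel: if the Stokes vector rotates as S' = mrot @ S, its covariance rotates as mrot @ C @ mrot.T. A standalone sketch with a generic polarization-angle rotation (Q and U mix through 2*alpha; the exact mrot used by the pipeline is defined earlier in the function) and made-up values:

    # Sketch only: rotating a Stokes vector and its covariance by angle alpha.
    import numpy as np

    alpha = np.deg2rad(30.0)
    R = np.array([
        [1.0, 0.0, 0.0],
        [0.0, np.cos(2.0 * alpha), -np.sin(2.0 * alpha)],
        [0.0, np.sin(2.0 * alpha), np.cos(2.0 * alpha)],
    ])
    S = np.array([1.0, 0.05, -0.02])   # I, Q, U (made-up)
    C = np.diag([1e-4, 4e-5, 4e-5])    # covariance (made-up)
    S_rot = R @ S
    C_rot = R @ C @ R.T
    print(S_rot, np.diag(C_rot))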
@@ -1726,12 +1752,18 @@ def rotate_Stokes(I_stokes, Q_stokes, U_stokes, Stokes_cov, data_mask, header_st
         I_diluted = new_I_stokes[mask].sum()
         Q_diluted = new_Q_stokes[mask].sum()
         U_diluted = new_U_stokes[mask].sum()
-        I_diluted_err = np.sqrt(np.sum(new_Stokes_cov[0, 0][mask]))
-        Q_diluted_err = np.sqrt(np.sum(new_Stokes_cov[1, 1][mask]))
-        U_diluted_err = np.sqrt(np.sum(new_Stokes_cov[2, 2][mask]))
-        IQ_diluted_err = np.sqrt(np.sum(new_Stokes_cov[0, 1][mask] ** 2))
-        IU_diluted_err = np.sqrt(np.sum(new_Stokes_cov[0, 2][mask] ** 2))
-        QU_diluted_err = np.sqrt(np.sum(new_Stokes_cov[1, 2][mask] ** 2))
+        I_diluted_err = np.sqrt(np.sum(Stokes_cov[0, 0][mask]))
+        Q_diluted_err = np.sqrt(np.sum(Stokes_cov[1, 1][mask]))
+        U_diluted_err = np.sqrt(np.sum(Stokes_cov[2, 2][mask]))
+        IQ_diluted_err = np.sqrt(np.sum(Stokes_cov[0, 1][mask] ** 2))
+        IU_diluted_err = np.sqrt(np.sum(Stokes_cov[0, 2][mask] ** 2))
+        QU_diluted_err = np.sqrt(np.sum(Stokes_cov[1, 2][mask] ** 2))
+        I_diluted_stat_err = np.sqrt(np.sum(Stokes_stat_cov[0, 0][mask]))
+        Q_diluted_stat_err = np.sqrt(np.sum(Stokes_stat_cov[1, 1][mask]))
+        U_diluted_stat_err = np.sqrt(np.sum(Stokes_stat_cov[2, 2][mask]))
+        IQ_diluted_stat_err = np.sqrt(np.sum(Stokes_stat_cov[0, 1][mask] ** 2))
+        IU_diluted_stat_err = np.sqrt(np.sum(Stokes_stat_cov[0, 2][mask] ** 2))
+        QU_diluted_stat_err = np.sqrt(np.sum(Stokes_stat_cov[1, 2][mask] ** 2))
         P_diluted = np.sqrt(Q_diluted**2 + U_diluted**2) / I_diluted
         P_diluted_err = (1.0 / I_diluted) * np.sqrt(
@@ -1740,18 +1772,30 @@ def rotate_Stokes(I_stokes, Q_stokes, U_stokes, Stokes_cov, data_mask, header_st
             - 2.0 * (Q_diluted / I_diluted) * IQ_diluted_err
             - 2.0 * (U_diluted / I_diluted) * IU_diluted_err
         )
+        P_diluted_stat_err = (
+            P_diluted
+            / I_diluted
+            * np.sqrt(
+                I_diluted_stat_err
+                - 2.0 / (I_diluted * P_diluted**2) * (Q_diluted * IQ_diluted_stat_err + U_diluted * IU_diluted_stat_err)
+                + 1.0
+                / (I_diluted**2 * P_diluted**4)
+                * (Q_diluted**2 * Q_diluted_stat_err + U_diluted**2 * U_diluted_stat_err + 2.0 * Q_diluted * U_diluted * QU_diluted_stat_err)
+            )
+        )
+        debiased_P_diluted = np.sqrt(P_diluted**2 - P_diluted_stat_err**2) if P_diluted**2 > P_diluted_stat_err**2 else 0.0
         PA_diluted = princ_angle((90.0 / np.pi) * np.arctan2(U_diluted, Q_diluted))
         PA_diluted_err = (90.0 / (np.pi * (Q_diluted**2 + U_diluted**2))) * np.sqrt(
             U_diluted**2 * Q_diluted_err**2 + Q_diluted**2 * U_diluted_err**2 - 2.0 * Q_diluted * U_diluted * QU_diluted_err
         )
-        new_header_stokes["P_int"] = (P_diluted, "Integrated polarization degree")
+        new_header_stokes["P_int"] = (debiased_P_diluted, "Integrated polarization degree")
         new_header_stokes["sP_int"] = (np.ceil(P_diluted_err * 1000.0) / 1000.0, "Integrated polarization degree error")
         new_header_stokes["PA_int"] = (PA_diluted, "Integrated polarization angle")
         new_header_stokes["sPA_int"] = (np.ceil(PA_diluted_err * 10.0) / 10.0, "Integrated polarization angle error")

-    return new_I_stokes, new_Q_stokes, new_U_stokes, new_Stokes_cov, new_data_mask, new_header_stokes
+    return new_I_stokes, new_Q_stokes, new_U_stokes, new_Stokes_cov, new_Stokes_stat_cov, new_data_mask, new_header_stokes


 def rotate_data(data_array, error_array, data_mask, headers):
View File
@@ -43,7 +43,9 @@ def main(infile, P_cut=0.99, target=None, display="pf", output_dir=None):
     if target is None:
         target = Stokes[0].header["TARGNAME"]
-    fig = figure(figsize=(8, 8.5), layout="constrained")
+    ratiox = max(int(stkI.shape[1] / (stkI.shape[0])), 1)
+    ratioy = max(int((stkI.shape[0]) / stkI.shape[1]), 1)
+    fig = figure(figsize=(8 * ratiox, 8 * ratioy), layout="constrained")
     fig, ax = polarization_map(Stokes, P_cut=P_cut, step_vec=1, scale_vec=5, display=display, fig=fig, width=0.33, linewidth=0.5)
     ax.plot(*Stokescenter, marker="+", color="k", lw=3)
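The ratiox/ratioy change above lets the figure follow the aspect ratio of the Stokes I map instead of a fixed 8 x 8.5 inch canvas. A small sketch of the same arithmetic with an assumed image shape:

    # Sketch only: integer aspect ratios stretch the longer axis, never below the 8-inch base.
    stkI_shape = (512, 1536)  # (rows, cols), made-up
    ratiox = max(int(stkI_shape[1] / stkI_shape[0]), 1)
    ratioy = max(int(stkI_shape[0] / stkI_shape[1]), 1)
    print((8 * ratiox, 8 * ratioy))  # -> (24, 8) for a 3:1 wide map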