28 Commits

SHA1 Message Date
03f2c550e3 fix WCS comparison and 2D display 2026-02-13 14:31:56 +01:00
561c5c2fec only add target file to its respective folder when querying multiple targets 2026-02-11 17:18:30 +01:00
b34645ac96 correct for missing POL filter when multiple targets 2026-02-11 17:12:14 +01:00
3f79d56f42 retrieve products into divided program folders 2026-02-11 15:30:43 +01:00
4341897ba4 handle multi target query 2026-02-11 15:16:15 +01:00
561f226473 more accessible plots 2025-09-25 16:16:32 +02:00
5a48ea1b24 add back comparison K 2025-09-06 13:28:01 +02:00
6d7a7dfc4c correction to get_error and align_data when no mask is provided to crop_array 2025-09-05 17:23:17 +02:00
f05f6b789c update scripts and plots to new fits data shape 2025-09-04 11:17:14 +02:00
8a8359fed0 Remove old header content 2025-08-08 16:44:14 +02:00
f4effac343 Add Stokes_cov_stat to fits and compute again P_debiased in plots 2025-08-08 11:45:11 +02:00
e639695618 Right number of WCS axes for each HDU in output 2025-08-06 17:28:18 +02:00
f47c650dc5 remove target_name card in header 2025-08-05 19:35:57 +02:00
fa55a9ea84 adapt crop to Stokes cube 2025-08-05 19:33:13 +02:00
20280e7226 Put I,Q,U into single Stokes matrix 2025-08-05 18:56:45 +02:00
8b4c7c38f2 better handling of patheffects for black borders 2025-08-04 18:26:31 +02:00
0caa821b89 Add black borders to text, small improvement to pol_map.ax_cosmetics 2025-08-04 13:55:45 +02:00
97a9af63e5 align_map.write_to write all images in fits 2025-07-30 18:16:32 +02:00
e7b96e35e9 larger labels on plots 2025-07-25 17:25:21 +02:00
d21b5ecaa9 Modify mask display for emission center 2025-07-25 14:25:51 +02:00
49846d9497 slightly improve plots 2025-07-02 18:36:13 +02:00
f7c50bf136 change min/max for Pol deg plot 2025-06-12 17:00:45 +02:00
1c7c963d6e change theta_P to Psi in output 2025-06-11 20:21:56 +02:00
9b6e17918f fix default plot value in emission_center, value ranges in interactive SNR 2025-06-02 12:25:00 +02:00
999b581dc8 set static pdf output to inferno colormap 2025-05-13 17:29:25 +02:00
bce319581c fix mid-merge plots.py 2025-05-13 15:09:48 +02:00
ba7b4e23ae fit background with gaussian+polynomial 2025-05-13 15:06:31 +02:00
a5766ad618 change formatting of hovering value in interactive plot 2025-05-13 15:05:34 +02:00
12 changed files with 1271 additions and 716 deletions


@@ -41,12 +41,12 @@ def main(target=None, proposal_id=None, infiles=None, output_dir="./data", crop=
     # Background estimation
     error_sub_type = "scott" # sqrt, sturges, rice, scott, freedman-diaconis (default) or shape (example (51, 51))
-    subtract_error = 0.50
-    display_bkg = True
+    subtract_error = 0.80
+    display_bkg = False
     # Data binning
-    pxsize = 4
-    pxscale = "px" # pixel, arcsec or full
+    pxsize = 0.10
+    pxscale = "arcsec" # pixel, arcsec or full
     rebin_operation = "sum" # sum or average
     # Alignement
@@ -59,17 +59,17 @@ def main(target=None, proposal_id=None, infiles=None, output_dir="./data", crop=
     # Smoothing
     smoothing_function = "combine" # gaussian_after, weighted_gaussian_after, gaussian, weighted_gaussian or combine
-    smoothing_FWHM = 1.5 # If None, no smoothing is done
-    smoothing_scale = "px" # pixel or arcsec
+    smoothing_FWHM = 0.15 # If None, no smoothing is done
+    smoothing_scale = "arcsec" # pixel or arcsec
     # Rotation
     rotate_North = True
     # Polarization map output
     P_cut = 3 # if >=1.0 cut on the signal-to-noise else cut on the confidence level in Q, U
-    SNRi_cut = 1.0 # I measurments with SNR>30, which implies an uncertainty in P of 4.7%.
+    SNRi_cut = 10.0 # I measurments with SNR>30, which implies an uncertainty in P of 4.7%.
     flux_lim = None # lowest and highest flux displayed on plot, defaults to bkg and maximum in cut if None
-    scale_vec = 5
+    scale_vec = 3
     step_vec = 1 # plot all vectors in the array. if step_vec = 2, then every other vector will be plotted if step_vec = 0 then all vectors are displayed at full length
     # Pipeline start
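An aside on the SNRi_cut comment above: for photon-noise-limited data the polarization-degree uncertainty follows roughly sigma_P ≈ sqrt(2) / SNR_I (assuming equal, independent errors on Q and U), so SNR_I = 30 indeed gives about 4.7 %. A minimal sketch of that rule of thumb; the function name is illustrative only:

```python
import numpy as np

def approx_sigma_P(snr_i: float) -> float:
    """Rule-of-thumb polarization degree error for a given Stokes I SNR.

    Assumes sigma_Q ~ sigma_U ~ sigma_I with negligible covariance,
    which gives sigma_P ~ sqrt(2) / SNR_I.
    """
    return np.sqrt(2.0) / snr_i

print("{0:.1f} %".format(approx_sigma_P(30) * 100))  # -> 4.7 %
```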
@@ -197,56 +197,32 @@ def main(target=None, proposal_id=None, infiles=None, output_dir="./data", crop=
         norm=LogNorm(vmin=data_array[data_array > 0.0].min() * headers[0]["photflam"], vmax=data_array[data_array > 0.0].max() * headers[0]["photflam"]),
     )
-    background = np.array([np.array(bkg).reshape(1, 1) for bkg in background])
-    background_error = np.array(
-        [
-            np.array(
-                np.sqrt(
-                    (bkg - background[np.array([h["filtnam1"] == head["filtnam1"] for h in headers], dtype=bool)].mean()) ** 2
-                    / np.sum([h["filtnam1"] == head["filtnam1"] for h in headers])
-                )
-            ).reshape(1, 1)
-            for bkg, head in zip(background, headers)
-        ]
-    )
     # Step 2:
     # Compute Stokes I, Q, U with smoothed polarized images
     # SMOOTHING DISCUSSION :
     # FWHM of FOC have been estimated at about 0.03" across 1500-5000 Angstrom band, which is about 2 detector pixels wide
     # see Jedrzejewski, R.; Nota, A.; Hack, W. J., A Comparison Between FOC and WFPC2
     # Bibcode : 1995chst.conf...10J
-    I_stokes, Q_stokes, U_stokes, Stokes_cov, header_stokes, s_IQU_stat = proj_red.compute_Stokes(
+    Stokes, Stokes_cov, header_stokes, Stokes_cov_stat = proj_red.compute_Stokes(
         data_array, error_array, data_mask, headers, FWHM=smoothing_FWHM, scale=smoothing_scale, smoothing=smoothing_function, transmitcorr=transmitcorr
     )
-    I_bkg, Q_bkg, U_bkg, S_cov_bkg, header_bkg, s_IQU_stat_bkg = proj_red.compute_Stokes(
-        background, background_error, np.array(True).reshape(1, 1), headers, FWHM=None, scale=smoothing_scale, smoothing=smoothing_function, transmitcorr=False
-    )
     # Step 3:
     # Rotate images to have North up
     if rotate_North:
-        I_stokes, Q_stokes, U_stokes, Stokes_cov, data_mask, header_stokes, s_IQU_stat = proj_red.rotate_Stokes(
-            I_stokes, Q_stokes, U_stokes, Stokes_cov, data_mask, header_stokes, s_IQU_stat=s_IQU_stat, SNRi_cut=None
-        )
-        I_bkg, Q_bkg, U_bkg, S_cov_bkg, data_mask_bkg, header_bkg, s_IQU_stat_bkg = proj_red.rotate_Stokes(
-            I_bkg, Q_bkg, U_bkg, S_cov_bkg, np.array(True).reshape(1, 1), header_bkg, s_IQU_stat=s_IQU_stat_bkg, SNRi_cut=None
+        Stokes, Stokes_cov, data_mask, header_stokes, Stokes_cov_stat = proj_red.rotate_Stokes(
+            Stokes, Stokes_cov, data_mask, header_stokes, Stokes_cov_stat=Stokes_cov_stat, SNRi_cut=None
         )
     # Compute polarimetric parameters (polarization degree and angle).
-    P, debiased_P, s_P, s_P_P, PA, s_PA, s_PA_P = proj_red.compute_pol(I_stokes, Q_stokes, U_stokes, Stokes_cov, header_stokes, s_IQU_stat=s_IQU_stat)
-    P_bkg, debiased_P_bkg, s_P_bkg, s_P_P_bkg, PA_bkg, s_PA_bkg, s_PA_P_bkg = proj_red.compute_pol(
-        I_bkg, Q_bkg, U_bkg, S_cov_bkg, header_bkg, s_IQU_stat=s_IQU_stat_bkg
-    )
+    P, debiased_P, s_P, s_P_P, PA, s_PA, s_PA_P = proj_red.compute_pol(Stokes, Stokes_cov, header_stokes, Stokes_cov_stat=Stokes_cov_stat)
     # Step 4:
     # Save image to FITS.
     figname = "_".join([figname, figtype]) if figtype != "" else figname
     Stokes_hdul = proj_fits.save_Stokes(
-        I_stokes,
-        Q_stokes,
-        U_stokes,
+        Stokes,
         Stokes_cov,
+        Stokes_cov_stat,
         P,
         debiased_P,
         s_P,
@@ -277,8 +253,8 @@ def main(target=None, proposal_id=None, infiles=None, output_dir="./data", crop=
             "F_int({0:.0f} Angs) = ({1} ± {2})e{3} ergs.cm^-2.s^-1.Angs^-1".format(
                 header_stokes["PHOTPLAM"],
                 *sci_not(
-                    Stokes_hdul[0].data[data_mask].sum() * header_stokes["PHOTFLAM"],
-                    np.sqrt(Stokes_hdul[3].data[0, 0][data_mask].sum()) * header_stokes["PHOTFLAM"],
+                    Stokes_hdul["STOKES"].data[0][data_mask].sum() * header_stokes["PHOTFLAM"],
+                    np.sqrt(Stokes_hdul["STOKES_COV"].data[0, 0][data_mask].sum()) * header_stokes["PHOTFLAM"],
                     2,
                     out=int,
                 ),
@@ -286,14 +262,6 @@ def main(target=None, proposal_id=None, infiles=None, output_dir="./data", crop=
         )
     print("P_int = {0:.1f} ± {1:.1f} %".format(header_stokes["p_int"] * 100.0, np.ceil(header_stokes["sP_int"] * 1000.0) / 10.0))
     print("PA_int = {0:.1f} ± {1:.1f} °".format(princ_angle(header_stokes["pa_int"]), princ_angle(np.ceil(header_stokes["sPA_int"] * 10.0) / 10.0)))
-    # Background values
-    print(
-        "F_bkg({0:.0f} Angs) = ({1} ± {2})e{3} ergs.cm^-2.s^-1.Angs^-1".format(
-            header_stokes["photplam"], *sci_not(I_bkg[0, 0] * header_stokes["photflam"], np.sqrt(S_cov_bkg[0, 0][0, 0]) * header_stokes["photflam"], 2, out=int)
-        )
-    )
-    print("P_bkg = {0:.1f} ± {1:.1f} %".format(debiased_P_bkg[0, 0] * 100.0, np.ceil(s_P_bkg[0, 0] * 1000.0) / 10.0))
-    print("PA_bkg = {0:.1f} ± {1:.1f} °".format(princ_angle(PA_bkg[0, 0]), princ_angle(np.ceil(s_PA_bkg[0, 0] * 10.0) / 10.0)))
     # Plot polarization map (Background is either total Flux, Polarization degree or Polarization degree error).
     if pxscale.lower() not in ["full", "integrate"] and not interactive:
         proj_plots.polarization_map(
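The F_int block above now indexes the output file by extension name instead of HDU position. A hedged sketch of that access pattern on a file written by save_Stokes (extension names are taken from this diff; the filename is a placeholder):

```python
import numpy as np
from astropy.io import fits

# "stokes_output.fits" is a placeholder name for a file written by save_Stokes.
with fits.open("stokes_output.fits") as hdul:
    stokes = hdul["STOKES"].data            # (4, ny, nx) cube: I, Q, U, V planes
    cov = hdul["STOKES_COV"].data           # (4, 4, ny, nx) covariance matrix
    mask = hdul["Data_mask"].data.astype(bool)
    f_int = stokes[0][mask].sum()           # integrated Stokes I over the mask
    f_err = np.sqrt(cov[0, 0][mask].sum())  # propagated error on that sum
```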


@@ -45,14 +45,14 @@ def display_bkg(data, background, std_bkg, headers, histograms=None, binning=Non
     date_time = np.array([datetime.strptime(d, "%Y-%m-%d %H:%M:%S") for d in date_time])
     date_err = np.array([timedelta(seconds=headers[i]["exptime"] / 2.0) for i in range(len(headers))])
     filt = np.array([headers[i]["filtnam1"] for i in range(len(headers))])
-    dict_filt = {"POL0": "r", "POL60": "g", "POL120": "b"}
-    c_filt = np.array([dict_filt[f] for f in filt])
-    fig, ax = plt.subplots(figsize=(10, 6), constrained_layout=True)
-    ax.errorbar(date_time, background * convert_flux, xerr=date_err, yerr=std_bkg * convert_flux, fmt="+k", markersize=0, ecolor=c_filt)
+    dict_filt = {"POL0": "yo", "POL60": "bv", "POL120": "rs"}
+    c_filt = np.array([dict_filt[f][0] for f in filt])
+    fig, ax = plt.subplots(figsize=(10, 4), constrained_layout=True)
     for f in np.unique(filt):
         mask = [fil == f for fil in filt]
-        ax.scatter(date_time[mask], background[mask] * convert_flux[mask], color=dict_filt[f], label="{0:s}".format(f))
+        ax.scatter(date_time[mask], background[mask] * convert_flux[mask], color=dict_filt[f][0], marker=dict_filt[f][1], label="{0:s}".format(f))
+    ax.errorbar(date_time, background * convert_flux, xerr=date_err, yerr=std_bkg * convert_flux, fmt="+k", markersize=0, ecolor=c_filt)
     # Date handling
     locator = mdates.AutoDateLocator()
     formatter = mdates.ConciseDateFormatter(locator)
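The hunk above switches from bare color codes to combined color+marker codes ("yo" = yellow circles, "bv" = blue triangles, "rs" = red squares), indexed per filter, and draws the error bars after the scatter so markers stay on top. A small self-contained sketch of the pattern with made-up data:

```python
import numpy as np
import matplotlib.pyplot as plt

# One "color+marker" code per polarizer filter, as in the diff above.
dict_filt = {"POL0": "yo", "POL60": "bv", "POL120": "rs"}

rng = np.random.default_rng(0)
x = np.arange(9)
y = rng.normal(1.0, 0.1, 9)
filt = np.array(["POL0", "POL60", "POL120"] * 3)

fig, ax = plt.subplots(figsize=(10, 4), constrained_layout=True)
for f in np.unique(filt):
    sel = filt == f
    # code[0] is the matplotlib color, code[1] the marker style
    ax.scatter(x[sel], y[sel], color=dict_filt[f][0], marker=dict_filt[f][1], label=f)
ax.legend()
plt.show()
```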
@@ -69,7 +69,7 @@ def display_bkg(data, background, std_bkg, headers, histograms=None, binning=Non
             this_savename += "_background_flux.pdf"
         else:
             this_savename = savename[:-4] + "_background_flux" + savename[-4:]
-        fig.savefig(path_join(plots_folder, this_savename), bbox_inches="tight")
+        fig.savefig(path_join(plots_folder, this_savename), bbox_inches="tight", facecolor="None", edgecolor="None")
     if histograms is not None:
         filt_obs = {"POL0": 0, "POL60": 0, "POL120": 0}
@@ -93,8 +93,10 @@ def display_bkg(data, background, std_bkg, headers, histograms=None, binning=Non
                 max(xmax, np.max(np.array(bins)[np.array(hist) > 5e1]) * convert_flux[0]),
             )
             if coeff is not None:
-                # ax_h.plot(bins*convert_flux[i], gausspol(bins, *coeff[i]), '--', color="C{0:d}".format(i), alpha=0.8)
-                ax_h.plot(bins * convert_flux[i], gauss(bins, *coeff[i]), "--", color="C{0:d}".format(i), alpha=0.8)
+                if len(coeff[i]) == 7:
+                    ax_h.plot(bins * convert_flux[i], gausspol(bins, *coeff[i]), "--", color="C{0:d}".format(i), alpha=0.8)
+                elif len(coeff[i]) == 3:
+                    ax_h.plot(bins * convert_flux[i], gauss(bins, *coeff[i]), "--", color="C{0:d}".format(i), alpha=0.8)
         ax_h.set_xscale("log")
         ax_h.set_yscale("log")
         ax_h.set_ylim([5e1, np.max([hist.max() for hist in histograms])])
@@ -109,7 +111,7 @@ def display_bkg(data, background, std_bkg, headers, histograms=None, binning=Non
             this_savename += "_histograms.pdf"
         else:
             this_savename = savename[:-4] + "_histograms" + savename[-4:]
-        fig_h.savefig(path_join(plots_folder, this_savename), bbox_inches="tight")
+        fig_h.savefig(path_join(plots_folder, this_savename), bbox_inches="tight", facecolor="None", edgecolor="None")
     fig2, ax2 = plt.subplots(figsize=(10, 10))
     data0 = data[0] * convert_flux[0]
@@ -142,7 +144,7 @@ def display_bkg(data, background, std_bkg, headers, histograms=None, binning=Non
             this_savename += "_" + filt + "_background_location.pdf"
         else:
             this_savename = savename[:-4] + "_" + filt + "_background_location" + savename[-4:]
-        fig2.savefig(path_join(plots_folder, this_savename), bbox_inches="tight")
+        fig2.savefig(path_join(plots_folder, this_savename), bbox_inches="tight", facecolor="None", edgecolor="None")
     if rectangle is not None:
         plot_obs(
             data,
@@ -364,8 +366,8 @@ def bkg_hist(data, error, mask, headers, sub_type=None, subtract_error=True, dis
         bins_stdev = binning[-1][hist > hist.max() / 2.0]
         stdev = bins_stdev[-1] - bins_stdev[0]
         # p0 = [hist.max(), binning[-1][np.argmax(hist)], stdev, 1e-3, 1e-3, 1e-3, 1e-3]
-        p0 = [hist.max(), binning[-1][np.argmax(hist)], stdev]
         # popt, pcov = curve_fit(gausspol, binning[-1], hist, p0=p0)
+        p0 = [hist.max(), binning[-1][np.argmax(hist)], stdev]
         popt, pcov = curve_fit(gauss, binning[-1], hist, p0=p0)
         coeff.append(popt)
         bkg = popt[1] + np.abs(popt[2]) * subtract_error
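bkg_hist fits the sky histogram with a Gaussian (3 parameters) and, per the commented-out lines and the len(coeff) == 7 branch above, optionally a Gaussian plus cubic polynomial (7 parameters, matching the commit "fit background with gaussian+polynomial"). A hedged sketch of what those two model functions and the fit could look like; the repo's actual gauss/gausspol definitions live elsewhere, so these are assumptions consistent with the parameter counts:

```python
import numpy as np
from scipy.optimize import curve_fit

def gauss(x, a, mu, sigma):
    # 3-parameter Gaussian, matching p0 = [hist.max(), mode, stdev]
    return a * np.exp(-((x - mu) ** 2) / (2.0 * sigma**2))

def gausspol(x, a, mu, sigma, c0, c1, c2, c3):
    # Gaussian plus cubic polynomial: 7 parameters, matching len(coeff) == 7
    return gauss(x, a, mu, sigma) + c0 + c1 * x + c2 * x**2 + c3 * x**3

# Toy histogram: a Gaussian peak sitting on a gentle baseline
x = np.linspace(0.0, 10.0, 200)
hist = gauss(x, 100.0, 4.0, 0.8) + 2.0 + 0.5 * x + np.random.default_rng(1).normal(0, 1, x.size)

p0 = [hist.max(), x[np.argmax(hist)], 0.5]
popt, pcov = curve_fit(gauss, x, hist, p0=p0)
bkg = popt[1] + np.abs(popt[2]) * 0.8  # mean + 0.8 sigma, as with subtract_error = 0.80
```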


@@ -16,7 +16,7 @@ from astropy.io import fits
 from astropy.wcs import WCS
 from .convex_hull import clean_ROI
-from .utils import wcs_CD_to_PC, wcs_PA
+from .utils import wcs_CD_to_PC, wcs_PA, add_stokes_axis_to_header, remove_stokes_axis_from_header
 def get_obs_data(infiles, data_folder="", compute_flux=False):
@@ -89,7 +89,7 @@ def get_obs_data(infiles, data_folder="", compute_flux=False):
     # force WCS for POL60 to have same pixel size as POL0 and POL120
     is_pol60 = np.array([head["filtnam1"].lower() == "pol60" for head in headers], dtype=bool)
-    cdelt = np.round(np.array([WCS(head).wcs.cdelt[:2] for head in headers]), 10)
+    cdelt = np.round(np.abs(np.array([WCS(head).wcs.cdelt[:2] for head in headers])), 10)
     if np.unique(cdelt[np.logical_not(is_pol60)], axis=0).size != 2:
         print(np.unique(cdelt[np.logical_not(is_pol60)], axis=0))
         raise ValueError("Not all images have same pixel size")
@@ -106,20 +106,21 @@ def get_obs_data(infiles, data_folder="", compute_flux=False):
 def save_Stokes(
-    I_stokes, Q_stokes, U_stokes, Stokes_cov, P, debiased_P, s_P, s_P_P, PA, s_PA, s_PA_P, header_stokes, data_mask, filename, data_folder="", return_hdul=False
+    Stokes, Stokes_cov, Stokes_cov_stat, P, debiased_P, s_P, s_P_P, PA, s_PA, s_PA_P, header_stokes, data_mask, filename, data_folder="", return_hdul=False
 ):
     """
     Save computed polarimetry parameters to a single fits file,
     updating header accordingly.
     ----------
     Inputs:
-    I_stokes, Q_stokes, U_stokes, P, debiased_P, s_P, s_P_P, PA, s_PA, s_PA_P : numpy.ndarray
-        Images (2D float arrays) containing the computed polarimetric data :
-        Stokes parameters I, Q, U, Polarization degree and debieased,
+    Stokes, P, debiased_P, s_P, s_P_P, PA, s_PA, s_PA_P : numpy.ndarray
+        Stokes cube (3D float arrays) containing the computed polarimetric data :
+        Stokes parameters I, Q, U, V, Polarization degree and debieased,
         its error propagated and assuming Poisson noise, Polarization angle,
         its error propagated and assuming Poisson noise.
-    Stokes_cov : numpy.ndarray
-        Covariance matrix of the Stokes parameters I, Q, U.
+    Stokes_cov, Stokes_cov_stat : numpy.ndarray
+        Covariance matrix of the Stokes parameters I, Q, U, V and its statistical
+        uncertainties part.
     headers : header list
         Header of reference some keywords will be copied from (CRVAL, CDELT,
         INSTRUME, PROPOSID, TARGNAME, ORIENTAT, EXPTOT).
@@ -137,23 +138,30 @@ def save_Stokes(
     ----------
     Return:
     hdul : astropy.io.fits.hdu.hdulist.HDUList
-        HDUList containing I_stokes in the PrimaryHDU, then Q_stokes, U_stokes,
-        P, s_P, PA, s_PA in this order. Headers have been updated to relevant
-        informations (WCS, orientation, data_type).
+        HDUList containing the Stokes cube in the PrimaryHDU, then Stokes_cov,
+        Stokes_cov_stat, P, s_P, PA, s_PA in this order. Headers have been updated
+        to relevant informations (WCS, orientation, data_type).
         Only returned if return_hdul is True.
     """
     # Create new WCS object given the modified images
-    new_wcs = WCS(header_stokes).deepcopy()
+    wcs = WCS(header_stokes).deepcopy()
+    new_wcs = WCS(header_stokes).celestial.deepcopy()
+    header = wcs.to_header().copy()
+    header["NAXIS"] = header_stokes["NAXIS"]
+    for i in range(wcs.wcs.naxis):
+        header["NAXIS%d" % (i + 1)] = header_stokes["NAXIS%d" % (i + 1)]
+    header = remove_stokes_axis_from_header(header).copy() if header_stokes["NAXIS"] > 2 else header
     if data_mask.shape != (1, 1):
         vertex = clean_ROI(data_mask)
         shape = vertex[1::2] - vertex[0::2]
         new_wcs.array_shape = shape
         new_wcs.wcs.crpix = np.array(new_wcs.wcs.crpix) - vertex[0::-2]
+        for key, val in list(new_wcs.to_header().items()) + [("NAXIS", 2), ("NAXIS1", new_wcs.array_shape[1]), ("NAXIS2", new_wcs.array_shape[0])]:
+            header[key] = val
-    header = new_wcs.to_header()
-    header["TELESCOP"] = (header_stokes["TELESCOP"] if "TELESCOP" in list(header_stokes.keys()) else "HST", "telescope used to acquire data")
-    header["INSTRUME"] = (header_stokes["INSTRUME"] if "INSTRUME" in list(header_stokes.keys()) else "FOC", "identifier for instrument used to acuire data")
+    header["TELESCOP"] = (header_stokes["TELESCOP"] if "TELESCOP" in list(header_stokes.keys()) else "HST", "Telescope used to acquire data")
+    header["INSTRUME"] = (header_stokes["INSTRUME"] if "INSTRUME" in list(header_stokes.keys()) else "FOC", "identifier for instrument used to acquire data")
     header["PHOTPLAM"] = (header_stokes["PHOTPLAM"], "Pivot Wavelength")
     header["PHOTBW"] = (header_stokes["PHOTBW"], "RMS Bandwidth of the Filter and Detector")
     header["PHOTFLAM"] = (header_stokes["PHOTFLAM"], "Inverse Sensitivity in DN/sec/cm**2/Angst")
@@ -174,9 +182,9 @@ def save_Stokes(
     # Crop Data to mask
     if data_mask.shape != (1, 1):
-        I_stokes = I_stokes[vertex[2] : vertex[3], vertex[0] : vertex[1]]
-        Q_stokes = Q_stokes[vertex[2] : vertex[3], vertex[0] : vertex[1]]
-        U_stokes = U_stokes[vertex[2] : vertex[3], vertex[0] : vertex[1]]
+        Stokes = Stokes[:, vertex[2] : vertex[3], vertex[0] : vertex[1]]
+        Stokes_cov = Stokes_cov[:, :, vertex[2] : vertex[3], vertex[0] : vertex[1]]
+        Stokes_cov_stat = Stokes_cov_stat[:, :, vertex[2] : vertex[3], vertex[0] : vertex[1]]
        P = P[vertex[2] : vertex[3], vertex[0] : vertex[1]]
        debiased_P = debiased_P[vertex[2] : vertex[3], vertex[0] : vertex[1]]
        s_P = s_P[vertex[2] : vertex[3], vertex[0] : vertex[1]]
@@ -184,14 +192,6 @@ def save_Stokes(
         PA = PA[vertex[2] : vertex[3], vertex[0] : vertex[1]]
         s_PA = s_PA[vertex[2] : vertex[3], vertex[0] : vertex[1]]
         s_PA_P = s_PA_P[vertex[2] : vertex[3], vertex[0] : vertex[1]]
-        new_Stokes_cov = np.zeros((*Stokes_cov.shape[:-2], *shape[::-1]))
-        for i in range(3):
-            for j in range(3):
-                Stokes_cov[i, j][(1 - data_mask).astype(bool)] = 0.0
-                new_Stokes_cov[i, j] = Stokes_cov[i, j][vertex[2] : vertex[3], vertex[0] : vertex[1]]
-        Stokes_cov = new_Stokes_cov
         data_mask = data_mask[vertex[2] : vertex[3], vertex[0] : vertex[1]]
     data_mask = data_mask.astype(float, copy=False)
@@ -199,17 +199,17 @@ def save_Stokes(
     hdul = fits.HDUList([])
     # Add I_stokes as PrimaryHDU
-    header["datatype"] = ("I_stokes", "type of data stored in the HDU")
-    I_stokes[(1 - data_mask).astype(bool)] = 0.0
-    primary_hdu = fits.PrimaryHDU(data=I_stokes, header=header)
-    primary_hdu.name = "I_stokes"
+    header["datatype"] = ("STOKES", "type of data stored in the HDU")
+    Stokes[np.broadcast_to((1 - data_mask).astype(bool), Stokes.shape)] = 0.0
+    hdu_head = add_stokes_axis_to_header(header, 0)
+    primary_hdu = fits.PrimaryHDU(data=Stokes, header=hdu_head)
+    primary_hdu.name = "STOKES"
     hdul.append(primary_hdu)
-    # Add Q, U, Stokes_cov, P, s_P, PA, s_PA to the HDUList
+    # Add Stokes_cov, P, s_P, PA, s_PA to the HDUList
     for data, name in [
-        [Q_stokes, "Q_stokes"],
-        [U_stokes, "U_stokes"],
-        [Stokes_cov, "IQU_cov_matrix"],
+        [Stokes_cov, "STOKES_COV"],
+        [Stokes_cov_stat, "STOKES_COV_STAT"],
         [P, "Pol_deg"],
         [debiased_P, "Pol_deg_debiased"],
         [s_P, "Pol_deg_err"],
@@ -219,11 +219,15 @@ def save_Stokes(
         [s_PA_P, "Pol_ang_stat_err"],
         [data_mask, "Data_mask"],
     ]:
-        hdu_header = header.copy()
-        hdu_header["datatype"] = name
-        if not name == "IQU_cov_matrix":
+        hdu_head = header.copy()
+        hdu_head["datatype"] = name
+        if name[:10] == "STOKES_COV":
+            hdu_head = add_stokes_axis_to_header(hdu_head, 0)
+            hdu_head = add_stokes_axis_to_header(hdu_head, 0)
+            data[np.broadcast_to((1 - data_mask).astype(bool), data.shape)] = 0.0
+        else:
             data[(1 - data_mask).astype(bool)] = 0.0
-        hdu = fits.ImageHDU(data=data, header=hdu_header)
+        hdu = fits.ImageHDU(data=data, header=hdu_head)
         hdu.name = name
        hdul.append(hdu)
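add_stokes_axis_to_header and remove_stokes_axis_from_header are repo utilities, but astropy ships a WCS-level equivalent that illustrates the same idea: appending a STOKES axis so a (4, ny, nx) cube round-trips with a valid 3-axis WCS. A hedged sketch with stock astropy (the repo's own helpers may differ in detail):

```python
from astropy.io import fits
from astropy.wcs import WCS
from astropy.wcs.utils import add_stokes_axis_to_wcs

# Minimal celestial header; the values here are illustrative only.
header = fits.Header()
header["CTYPE1"], header["CTYPE2"] = "RA---TAN", "DEC--TAN"
header["CRPIX1"] = header["CRPIX2"] = 64.5
header["CDELT1"], header["CDELT2"] = -4e-6, 4e-6
header["CRVAL1"], header["CRVAL2"] = 40.67, -0.013

# Append a STOKES axis after the two celestial axes (index 2).
wcs3 = add_stokes_axis_to_wcs(WCS(header), 2)
print(wcs3.wcs.ctype)        # ['RA---TAN', 'DEC--TAN', 'STOKES']
new_header = wcs3.to_header()  # header with the extra axis, ready for a cube HDU
```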

File diff suppressed because it is too large.


@@ -11,7 +11,7 @@ from warnings import filterwarnings
 import astropy.units as u
 import numpy as np
-from astropy.table import Column, unique
+from astropy.table import Column, unique, vstack
 from astropy.time import Time, TimeDelta
 from astroquery.exceptions import NoResultsWarning
 from astroquery.mast import MastMissions, Observations
@@ -82,14 +82,20 @@ def get_product_list(target=None, proposal_id=None, instrument="foc"):
         "References",
     ]
-    if target is None:
+    if target is None and proposal_id is None:
         target = input("Target name:\n>")
     # Use query_object method to resolve the object name into coordinates
     if instrument == "foc":
-        results = mission.query_object(
-            target, radius=radius, select_cols=select_cols, sci_spec_1234="POL*", sci_obs_type="image", sci_aec="S", sci_instrume="foc"
-        )
+        if target is None and proposal_id is not None:
+            results = mission.query_criteria(
+                sci_pep_id=proposal_id, radius=radius, select_cols=select_cols, sci_spec_1234="POL*", sci_obs_type="image", sci_aec="S", sci_instrume="foc"
+            )
+            target = list(np.unique(results["sci_targname"]))
+        else:
+            results = mission.query_object(
+                target, radius=radius, select_cols=select_cols, sci_spec_1234="POL*", sci_obs_type="image", sci_aec="S", sci_instrume="foc"
+            )
         dataproduct_type = "image"
         description = "DADS C0F file - Calibrated exposure WFPC/WFPC2/FOC/FOS/GHRS/HSP"
     elif instrument == "fos":
@@ -110,8 +116,16 @@ def get_product_list(target=None, proposal_id=None, instrument="foc"):
     results["Start"] = Column(Time(results["Start"]))
     results["Stop"] = Column(Time(results["Stop"]))
-    results = divide_proposal(results)
-    obs = results.copy()
+    if isinstance(target, list):
+        for i, targ in enumerate(target):
+            results_div = divide_proposal(results[results["Target name"] == targ])
+            if i == 0:
+                obs = results_div.copy()
+            else:
+                obs = vstack([obs, results_div.copy()])
+    else:
+        results_div = divide_proposal(results)
+        obs = results_div.copy()
     # Remove single observations for which a FIND filter is used
     to_remove = []
@@ -122,12 +136,21 @@ def get_product_list(target=None, proposal_id=None, instrument="foc"):
     # Remove observations for which a polarization filter is missing
     if instrument == "foc":
         polfilt = {"POL0": 0, "POL60": 1, "POL120": 2}
-        for pid in np.unique(obs["Proposal ID"]):
-            used_pol = np.zeros(3)
-            for dataset in obs[obs["Proposal ID"] == pid]:
-                used_pol[polfilt[dataset["POLFilters"]]] += 1
-            if np.any(used_pol < 1):
-                obs.remove_rows(np.arange(len(obs))[obs["Proposal ID"] == pid])
+        if isinstance(target, list):
+            for targ in target:
+                for pid in np.unique(obs[obs["Target name"] == targ]["Proposal ID"]):
+                    used_pol = np.zeros(3)
+                    for dataset in obs[np.logical_and(obs["Target name"] == targ, obs["Proposal ID"] == pid)]:
+                        used_pol[polfilt[dataset["POLFilters"]]] += 1
+                    if np.any(used_pol < 1):
+                        obs.remove_rows(np.arange(len(obs))[np.logical_and(obs["Target name"] == targ, obs["Proposal ID"] == pid)])
+        else:
+            for pid in np.unique(obs["Proposal ID"]):
+                used_pol = np.zeros(3)
+                for dataset in obs[obs["Proposal ID"] == pid]:
+                    used_pol[polfilt[dataset["POLFilters"]]] += 1
+                if np.any(used_pol < 1):
+                    obs.remove_rows(np.arange(len(obs))[obs["Proposal ID"] == pid])
     # Remove observations for which a spectropolarization has not been reduced
     if instrument == "fos":
         for pid in np.unique(obs["Proposal ID"]):
@@ -142,6 +165,7 @@ def get_product_list(target=None, proposal_id=None, instrument="foc"):
         if len(c3prod) < 1:
             obs.remove_rows(np.arange(len(obs))[obs["Proposal ID"] == pid])
+    # tab = unique(obs, ["Target name", "Proposal ID"])
     tab = unique(obs, ["Target name", "Proposal ID"])
     obs["Obs"] = [np.argmax(np.logical_and(tab["Proposal ID"] == data["Proposal ID"], tab["Target name"] == data["Target name"])) + 1 for data in obs]
     try:
@@ -150,7 +174,7 @@ def get_product_list(target=None, proposal_id=None, instrument="foc"):
         raise ValueError("There is no observation with polarimetry for {0:s} in HST/{1:s} Legacy Archive".format(target, instrument.upper()))
     b = np.zeros(len(results), dtype=bool)
-    if proposal_id is not None and str(proposal_id) in obs["Proposal ID"]:
+    if proposal_id is not None and np.all(str(proposal_id) == np.unique(obs["Proposal ID"])):
         b[results["Proposal ID"] == str(proposal_id)] = True
     else:
         n_obs.pprint(len(n_obs) + 2)
@@ -171,6 +195,8 @@ def get_product_list(target=None, proposal_id=None, instrument="foc"):
         else:
             b[np.array([dataset in obs["Dataset"][obs["Obs"] == i[0]] for dataset in results["Dataset"]])] = True
+    targetb = list(np.unique(results["Target name"][b]))
+    target = targetb if len(targetb) > 1 else targetb[0]
     observations = Observations.query_criteria(obs_id=list(results["Dataset"][b]))
     products = Observations.filter_products(
         Observations.get_product_list(observations), productType=["SCIENCE"], dataproduct_type=dataproduct_type, calib_level=[2], description=description
@@ -179,11 +205,12 @@ def get_product_list(target=None, proposal_id=None, instrument="foc"):
     products["proposal_id"] = Column(products["proposal_id"], dtype="U35")
     for prod in products:
-        prod["proposal_id"] = results["Proposal ID"][results["Dataset"] == prod["productFilename"][: len(results["Dataset"][0])].upper()][0]
+        prod["proposal_id"] = obs["Proposal ID"][np.argmax(obs["Dataset"] == prod["productFilename"][: len(obs["Dataset"][0])].upper())]
     tab = unique(products, "proposal_id")
     products["Obs"] = [np.argmax(tab["proposal_id"] == data["proposal_id"]) + 1 for data in products]
+    products["targname"] = [obs["Target name"][np.argmax(obs["Dataset"] == data[: len(obs["Dataset"][0])].upper())] for data in products["productFilename"]]
     return target, products
@@ -195,23 +222,43 @@ def retrieve_products(target=None, proposal_id=None, instrument="foc", output_di
     prodpaths = []
     # data_dir = path_join(output_dir, target)
     out = ""
-    for obs in unique(products, "Obs"):
-        filepaths = []
-        # obs_dir = path_join(data_dir, obs['prodposal_id'])
-        # if obs['target_name']!=target:
-        obs_dir = path_join(path_join(output_dir, target), obs["proposal_id"])
-        if not path_exists(obs_dir):
-            system("mkdir -p {0:s} {1:s}".format(obs_dir, obs_dir.replace("data", "plots")))
-        for file in products["productFilename"][products["Obs"] == obs["Obs"]]:
-            fpath = path_join(obs_dir, file)
-            if not path_exists(fpath):
-                out += "{0:s} : {1:s}\n".format(
-                    file, Observations.download_file(products["dataURI"][products["productFilename"] == file][0], local_path=fpath)[0]
-                )
-            else:
-                out += "{0:s} : Exists\n".format(file)
-            filepaths.append([obs_dir, file])
-        prodpaths.append(np.array(filepaths, dtype=str))
+    if isinstance(target, list):
+        for targ in target:
+            for obs in unique(products[products["targname"] == targ], "Obs"):
+                filepaths = []
+                # obs_dir = path_join(data_dir, obs['prodposal_id'])
+                # if obs['target_name']!=target:
+                obs_dir = path_join(path_join(output_dir, targ), obs["proposal_id"])
+                if not path_exists(obs_dir):
+                    system("mkdir -p {0:s} {1:s}".format(obs_dir, obs_dir.replace("data", "plots")))
+                for file in products["productFilename"][np.logical_and(products["Obs"] == obs["Obs"], products["targname"] == targ)]:
+                    fpath = path_join(obs_dir, file)
+                    if not path_exists(fpath):
+                        out += "{0:s} : {1:s}\n".format(
+                            file, Observations.download_file(products["dataURI"][products["productFilename"] == file][0], local_path=fpath)[0]
+                        )
+                    else:
+                        out += "{0:s} : Exists\n".format(file)
+                    filepaths.append([obs_dir, file])
+                prodpaths.append(np.array(filepaths, dtype=str))
+    else:
+        for obs in unique(products, "Obs"):
+            filepaths = []
+            # obs_dir = path_join(data_dir, obs['prodposal_id'])
+            # if obs['target_name']!=target:
+            obs_dir = path_join(path_join(output_dir, target), obs["proposal_id"])
+            if not path_exists(obs_dir):
+                system("mkdir -p {0:s} {1:s}".format(obs_dir, obs_dir.replace("data", "plots")))
+            for file in products["productFilename"][products["Obs"] == obs["Obs"]]:
+                fpath = path_join(obs_dir, file)
+                if not path_exists(fpath):
+                    out += "{0:s} : {1:s}\n".format(
+                        file, Observations.download_file(products["dataURI"][products["productFilename"] == file][0], local_path=fpath)[0]
+                    )
+                else:
+                    out += "{0:s} : Exists\n".format(file)
+                filepaths.append([obs_dir, file])
+            prodpaths.append(np.array(filepaths, dtype=str))
     return target, prodpaths
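The new branch above queries the HST archive by proposal ID alone and recovers the target list from the result table. A minimal standalone sketch of that path with astroquery's MastMissions (column names mirror the diff; the proposal ID is only an example, and a recent astroquery is assumed):

```python
import numpy as np
from astroquery.mast import MastMissions

mission = MastMissions(mission="hst")

# Query by proposal ID alone, as in the new query_criteria branch; 5288 is an example value.
results = mission.query_criteria(
    sci_pep_id=5288, sci_spec_1234="POL*", sci_obs_type="image", sci_aec="S", sci_instrume="foc"
)

# The target list is then recovered from the returned table, as the diff does.
targets = list(np.unique(results["sci_targname"]))
print(targets)
```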


@@ -57,7 +57,7 @@ from .convex_hull import image_hull
 from .cross_correlation import phase_cross_correlation
 from .deconvolve import deconvolve_im, gaussian2d, gaussian_psf, zeropad
 from .plots import plot_obs
-from .utils import princ_angle
+from .utils import princ_angle, add_stokes_axis_to_header
 log.setLevel("ERROR")
@@ -515,8 +515,7 @@ def get_error(
     if data_mask is not None:
         mask = deepcopy(data_mask)
     else:
-        data_c, error_c, _ = crop_array(data, headers, error, step=5, null_val=0.0, inside=False)
-        mask_c = np.ones(data_c[0].shape, dtype=bool)
+        data_c, error_c, mask_c, _ = crop_array(data, headers, error_array=error, step=5, null_val=0.0, inside=False)
         for i, (data_ci, error_ci) in enumerate(zip(data_c, error_c)):
             data[i], error[i] = zeropad(data_ci, data[i].shape), zeropad(error_ci, error[i].shape)
         mask = zeropad(mask_c, data[0].shape).astype(bool)
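crop_array now returns the crop mask itself instead of the caller rebuilding one. A hedged sketch of a bounding-box crop with that four-value return shape (the real crop_array uses a convex-hull routine and more options; this only illustrates the contract):

```python
import numpy as np

def crop_to_bbox(data, error, null_val=0.0):
    """Crop a stack of images to the bounding box of pixels above null_val.

    Returns cropped data, cropped errors, and the boolean mask of kept
    pixels inside the crop, mimicking crop_array's (data, error, mask, ...)
    return signature.
    """
    keep = np.any(data > null_val, axis=0)
    rows, cols = np.where(keep)
    r0, r1, c0, c1 = rows.min(), rows.max() + 1, cols.min(), cols.max() + 1
    return data[:, r0:r1, c0:c1], error[:, r0:r1, c0:c1], keep[r0:r1, c0:c1]

stack = np.zeros((3, 8, 8))
stack[:, 2:5, 3:6] = 1.0
d, e, m = crop_to_bbox(stack, np.zeros_like(stack))
print(d.shape, m.shape)  # (3, 3, 3) (3, 3)
```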
@@ -773,7 +772,7 @@ def align_data(
         err_array = np.concatenate((error_array, [np.zeros(ref_data.shape)]), axis=0)
     if data_mask is None:
-        full_array, err_array, full_headers = crop_array(full_array, full_headers, err_array, step=5, inside=False, null_val=0.0)
+        full_array, err_array, data_mask, full_headers = crop_array(full_array, full_headers, error_array=err_array, step=5, inside=False, null_val=0.0)
     else:
         full_array, err_array, data_mask, full_headers = crop_array(
             full_array, full_headers, err_array, data_mask=data_mask, step=5, inside=False, null_val=0.0
@@ -1182,15 +1181,8 @@ def compute_Stokes(data_array, error_array, data_mask, headers, FWHM=None, scale
         Defaults to True.
     ----------
     Returns:
-    I_stokes : numpy.ndarray
-        Image (2D floats) containing the Stokes parameters accounting for
-        total intensity
-    Q_stokes : numpy.ndarray
-        Image (2D floats) containing the Stokes parameters accounting for
-        vertical/horizontal linear polarization intensity
-    U_stokes : numpy.ndarray
-        Image (2D floats) containing the Stokes parameters accounting for
-        +45/-45deg linear polarization intensity
+    Stokes : numpy.ndarray
+        Image (2D floats) containing the Stokes I,Q,U,V flux
     Stokes_cov : numpy.ndarray
         Covariance matrix of the Stokes parameters I, Q, U.
     """
@@ -1269,28 +1261,26 @@ def compute_Stokes(data_array, error_array, data_mask, headers, FWHM=None, scale
     N = (coeff_stokes[0, :] * transmit / 2.0).sum()
     coeff_stokes = coeff_stokes / N
     coeff_stokes_corr = np.array([cs * t / 2.0 for (cs, t) in zip(coeff_stokes.T, transmit)]).T
-    I_stokes = np.zeros(pol_array[0].shape)
-    Q_stokes = np.zeros(pol_array[0].shape)
-    U_stokes = np.zeros(pol_array[0].shape)
-    Stokes_cov = np.zeros((3, 3, I_stokes.shape[0], I_stokes.shape[1]))
-    for i in range(I_stokes.shape[0]):
-        for j in range(I_stokes.shape[1]):
-            I_stokes[i, j], Q_stokes[i, j], U_stokes[i, j] = np.dot(coeff_stokes, pol_flux[:, i, j]).T
-            Stokes_cov[:, :, i, j] = np.dot(coeff_stokes, np.dot(pol_cov[:, :, i, j], coeff_stokes.T))
+    Stokes = np.zeros((4, pol_array[0].shape[0], pol_array[0].shape[1]))
+    Stokes_cov = np.zeros((4, 4, Stokes.shape[1], Stokes.shape[2]))
+    for i in range(Stokes.shape[1]):
+        for j in range(Stokes.shape[2]):
+            Stokes[:3, i, j] = np.dot(coeff_stokes, pol_flux[:, i, j]).T
+            Stokes_cov[:3, :3, i, j] = np.dot(coeff_stokes, np.dot(pol_cov[:, :, i, j], coeff_stokes.T))
     if (FWHM is not None) and (smoothing.lower() in ["weighted_gaussian_after", "weight_gauss_after", "gaussian_after", "gauss_after"]):
         smoothing = smoothing.lower()[:-6]
-        Stokes_array = np.array([I_stokes, Q_stokes, U_stokes])
+        Stokes_array = deepcopy(Stokes[:3])
         Stokes_error = np.array([np.sqrt(Stokes_cov[i, i]) for i in range(3)])
         Stokes_headers = headers[0:3]
         Stokes_array, Stokes_error = smooth_data(Stokes_array, Stokes_error, data_mask, headers=Stokes_headers, FWHM=FWHM, scale=scale, smoothing=smoothing)
-        I_stokes, Q_stokes, U_stokes = Stokes_array
+        Stokes[:3] = deepcopy(Stokes_array)
         Stokes_cov[0, 0], Stokes_cov[1, 1], Stokes_cov[2, 2] = deepcopy(Stokes_error**2)
-        sStokes_array = np.array([I_stokes * Q_stokes, I_stokes * U_stokes, Q_stokes * U_stokes])
+        sStokes_array = np.array([Stokes[0, 1], Stokes[0, 2], Stokes[1, 2]])
         sStokes_error = np.array([Stokes_cov[0, 1], Stokes_cov[0, 2], Stokes_cov[1, 2]])
         uStokes_error = np.array([Stokes_cov[1, 0], Stokes_cov[2, 0], Stokes_cov[2, 1]])
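The per-pixel loop above applies a precomputed 3x3 inversion (coeff_stokes) mapping the three polarizer fluxes onto I, Q, U, and propagates the covariance as C = A C_f Aᵀ. A hedged, idealized sketch of that inversion for perfect polarizers at 0°, 60° and 120° (the pipeline's coefficients additionally fold in transmittances and axis calibrations):

```python
import numpy as np

# An ideal polarizer at angle theta transmits f = 0.5 * (I + Q*cos 2t + U*sin 2t)
theta = np.radians([0.0, 60.0, 120.0])
M = 0.5 * np.stack([np.ones(3), np.cos(2 * theta), np.sin(2 * theta)], axis=1)  # (3, 3)
A = np.linalg.inv(M)  # plays the role of coeff_stokes

# Simulated fluxes for I=1, Q=0.1, U=0.05, with a diagonal flux covariance
f = M @ np.array([1.0, 0.1, 0.05])
C_f = np.diag([1e-4, 1e-4, 1e-4])

IQU = A @ f             # recovered Stokes I, Q, U
C_IQU = A @ C_f @ A.T   # propagated Stokes covariance
print(IQU)              # -> [1.   0.1  0.05]
```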
@@ -1304,18 +1294,18 @@ def compute_Stokes(data_array, error_array, data_mask, headers, FWHM=None, scale
         Stokes_cov[0, 1], Stokes_cov[0, 2], Stokes_cov[1, 2] = deepcopy(sStokes_error)
         Stokes_cov[1, 0], Stokes_cov[2, 0], Stokes_cov[2, 1] = deepcopy(uStokes_error)
-    mask = (Q_stokes**2 + U_stokes**2) > I_stokes**2
+    mask = (Stokes[1] ** 2 + Stokes[2] ** 2) > Stokes[0] ** 2
     if mask.any():
-        print("WARNING : found {0:d} pixels for which I_pol > I_stokes".format(I_stokes[mask].size))
+        print("WARNING : found {0:d} pixels for which I_pol > I_stokes".format(mask.sum()))
     # Statistical error: Poisson noise is assumed
     sigma_flux = np.array([np.sqrt(flux / head["exptime"]) for flux, head in zip(pol_flux, pol_headers)])
-    s_IQU_stat = np.zeros(Stokes_cov.shape)
-    for i in range(Stokes_cov.shape[0]):
-        s_IQU_stat[i, i] = np.sum([coeff_stokes[i, k] ** 2 * sigma_flux[k] ** 2 for k in range(len(sigma_flux))], axis=0)
+    Stokes_cov_stat = np.zeros(Stokes_cov.shape)
+    for i in range(3):
+        Stokes_cov_stat[i, i] = np.sum([coeff_stokes[i, k] ** 2 * sigma_flux[k] ** 2 for k in range(len(sigma_flux))], axis=0)
         for j in [k for k in range(3) if k > i]:
-            s_IQU_stat[i, j] = np.sum([coeff_stokes[i, k] * coeff_stokes[j, k] * sigma_flux[k] ** 2 for k in range(len(sigma_flux))], axis=0)
-            s_IQU_stat[j, i] = np.sum([coeff_stokes[i, k] * coeff_stokes[j, k] * sigma_flux[k] ** 2 for k in range(len(sigma_flux))], axis=0)
+            Stokes_cov_stat[i, j] = np.sum([coeff_stokes[i, k] * coeff_stokes[j, k] * sigma_flux[k] ** 2 for k in range(len(sigma_flux))], axis=0)
+            Stokes_cov_stat[j, i] = np.sum([coeff_stokes[i, k] * coeff_stokes[j, k] * sigma_flux[k] ** 2 for k in range(len(sigma_flux))], axis=0)
     # Compute the derivative of each Stokes parameter with respect to the polarizer orientation
     dIQU_dtheta = np.zeros(Stokes_cov.shape)
@@ -1327,13 +1317,13 @@ def compute_Stokes(data_array, error_array, data_mask, headers, FWHM=None, scale
                 * pol_eff[j]
                 / N
                 * (
-                    pol_eff[(j + 2) % 3] * np.cos(-2.0 * theta[(j + 2) % 3] + 2.0 * theta[j]) * (pol_flux_corr[(j + 1) % 3] - I_stokes)
-                    - pol_eff[(j + 1) % 3] * np.cos(-2.0 * theta[j] + 2.0 * theta[(j + 1) % 3]) * (pol_flux_corr[(j + 2) % 3] - I_stokes)
-                    + coeff_stokes_corr[0, j] * (np.sin(2.0 * theta[j]) * Q_stokes - np.cos(2 * theta[j]) * U_stokes)
+                    pol_eff[(j + 2) % 3] * np.cos(-2.0 * theta[(j + 2) % 3] + 2.0 * theta[j]) * (pol_flux_corr[(j + 1) % 3] - Stokes[0])
+                    - pol_eff[(j + 1) % 3] * np.cos(-2.0 * theta[j] + 2.0 * theta[(j + 1) % 3]) * (pol_flux_corr[(j + 2) % 3] - Stokes[0])
+                    + coeff_stokes_corr[0, j] * (np.sin(2.0 * theta[j]) * Stokes[1] - np.cos(2 * theta[j]) * Stokes[2])
                 )
             )
-    # Derivative of Q_stokes wrt theta_1, 2, 3
+    # Derivative of Stokes[1] wrt theta_1, 2, 3
     for j in range(3):
         dIQU_dtheta[1, j] = (
             2.0
@@ -1345,12 +1335,12 @@ def compute_Stokes(data_array, error_array, data_mask, headers, FWHM=None, scale
                     pol_eff[(j + 2) % 3] * np.cos(-2.0 * theta[(j + 2) % 3] + 2.0 * theta[j])
                     - pol_eff[(j + 1) % 3] * np.cos(-2.0 * theta[j] + 2.0 * theta[(j + 1) % 3])
                 )
-                * Q_stokes
-                + coeff_stokes_corr[1, j] * (np.sin(2.0 * theta[j]) * Q_stokes - np.cos(2 * theta[j]) * U_stokes)
+                * Stokes[1]
+                + coeff_stokes_corr[1, j] * (np.sin(2.0 * theta[j]) * Stokes[1] - np.cos(2 * theta[j]) * Stokes[2])
             )
         )
-    # Derivative of U_stokes wrt theta_1, 2, 3
+    # Derivative of Stokes[2] wrt theta_1, 2, 3
     for j in range(3):
         dIQU_dtheta[2, j] = (
             2.0
@@ -1362,39 +1352,37 @@ def compute_Stokes(data_array, error_array, data_mask, headers, FWHM=None, scale
                     pol_eff[(j + 2) % 3] * np.cos(-2.0 * theta[(j + 2) % 3] + 2.0 * theta[j])
                     - pol_eff[(j + 1) % 3] * np.cos(-2.0 * theta[j] + 2.0 * theta[(j + 1) % 3])
                 )
-                * U_stokes
-                + coeff_stokes_corr[2, j] * (np.sin(2.0 * theta[j]) * Q_stokes - np.cos(2 * theta[j]) * U_stokes)
+                * Stokes[2]
+                + coeff_stokes_corr[2, j] * (np.sin(2.0 * theta[j]) * Stokes[1] - np.cos(2 * theta[j]) * Stokes[2])
             )
         )
     # Compute the uncertainty associated with the polarizers' orientation (see Kishimoto 1999)
-    s_IQU_axis = np.zeros(Stokes_cov.shape)
-    for i in range(Stokes_cov.shape[0]):
-        s_IQU_axis[i, i] = np.sum([dIQU_dtheta[i, k] ** 2 * globals()["sigma_theta"][k] ** 2 for k in range(len(globals()["sigma_theta"]))], axis=0)
+    Stokes_cov_axis = np.zeros(Stokes_cov.shape)
+    for i in range(3):
+        Stokes_cov_axis[i, i] = np.sum([dIQU_dtheta[i, k] ** 2 * globals()["sigma_theta"][k] ** 2 for k in range(len(globals()["sigma_theta"]))], axis=0)
         for j in [k for k in range(3) if k > i]:
-            s_IQU_axis[i, j] = np.sum(
+            Stokes_cov_axis[i, j] = np.sum(
                 [dIQU_dtheta[i, k] * dIQU_dtheta[j, k] * globals()["sigma_theta"][k] ** 2 for k in range(len(globals()["sigma_theta"]))], axis=0
             )
-            s_IQU_axis[j, i] = np.sum(
+            Stokes_cov_axis[j, i] = np.sum(
                [dIQU_dtheta[i, k] * dIQU_dtheta[j, k] * globals()["sigma_theta"][k] ** 2 for k in range(len(globals()["sigma_theta"]))], axis=0
            )
     # Add quadratically the uncertainty to the Stokes covariance matrix
-    Stokes_cov += s_IQU_axis + s_IQU_stat
+    Stokes_cov += Stokes_cov_axis + Stokes_cov_stat
     # Save values to single header
     header_stokes = pol_headers[0]
     else:
-        all_I_stokes = np.zeros((np.unique(rotate).size, data_array.shape[1], data_array.shape[2]))
-        all_Q_stokes = np.zeros((np.unique(rotate).size, data_array.shape[1], data_array.shape[2]))
-        all_U_stokes = np.zeros((np.unique(rotate).size, data_array.shape[1], data_array.shape[2]))
-        all_Stokes_cov = np.zeros((np.unique(rotate).size, 3, 3, data_array.shape[1], data_array.shape[2]))
+        all_Stokes = np.zeros((np.unique(rotate).size, 4, data_array.shape[1], data_array.shape[2]))
+        all_Stokes_cov = np.zeros((np.unique(rotate).size, 4, 4, data_array.shape[1], data_array.shape[2]))
         all_header_stokes = [{}] * np.unique(rotate).size
         for i, rot in enumerate(np.unique(rotate)):
            rot_mask = rotate == rot
-            all_I_stokes[i], all_Q_stokes[i], all_U_stokes[i], all_Stokes_cov[i], all_header_stokes[i] = compute_Stokes(
+            all_Stokes[i], all_Stokes_cov[i], all_header_stokes[i] = compute_Stokes(
                 data_array[rot_mask],
                 error_array[rot_mask],
                 data_mask,
@@ -1407,10 +1395,8 @@ def compute_Stokes(data_array, error_array, data_mask, headers, FWHM=None, scale
             )
         all_exp = np.array([float(h["exptime"]) for h in all_header_stokes])
-        I_stokes = np.sum([exp * I for exp, I in zip(all_exp, all_I_stokes)], axis=0) / all_exp.sum()
-        Q_stokes = np.sum([exp * Q for exp, Q in zip(all_exp, all_Q_stokes)], axis=0) / all_exp.sum()
-        U_stokes = np.sum([exp * U for exp, U in zip(all_exp, all_U_stokes)], axis=0) / all_exp.sum()
-        Stokes_cov = np.zeros((3, 3, I_stokes.shape[0], I_stokes.shape[1]))
+        Stokes = np.sum([exp * S for exp, S in zip(all_exp, all_Stokes)], axis=0) / all_exp.sum()
+        Stokes_cov = np.zeros((4, 4, Stokes.shape[1], Stokes.shape[2]))
         for i in range(3):
             Stokes_cov[i, i] = np.sum([exp**2 * cov for exp, cov in zip(all_exp, all_Stokes_cov[:, i, i])], axis=0) / all_exp.sum() ** 2
             for j in [x for x in range(3) if x != i]:
@@ -1424,19 +1410,15 @@ def compute_Stokes(data_array, error_array, data_mask, headers, FWHM=None, scale
     # Nan handling :
     fmax = np.finfo(np.float64).max
-    I_stokes[np.isnan(I_stokes)] = 0.0
-    Q_stokes[I_stokes == 0.0] = 0.0
-    U_stokes[I_stokes == 0.0] = 0.0
-    Q_stokes[np.isnan(Q_stokes)] = 0.0
-    U_stokes[np.isnan(U_stokes)] = 0.0
+    Stokes[np.isnan(Stokes)] = 0.0
+    Stokes[1:][np.broadcast_to(Stokes[0] == 0.0, Stokes[1:].shape)] = 0.0
     Stokes_cov[np.isnan(Stokes_cov)] = fmax
+    header_stokes = add_stokes_axis_to_header(header_stokes, 0)
     if integrate:
         # Compute integrated values for P, PA before any rotation
         mask = deepcopy(data_mask).astype(bool)
-        I_diluted = I_stokes[mask].sum()
-        Q_diluted = Q_stokes[mask].sum()
-        U_diluted = U_stokes[mask].sum()
+        I_diluted, Q_diluted, U_diluted = (Stokes[:3] * np.broadcast_to(mask, Stokes[:3].shape)).sum(axis=(1, 2))
         I_diluted_err = np.sqrt(np.sum(Stokes_cov[0, 0][mask]))
         Q_diluted_err = np.sqrt(np.sum(Stokes_cov[1, 1][mask]))
         U_diluted_err = np.sqrt(np.sum(Stokes_cov[2, 2][mask]))
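The integrate branch sums I, Q, U over the mask and derives the integrated polarization written to the header (p_int, pa_int and their errors). A hedged sketch of the standard estimators used at this step, simplified to diagonal covariance terms only:

```python
import numpy as np

def integrated_pol(stokes, cov, mask):
    """Integrated P and PA from a (4, ny, nx) Stokes cube and its covariance.

    Simplified version of the 'integrate' step: diagonal covariance terms
    only, errors added in quadrature over the boolean mask.
    """
    I, Q, U = (stokes[:3] * mask).sum(axis=(1, 2))
    sI, sQ, sU = [np.sqrt(cov[k, k][mask].sum()) for k in range(3)]
    P = np.sqrt(Q**2 + U**2) / I
    PA = 90.0 / np.pi * np.arctan2(U, Q)  # 0.5 * atan2(U, Q), in degrees
    s_P = P * np.sqrt((Q**2 * sQ**2 + U**2 * sU**2) / (Q**2 + U**2) ** 2 + (sI / I) ** 2)
    s_PA = 90.0 / (np.pi * (Q**2 + U**2)) * np.sqrt(U**2 * sQ**2 + Q**2 * sU**2)
    return P, PA, s_P, s_PA
```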
@@ -1462,26 +1444,19 @@ def compute_Stokes(data_array, error_array, data_mask, headers, FWHM=None, scale
header_stokes["PA_int"] = (PA_diluted, "Integrated polarization angle") header_stokes["PA_int"] = (PA_diluted, "Integrated polarization angle")
header_stokes["sPA_int"] = (np.ceil(PA_diluted_err * 10.0) / 10.0, "Integrated polarization angle error") header_stokes["sPA_int"] = (np.ceil(PA_diluted_err * 10.0) / 10.0, "Integrated polarization angle error")
return I_stokes, Q_stokes, U_stokes, Stokes_cov, header_stokes, s_IQU_stat return Stokes, Stokes_cov, header_stokes, Stokes_cov_stat
def compute_pol(I_stokes, Q_stokes, U_stokes, Stokes_cov, header_stokes, s_IQU_stat=None): def compute_pol(Stokes, Stokes_cov, header_stokes, Stokes_cov_stat=None):
""" """
Compute the polarization degree (in %) and angle (in deg) and their Compute the polarization degree (in %) and angle (in deg) and their
respective errors from given Stokes parameters. respective errors from given Stokes parameters.
---------- ----------
Inputs: Inputs:
I_stokes : numpy.ndarray Stokes : numpy.ndarray
Image (2D floats) containing the Stokes parameters accounting for Image (2D floats) containing the Stokes I,Q,U,V fluxes
total intensity
Q_stokes : numpy.ndarray
Image (2D floats) containing the Stokes parameters accounting for
vertical/horizontal linear polarization intensity
U_stokes : numpy.ndarray
Image (2D floats) containing the Stokes parameters accounting for
+45/-45deg linear polarization intensity
Stokes_cov : numpy.ndarray Stokes_cov : numpy.ndarray
Covariance matrix of the Stokes parameters I, Q, U. Covariance matrix of the Stokes parameters I, Q, U, V.
header_stokes : astropy.fits.header.Header header_stokes : astropy.fits.header.Header
Header file associated with the Stokes fluxes. Header file associated with the Stokes fluxes.
---------- ----------
@@ -1504,75 +1479,77 @@ def compute_pol(I_stokes, Q_stokes, U_stokes, Stokes_cov, header_stokes, s_IQU_s
polarization angle.
"""
# Polarization degree and angle computation
-mask = I_stokes > 0.0
mask = Stokes[0] > 0.0
-I_pol = np.zeros(I_stokes.shape)
I_pol = np.zeros(Stokes[0].shape)
-I_pol[mask] = np.sqrt(Q_stokes[mask] ** 2 + U_stokes[mask] ** 2)
I_pol[mask] = np.sqrt(Stokes[1][mask] ** 2 + Stokes[2][mask] ** 2)
-P = np.zeros(I_stokes.shape)
P = np.zeros(Stokes[0].shape)
-P[mask] = I_pol[mask] / I_stokes[mask]
P[mask] = I_pol[mask] / Stokes[0][mask]
-PA = np.zeros(I_stokes.shape)
PA = np.zeros(Stokes[0].shape)
-PA[mask] = (90.0 / np.pi) * np.arctan2(U_stokes[mask], Q_stokes[mask])
PA[mask] = (90.0 / np.pi) * np.arctan2(Stokes[2][mask], Stokes[1][mask])
if (P > 1).any():
print("WARNING : found {0:d} pixels for which P > 1".format(P[P > 1.0].size))
# Associated errors
fmax = np.finfo(np.float64).max
-s_P = np.ones(I_stokes.shape) * fmax
s_P = np.ones(Stokes[0].shape) * fmax
-s_PA = np.ones(I_stokes.shape) * fmax
s_PA = np.ones(Stokes[0].shape) * fmax
# Propagate previously computed errors
-s_P[mask] = (1 / I_stokes[mask]) * np.sqrt(
s_P[mask] = (1 / Stokes[0][mask]) * np.sqrt(
(
-Q_stokes[mask] ** 2 * Stokes_cov[1, 1][mask]
Stokes[1][mask] ** 2 * Stokes_cov[1, 1][mask]
-+ U_stokes[mask] ** 2 * Stokes_cov[2, 2][mask]
+ Stokes[2][mask] ** 2 * Stokes_cov[2, 2][mask]
-+ 2.0 * Q_stokes[mask] * U_stokes[mask] * Stokes_cov[1, 2][mask]
+ 2.0 * Stokes[1][mask] * Stokes[2][mask] * Stokes_cov[1, 2][mask]
)
-/ (Q_stokes[mask] ** 2 + U_stokes[mask] ** 2)
/ (Stokes[1][mask] ** 2 + Stokes[2][mask] ** 2)
-+ ((Q_stokes[mask] / I_stokes[mask]) ** 2 + (U_stokes[mask] / I_stokes[mask]) ** 2) * Stokes_cov[0, 0][mask]
+ ((Stokes[1][mask] / Stokes[0][mask]) ** 2 + (Stokes[2][mask] / Stokes[0][mask]) ** 2) * Stokes_cov[0, 0][mask]
-- 2.0 * (Q_stokes[mask] / I_stokes[mask]) * Stokes_cov[0, 1][mask]
- 2.0 * (Stokes[1][mask] / Stokes[0][mask]) * Stokes_cov[0, 1][mask]
-- 2.0 * (U_stokes[mask] / I_stokes[mask]) * Stokes_cov[0, 2][mask]
- 2.0 * (Stokes[2][mask] / Stokes[0][mask]) * Stokes_cov[0, 2][mask]
)
-s_PA[mask] = (90.0 / (np.pi * (Q_stokes[mask] ** 2 + U_stokes[mask] ** 2))) * np.sqrt(
s_PA[mask] = (90.0 / (np.pi * (Stokes[1][mask] ** 2 + Stokes[2][mask] ** 2))) * np.sqrt(
-U_stokes[mask] ** 2 * Stokes_cov[1, 1][mask]
Stokes[2][mask] ** 2 * Stokes_cov[1, 1][mask]
-+ Q_stokes[mask] ** 2 * Stokes_cov[2, 2][mask]
+ Stokes[1][mask] ** 2 * Stokes_cov[2, 2][mask]
-- 2.0 * Q_stokes[mask] * U_stokes[mask] * Stokes_cov[1, 2][mask]
- 2.0 * Stokes[1][mask] * Stokes[2][mask] * Stokes_cov[1, 2][mask]
)
s_P[np.isnan(s_P)] = fmax
s_PA[np.isnan(s_PA)] = fmax
# Compute the total exposure time so that
-# I_stokes*exp_tot = N_tot the total number of events
# Stokes[0]*exp_tot = N_tot the total number of events
-N_obs = I_stokes * float(header_stokes["exptime"])
N_obs = Stokes[0] * float(header_stokes["exptime"])
# Errors on P, PA supposing Poisson noise
-s_P_P = np.ones(I_stokes.shape) * fmax
s_P_P = np.ones(Stokes[0].shape) * fmax
-s_PA_P = np.ones(I_stokes.shape) * fmax
s_PA_P = np.ones(Stokes[0].shape) * fmax
maskP = np.logical_and(mask, P > 0.0)
-if s_IQU_stat is not None:
if Stokes_cov_stat is not None:
# If IQU covariance matrix containing only statistical error is given propagate to P and PA
# Catch Invalid value in sqrt when diagonal terms are big
with warnings.catch_warnings(record=True) as _:
s_P_P[maskP] = (
P[maskP]
-/ I_stokes[maskP]
/ Stokes[0][maskP]
* np.sqrt(
-s_IQU_stat[0, 0][maskP]
Stokes_cov_stat[0, 0][maskP]
-- 2.0 / (I_stokes[maskP] * P[maskP] ** 2) * (Q_stokes[maskP] * s_IQU_stat[0, 1][maskP] + U_stokes[maskP] * s_IQU_stat[0, 2][maskP])
- 2.0
/ (Stokes[0][maskP] * P[maskP] ** 2)
* (Stokes[1][maskP] * Stokes_cov_stat[0, 1][maskP] + Stokes[2][maskP] * Stokes_cov_stat[0, 2][maskP])
+ 1.0
-/ (I_stokes[maskP] ** 2 * P[maskP] ** 4)
/ (Stokes[0][maskP] ** 2 * P[maskP] ** 4)
* (
-Q_stokes[maskP] ** 2 * s_IQU_stat[1, 1][maskP]
Stokes[1][maskP] ** 2 * Stokes_cov_stat[1, 1][maskP]
-+ U_stokes[maskP] ** 2 * s_IQU_stat[2, 2][maskP] * Q_stokes[maskP] * U_stokes[maskP] * s_IQU_stat[1, 2][maskP]
+ Stokes[2][maskP] ** 2 * Stokes_cov_stat[2, 2][maskP] + 2.0 * Stokes[1][maskP] * Stokes[2][maskP] * Stokes_cov_stat[1, 2][maskP]
)
)
)
s_PA_P[maskP] = (
90.0
-/ (np.pi * I_stokes[maskP] ** 2 * P[maskP] ** 2)
/ (np.pi * Stokes[0][maskP] ** 2 * P[maskP] ** 2)
* (
-Q_stokes[maskP] ** 2 * s_IQU_stat[2, 2][maskP]
Stokes[1][maskP] ** 2 * Stokes_cov_stat[2, 2][maskP]
-+ U_stokes[maskP] * s_IQU_stat[1, 1][maskP]
+ Stokes[2][maskP] ** 2 * Stokes_cov_stat[1, 1][maskP]
-- 2.0 * Q_stokes[maskP] * U_stokes[maskP] * s_IQU_stat[1, 2][maskP]
- 2.0 * Stokes[1][maskP] * Stokes[2][maskP] * Stokes_cov_stat[1, 2][maskP]
)
)
else:
@@ -1583,7 +1560,7 @@ def compute_pol(I_stokes, Q_stokes, U_stokes, Stokes_cov, header_stokes, s_IQU_s
# Catch expected "OverflowWarning" as wrong pixels have an overflowing error
with warnings.catch_warnings(record=True) as _:
mask2 = P**2 >= s_P_P**2
-debiased_P = np.zeros(I_stokes.shape)
debiased_P = np.zeros(Stokes[0].shape)
debiased_P[mask2] = np.sqrt(P[mask2] ** 2 - s_P_P[mask2] ** 2)
if (debiased_P > 1.0).any():
@@ -1600,24 +1577,17 @@ def compute_pol(I_stokes, Q_stokes, U_stokes, Stokes_cov, header_stokes, s_IQU_s
return P, debiased_P, s_P, s_P_P, PA, s_PA, s_PA_P
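For reference, a minimal synthetic check of the refactored interface (a sketch only: it assumes lib.reduction exposes compute_pol with the keyword names used in this diff and that the header only needs an "exptime" card). A flat field with Q/I = 0.1 and U = V = 0 should come out with P = 0.1 and PA = 0 deg everywhere:

import numpy as np
from astropy.io.fits import Header
from lib.reduction import compute_pol  # assumed import path

N = 8
Stokes = np.zeros((4, N, N))
Stokes[0], Stokes[1] = 1.0, 0.1  # I = 1, Q = 0.1, U = V = 0
Stokes_cov = np.zeros((4, 4, N, N))
for k in range(4):
    Stokes_cov[k, k] = 1e-6  # small uncorrelated variances
header = Header()
header["exptime"] = 1000.0
P, debiased_P, s_P, s_P_P, PA, s_PA, s_PA_P = compute_pol(
    Stokes=Stokes, Stokes_cov=Stokes_cov, Stokes_cov_stat=Stokes_cov, header_stokes=header
)
assert np.allclose(P, 0.1) and np.allclose(PA, 0.0)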
-def rotate_Stokes(I_stokes, Q_stokes, U_stokes, Stokes_cov, data_mask, header_stokes, s_IQU_stat=None, SNRi_cut=None):
def rotate_Stokes(Stokes, Stokes_cov, data_mask, header_stokes, Stokes_cov_stat=None, SNRi_cut=None):
"""
-Use scipy.ndimage.rotate to rotate I_stokes to an angle, and a rotation
Use scipy.ndimage.rotate to rotate the Stokes cube to an angle, and a rotation
matrix to rotate Q, U of a given angle in degrees and update header
orientation keyword.
----------
Inputs:
-I_stokes : numpy.ndarray
Stokes : numpy.ndarray
-Image (2D floats) containing the Stokes parameters accounting for
Stokes cube (3D floats) containing the Stokes I, Q, U, V fluxes.
-total intensity
-Q_stokes : numpy.ndarray
-Image (2D floats) containing the Stokes parameters accounting for
-vertical/horizontal linear polarization intensity
-U_stokes : numpy.ndarray
-Image (2D floats) containing the Stokes parameters accounting for
-+45/-45deg linear polarization intensity
Stokes_cov : numpy.ndarray
-Covariance matrix of the Stokes parameters I, Q, U.
Covariance matrix of the Stokes parameters I, Q, U, V.
data_mask : numpy.ndarray
2D boolean array delimiting the data to work on.
header_stokes : astropy.fits.header.Header
@@ -1628,17 +1598,10 @@ def rotate_Stokes(I_stokes, Q_stokes, U_stokes, Stokes_cov, data_mask, header_st
Defaults to None.
----------
Returns:
-new_I_stokes : numpy.ndarray
Stokes : numpy.ndarray
-Rotated mage (2D floats) containing the rotated Stokes parameters
Rotated Stokes cube (3D floats) containing the rotated Stokes I, Q, U, V fluxes.
-accounting for total intensity
-new_Q_stokes : numpy.ndarray
-Rotated mage (2D floats) containing the rotated Stokes parameters
-accounting for vertical/horizontal linear polarization intensity
-new_U_stokes : numpy.ndarray
-Rotated image (2D floats) containing the rotated Stokes parameters
-accounting for +45/-45deg linear polarization intensity.
new_Stokes_cov : numpy.ndarray
-Updated covariance matrix of the Stokes parameters I, Q, U.
Updated covariance matrix of the Stokes parameters I, Q, U, V.
new_header_stokes : astropy.fits.header.Header
Updated Header file associated with the Stokes fluxes accounting
for the new orientation angle.
@@ -1647,71 +1610,58 @@ def rotate_Stokes(I_stokes, Q_stokes, U_stokes, Stokes_cov, data_mask, header_st
""" """
# Apply cuts # Apply cuts
if SNRi_cut is not None: if SNRi_cut is not None:
SNRi = I_stokes / np.sqrt(Stokes_cov[0, 0]) SNRi = Stokes[0] / np.sqrt(Stokes_cov[0, 0])
mask = SNRi < SNRi_cut mask = SNRi < SNRi_cut
eps = 1e-5 eps = 1e-5
for i in range(I_stokes.shape[0]): for i in range(4):
for j in range(I_stokes.shape[1]): Stokes[i][mask] = eps * np.sqrt(Stokes_cov[i, i][mask])
if mask[i, j]:
I_stokes[i, j] = eps * np.sqrt(Stokes_cov[0, 0][i, j])
Q_stokes[i, j] = eps * np.sqrt(Stokes_cov[1, 1][i, j])
U_stokes[i, j] = eps * np.sqrt(Stokes_cov[2, 2][i, j])
# Rotate I_stokes, Q_stokes, U_stokes using rotation matrix # Rotate Stokes I, Q, U using rotation matrix
ang = -float(header_stokes["ORIENTAT"]) ang = -float(header_stokes["ORIENTAT"])
alpha = np.pi / 180.0 * ang alpha = np.pi / 180.0 * ang
mrot = np.array([[1.0, 0.0, 0.0], [0.0, np.cos(2.0 * alpha), np.sin(2.0 * alpha)], [0, -np.sin(2.0 * alpha), np.cos(2.0 * alpha)]]) mrot = np.array([[1.0, 0.0, 0.0], [0.0, np.cos(2.0 * alpha), np.sin(2.0 * alpha)], [0, -np.sin(2.0 * alpha), np.cos(2.0 * alpha)]])
old_center = np.array(I_stokes.shape) / 2 old_center = np.array(Stokes.shape[1:]) / 2
shape = np.fix(np.array(I_stokes.shape) * np.sqrt(2.5)).astype(int) shape = np.fix(np.array(Stokes.shape[1:]) * np.sqrt(2.5)).astype(int)
new_center = np.array(shape) / 2 new_center = np.array(shape) / 2
I_stokes = zeropad(I_stokes, shape) Stokes = zeropad(Stokes, (*Stokes.shape[:-2], *shape))
Q_stokes = zeropad(Q_stokes, shape)
U_stokes = zeropad(U_stokes, shape)
data_mask = zeropad(data_mask, shape) data_mask = zeropad(data_mask, shape)
Stokes_cov = zeropad(Stokes_cov, [*Stokes_cov.shape[:-2], *shape]) Stokes_cov = zeropad(Stokes_cov, (*Stokes_cov.shape[:-2], *shape))
new_I_stokes = np.zeros(shape) new_Stokes = np.zeros((*Stokes.shape[:-2], *shape))
new_Q_stokes = np.zeros(shape)
new_U_stokes = np.zeros(shape)
new_Stokes_cov = np.zeros((*Stokes_cov.shape[:-2], *shape)) new_Stokes_cov = np.zeros((*Stokes_cov.shape[:-2], *shape))
# Rotate original images using scipy.ndimage.rotate # Rotate original images using scipy.ndimage.rotate
new_I_stokes = sc_rotate(I_stokes, ang, order=1, reshape=False, cval=0.0) new_Stokes = sc_rotate(Stokes, ang, axes=(1, 2), order=1, reshape=False, cval=0.0)
new_Q_stokes = sc_rotate(Q_stokes, ang, order=1, reshape=False, cval=0.0)
new_U_stokes = sc_rotate(U_stokes, ang, order=1, reshape=False, cval=0.0)
new_data_mask = sc_rotate(data_mask.astype(float) * 10.0, ang, order=1, reshape=False, cval=0.0) new_data_mask = sc_rotate(data_mask.astype(float) * 10.0, ang, order=1, reshape=False, cval=0.0)
new_data_mask[new_data_mask < 1.0] = 0.0 new_data_mask[new_data_mask < 1.0] = 0.0
new_data_mask = new_data_mask.astype(bool) new_data_mask = new_data_mask.astype(bool)
for i in range(3): new_Stokes_cov = np.abs(sc_rotate(Stokes_cov, ang, axes=(2, 3), order=1, reshape=False, cval=0.0))
for j in range(3):
new_Stokes_cov[i, j] = sc_rotate(Stokes_cov[i, j], ang, order=1, reshape=False, cval=0.0)
new_Stokes_cov[i, i] = np.abs(new_Stokes_cov[i, i])
for i in range(shape[0]): for i in range(shape[0]):
for j in range(shape[1]): for j in range(shape[1]):
new_I_stokes[i, j], new_Q_stokes[i, j], new_U_stokes[i, j] = np.dot(mrot, np.array([new_I_stokes[i, j], new_Q_stokes[i, j], new_U_stokes[i, j]])).T new_Stokes[:3, i, j] = np.dot(mrot, new_Stokes[:3, i, j])
new_Stokes_cov[:, :, i, j] = np.dot(mrot, np.dot(new_Stokes_cov[:, :, i, j], mrot.T)) new_Stokes_cov[:3, :3, i, j] = np.dot(mrot, np.dot(new_Stokes_cov[:3, :3, i, j], mrot.T))
if s_IQU_stat is not None: if Stokes_cov_stat is not None:
s_IQU_stat = zeropad(s_IQU_stat, [*s_IQU_stat.shape[:-2], *shape]) Stokes_cov_stat = zeropad(Stokes_cov_stat, [*Stokes_cov_stat.shape[:-2], *shape])
new_s_IQU_stat = np.zeros((*s_IQU_stat.shape[:-2], *shape)) new_Stokes_cov_stat = np.zeros((*Stokes_cov_stat.shape[:-2], *shape))
for i in range(3): for i in range(3):
for j in range(3): for j in range(3):
new_s_IQU_stat[i, j] = sc_rotate(s_IQU_stat[i, j], ang, order=1, reshape=False, cval=0.0) new_Stokes_cov_stat[i, j] = sc_rotate(Stokes_cov_stat[i, j], ang, order=1, reshape=False, cval=0.0)
new_s_IQU_stat[i, i] = np.abs(new_s_IQU_stat[i, i]) new_Stokes_cov_stat[i, i] = np.abs(new_Stokes_cov_stat[i, i])
for i in range(shape[0]): for i in range(shape[0]):
for j in range(shape[1]): for j in range(shape[1]):
new_s_IQU_stat[:, :, i, j] = np.dot(mrot, np.dot(new_s_IQU_stat[:, :, i, j], mrot.T)) new_Stokes_cov_stat[:3, :3, i, j] = np.dot(mrot, np.dot(new_Stokes_cov_stat[:3, :3, i, j], mrot.T))
# Update headers to new angle # Update headers to new angle
mrot = np.array([[np.cos(-alpha), -np.sin(-alpha)], [np.sin(-alpha), np.cos(-alpha)]]) mrot = np.array([[np.cos(-alpha), -np.sin(-alpha)], [np.sin(-alpha), np.cos(-alpha)]])
new_header_stokes = deepcopy(header_stokes) new_header_stokes = deepcopy(header_stokes)
new_wcs = WCS(header_stokes).celestial.deepcopy() new_wcs = WCS(header_stokes).deepcopy()
new_wcs.wcs.pc = np.dot(mrot, new_wcs.wcs.pc) new_wcs.wcs.pc[1:] = np.dot(mrot, new_wcs.wcs.pc[1:])
new_wcs.wcs.crpix = np.dot(mrot, new_wcs.wcs.crpix - old_center[::-1]) + new_center[::-1] new_wcs.wcs.crpix[1:] = np.dot(mrot, new_wcs.wcs.crpix[1:] - old_center[::-1]) + new_center[::-1]
new_wcs.wcs.set() new_wcs.wcs.set()
for key, val in new_wcs.to_header().items(): for key, val in new_wcs.to_header().items():
new_header_stokes.set(key, val) new_header_stokes.set(key, val)
@@ -1720,18 +1670,13 @@ def rotate_Stokes(I_stokes, Q_stokes, U_stokes, Stokes_cov, data_mask, header_st
# Nan handling :
fmax = np.finfo(np.float64).max
-new_I_stokes[np.isnan(new_I_stokes)] = 0.0
new_Stokes[np.isnan(new_Stokes)] = 0.0
-new_Q_stokes[new_I_stokes == 0.0] = 0.0
-new_U_stokes[new_I_stokes == 0.0] = 0.0
-new_Q_stokes[np.isnan(new_Q_stokes)] = 0.0
-new_U_stokes[np.isnan(new_U_stokes)] = 0.0
new_Stokes[1:][np.broadcast_to(new_Stokes[0] == 0.0, Stokes[1:].shape)] = 0.0
new_Stokes_cov[np.isnan(new_Stokes_cov)] = fmax
# Compute updated integrated values for P, PA
mask = deepcopy(new_data_mask).astype(bool)
-I_diluted = new_I_stokes[mask].sum()
-Q_diluted = new_Q_stokes[mask].sum()
-U_diluted = new_U_stokes[mask].sum()
I_diluted, Q_diluted, U_diluted = (new_Stokes[:3] * np.broadcast_to(mask, Stokes[:3].shape)).sum(axis=(1, 2))
I_diluted_err = np.sqrt(np.sum(new_Stokes_cov[0, 0][mask]))
Q_diluted_err = np.sqrt(np.sum(new_Stokes_cov[1, 1][mask]))
U_diluted_err = np.sqrt(np.sum(new_Stokes_cov[2, 2][mask]))
@@ -1757,10 +1702,10 @@ def rotate_Stokes(I_stokes, Q_stokes, U_stokes, Stokes_cov, data_mask, header_st
new_header_stokes["PA_int"] = (PA_diluted, "Integrated polarization angle") new_header_stokes["PA_int"] = (PA_diluted, "Integrated polarization angle")
new_header_stokes["sPA_int"] = (np.ceil(PA_diluted_err * 10.0) / 10.0, "Integrated polarization angle error") new_header_stokes["sPA_int"] = (np.ceil(PA_diluted_err * 10.0) / 10.0, "Integrated polarization angle error")
if s_IQU_stat is not None: if Stokes_cov_stat is not None:
return new_I_stokes, new_Q_stokes, new_U_stokes, new_Stokes_cov, new_data_mask, new_header_stokes, new_s_IQU_stat return new_Stokes, new_Stokes_cov, new_data_mask, new_header_stokes, new_Stokes_cov_stat
else: else:
return new_I_stokes, new_Q_stokes, new_U_stokes, new_Stokes_cov, new_data_mask, new_header_stokes return new_Stokes, new_Stokes_cov, new_data_mask, new_header_stokes
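The 2*alpha in mrot is not a typo: (Q, U) transforms as a spin-2 quantity, so rotating the image frame by alpha rotates the (Q, U) vector by 2*alpha, leaves I untouched, preserves P and shifts PA by -alpha. A self-contained check of that bookkeeping:

import numpy as np

alpha = np.deg2rad(30.0)
mrot = np.array(
    [
        [1.0, 0.0, 0.0],
        [0.0, np.cos(2 * alpha), np.sin(2 * alpha)],
        [0.0, -np.sin(2 * alpha), np.cos(2 * alpha)],
    ]
)
I, Q, U = mrot @ np.array([1.0, 0.1, 0.0])  # start from P = 0.1, PA = 0 deg
print(np.hypot(Q, U) / I)                   # 0.1: P is rotation-invariant
print((90.0 / np.pi) * np.arctan2(U, Q))    # -30.0: PA shifted by -alpha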
def rotate_data(data_array, error_array, data_mask, headers):

View File

@@ -1,4 +1,5 @@
import numpy as np
from matplotlib.transforms import Bbox, BboxTransform
def rot2D(ang):
@@ -154,6 +155,50 @@ def sci_not(v, err, rnd=1, out=str):
return *output[1:], -power
class cursor_data:
"""
Object to overwrite data getter and formatter in interactive plots.
"""
def __init__(self, im, error=None, fmt=None) -> None:
self.im = im
self.data = im.get_array()
self.fmt = "{:.2f}" if fmt is None else fmt
self.err = error
def set_err(self, err) -> None:
if self.data.shape != err.shape:
raise ValueError("Error and Data don't have the same shape")
else:
self.err = err
def set_fmt(self, fmt) -> None:
self.fmt = fmt
def get(self, event):
xmin, xmax, ymin, ymax = self.im.get_extent()
if self.im.origin == "upper":
ymin, ymax = ymax, ymin
data_extent = Bbox([[xmin, ymin], [xmax, ymax]])
array_extent = Bbox([[0, 0], [self.data.shape[1], self.data.shape[0]]])
trans = self.im.get_transform().inverted()
trans += BboxTransform(boxin=data_extent, boxout=array_extent)
point = trans.transform([event.x, event.y])
if any(np.isnan(point)):
return None
j, i = point.astype(int)
# Discard positions outside the array bounds
if not (0 <= i < self.data.shape[0]) or not (0 <= j < self.data.shape[1]):
return None
elif self.err is not None:
return self.data[i, j], self.err[i, j]
else:
return (self.data[i, j],)  # single-pixel value, as a 1-tuple for fmt.format(*...)
def format(self, y) -> str:
return self.fmt.format(*y)
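A hypothetical usage sketch for cursor_data (the module location is an assumption; the hooks are matplotlib's Artist.get_cursor_data / format_cursor_data, which the toolbar readout calls on mouse motion):

import matplotlib.pyplot as plt
import numpy as np
# from lib.utils import cursor_data  # assuming this is where the class lives

rng = np.random.default_rng(0)
data = rng.normal(size=(32, 32))
err = np.full(data.shape, 0.1)

fig, ax = plt.subplots()
im = ax.imshow(data, origin="lower")
cd = cursor_data(im, error=err, fmt="{:.2f} ± {:.2f}")
im.get_cursor_data = cd.get        # receives the mouse event, returns (value, error)
im.format_cursor_data = cd.format  # builds the status-bar string from that tuple
plt.show()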
def wcs_CD_to_PC(CD):
"""
Return the position angle in degrees to the North direction of a wcs
@@ -197,3 +242,83 @@ def wcs_PA(PC, cdelt):
rot2 = np.pi / 2.0 - np.arctan2(abs(cdelt[0]) * PC[0, 1], cdelt[1] * PC[1, 1])
orient = 0.5 * (rot + rot2) * 180.0 / np.pi
return orient
def add_stokes_axis_to_header(header, ind=0):
"""
Add a new Stokes axis to the WCS cards in the header.
----------
Inputs:
header : astropy.io.fits.header.Header
The header in which the WCS to work on is saved.
ind : int, optional
Index of the WCS axis to insert the new Stokes axis in front of.
To append at the end, set ind = wcs.wcs.naxis.
The beginning is at position 0.
Defaults to 0.
----------
Returns:
new_head : astropy.io.fits.header.Header
A new Header instance with an additional Stokes axis
"""
from astropy.wcs import WCS
from astropy.wcs.utils import add_stokes_axis_to_wcs
wcs = WCS(header).deepcopy()
wcs_Stokes = add_stokes_axis_to_wcs(wcs, ind).deepcopy()
wcs_Stokes.array_shape = (*wcs.array_shape[ind:], 4, *wcs.array_shape[:ind]) if ind < wcs.wcs.naxis else (4, *wcs.array_shape)
new_head = header.copy()
new_head["NAXIS"] = wcs_Stokes.wcs.naxis
for key in wcs.to_header().keys():
if key not in wcs_Stokes.to_header().keys():
del new_head[key]
for key, val in (
list(wcs_Stokes.to_header().items())
+ [("NAXIS%d" % (i + 1), k) for i, k in enumerate(wcs_Stokes.array_shape[::-1])]
+ [("CUNIT%d" % (ind + 1), "STOKES")]
):
if key not in header.keys() and key[:-1] + str(wcs.wcs.naxis) in header.keys():
new_head.insert(key[:-1] + str(wcs.wcs.naxis), (key, val), after=int(key[-1]) < wcs.wcs.naxis)
elif key not in header.keys() and key[:2] + str(wcs.wcs.naxis) + key[2:-1] + str(wcs.wcs.naxis) in header.keys():
new_head.insert(key[:2] + str(wcs.wcs.naxis) + key[2:-1] + str(wcs.wcs.naxis), (key, val), after=int(key[-1]) < wcs.wcs.naxis)
else:
new_head[key] = val
return new_head
def remove_stokes_axis_from_header(header):
"""
Remove the Stokes axis from the WCS cards in the header.
----------
Inputs:
header : astropy.io.fits.header.Header
The header in which the WCS to work on is saved.
----------
Returns:
new_head : astropy.io.fits.header.Header
A new Header instance with only a celestial WCS.
"""
from astropy.wcs import WCS
wcs = WCS(header).deepcopy()
new_wcs = WCS(header).celestial.deepcopy()
new_head = header.copy()
if "NAXIS%d" % (new_wcs.wcs.naxis + 1) in new_head.keys():
del new_head["NAXIS%d" % (new_wcs.wcs.naxis + 1)]
new_head["NAXIS"] = new_wcs.wcs.naxis
for i, k in enumerate(new_wcs.array_shape[::-1]):
new_head["NAXIS%d" % (i + 1)] = k
for key in list(WCS(header).to_header().keys()) + list(
np.unique([["PC%d_%d" % (i + 1, j + 1) for i in range(wcs.wcs.naxis)] for j in range(wcs.wcs.naxis)])
):
if key in new_head.keys() and key not in new_wcs.to_header().keys():
del new_head[key]
for key, val in new_wcs.to_header().items():
if key not in new_head.keys() and key[:-1] + str(wcs.wcs.naxis) in new_head.keys():
new_head.insert(key[:-1] + str(wcs.wcs.naxis), (key, val), after=True)
elif key not in new_head.keys() and key[:2] + str(wcs.wcs.naxis) + key[2:-1] + str(wcs.wcs.naxis) in new_head.keys():
new_head.insert(key[:2] + str(wcs.wcs.naxis) + key[2:-1] + str(wcs.wcs.naxis), (key, val), after=True)
else:
new_head[key] = val
return new_head
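A round-trip sketch for the two helpers above (the minimal header is illustrative only; the card bookkeeping inside the helpers depends on what a real FOC header contains, and both helpers are assumed importable from lib.utils):

from astropy.io import fits
from astropy.wcs import WCS

header = fits.Header()
header["NAXIS"] = 2
header["NAXIS1"], header["NAXIS2"] = 64, 64
header["CTYPE1"], header["CTYPE2"] = "RA---TAN", "DEC--TAN"
header["CRPIX1"], header["CRPIX2"] = 32.0, 32.0
header["CRVAL1"], header["CRVAL2"] = 40.67, -0.013
header["CDELT1"], header["CDELT2"] = -4e-6, 4e-6

head3d = add_stokes_axis_to_header(header)      # default ind=0: Stokes becomes FITS axis 1
print(WCS(head3d).wcs.naxis)                    # 3
head2d = remove_stokes_axis_from_header(head3d)
print(WCS(head2d).wcs.naxis)                    # back to 2, celestial only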

View File

@@ -1,5 +1,6 @@
#!/usr/bin/env python3
# -*- coding:utf-8 -*-
from copy import deepcopy
from pathlib import Path
from sys import path as syspath
@@ -26,7 +27,7 @@ def same_reduction(infiles):
except KeyError:
pass
test_IQU = True
for look in ["I_stokes", "Q_stokes", "U_stokes", "IQU_cov_matrix"]: for look in ["STOKES", "STOKES_COV"]:
test_IQU *= look in datatype
params["IQU"].append(test_IQU)
# test for orientation and pixel size
@@ -88,73 +89,78 @@ def same_obs(infiles, data_folder):
def combine_Stokes(infiles):
"""
-Combine I, Q, U from different observations of a same object.
Combine Stokes matrices from different observations of the same object.
""" """
from astropy.io.fits import open as fits_open from astropy.io.fits import open as fits_open
from lib.reduction import align_data, zeropad from lib.reduction import align_data, zeropad
from lib.utils import remove_stokes_axis_from_header
from scipy.ndimage import shift as sc_shift
-I_array, Q_array, U_array, IQU_cov_array, data_mask, headers = [], [], [], [], [], []
Stokes_array, Stokes_cov_array, Stokes_cov_stat_array, data_mask, headers = [], [], [], [], []
shape = np.array([0, 0])
for file in infiles:
with fits_open(file) as f:
headers.append(f[0].header)
-I_array.append(f["I_stokes"].data)
Stokes_array.append(f["stokes"].data)
-Q_array.append(f["Q_stokes"].data)
Stokes_cov_array.append(f["stokes_cov"].data)
-U_array.append(f["U_stokes"].data)
Stokes_cov_stat_array.append(f["stokes_cov_stat"].data)
-IQU_cov_array.append(f["IQU_cov_matrix"].data)
data_mask.append(f["data_mask"].data.astype(bool))
-shape[0] = np.max([shape[0], f["I_stokes"].data.shape[0]])
shape[0] = np.max([shape[0], f["stokes"].data[0].shape[0]])
-shape[1] = np.max([shape[1], f["I_stokes"].data.shape[1]])
shape[1] = np.max([shape[1], f["stokes"].data[0].shape[1]])
exposure_array = np.array([float(head["EXPTIME"]) for head in headers])
shape += np.array([5, 5])
data_mask = np.sum([zeropad(mask, shape) for mask in data_mask], axis=0).astype(bool)
-I_array = np.array([zeropad(I, shape) for I in I_array])
Stokes_array = np.array([[zeropad(stk[i], shape) for i in range(4)] for stk in Stokes_array])
-Q_array = np.array([zeropad(Q, shape) for Q in Q_array])
Stokes_cov_array = np.array([[[zeropad(cov[i, j], shape) for j in range(4)] for i in range(4)] for cov in Stokes_cov_array])
-U_array = np.array([zeropad(U, shape) for U in U_array])
Stokes_cov_stat_array = np.array([[[zeropad(cov_stat[i, j], shape) for j in range(4)] for i in range(4)] for cov_stat in Stokes_cov_stat_array])
-IQU_cov_array = np.array([[[zeropad(cov[i, j], shape) for j in range(3)] for i in range(3)] for cov in IQU_cov_array])
-sI_array = np.sqrt(IQU_cov_array[:, 0, 0])
I_array = deepcopy(Stokes_array[:, 0])
-sQ_array = np.sqrt(IQU_cov_array[:, 1, 1])
sI_array = deepcopy(np.sqrt(Stokes_cov_array[:, 0, 0]))
-sU_array = np.sqrt(IQU_cov_array[:, 2, 2])
-_, _, _, _, shifts, errors = align_data(I_array, headers, error_array=sI_array, data_mask=data_mask, ref_center="center", return_shifts=True)
heads = [remove_stokes_axis_from_header(head) for head in headers]
_, _, _, _, shifts, errors = align_data(
I_array, heads, error_array=sI_array, background=sI_array[:, 0, 0], data_mask=data_mask, ref_center="center", return_shifts=True
)
data_mask_aligned = np.sum([sc_shift(data_mask, s, order=1, cval=0.0) for s in shifts], axis=0).astype(bool)
-I_aligned, sI_aligned = (
-np.array([sc_shift(I, s, order=1, cval=0.0) for I, s in zip(I_array, shifts)]),
-np.array([sc_shift(sI, s, order=1, cval=0.0) for sI, s in zip(sI_array, shifts)]),
-)
-Q_aligned, sQ_aligned = (
-np.array([sc_shift(Q, s, order=1, cval=0.0) for Q, s in zip(Q_array, shifts)]),
-np.array([sc_shift(sQ, s, order=1, cval=0.0) for sQ, s in zip(sQ_array, shifts)]),
-)
-U_aligned, sU_aligned = (
-np.array([sc_shift(U, s, order=1, cval=0.0) for U, s in zip(U_array, shifts)]),
-np.array([sc_shift(sU, s, order=1, cval=0.0) for sU, s in zip(sU_array, shifts)]),
-)
-IQU_cov_aligned = np.array([[[sc_shift(cov[i, j], s, order=1, cval=0.0) for j in range(3)] for i in range(3)] for cov, s in zip(IQU_cov_array, shifts)])
Stokes_aligned = np.array([[sc_shift(stk[i], s, order=1, cval=0.0) for i in range(4)] for stk, s in zip(Stokes_array, shifts)])
Stokes_cov_aligned = np.array(
[[[sc_shift(cov[i, j], s, order=1, cval=0.0) for j in range(4)] for i in range(4)] for cov, s in zip(Stokes_cov_array, shifts)]
)
Stokes_cov_stat_aligned = np.array(
[[[sc_shift(cov_stat[i, j], s, order=1, cval=0.0) for j in range(4)] for i in range(4)] for cov_stat, s in zip(Stokes_cov_stat_array, shifts)]
)
-I_combined = np.sum([exp * I for exp, I in zip(exposure_array, I_aligned)], axis=0) / exposure_array.sum()
-Q_combined = np.sum([exp * Q for exp, Q in zip(exposure_array, Q_aligned)], axis=0) / exposure_array.sum()
-U_combined = np.sum([exp * U for exp, U in zip(exposure_array, U_aligned)], axis=0) / exposure_array.sum()
-IQU_cov_combined = np.zeros((3, 3, shape[0], shape[1]))
-for i in range(3):
-IQU_cov_combined[i, i] = np.sum([exp**2 * cov for exp, cov in zip(exposure_array, IQU_cov_aligned[:, i, i])], axis=0) / exposure_array.sum() ** 2
-for j in [x for x in range(3) if x != i]:
-IQU_cov_combined[i, j] = np.sqrt(
-np.sum([exp**2 * cov**2 for exp, cov in zip(exposure_array, IQU_cov_aligned[:, i, j])], axis=0) / exposure_array.sum() ** 2
Stokes_combined = np.zeros((4, shape[0], shape[1]))
for i in range(4):
Stokes_combined[i] = np.sum([exp * stk for exp, stk in zip(exposure_array, Stokes_aligned[:, i])], axis=0) / exposure_array.sum()
Stokes_cov_combined = np.zeros((4, 4, shape[0], shape[1]))
Stokes_cov_stat_combined = np.zeros((4, 4, shape[0], shape[1]))
for i in range(4):
Stokes_cov_combined[i, i] = np.sum([exp**2 * cov for exp, cov in zip(exposure_array, Stokes_cov_aligned[:, i, i])], axis=0) / exposure_array.sum() ** 2
Stokes_cov_stat_combined[i, i] = (
np.sum([exp**2 * cov_stat for exp, cov_stat in zip(exposure_array, Stokes_cov_stat_aligned[:, i, i])], axis=0) / exposure_array.sum() ** 2
)
for j in [x for x in range(4) if x != i]:
Stokes_cov_combined[i, j] = np.sqrt(
np.sum([exp**2 * cov**2 for exp, cov in zip(exposure_array, Stokes_cov_aligned[:, i, j])], axis=0) / exposure_array.sum() ** 2
)
-IQU_cov_combined[j, i] = np.sqrt(
Stokes_cov_combined[j, i] = np.sqrt(
-np.sum([exp**2 * cov**2 for exp, cov in zip(exposure_array, IQU_cov_aligned[:, j, i])], axis=0) / exposure_array.sum() ** 2
np.sum([exp**2 * cov**2 for exp, cov in zip(exposure_array, Stokes_cov_aligned[:, j, i])], axis=0) / exposure_array.sum() ** 2
)
Stokes_cov_stat_combined[i, j] = np.sqrt(
np.sum([exp**2 * cov_stat**2 for exp, cov_stat in zip(exposure_array, Stokes_cov_stat_aligned[:, i, j])], axis=0) / exposure_array.sum() ** 2
)
Stokes_cov_stat_combined[j, i] = np.sqrt(
np.sum([exp**2 * cov_stat**2 for exp, cov_stat in zip(exposure_array, Stokes_cov_stat_aligned[:, j, i])], axis=0) / exposure_array.sum() ** 2
)
header_combined = headers[0]
header_combined["EXPTIME"] = exposure_array.sum()
-return I_combined, Q_combined, U_combined, IQU_cov_combined, data_mask_aligned, header_combined
return Stokes_combined, Stokes_cov_combined, Stokes_cov_stat_combined, data_mask_aligned, header_combined
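The weighting above is plain exposure-time averaging of count rates: fluxes combine as sum(t_k * I_k) / sum(t_k), so for independent observations the diagonal variances combine as sum(t_k**2 * var_k) / sum(t_k)**2. One-pixel toy numbers:

import numpy as np

t = np.array([1000.0, 3000.0])   # exposure times [s]
I = np.array([2.0, 2.2])         # count rates in one pixel
var = np.array([0.04, 0.01])     # variances of each rate
print(np.sum(t * I) / t.sum())              # 2.15
print(np.sum(t**2 * var) / t.sum() ** 2)    # 0.008125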
def main(infiles, target=None, output_dir="./data/"):
@@ -190,21 +196,24 @@ def main(infiles, target=None, output_dir="./data/"):
infiles = new_infiles
-I_combined, Q_combined, U_combined, IQU_cov_combined, data_mask_combined, header_combined = combine_Stokes(infiles=infiles)
Stokes_combined, Stokes_cov_combined, Stokes_cov_stat_combined, data_mask_combined, header_combined = combine_Stokes(infiles=infiles)
-I_combined, Q_combined, U_combined, IQU_cov_combined, data_mask_combined, header_combined = rotate_Stokes(
Stokes_combined, Stokes_cov_combined, data_mask_combined, header_combined, Stokes_cov_stat_combined = rotate_Stokes(
-I_stokes=I_combined, Q_stokes=Q_combined, U_stokes=U_combined, Stokes_cov=IQU_cov_combined, data_mask=data_mask_combined, header_stokes=header_combined
Stokes=Stokes_combined,
Stokes_cov=Stokes_cov_combined,
Stokes_cov_stat=Stokes_cov_stat_combined,
data_mask=data_mask_combined,
header_stokes=header_combined,
)
P, debiased_P, s_P, s_P_P, PA, s_PA, s_PA_P = compute_pol(
-I_stokes=I_combined, Q_stokes=Q_combined, U_stokes=U_combined, Stokes_cov=IQU_cov_combined, header_stokes=header_combined
Stokes=Stokes_combined, Stokes_cov=Stokes_cov_combined, Stokes_cov_stat=Stokes_cov_stat_combined, header_stokes=header_combined
)
filename = header_combined["FILENAME"]
figname = "_".join([target, filename[filename.find("FOC_") :], "combined"])
-Stokes_combined = save_Stokes(
Stokes_c = save_Stokes(
-I_stokes=I_combined,
Stokes=Stokes_combined,
-Q_stokes=Q_combined,
Stokes_cov=Stokes_cov_combined,
-U_stokes=U_combined,
Stokes_cov_stat=Stokes_cov_stat_combined,
-Stokes_cov=IQU_cov_combined,
P=P,
debiased_P=debiased_P,
s_P=s_P,
@@ -219,7 +228,7 @@ def main(infiles, target=None, output_dir="./data/"):
return_hdul=True,
)
-pol_map(Stokes_combined, **kwargs)
pol_map(Stokes_c, **kwargs)
return "/".join([data_folder, figname + ".fits"])

View File

@@ -0,0 +1,311 @@
#!/usr/bin/python
# -*- coding:utf-8 -*-
from pathlib import Path
from sys import path as syspath
syspath.append(str(Path(__file__).parent.parent))
from os.path import join as path_join
import matplotlib.pyplot as plt
import numpy as np
from astropy.io import fits
from astropy.wcs import WCS
from lib.background import bin_centers, gauss
from lib.deconvolve import zeropad
from lib.plots import princ_angle
from lib.reduction import align_data
from lib.utils import remove_stokes_axis_from_header
from matplotlib.colors import LogNorm
from scipy.ndimage import shift
from scipy.optimize import curve_fit
root_dir = path_join("/home/tibeuleu/FOC_Reduction/")
root_dir_K = path_join(root_dir, "Kishimoto", "output")
root_dir_S = path_join(root_dir, "Code")
root_dir_data_S = path_join(root_dir, "data", "NGC1068", "5144")
root_dir_plot_S = path_join(root_dir, "plots", "NGC1068", "5144")
filename_S = "NGC1068_K_FOC_b10.00px.fits"
plt.rcParams.update({"font.size": 15})
SNRi_cut = 30.0
SNRp_cut = 3.0
data_K = {}
data_S = {}
for d, i in zip(["I", "Q", "U", "P", "PA", "sI", "sQ", "sU", "sP", "sPA"], [[0, 0], [0, 1], [0, 2], 4, 7, [2, 0, 0], [2, 1, 1], [2, 2, 2], 5, 8]):
data_K[d] = np.loadtxt(path_join(root_dir_K, d + ".txt"))
with fits.open(path_join(root_dir_data_S, filename_S)) as f:
if type(i) is not int:
data_S[d] = f[i[0]].data[*i[1:]] if d[0] != "s" else np.sqrt(f[i[0]].data[*i[1:]])
else:
data_S[d] = f[i].data
if d == "I":
header = f[i].header
header = remove_stokes_axis_from_header(header)
wcs = WCS(header).celestial
convert_flux = header["photflam"]
bkg_S = np.median(data_S["I"]) / 3
bkg_K = np.median(data_K["I"]) / 3
# zeropad data to get same size of array
shape = data_S["I"].shape
for d in data_K:
data_K[d] = zeropad(data_K[d], shape)
# shift array to get same information in same pixel
data_arr, error_ar, heads, data_msk, shifts, shifts_err = align_data(
np.array([data_S["I"], data_K["I"]]),
[header, header],
error_array=np.array([data_S["sI"], data_K["sI"]]),
background=np.array([bkg_S, bkg_K]),
upsample_factor=10.0,
ref_center="center",
return_shifts=True,
)
for d in data_K:
data_K[d] = shift(data_K[d], shifts[1], order=1, cval=0.0)
# compute pol components from shifted array
for d in [data_S, data_K]:
for i in d:
d[i][np.isnan(d[i])] = 0.0
d["P"] = np.where(np.logical_and(np.isfinite(d["I"]), d["I"] > 0.0), np.sqrt(d["Q"] ** 2 + d["U"] ** 2) / d["I"], 0.0)
d["sP"] = np.where(
np.logical_and(np.isfinite(d["I"]), d["I"] > 0.0),
np.sqrt(
(d["Q"] ** 2 * d["sQ"] ** 2 + d["U"] ** 2 * d["sU"] ** 2) / (d["Q"] ** 2 + d["U"] ** 2)
+ ((d["Q"] / d["I"]) ** 2 + (d["U"] / d["I"]) ** 2) * d["sI"] ** 2
)
/ d["I"],
0.0,
)
d["d_P"] = np.where(np.logical_and(np.isfinite(d["P"]), np.isfinite(d["sP"])), np.sqrt(d["P"] ** 2 - d["sP"] ** 2), 0.0)
d["PA"] = 0.5 * np.arctan2(d["U"], d["Q"]) + np.pi
d["SNRp"] = np.zeros(d["d_P"].shape)
d["SNRp"][d["sP"] > 0.0] = d["d_P"][d["sP"] > 0.0] / d["sP"][d["sP"] > 0.0]
d["SNRi"] = np.zeros(d["I"].shape)
d["SNRi"][d["sI"] > 0.0] = d["I"][d["sI"] > 0.0] / d["sI"][d["sI"] > 0.0]
d["mask"] = np.logical_and(d["SNRi"] > SNRi_cut, d["SNRp"] > SNRp_cut)
data_S["mask"], data_K["mask"] = np.logical_and(data_S["mask"], data_K["mask"]), np.logical_and(data_S["mask"], data_K["mask"])
#
# Compute histogram of measured polarization in cut
#
bins = int(data_S["mask"].sum() / 5)
bin_size = 1.0 / bins
mod_p = np.linspace(0.0, 1.0, 300)
for d in [data_S, data_K]:
d["hist"], d["bin_edges"] = np.histogram(d["d_P"][d["mask"]], bins=bins, range=(0.0, 1.0))
d["binning"] = bin_centers(d["bin_edges"])
peak, bins_fwhm = d["binning"][np.argmax(d["hist"])], d["binning"][d["hist"] > d["hist"].max() / 2.0]
fwhm = bins_fwhm[-1] - bins_fwhm[0]
p0 = [d["hist"].max(), peak, fwhm]
try:
popt, pcov = curve_fit(gauss, d["binning"], d["hist"], p0=p0)
except RuntimeError:
popt = p0
d["hist_chi2"] = np.sum((d["hist"] - gauss(d["binning"], *popt)) ** 2) / d["hist"].size
d["hist_popt"] = popt
fig_p, ax_p = plt.subplots(num="Polarization degree histogram", figsize=(10, 6), constrained_layout=True)
ax_p.errorbar(data_S["binning"], data_S["hist"], xerr=bin_size / 2.0, fmt="b.", ecolor="b", label="P through this pipeline")
ax_p.plot(mod_p, gauss(mod_p, *data_S["hist_popt"]), "b--", label="mean = {1:.2f}, stdev = {2:.2f}".format(*data_S["hist_popt"]))
ax_p.errorbar(data_K["binning"], data_K["hist"], xerr=bin_size / 2.0, fmt="r.", ecolor="r", label="P through Kishimoto's pipeline")
ax_p.plot(mod_p, gauss(mod_p, *data_K["hist_popt"]), "r--", label="mean = {1:.2f}, stdev = {2:.2f}".format(*data_K["hist_popt"]))
ax_p.set(xlabel="Polarization degree", ylabel="Counts", title="Histogram of polarization degree computed in the cut for both pipelines.")
ax_p.legend()
fig_p.savefig(path_join(root_dir_plot_S, "NGC1068_K_pol_deg.pdf"), bbox_inches="tight", dpi=300)
#
# Compute angular difference between the maps in cut
#
dtheta = np.where(
data_S["mask"],
0.5
* np.arctan(
(np.sin(2 * data_S["PA"]) * np.cos(2 * data_K["PA"]) - np.cos(2 * data_S["PA"]) * np.sin(2 * data_K["PA"]))
/ (np.cos(2 * data_S["PA"]) * np.cos(2 * data_K["PA"]) + np.sin(2 * data_S["PA"]) * np.sin(2 * data_K["PA"]))
),
np.nan,
)
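(The numerator and denominator above are the expansions of sin(2(theta_S - theta_K)) and cos(2(theta_S - theta_K)), so dtheta is the angle difference wrapped into (-45, 45] deg.) A quick numeric check:

import numpy as np

a, b = np.deg2rad(75.0), np.deg2rad(20.0)
num = np.sin(2 * a) * np.cos(2 * b) - np.cos(2 * a) * np.sin(2 * b)
den = np.cos(2 * a) * np.cos(2 * b) + np.sin(2 * a) * np.sin(2 * b)
print(np.rad2deg(0.5 * np.arctan(num / den)))  # -35.0, i.e. 75 - 20 wrapped by 90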
fig_pa = plt.figure(num="Polarization angle alignment")
ax_pa = fig_pa.add_subplot(111, projection=wcs)
cbar_ax_pa = fig_pa.add_axes([0.88, 0.12, 0.01, 0.75])
ax_pa.set_title(r"Degree of alignment $\zeta$ of the polarization angles from the 2 pipelines in the cut")
im_pa = ax_pa.imshow(np.cos(2 * dtheta), vmin=-1.0, vmax=1.0, origin="lower", cmap="bwr", label=r"$\zeta$ between this pipeline and Kishimoto's")
cbar_pa = plt.colorbar(im_pa, cax=cbar_ax_pa, label=r"$\zeta = \cos\left( 2 \cdot \delta\theta_P \right)$")
ax_pa.coords[0].set_axislabel("Right Ascension (J2000)")
ax_pa.coords[1].set_axislabel("Declination (J2000)")
fig_pa.savefig(path_join(root_dir_plot_S, "NGC1068_K_pol_ang.pdf"), bbox_inches="tight", dpi=300)
#
# Compute power uncertainty difference between the maps in cut
#
eta = np.where(data_S["mask"], np.abs(data_K["d_P"] - data_S["d_P"]) / np.sqrt(data_S["sP"] ** 2 + data_K["sP"] ** 2) / 2.0, np.nan)
fig_dif_p = plt.figure(num="Polarization power difference ratio")
ax_dif_p = fig_dif_p.add_subplot(111, projection=wcs)
cbar_ax_dif_p = fig_dif_p.add_axes([0.88, 0.12, 0.01, 0.75])
ax_dif_p.set_title(r"Degree of difference $\eta$ of the polarization from the 2 pipelines in the cut")
im_dif_p = ax_dif_p.imshow(eta, vmin=0.0, vmax=2.0, origin="lower", cmap="bwr_r", label=r"$\eta$ between this pipeline and Kishimoto's")
cbar_dif_p = plt.colorbar(im_dif_p, cax=cbar_ax_dif_p, label=r"$\eta = \frac{2 \left|P^K-P^S\right|}{\sqrt{{\sigma^K_P}^2+{\sigma^S_P}^2}}$")
ax_dif_p.coords[0].set_axislabel("Right Ascension (J2000)")
ax_dif_p.coords[1].set_axislabel("Declination (J2000)")
fig_dif_p.savefig(path_join(root_dir_plot_S, "NGC1068_K_pol_diff.pdf"), bbox_inches="tight", dpi=300)
#
# Compute angle uncertainty difference between the maps in cut
#
eta = np.where(data_S["mask"], np.abs(data_K["PA"] - data_S["PA"]) / np.sqrt(data_S["sPA"] ** 2 + data_K["sPA"] ** 2) / 2.0, np.nan)
fig_dif_pa = plt.figure(num="Polarization angle difference ratio")
ax_dif_pa = fig_dif_pa.add_subplot(111, projection=wcs)
cbar_ax_dif_pa = fig_dif_pa.add_axes([0.88, 0.12, 0.01, 0.75])
ax_dif_pa.set_title(r"Degree of difference $\eta$ of the polarization from the 2 pipelines in the cut")
im_dif_pa = ax_dif_pa.imshow(eta, vmin=0.0, vmax=2.0, origin="lower", cmap="bwr_r", label=r"$\eta$ between this pipeline and Kishimoto's")
cbar_dif_pa = plt.colorbar(
im_dif_pa, cax=cbar_ax_dif_pa, label=r"$\eta = \frac{2 \left|\theta_P^K-\theta_P^S\right|}{\sqrt{{\sigma^K_{\theta_P}}^2+{\sigma^S_{\theta_P}}^2}}$"
)
ax_dif_pa.coords[0].set_axislabel("Right Ascension (J2000)")
ax_dif_pa.coords[1].set_axislabel("Declination (J2000)")
fig_dif_pa.savefig(path_join(root_dir_plot_S, "NGC1068_K_polang_diff.pdf"), bbox_inches="tight", dpi=300)
# display both polarization maps to check consistency
# plt.rcParams.update({'font.size': 15})
fig = plt.figure(num="Polarization maps comparison", figsize=(10, 10))
ax = fig.add_subplot(111, projection=wcs)
fig.subplots_adjust(right=0.85)
cbar_ax = fig.add_axes([0.88, 0.12, 0.01, 0.75])
for d in [data_S, data_K]:
d["X"], d["Y"] = np.meshgrid(np.arange(d["I"].shape[1]), np.arange(d["I"].shape[0]))
d["xy_U"], d["xy_V"] = (
np.where(d["mask"], d["d_P"] * np.cos(np.pi / 2.0 + d["PA"]), np.nan),
np.where(d["mask"], d["d_P"] * np.sin(np.pi / 2.0 + d["PA"]), np.nan),
)
im0 = ax.imshow(
data_S["I"] * convert_flux,
norm=LogNorm(data_S["I"][data_S["I"] > 0].min() * convert_flux, data_S["I"][data_S["I"] > 0].max() * convert_flux),
origin="lower",
cmap="gray",
label=r"$I_{STOKES}$ through this pipeline",
)
quiv0 = ax.quiver(
data_S["X"],
data_S["Y"],
data_S["xy_U"],
data_S["xy_V"],
units="xy",
angles="uv",
scale=0.5,
scale_units="xy",
pivot="mid",
headwidth=0.0,
headlength=0.0,
headaxislength=0.0,
width=0.2,
color="b",
alpha=0.75,
label="PA through this pipeline",
)
quiv1 = ax.quiver(
data_K["X"],
data_K["Y"],
data_K["xy_U"],
data_K["xy_V"],
units="xy",
angles="uv",
scale=0.5,
scale_units="xy",
pivot="mid",
headwidth=0.0,
headlength=0.0,
headaxislength=0.0,
width=0.1,
color="r",
alpha=0.75,
label="PA through Kishimoto's pipeline",
)
ax.set_title(r"$SNR_I \geq$ " + str(SNRi_cut) + r" & $SNR_P \geq$ " + str(SNRp_cut))
# ax.coords.grid(True, color='white', ls='dotted', alpha=0.5)
ax.coords[0].set_axislabel("Right Ascension (J2000)")
ax.coords[0].set_axislabel_position("b")
ax.coords[0].set_ticklabel_position("b")
ax.coords[1].set_axislabel("Declination (J2000)")
ax.coords[1].set_axislabel_position("l")
ax.coords[1].set_ticklabel_position("l")
# ax.axis('equal')
cbar = plt.colorbar(im0, cax=cbar_ax, label=r"$F_{\lambda}$ [$ergs \cdot cm^{-2} \cdot s^{-1} \cdot \AA^{-1}$]")
ax.legend(loc="upper right")
fig.savefig(path_join(root_dir_plot_S, "NGC1068_K_comparison.pdf"), bbox_inches="tight", dpi=300)
# compute integrated polarization parameters on a specific cut
for d in [data_S, data_K]:
d["I_dil"] = np.sum(d["I"][d["mask"]])
d["sI_dil"] = np.sqrt(np.sum(d["sI"][d["mask"]] ** 2))
d["Q_dil"] = np.sum(d["Q"][d["mask"]])
d["sQ_dil"] = np.sqrt(np.sum(d["sQ"][d["mask"]] ** 2))
d["U_dil"] = np.sum(d["U"][d["mask"]])
d["sU_dil"] = np.sqrt(np.sum(d["sU"][d["mask"]] ** 2))
d["P_dil"] = np.sqrt(d["Q_dil"] ** 2 + d["U_dil"] ** 2) / d["I_dil"]
d["sP_dil"] = (
np.sqrt(
(d["Q_dil"] ** 2 * d["sQ_dil"] ** 2 + d["U_dil"] ** 2 * d["sU_dil"] ** 2) / (d["Q_dil"] ** 2 + d["U_dil"] ** 2)
+ ((d["Q_dil"] / d["I_dil"]) ** 2 + (d["U_dil"] / d["I_dil"]) ** 2) * d["sI_dil"] ** 2
)
/ d["I_dil"]
)
d["d_P_dil"] = np.sqrt(d["P_dil"] ** 2 - d["sP_dil"] ** 2)
d["PA_dil"] = princ_angle((90.0 / np.pi) * np.arctan2(d["U_dil"], d["Q_dil"]))
d["sPA_dil"] = princ_angle(
(90.0 / (np.pi * (d["Q_dil"] ** 2 + d["U_dil"] ** 2))) * np.sqrt(d["Q_dil"] ** 2 * d["sU_dil"] ** 2 + d["U_dil"] ** 2 * d["sU_dil"] ** 2)
)
print(
"From this pipeline :\n",
"P = {0:.2f} ± {1:.2f} %\n".format(data_S["d_P_dil"] * 100.0, data_S["sP_dil"] * 100.0),
"PA = {0:.2f} ± {1:.2f} °".format(data_S["PA_dil"], data_S["sPA_dil"]),
)
print(
"From Kishimoto's pipeline :\n",
"P = {0:.2f} ± {1:.2f} %\n".format(data_K["d_P_dil"] * 100.0, data_K["sP_dil"] * 100.0),
"PA = {0:.2f} ± {1:.2f} °".format(data_K["PA_dil"], data_K["sPA_dil"]),
)
# compare different types of error
print(
"This pipeline : average sI/I={0:.2f} ; sQ/Q={1:.2f} ; sU/U={2:.2f} ; sP/P={3:.2f}".format(
np.mean(data_S["sI"][data_S["mask"]] / data_S["I"][data_S["mask"]]),
np.mean(data_S["sQ"][data_S["mask"]] / data_S["Q"][data_S["mask"]]),
np.mean(data_S["sU"][data_S["mask"]] / data_S["U"][data_S["mask"]]),
np.mean(data_S["sP"][data_S["mask"]] / data_S["P"][data_S["mask"]]),
)
)
print(
"Kishimoto's pipeline : average sI/I={0:.2f} ; sQ/Q={1:.2f} ; sU/U={2:.2f} ; sP/P={3:.2f}".format(
np.mean(data_K["sI"][data_S["mask"]] / data_K["I"][data_S["mask"]]),
np.mean(data_K["sQ"][data_S["mask"]] / data_K["Q"][data_S["mask"]]),
np.mean(data_K["sU"][data_S["mask"]] / data_K["U"][data_S["mask"]]),
np.mean(data_K["sP"][data_S["mask"]] / data_K["P"][data_S["mask"]]),
)
)
# for d, i in zip(["I", "Q", "U", "P", "PA", "sI", "sQ", "sU", "sP", "sPA"], [0, 1, 2, 5, 8, (3, 0, 0), (3, 1, 1), (3, 2, 2), 6, 9]):
# data_K[d] = np.loadtxt(path_join(root_dir_K, d + ".txt"))
# with fits.open(path_join(root_dir_data_S, filename_S)) as f:
# if not type(i) is int:
# data_S[d] = np.sqrt(f[i[0]].data[i[1], i[2]])
# else:
# data_S[d] = f[i].data
# if i == 0:
# header = f[i].header
# from Kishimoto's pipeline : IQU_dir, IQU_shift, IQU_stat, IQU_trans
# from my pipeline : raw_bg, raw_flat, raw_psf, raw_shift, raw_wav, IQU_dir
# but errors from my pipeline are propagated all along; how can the two be compared, then?
plt.show()

View File

@@ -20,25 +20,28 @@ def main(infile, P_cut=0.99, target=None, display="pf", output_dir=None):
output = []
Stokes = fits_open(infile)
-stkI = Stokes["I_STOKES"].data
stkI = Stokes["STOKES"].data[0]
s_I = np.sqrt(Stokes["STOKES_COV"].data[0, 0])
SNRi = np.zeros(stkI.shape)
SNRi[s_I > 0.0] = stkI[s_I > 0.0] / s_I[s_I > 0.0]
QN, UN, QN_ERR, UN_ERR = np.full((4, stkI.shape[0], stkI.shape[1]), np.nan)
for sflux, nflux in zip(
-[Stokes["Q_STOKES"].data, Stokes["U_STOKES"].data, np.sqrt(Stokes["IQU_COV_MATRIX"].data[1, 1]), np.sqrt(Stokes["IQU_COV_MATRIX"].data[2, 2])],
[Stokes["STOKES"].data[1], Stokes["STOKES"].data[2], np.sqrt(Stokes["STOKES_COV"].data[1, 1]), np.sqrt(Stokes["STOKES_COV"].data[2, 2])],
[QN, UN, QN_ERR, UN_ERR],
):
nflux[stkI > 0.0] = sflux[stkI > 0.0] / stkI[stkI > 0.0]
Stokesconf = PCconf(QN, UN, QN_ERR, UN_ERR)
-Stokesmask = Stokes["DATA_MASK"].data.astype(bool)
Stokesmask = np.logical_and(Stokes["DATA_MASK"].data.astype(bool), SNRi > 10.0)
Stokessnr = np.zeros(Stokesmask.shape)
Stokessnr[Stokes["POL_DEG_ERR"].data > 0.0] = (
Stokes["POL_DEG_DEBIASED"].data[Stokes["POL_DEG_ERR"].data > 0.0] / Stokes["POL_DEG_ERR"].data[Stokes["POL_DEG_ERR"].data > 0.0]
-)
) * Stokesmask[Stokes["POL_DEG_ERR"].data > 0.0]
if P_cut < 1.0:
Stokescentconf, Stokescenter = CenterConf(Stokesconf > P_cut, Stokes["POL_ANG"].data, Stokes["POL_ANG_ERR"].data)
else:
Stokescentconf, Stokescenter = CenterConf(Stokessnr > P_cut, Stokes["POL_ANG"].data, Stokes["POL_ANG_ERR"].data)
-Stokespos = WCS(Stokes[0].header).pixel_to_world(*Stokescenter)
Stokespos = WCS(Stokes[0].header).celestial.pixel_to_world(*Stokescenter)
if target is None:
target = Stokes[0].header["TARGNAME"]
@@ -77,10 +80,10 @@ if __name__ == "__main__":
parser = argparse.ArgumentParser(description="Look for the center of emission for a given reduced observation")
parser.add_argument("-t", "--target", metavar="targetname", required=False, help="the name of the target", type=str, default=None)
-parser.add_argument("-f", "--file", metavar="path", required=False, help="The full or relative path to the data product", type=str, default=None)
parser.add_argument("-f", "--file", metavar="path", required=True, help="The full or relative path to the data product", type=str, default=None)
-parser.add_argument("-c", "--pcut", metavar="pcut", required=False, help="The polarization cut for the data mask", type=float, default=0.99)
parser.add_argument("-c", "--pcut", metavar="pcut", required=False, help="The polarization cut for the data mask", type=float, default=3.0)
parser.add_argument("-d", "--display", metavar="display", required=False, help="The map on which to display info", type=str, default="pf")
-parser.add_argument("-o", "--output_dir", metavar="directory_path", required=False, help="output directory path for the plots", type=str, default="./data")
parser.add_argument("-o", "--output_dir", metavar="directory_path", required=False, help="output directory path for the plots", type=str, default=None)
args = parser.parse_args()
exitcode = main(infile=args.file, P_cut=args.pcut, target=args.target, display=args.display, output_dir=args.output_dir)
print("Written to: ", exitcode)

View File

@@ -10,7 +10,7 @@ from astropy.io import fits
from lib.plots import overplot_pol, overplot_radio
from matplotlib.colors import LogNorm
-Stokes_UV = fits.open("./data/IC5063/5918/IC5063_FOC_b0.10arcsec_c0.20arcsec.fits")
Stokes_UV = fits.open("./data/IC5063/5918/IC5063_5918_F502M_FOC_b0.10arcsec_c0.15arcsec.fits")
# Stokes_18GHz = fits.open("./data/IC5063/radio/IC5063_18GHz.fits")
# Stokes_24GHz = fits.open("./data/IC5063/radio/IC5063_24GHz.fits")
# Stokes_103GHz = fits.open("./data/IC5063/radio/IC5063_103GHz.fits")
@@ -47,10 +47,12 @@ Stokes_IR = fits.open("./data/IC5063/IR/u2e65g01t_c0f_rot.fits")
G = overplot_pol(Stokes_UV, Stokes_IR, cmap="inferno")
G.plot(
-P_cut=0.99,
P_cut=3,
-SNRi_cut=1.0,
SNRi_cut=10,
savename="./plots/IC5063/IR_overplot.pdf",
-scale_vec=None,
scale_vec=5,
norm=LogNorm(Stokes_IR[0].data.max() * Stokes_IR[0].header["photflam"] / 1e3, Stokes_IR[0].data.max() * Stokes_IR[0].header["photflam"]),
cmap="inferno",
disptype="pf",
levels="Default",
)

View File

@@ -11,9 +11,9 @@ from lib.plots import overplot_chandra, overplot_pol
from matplotlib.colors import LogNorm
Stokes_UV = fits.open("./data/MRK463E/5960/MRK463E_FOC_b0.05arcsec_c0.10arcsec.fits")
-# Stokes_IR = fits.open("./data/MRK463E/WFPC2/IR_rot_crop.fits")
Stokes_IR = fits.open("./data/MRK463E/WFPC2/IR_rot_crop.fits")
# Stokes_Xr = fits.open("./data/MRK463E/Chandra/X_ray_crop.fits")
-Radio = fits.open("./data/MRK463E/EMERLIN/Voorwerpjes_1356+1822_1356+1822_uniform-image.fits")
# Radio = fits.open("./data/MRK463E/EMERLIN/Voorwerpjes_1356+1822_1356+1822_uniform-image.fits")
# levels = np.geomspace(1.0, 99.0, 7)
@@ -21,26 +21,26 @@ Radio = fits.open("./data/MRK463E/EMERLIN/Voorwerpjes_1356+1822_1356+1822_unifor
# A.plot(levels=levels, P_cut=0.99, SNRi_cut=1.0, scale_vec=5, zoom=1, savename="./plots/MRK463E/Chandra_overplot.pdf")
# A.write_to(path1="./data/MRK463E/FOC_data_Chandra.fits", path2="./data/MRK463E/Chandra_data.fits", suffix="aligned")
-# levels = np.array([0.8, 2, 5, 10, 20, 50]) / 100.0 * Stokes_UV[0].header["photflam"]
levels = np.array([0.8, 2, 5, 10, 20, 50]) / 100.0 * Stokes_UV[0].header["photflam"]
-# B = overplot_pol(Stokes_UV, Stokes_IR, norm=LogNorm())
B = overplot_pol(Stokes_UV, Stokes_IR, norm=LogNorm())
-# B.plot(levels=levels, P_cut=0.99, SNRi_cut=1.0, scale_vec=5, norm=LogNorm(8.5e-18, 2.5e-15), savename="./plots/MRK463E/IR_overplot.pdf")
B.plot(levels=levels, P_cut=0.99, SNRi_cut=1.0, scale_vec=5, norm=LogNorm(8.5e-18, 2.5e-15), savename="./plots/MRK463E/IR_overplot.pdf")
-# B.write_to(path1="./data/MRK463E/FOC_data_WFPC.fits", path2="./data/MRK463E/WFPC_data.fits", suffix="aligned")
B.write_to(path1="./data/MRK463E/FOC_data_WFPC.fits", path2="./data/MRK463E/WFPC_data.fits", suffix="aligned")
# levels = np.array([0.8, 2, 5, 10, 20, 50]) / 100.0 * Stokes_UV[0].header["photflam"]
-levels = np.array([5, 10, 20, 50])
# levels = np.array([5, 10, 20, 50])
-C = overplot_pol(Stokes_UV, Radio, norm=LogNorm())
# C = overplot_pol(Stokes_UV, Radio, norm=LogNorm())
-C.other_im.set(norm=LogNorm(1e-4, 2e-2))
# C.other_im.set(norm=LogNorm(1e-4, 2e-2))
-C.plot(
# C.plot(
-levels=levels,
# levels=levels,
-P_cut=0.99,
# P_cut=0.99,
-SNRi_cut=1.0,
# SNRi_cut=1.0,
-step_vec=0,
# step_vec=0,
-scale_vec=3,
# scale_vec=3,
-norm=LogNorm(1e-4, 2e-2),
# norm=LogNorm(1e-4, 2e-2),
-cmap="inferno_r",
# cmap="inferno_r",
-width=0.5,
# width=0.5,
-linewidth=0.5,
# linewidth=0.5,
-disptype="snri",
# disptype="snri",
-savename="./plots/MRK463E/EMERLIN_snri_overplot.pdf",
# savename="./plots/MRK463E/EMERLIN_snri_overplot.pdf",
-)
# )
-C.write_to(path1="./data/MRK463E/FOC_data_EMERLIN.fits", path2="./data/MRK463E/EMERLIN_data.fits", suffix="aligned")
# C.write_to(path1="./data/MRK463E/FOC_data_EMERLIN.fits", path2="./data/MRK463E/EMERLIN_data.fits", suffix="aligned")