61 Commits

Author SHA1 Message Date
c8bbcc9d3b fix pol_map.set_data_mask and some keyword saving 2025-04-11 14:17:42 +02:00
4be32a54ac Merge branch 'display' of git.tibeuleu.xyz:Tibeuleu/FOC_Reduction into display
finish rebase display on main
2025-04-11 12:13:31 +02:00
c907700251 save raw flux in fits file and display
fix rebase display on main

fix rebase display on main

rebase display on main
2025-04-11 12:09:41 +02:00
a555cebb92 Save the raw total flux image as PrimaryHDU
fix for rebase display on main

fix rebase display on main
2025-04-11 12:09:16 +02:00
42526bac0a rollback CenterConf to simple CDF of chi2 for 2 dof 2025-04-11 12:08:16 +02:00
f69662add3 small improvements to ConfCenter and emission_center 2025-04-11 12:08:16 +02:00
c235167232 save raw flux in fits file and display
forward fixes to display with WCS
2025-04-11 12:07:38 +02:00
05bea15bbe Save the raw total flux image as PrimaryHDU
fast forward fixes to WCS

fix forward merging of display
2025-04-11 12:07:38 +02:00
716f683eb9 rollback CenterConf to simple CDF of chi2 for 2 dof 2025-04-11 12:06:44 +02:00
65abad6225 small improvements to ConfCenter and emission_center 2025-04-11 12:06:44 +02:00
5056f4e47a looking for displacement of WCS in pipeline
rebase display on main
2025-04-11 12:06:36 +02:00
88be9529d4 save raw flux in fits file and display
rebase display on main
2025-04-11 12:05:59 +02:00
4e9733b0df Save the raw total flux image as PrimaryHDU
rebase display on main
2025-04-11 12:02:13 +02:00
dec0c8171e save raw flux in fits file and display
fix rebase display on main

fix rebase display on main

rebase display on main
2025-04-11 11:59:09 +02:00
bd1a823dc2 Save the raw total flux image as PrimaryHDU
fix for rebase display on main

fix rebase display on main
2025-04-11 11:58:20 +02:00
ddf9a6e316 fix WCS computation, cdelt should not be sorted
fix rebase display on main
2025-04-11 11:58:20 +02:00
7a92b3ae70 rollback CenterConf to simple CDF of chi2 for 2 dof 2025-04-11 11:58:20 +02:00
729e60776f small improvements to ConfCenter and emission_center 2025-04-11 11:58:20 +02:00
aacc83f9c4 modifications to add raw image display 2025-04-11 11:58:20 +02:00
95b0cc4033 save raw flux in fits file and display
forward fixes to display with WCS
2025-04-11 11:58:20 +02:00
672a6cad45 Save the raw total flux image as PrimaryHDU
fast forward fixes to WCS

fix forward merging of display
2025-04-11 11:58:20 +02:00
df147d1731 rollback CenterConf to simple CDF of chi2 for 2 dof 2025-04-11 11:56:59 +02:00
3f5433ea52 small improvements to ConfCenter and emission_center 2025-04-11 11:56:59 +02:00
0087d18cba looking for displacement of WCS in pipeline 2025-04-11 11:56:59 +02:00
3da6cbcfaf save raw flux in fits file and display
fix rebase display on main

rebase display on main
2025-04-11 11:56:50 +02:00
c94d646b02 change histogram binning to numpy function 2025-04-11 11:54:28 +02:00
44256ba9a6 Save the raw total flux image as PrimaryHDU
fix rebase display on main

rebase display on main
2025-04-11 11:54:19 +02:00
89b01f58b5 fix pull rebase 2025-04-08 20:05:42 +02:00
fcbd5b8aec some more formatting
fix rebase display on main
2025-04-08 20:03:35 +02:00
77ce42bdd4 save raw flux in fits file and display
fix rebase display on main

fix rebase display on main
2025-04-08 20:02:38 +02:00
a7a6cbad1c Save the raw total flux image as PrimaryHDU
fix for rebase display on main

fix rebase display on main
2025-04-08 20:02:38 +02:00
d5d153e855 fix WCS computation, cdelt should not be sorted
fix rebase display on main
2025-04-08 20:02:38 +02:00
8a74291030 rollback CenterConf to simple CDF of chi2 for 2 dof 2025-04-08 20:02:38 +02:00
2f4dda688f small improvements to ConfCenter and emission_center 2025-04-08 20:02:38 +02:00
cebf4df8a4 modifications to add raw image display 2025-04-08 20:02:38 +02:00
4b104d79d4 save raw flux in fits file and display
forward fixes to display with WCS
2025-04-08 20:02:38 +02:00
e597d81e4f Save the raw total flux image as PrimaryHDU
fast forward fixes to WCS

fix forward merging of display
2025-04-08 20:02:38 +02:00
1193b2b091 fix WCS computation, cdelt should not be sorted
fix rebase display on main
2025-04-08 20:02:27 +02:00
8bfaee25fb rollback CenterConf to simple CDF of chi2 for 2 dof 2025-04-08 20:01:52 +02:00
e1409167b9 small improvements to ConfCenter and emission_center 2025-04-08 20:01:52 +02:00
6457b4ffe1 looking for displacement of WCS in pipeline 2025-04-08 20:01:52 +02:00
fb63b389e1 save raw flux in fits file and display
fix rebase display on main
2025-04-08 20:01:43 +02:00
e3a3abdb0d change histogram binning to numpy function 2025-04-08 19:56:12 +02:00
70035a9626 Save the raw total flux image as PrimaryHDU
fix rebase display on main
2025-04-08 19:55:53 +02:00
29786c2fa4 some more formatting 2025-04-08 19:48:46 +02:00
05f800b282 save raw flux in fits file and display
fix rebase display on main

fix rebase display on main
2025-04-07 17:45:48 +02:00
e9efd02e3f Save the raw total flux image as PrimaryHDU
fix for rebase display on main

fix rebase display on main
2025-04-07 17:43:39 +02:00
cdc19c42b4 fix error on WCS rotation that could mirror the image 2025-04-07 17:41:31 +02:00
9be8e28cc9 fix WCS computation, cdelt should not be sorted
fix rebase display on main
2025-04-07 17:41:13 +02:00
78eb00c486 rollback CenterConf to simple CDF of chi2 for 2 dof 2025-04-07 17:38:36 +02:00
40982f1fe0 small improvements to ConfCenter and emission_center 2025-04-07 17:38:36 +02:00
e40f6bcf64 modifications to add raw image display 2025-04-07 17:20:00 +02:00
427c4997a4 save raw flux in fits file and display
forward fixes to display with WCS
2025-04-01 17:35:12 +02:00
6387e23525 Save the raw total flux image as PrimaryHDU
fast forward fixes to WCS

fix forward merging of display
2025-04-01 17:33:58 +02:00
075dc4fd15 fix WCS computation, cdelt should not be sorted 2025-04-01 17:31:20 +02:00
84bbdb9634 rollback CenterConf to simple CDF of chi2 for 2 dof 2025-04-01 17:28:35 +02:00
b71aaa37e6 small improvements to ConfCenter and emission_center 2025-04-01 17:28:35 +02:00
71e9d89091 looking for displacement of WCS in pipeline 2025-04-01 16:20:40 +02:00
02f8d5213c save raw flux in fits file and display 2025-03-19 16:38:36 +01:00
dea36b2535 change histogram binning to numpy function 2025-03-19 16:38:03 +01:00
4ac47f8e3d Save the raw total flux image as PrimaryHDU 2025-03-14 14:30:30 +01:00
13 changed files with 1072 additions and 1504 deletions

View File

@@ -20,6 +20,7 @@ import lib.reduction as proj_red # Functions used in reduction pipeline
 import numpy as np
 from lib.utils import princ_angle, sci_not
 from matplotlib.colors import LogNorm
+from astropy.wcs import WCS

 def main(target=None, proposal_id=None, infiles=None, output_dir="./data", crop=False, interactive=False):
@@ -41,11 +42,11 @@ def main(target=None, proposal_id=None, infiles=None, output_dir="./data", crop=
     # Background estimation
     error_sub_type = "scott"  # sqrt, sturges, rice, scott, freedman-diaconis (default) or shape (example (51, 51))
-    subtract_error = 0.80
+    subtract_error = 1.50
-    display_bkg = False
+    display_bkg = True
     # Data binning
-    pxsize = 0.10
+    pxsize = 0.05
     pxscale = "arcsec"  # pixel, arcsec or full
     rebin_operation = "sum"  # sum or average
@@ -59,17 +60,18 @@ def main(target=None, proposal_id=None, infiles=None, output_dir="./data", crop=
     # Smoothing
     smoothing_function = "combine"  # gaussian_after, weighted_gaussian_after, gaussian, weighted_gaussian or combine
-    smoothing_FWHM = 0.15  # If None, no smoothing is done
+    smoothing_FWHM = 0.075  # If None, no smoothing is done
     smoothing_scale = "arcsec"  # pixel or arcsec
     # Rotation
+    rotate_data = False
     rotate_North = True
     # Polarization map output
     P_cut = 3  # if >=1.0 cut on the signal-to-noise else cut on the confidence level in Q, U
-    SNRi_cut = 10.0  # I measurements with SNR>30, which implies an uncertainty in P of 4.7%.
+    SNRi_cut = 1.0  # I measurements with SNR>30, which implies an uncertainty in P of 4.7%.
     flux_lim = None  # lowest and highest flux displayed on plot, defaults to bkg and maximum in cut if None
-    scale_vec = 3
+    scale_vec = 5
     step_vec = 1  # plot all vectors in the array; if step_vec = 2, then every other vector will be plotted; if step_vec = 0 then all vectors are displayed at full length
     # Pipeline start
@@ -117,9 +119,10 @@ def main(target=None, proposal_id=None, infiles=None, output_dir="./data", crop=
     figtype = "_".join([figtype, "not_aligned"] if figtype != "" else ["not_aligned"])
     # Crop data to remove outside blank margins.
-    data_array, error_array, data_mask, headers = proj_red.crop_array(
-        data_array, headers, step=5, null_val=0.0, crop=True, inside=True, display=display_crop, savename=figname, plots_folder=plots_folder
+    data_array, error_array, headers = proj_red.crop_array(
+        data_array, headers, step=5, null_val=0.0, inside=True, display=display_crop, savename=figname, plots_folder=plots_folder
     )
+    data_mask = np.ones(data_array[0].shape, dtype=bool)
     # Deconvolve data using Richardson-Lucy iterative algorithm with a gaussian PSF of given FWHM.
     if deconvolve:
@@ -141,7 +144,7 @@ def main(target=None, proposal_id=None, infiles=None, output_dir="./data", crop=
     )
     # Rotate data to have same orientation
-    rotate_data = np.unique([np.round(float(head["ORIENTAT"]), 3) for head in headers]).size != 1
+    rotate_data = rotate_data or np.unique([np.round(float(head["ORIENTAT"]), 3) for head in headers]).size != 1
     if rotate_data:
         ang = np.mean([head["ORIENTAT"] for head in headers])
         for head in headers:
@@ -157,7 +160,6 @@ def main(target=None, proposal_id=None, infiles=None, output_dir="./data", crop=
             vmin=data_array[data_array > 0.0].min() * headers[0]["photflam"], vmax=data_array[data_array > 0.0].max() * headers[0]["photflam"]
         ),
     )
-
     # Align and rescale images with oversampling.
     data_array, error_array, headers, data_mask, shifts, error_shifts = proj_red.align_data(
         data_array,
@@ -181,6 +183,14 @@ def main(target=None, proposal_id=None, infiles=None, output_dir="./data", crop=
         norm=LogNorm(vmin=data_array[data_array > 0.0].min() * headers[0]["photflam"], vmax=data_array[data_array > 0.0].max() * headers[0]["photflam"]),
     )
+    flux_data, flux_error, flux_mask, flux_head = (
+        deepcopy(data_array.sum(axis=0)),
+        deepcopy(np.sqrt(np.sum(error_array**2, axis=0))),
+        deepcopy(data_mask),
+        deepcopy(headers[0]),
+    )
+    flux_head["EXPTIME"] = np.sum([head["EXPTIME"] for head in headers])
+
     # Rebin data to desired pixel size.
     if (pxsize is not None) and not (pxsize == 1 and pxscale.lower() in ["px", "pixel", "pixels"]):
         data_array, error_array, headers, Dxy, data_mask = proj_red.rebin_array(
@@ -197,32 +207,58 @@ def main(target=None, proposal_id=None, infiles=None, output_dir="./data", crop=
         norm=LogNorm(vmin=data_array[data_array > 0.0].min() * headers[0]["photflam"], vmax=data_array[data_array > 0.0].max() * headers[0]["photflam"]),
     )
+    background = np.array([np.array(bkg).reshape(1, 1) for bkg in background])
+    background_error = np.array(
+        [
+            np.array(
+                np.sqrt(
+                    (bkg - background[np.array([h["filtnam1"] == head["filtnam1"] for h in headers], dtype=bool)].mean()) ** 2
+                    / np.sum([h["filtnam1"] == head["filtnam1"] for h in headers])
+                )
+            ).reshape(1, 1)
+            for bkg, head in zip(background, headers)
+        ]
+    )
     # Step 2:
     # Compute Stokes I, Q, U with smoothed polarized images
     # SMOOTHING DISCUSSION :
     # FWHM of FOC have been estimated at about 0.03" across 1500-5000 Angstrom band, which is about 2 detector pixels wide
     # see Jedrzejewski, R.; Nota, A.; Hack, W. J., A Comparison Between FOC and WFPC2
     # Bibcode : 1995chst.conf...10J
-    Stokes, Stokes_cov, header_stokes, Stokes_cov_stat = proj_red.compute_Stokes(
+    I_stokes, Q_stokes, U_stokes, Stokes_cov, header_stokes = proj_red.compute_Stokes(
         data_array, error_array, data_mask, headers, FWHM=smoothing_FWHM, scale=smoothing_scale, smoothing=smoothing_function, transmitcorr=transmitcorr
     )
+    I_bkg, Q_bkg, U_bkg, S_cov_bkg, header_bkg = proj_red.compute_Stokes(
+        background, background_error, np.array(True).reshape(1, 1), headers, FWHM=None, scale=smoothing_scale, smoothing=smoothing_function, transmitcorr=False
+    )
     # Step 3:
     # Rotate images to have North up
     if rotate_North:
-        Stokes, Stokes_cov, data_mask, header_stokes, Stokes_cov_stat = proj_red.rotate_Stokes(
-            Stokes, Stokes_cov, data_mask, header_stokes, Stokes_cov_stat=Stokes_cov_stat, SNRi_cut=None
+        I_stokes, Q_stokes, U_stokes, Stokes_cov, data_mask, header_stokes = proj_red.rotate_Stokes(
+            I_stokes, Q_stokes, U_stokes, Stokes_cov, data_mask, header_stokes, SNRi_cut=None
         )
+        I_bkg, Q_bkg, U_bkg, S_cov_bkg, data_mask_bkg, header_bkg = proj_red.rotate_Stokes(
+            I_bkg, Q_bkg, U_bkg, S_cov_bkg, np.array(True).reshape(1, 1), header_bkg, SNRi_cut=None
+        )
+        flux_data, flux_error, flux_mask, flux_head = proj_red.rotate_data(np.array([flux_data]), np.array([flux_error]), flux_mask, [flux_head])
+        flux_data, flux_error, flux_head = flux_data[0], flux_error[0], flux_head[0]
+    elif not rotate_data:
+        figname += "_noROT"
     # Compute polarimetric parameters (polarization degree and angle).
-    P, debiased_P, s_P, s_P_P, PA, s_PA, s_PA_P = proj_red.compute_pol(Stokes, Stokes_cov, header_stokes, Stokes_cov_stat=Stokes_cov_stat)
+    P, debiased_P, s_P, s_P_P, PA, s_PA, s_PA_P = proj_red.compute_pol(I_stokes, Q_stokes, U_stokes, Stokes_cov, header_stokes)
+    P_bkg, debiased_P_bkg, s_P_bkg, s_P_P_bkg, PA_bkg, s_PA_bkg, s_PA_P_bkg = proj_red.compute_pol(I_bkg, Q_bkg, U_bkg, S_cov_bkg, header_bkg)
     # Step 4:
     # Save image to FITS.
-    figname = "_".join([figname, figtype]) if figtype != "" else figname
+    savename = "_".join([figname, figtype]) if figtype != "" else figname
     Stokes_hdul = proj_fits.save_Stokes(
-        Stokes,
+        I_stokes,
+        Q_stokes,
+        U_stokes,
         Stokes_cov,
-        Stokes_cov_stat,
         P,
         debiased_P,
         s_P,
@@ -232,36 +268,41 @@ def main(target=None, proposal_id=None, infiles=None, output_dir="./data", crop=
         s_PA_P,
         header_stokes,
         data_mask,
-        figname,
+        savename,
         data_folder=data_folder,
         return_hdul=True,
+        flux_data=np.array([flux_data, flux_error, flux_mask]),
+        flux_head=flux_head,
     )
     outfiles.append("/".join([data_folder, Stokes_hdul[0].header["FILENAME"] + ".fits"]))
     # Step 5:
     # crop to desired region of interest (roi)
     if crop:
-        figname += "_crop"
+        savename += "_crop"
         stokescrop = proj_plots.crop_Stokes(deepcopy(Stokes_hdul), norm=LogNorm())
         stokescrop.crop()
         stokescrop.write_to("/".join([data_folder, figname + ".fits"]))
         Stokes_hdul, header_stokes = stokescrop.hdul_crop, stokescrop.hdul_crop[0].header
-        outfiles.append("/".join([data_folder, Stokes_hdul[0].header["FILENAME"] + ".fits"]))
+        outfiles.append("/".join([data_folder, Stokes_hdul["I_STOKES"].header["FILENAME"] + ".fits"]))
     data_mask = Stokes_hdul["data_mask"].data.astype(bool)
     print(
         "F_int({0:.0f} Angs) = ({1} ± {2})e{3} ergs.cm^-2.s^-1.Angs^-1".format(
-            header_stokes["PHOTPLAM"],
-            *sci_not(
-                Stokes_hdul["STOKES"].data[0][data_mask].sum() * header_stokes["PHOTFLAM"],
-                np.sqrt(Stokes_hdul["STOKES_COV"].data[0, 0][data_mask].sum()) * header_stokes["PHOTFLAM"],
-                2,
-                out=int,
-            ),
+            flux_head["PHOTPLAM"],
+            *sci_not(flux_data[flux_mask].sum() * flux_head["PHOTFLAM"], np.sqrt(np.sum(flux_error[flux_mask] ** 2)) * flux_head["PHOTFLAM"], 2, out=int),
         )
     )
     print("P_int = {0:.1f} ± {1:.1f} %".format(header_stokes["p_int"] * 100.0, np.ceil(header_stokes["sP_int"] * 1000.0) / 10.0))
     print("PA_int = {0:.1f} ± {1:.1f} °".format(princ_angle(header_stokes["pa_int"]), princ_angle(np.ceil(header_stokes["sPA_int"] * 10.0) / 10.0)))
+    # Background values
+    print(
+        "F_bkg({0:.0f} Angs) = ({1} ± {2})e{3} ergs.cm^-2.s^-1.Angs^-1".format(
+            header_stokes["photplam"], *sci_not(I_bkg[0, 0] * header_stokes["photflam"], np.sqrt(S_cov_bkg[0, 0][0, 0]) * header_stokes["photflam"], 2, out=int)
+        )
+    )
+    print("P_bkg = {0:.1f} ± {1:.1f} %".format(debiased_P_bkg[0, 0] * 100.0, np.ceil(s_P_bkg[0, 0] * 1000.0) / 10.0))
+    print("PA_bkg = {0:.1f} ± {1:.1f} °".format(princ_angle(PA_bkg[0, 0]), princ_angle(np.ceil(s_PA_bkg[0, 0] * 10.0) / 10.0)))
     # Plot polarization map (Background is either total Flux, Polarization degree or Polarization degree error).
     if pxscale.lower() not in ["full", "integrate"] and not interactive:
         proj_plots.polarization_map(
@@ -272,12 +313,12 @@ def main(target=None, proposal_id=None, infiles=None, output_dir="./data", crop=
             flux_lim=flux_lim,
             step_vec=step_vec,
             scale_vec=scale_vec,
-            savename="_".join([figname]),
+            savename=figname,
             plots_folder=plots_folder,
         )
         for figtype, figsuffix in zip(
-            ["Intensity", "Pol_flux", "Pol_deg", "Pol_ang", "I_err", "P_err", "SNRi", "SNRp", "confp"],
-            ["I", "P_flux", "P", "PA", "I_err", "P_err", "SNRi", "SNRp", "confP"],
+            ["FluxDensity", "Intensity", "Pol_flux", "Pol_deg", "Pol_ang", "I_err", "P_err", "SNRi", "SNRp", "confp"],
+            ["F", "I", "P_flux", "P", "PA", "I_err", "P_err", "SNRi", "SNRp", "confP"],
         ):
             try:
                 proj_plots.polarization_map(
@@ -288,7 +329,7 @@ def main(target=None, proposal_id=None, infiles=None, output_dir="./data", crop=
                     flux_lim=flux_lim,
                     step_vec=step_vec,
                     scale_vec=scale_vec,
-                    savename="_".join([figname, figsuffix]),
+                    savename="_".join([savename, figsuffix]),
                     plots_folder=plots_folder,
                     display=figtype,
                 )
@@ -296,7 +337,7 @@ def main(target=None, proposal_id=None, infiles=None, output_dir="./data", crop=
                 pass
     elif not interactive:
         proj_plots.polarization_map(
-            deepcopy(Stokes_hdul), data_mask, P_cut=P_cut, SNRi_cut=SNRi_cut, savename=figname, plots_folder=plots_folder, display="integrate"
+            deepcopy(Stokes_hdul), data_mask, P_cut=P_cut, SNRi_cut=SNRi_cut, savename=savename, plots_folder=plots_folder, display="integrate"
        )
     elif pxscale.lower() not in ["full", "integrate"]:
         proj_plots.pol_map(Stokes_hdul, P_cut=P_cut, SNRi_cut=SNRi_cut, step_vec=step_vec, scale_vec=scale_vec, flux_lim=flux_lim)
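Note on the interface change above: compute_Stokes now hands back I, Q, U as separate 2D images, and compute_pol consumes them directly. The polarization degree and angle follow the standard relations P = sqrt(Q^2 + U^2) / I and PA = 0.5 * arctan2(U, Q). The snippet below is a minimal sketch of that step only; the pipeline's own compute_pol additionally returns a debiased P and errors propagated from Stokes_cov, which this stand-in does not reproduce.

import numpy as np

def stokes_to_pol(I_stokes, Q_stokes, U_stokes):
    # Polarization degree P = sqrt(Q^2 + U^2) / I, defined where I > 0
    with np.errstate(divide="ignore", invalid="ignore"):
        P = np.where(I_stokes > 0.0, np.sqrt(Q_stokes**2 + U_stokes**2) / I_stokes, 0.0)
    # Polarization angle PA = 0.5 * arctan2(U, Q), in degrees
    PA = 0.5 * np.degrees(np.arctan2(U_stokes, Q_stokes))
    return P, PA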

View File

@@ -1,2 +1,2 @@
 from .lib import *
-from . import FOC_reduction
+from .FOC_reduction import main

View File

@@ -18,7 +18,6 @@ import matplotlib.dates as mdates
 import matplotlib.pyplot as plt
 import numpy as np
 from astropy.time import Time
-from lib.plots import plot_obs
 from matplotlib.colors import LogNorm
 from matplotlib.patches import Rectangle
 from scipy.optimize import curve_fit
@@ -45,14 +44,14 @@ def display_bkg(data, background, std_bkg, headers, histograms=None, binning=Non
     date_time = np.array([datetime.strptime(d, "%Y-%m-%d %H:%M:%S") for d in date_time])
     date_err = np.array([timedelta(seconds=headers[i]["exptime"] / 2.0) for i in range(len(headers))])
     filt = np.array([headers[i]["filtnam1"] for i in range(len(headers))])
-    dict_filt = {"POL0": "yo", "POL60": "bv", "POL120": "rs"}
+    dict_filt = {"POL0": "r", "POL60": "g", "POL120": "b"}
-    c_filt = np.array([dict_filt[f][0] for f in filt])
+    c_filt = np.array([dict_filt[f] for f in filt])
-    fig, ax = plt.subplots(figsize=(10, 4), constrained_layout=True)
+    fig, ax = plt.subplots(figsize=(10, 6), constrained_layout=True)
-    ax.errorbar(date_time, background * convert_flux, xerr=date_err, yerr=std_bkg * convert_flux, fmt="+k", markersize=0, ecolor=c_filt)
     for f in np.unique(filt):
         mask = [fil == f for fil in filt]
-        ax.scatter(date_time[mask], background[mask] * convert_flux[mask], color=dict_filt[f][0], marker=dict_filt[f][1], label="{0:s}".format(f))
+        ax.scatter(date_time[mask], background[mask] * convert_flux[mask], color=dict_filt[f], label="{0:s}".format(f))
+    ax.errorbar(date_time, background * convert_flux, xerr=date_err, yerr=std_bkg * convert_flux, fmt="+k", markersize=0, ecolor=c_filt)
     # Date handling
     locator = mdates.AutoDateLocator()
     formatter = mdates.ConciseDateFormatter(locator)
@@ -69,7 +68,7 @@ def display_bkg(data, background, std_bkg, headers, histograms=None, binning=Non
         this_savename += "_background_flux.pdf"
     else:
         this_savename = savename[:-4] + "_background_flux" + savename[-4:]
-    fig.savefig(path_join(plots_folder, this_savename), bbox_inches="tight", facecolor="None", edgecolor="None")
+    fig.savefig(path_join(plots_folder, this_savename), bbox_inches="tight")
     if histograms is not None:
         filt_obs = {"POL0": 0, "POL60": 0, "POL120": 0}
@@ -93,10 +92,8 @@ def display_bkg(data, background, std_bkg, headers, histograms=None, binning=Non
                 max(xmax, np.max(np.array(bins)[np.array(hist) > 5e1]) * convert_flux[0]),
             )
             if coeff is not None:
-                if len(coeff[i]) == 7:
-                    ax_h.plot(bins * convert_flux[i], gausspol(bins, *coeff[i]), "--", color="C{0:d}".format(i), alpha=0.8)
-                elif len(coeff[i]) == 3:
-                    ax_h.plot(bins * convert_flux[i], gauss(bins, *coeff[i]), "--", color="C{0:d}".format(i), alpha=0.8)
+                # ax_h.plot(bins*convert_flux[i], gausspol(bins, *coeff[i]), '--', color="C{0:d}".format(i), alpha=0.8)
+                ax_h.plot(bins * convert_flux[i], gauss(bins, *coeff[i]), "--", color="C{0:d}".format(i), alpha=0.8)
         ax_h.set_xscale("log")
         ax_h.set_yscale("log")
         ax_h.set_ylim([5e1, np.max([hist.max() for hist in histograms])])
@@ -111,7 +108,7 @@ def display_bkg(data, background, std_bkg, headers, histograms=None, binning=Non
             this_savename += "_histograms.pdf"
         else:
             this_savename = savename[:-4] + "_histograms" + savename[-4:]
-        fig_h.savefig(path_join(plots_folder, this_savename), bbox_inches="tight", facecolor="None", edgecolor="None")
+        fig_h.savefig(path_join(plots_folder, this_savename), bbox_inches="tight")
     fig2, ax2 = plt.subplots(figsize=(10, 10))
     data0 = data[0] * convert_flux[0]
@@ -138,13 +135,15 @@ def display_bkg(data, background, std_bkg, headers, histograms=None, binning=Non
     fig2.subplots_adjust(hspace=0, wspace=0, right=1.0)
     fig2.colorbar(im2, ax=ax2, location="right", shrink=0.60, aspect=50, pad=0.025, label=r"Flux [$ergs \cdot cm^{-2} \cdot s^{-1} \cdot \AA^{-1}$]")
+    from .plots import plot_obs
     if savename is not None:
         this_savename = deepcopy(savename)
         if savename[-4:] not in [".png", ".jpg", ".pdf"]:
             this_savename += "_" + filt + "_background_location.pdf"
         else:
             this_savename = savename[:-4] + "_" + filt + "_background_location" + savename[-4:]
-        fig2.savefig(path_join(plots_folder, this_savename), bbox_inches="tight", facecolor="None", edgecolor="None")
+        fig2.savefig(path_join(plots_folder, this_savename), bbox_inches="tight")
     if rectangle is not None:
         plot_obs(
             data,
@@ -280,7 +279,7 @@ def bkg_fit(data, error, mask, headers, subtract_error=True, display=False, save
     return n_data_array, n_error_array, headers, background
-def bkg_hist(data, error, mask, headers, sub_type=None, subtract_error=True, display=False, savename=None, plots_folder=""):
+def bkg_hist(data, error, mask, headers, n_bins=None, subtract_error=True, display=False, savename=None, plots_folder=""):
     """
     ----------
     Inputs:
@@ -335,29 +334,15 @@ def bkg_hist(data, error, mask, headers, sub_type=None, subtract_error=True, dis
     for i, image in enumerate(data):
         # Compute the Count-rate histogram for the image
         n_mask = np.logical_and(mask, image > 0.0)
-        if sub_type is not None:
-            if isinstance(sub_type, int):
-                n_bins = sub_type
-            elif sub_type.lower() in ["square-root", "squareroot", "sqrt"]:
-                n_bins = np.fix(np.sqrt(image[n_mask].size)).astype(int)  # Square-root
-            elif sub_type.lower() in ["sturges"]:
-                n_bins = np.ceil(np.log2(image[n_mask].size)).astype(int) + 1  # Sturges
-            elif sub_type.lower() in ["rice"]:
-                n_bins = 2 * np.fix(np.power(image[n_mask].size, 1 / 3)).astype(int)  # Rice
-            elif sub_type.lower() in ["freedman-diaconis", "freedmandiaconis", "freedman", "diaconis"]:
-                n_bins = np.fix(
-                    (image[n_mask].max() - image[n_mask].min())
-                    / (2 * np.subtract(*np.percentile(image[n_mask], [75, 25])) / np.power(image[n_mask].size, 1 / 3))
-                ).astype(int)  # Freedman-Diaconis
-            else:  # Fallback
-                n_bins = np.fix((image[n_mask].max() - image[n_mask].min()) / (3.5 * image[n_mask].std() / np.power(image[n_mask].size, 1 / 3))).astype(
-                    int
-                )  # Scott
-        else:  # Default statistic
-            n_bins = np.fix((image[n_mask].max() - image[n_mask].min()) / (3.5 * image[n_mask].std() / np.power(image[n_mask].size, 1 / 3))).astype(
-                int
-            )  # Scott
+        if not isinstance(n_bins, int) and n_bins not in ["auto", "fd", "doane", "scott", "stone", "rice", "sturges", "sqrt"]:
+            match n_bins.lower():
+                case "square-root" | "squareroot":
+                    n_bins = "sqrt"
+                case "freedman-diaconis" | "freedmandiaconis":
+                    n_bins = "fd"
+                case _:
+                    n_bins = "scott"
         hist, bin_edges = np.histogram(np.log(image[n_mask]), bins=n_bins)
         histograms.append(hist)
         binning.append(np.exp(bin_centers(bin_edges)))
@@ -366,8 +351,8 @@ def bkg_hist(data, error, mask, headers, sub_type=None, subtract_error=True, dis
         bins_stdev = binning[-1][hist > hist.max() / 2.0]
         stdev = bins_stdev[-1] - bins_stdev[0]
         # p0 = [hist.max(), binning[-1][np.argmax(hist)], stdev, 1e-3, 1e-3, 1e-3, 1e-3]
-        # popt, pcov = curve_fit(gausspol, binning[-1], hist, p0=p0)
         p0 = [hist.max(), binning[-1][np.argmax(hist)], stdev]
+        # popt, pcov = curve_fit(gausspol, binning[-1], hist, p0=p0)
         popt, pcov = curve_fit(gauss, binning[-1], hist, p0=p0)
         coeff.append(popt)
         bkg = popt[1] + np.abs(popt[2]) * subtract_error
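The rewrite of bkg_hist above delegates bin counting to numpy: the match statement only maps the legacy rule names ("square-root", "freedman-diaconis", ...) onto strings np.histogram accepts natively, which is what the "change histogram binning to numpy function" commits refer to. A minimal sketch of the resulting flow, with synthetic data and an illustrative gauss model standing in for the module's own:

import numpy as np
from scipy.optimize import curve_fit

def gauss(x, a, mu, sigma):
    # Stand-in for the Gaussian model bkg_hist fits to the histogram peak
    return a * np.exp(-((x - mu) ** 2) / (2.0 * sigma**2))

rng = np.random.default_rng(0)
image = rng.lognormal(mean=-2.0, sigma=0.3, size=10000)  # synthetic positive count rates

# numpy resolves the bin count by rule name, replacing the hand-written estimators
hist, bin_edges = np.histogram(np.log(image), bins="scott")  # "sqrt", "sturges", "rice", "fd" also work
binning = np.exp(0.5 * (bin_edges[:-1] + bin_edges[1:]))  # bin centers back in linear units

# Fit the peak to locate the background level, as bkg_hist does
p0 = [hist.max(), binning[np.argmax(hist)], binning.std()]
popt, _ = curve_fit(gauss, binning, hist, p0=p0)
bkg = popt[1] + np.abs(popt[2]) * 1.50  # mode + subtract_error * sigma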

View File

@@ -16,7 +16,7 @@ from astropy.io import fits
 from astropy.wcs import WCS
 from .convex_hull import clean_ROI
-from .utils import wcs_CD_to_PC, wcs_PA, add_stokes_axis_to_header, remove_stokes_axis_from_header
+from .utils import wcs_CD_to_PC, wcs_PA

 def get_obs_data(infiles, data_folder="", compute_flux=False):
@@ -47,9 +47,9 @@ def get_obs_data(infiles, data_folder="", compute_flux=False):
         wcs_array.append(WCS(header=f[0].header, fobj=f).celestial)
         f.flush()
         # Save pixel area for flux density computation
-        if headers[i]["PXFORMT"] == "NORMAL":
+        if "PXFORMT" in headers[i].keys() and headers[i]["PXFORMT"] == "NORMAL":
             headers[i]["PXAREA"] = 1.96e-4  # 14x14 milliarcsec squared pixel area in arcsec^2
-        elif headers[i]["PXFORMT"] == "ZOOM":
+        elif "PXFORMT" in headers[i].keys() and headers[i]["PXFORMT"] == "ZOOM":
             headers[i]["PXAREA"] = 4.06e-4  # 29x14 milliarcsec squared pixel area in arcsec^2
         else:
             headers[i]["PXAREA"] = 1.0  # unknown default to 1 arcsec^2
@@ -89,11 +89,11 @@ def get_obs_data(infiles, data_folder="", compute_flux=False):
     # force WCS for POL60 to have same pixel size as POL0 and POL120
     is_pol60 = np.array([head["filtnam1"].lower() == "pol60" for head in headers], dtype=bool)
-    cdelt = np.round(np.abs(np.array([WCS(head).wcs.cdelt[:2] for head in headers])), 10)
+    cdelt = np.round(np.array([WCS(head).wcs.cdelt[:2] for head in headers]), 10)
-    if np.unique(cdelt[np.logical_not(is_pol60)], axis=0).size != 2:
+    if np.any(is_pol60) and np.unique(cdelt[np.logical_not(is_pol60)], axis=0).size != 2:
         print(np.unique(cdelt[np.logical_not(is_pol60)], axis=0))
         raise ValueError("Not all images have same pixel size")
-    else:
+    elif np.any(is_pol60):
         for i in np.arange(len(headers))[is_pol60]:
             headers[i]["cdelt1"], headers[i]["cdelt2"] = np.unique(cdelt[np.logical_not(is_pol60)], axis=0)[0]
@@ -106,21 +106,37 @@ def get_obs_data(infiles, data_folder="", compute_flux=False):
 def save_Stokes(
-    Stokes, Stokes_cov, Stokes_cov_stat, P, debiased_P, s_P, s_P_P, PA, s_PA, s_PA_P, header_stokes, data_mask, filename, data_folder="", return_hdul=False
+    I_stokes,
+    Q_stokes,
+    U_stokes,
+    Stokes_cov,
+    P,
+    debiased_P,
+    s_P,
+    s_P_P,
+    PA,
+    s_PA,
+    s_PA_P,
+    header_stokes,
+    data_mask,
+    filename,
+    data_folder="",
+    return_hdul=False,
+    flux_data=None,
+    flux_head=None,
 ):
     """
     Save computed polarimetry parameters to a single fits file,
     updating header accordingly.
     ----------
     Inputs:
-    Stokes, P, debiased_P, s_P, s_P_P, PA, s_PA, s_PA_P : numpy.ndarray
-        Stokes cube (3D float arrays) containing the computed polarimetric data :
-        Stokes parameters I, Q, U, V, Polarization degree and debiased,
+    I_stokes, Q_stokes, U_stokes, P, debiased_P, s_P, s_P_P, PA, s_PA, s_PA_P : numpy.ndarray
+        Images (2D float arrays) containing the computed polarimetric data :
+        Stokes parameters I, Q, U, Polarization degree and debiased,
         its error propagated and assuming Poisson noise, Polarization angle,
         its error propagated and assuming Poisson noise.
-    Stokes_cov, Stokes_cov_stat : numpy.ndarray
-        Covariance matrix of the Stokes parameters I, Q, U, V and its statistical
-        uncertainties part.
+    Stokes_cov : numpy.ndarray
+        Covariance matrix of the Stokes parameters I, Q, U.
     headers : header list
         Header of reference some keywords will be copied from (CRVAL, CDELT,
         INSTRUME, PROPOSID, TARGNAME, ORIENTAT, EXPTOT).
@@ -138,30 +154,23 @@ def save_Stokes(
     ----------
     Return:
     hdul : astropy.io.fits.hdu.hdulist.HDUList
-        HDUList containing the Stokes cube in the PrimaryHDU, then Stokes_cov,
-        Stokes_cov_stat, P, s_P, PA, s_PA in this order. Headers have been updated
-        to relevant information (WCS, orientation, data_type).
+        HDUList containing I_stokes in the PrimaryHDU, then Q_stokes, U_stokes,
+        P, s_P, PA, s_PA in this order. Headers have been updated to relevant
+        information (WCS, orientation, data_type).
         Only returned if return_hdul is True.
     """
     # Create new WCS object given the modified images
-    wcs = WCS(header_stokes).deepcopy()
-    new_wcs = WCS(header_stokes).celestial.deepcopy()
-    header = wcs.to_header().copy()
-    header["NAXIS"] = header_stokes["NAXIS"]
-    for i in range(wcs.wcs.naxis):
-        header["NAXIS%d" % (i + 1)] = header_stokes["NAXIS%d" % (i + 1)]
-    header = remove_stokes_axis_from_header(header).copy() if header_stokes["NAXIS"] > 2 else header
+    new_wcs = WCS(header_stokes).deepcopy()
     if data_mask.shape != (1, 1):
         vertex = clean_ROI(data_mask)
         shape = vertex[1::2] - vertex[0::2]
         new_wcs.array_shape = shape
         new_wcs.wcs.crpix = np.array(new_wcs.wcs.crpix) - vertex[0::-2]
-        for key, val in list(new_wcs.to_header().items()) + [("NAXIS", 2), ("NAXIS1", new_wcs.array_shape[1]), ("NAXIS2", new_wcs.array_shape[0])]:
-            header[key] = val
-    header["TELESCOP"] = (header_stokes["TELESCOP"] if "TELESCOP" in list(header_stokes.keys()) else "HST", "Telescope used to acquire data")
-    header["INSTRUME"] = (header_stokes["INSTRUME"] if "INSTRUME" in list(header_stokes.keys()) else "FOC", "identifier for instrument used to acquire data")
+    header = new_wcs.to_header()
+    header["TELESCOP"] = (header_stokes["TELESCOP"] if "TELESCOP" in list(header_stokes.keys()) else "HST", "telescope used to acquire data")
+    header["INSTRUME"] = (header_stokes["INSTRUME"] if "INSTRUME" in list(header_stokes.keys()) else "FOC", "identifier for instrument used to acquire data")
     header["PHOTPLAM"] = (header_stokes["PHOTPLAM"], "Pivot Wavelength")
     header["PHOTBW"] = (header_stokes["PHOTBW"], "RMS Bandwidth of the Filter and Detector")
     header["PHOTFLAM"] = (header_stokes["PHOTFLAM"], "Inverse Sensitivity in DN/sec/cm**2/Angst")
@@ -170,7 +179,7 @@ def save_Stokes(
     header["PROPOSID"] = (header_stokes["PROPOSID"], "PEP proposal identifier for observation")
     header["TARGNAME"] = (header_stokes["TARGNAME"], "Target name")
     header["ORIENTAT"] = (header_stokes["ORIENTAT"], "Angle between North and the y-axis of the image")
-    header["FILENAME"] = (filename, "ORIGINAL FILENAME")
+    header["FILENAME"] = (filename, "Original filename")
     header["BKG_TYPE"] = (header_stokes["BKG_TYPE"], "Bkg estimation method used during reduction")
     header["BKG_SUB"] = (header_stokes["BKG_SUB"], "Amount of bkg subtracted from images")
     header["SMOOTH"] = (header_stokes["SMOOTH"] if "SMOOTH" in list(header_stokes.keys()) else "None", "Smoothing method used during reduction")
@@ -182,9 +191,9 @@ def save_Stokes(
     # Crop Data to mask
     if data_mask.shape != (1, 1):
-        Stokes = Stokes[:, vertex[2] : vertex[3], vertex[0] : vertex[1]]
-        Stokes_cov = Stokes_cov[:, :, vertex[2] : vertex[3], vertex[0] : vertex[1]]
-        Stokes_cov_stat = Stokes_cov_stat[:, :, vertex[2] : vertex[3], vertex[0] : vertex[1]]
+        I_stokes = I_stokes[vertex[2] : vertex[3], vertex[0] : vertex[1]]
+        Q_stokes = Q_stokes[vertex[2] : vertex[3], vertex[0] : vertex[1]]
+        U_stokes = U_stokes[vertex[2] : vertex[3], vertex[0] : vertex[1]]
         P = P[vertex[2] : vertex[3], vertex[0] : vertex[1]]
         debiased_P = debiased_P[vertex[2] : vertex[3], vertex[0] : vertex[1]]
         s_P = s_P[vertex[2] : vertex[3], vertex[0] : vertex[1]]
@@ -192,42 +201,64 @@ def save_Stokes(
         PA = PA[vertex[2] : vertex[3], vertex[0] : vertex[1]]
         s_PA = s_PA[vertex[2] : vertex[3], vertex[0] : vertex[1]]
         s_PA_P = s_PA_P[vertex[2] : vertex[3], vertex[0] : vertex[1]]
+        new_Stokes_cov = np.zeros((*Stokes_cov.shape[:-2], *shape[::-1]))
+        for i in range(3):
+            for j in range(3):
+                Stokes_cov[i, j][(1 - data_mask).astype(bool)] = 0.0
+                new_Stokes_cov[i, j] = Stokes_cov[i, j][vertex[2] : vertex[3], vertex[0] : vertex[1]]
+        Stokes_cov = new_Stokes_cov
         data_mask = data_mask[vertex[2] : vertex[3], vertex[0] : vertex[1]]
     data_mask = data_mask.astype(float, copy=False)
     # Create HDUList object
     hdul = fits.HDUList([])
-    # Add I_stokes as PrimaryHDU
-    header["datatype"] = ("STOKES", "type of data stored in the HDU")
-    Stokes[np.broadcast_to((1 - data_mask).astype(bool), Stokes.shape)] = 0.0
-    hdu_head = add_stokes_axis_to_header(header, 0)
-    primary_hdu = fits.PrimaryHDU(data=Stokes, header=hdu_head)
-    primary_hdu.name = "STOKES"
-    hdul.append(primary_hdu)
+    # Add Flux density as PrimaryHDU
+    if flux_data is None:
+        header["DATATYPE"] = ("I_stokes", "type of data stored in the HDU")
+        I_stokes[(1 - data_mask).astype(bool)] = 0.0
+        primary_hdu = fits.PrimaryHDU(data=I_stokes, header=header)
+        primary_hdu.name = "I_stokes"
+        hdul.append(primary_hdu)
+    else:
+        flux_head["FILENAME"] = header["FILENAME"]
+        head = WCS(flux_head).deepcopy().to_header()
+        for key in [key for key in header.keys() if key not in ["SMOOTH", "SAMPLING"]]:
+            try:
+                head[key] = flux_head[key]
+            except KeyError:
+                head[key] = header[key]
+        header["DATATYPE"] = ("Flux_density", "type of data stored in the HDU")
+        primary_hdu = fits.PrimaryHDU(data=flux_data, header=head)
+        primary_hdu.name = "Flux_density"
+        hdul.append(primary_hdu)
+        header["DATATYPE"] = ("I_stokes", "type of data stored in the HDU")
+        I_stokes[(1 - data_mask).astype(bool)] = 0.0
+        image_hdu = fits.ImageHDU(data=I_stokes, header=header)
+        image_hdu.name = "I_stokes"
+        hdul.append(image_hdu)
-    # Add Stokes_cov, P, s_P, PA, s_PA to the HDUList
+    # Add Q, U, Stokes_cov, P, s_P, PA, s_PA to the HDUList
     for data, name in [
-        [Stokes_cov, "STOKES_COV"],
-        [Stokes_cov_stat, "STOKES_COV_STAT"],
+        [Q_stokes, "Q_stokes"],
+        [U_stokes, "U_stokes"],
+        [Stokes_cov, "IQU_cov_matrix"],
         [P, "Pol_deg"],
         [debiased_P, "Pol_deg_debiased"],
         [s_P, "Pol_deg_err"],
-        [s_P_P, "Pol_deg_stat_err"],
+        [s_P_P, "Pol_deg_err_Poisson_noise"],
         [PA, "Pol_ang"],
         [s_PA, "Pol_ang_err"],
-        [s_PA_P, "Pol_ang_stat_err"],
+        [s_PA_P, "Pol_ang_err_Poisson_noise"],
         [data_mask, "Data_mask"],
     ]:
-        hdu_head = header.copy()
-        hdu_head["datatype"] = name
-        if name[:10] == "STOKES_COV":
-            hdu_head = add_stokes_axis_to_header(hdu_head, 0)
-            hdu_head = add_stokes_axis_to_header(hdu_head, 0)
-            data[np.broadcast_to((1 - data_mask).astype(bool), data.shape)] = 0.0
-        else:
-            data[(1 - data_mask).astype(bool)] = 0.0
-        hdu = fits.ImageHDU(data=data, header=hdu_head)
+        hdu_header = header.copy()
+        hdu_header["DATATYPE"] = name
+        if not name == "IQU_cov_matrix":
+            data[(1 - data_mask).astype(bool)] = 0.0
+        hdu = fits.ImageHDU(data=data, header=hdu_header)
         hdu.name = name
         hdul.append(hdu)
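With the layout above, a reduced product now exposes named extensions instead of a single Stokes cube. A minimal sketch of reading one back (the filename is hypothetical; astropy resolves extension names case-insensitively):

from astropy.io import fits

with fits.open("NGC1068_FOC_stokes.fits") as hdul:  # hypothetical product name
    flux = hdul[0].data                        # Flux_density PrimaryHDU when flux_data was passed
    I_stokes = hdul["I_STOKES"].data
    Q_stokes = hdul["Q_STOKES"].data
    U_stokes = hdul["U_STOKES"].data
    Stokes_cov = hdul["IQU_COV_MATRIX"].data   # per-pixel 3x3 covariance of I, Q, U
    data_mask = hdul["DATA_MASK"].data.astype(bool)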

File diff suppressed because it is too large

View File

@@ -11,7 +11,7 @@ from warnings import filterwarnings
 import astropy.units as u
 import numpy as np
-from astropy.table import Column, unique, vstack
+from astropy.table import Column, unique
 from astropy.time import Time, TimeDelta
 from astroquery.exceptions import NoResultsWarning
 from astroquery.mast import MastMissions, Observations
@@ -82,20 +82,14 @@ def get_product_list(target=None, proposal_id=None, instrument="foc"):
         "References",
     ]
-    if target is None and proposal_id is None:
+    if target is None:
         target = input("Target name:\n>")
     # Use query_object method to resolve the object name into coordinates
     if instrument == "foc":
-        if target is None and proposal_id is not None:
-            results = mission.query_criteria(
-                sci_pep_id=proposal_id, radius=radius, select_cols=select_cols, sci_spec_1234="POL*", sci_obs_type="image", sci_aec="S", sci_instrume="foc"
-            )
-            target = list(np.unique(results["sci_targname"]))
-        else:
-            results = mission.query_object(
-                target, radius=radius, select_cols=select_cols, sci_spec_1234="POL*", sci_obs_type="image", sci_aec="S", sci_instrume="foc"
-            )
+        results = mission.query_object(
+            target, radius=radius, select_cols=select_cols, sci_spec_1234="POL*", sci_obs_type="image", sci_aec="S", sci_instrume="foc"
+        )
         dataproduct_type = "image"
         description = "DADS C0F file - Calibrated exposure WFPC/WFPC2/FOC/FOS/GHRS/HSP"
     elif instrument == "fos":
@@ -116,16 +110,8 @@ def get_product_list(target=None, proposal_id=None, instrument="foc"):
     results["Start"] = Column(Time(results["Start"]))
     results["Stop"] = Column(Time(results["Stop"]))
-    if isinstance(target, list):
-        for i, targ in enumerate(target):
-            results_div = divide_proposal(results[results["Target name"] == targ])
-            if i == 0:
-                obs = results_div.copy()
-            else:
-                obs = vstack([obs, results_div.copy()])
-    else:
-        results_div = divide_proposal(results)
-        obs = results_div.copy()
+    results = divide_proposal(results)
+    obs = results.copy()
     # Remove single observations for which a FIND filter is used
     to_remove = []
@@ -136,21 +122,12 @@ def get_product_list(target=None, proposal_id=None, instrument="foc"):
     # Remove observations for which a polarization filter is missing
     if instrument == "foc":
         polfilt = {"POL0": 0, "POL60": 1, "POL120": 2}
-        if isinstance(target, list):
-            for targ in target:
-                for pid in np.unique(obs[obs["Target name"] == targ]["Proposal ID"]):
-                    used_pol = np.zeros(3)
-                    for dataset in obs[np.logical_and(obs["Target name"] == targ, obs["Proposal ID"] == pid)]:
-                        used_pol[polfilt[dataset["POLFilters"]]] += 1
-                    if np.any(used_pol < 1):
-                        obs.remove_rows(np.arange(len(obs))[np.logical_and(obs["Target name"] == targ, obs["Proposal ID"] == pid)])
-        else:
-            for pid in np.unique(obs["Proposal ID"]):
-                used_pol = np.zeros(3)
-                for dataset in obs[obs["Proposal ID"] == pid]:
-                    used_pol[polfilt[dataset["POLFilters"]]] += 1
-                if np.any(used_pol < 1):
-                    obs.remove_rows(np.arange(len(obs))[obs["Proposal ID"] == pid])
+        for pid in np.unique(obs["Proposal ID"]):
+            used_pol = np.zeros(3)
+            for dataset in obs[obs["Proposal ID"] == pid]:
+                used_pol[polfilt[dataset["POLFilters"]]] += 1
+            if np.any(used_pol < 1):
+                obs.remove_rows(np.arange(len(obs))[obs["Proposal ID"] == pid])
     # Remove observations for which a spectropolarization has not been reduced
     if instrument == "fos":
         for pid in np.unique(obs["Proposal ID"]):
@@ -165,7 +142,6 @@ def get_product_list(target=None, proposal_id=None, instrument="foc"):
             if len(c3prod) < 1:
                 obs.remove_rows(np.arange(len(obs))[obs["Proposal ID"] == pid])
-    # tab = unique(obs, ["Target name", "Proposal ID"])
     tab = unique(obs, ["Target name", "Proposal ID"])
     obs["Obs"] = [np.argmax(np.logical_and(tab["Proposal ID"] == data["Proposal ID"], tab["Target name"] == data["Target name"])) + 1 for data in obs]
     try:
@@ -174,7 +150,7 @@ def get_product_list(target=None, proposal_id=None, instrument="foc"):
         raise ValueError("There is no observation with polarimetry for {0:s} in HST/{1:s} Legacy Archive".format(target, instrument.upper()))
     b = np.zeros(len(results), dtype=bool)
-    if proposal_id is not None and np.all(str(proposal_id) == np.unique(obs["Proposal ID"])):
+    if proposal_id is not None and str(proposal_id) in obs["Proposal ID"]:
         b[results["Proposal ID"] == str(proposal_id)] = True
     else:
         n_obs.pprint(len(n_obs) + 2)
@@ -195,8 +171,6 @@ def get_product_list(target=None, proposal_id=None, instrument="foc"):
     else:
         b[np.array([dataset in obs["Dataset"][obs["Obs"] == i[0]] for dataset in results["Dataset"]])] = True
-    targetb = list(np.unique(results["Target name"][b]))
-    target = targetb if len(targetb) > 1 else targetb[0]
     observations = Observations.query_criteria(obs_id=list(results["Dataset"][b]))
     products = Observations.filter_products(
         Observations.get_product_list(observations), productType=["SCIENCE"], dataproduct_type=dataproduct_type, calib_level=[2], description=description
@@ -205,12 +179,11 @@ def get_product_list(target=None, proposal_id=None, instrument="foc"):
     products["proposal_id"] = Column(products["proposal_id"], dtype="U35")
     for prod in products:
-        prod["proposal_id"] = obs["Proposal ID"][np.argmax(obs["Dataset"] == prod["productFilename"][: len(obs["Dataset"][0])].upper())]
+        prod["proposal_id"] = results["Proposal ID"][results["Dataset"] == prod["productFilename"][: len(results["Dataset"][0])].upper()][0]
     tab = unique(products, "proposal_id")
     products["Obs"] = [np.argmax(tab["proposal_id"] == data["proposal_id"]) + 1 for data in products]
-    products["targname"] = [obs["Target name"][np.argmax(obs["Dataset"] == data[: len(obs["Dataset"][0])].upper())] for data in products["productFilename"]]
     return target, products
@@ -222,43 +195,23 @@ def retrieve_products(target=None, proposal_id=None, instrument="foc", output_di
     prodpaths = []
     # data_dir = path_join(output_dir, target)
     out = ""
-    if isinstance(target, list):
-        for targ in target:
-            for obs in unique(products[products["targname"] == targ], "Obs"):
-                filepaths = []
-                # obs_dir = path_join(data_dir, obs['prodposal_id'])
-                # if obs['target_name']!=target:
-                obs_dir = path_join(path_join(output_dir, targ), obs["proposal_id"])
-                if not path_exists(obs_dir):
-                    system("mkdir -p {0:s} {1:s}".format(obs_dir, obs_dir.replace("data", "plots")))
-                for file in products["productFilename"][np.logical_and(products["Obs"] == obs["Obs"], products["targname"] == targ)]:
-                    fpath = path_join(obs_dir, file)
-                    if not path_exists(fpath):
-                        out += "{0:s} : {1:s}\n".format(
-                            file, Observations.download_file(products["dataURI"][products["productFilename"] == file][0], local_path=fpath)[0]
-                        )
-                    else:
-                        out += "{0:s} : Exists\n".format(file)
-                    filepaths.append([obs_dir, file])
-                prodpaths.append(np.array(filepaths, dtype=str))
-    else:
-        for obs in unique(products, "Obs"):
-            filepaths = []
-            # obs_dir = path_join(data_dir, obs['prodposal_id'])
-            # if obs['target_name']!=target:
-            obs_dir = path_join(path_join(output_dir, target), obs["proposal_id"])
-            if not path_exists(obs_dir):
-                system("mkdir -p {0:s} {1:s}".format(obs_dir, obs_dir.replace("data", "plots")))
-            for file in products["productFilename"][products["Obs"] == obs["Obs"]]:
-                fpath = path_join(obs_dir, file)
-                if not path_exists(fpath):
-                    out += "{0:s} : {1:s}\n".format(
-                        file, Observations.download_file(products["dataURI"][products["productFilename"] == file][0], local_path=fpath)[0]
-                    )
-                else:
-                    out += "{0:s} : Exists\n".format(file)
-                filepaths.append([obs_dir, file])
-            prodpaths.append(np.array(filepaths, dtype=str))
+    for obs in unique(products, "Obs"):
+        filepaths = []
+        # obs_dir = path_join(data_dir, obs['prodposal_id'])
+        # if obs['target_name']!=target:
+        obs_dir = path_join(path_join(output_dir, target), obs["proposal_id"])
+        if not path_exists(obs_dir):
+            system("mkdir -p {0:s} {1:s}".format(obs_dir, obs_dir.replace("data", "plots")))
+        for file in products["productFilename"][products["Obs"] == obs["Obs"]]:
+            fpath = path_join(obs_dir, file)
+            if not path_exists(fpath):
+                out += "{0:s} : {1:s}\n".format(
+                    file, Observations.download_file(products["dataURI"][products["productFilename"] == file][0], local_path=fpath)[0]
+                )
+            else:
+                out += "{0:s} : Exists\n".format(file)
+            filepaths.append([obs_dir, file])
+        prodpaths.append(np.array(filepaths, dtype=str))
     return target, prodpaths
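The pruned query path keeps a single flow: resolve the target with MastMissions.query_object, then walk down to the calibrated C0F products through Observations. A compressed sketch of the same astroquery calls (target name illustrative; the raw column name sci_data_set_name is an assumption here, since the module renames columns through select_cols, e.g. to "Dataset"):

from astroquery.mast import MastMissions, Observations

mission = MastMissions(mission="hst")
# Resolve the target and keep FOC polarimetric images, as get_product_list does
results = mission.query_object(
    "NGC1068", radius=0.1, sci_spec_1234="POL*", sci_obs_type="image", sci_aec="S", sci_instrume="foc"
)
observations = Observations.query_criteria(obs_id=list(results["sci_data_set_name"]))
products = Observations.filter_products(
    Observations.get_product_list(observations), productType=["SCIENCE"], dataproduct_type="image", calib_level=[2]
)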

View File

@@ -52,12 +52,11 @@ from scipy.ndimage import rotate as sc_rotate
from scipy.ndimage import shift as sc_shift from scipy.ndimage import shift as sc_shift
from scipy.signal import fftconvolve from scipy.signal import fftconvolve
from .background import bkg_fit, bkg_hist, bkg_mini
from .convex_hull import image_hull from .convex_hull import image_hull
from .cross_correlation import phase_cross_correlation from .cross_correlation import phase_cross_correlation
from .deconvolve import deconvolve_im, gaussian2d, gaussian_psf, zeropad from .deconvolve import deconvolve_im, gaussian2d, gaussian_psf, zeropad
from .plots import plot_obs from .plots import plot_obs
from .utils import princ_angle, add_stokes_axis_to_header from .utils import princ_angle
log.setLevel("ERROR") log.setLevel("ERROR")
@@ -224,9 +223,7 @@ def bin_ndarray(ndarray, new_shape, operation="sum"):
return ndarray return ndarray
def crop_array( def crop_array(data_array, headers, error_array=None, data_mask=None, step=5, null_val=None, inside=False, display=False, savename=None, plots_folder=""):
data_array, headers, error_array=None, data_mask=None, step=5, null_val=None, crop=True, inside=False, display=False, savename=None, plots_folder=""
):
""" """
Homogeneously crop an array: all contained images will have the same shape. Homogeneously crop an array: all contained images will have the same shape.
'inside' parameter will decide how much should be cropped. 'inside' parameter will decide how much should be cropped.
@@ -258,10 +255,6 @@ def crop_array(
If None, will be put to 75% of the mean value of the associated error If None, will be put to 75% of the mean value of the associated error
array. array.
Defaults to None. Defaults to None.
crop : boolean, optional
If True, data_array will be cropped down to contain only relevant data.
If False, this information will be kept in the crop_mask output.
Defaults to True.
inside : boolean, optional inside : boolean, optional
If True, the cropped image will be the maximum rectangle included If True, the cropped image will be the maximum rectangle included
inside the image. If False, the cropped image will be the minimum inside the image. If False, the cropped image will be the minimum
@@ -301,9 +294,6 @@ def crop_array(
    v_array[1] = np.max(vertex[:, 1]).astype(int)
    v_array[2] = np.min(vertex[:, 2]).astype(int)
    v_array[3] = np.max(vertex[:, 3]).astype(int)
-    if data_mask is None:
-        data_mask = np.zeros(data_array[0].shape).astype(bool)
-        data_mask[v_array[0] : v_array[1], v_array[2] : v_array[3]] = True
    new_shape = np.array([v_array[1] - v_array[0], v_array[3] - v_array[2]])
    rectangle = [v_array[2], v_array[0], new_shape[1], new_shape[0], 0.0, "b"]
@@ -317,6 +307,8 @@ def crop_array(
        # Update CRPIX value in the associated header
        curr_wcs = WCS(crop_headers[i]).celestial.deepcopy()
        curr_wcs.wcs.crpix[:2] = curr_wcs.wcs.crpix[:2] - np.array([v_array[2], v_array[0]])
+        curr_wcs.array_shape = crop_array[i].shape
+        curr_wcs.wcs.set()
        crop_headers[i].update(curr_wcs.to_header())
        crop_headers[i]["naxis1"], crop_headers[i]["naxis2"] = crop_array[i].shape
@@ -361,11 +353,11 @@ def crop_array(
    )
    plt.show()

-    if crop:
+    if data_mask is not None:
        crop_mask = data_mask[v_array[0] : v_array[1], v_array[2] : v_array[3]]
        return crop_array, crop_error_array, crop_mask, crop_headers
    else:
-        return data_array, error_array, data_mask, headers
+        return crop_array, crop_error_array, crop_headers


def deconvolve_array(data_array, headers, psf="gaussian", FWHM=1.0, scale="px", shape=None, iterations=20, algo="richardson"):
@@ -515,7 +507,8 @@ def get_error(
    if data_mask is not None:
        mask = deepcopy(data_mask)
    else:
-        data_c, error_c, mask_c, _ = crop_array(data, headers, error_array=error, step=5, null_val=0.0, inside=False)
+        data_c, error_c, _ = crop_array(data, headers, error, step=5, null_val=0.0, inside=False)
+        mask_c = np.ones(data_c[0].shape, dtype=bool)
        for i, (data_ci, error_ci) in enumerate(zip(data_c, error_c)):
            data[i], error[i] = zeropad(data_ci, data[i].shape), zeropad(error_ci, error[i].shape)
        mask = zeropad(mask_c, data[0].shape).astype(bool)
@@ -531,20 +524,22 @@ def get_error(
    # estimated to less than 3%
    err_flat = data * 0.03

+    from .background import bkg_fit, bkg_hist, bkg_mini

    if sub_type is None:
        n_data_array, c_error_bkg, headers, background = bkg_hist(
            data, error, mask, headers, subtract_error=subtract_error, display=display, savename=savename, plots_folder=plots_folder
        )
        sub_type, subtract_error = "histogram ", str(int(subtract_error > 0.0))
    elif isinstance(sub_type, str):
-        if sub_type.lower() in ["auto"]:
+        if sub_type.lower() in ["fit"]:
            n_data_array, c_error_bkg, headers, background = bkg_fit(
                data, error, mask, headers, subtract_error=subtract_error, display=display, savename=savename, plots_folder=plots_folder
            )
            sub_type, subtract_error = "histogram fit ", "mean+%.1fsigma" % subtract_error if subtract_error != 0.0 else 0.0
        else:
            n_data_array, c_error_bkg, headers, background = bkg_hist(
-                data, error, mask, headers, sub_type=sub_type, subtract_error=subtract_error, display=display, savename=savename, plots_folder=plots_folder
+                data, error, mask, headers, n_bins=sub_type, subtract_error=subtract_error, display=display, savename=savename, plots_folder=plots_folder
            )
            sub_type, subtract_error = "histogram ", "mean+%.1fsigma" % subtract_error if subtract_error != 0.0 else 0.0
    elif isinstance(sub_type, tuple):
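A minimal sketch in the spirit of bkg_hist (the real routine lives in lib/background.py and differs in detail; the image here is synthetic): bin the pixel values with numpy's automatic binning and take the mode of the histogram as the sky level.

import numpy as np

rng = np.random.default_rng(0)
image = rng.normal(5.0, 1.0, (100, 100))    # hypothetical sky-dominated image
counts, edges = np.histogram(image, bins="auto")
centers = 0.5 * (edges[:-1] + edges[1:])
background = centers[np.argmax(counts)]     # mode of the histogram, ~5.0 here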
@@ -642,7 +637,8 @@ def rebin_array(data_array, error_array, headers, pxsize=2, scale="px", operatio
pxsize, scale = "", "full" pxsize, scale = "", "full"
else: else:
raise ValueError("'{0:s}' invalid scale for binning.".format(scale)) raise ValueError("'{0:s}' invalid scale for binning.".format(scale))
new_shape = np.ceil(min(image.shape / Dxy_arr, key=lambda x: x[0] + x[1])).astype(int) new_shape_float = min(image.shape / Dxy_arr, key=lambda x: x[0] + x[1])
new_shape = np.ceil(new_shape_float).astype(int)
for i, (image, error, header) in enumerate(list(zip(data_array, error_array, headers))): for i, (image, error, header) in enumerate(list(zip(data_array, error_array, headers))):
# Get current pixel size # Get current pixel size
@@ -671,8 +667,10 @@ def rebin_array(data_array, error_array, headers, pxsize=2, scale="px", operatio
        # Update header
        nw = w.deepcopy()
        nw.wcs.cdelt *= Dxy
+        # nw.wcs.crpix += np.abs(new_shape_float - new_shape) * np.array(new_shape) / Dxy
        nw.wcs.crpix /= Dxy
        nw.array_shape = new_shape
+        nw.wcs.set()
        new_header["NAXIS1"], new_header["NAXIS2"] = nw.array_shape
        new_header["PXAREA"] *= Dxy[0] * Dxy[1]
        for key, val in nw.to_header().items():
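A minimal sketch of the WCS update this hunk performs when rebinning (standalone, with hypothetical values): binning by a factor Dxy makes pixels coarser, so CDELT is multiplied and CRPIX divided by the same factor, and wcs.set() re-validates the result.

import numpy as np
from astropy.wcs import WCS

w = WCS(naxis=2)
w.wcs.crpix = [100.0, 100.0]
w.wcs.cdelt = [-2.0e-5, 2.0e-5]

Dxy = np.array([2, 2])             # hypothetical binning factor per axis
nw = w.deepcopy()
nw.wcs.cdelt = nw.wcs.cdelt * Dxy  # coarser pixels
nw.wcs.crpix = nw.wcs.crpix / Dxy  # same sky point on the new grid
nw.wcs.set()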
@@ -772,7 +770,7 @@ def align_data(
        err_array = np.concatenate((error_array, [np.zeros(ref_data.shape)]), axis=0)
    if data_mask is None:
-        full_array, err_array, data_mask, full_headers = crop_array(full_array, full_headers, error_array=err_array, step=5, inside=False, null_val=0.0)
+        full_array, err_array, full_headers = crop_array(full_array, full_headers, err_array, step=5, inside=False, null_val=0.0)
    else:
        full_array, err_array, data_mask, full_headers = crop_array(
            full_array, full_headers, err_array, data_mask=data_mask, step=5, inside=False, null_val=0.0
@@ -852,7 +850,10 @@ def align_data(
    new_crpix = np.array([wcs.wcs.crpix for wcs in headers_wcs]) + shifts[:, ::-1] + res_shift[::-1]
    for i in range(len(headers_wcs)):
        headers_wcs[i].wcs.crpix = new_crpix[0]
+        headers_wcs[i].array_shape = (res_shape, res_shape)
+        headers_wcs[i].wcs.set()
        headers[i].update(headers_wcs[i].to_header())
+        headers[i]["NAXIS1"], headers[i]["NAXIS2"] = res_shape, res_shape

    data_mask = rescaled_mask.all(axis=0)
    data_array, error_array, data_mask, headers = crop_array(rescaled_image, headers, rescaled_error, data_mask, null_val=0.01 * background)
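A minimal sketch of the CRPIX bookkeeping used here (standalone; the shift is hypothetical, not one measured by align_data): once a pixel shift (dy, dx) between two images is known, the alignment is recorded in the WCS by moving CRPIX by the same amount in FITS (x, y) axis order.

import numpy as np
from astropy.wcs import WCS

w = WCS(naxis=2)
w.wcs.crpix = [50.0, 50.0]
shift_yx = np.array([2.0, -3.0])            # hypothetical measured (y, x) shift
w.wcs.crpix = w.wcs.crpix + shift_yx[::-1]  # reverse into FITS (x, y) order
w.wcs.set()                                 # CRPIX is now [47., 52.]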
@@ -1181,8 +1182,15 @@ def compute_Stokes(data_array, error_array, data_mask, headers, FWHM=None, scale
        Defaults to True.
    ----------
    Returns:
-    Stokes : numpy.ndarray
-        Image (2D floats) containing the Stokes I,Q,U,V flux
+    I_stokes : numpy.ndarray
+        Image (2D floats) containing the Stokes parameters accounting for
+        total intensity
+    Q_stokes : numpy.ndarray
+        Image (2D floats) containing the Stokes parameters accounting for
+        vertical/horizontal linear polarization intensity
+    U_stokes : numpy.ndarray
+        Image (2D floats) containing the Stokes parameters accounting for
+        +45/-45deg linear polarization intensity
    Stokes_cov : numpy.ndarray
        Covariance matrix of the Stokes parameters I, Q, U.
    """
@@ -1244,7 +1252,6 @@ def compute_Stokes(data_array, error_array, data_mask, headers, FWHM=None, scale
        # Orientation and error for each polarizer
        # fmax = np.finfo(np.float64).max
        pol_flux = np.array([corr[0] * pol0, corr[1] * pol60, corr[2] * pol120])
-        pol_flux_corr = np.array([pf * 2.0 / t for (pf, t) in zip(pol_flux, transmit)])

        coeff_stokes = np.zeros((3, 3))
        # Coefficients linking each polarizer flux to each Stokes parameter
@@ -1260,27 +1267,28 @@ def compute_Stokes(data_array, error_array, data_mask, headers, FWHM=None, scale
        # Normalization parameter for Stokes parameters computation
        N = (coeff_stokes[0, :] * transmit / 2.0).sum()
        coeff_stokes = coeff_stokes / N
-        coeff_stokes_corr = np.array([cs * t / 2.0 for (cs, t) in zip(coeff_stokes.T, transmit)]).T
-        Stokes = np.zeros((4, pol_array[0].shape[0], pol_array[0].shape[1]))
-        Stokes_cov = np.zeros((4, 4, Stokes.shape[1], Stokes.shape[2]))
+        I_stokes = np.zeros(pol_array[0].shape)
+        Q_stokes = np.zeros(pol_array[0].shape)
+        U_stokes = np.zeros(pol_array[0].shape)
+        Stokes_cov = np.zeros((3, 3, I_stokes.shape[0], I_stokes.shape[1]))

-        for i in range(Stokes.shape[1]):
-            for j in range(Stokes.shape[2]):
-                Stokes[:3, i, j] = np.dot(coeff_stokes, pol_flux[:, i, j]).T
-                Stokes_cov[:3, :3, i, j] = np.dot(coeff_stokes, np.dot(pol_cov[:, :, i, j], coeff_stokes.T))
+        for i in range(I_stokes.shape[0]):
+            for j in range(I_stokes.shape[1]):
+                I_stokes[i, j], Q_stokes[i, j], U_stokes[i, j] = np.dot(coeff_stokes, pol_flux[:, i, j]).T
+                Stokes_cov[:, :, i, j] = np.dot(coeff_stokes, np.dot(pol_cov[:, :, i, j], coeff_stokes.T))

        if (FWHM is not None) and (smoothing.lower() in ["weighted_gaussian_after", "weight_gauss_after", "gaussian_after", "gauss_after"]):
            smoothing = smoothing.lower()[:-6]
-            Stokes_array = deepcopy(Stokes[:3])
+            Stokes_array = np.array([I_stokes, Q_stokes, U_stokes])
            Stokes_error = np.array([np.sqrt(Stokes_cov[i, i]) for i in range(3)])
            Stokes_headers = headers[0:3]
            Stokes_array, Stokes_error = smooth_data(Stokes_array, Stokes_error, data_mask, headers=Stokes_headers, FWHM=FWHM, scale=scale, smoothing=smoothing)
-            Stokes[:3] = deepcopy(Stokes_array)
+            I_stokes, Q_stokes, U_stokes = Stokes_array
            Stokes_cov[0, 0], Stokes_cov[1, 1], Stokes_cov[2, 2] = deepcopy(Stokes_error**2)

-            sStokes_array = np.array([Stokes[0, 1], Stokes[0, 2], Stokes[1, 2]])
+            sStokes_array = np.array([I_stokes * Q_stokes, I_stokes * U_stokes, Q_stokes * U_stokes])
            sStokes_error = np.array([Stokes_cov[0, 1], Stokes_cov[0, 2], Stokes_cov[1, 2]])
            uStokes_error = np.array([Stokes_cov[1, 0], Stokes_cov[2, 0], Stokes_cov[2, 1]])
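A minimal sketch of the linear system the loop above solves, with idealized polarizers (unit efficiency and transmission, which the pipeline corrects for): a perfect polarizer at angle theta_k passes f_k = (I + Q*cos(2*theta_k) + U*sin(2*theta_k)) / 2, so with the FOC-like angles 0/60/120 deg the Stokes parameters follow from a 3x3 inversion.

import numpy as np

theta = np.deg2rad([0.0, 60.0, 120.0])
A = 0.5 * np.array([[1.0, np.cos(2 * t), np.sin(2 * t)] for t in theta])
coeff_stokes = np.linalg.inv(A)      # maps (f0, f60, f120) to (I, Q, U)

I, Q, U = 10.0, 1.0, -0.5            # hypothetical true Stokes fluxes
f = A @ np.array([I, Q, U])          # simulated polarizer fluxes
print(coeff_stokes @ f)              # recovers [10.0, 1.0, -0.5]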
@@ -1294,95 +1302,141 @@ def compute_Stokes(data_array, error_array, data_mask, headers, FWHM=None, scale
            Stokes_cov[0, 1], Stokes_cov[0, 2], Stokes_cov[1, 2] = deepcopy(sStokes_error)
            Stokes_cov[1, 0], Stokes_cov[2, 0], Stokes_cov[2, 1] = deepcopy(uStokes_error)

-        mask = (Stokes[1] ** 2 + Stokes[2] ** 2) > Stokes[0] ** 2
+        mask = (Q_stokes**2 + U_stokes**2) > I_stokes**2
        if mask.any():
-            print("WARNING : found {0:d} pixels for which I_pol > I_stokes".format(mask.sum()))
+            print("WARNING : found {0:d} pixels for which I_pol > I_stokes".format(I_stokes[mask].size))

        # Statistical error: Poisson noise is assumed
        sigma_flux = np.array([np.sqrt(flux / head["exptime"]) for flux, head in zip(pol_flux, pol_headers)])
-        Stokes_cov_stat = np.zeros(Stokes_cov.shape)
-        for i in range(3):
-            Stokes_cov_stat[i, i] = np.sum([coeff_stokes[i, k] ** 2 * sigma_flux[k] ** 2 for k in range(len(sigma_flux))], axis=0)
-            for j in [k for k in range(3) if k > i]:
-                Stokes_cov_stat[i, j] = np.sum([coeff_stokes[i, k] * coeff_stokes[j, k] * sigma_flux[k] ** 2 for k in range(len(sigma_flux))], axis=0)
-                Stokes_cov_stat[j, i] = np.sum([coeff_stokes[i, k] * coeff_stokes[j, k] * sigma_flux[k] ** 2 for k in range(len(sigma_flux))], axis=0)
+        s_I2_stat = np.sum([coeff_stokes[0, i] ** 2 * sigma_flux[i] ** 2 for i in range(len(sigma_flux))], axis=0)
+        s_Q2_stat = np.sum([coeff_stokes[1, i] ** 2 * sigma_flux[i] ** 2 for i in range(len(sigma_flux))], axis=0)
+        s_U2_stat = np.sum([coeff_stokes[2, i] ** 2 * sigma_flux[i] ** 2 for i in range(len(sigma_flux))], axis=0)
+
+        pol_flux_corr = np.array([pf * 2.0 / t for (pf, t) in zip(pol_flux, transmit)])
+        coeff_stokes_corr = np.array([cs * t / 2.0 for (cs, t) in zip(coeff_stokes.T, transmit)]).T

        # Compute the derivative of each Stokes parameter with respect to the polarizer orientation
-        dIQU_dtheta = np.zeros(Stokes_cov.shape)
-        # Derivative of I_stokes wrt theta_1, 2, 3
-        for j in range(3):
-            dIQU_dtheta[0, j] = (
-                2.0
-                * pol_eff[j]
-                / N
-                * (
-                    pol_eff[(j + 2) % 3] * np.cos(-2.0 * theta[(j + 2) % 3] + 2.0 * theta[j]) * (pol_flux_corr[(j + 1) % 3] - Stokes[0])
-                    - pol_eff[(j + 1) % 3] * np.cos(-2.0 * theta[j] + 2.0 * theta[(j + 1) % 3]) * (pol_flux_corr[(j + 2) % 3] - Stokes[0])
-                    + coeff_stokes_corr[0, j] * (np.sin(2.0 * theta[j]) * Stokes[1] - np.cos(2 * theta[j]) * Stokes[2])
-                )
-            )
-        # Derivative of Stokes[1] wrt theta_1, 2, 3
-        for j in range(3):
-            dIQU_dtheta[1, j] = (
-                2.0
-                * pol_eff[j]
-                / N
-                * (
-                    np.cos(2.0 * theta[j]) * (pol_flux_corr[(j + 1) % 3] - pol_flux_corr[(j + 2) % 3])
-                    - (
-                        pol_eff[(j + 2) % 3] * np.cos(-2.0 * theta[(j + 2) % 3] + 2.0 * theta[j])
-                        - pol_eff[(j + 1) % 3] * np.cos(-2.0 * theta[j] + 2.0 * theta[(j + 1) % 3])
-                    )
-                    * Stokes[1]
-                    + coeff_stokes_corr[1, j] * (np.sin(2.0 * theta[j]) * Stokes[1] - np.cos(2 * theta[j]) * Stokes[2])
-                )
-            )
-        # Derivative of Stokes[2] wrt theta_1, 2, 3
-        for j in range(3):
-            dIQU_dtheta[2, j] = (
-                2.0
-                * pol_eff[j]
-                / N
-                * (
-                    np.sin(2.0 * theta[j]) * (pol_flux_corr[(j + 1) % 3] - pol_flux_corr[(j + 2) % 3])
-                    - (
-                        pol_eff[(j + 2) % 3] * np.cos(-2.0 * theta[(j + 2) % 3] + 2.0 * theta[j])
-                        - pol_eff[(j + 1) % 3] * np.cos(-2.0 * theta[j] + 2.0 * theta[(j + 1) % 3])
-                    )
-                    * Stokes[2]
-                    + coeff_stokes_corr[2, j] * (np.sin(2.0 * theta[j]) * Stokes[1] - np.cos(2 * theta[j]) * Stokes[2])
-                )
-            )
+        dI_dtheta1 = (
+            2.0
+            * pol_eff[0]
+            / N
+            * (
+                pol_eff[2] * np.cos(-2.0 * theta[2] + 2.0 * theta[0]) * (pol_flux_corr[1] - I_stokes)
+                - pol_eff[1] * np.cos(-2.0 * theta[0] + 2.0 * theta[1]) * (pol_flux_corr[2] - I_stokes)
+                + coeff_stokes_corr[0, 0] * (np.sin(2.0 * theta[0]) * Q_stokes - np.cos(2 * theta[0]) * U_stokes)
+            )
+        )
+        dI_dtheta2 = (
+            2.0
+            * pol_eff[1]
+            / N
+            * (
+                pol_eff[0] * np.cos(-2.0 * theta[0] + 2.0 * theta[1]) * (pol_flux_corr[2] - I_stokes)
+                - pol_eff[2] * np.cos(-2.0 * theta[1] + 2.0 * theta[2]) * (pol_flux_corr[0] - I_stokes)
+                + coeff_stokes_corr[0, 1] * (np.sin(2.0 * theta[1]) * Q_stokes - np.cos(2 * theta[1]) * U_stokes)
+            )
+        )
+        dI_dtheta3 = (
+            2.0
+            * pol_eff[2]
+            / N
+            * (
+                pol_eff[1] * np.cos(-2.0 * theta[1] + 2.0 * theta[2]) * (pol_flux_corr[0] - I_stokes)
+                - pol_eff[0] * np.cos(-2.0 * theta[2] + 2.0 * theta[0]) * (pol_flux_corr[1] - I_stokes)
+                + coeff_stokes_corr[0, 2] * (np.sin(2.0 * theta[2]) * Q_stokes - np.cos(2 * theta[2]) * U_stokes)
+            )
+        )
+        dI_dtheta = np.array([dI_dtheta1, dI_dtheta2, dI_dtheta3])
+        dQ_dtheta1 = (
+            2.0
+            * pol_eff[0]
+            / N
+            * (
+                np.cos(2.0 * theta[0]) * (pol_flux_corr[1] - pol_flux_corr[2])
+                - (pol_eff[2] * np.cos(-2.0 * theta[2] + 2.0 * theta[0]) - pol_eff[1] * np.cos(-2.0 * theta[0] + 2.0 * theta[1])) * Q_stokes
+                + coeff_stokes_corr[1, 0] * (np.sin(2.0 * theta[0]) * Q_stokes - np.cos(2 * theta[0]) * U_stokes)
+            )
+        )
+        dQ_dtheta2 = (
+            2.0
+            * pol_eff[1]
+            / N
+            * (
+                np.cos(2.0 * theta[1]) * (pol_flux_corr[2] - pol_flux_corr[0])
+                - (pol_eff[0] * np.cos(-2.0 * theta[0] + 2.0 * theta[1]) - pol_eff[2] * np.cos(-2.0 * theta[1] + 2.0 * theta[2])) * Q_stokes
+                + coeff_stokes_corr[1, 1] * (np.sin(2.0 * theta[1]) * Q_stokes - np.cos(2 * theta[1]) * U_stokes)
+            )
+        )
+        dQ_dtheta3 = (
+            2.0
+            * pol_eff[2]
+            / N
+            * (
+                np.cos(2.0 * theta[2]) * (pol_flux_corr[0] - pol_flux_corr[1])
+                - (pol_eff[1] * np.cos(-2.0 * theta[1] + 2.0 * theta[2]) - pol_eff[0] * np.cos(-2.0 * theta[2] + 2.0 * theta[0])) * Q_stokes
+                + coeff_stokes_corr[1, 2] * (np.sin(2.0 * theta[2]) * Q_stokes - np.cos(2 * theta[2]) * U_stokes)
+            )
+        )
+        dQ_dtheta = np.array([dQ_dtheta1, dQ_dtheta2, dQ_dtheta3])
+        dU_dtheta1 = (
+            2.0
+            * pol_eff[0]
+            / N
+            * (
+                np.sin(2.0 * theta[0]) * (pol_flux_corr[1] - pol_flux_corr[2])
+                - (pol_eff[2] * np.cos(-2.0 * theta[2] + 2.0 * theta[0]) - pol_eff[1] * np.cos(-2.0 * theta[0] + 2.0 * theta[1])) * U_stokes
+                + coeff_stokes_corr[2, 0] * (np.sin(2.0 * theta[0]) * Q_stokes - np.cos(2 * theta[0]) * U_stokes)
+            )
+        )
+        dU_dtheta2 = (
+            2.0
+            * pol_eff[1]
+            / N
+            * (
+                np.sin(2.0 * theta[1]) * (pol_flux_corr[2] - pol_flux_corr[0])
+                - (pol_eff[0] * np.cos(-2.0 * theta[0] + 2.0 * theta[1]) - pol_eff[2] * np.cos(-2.0 * theta[1] + 2.0 * theta[2])) * U_stokes
+                + coeff_stokes_corr[2, 1] * (np.sin(2.0 * theta[1]) * Q_stokes - np.cos(2 * theta[1]) * U_stokes)
+            )
+        )
+        dU_dtheta3 = (
+            2.0
+            * pol_eff[2]
+            / N
+            * (
+                np.sin(2.0 * theta[2]) * (pol_flux_corr[0] - pol_flux_corr[1])
+                - (pol_eff[1] * np.cos(-2.0 * theta[1] + 2.0 * theta[2]) - pol_eff[0] * np.cos(-2.0 * theta[2] + 2.0 * theta[0])) * U_stokes
+                + coeff_stokes_corr[2, 2] * (np.sin(2.0 * theta[2]) * Q_stokes - np.cos(2 * theta[2]) * U_stokes)
+            )
+        )
+        dU_dtheta = np.array([dU_dtheta1, dU_dtheta2, dU_dtheta3])

        # Compute the uncertainty associated with the polarizers' orientation (see Kishimoto 1999)
-        Stokes_cov_axis = np.zeros(Stokes_cov.shape)
-        for i in range(3):
-            Stokes_cov_axis[i, i] = np.sum([dIQU_dtheta[i, k] ** 2 * globals()["sigma_theta"][k] ** 2 for k in range(len(globals()["sigma_theta"]))], axis=0)
-            for j in [k for k in range(3) if k > i]:
-                Stokes_cov_axis[i, j] = np.sum(
-                    [dIQU_dtheta[i, k] * dIQU_dtheta[j, k] * globals()["sigma_theta"][k] ** 2 for k in range(len(globals()["sigma_theta"]))], axis=0
-                )
-                Stokes_cov_axis[j, i] = np.sum(
-                    [dIQU_dtheta[i, k] * dIQU_dtheta[j, k] * globals()["sigma_theta"][k] ** 2 for k in range(len(globals()["sigma_theta"]))], axis=0
-                )
+        s_I2_axis = np.sum([dI_dtheta[i] ** 2 * globals()["sigma_theta"][i] ** 2 for i in range(len(globals()["sigma_theta"]))], axis=0)
+        s_Q2_axis = np.sum([dQ_dtheta[i] ** 2 * globals()["sigma_theta"][i] ** 2 for i in range(len(globals()["sigma_theta"]))], axis=0)
+        s_U2_axis = np.sum([dU_dtheta[i] ** 2 * globals()["sigma_theta"][i] ** 2 for i in range(len(globals()["sigma_theta"]))], axis=0)
+        # np.savetxt("output/sI_dir.txt", np.sqrt(s_I2_axis))
+        # np.savetxt("output/sQ_dir.txt", np.sqrt(s_Q2_axis))
+        # np.savetxt("output/sU_dir.txt", np.sqrt(s_U2_axis))

        # Add quadratically the uncertainty to the Stokes covariance matrix
-        Stokes_cov += Stokes_cov_axis + Stokes_cov_stat
+        Stokes_cov[0, 0] += s_I2_axis + s_I2_stat
+        Stokes_cov[1, 1] += s_Q2_axis + s_Q2_stat
+        Stokes_cov[2, 2] += s_U2_axis + s_U2_stat

        # Save values to single header
        header_stokes = pol_headers[0]
    else:
-        all_Stokes = np.zeros((np.unique(rotate).size, 4, data_array.shape[1], data_array.shape[2]))
-        all_Stokes_cov = np.zeros((np.unique(rotate).size, 4, 4, data_array.shape[1], data_array.shape[2]))
+        all_I_stokes = np.zeros((np.unique(rotate).size, data_array.shape[1], data_array.shape[2]))
+        all_Q_stokes = np.zeros((np.unique(rotate).size, data_array.shape[1], data_array.shape[2]))
+        all_U_stokes = np.zeros((np.unique(rotate).size, data_array.shape[1], data_array.shape[2]))
+        all_Stokes_cov = np.zeros((np.unique(rotate).size, 3, 3, data_array.shape[1], data_array.shape[2]))
        all_header_stokes = [{}] * np.unique(rotate).size
        for i, rot in enumerate(np.unique(rotate)):
            rot_mask = rotate == rot
-            all_Stokes[i], all_Stokes_cov[i], all_header_stokes[i] = compute_Stokes(
+            all_I_stokes[i], all_Q_stokes[i], all_U_stokes[i], all_Stokes_cov[i], all_header_stokes[i] = compute_Stokes(
                data_array[rot_mask],
                error_array[rot_mask],
                data_mask,
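A minimal sketch of the first-order propagation applied above (after Kishimoto 1999; all numbers hypothetical): an uncertainty sigma_theta_k on each polarizer orientation contributes sum_k (dS/dtheta_k)**2 * sigma_theta_k**2 to the variance of a Stokes parameter S, which is exactly what the s_*2_axis terms accumulate.

import numpy as np

dI_dtheta = np.array([0.3, -0.1, 0.2])     # hypothetical derivatives at one pixel
sigma_theta = np.deg2rad([3.0, 3.0, 3.0])  # hypothetical orientation errors (rad)
s_I2_axis = np.sum(dI_dtheta**2 * sigma_theta**2)
print(np.sqrt(s_I2_axis))                  # 1-sigma contribution to I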
@@ -1395,8 +1449,10 @@ def compute_Stokes(data_array, error_array, data_mask, headers, FWHM=None, scale
        )
        all_exp = np.array([float(h["exptime"]) for h in all_header_stokes])
-        Stokes = np.sum([exp * S for exp, S in zip(all_exp, all_Stokes)], axis=0) / all_exp.sum()
-        Stokes_cov = np.zeros((4, 4, Stokes.shape[1], Stokes.shape[2]))
+        I_stokes = np.sum([exp * I for exp, I in zip(all_exp, all_I_stokes)], axis=0) / all_exp.sum()
+        Q_stokes = np.sum([exp * Q for exp, Q in zip(all_exp, all_Q_stokes)], axis=0) / all_exp.sum()
+        U_stokes = np.sum([exp * U for exp, U in zip(all_exp, all_U_stokes)], axis=0) / all_exp.sum()
+        Stokes_cov = np.zeros((3, 3, I_stokes.shape[0], I_stokes.shape[1]))
        for i in range(3):
            Stokes_cov[i, i] = np.sum([exp**2 * cov for exp, cov in zip(all_exp, all_Stokes_cov[:, i, i])], axis=0) / all_exp.sum() ** 2
            for j in [x for x in range(3) if x != i]:
@@ -1410,15 +1466,19 @@ def compute_Stokes(data_array, error_array, data_mask, headers, FWHM=None, scale
    # Nan handling :
    fmax = np.finfo(np.float64).max
-    Stokes[np.isnan(Stokes)] = 0.0
-    Stokes[1:][np.broadcast_to(Stokes[0] == 0.0, Stokes[1:].shape)] = 0.0
+    I_stokes[np.isnan(I_stokes)] = 0.0
+    Q_stokes[I_stokes == 0.0] = 0.0
+    U_stokes[I_stokes == 0.0] = 0.0
+    Q_stokes[np.isnan(Q_stokes)] = 0.0
+    U_stokes[np.isnan(U_stokes)] = 0.0
    Stokes_cov[np.isnan(Stokes_cov)] = fmax
-    header_stokes = add_stokes_axis_to_header(header_stokes, 0)

    if integrate:
        # Compute integrated values for P, PA before any rotation
        mask = deepcopy(data_mask).astype(bool)
-        I_diluted, Q_diluted, U_diluted = (Stokes[:3] * np.broadcast_to(mask, Stokes[:3].shape)).sum(axis=(1, 2))
+        I_diluted = I_stokes[mask].sum()
+        Q_diluted = Q_stokes[mask].sum()
+        U_diluted = U_stokes[mask].sum()
        I_diluted_err = np.sqrt(np.sum(Stokes_cov[0, 0][mask]))
        Q_diluted_err = np.sqrt(np.sum(Stokes_cov[1, 1][mask]))
        U_diluted_err = np.sqrt(np.sum(Stokes_cov[2, 2][mask]))
@@ -1444,19 +1504,26 @@ def compute_Stokes(data_array, error_array, data_mask, headers, FWHM=None, scale
header_stokes["PA_int"] = (PA_diluted, "Integrated polarization angle") header_stokes["PA_int"] = (PA_diluted, "Integrated polarization angle")
header_stokes["sPA_int"] = (np.ceil(PA_diluted_err * 10.0) / 10.0, "Integrated polarization angle error") header_stokes["sPA_int"] = (np.ceil(PA_diluted_err * 10.0) / 10.0, "Integrated polarization angle error")
return Stokes, Stokes_cov, header_stokes, Stokes_cov_stat return I_stokes, Q_stokes, U_stokes, Stokes_cov, header_stokes


-def compute_pol(Stokes, Stokes_cov, header_stokes, Stokes_cov_stat=None):
+def compute_pol(I_stokes, Q_stokes, U_stokes, Stokes_cov, header_stokes):
    """
    Compute the polarization degree (in %) and angle (in deg) and their
    respective errors from given Stokes parameters.
    ----------
    Inputs:
-    Stokes : numpy.ndarray
-        Image (2D floats) containing the Stokes I,Q,U,V fluxes
+    I_stokes : numpy.ndarray
+        Image (2D floats) containing the Stokes parameters accounting for
+        total intensity
+    Q_stokes : numpy.ndarray
+        Image (2D floats) containing the Stokes parameters accounting for
+        vertical/horizontal linear polarization intensity
+    U_stokes : numpy.ndarray
+        Image (2D floats) containing the Stokes parameters accounting for
+        +45/-45deg linear polarization intensity
    Stokes_cov : numpy.ndarray
-        Covariance matrix of the Stokes parameters I, Q, U, V.
+        Covariance matrix of the Stokes parameters I, Q, U.
    header_stokes : astropy.fits.header.Header
        Header file associated with the Stokes fluxes.
    ----------
@@ -1479,93 +1546,63 @@ def compute_pol(Stokes, Stokes_cov, header_stokes, Stokes_cov_stat=None):
        polarization angle.
    """
    # Polarization degree and angle computation
-    mask = Stokes[0] > 0.0
-    I_pol = np.zeros(Stokes[0].shape)
-    I_pol[mask] = np.sqrt(Stokes[1][mask] ** 2 + Stokes[2][mask] ** 2)
-    P = np.zeros(Stokes[0].shape)
-    P[mask] = I_pol[mask] / Stokes[0][mask]
-    PA = np.zeros(Stokes[0].shape)
-    PA[mask] = (90.0 / np.pi) * np.arctan2(Stokes[2][mask], Stokes[1][mask])
+    mask = I_stokes > 0.0
+    I_pol = np.zeros(I_stokes.shape)
+    I_pol[mask] = np.sqrt(Q_stokes[mask] ** 2 + U_stokes[mask] ** 2)
+    P = np.zeros(I_stokes.shape)
+    P[mask] = I_pol[mask] / I_stokes[mask]
+    PA = np.zeros(I_stokes.shape)
+    PA[mask] = (90.0 / np.pi) * np.arctan2(U_stokes[mask], Q_stokes[mask])
    if (P > 1).any():
        print("WARNING : found {0:d} pixels for which P > 1".format(P[P > 1.0].size))

    # Associated errors
    fmax = np.finfo(np.float64).max
-    s_P = np.ones(Stokes[0].shape) * fmax
-    s_PA = np.ones(Stokes[0].shape) * fmax
+    s_P = np.ones(I_stokes.shape) * fmax
+    s_PA = np.ones(I_stokes.shape) * fmax

    # Propagate previously computed errors
-    s_P[mask] = (1 / Stokes[0][mask]) * np.sqrt(
-        (
-            Stokes[1][mask] ** 2 * Stokes_cov[1, 1][mask]
-            + Stokes[2][mask] ** 2 * Stokes_cov[2, 2][mask]
-            + 2.0 * Stokes[1][mask] * Stokes[2][mask] * Stokes_cov[1, 2][mask]
-        )
-        / (Stokes[1][mask] ** 2 + Stokes[2][mask] ** 2)
-        + ((Stokes[1][mask] / Stokes[0][mask]) ** 2 + (Stokes[2][mask] / Stokes[0][mask]) ** 2) * Stokes_cov[0, 0][mask]
-        - 2.0 * (Stokes[1][mask] / Stokes[0][mask]) * Stokes_cov[0, 1][mask]
-        - 2.0 * (Stokes[2][mask] / Stokes[0][mask]) * Stokes_cov[0, 2][mask]
-    )
-    s_PA[mask] = (90.0 / (np.pi * (Stokes[1][mask] ** 2 + Stokes[2][mask] ** 2))) * np.sqrt(
-        Stokes[2][mask] ** 2 * Stokes_cov[1, 1][mask]
-        + Stokes[1][mask] ** 2 * Stokes_cov[2, 2][mask]
-        - 2.0 * Stokes[1][mask] * Stokes[2][mask] * Stokes_cov[1, 2][mask]
-    )
+    s_P[mask] = (1 / I_stokes[mask]) * np.sqrt(
+        (
+            Q_stokes[mask] ** 2 * Stokes_cov[1, 1][mask]
+            + U_stokes[mask] ** 2 * Stokes_cov[2, 2][mask]
+            + 2.0 * Q_stokes[mask] * U_stokes[mask] * Stokes_cov[1, 2][mask]
+        )
+        / (Q_stokes[mask] ** 2 + U_stokes[mask] ** 2)
+        + ((Q_stokes[mask] / I_stokes[mask]) ** 2 + (U_stokes[mask] / I_stokes[mask]) ** 2) * Stokes_cov[0, 0][mask]
+        - 2.0 * (Q_stokes[mask] / I_stokes[mask]) * Stokes_cov[0, 1][mask]
+        - 2.0 * (U_stokes[mask] / I_stokes[mask]) * Stokes_cov[0, 2][mask]
+    )
+    s_PA[mask] = (90.0 / (np.pi * (Q_stokes[mask] ** 2 + U_stokes[mask] ** 2))) * np.sqrt(
+        U_stokes[mask] ** 2 * Stokes_cov[1, 1][mask]
+        + Q_stokes[mask] ** 2 * Stokes_cov[2, 2][mask]
+        - 2.0 * Q_stokes[mask] * U_stokes[mask] * Stokes_cov[1, 2][mask]
+    )
    s_P[np.isnan(s_P)] = fmax
    s_PA[np.isnan(s_PA)] = fmax

-    # Compute the total exposure time so that
-    # Stokes[0]*exp_tot = N_tot the total number of events
-    N_obs = Stokes[0] * float(header_stokes["exptime"])
-    # Errors on P, PA supposing Poisson noise
-    s_P_P = np.ones(Stokes[0].shape) * fmax
-    s_PA_P = np.ones(Stokes[0].shape) * fmax
-    maskP = np.logical_and(mask, P > 0.0)
-    if Stokes_cov_stat is not None:
-        # If IQU covariance matrix containing only statistical error is given propagate to P and PA
-        # Catch Invalid value in sqrt when diagonal terms are big
-        with warnings.catch_warnings(record=True) as _:
-            s_P_P[maskP] = (
-                P[maskP]
-                / Stokes[0][maskP]
-                * np.sqrt(
-                    Stokes_cov_stat[0, 0][maskP]
-                    - 2.0
-                    / (Stokes[0][maskP] * P[maskP] ** 2)
-                    * (Stokes[1][maskP] * Stokes_cov_stat[0, 1][maskP] + Stokes[2][maskP] * Stokes_cov_stat[0, 2][maskP])
-                    + 1.0
-                    / (Stokes[0][maskP] ** 2 * P[maskP] ** 4)
-                    * (
-                        Stokes[1][maskP] ** 2 * Stokes_cov_stat[1, 1][maskP]
-                        + Stokes[2][maskP] ** 2 * Stokes_cov_stat[2, 2][maskP] * Stokes[1][maskP] * Stokes[2][maskP] * Stokes_cov_stat[1, 2][maskP]
-                    )
-                )
-            )
-            s_PA_P[maskP] = (
-                90.0
-                / (np.pi * Stokes[0][maskP] ** 2 * P[maskP] ** 2)
-                * (
-                    Stokes[1][maskP] ** 2 * Stokes_cov_stat[2, 2][maskP]
-                    + Stokes[2][maskP] * Stokes_cov_stat[1, 1][maskP]
-                    - 2.0 * Stokes[1][maskP] * Stokes[2][maskP] * Stokes_cov_stat[1, 2][maskP]
-                )
-            )
-    else:
-        # Approximate Poisson error for P and PA
-        s_P_P[mask] = np.sqrt(2.0 / N_obs[mask])
-        s_PA_P[maskP] = s_P_P[maskP] / P[maskP] * 90.0 / np.pi

    # Catch expected "OverflowWarning" as wrong pixels have an overflowing error
    with warnings.catch_warnings(record=True) as _:
-        mask2 = P**2 >= s_P_P**2
-        debiased_P = np.zeros(Stokes[0].shape)
-        debiased_P[mask2] = np.sqrt(P[mask2] ** 2 - s_P_P[mask2] ** 2)
+        mask2 = P**2 >= s_P**2
+        debiased_P = np.zeros(I_stokes.shape)
+        debiased_P[mask2] = np.sqrt(P[mask2] ** 2 - s_P[mask2] ** 2)
    if (debiased_P > 1.0).any():
        print("WARNING : found {0:d} pixels for which debiased_P > 100%".format(debiased_P[debiased_P > 1.0].size))

+    # Compute the total exposure time so that
+    # I_stokes*exp_tot = N_tot the total number of events
+    exp_tot = header_stokes["exptime"]
+    # print("Total exposure time : {} sec".format(exp_tot))
+    N_obs = I_stokes * exp_tot
+    # Errors on P, PA supposing Poisson noise
+    s_P_P = np.ones(I_stokes.shape) * fmax
+    s_P_P[mask] = np.sqrt(2.0) / np.sqrt(N_obs[mask]) * 100.0
+    s_PA_P = np.ones(I_stokes.shape) * fmax
+    s_PA_P[mask2] = s_P_P[mask2] / (2.0 * P[mask2]) * 180.0 / np.pi

    # Nan handling :
    P[np.isnan(P)] = 0.0
    s_P[np.isnan(s_P)] = fmax
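A minimal sketch of the quantities compute_pol derives, on hypothetical single-pixel values: the degree P = sqrt(Q**2 + U**2) / I, the angle PA = (90/pi) * arctan2(U, Q) (half the arctangent, expressed in degrees), and the classical quadratic debiasing that subtracts the error in quadrature when P exceeds it.

import numpy as np

I, Q, U = 10.0, 1.0, -0.5        # hypothetical Stokes fluxes
s_P = 0.02                       # hypothetical 1-sigma error on P
P = np.sqrt(Q**2 + U**2) / I
PA = (90.0 / np.pi) * np.arctan2(U, Q)
debiased_P = np.sqrt(P**2 - s_P**2) if P > s_P else 0.0
print(P, PA, debiased_P)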
@@ -1577,17 +1614,24 @@ def compute_pol(Stokes, Stokes_cov, header_stokes, Stokes_cov_stat=None):
    return P, debiased_P, s_P, s_P_P, PA, s_PA, s_PA_P


-def rotate_Stokes(Stokes, Stokes_cov, data_mask, header_stokes, Stokes_cov_stat=None, SNRi_cut=None):
+def rotate_Stokes(I_stokes, Q_stokes, U_stokes, Stokes_cov, data_mask, header_stokes, SNRi_cut=None):
    """
    Use scipy.ndimage.rotate to rotate I_stokes to an angle, and a rotation
    matrix to rotate Q, U of a given angle in degrees and update header
    orientation keyword.
    ----------
    Inputs:
-    Stokes : numpy.ndarray
-        Stokes cube (3D floats) containing the Stokes I, Q, U, V fluxes.
+    I_stokes : numpy.ndarray
+        Image (2D floats) containing the Stokes parameters accounting for
+        total intensity
+    Q_stokes : numpy.ndarray
+        Image (2D floats) containing the Stokes parameters accounting for
+        vertical/horizontal linear polarization intensity
+    U_stokes : numpy.ndarray
+        Image (2D floats) containing the Stokes parameters accounting for
+        +45/-45deg linear polarization intensity
    Stokes_cov : numpy.ndarray
-        Covariance matrix of the Stokes parameters I, Q, U, V.
+        Covariance matrix of the Stokes parameters I, Q, U.
    data_mask : numpy.ndarray
        2D boolean array delimiting the data to work on.
    header_stokes : astropy.fits.header.Header
@@ -1598,10 +1642,17 @@ def rotate_Stokes(Stokes, Stokes_cov, data_mask, header_stokes, Stokes_cov_stat=
        Defaults to None.
    ----------
    Returns:
-    Stokes : numpy.ndarray
-        Rotated Stokes cube (3D floats) containing the rotated Stokes I, Q, U, V fluxes.
+    new_I_stokes : numpy.ndarray
+        Rotated image (2D floats) containing the rotated Stokes parameters
+        accounting for total intensity
+    new_Q_stokes : numpy.ndarray
+        Rotated image (2D floats) containing the rotated Stokes parameters
+        accounting for vertical/horizontal linear polarization intensity
+    new_U_stokes : numpy.ndarray
+        Rotated image (2D floats) containing the rotated Stokes parameters
+        accounting for +45/-45deg linear polarization intensity.
    new_Stokes_cov : numpy.ndarray
-        Updated covariance matrix of the Stokes parameters I, Q, U, V.
+        Updated covariance matrix of the Stokes parameters I, Q, U.
    new_header_stokes : astropy.fits.header.Header
        Updated Header file associated with the Stokes fluxes accounting
        for the new orientation angle.
@@ -1610,73 +1661,82 @@ def rotate_Stokes(Stokes, Stokes_cov, data_mask, header_stokes, Stokes_cov_stat=
""" """
# Apply cuts # Apply cuts
if SNRi_cut is not None: if SNRi_cut is not None:
SNRi = Stokes[0] / np.sqrt(Stokes_cov[0, 0]) SNRi = I_stokes / np.sqrt(Stokes_cov[0, 0])
mask = SNRi < SNRi_cut mask = SNRi < SNRi_cut
eps = 1e-5 eps = 1e-5
for i in range(4): for i in range(I_stokes.shape[0]):
Stokes[i][mask] = eps * np.sqrt(Stokes_cov[i, i][mask]) for j in range(I_stokes.shape[1]):
if mask[i, j]:
I_stokes[i, j] = eps * np.sqrt(Stokes_cov[0, 0][i, j])
Q_stokes[i, j] = eps * np.sqrt(Stokes_cov[1, 1][i, j])
U_stokes[i, j] = eps * np.sqrt(Stokes_cov[2, 2][i, j])
# Rotate Stokes I, Q, U using rotation matrix # Rotate I_stokes, Q_stokes, U_stokes using rotation matrix
ang = -float(header_stokes["ORIENTAT"]) ang = -float(header_stokes["ORIENTAT"])
alpha = np.pi / 180.0 * ang alpha = np.pi / 180.0 * ang
mrot = np.array([[1.0, 0.0, 0.0], [0.0, np.cos(2.0 * alpha), np.sin(2.0 * alpha)], [0, -np.sin(2.0 * alpha), np.cos(2.0 * alpha)]]) mrot = np.array([[1.0, 0.0, 0.0], [0.0, np.cos(2.0 * alpha), np.sin(2.0 * alpha)], [0, -np.sin(2.0 * alpha), np.cos(2.0 * alpha)]])
old_center = np.array(Stokes.shape[1:]) / 2 old_center = np.array(I_stokes.shape) / 2
shape = np.fix(np.array(Stokes.shape[1:]) * np.sqrt(2.5)).astype(int) shape = np.fix(np.array(I_stokes.shape) * np.sqrt(2.5)).astype(int)
new_center = np.array(shape) / 2 new_center = np.array(shape) / 2
Stokes = zeropad(Stokes, (*Stokes.shape[:-2], *shape)) I_stokes = zeropad(I_stokes, shape)
Q_stokes = zeropad(Q_stokes, shape)
U_stokes = zeropad(U_stokes, shape)
data_mask = zeropad(data_mask, shape) data_mask = zeropad(data_mask, shape)
Stokes_cov = zeropad(Stokes_cov, (*Stokes_cov.shape[:-2], *shape)) Stokes_cov = zeropad(Stokes_cov, [*Stokes_cov.shape[:-2], *shape])
new_Stokes = np.zeros((*Stokes.shape[:-2], *shape)) new_I_stokes = np.zeros(shape)
new_Q_stokes = np.zeros(shape)
new_U_stokes = np.zeros(shape)
new_Stokes_cov = np.zeros((*Stokes_cov.shape[:-2], *shape)) new_Stokes_cov = np.zeros((*Stokes_cov.shape[:-2], *shape))
# Rotate original images using scipy.ndimage.rotate # Rotate original images using scipy.ndimage.rotate
new_Stokes = sc_rotate(Stokes, ang, axes=(1, 2), order=1, reshape=False, cval=0.0) new_I_stokes = sc_rotate(I_stokes, ang, order=1, reshape=False, cval=0.0)
new_Q_stokes = sc_rotate(Q_stokes, ang, order=1, reshape=False, cval=0.0)
new_U_stokes = sc_rotate(U_stokes, ang, order=1, reshape=False, cval=0.0)
new_data_mask = sc_rotate(data_mask.astype(float) * 10.0, ang, order=1, reshape=False, cval=0.0) new_data_mask = sc_rotate(data_mask.astype(float) * 10.0, ang, order=1, reshape=False, cval=0.0)
new_data_mask[new_data_mask < 1.0] = 0.0 new_data_mask[new_data_mask < 1.0] = 0.0
new_data_mask = new_data_mask.astype(bool) new_data_mask = new_data_mask.astype(bool)
new_Stokes_cov = np.abs(sc_rotate(Stokes_cov, ang, axes=(2, 3), order=1, reshape=False, cval=0.0)) for i in range(3):
for j in range(3):
new_Stokes_cov[i, j] = sc_rotate(Stokes_cov[i, j], ang, order=1, reshape=False, cval=0.0)
new_Stokes_cov[i, i] = np.abs(new_Stokes_cov[i, i])
for i in range(shape[0]): for i in range(shape[0]):
for j in range(shape[1]): for j in range(shape[1]):
new_Stokes[:3, i, j] = np.dot(mrot, new_Stokes[:3, i, j]) new_I_stokes[i, j], new_Q_stokes[i, j], new_U_stokes[i, j] = np.dot(mrot, np.array([new_I_stokes[i, j], new_Q_stokes[i, j], new_U_stokes[i, j]])).T
new_Stokes_cov[:3, :3, i, j] = np.dot(mrot, np.dot(new_Stokes_cov[:3, :3, i, j], mrot.T)) new_Stokes_cov[:, :, i, j] = np.dot(mrot, np.dot(new_Stokes_cov[:, :, i, j], mrot.T))
if Stokes_cov_stat is not None:
Stokes_cov_stat = zeropad(Stokes_cov_stat, [*Stokes_cov_stat.shape[:-2], *shape])
new_Stokes_cov_stat = np.zeros((*Stokes_cov_stat.shape[:-2], *shape))
for i in range(3):
for j in range(3):
new_Stokes_cov_stat[i, j] = sc_rotate(Stokes_cov_stat[i, j], ang, order=1, reshape=False, cval=0.0)
new_Stokes_cov_stat[i, i] = np.abs(new_Stokes_cov_stat[i, i])
for i in range(shape[0]):
for j in range(shape[1]):
new_Stokes_cov_stat[:3, :3, i, j] = np.dot(mrot, np.dot(new_Stokes_cov_stat[:3, :3, i, j], mrot.T))
# Update headers to new angle # Update headers to new angle
mrot = np.array([[np.cos(-alpha), -np.sin(-alpha)], [np.sin(-alpha), np.cos(-alpha)]]) mrot = np.array([[np.cos(-alpha), -np.sin(-alpha)], [np.sin(-alpha), np.cos(-alpha)]])
new_header_stokes = deepcopy(header_stokes) new_header_stokes = deepcopy(header_stokes)
new_wcs = WCS(header_stokes).deepcopy() new_wcs = WCS(header_stokes).celestial.deepcopy()
new_wcs.wcs.pc[1:] = np.dot(mrot, new_wcs.wcs.pc[1:]) new_wcs.wcs.pc = np.dot(mrot, new_wcs.wcs.pc)
new_wcs.wcs.crpix[1:] = np.dot(mrot, new_wcs.wcs.crpix[1:] - old_center[::-1]) + new_center[::-1] new_wcs.wcs.crpix = np.dot(mrot, new_wcs.wcs.crpix - old_center[::-1]) + new_center[::-1]
new_wcs.array_shape = shape
new_wcs.wcs.set() new_wcs.wcs.set()
for key, val in new_wcs.to_header().items(): for key, val in new_wcs.to_header().items():
new_header_stokes.set(key, val) new_header_stokes.set(key, val)
new_header_stokes["NAXIS1"], new_header_stokes["NAXIS2"] = new_wcs.array_shape
new_header_stokes["ORIENTAT"] += ang new_header_stokes["ORIENTAT"] += ang
# Nan handling : # Nan handling :
fmax = np.finfo(np.float64).max fmax = np.finfo(np.float64).max
new_Stokes[np.isnan(new_Stokes)] = 0.0 new_I_stokes[np.isnan(new_I_stokes)] = 0.0
new_Stokes[1:][np.broadcast_to(new_Stokes[0] == 0.0, Stokes[1:].shape)] = 0.0 new_Q_stokes[new_I_stokes == 0.0] = 0.0
new_U_stokes[new_I_stokes == 0.0] = 0.0
new_Q_stokes[np.isnan(new_Q_stokes)] = 0.0
new_U_stokes[np.isnan(new_U_stokes)] = 0.0
new_Stokes_cov[np.isnan(new_Stokes_cov)] = fmax new_Stokes_cov[np.isnan(new_Stokes_cov)] = fmax
# Compute updated integrated values for P, PA # Compute updated integrated values for P, PA
mask = deepcopy(new_data_mask).astype(bool) mask = deepcopy(new_data_mask).astype(bool)
I_diluted, Q_diluted, U_diluted = (new_Stokes[:3] * np.broadcast_to(mask, Stokes[:3].shape)).sum(axis=(1, 2)) I_diluted = new_I_stokes[mask].sum()
Q_diluted = new_Q_stokes[mask].sum()
U_diluted = new_U_stokes[mask].sum()
I_diluted_err = np.sqrt(np.sum(new_Stokes_cov[0, 0][mask])) I_diluted_err = np.sqrt(np.sum(new_Stokes_cov[0, 0][mask]))
Q_diluted_err = np.sqrt(np.sum(new_Stokes_cov[1, 1][mask])) Q_diluted_err = np.sqrt(np.sum(new_Stokes_cov[1, 1][mask]))
U_diluted_err = np.sqrt(np.sum(new_Stokes_cov[2, 2][mask])) U_diluted_err = np.sqrt(np.sum(new_Stokes_cov[2, 2][mask]))
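A minimal sketch of the rotation applied pixel by pixel above (values hypothetical): rotating the reference frame by alpha leaves I unchanged and rotates the (Q, U) pair by 2*alpha, which is what the 3x3 mrot encodes.

import numpy as np

alpha = np.deg2rad(30.0)           # hypothetical rotation angle
c, s = np.cos(2 * alpha), np.sin(2 * alpha)
mrot = np.array([[1.0, 0.0, 0.0], [0.0, c, s], [0.0, -s, c]])
I, Q, U = 10.0, 1.0, -0.5
print(mrot @ np.array([I, Q, U]))  # I unchanged, (Q, U) rotated by 2*alpha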
@@ -1702,10 +1762,7 @@ def rotate_Stokes(Stokes, Stokes_cov, data_mask, header_stokes, Stokes_cov_stat=
new_header_stokes["PA_int"] = (PA_diluted, "Integrated polarization angle") new_header_stokes["PA_int"] = (PA_diluted, "Integrated polarization angle")
new_header_stokes["sPA_int"] = (np.ceil(PA_diluted_err * 10.0) / 10.0, "Integrated polarization angle error") new_header_stokes["sPA_int"] = (np.ceil(PA_diluted_err * 10.0) / 10.0, "Integrated polarization angle error")
if Stokes_cov_stat is not None: return new_I_stokes, new_Q_stokes, new_U_stokes, new_Stokes_cov, new_data_mask, new_header_stokes
return new_Stokes, new_Stokes_cov, new_data_mask, new_header_stokes, new_Stokes_cov_stat
else:
return new_Stokes, new_Stokes_cov, new_data_mask, new_header_stokes


def rotate_data(data_array, error_array, data_mask, headers):
@@ -1763,9 +1820,11 @@ def rotate_data(data_array, error_array, data_mask, headers):
        new_wcs = WCS(header).celestial.deepcopy()
        new_wcs.wcs.pc[:2, :2] = np.dot(mrot, new_wcs.wcs.pc[:2, :2])
        new_wcs.wcs.crpix[:2] = np.dot(mrot, new_wcs.wcs.crpix[:2] - old_center[::-1]) + new_center[::-1]
+        new_wcs.array_shape = shape
        new_wcs.wcs.set()
        for key, val in new_wcs.to_header().items():
            new_header[key] = val
+        new_header["NAXIS1"], new_header["NAXIS2"] = new_wcs.array_shape
        new_header["ORIENTAT"] = np.arccos(new_wcs.celestial.wcs.pc[0, 0]) * 180.0 / np.pi
        new_header["ROTATE"] = ang
        new_headers.append(new_header)
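A minimal sketch of the ORIENTAT update above (standalone, hypothetical angle): for a pure-rotation PC matrix, arccos(PC[0, 0]) recovers the rotation angle, up to the sign that the off-diagonal PC terms would disambiguate.

import numpy as np

ang = np.deg2rad(25.0)
PC = np.array([[np.cos(ang), -np.sin(ang)], [np.sin(ang), np.cos(ang)]])
print(np.degrees(np.arccos(PC[0, 0])))   # 25.0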

View File

@@ -1,5 +1,4 @@
import numpy as np
-from matplotlib.transforms import Bbox, BboxTransform


def rot2D(ang):
@@ -155,50 +154,6 @@ def sci_not(v, err, rnd=1, out=str):
    return *output[1:], -power
class cursor_data:
    """
    Object to overwrite data getter and formatter in interactive plots.
    """

    def __init__(self, im, error=None, fmt=None) -> None:
        self.im = im
        self.data = im.get_array()
        self.fmt = "{:.2f}" if fmt is None else fmt
        self.err = error

    def set_err(self, err) -> None:
        if self.data.shape != err.shape:
            raise ValueError("Error and Data don't have the same shape")
        else:
            self.err = err

    def set_fmt(self, fmt) -> None:
        self.fmt = fmt

    def get(self, event):
        xmin, xmax, ymin, ymax = self.im.get_extent()
        if self.im.origin == "upper":
            ymin, ymax = ymax, ymin
        data_extent = Bbox([[xmin, ymin], [xmax, ymax]])
        array_extent = Bbox([[0, 0], [self.data.shape[1], self.data.shape[0]]])
        trans = self.im.get_transform().inverted()
        trans += BboxTransform(boxin=data_extent, boxout=array_extent)
        point = trans.transform([event.x, event.y])
        if any(np.isnan(point)):
            return None
        j, i = point.astype(int)
        # Clip the coordinates at array bounds
        if not (0 <= i < self.data.shape[0]) or not (0 <= j < self.data.shape[1]):
            return None
        elif self.err is not None:
            return self.data[i, j], self.err[i, j]
        else:
            return self.data

    def format(self, y) -> str:
        return self.fmt.format(*y)
def wcs_CD_to_PC(CD):
""" """
Return the position angle in degrees to the North direction of a wcs Return the position angle in degrees to the North direction of a wcs
@@ -242,83 +197,3 @@ def wcs_PA(PC, cdelt):
    rot2 = np.pi / 2.0 - np.arctan2(abs(cdelt[0]) * PC[0, 1], cdelt[1] * PC[1, 1])
    orient = 0.5 * (rot + rot2) * 180.0 / np.pi
    return orient
def add_stokes_axis_to_header(header, ind=0):
    """
    Add a new Stokes axis to the WCS cards in the header.
    ----------
    Inputs:
    header : astropy.io.fits.header.Header
        The header in which the WCS to work on is saved.
    ind : int, optional
        Index of the WCS to insert the new Stokes axis in front of.
        To add at the end, do add_before_ind = wcs.wcs.naxis
        The beginning is at position 0.
        Default to 0.
    ----------
    Returns:
    new_head : astropy.io.fits.header.Header
        A new Header instance with an additional Stokes axis
    """
    from astropy.wcs import WCS
    from astropy.wcs.utils import add_stokes_axis_to_wcs

    wcs = WCS(header).deepcopy()
    wcs_Stokes = add_stokes_axis_to_wcs(wcs, ind).deepcopy()
    wcs_Stokes.array_shape = (*wcs.array_shape[ind:], 4, *wcs.array_shape[:ind]) if ind < wcs.wcs.naxis else (4, *wcs.array_shape)
    new_head = header.copy()
    new_head["NAXIS"] = wcs_Stokes.wcs.naxis
    for key in wcs.to_header().keys():
        if key not in wcs_Stokes.to_header().keys():
            del new_head[key]
    for key, val in (
        list(wcs_Stokes.to_header().items())
        + [("NAXIS%d" % (i + 1), k) for i, k in enumerate(wcs_Stokes.array_shape[::-1])]
        + [("CUNIT%d" % (ind + 1), "STOKES")]
    ):
        if key not in header.keys() and key[:-1] + str(wcs.wcs.naxis) in header.keys():
            new_head.insert(key[:-1] + str(wcs.wcs.naxis), (key, val), after=int(key[-1]) < wcs.wcs.naxis)
        elif key not in header.keys() and key[:2] + str(wcs.wcs.naxis) + key[2:-1] + str(wcs.wcs.naxis) in header.keys():
            new_head.insert(key[:2] + str(wcs.wcs.naxis) + key[2:-1] + str(wcs.wcs.naxis), (key, val), after=int(key[-1]) < wcs.wcs.naxis)
        else:
            new_head[key] = val
    return new_head
def remove_stokes_axis_from_header(header):
    """
    Remove a Stokes axis from the WCS cards in the header.
    ----------
    Inputs:
    header : astropy.io.fits.header.Header
        The header in which the WCS to work on is saved.
    ----------
    Returns:
    new_head : astropy.io.fits.header.Header
        A new Header instance with only a celestial WCS.
    """
    from astropy.wcs import WCS

    wcs = WCS(header).deepcopy()
    new_wcs = WCS(header).celestial.deepcopy()
    new_head = header.copy()
    if "NAXIS%d" % (new_wcs.wcs.naxis + 1) in new_head.keys():
        del new_head["NAXIS%d" % (new_wcs.wcs.naxis + 1)]
    new_head["NAXIS"] = new_wcs.wcs.naxis
    for i, k in enumerate(new_wcs.array_shape[::-1]):
        new_head["NAXIS%d" % (i + 1)] = k
    for key in list(WCS(header).to_header().keys()) + list(
        np.unique([["PC%d_%d" % (i + 1, j + 1) for i in range(wcs.wcs.naxis)] for j in range(wcs.wcs.naxis)])
    ):
        if key in new_head.keys() and key not in new_wcs.to_header().keys():
            del new_head[key]
    for key, val in new_wcs.to_header().items():
        if key not in new_head.keys() and key[:-1] + str(wcs.wcs.naxis) in new_head.keys():
            new_head.insert(key[:-1] + str(wcs.wcs.naxis), (key, val), after=True)
        elif key not in new_head.keys() and key[:2] + str(wcs.wcs.naxis) + key[2:-1] + str(wcs.wcs.naxis) in new_head.keys():
            new_head.insert(key[:2] + str(wcs.wcs.naxis) + key[2:-1] + str(wcs.wcs.naxis), (key, val), after=True)
        else:
            new_head[key] = val
    return new_head

View File

@@ -1,6 +1,5 @@
#!/usr/bin/env python3
# -*- coding:utf-8 -*-
-from copy import deepcopy
from pathlib import Path
from sys import path as syspath
@@ -27,7 +26,7 @@ def same_reduction(infiles):
        except KeyError:
            pass
        test_IQU = True
-        for look in ["STOKES", "STOKES_COV"]:
+        for look in ["I_stokes", "Q_stokes", "U_stokes", "IQU_cov_matrix"]:
            test_IQU *= look in datatype
        params["IQU"].append(test_IQU)
        # test for orientation and pixel size
@@ -89,78 +88,73 @@ def same_obs(infiles, data_folder):
def combine_Stokes(infiles):
    """
-    Combine Stokes matrices from different observations of a same object.
+    Combine I, Q, U from different observations of a same object.
    """
    from astropy.io.fits import open as fits_open
    from lib.reduction import align_data, zeropad
-    from lib.utils import remove_stokes_axis_from_header
    from scipy.ndimage import shift as sc_shift

-    Stokes_array, Stokes_cov_array, Stokes_cov_stat_array, data_mask, headers = [], [], [], [], []
+    I_array, Q_array, U_array, IQU_cov_array, data_mask, headers = [], [], [], [], [], []
    shape = np.array([0, 0])
    for file in infiles:
        with fits_open(file) as f:
            headers.append(f[0].header)
-            Stokes_array.append(f["stokes"].data)
-            Stokes_cov_array.append(f["stokes_cov"].data)
-            Stokes_cov_stat_array.append(f["stokes_cov_stat"].data)
+            I_array.append(f["I_stokes"].data)
+            Q_array.append(f["Q_stokes"].data)
+            U_array.append(f["U_stokes"].data)
+            IQU_cov_array.append(f["IQU_cov_matrix"].data)
            data_mask.append(f["data_mask"].data.astype(bool))
-            shape[0] = np.max([shape[0], f["stokes"].data[0].shape[0]])
-            shape[1] = np.max([shape[1], f["stokes"].data[0].shape[1]])
+            shape[0] = np.max([shape[0], f["I_stokes"].data.shape[0]])
+            shape[1] = np.max([shape[1], f["I_stokes"].data.shape[1]])
    exposure_array = np.array([float(head["EXPTIME"]) for head in headers])

    shape += np.array([5, 5])
    data_mask = np.sum([zeropad(mask, shape) for mask in data_mask], axis=0).astype(bool)
-    Stokes_array = np.array([[zeropad(stk[i], shape) for i in range(4)] for stk in Stokes_array])
-    Stokes_cov_array = np.array([[[zeropad(cov[i, j], shape) for j in range(4)] for i in range(4)] for cov in Stokes_cov_array])
-    Stokes_cov_stat_array = np.array([[[zeropad(cov_stat[i, j], shape) for j in range(4)] for i in range(4)] for cov_stat in Stokes_cov_stat_array])
-    I_array = deepcopy(Stokes_array[:, 0])
-    sI_array = deepcopy(np.sqrt(Stokes_cov_array[:, 0, 0]))
-    heads = [remove_stokes_axis_from_header(head) for head in headers]
-    _, _, _, _, shifts, errors = align_data(
-        I_array, heads, error_array=sI_array, background=sI_array[:, 0, 0], data_mask=data_mask, ref_center="center", return_shifts=True
-    )
+    I_array = np.array([zeropad(I, shape) for I in I_array])
+    Q_array = np.array([zeropad(Q, shape) for Q in Q_array])
+    U_array = np.array([zeropad(U, shape) for U in U_array])
+    IQU_cov_array = np.array([[[zeropad(cov[i, j], shape) for j in range(3)] for i in range(3)] for cov in IQU_cov_array])
+    sI_array = np.sqrt(IQU_cov_array[:, 0, 0])
+    sQ_array = np.sqrt(IQU_cov_array[:, 1, 1])
+    sU_array = np.sqrt(IQU_cov_array[:, 2, 2])
+
+    _, _, _, _, shifts, errors = align_data(I_array, headers, error_array=sI_array, data_mask=data_mask, ref_center="center", return_shifts=True)

    data_mask_aligned = np.sum([sc_shift(data_mask, s, order=1, cval=0.0) for s in shifts], axis=0).astype(bool)
-    Stokes_aligned = np.array([[sc_shift(stk[i], s, order=1, cval=0.0) for i in range(4)] for stk, s in zip(Stokes_array, shifts)])
-    Stokes_cov_aligned = np.array(
-        [[[sc_shift(cov[i, j], s, order=1, cval=0.0) for j in range(4)] for i in range(4)] for cov, s in zip(Stokes_cov_array, shifts)]
-    )
-    Stokes_cov_stat_aligned = np.array(
-        [[[sc_shift(cov_stat[i, j], s, order=1, cval=0.0) for j in range(4)] for i in range(4)] for cov_stat, s in zip(Stokes_cov_stat_array, shifts)]
-    )
+    I_aligned, sI_aligned = (
+        np.array([sc_shift(I, s, order=1, cval=0.0) for I, s in zip(I_array, shifts)]),
+        np.array([sc_shift(sI, s, order=1, cval=0.0) for sI, s in zip(sI_array, shifts)]),
+    )
+    Q_aligned, sQ_aligned = (
+        np.array([sc_shift(Q, s, order=1, cval=0.0) for Q, s in zip(Q_array, shifts)]),
+        np.array([sc_shift(sQ, s, order=1, cval=0.0) for sQ, s in zip(sQ_array, shifts)]),
+    )
+    U_aligned, sU_aligned = (
+        np.array([sc_shift(U, s, order=1, cval=0.0) for U, s in zip(U_array, shifts)]),
+        np.array([sc_shift(sU, s, order=1, cval=0.0) for sU, s in zip(sU_array, shifts)]),
+    )
+    IQU_cov_aligned = np.array([[[sc_shift(cov[i, j], s, order=1, cval=0.0) for j in range(3)] for i in range(3)] for cov, s in zip(IQU_cov_array, shifts)])

-    Stokes_combined = np.zeros((4, shape[0], shape[1]))
-    for i in range(4):
-        Stokes_combined[i] = np.sum([exp * stk for exp, stk in zip(exposure_array, Stokes_aligned[:, i])], axis=0) / exposure_array.sum()
-    Stokes_cov_combined = np.zeros((4, 4, shape[0], shape[1]))
-    Stokes_cov_stat_combined = np.zeros((4, 4, shape[0], shape[1]))
-    for i in range(4):
-        Stokes_cov_combined[i, i] = np.sum([exp**2 * cov for exp, cov in zip(exposure_array, Stokes_cov_aligned[:, i, i])], axis=0) / exposure_array.sum() ** 2
-        Stokes_cov_stat_combined[i, i] = (
-            np.sum([exp**2 * cov_stat for exp, cov_stat in zip(exposure_array, Stokes_cov_stat_aligned[:, i, i])], axis=0) / exposure_array.sum() ** 2
-        )
-        for j in [x for x in range(4) if x != i]:
-            Stokes_cov_combined[i, j] = np.sqrt(
-                np.sum([exp**2 * cov**2 for exp, cov in zip(exposure_array, Stokes_cov_aligned[:, i, j])], axis=0) / exposure_array.sum() ** 2
-            )
-            Stokes_cov_combined[j, i] = np.sqrt(
-                np.sum([exp**2 * cov**2 for exp, cov in zip(exposure_array, Stokes_cov_aligned[:, j, i])], axis=0) / exposure_array.sum() ** 2
-            )
-            Stokes_cov_stat_combined[i, j] = np.sqrt(
-                np.sum([exp**2 * cov_stat**2 for exp, cov_stat in zip(exposure_array, Stokes_cov_stat_aligned[:, i, j])], axis=0) / exposure_array.sum() ** 2
-            )
-            Stokes_cov_stat_combined[j, i] = np.sqrt(
-                np.sum([exp**2 * cov_stat**2 for exp, cov_stat in zip(exposure_array, Stokes_cov_stat_aligned[:, j, i])], axis=0) / exposure_array.sum() ** 2
-            )
+    I_combined = np.sum([exp * I for exp, I in zip(exposure_array, I_aligned)], axis=0) / exposure_array.sum()
+    Q_combined = np.sum([exp * Q for exp, Q in zip(exposure_array, Q_aligned)], axis=0) / exposure_array.sum()
+    U_combined = np.sum([exp * U for exp, U in zip(exposure_array, U_aligned)], axis=0) / exposure_array.sum()
+    IQU_cov_combined = np.zeros((3, 3, shape[0], shape[1]))
+    for i in range(3):
+        IQU_cov_combined[i, i] = np.sum([exp**2 * cov for exp, cov in zip(exposure_array, IQU_cov_aligned[:, i, i])], axis=0) / exposure_array.sum() ** 2
+        for j in [x for x in range(3) if x != i]:
+            IQU_cov_combined[i, j] = np.sqrt(
+                np.sum([exp**2 * cov**2 for exp, cov in zip(exposure_array, IQU_cov_aligned[:, i, j])], axis=0) / exposure_array.sum() ** 2
+            )
+            IQU_cov_combined[j, i] = np.sqrt(
+                np.sum([exp**2 * cov**2 for exp, cov in zip(exposure_array, IQU_cov_aligned[:, j, i])], axis=0) / exposure_array.sum() ** 2
+            )

    header_combined = headers[0]
    header_combined["EXPTIME"] = exposure_array.sum()

-    return Stokes_combined, Stokes_cov_combined, Stokes_cov_stat_combined, data_mask_aligned, header_combined
+    return I_combined, Q_combined, U_combined, IQU_cov_combined, data_mask_aligned, header_combined
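A minimal sketch of the exposure-time weighting combine_Stokes applies (standalone, with tiny hypothetical maps): per-observation images are averaged with weights exp/sum(exp), so their variances combine with weights exp**2/sum(exp)**2.

import numpy as np

exp = np.array([1000.0, 2000.0])                 # hypothetical exposure times (s)
I_maps = np.array([[[1.0, 2.0]], [[1.2, 1.8]]])  # two 1x2 toy images
var_maps = np.array([[[0.01, 0.01]], [[0.02, 0.02]]])

I_comb = np.sum(exp[:, None, None] * I_maps, axis=0) / exp.sum()
var_comb = np.sum(exp[:, None, None] ** 2 * var_maps, axis=0) / exp.sum() ** 2
print(I_comb, np.sqrt(var_comb))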


def main(infiles, target=None, output_dir="./data/"):
@@ -196,24 +190,21 @@ def main(infiles, target=None, output_dir="./data/"):
    infiles = new_infiles

-    Stokes_combined, Stokes_cov_combined, Stokes_cov_stat_combined, data_mask_combined, header_combined = combine_Stokes(infiles=infiles)
-    Stokes_combined, Stokes_cov_combined, data_mask_combined, header_combined, Stokes_cov_stat_combined = rotate_Stokes(
-        Stokes=Stokes_combined,
-        Stokes_cov=Stokes_cov_combined,
-        Stokes_cov_stat=Stokes_cov_stat_combined,
-        data_mask=data_mask_combined,
-        header_stokes=header_combined,
+    I_combined, Q_combined, U_combined, IQU_cov_combined, data_mask_combined, header_combined = combine_Stokes(infiles=infiles)
+    I_combined, Q_combined, U_combined, IQU_cov_combined, data_mask_combined, header_combined = rotate_Stokes(
+        I_stokes=I_combined, Q_stokes=Q_combined, U_stokes=U_combined, Stokes_cov=IQU_cov_combined, data_mask=data_mask_combined, header_stokes=header_combined
    )
    P, debiased_P, s_P, s_P_P, PA, s_PA, s_PA_P = compute_pol(
-        Stokes=Stokes_combined, Stokes_cov=Stokes_cov_combined, Stokes_cov_stat=Stokes_cov_stat_combined, header_stokes=header_combined
+        I_stokes=I_combined, Q_stokes=Q_combined, U_stokes=U_combined, Stokes_cov=IQU_cov_combined, header_stokes=header_combined
    )

    filename = header_combined["FILENAME"]
    figname = "_".join([target, filename[filename.find("FOC_") :], "combined"])
-    Stokes_c = save_Stokes(
-        Stokes=Stokes_combined,
-        Stokes_cov=Stokes_cov_combined,
-        Stokes_cov_stat=Stokes_cov_stat_combined,
+    Stokes_combined = save_Stokes(
+        I_stokes=I_combined,
+        Q_stokes=Q_combined,
+        U_stokes=U_combined,
+        Stokes_cov=IQU_cov_combined,
        P=P,
        debiased_P=debiased_P,
        s_P=s_P,
@@ -228,7 +219,7 @@ def main(infiles, target=None, output_dir="./data/"):
return_hdul=True, return_hdul=True,
) )
pol_map(Stokes_c, **kwargs) pol_map(Stokes_combined, **kwargs)
return "/".join([data_folder, figname + ".fits"]) return "/".join([data_folder, figname + ".fits"])

View File

@@ -1,311 +0,0 @@
#!/usr/bin/python
# -*- coding:utf-8 -*-
from pathlib import Path
from sys import path as syspath
syspath.append(str(Path(__file__).parent.parent))
from os.path import join as path_join
import matplotlib.pyplot as plt
import numpy as np
from astropy.io import fits
from astropy.wcs import WCS
from lib.background import bin_centers, gauss
from lib.deconvolve import zeropad
from lib.plots import princ_angle
from lib.reduction import align_data
from lib.utils import remove_stokes_axis_from_header
from matplotlib.colors import LogNorm
from scipy.ndimage import shift
from scipy.optimize import curve_fit
root_dir = path_join("/home/tibeuleu/FOC_Reduction/")
root_dir_K = path_join(root_dir, "Kishimoto", "output")
root_dir_S = path_join(root_dir, "Code")
root_dir_data_S = path_join(root_dir, "data", "NGC1068", "5144")
root_dir_plot_S = path_join(root_dir, "plots", "NGC1068", "5144")
filename_S = "NGC1068_K_FOC_b10.00px.fits"
plt.rcParams.update({"font.size": 15})
SNRi_cut = 30.0
SNRp_cut = 3.0
data_K = {}
data_S = {}
for d, i in zip(["I", "Q", "U", "P", "PA", "sI", "sQ", "sU", "sP", "sPA"], [[0, 0], [0, 1], [0, 2], 4, 7, [2, 0, 0], [2, 1, 1], [2, 2, 2], 5, 8]):
    data_K[d] = np.loadtxt(path_join(root_dir_K, d + ".txt"))
    with fits.open(path_join(root_dir_data_S, filename_S)) as f:
        if type(i) is not int:
            data_S[d] = f[i[0]].data[*i[1:]] if d[0] != "s" else np.sqrt(f[i[0]].data[*i[1:]])
        else:
            data_S[d] = f[i].data
        if d == "I":
            header = f[i[0]].header
header = remove_stokes_axis_from_header(header)
wcs = WCS(header).celestial
convert_flux = header["photflam"]
bkg_S = np.median(data_S["I"]) / 3
bkg_K = np.median(data_K["I"]) / 3
# zero-pad the data so both maps share the same array size
shape = data_S["I"].shape
for d in data_K:
    data_K[d] = zeropad(data_K[d], shape)
# shift the arrays so the same information falls in the same pixel
data_arr, error_ar, heads, data_msk, shifts, shifts_err = align_data(
    np.array([data_S["I"], data_K["I"]]),
    [header, header],
    error_array=np.array([data_S["sI"], data_K["sI"]]),
    background=np.array([bkg_S, bkg_K]),
    upsample_factor=10.0,
    ref_center="center",
    return_shifts=True,
)
for d in data_K:
    data_K[d] = shift(data_K[d], shifts[1], order=1, cval=0.0)
# compute polarization components from the shifted arrays
for d in [data_S, data_K]:
    for i in d:
        d[i][np.isnan(d[i])] = 0.0
    d["P"] = np.where(np.logical_and(np.isfinite(d["I"]), d["I"] > 0.0), np.sqrt(d["Q"] ** 2 + d["U"] ** 2) / d["I"], 0.0)
    d["sP"] = np.where(
        np.logical_and(np.isfinite(d["I"]), d["I"] > 0.0),
        np.sqrt(
            (d["Q"] ** 2 * d["sQ"] ** 2 + d["U"] ** 2 * d["sU"] ** 2) / (d["Q"] ** 2 + d["U"] ** 2)
            + ((d["Q"] / d["I"]) ** 2 + (d["U"] / d["I"]) ** 2) * d["sI"] ** 2
        )
        / d["I"],
        0.0,
    )
    d["d_P"] = np.where(np.logical_and(np.isfinite(d["P"]), np.isfinite(d["sP"])), np.sqrt(d["P"] ** 2 - d["sP"] ** 2), 0.0)
    d["PA"] = 0.5 * np.arctan2(d["U"], d["Q"]) + np.pi
    d["SNRp"] = np.zeros(d["d_P"].shape)
    d["SNRp"][d["sP"] > 0.0] = d["d_P"][d["sP"] > 0.0] / d["sP"][d["sP"] > 0.0]
    d["SNRi"] = np.zeros(d["I"].shape)
    d["SNRi"][d["sI"] > 0.0] = d["I"][d["sI"] > 0.0] / d["sI"][d["sI"] > 0.0]
    d["mask"] = np.logical_and(d["SNRi"] > SNRi_cut, d["SNRp"] > SNRp_cut)
# keep only the pixels passing the cut in both maps
data_S["mask"] = data_K["mask"] = np.logical_and(data_S["mask"], data_K["mask"])
#
# Compute histogram of the measured polarization degree in the cut
#
bins = int(data_S["mask"].sum() / 5)
bin_size = 1.0 / bins
mod_p = np.linspace(0.0, 1.0, 300)
for d in [data_S, data_K]:
    d["hist"], d["bin_edges"] = np.histogram(d["d_P"][d["mask"]], bins=bins, range=(0.0, 1.0))
    d["binning"] = bin_centers(d["bin_edges"])
    peak, bins_fwhm = d["binning"][np.argmax(d["hist"])], d["binning"][d["hist"] > d["hist"].max() / 2.0]
    fwhm = bins_fwhm[-1] - bins_fwhm[0]
    p0 = [d["hist"].max(), peak, fwhm]
    try:
        popt, pcov = curve_fit(gauss, d["binning"], d["hist"], p0=p0)
    except RuntimeError:
        popt = p0
    d["hist_chi2"] = np.sum((d["hist"] - gauss(d["binning"], *popt)) ** 2) / d["hist"].size
    d["hist_popt"] = popt
fig_p, ax_p = plt.subplots(num="Polarization degree histogram", figsize=(10, 6), constrained_layout=True)
ax_p.errorbar(data_S["binning"], data_S["hist"], xerr=bin_size / 2.0, fmt="b.", ecolor="b", label="P through this pipeline")
ax_p.plot(mod_p, gauss(mod_p, *data_S["hist_popt"]), "b--", label="mean = {1:.2f}, stdev = {2:.2f}".format(*data_S["hist_popt"]))
ax_p.errorbar(data_K["binning"], data_K["hist"], xerr=bin_size / 2.0, fmt="r.", ecolor="r", label="P through Kishimoto's pipeline")
ax_p.plot(mod_p, gauss(mod_p, *data_K["hist_popt"]), "r--", label="mean = {1:.2f}, stdev = {2:.2f}".format(*data_K["hist_popt"]))
ax_p.set(xlabel="Polarization degree", ylabel="Counts", title="Histogram of polarization degree computed in the cut for both pipelines.")
ax_p.legend()
fig_p.savefig(path_join(root_dir_plot_S, "NGC1068_K_pol_deg.pdf"), bbox_inches="tight", dpi=300)
#
# Compute angular difference between the maps in the cut
#
dtheta = np.where(
    data_S["mask"],
    0.5
    * np.arctan(
        (np.sin(2 * data_S["PA"]) * np.cos(2 * data_K["PA"]) - np.cos(2 * data_S["PA"]) * np.sin(2 * data_K["PA"]))
        / (np.cos(2 * data_S["PA"]) * np.cos(2 * data_K["PA"]) + np.sin(2 * data_S["PA"]) * np.sin(2 * data_K["PA"]))
    ),
    np.nan,
)
fig_pa = plt.figure(num="Polarization angle alignment")
ax_pa = fig_pa.add_subplot(111, projection=wcs)
cbar_ax_pa = fig_pa.add_axes([0.88, 0.12, 0.01, 0.75])
ax_pa.set_title(r"Degree of alignment $\zeta$ of the polarization angles from the 2 pipelines in the cut")
im_pa = ax_pa.imshow(np.cos(2 * dtheta), vmin=-1.0, vmax=1.0, origin="lower", cmap="bwr", label=r"$\zeta$ between this pipeline and Kishimoto's")
cbar_pa = plt.colorbar(im_pa, cax=cbar_ax_pa, label=r"$\zeta = \cos\left( 2 \cdot \delta\theta_P \right)$")
ax_pa.coords[0].set_axislabel("Right Ascension (J2000)")
ax_pa.coords[1].set_axislabel("Declination (J2000)")
fig_pa.savefig(path_join(root_dir_plot_S, "NGC1068_K_pol_ang.pdf"), bbox_inches="tight", dpi=300)
#
# Compute the polarization degree difference between the maps, in units of the combined uncertainty, in the cut
#
eta = np.where(data_S["mask"], np.abs(data_K["d_P"] - data_S["d_P"]) / np.sqrt(data_S["sP"] ** 2 + data_K["sP"] ** 2) / 2.0, np.nan)
fig_dif_p = plt.figure(num="Polarization power difference ratio")
ax_dif_p = fig_dif_p.add_subplot(111, projection=wcs)
cbar_ax_dif_p = fig_dif_p.add_axes([0.88, 0.12, 0.01, 0.75])
ax_dif_p.set_title(r"Degree of difference $\eta$ of the polarization from the 2 pipelines in the cut")
im_dif_p = ax_dif_p.imshow(eta, vmin=0.0, vmax=2.0, origin="lower", cmap="bwr_r", label=r"$\eta$ between this pipeline and Kishimoto's")
cbar_dif_p = plt.colorbar(im_dif_p, cax=cbar_ax_dif_p, label=r"$\eta = \frac{\left|P^K-P^S\right|}{2\sqrt{{\sigma^K_P}^2+{\sigma^S_P}^2}}$")
ax_dif_p.coords[0].set_axislabel("Right Ascension (J2000)")
ax_dif_p.coords[1].set_axislabel("Declination (J2000)")
fig_dif_p.savefig(path_join(root_dir_plot_S, "NGC1068_K_pol_diff.pdf"), bbox_inches="tight", dpi=300)
#
# Compute the polarization angle difference between the maps, in units of the combined uncertainty, in the cut
#
eta = np.where(data_S["mask"], np.abs(data_K["PA"] - data_S["PA"]) / np.sqrt(data_S["sPA"] ** 2 + data_K["sPA"] ** 2) / 2.0, np.nan)
fig_dif_pa = plt.figure(num="Polarization angle difference ratio")
ax_dif_pa = fig_dif_pa.add_subplot(111, projection=wcs)
cbar_ax_dif_pa = fig_dif_pa.add_axes([0.88, 0.12, 0.01, 0.75])
ax_dif_pa.set_title(r"Degree of difference $\eta$ of the polarization from the 2 pipelines in the cut")
im_dif_pa = ax_dif_pa.imshow(eta, vmin=0.0, vmax=2.0, origin="lower", cmap="bwr_r", label=r"$\eta$ between this pipeline and Kishimoto's")
cbar_dif_pa = plt.colorbar(
    im_dif_pa, cax=cbar_ax_dif_pa, label=r"$\eta = \frac{\left|\theta_P^K-\theta_P^S\right|}{2\sqrt{{\sigma^K_{\theta_P}}^2+{\sigma^S_{\theta_P}}^2}}$"
)
ax_dif_pa.coords[0].set_axislabel("Right Ascension (J2000)")
ax_dif_pa.coords[1].set_axislabel("Declination (J2000)")
fig_dif_pa.savefig(path_join(root_dir_plot_S, "NGC1068_K_polang_diff.pdf"), bbox_inches="tight", dpi=300)
# display both polarization maps to check consistency
# plt.rcParams.update({'font.size': 15})
fig = plt.figure(num="Polarization maps comparison", figsize=(10, 10))
ax = fig.add_subplot(111, projection=wcs)
fig.subplots_adjust(right=0.85)
cbar_ax = fig.add_axes([0.88, 0.12, 0.01, 0.75])
for d in [data_S, data_K]:
    d["X"], d["Y"] = np.meshgrid(np.arange(d["I"].shape[1]), np.arange(d["I"].shape[0]))
    d["xy_U"], d["xy_V"] = (
        np.where(d["mask"], d["d_P"] * np.cos(np.pi / 2.0 + d["PA"]), np.nan),
        np.where(d["mask"], d["d_P"] * np.sin(np.pi / 2.0 + d["PA"]), np.nan),
    )
im0 = ax.imshow(
    data_S["I"] * convert_flux,
    norm=LogNorm(data_S["I"][data_S["I"] > 0].min() * convert_flux, data_S["I"][data_S["I"] > 0].max() * convert_flux),
    origin="lower",
    cmap="gray",
    label=r"$I_{STOKES}$ through this pipeline",
)
quiv0 = ax.quiver(
    data_S["X"],
    data_S["Y"],
    data_S["xy_U"],
    data_S["xy_V"],
    units="xy",
    angles="uv",
    scale=0.5,
    scale_units="xy",
    pivot="mid",
    headwidth=0.0,
    headlength=0.0,
    headaxislength=0.0,
    width=0.2,
    color="b",
    alpha=0.75,
    label="PA through this pipeline",
)
quiv1 = ax.quiver(
    data_K["X"],
    data_K["Y"],
    data_K["xy_U"],
    data_K["xy_V"],
    units="xy",
    angles="uv",
    scale=0.5,
    scale_units="xy",
    pivot="mid",
    headwidth=0.0,
    headlength=0.0,
    headaxislength=0.0,
    width=0.1,
    color="r",
    alpha=0.75,
    label="PA through Kishimoto's pipeline",
)
ax.set_title(r"$SNR_P \geq$ " + str(SNRi_cut) + r"$\; & \; SNR_I \geq $" + str(SNRp_cut))
# ax.coords.grid(True, color='white', ls='dotted', alpha=0.5)
ax.coords[0].set_axislabel("Right Ascension (J2000)")
ax.coords[0].set_axislabel_position("b")
ax.coords[0].set_ticklabel_position("b")
ax.coords[1].set_axislabel("Declination (J2000)")
ax.coords[1].set_axislabel_position("l")
ax.coords[1].set_ticklabel_position("l")
# ax.axis('equal')
cbar = plt.colorbar(im0, cax=cbar_ax, label=r"$F_{\lambda}$ [$ergs \cdot cm^{-2} \cdot s^{-1} \cdot \AA^{-1}$]")
ax.legend(loc="upper right")
fig.savefig(path_join(root_dir_plot_S, "NGC1068_K_comparison.pdf"), bbox_inches="tight", dpi=300)
# compute integrated polarization parameters within the cut
for d in [data_S, data_K]:
    d["I_dil"] = np.sum(d["I"][d["mask"]])
    d["sI_dil"] = np.sqrt(np.sum(d["sI"][d["mask"]] ** 2))
    d["Q_dil"] = np.sum(d["Q"][d["mask"]])
    d["sQ_dil"] = np.sqrt(np.sum(d["sQ"][d["mask"]] ** 2))
    d["U_dil"] = np.sum(d["U"][d["mask"]])
    d["sU_dil"] = np.sqrt(np.sum(d["sU"][d["mask"]] ** 2))
    d["P_dil"] = np.sqrt(d["Q_dil"] ** 2 + d["U_dil"] ** 2) / d["I_dil"]
    d["sP_dil"] = (
        np.sqrt(
            (d["Q_dil"] ** 2 * d["sQ_dil"] ** 2 + d["U_dil"] ** 2 * d["sU_dil"] ** 2) / (d["Q_dil"] ** 2 + d["U_dil"] ** 2)
            + ((d["Q_dil"] / d["I_dil"]) ** 2 + (d["U_dil"] / d["I_dil"]) ** 2) * d["sI_dil"] ** 2
        )
        / d["I_dil"]
    )
    d["d_P_dil"] = np.sqrt(d["P_dil"] ** 2 - d["sP_dil"] ** 2)
    d["PA_dil"] = princ_angle((90.0 / np.pi) * np.arctan2(d["U_dil"], d["Q_dil"]))
    d["sPA_dil"] = princ_angle(
        (90.0 / (np.pi * (d["Q_dil"] ** 2 + d["U_dil"] ** 2))) * np.sqrt(d["Q_dil"] ** 2 * d["sU_dil"] ** 2 + d["U_dil"] ** 2 * d["sQ_dil"] ** 2)
    )
print(
    "From this pipeline:\n",
    "P = {0:.2f} ± {1:.2f} %\n".format(data_S["d_P_dil"] * 100.0, data_S["sP_dil"] * 100.0),
    "PA = {0:.2f} ± {1:.2f} °".format(data_S["PA_dil"], data_S["sPA_dil"]),
)
print(
    "From Kishimoto's pipeline:\n",
    "P = {0:.2f} ± {1:.2f} %\n".format(data_K["d_P_dil"] * 100.0, data_K["sP_dil"] * 100.0),
    "PA = {0:.2f} ± {1:.2f} °".format(data_K["PA_dil"], data_K["sPA_dil"]),
)
# compare different types of error
print(
    "This pipeline: average sI/I={0:.2f}; sQ/Q={1:.2f}; sU/U={2:.2f}; sP/P={3:.2f}".format(
        np.mean(data_S["sI"][data_S["mask"]] / data_S["I"][data_S["mask"]]),
        np.mean(data_S["sQ"][data_S["mask"]] / data_S["Q"][data_S["mask"]]),
        np.mean(data_S["sU"][data_S["mask"]] / data_S["U"][data_S["mask"]]),
        np.mean(data_S["sP"][data_S["mask"]] / data_S["P"][data_S["mask"]]),
    )
)
print(
    "Kishimoto's pipeline: average sI/I={0:.2f}; sQ/Q={1:.2f}; sU/U={2:.2f}; sP/P={3:.2f}".format(
        np.mean(data_K["sI"][data_S["mask"]] / data_K["I"][data_S["mask"]]),
        np.mean(data_K["sQ"][data_S["mask"]] / data_K["Q"][data_S["mask"]]),
        np.mean(data_K["sU"][data_S["mask"]] / data_K["U"][data_S["mask"]]),
        np.mean(data_K["sP"][data_S["mask"]] / data_K["P"][data_S["mask"]]),
    )
)
# for d, i in zip(["I", "Q", "U", "P", "PA", "sI", "sQ", "sU", "sP", "sPA"], [0, 1, 2, 5, 8, (3, 0, 0), (3, 1, 1), (3, 2, 2), 6, 9]):
# data_K[d] = np.loadtxt(path_join(root_dir_K, d + ".txt"))
# with fits.open(path_join(root_dir_data_S, filename_S)) as f:
# if not type(i) is int:
# data_S[d] = np.sqrt(f[i[0]].data[i[1], i[2]])
# else:
# data_S[d] = f[i].data
# if i == 0:
# header = f[i].header
# from Kishimoto's pipeline: IQU_dir, IQU_shift, IQU_stat, IQU_trans
# from my pipeline: raw_bg, raw_flat, raw_psf, raw_shift, raw_wav, IQU_dir
# but errors from my pipeline are propagated all along; how can they be compared then?
plt.show()
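The script above recomputes the same polarization quantities in several places; they follow the standard Q/U error propagation. A minimal sketch consistent with those formulas (the Q-U covariance terms and masking that the pipeline's compute_pol handles are deliberately omitted):

import numpy as np

# Sketch of the polarization quantities used throughout the script above,
# ignoring Q-U covariance and masking for brevity.
def pol_from_stokes(I, Q, U, sI, sQ, sU):
    P = np.sqrt(Q**2 + U**2) / I
    sP = np.sqrt((Q**2 * sQ**2 + U**2 * sU**2) / (Q**2 + U**2) + ((Q / I) ** 2 + (U / I) ** 2) * sI**2) / I
    dP = np.sqrt(np.clip(P**2 - sP**2, 0.0, None))  # debiased degree, clipped at 0 instead of going NaN
    PA = (90.0 / np.pi) * np.arctan2(U, Q)  # position angle in degrees
    sPA = (90.0 / (np.pi * (Q**2 + U**2))) * np.sqrt(Q**2 * sU**2 + U**2 * sQ**2)
    return P, sP, dP, PA, sPA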

View File

@@ -20,28 +20,25 @@ def main(infile, P_cut=0.99, target=None, display="pf", output_dir=None):
     output = []
     Stokes = fits_open(infile)
-    stkI = Stokes["STOKES"].data[0]
-    s_I = np.sqrt(Stokes["STOKES_COV"].data[0, 0])
-    SNRi = np.zeros(stkI.shape)
-    SNRi[s_I > 0.0] = stkI[s_I > 0.0] / s_I[s_I > 0.0]
+    stkI = Stokes["I_STOKES"].data
     QN, UN, QN_ERR, UN_ERR = np.full((4, stkI.shape[0], stkI.shape[1]), np.nan)
     for sflux, nflux in zip(
-        [Stokes["STOKES"].data[1], Stokes["STOKES"].data[2], np.sqrt(Stokes["STOKES_COV"].data[1, 1]), np.sqrt(Stokes["STOKES_COV"].data[2, 2])],
+        [Stokes["Q_STOKES"].data, Stokes["U_STOKES"].data, np.sqrt(Stokes["IQU_COV_MATRIX"].data[1, 1]), np.sqrt(Stokes["IQU_COV_MATRIX"].data[2, 2])],
         [QN, UN, QN_ERR, UN_ERR],
     ):
         nflux[stkI > 0.0] = sflux[stkI > 0.0] / stkI[stkI > 0.0]
     Stokesconf = PCconf(QN, UN, QN_ERR, UN_ERR)
-    Stokesmask = np.logical_and(Stokes["DATA_MASK"].data.astype(bool), SNRi > 10.0)
+    Stokesmask = Stokes["DATA_MASK"].data.astype(bool)
     Stokessnr = np.zeros(Stokesmask.shape)
     Stokessnr[Stokes["POL_DEG_ERR"].data > 0.0] = (
         Stokes["POL_DEG_DEBIASED"].data[Stokes["POL_DEG_ERR"].data > 0.0] / Stokes["POL_DEG_ERR"].data[Stokes["POL_DEG_ERR"].data > 0.0]
-    ) * Stokesmask[Stokes["POL_DEG_ERR"].data > 0.0]
+    )
     if P_cut < 1.0:
         Stokescentconf, Stokescenter = CenterConf(Stokesconf > P_cut, Stokes["POL_ANG"].data, Stokes["POL_ANG_ERR"].data)
     else:
         Stokescentconf, Stokescenter = CenterConf(Stokessnr > P_cut, Stokes["POL_ANG"].data, Stokes["POL_ANG_ERR"].data)
-    Stokespos = WCS(Stokes[0].header).celestial.pixel_to_world(*Stokescenter)
+    Stokespos = WCS(Stokes[0].header).pixel_to_world(*Stokescenter)
     if target is None:
         target = Stokes[0].header["TARGNAME"]
@@ -80,10 +77,10 @@ if __name__ == "__main__":
     parser = argparse.ArgumentParser(description="Look for the center of emission for a given reduced observation")
     parser.add_argument("-t", "--target", metavar="targetname", required=False, help="the name of the target", type=str, default=None)
-    parser.add_argument("-f", "--file", metavar="path", required=True, help="The full or relative path to the data product", type=str, default=None)
-    parser.add_argument("-c", "--pcut", metavar="pcut", required=False, help="The polarization cut for the data mask", type=float, default=3.0)
+    parser.add_argument("-f", "--file", metavar="path", required=False, help="The full or relative path to the data product", type=str, default=None)
+    parser.add_argument("-c", "--pcut", metavar="pcut", required=False, help="The polarization cut for the data mask", type=float, default=0.99)
     parser.add_argument("-d", "--display", metavar="display", required=False, help="The map on which to display info", type=str, default="pf")
-    parser.add_argument("-o", "--output_dir", metavar="directory_path", required=False, help="output directory path for the plots", type=str, default=None)
+    parser.add_argument("-o", "--output_dir", metavar="directory_path", required=False, help="output directory path for the plots", type=str, default="./data")
     args = parser.parse_args()
     exitcode = main(infile=args.file, P_cut=args.pcut, target=args.target, display=args.display, output_dir=args.output_dir)
     print("Written to: ", exitcode)

View File

@@ -10,7 +10,7 @@ from astropy.io import fits
 from lib.plots import overplot_pol, overplot_radio
 from matplotlib.colors import LogNorm
-Stokes_UV = fits.open("./data/IC5063/5918/IC5063_5918_F502M_FOC_b0.10arcsec_c0.15arcsec.fits")
+Stokes_UV = fits.open("./data/IC5063/5918/IC5063_FOC_b0.10arcsec_c0.20arcsec.fits")
 # Stokes_18GHz = fits.open("./data/IC5063/radio/IC5063_18GHz.fits")
 # Stokes_24GHz = fits.open("./data/IC5063/radio/IC5063_24GHz.fits")
 # Stokes_103GHz = fits.open("./data/IC5063/radio/IC5063_103GHz.fits")
@@ -47,12 +47,10 @@ Stokes_IR = fits.open("./data/IC5063/IR/u2e65g01t_c0f_rot.fits")
 G = overplot_pol(Stokes_UV, Stokes_IR, cmap="inferno")
 G.plot(
-    P_cut=3,
-    SNRi_cut=10,
+    P_cut=0.99,
+    SNRi_cut=1.0,
     savename="./plots/IC5063/IR_overplot.pdf",
-    scale_vec=5,
+    scale_vec=None,
     norm=LogNorm(Stokes_IR[0].data.max() * Stokes_IR[0].header["photflam"] / 1e3, Stokes_IR[0].data.max() * Stokes_IR[0].header["photflam"]),
     cmap="inferno",
-    disptype="pf",
-    levels="Default",
 )

View File

@@ -11,9 +11,9 @@ from lib.plots import overplot_chandra, overplot_pol
 from matplotlib.colors import LogNorm
 Stokes_UV = fits.open("./data/MRK463E/5960/MRK463E_FOC_b0.05arcsec_c0.10arcsec.fits")
-Stokes_IR = fits.open("./data/MRK463E/WFPC2/IR_rot_crop.fits")
+# Stokes_IR = fits.open("./data/MRK463E/WFPC2/IR_rot_crop.fits")
 # Stokes_Xr = fits.open("./data/MRK463E/Chandra/X_ray_crop.fits")
-# Radio = fits.open("./data/MRK463E/EMERLIN/Voorwerpjes_1356+1822_1356+1822_uniform-image.fits")
+Radio = fits.open("./data/MRK463E/EMERLIN/Voorwerpjes_1356+1822_1356+1822_uniform-image.fits")
 # levels = np.geomspace(1.0, 99.0, 7)
@@ -21,26 +21,26 @@ Stokes_IR = fits.open("./data/MRK463E/WFPC2/IR_rot_crop.fits")
 # A.plot(levels=levels, P_cut=0.99, SNRi_cut=1.0, scale_vec=5, zoom=1, savename="./plots/MRK463E/Chandra_overplot.pdf")
 # A.write_to(path1="./data/MRK463E/FOC_data_Chandra.fits", path2="./data/MRK463E/Chandra_data.fits", suffix="aligned")
-levels = np.array([0.8, 2, 5, 10, 20, 50]) / 100.0 * Stokes_UV[0].header["photflam"]
-B = overplot_pol(Stokes_UV, Stokes_IR, norm=LogNorm())
-B.plot(levels=levels, P_cut=0.99, SNRi_cut=1.0, scale_vec=5, norm=LogNorm(8.5e-18, 2.5e-15), savename="./plots/MRK463E/IR_overplot.pdf")
-B.write_to(path1="./data/MRK463E/FOC_data_WFPC.fits", path2="./data/MRK463E/WFPC_data.fits", suffix="aligned")
+# levels = np.array([0.8, 2, 5, 10, 20, 50]) / 100.0 * Stokes_UV[0].header["photflam"]
+# B = overplot_pol(Stokes_UV, Stokes_IR, norm=LogNorm())
+# B.plot(levels=levels, P_cut=0.99, SNRi_cut=1.0, scale_vec=5, norm=LogNorm(8.5e-18, 2.5e-15), savename="./plots/MRK463E/IR_overplot.pdf")
+# B.write_to(path1="./data/MRK463E/FOC_data_WFPC.fits", path2="./data/MRK463E/WFPC_data.fits", suffix="aligned")
 # levels = np.array([0.8, 2, 5, 10, 20, 50]) / 100.0 * Stokes_UV[0].header["photflam"]
-# levels = np.array([5, 10, 20, 50])
-# C = overplot_pol(Stokes_UV, Radio, norm=LogNorm())
-# C.other_im.set(norm=LogNorm(1e-4, 2e-2))
-# C.plot(
-#     levels=levels,
-#     P_cut=0.99,
-#     SNRi_cut=1.0,
-#     step_vec=0,
-#     scale_vec=3,
-#     norm=LogNorm(1e-4, 2e-2),
-#     cmap="inferno_r",
-#     width=0.5,
-#     linewidth=0.5,
-#     disptype="snri",
-#     savename="./plots/MRK463E/EMERLIN_snri_overplot.pdf",
-# )
-# C.write_to(path1="./data/MRK463E/FOC_data_EMERLIN.fits", path2="./data/MRK463E/EMERLIN_data.fits", suffix="aligned")
+levels = np.array([5, 10, 20, 50])
+C = overplot_pol(Stokes_UV, Radio, norm=LogNorm())
+C.other_im.set(norm=LogNorm(1e-4, 2e-2))
+C.plot(
+    levels=levels,
+    P_cut=0.99,
+    SNRi_cut=1.0,
+    step_vec=0,
+    scale_vec=3,
+    norm=LogNorm(1e-4, 2e-2),
+    cmap="inferno_r",
+    width=0.5,
+    linewidth=0.5,
+    disptype="snri",
+    savename="./plots/MRK463E/EMERLIN_snri_overplot.pdf",
+)
+C.write_to(path1="./data/MRK463E/FOC_data_EMERLIN.fits", path2="./data/MRK463E/EMERLIN_data.fits", suffix="aligned")