Compare commits
10 Commits

| SHA1 |
|---|
| e9fd50ad17 |
| 3ac9102445 |
| 0a8336aea8 |
| f4fdce3f07 |
| 042be2bad4 |
| e4acb9755c |
| c41482af77 |
| 6d7442169f |
| 00fa050a95 |
| 26a353f118 |
```diff
@@ -20,7 +20,6 @@ import lib.reduction as proj_red  # Functions used in reduction pipeline
 import numpy as np
 from lib.utils import princ_angle, sci_not
 from matplotlib.colors import LogNorm
 from astropy.wcs import WCS


 def main(target=None, proposal_id=None, infiles=None, output_dir="./data", crop=False, interactive=False):
```
```diff
@@ -41,8 +40,8 @@ def main(target=None, proposal_id=None, infiles=None, output_dir="./data", crop=
     display_crop = False

     # Background estimation
-    error_sub_type = "scott"  # sqrt, sturges, rice, scott, freedman-diaconis (default) or shape (example (51, 51))
-    subtract_error = 1.50
+    error_sub_type = "freedman-diaconis"  # sqrt, sturges, rice, scott, freedman-diaconis (default) or shape (example (51, 51))
+    subtract_error = 1.33
     display_bkg = True

     # Data binning
```
```diff
@@ -64,7 +63,6 @@ def main(target=None, proposal_id=None, infiles=None, output_dir="./data", crop=
     smoothing_scale = "arcsec"  # pixel or arcsec

     # Rotation
-    rotate_data = False
     rotate_North = True

     # Polarization map output
```
```diff
@@ -119,10 +117,9 @@ def main(target=None, proposal_id=None, infiles=None, output_dir="./data", crop=
     figtype = "_".join([figtype, "not_aligned"] if figtype != "" else ["not_aligned"])

     # Crop data to remove outside blank margins.
-    data_array, error_array, headers = proj_red.crop_array(
-        data_array, headers, step=5, null_val=0.0, inside=True, display=display_crop, savename=figname, plots_folder=plots_folder
+    data_array, error_array, data_mask, headers = proj_red.crop_array(
+        data_array, headers, step=5, null_val=0.0, crop=True, inside=True, display=display_crop, savename=figname, plots_folder=plots_folder
     )
-    data_mask = np.ones(data_array[0].shape, dtype=bool)

     # Deconvolve data using Richardson-Lucy iterative algorithm with a gaussian PSF of given FWHM.
     if deconvolve:
```
```diff
@@ -144,7 +141,7 @@ def main(target=None, proposal_id=None, infiles=None, output_dir="./data", crop=
         )

     # Rotate data to have same orientation
-    rotate_data = rotate_data or np.unique([np.round(float(head["ORIENTAT"]), 3) for head in headers]).size != 1
+    rotate_data = np.unique([np.round(float(head["ORIENTAT"]), 3) for head in headers]).size != 1
     if rotate_data:
         ang = np.mean([head["ORIENTAT"] for head in headers])
         for head in headers:
```
```diff
@@ -160,6 +157,7 @@ def main(target=None, proposal_id=None, infiles=None, output_dir="./data", crop=
             vmin=data_array[data_array > 0.0].min() * headers[0]["photflam"], vmax=data_array[data_array > 0.0].max() * headers[0]["photflam"]
         ),
     )

     # Align and rescale images with oversampling.
     data_array, error_array, headers, data_mask, shifts, error_shifts = proj_red.align_data(
         data_array,
```
```diff
@@ -183,14 +181,6 @@ def main(target=None, proposal_id=None, infiles=None, output_dir="./data", crop=
         norm=LogNorm(vmin=data_array[data_array > 0.0].min() * headers[0]["photflam"], vmax=data_array[data_array > 0.0].max() * headers[0]["photflam"]),
     )

-    flux_data, flux_error, flux_mask, flux_head = (
-        deepcopy(data_array.sum(axis=0)),
-        deepcopy(np.sqrt(np.sum(error_array**2, axis=0))),
-        deepcopy(data_mask),
-        deepcopy(headers[0]),
-    )
-    flux_head["EXPTIME"] = np.sum([head["EXPTIME"] for head in headers])
-
     # Rebin data to desired pixel size.
     if (pxsize is not None) and not (pxsize == 1 and pxscale.lower() in ["px", "pixel", "pixels"]):
         data_array, error_array, headers, Dxy, data_mask = proj_red.rebin_array(
```
```diff
@@ -226,39 +216,36 @@ def main(target=None, proposal_id=None, infiles=None, output_dir="./data", crop=
     # FWHM of FOC have been estimated at about 0.03" across 1500-5000 Angstrom band, which is about 2 detector pixels wide
     # see Jedrzejewski, R.; Nota, A.; Hack, W. J., A Comparison Between FOC and WFPC2
     # Bibcode : 1995chst.conf...10J
-    I_stokes, Q_stokes, U_stokes, Stokes_cov, header_stokes = proj_red.compute_Stokes(
+    I_stokes, Q_stokes, U_stokes, Stokes_cov, Stokes_stat_cov, header_stokes = proj_red.compute_Stokes(
         data_array, error_array, data_mask, headers, FWHM=smoothing_FWHM, scale=smoothing_scale, smoothing=smoothing_function, transmitcorr=transmitcorr
     )
-    I_bkg, Q_bkg, U_bkg, S_cov_bkg, header_bkg = proj_red.compute_Stokes(
+    I_bkg, Q_bkg, U_bkg, S_cov_bkg, S_stat_cov_bkg, header_bkg = proj_red.compute_Stokes(
         background, background_error, np.array(True).reshape(1, 1), headers, FWHM=None, scale=smoothing_scale, smoothing=smoothing_function, transmitcorr=False
     )

     # Step 3:
     # Rotate images to have North up
     if rotate_North:
-        I_stokes, Q_stokes, U_stokes, Stokes_cov, data_mask, header_stokes = proj_red.rotate_Stokes(
-            I_stokes, Q_stokes, U_stokes, Stokes_cov, data_mask, header_stokes, SNRi_cut=None
+        I_stokes, Q_stokes, U_stokes, Stokes_cov, Stokes_stat_cov, data_mask, header_stokes = proj_red.rotate_Stokes(
+            I_stokes, Q_stokes, U_stokes, Stokes_cov, Stokes_stat_cov, data_mask, header_stokes, SNRi_cut=None
         )
-        I_bkg, Q_bkg, U_bkg, S_cov_bkg, data_mask_bkg, header_bkg = proj_red.rotate_Stokes(
-            I_bkg, Q_bkg, U_bkg, S_cov_bkg, np.array(True).reshape(1, 1), header_bkg, SNRi_cut=None
+        I_bkg, Q_bkg, U_bkg, S_cov_bkg, S_stat_cov_bkg, data_mask_bkg, header_bkg = proj_red.rotate_Stokes(
+            I_bkg, Q_bkg, U_bkg, S_cov_bkg, S_stat_cov_bkg, np.array(True).reshape(1, 1), header_bkg, SNRi_cut=None
         )
-        flux_data, flux_error, flux_mask, flux_head = proj_red.rotate_data(np.array([flux_data]), np.array([flux_error]), flux_mask, [flux_head])
-        flux_data, flux_error, flux_head = flux_data[0], flux_error[0], flux_head[0]
     elif not rotate_data:
         figname += "_noROT"

     # Compute polarimetric parameters (polarization degree and angle).
-    P, debiased_P, s_P, s_P_P, PA, s_PA, s_PA_P = proj_red.compute_pol(I_stokes, Q_stokes, U_stokes, Stokes_cov, header_stokes)
-    P_bkg, debiased_P_bkg, s_P_bkg, s_P_P_bkg, PA_bkg, s_PA_bkg, s_PA_P_bkg = proj_red.compute_pol(I_bkg, Q_bkg, U_bkg, S_cov_bkg, header_bkg)
+    P, debiased_P, s_P, s_P_P, PA, s_PA, s_PA_P = proj_red.compute_pol(I_stokes, Q_stokes, U_stokes, Stokes_cov, Stokes_stat_cov, header_stokes)
+    P_bkg, debiased_P_bkg, s_P_bkg, s_P_P_bkg, PA_bkg, s_PA_bkg, s_PA_P_bkg = proj_red.compute_pol(I_bkg, Q_bkg, U_bkg, S_cov_bkg, S_stat_cov_bkg, header_bkg)

     # Step 4:
     # Save image to FITS.
-    savename = "_".join([figname, figtype]) if figtype != "" else figname
+    figname = "_".join([figname, figtype]) if figtype != "" else figname
     Stokes_hdul = proj_fits.save_Stokes(
         I_stokes,
         Q_stokes,
         U_stokes,
         Stokes_cov,
+        Stokes_stat_cov,
         P,
         debiased_P,
         s_P,
```
```diff
@@ -268,29 +255,32 @@ def main(target=None, proposal_id=None, infiles=None, output_dir="./data", crop=
         s_PA_P,
         header_stokes,
         data_mask,
-        savename,
+        figname,
         data_folder=data_folder,
         return_hdul=True,
-        flux_data=np.array([flux_data, flux_error, flux_mask]),
-        flux_head=flux_head,
     )
     outfiles.append("/".join([data_folder, Stokes_hdul[0].header["FILENAME"] + ".fits"]))

     # Step 5:
     # crop to desired region of interest (roi)
     if crop:
-        savename += "_crop"
+        figname += "_crop"
         stokescrop = proj_plots.crop_Stokes(deepcopy(Stokes_hdul), norm=LogNorm())
         stokescrop.crop()
         stokescrop.write_to("/".join([data_folder, figname + ".fits"]))
         Stokes_hdul, header_stokes = stokescrop.hdul_crop, stokescrop.hdul_crop[0].header
-        outfiles.append("/".join([data_folder, Stokes_hdul["I_STOKES"].header["FILENAME"] + ".fits"]))
+        outfiles.append("/".join([data_folder, Stokes_hdul[0].header["FILENAME"] + ".fits"]))

     data_mask = Stokes_hdul["data_mask"].data.astype(bool)
     print(
         "F_int({0:.0f} Angs) = ({1} ± {2})e{3} ergs.cm^-2.s^-1.Angs^-1".format(
-            flux_head["PHOTPLAM"],
-            *sci_not(flux_data[flux_mask].sum() * flux_head["PHOTFLAM"], np.sqrt(np.sum(flux_error[flux_mask] ** 2)) * flux_head["PHOTFLAM"], 2, out=int),
+            header_stokes["PHOTPLAM"],
+            *sci_not(
+                Stokes_hdul[0].data[data_mask].sum() * header_stokes["PHOTFLAM"],
+                np.sqrt(Stokes_hdul[3].data[0, 0][data_mask].sum()) * header_stokes["PHOTFLAM"],
+                2,
+                out=int,
+            ),
         )
     )
     print("P_int = {0:.1f} ± {1:.1f} %".format(header_stokes["p_int"] * 100.0, np.ceil(header_stokes["sP_int"] * 1000.0) / 10.0))
```
```diff
@@ -313,12 +303,12 @@ def main(target=None, proposal_id=None, infiles=None, output_dir="./data", crop=
             flux_lim=flux_lim,
             step_vec=step_vec,
             scale_vec=scale_vec,
-            savename=figname,
+            savename="_".join([figname]),
             plots_folder=plots_folder,
         )
         for figtype, figsuffix in zip(
-            ["FluxDensity", "Intensity", "Pol_flux", "Pol_deg", "Pol_ang", "I_err", "P_err", "SNRi", "SNRp", "confp"],
-            ["F", "I", "P_flux", "P", "PA", "I_err", "P_err", "SNRi", "SNRp", "confP"],
+            ["Intensity", "Pol_flux", "Pol_deg", "Pol_ang", "I_err", "P_err", "SNRi", "SNRp", "confp"],
+            ["I", "P_flux", "P", "PA", "I_err", "P_err", "SNRi", "SNRp", "confP"],
         ):
             try:
                 proj_plots.polarization_map(
```
```diff
@@ -329,7 +319,7 @@ def main(target=None, proposal_id=None, infiles=None, output_dir="./data", crop=
                     flux_lim=flux_lim,
                     step_vec=step_vec,
                     scale_vec=scale_vec,
-                    savename="_".join([savename, figsuffix]),
+                    savename="_".join([figname, figsuffix]),
                     plots_folder=plots_folder,
                     display=figtype,
                 )
```
```diff
@@ -337,7 +327,7 @@ def main(target=None, proposal_id=None, infiles=None, output_dir="./data", crop=
                 pass
     elif not interactive:
         proj_plots.polarization_map(
-            deepcopy(Stokes_hdul), data_mask, P_cut=P_cut, SNRi_cut=SNRi_cut, savename=savename, plots_folder=plots_folder, display="integrate"
+            deepcopy(Stokes_hdul), data_mask, P_cut=P_cut, SNRi_cut=SNRi_cut, savename=figname, plots_folder=plots_folder, display="integrate"
         )
     elif pxscale.lower() not in ["full", "integrate"]:
         proj_plots.pol_map(Stokes_hdul, P_cut=P_cut, SNRi_cut=SNRi_cut, step_vec=step_vec, scale_vec=scale_vec, flux_lim=flux_lim)
```
```diff
@@ -1,2 +1,2 @@
 from .lib import *
-from .FOC_reduction import main
+from . import FOC_reduction
```
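With this change the package no longer re-exports `main` at the top level; callers now reach it through the `FOC_reduction` module. A minimal usage sketch — the package's import name is not shown in this compare, so `foc_package` below is a placeholder:

```python
# "foc_package" is a hypothetical stand-in for the package whose __init__.py is shown above
import foc_package

# before this change: foc_package.main(...)
# after this change:
foc_package.FOC_reduction.main(target=None, proposal_id=None, output_dir="./data")
```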
```diff
@@ -18,6 +18,7 @@ import matplotlib.dates as mdates
 import matplotlib.pyplot as plt
 import numpy as np
 from astropy.time import Time
+from lib.plots import plot_obs
 from matplotlib.colors import LogNorm
 from matplotlib.patches import Rectangle
 from scipy.optimize import curve_fit
```
```diff
@@ -135,8 +136,6 @@ def display_bkg(data, background, std_bkg, headers, histograms=None, binning=Non
     fig2.subplots_adjust(hspace=0, wspace=0, right=1.0)
     fig2.colorbar(im2, ax=ax2, location="right", shrink=0.60, aspect=50, pad=0.025, label=r"Flux [$ergs \cdot cm^{-2} \cdot s^{-1} \cdot \AA^{-1}$]")

-    from .plots import plot_obs
-
     if savename is not None:
         this_savename = deepcopy(savename)
         if savename[-4:] not in [".png", ".jpg", ".pdf"]:
```
```diff
@@ -279,7 +278,7 @@ def bkg_fit(data, error, mask, headers, subtract_error=True, display=False, save
     return n_data_array, n_error_array, headers, background


-def bkg_hist(data, error, mask, headers, n_bins=None, subtract_error=True, display=False, savename=None, plots_folder=""):
+def bkg_hist(data, error, mask, headers, sub_type=None, subtract_error=True, display=False, savename=None, plots_folder=""):
     """
     ----------
     Inputs:
```
```diff
@@ -334,15 +333,29 @@ def bkg_hist(data, error, mask, headers, n_bins=None, subtract_error=True, displ
     for i, image in enumerate(data):
         # Compute the Count-rate histogram for the image
         n_mask = np.logical_and(mask, image > 0.0)
-        if not isinstance(n_bins, int) and n_bins not in ["auto", "fd", "doane", "scott", "stone", "rice", "sturges", "sqrt"]:
-            match n_bins.lower():
-                case "square-root" | "squareroot":
-                    n_bins = "sqrt"
-                case "freedman-diaconis" | "freedmandiaconis":
-                    n_bins = "fd"
-                case _:
-                    n_bins = "scott"
+        if sub_type is not None:
+            if isinstance(sub_type, int):
+                n_bins = sub_type
+            elif sub_type.lower() in ["square-root", "squareroot", "sqrt"]:
+                n_bins = np.fix(np.sqrt(image[n_mask].size)).astype(int)  # Square-root
+            elif sub_type.lower() in ["sturges"]:
+                n_bins = np.ceil(np.log2(image[n_mask].size)).astype(int) + 1  # Sturges
+            elif sub_type.lower() in ["rice"]:
+                n_bins = 2 * np.fix(np.power(image[n_mask].size, 1 / 3)).astype(int)  # Rice
+            elif sub_type.lower() in ["freedman-diaconis", "freedmandiaconis", "freedman", "diaconis"]:
+                n_bins = np.fix(
+                    (image[n_mask].max() - image[n_mask].min())
+                    / (2 * np.subtract(*np.percentile(image[n_mask], [75, 25])) / np.power(image[n_mask].size, 1 / 3))
+                ).astype(int)  # Freedman-Diaconis
+            else:  # Fallback
+                n_bins = np.fix((image[n_mask].max() - image[n_mask].min()) / (3.5 * image[n_mask].std() / np.power(image[n_mask].size, 1 / 3))).astype(
+                    int
+                )  # Scott
+        else:  # Default statistic
+            n_bins = np.fix((image[n_mask].max() - image[n_mask].min()) / (3.5 * image[n_mask].std() / np.power(image[n_mask].size, 1 / 3))).astype(
+                int
+            )  # Scott
         hist, bin_edges = np.histogram(np.log(image[n_mask]), bins=n_bins)
         histograms.append(hist)
         binning.append(np.exp(bin_centers(bin_edges)))
```
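The new `sub_type` branch computes an explicit bin count from the classical rules, where the removed code mapped rule names onto `np.histogram`'s built-in string estimators ("sqrt", "sturges", "rice", "scott", "fd"). A self-contained sketch of the same estimators on a synthetic sample — assumptions: 1-D data and log-binning, as in the loop above; this is illustrative code, not the pipeline's:

```python
import numpy as np

def rule_n_bins(x, rule="scott"):
    """Bin counts following the rules referenced in bkg_hist (sketch, not the pipeline's code)."""
    n = x.size
    if rule == "sqrt":
        return int(np.fix(np.sqrt(n)))  # Square-root
    if rule == "sturges":
        return int(np.ceil(np.log2(n))) + 1  # Sturges
    if rule == "rice":
        return 2 * int(np.fix(np.power(n, 1 / 3)))  # Rice
    span = x.max() - x.min()
    if rule == "freedman-diaconis":
        iqr = np.subtract(*np.percentile(x, [75, 25]))  # interquartile range
        return int(np.fix(span / (2 * iqr / np.power(n, 1 / 3))))  # Freedman-Diaconis
    return int(np.fix(span / (3.5 * x.std() / np.power(n, 1 / 3))))  # Scott (default)

rng = np.random.default_rng(0)
sample = np.log(rng.lognormal(size=10000))  # positive fluxes, binned in log-space
hist, edges = np.histogram(sample, bins=rule_n_bins(sample, "freedman-diaconis"))
# np.histogram also accepts the string estimators directly, e.g. bins="fd" or bins="scott",
# which is what the removed match-statement relied on.
```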
```diff
@@ -47,9 +47,9 @@ def get_obs_data(infiles, data_folder="", compute_flux=False):
         wcs_array.append(WCS(header=f[0].header, fobj=f).celestial)
         f.flush()
         # Save pixel area for flux density computation
-        if "PXFORMT" in headers[i].keys() and headers[i]["PXFORMT"] == "NORMAL":
+        if headers[i]["PXFORMT"] == "NORMAL":
             headers[i]["PXAREA"] = 1.96e-4  # 14x14 milliarcsec squared pixel area in arcsec^2
-        elif "PXFORMT" in headers[i].keys() and headers[i]["PXFORMT"] == "ZOOM":
+        elif headers[i]["PXFORMT"] == "ZOOM":
             headers[i]["PXAREA"] = 4.06e-4  # 29x14 milliarcsec squared pixel area in arcsec^2
         else:
             headers[i]["PXAREA"] = 1.0  # unknown default to 1 arcsec^2
```
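The `PXAREA` constants follow directly from the FOC pixel geometries quoted in the comments; a quick arithmetic check (illustrative, not pipeline code):

```python
# NORMAL format: 14 x 14 milliarcsec pixels -> (0.014 arcsec)^2
print(0.014 * 0.014)  # 1.96e-04 arcsec^2
# ZOOM format: 29 x 14 milliarcsec pixels -> 0.029 arcsec * 0.014 arcsec
print(0.029 * 0.014)  # 4.06e-04 arcsec^2
```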
```diff
@@ -90,10 +90,10 @@ def get_obs_data(infiles, data_folder="", compute_flux=False):
     # force WCS for POL60 to have same pixel size as POL0 and POL120
     is_pol60 = np.array([head["filtnam1"].lower() == "pol60" for head in headers], dtype=bool)
     cdelt = np.round(np.array([WCS(head).wcs.cdelt[:2] for head in headers]), 10)
-    if np.any(is_pol60) and np.unique(cdelt[np.logical_not(is_pol60)], axis=0).size != 2:
+    if np.unique(cdelt[np.logical_not(is_pol60)], axis=0).size != 2:
         print(np.unique(cdelt[np.logical_not(is_pol60)], axis=0))
         raise ValueError("Not all images have same pixel size")
-    elif np.any(is_pol60):
+    else:
         for i in np.arange(len(headers))[is_pol60]:
             headers[i]["cdelt1"], headers[i]["cdelt2"] = np.unique(cdelt[np.logical_not(is_pol60)], axis=0)[0]
```
```diff
@@ -110,6 +110,7 @@ def save_Stokes(
     Q_stokes,
     U_stokes,
     Stokes_cov,
+    Stokes_stat_cov,
     P,
     debiased_P,
     s_P,
```
```diff
@@ -122,8 +123,6 @@ def save_Stokes(
     filename,
     data_folder="",
     return_hdul=False,
-    flux_data=None,
-    flux_head=None,
 ):
     """
     Save computed polarimetry parameters to a single fits file,
```
```diff
@@ -179,7 +178,7 @@ def save_Stokes(
     header["PROPOSID"] = (header_stokes["PROPOSID"], "PEP proposal identifier for observation")
     header["TARGNAME"] = (header_stokes["TARGNAME"], "Target name")
     header["ORIENTAT"] = (header_stokes["ORIENTAT"], "Angle between North and the y-axis of the image")
-    header["FILENAME"] = (filename, "Original filename")
+    header["FILENAME"] = (filename, "ORIGINAL FILENAME")
     header["BKG_TYPE"] = (header_stokes["BKG_TYPE"], "Bkg estimation method used during reduction")
     header["BKG_SUB"] = (header_stokes["BKG_SUB"], "Amount of bkg subtracted from images")
     header["SMOOTH"] = (header_stokes["SMOOTH"] if "SMOOTH" in list(header_stokes.keys()) else "None", "Smoothing method used during reduction")
```
```diff
@@ -203,11 +202,15 @@ def save_Stokes(
         s_PA_P = s_PA_P[vertex[2] : vertex[3], vertex[0] : vertex[1]]

         new_Stokes_cov = np.zeros((*Stokes_cov.shape[:-2], *shape[::-1]))
+        new_Stokes_stat_cov = np.zeros((*Stokes_stat_cov.shape[:-2], *shape[::-1]))
         for i in range(3):
             for j in range(3):
                 Stokes_cov[i, j][(1 - data_mask).astype(bool)] = 0.0
                 new_Stokes_cov[i, j] = Stokes_cov[i, j][vertex[2] : vertex[3], vertex[0] : vertex[1]]
+                Stokes_stat_cov[i, j][(1 - data_mask).astype(bool)] = 0.0
+                new_Stokes_stat_cov[i, j] = Stokes_stat_cov[i, j][vertex[2] : vertex[3], vertex[0] : vertex[1]]
         Stokes_cov = new_Stokes_cov
+        Stokes_stat_cov = new_Stokes_stat_cov

         data_mask = data_mask[vertex[2] : vertex[3], vertex[0] : vertex[1]]
         data_mask = data_mask.astype(float, copy=False)
```
```diff
@@ -215,48 +218,31 @@ def save_Stokes(
     # Create HDUList object
     hdul = fits.HDUList([])

-    # Add Flux density as PrimaryHDU
-    if flux_data is None:
-        header["DATATYPE"] = ("I_stokes", "type of data stored in the HDU")
-        I_stokes[(1 - data_mask).astype(bool)] = 0.0
-        primary_hdu = fits.PrimaryHDU(data=I_stokes, header=header)
-        primary_hdu.name = "I_stokes"
-        hdul.append(primary_hdu)
-    else:
-        flux_head["FILENAME"] = header["FILENAME"]
-        head = WCS(flux_head).deepcopy().to_header()
-        for key in [key for key in header.keys() if key not in ["SMOOTH", "SAMPLING"]]:
-            try:
-                head[key] = flux_head[key]
-            except KeyError:
-                head[key] = header[key]
-        header["DATATYPE"] = ("Flux_density", "type of data stored in the HDU")
-        primary_hdu = fits.PrimaryHDU(data=flux_data, header=head)
-        primary_hdu.name = "Flux_density"
-        hdul.append(primary_hdu)
-        header["DATATYPE"] = ("I_stokes", "type of data stored in the HDU")
-        I_stokes[(1 - data_mask).astype(bool)] = 0.0
-        image_hdu = fits.ImageHDU(data=I_stokes, header=header)
-        image_hdu.name = "I_stokes"
-        hdul.append(image_hdu)
+    # Add I_stokes as PrimaryHDU
+    header["datatype"] = ("I_stokes", "type of data stored in the HDU")
+    I_stokes[(1 - data_mask).astype(bool)] = 0.0
+    primary_hdu = fits.PrimaryHDU(data=I_stokes, header=header)
+    primary_hdu.name = "I_stokes"
+    hdul.append(primary_hdu)

     # Add Q, U, Stokes_cov, P, s_P, PA, s_PA to the HDUList
     for data, name in [
         [Q_stokes, "Q_stokes"],
         [U_stokes, "U_stokes"],
         [Stokes_cov, "IQU_cov_matrix"],
+        [Stokes_stat_cov, "IQU_stat_cov_matrix"],
         [P, "Pol_deg"],
         [debiased_P, "Pol_deg_debiased"],
         [s_P, "Pol_deg_err"],
-        [s_P_P, "Pol_deg_err_Poisson_noise"],
+        [s_P_P, "Pol_deg_stat_err"],
         [PA, "Pol_ang"],
         [s_PA, "Pol_ang_err"],
-        [s_PA_P, "Pol_ang_err_Poisson_noise"],
+        [s_PA_P, "Pol_ang_stat_err"],
         [data_mask, "Data_mask"],
     ]:
         hdu_header = header.copy()
-        hdu_header["DATATYPE"] = name
-        if not name == "IQU_cov_matrix":
+        hdu_header["datatype"] = name
+        if not name[-10:] == "cov_matrix":
             data[(1 - data_mask).astype(bool)] = 0.0
         hdu = fits.ImageHDU(data=data, header=hdu_header)
         hdu.name = name
```
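After this change, `I_stokes` is the `PrimaryHDU` and every other product is an `ImageHDU` addressed by its `EXTNAME`. A hedged reading sketch with astropy — the file name is a placeholder, and extension lookup by name is case-insensitive:

```python
from astropy.io import fits

with fits.open("target_FOC_stokes.fits") as hdul:  # hypothetical file name
    I_stokes = hdul["I_STOKES"].data             # same array as hdul[0].data (the primary HDU)
    cov = hdul["IQU_COV_MATRIX"].data            # 3x3 covariance matrix per pixel
    stat_cov = hdul["IQU_STAT_COV_MATRIX"].data  # the new statistical-covariance extension
    P = hdul["POL_DEG_DEBIASED"].data
    data_mask = hdul["DATA_MASK"].data.astype(bool)
```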
```diff
@@ -133,12 +133,6 @@ def plot_obs(data_array, headers, rectangle=None, shifts=None, savename=None, pl
             ax_curr.arrow(x, y, dx, dy, length_includes_head=True, width=0.1, head_width=0.3, color="g")
             ax_curr.plot([x, x], [0, data.shape[0] - 1], "--", lw=2, color="g", alpha=0.85)
             ax_curr.plot([0, data.shape[1] - 1], [y, y], "--", lw=2, color="g", alpha=0.85)
-            # position of centroid
-            ax_curr.plot([data.shape[1] / 2, data.shape[1] / 2], [0, data.shape[0] - 1], "--", lw=2, color="b", alpha=0.85)
-            ax_curr.plot([0, data.shape[1] - 1], [data.shape[0] / 2, data.shape[0] / 2], "--", lw=2, color="b", alpha=0.85)
-            cr_x, cr_y = head["CRPIX1"], head["CRPIX2"]
-            # Plot WCS reference point
-            ax_curr.plot([cr_x], [cr_y], "+", lw=2, color="r", alpha=0.85)
         if rectangle is not None:
             x, y, width, height, angle, color = rectangle[i]
             ax_curr.add_patch(Rectangle((x, y), width, height, angle=angle, edgecolor=color, fill=False))
```
```diff
@@ -195,7 +189,7 @@ def plot_Stokes(Stokes, savename=None, plots_folder=""):
     for dataset in [stkI, stkQ, stkU]:
         dataset[np.logical_not(data_mask)] = np.nan

-    wcs = WCS(Stokes["I_STOKES"]).deepcopy()
+    wcs = WCS(Stokes[0]).deepcopy()

     # Plot figure
     plt.rcParams.update({"font.size": 14})
```
```diff
@@ -294,9 +288,6 @@ def polarization_map(
         The figure and ax created for interactive contour maps.
     """
     # Get data
-    flux = Stokes[0].data[0].copy() * Stokes[0].header["PHOTFLAM"]
-    flux_error = Stokes[0].data[1].copy() * Stokes[0].header["PHOTFLAM"]
-    flux_mask = Stokes[0].data[2].astype(bool).copy()
     stkI = Stokes["I_stokes"].data.copy()
     stkQ = Stokes["Q_stokes"].data.copy()
     stkU = Stokes["U_stokes"].data.copy()
```
```diff
@@ -311,20 +302,6 @@ def polarization_map(
         data_mask = np.zeros(stkI.shape).astype(bool)
         data_mask[stkI > 0.0] = True

-    wcs = WCS(Stokes["I_STOKES"]).deepcopy()
-    pivot_wav = Stokes["I_STOKES"].header["photplam"]
-    convert_flux = Stokes["I_STOKES"].header["photflam"]
-
-    # Get integrated flux values from sum
-    I_diluted = stkI[data_mask].sum() * convert_flux
-    I_diluted_err = np.sqrt(np.sum(stk_cov[0, 0][data_mask])) * convert_flux
-
-    # Get integrated polarization values from header
-    P_diluted = Stokes["I_STOKES"].header["P_int"]
-    P_diluted_err = Stokes["I_STOKES"].header["sP_int"]
-    PA_diluted = Stokes["I_STOKES"].header["PA_int"]
-    PA_diluted_err = Stokes["I_STOKES"].header["sPA_int"]
-
     # Compute confidence level map
     QN, UN, QN_ERR, UN_ERR = np.full((4, stkI.shape[0], stkI.shape[1]), np.nan)
     for nflux, sflux in zip([QN, UN, QN_ERR, UN_ERR], [stkQ, stkU, np.sqrt(stk_cov[1, 1]), np.sqrt(stk_cov[2, 2])]):
```
```diff
@@ -337,8 +314,12 @@ def polarization_map(
         for j in range(3):
             stk_cov[i][j][np.logical_not(data_mask)] = np.nan

+    wcs = WCS(Stokes[0]).deepcopy()
+    pivot_wav = Stokes[0].header["photplam"]
+    convert_flux = Stokes[0].header["photflam"]
+
     # Plot Stokes parameters map
-    if display is None or display.lower() in ["pol", "polarization", "polarisation", "pol_deg", "p"]:
+    if display is None or display.lower() in ["default"]:
         plot_Stokes(Stokes, savename=savename, plots_folder=plots_folder)

     # Compute SNR and apply cuts
```
||||
# Compute SNR and apply cuts
|
||||
@@ -379,10 +360,10 @@ def polarization_map(
|
||||
if fig is None:
|
||||
ratiox = max(int(stkI.shape[1] / (stkI.shape[0])), 1)
|
||||
ratioy = max(int((stkI.shape[0]) / stkI.shape[1]), 1)
|
||||
fig = plt.figure(figsize=(7 * ratiox, 7 * ratioy), layout="constrained")
|
||||
fig = plt.figure(figsize=(8 * ratiox, 8 * ratioy), layout="constrained")
|
||||
if ax is None:
|
||||
ax = fig.add_subplot(111, projection=wcs)
|
||||
ax.set(aspect="equal", fc="k", xlim=(0, stkI.shape[1]), ylim=(0, stkI.shape[0]))
|
||||
ax.set(aspect="equal", fc="k") # , ylim=[-0.05 * stkI.shape[0], 1.05 * stkI.shape[0]])
|
||||
# fig.subplots_adjust(hspace=0, wspace=0, left=0.102, right=1.02)
|
||||
|
||||
# ax.coords.grid(True, color='white', ls='dotted', alpha=0.5)
|
||||
```diff
@@ -428,25 +409,7 @@ def polarization_map(
         ax.set_facecolor("white")
         font_color = "black"

-    if display.lower() in ["f", "flux", "fluxdensity"]:
-        # If no display selected, show intensity map
-        display = "f"
-        if flux_lim is not None:
-            vmin, vmax = flux_lim
-        else:
-            vmin, vmax = np.max(flux[flux > 0.0]) / 2e3, np.max(flux[flux > 0.0])
-        imflux, cr = flux.copy(), WCS(Stokes[0].header).wcs.crpix.astype(int)
-        imflux[cr[1] - 1 : cr[1] + 2, cr[0] - 1 : cr[0] + 2] = np.nan
-        im = ax.imshow(
-            imflux, transform=ax.get_transform(WCS(Stokes[0].header).celestial), norm=LogNorm(vmin, vmax), aspect="equal", cmap=kwargs["cmap"], alpha=1.0
-        )
-        fig.colorbar(im, ax=ax, aspect=50, shrink=0.60, pad=0.015, fraction=0.03, label=r"$F_{\lambda}$ [$ergs \cdot cm^{-2} \cdot s^{-1} \cdot \AA$]")
-        levelsF = np.array([0.8, 2.0, 5.0, 10.0, 20.0, 50.0]) / 100.0 * vmax
-        print("Flux density contour levels : ", levelsF)
-        ax.contour(flux, levels=levelsF, transform=ax.get_transform(WCS(Stokes[0].header).celestial), colors="grey", linewidths=0.5)
-        ax.plot(*WCS(Stokes[1]).wcs.crpix, "g+")
-        I_diluted, I_diluted_err = np.sum(flux[flux_mask]), np.sqrt(np.sum(flux_error[flux_mask] ** 2))
-    elif display.lower() in ["i", "intensity"]:
+    if display.lower() in ["i", "intensity"]:
         # If no display selected, show intensity map
         display = "i"
         if flux_lim is not None:
```
```diff
@@ -456,13 +419,10 @@ def polarization_map(
         else:
             vmin, vmax = 1.0 / 2.0 * np.median(np.sqrt(stk_cov[0, 0][stkI > 0.0]) * convert_flux), np.max(stkI[stkI > 0.0] * convert_flux)
         im = ax.imshow(stkI * convert_flux, norm=LogNorm(vmin, vmax), aspect="equal", cmap=kwargs["cmap"], alpha=1.0)
-        ax.plot(*WCS(Stokes[1]).wcs.crpix, "g+")
-        fig.colorbar(im, ax=ax, aspect=50, shrink=0.60, pad=0.015, fraction=0.03, label=r"$F_{\lambda}$ [$ergs \cdot cm^{-2} \cdot s^{-1} \cdot \AA^{-1}$]")
-        # levelsI = np.array([0.8, 2.0, 5.0, 10.0, 20.0, 50.0]) / 100.0 * vmax
-        # print("Stokes I contour levels : ", levelsI)
-        # ax.contour(stkI * convert_flux, levels=levelsI, colors="grey", linewidths=0.5)
-        levelsF = np.array([0.8, 2.0, 5.0, 10.0, 20.0, 50.0]) / 100.0 * np.max(flux[flux > 0.0])
-        ax.contour(flux, levels=levelsF, transform=ax.get_transform(WCS(Stokes[0].header).celestial), colors="grey", linewidths=0.5)
+        fig.colorbar(im, ax=ax, aspect=50, shrink=0.60, pad=0.025, label=r"$F_{\lambda}$ [$ergs \cdot cm^{-2} \cdot s^{-1} \cdot \AA^{-1}$]")
+        levelsI = np.array([0.8, 2.0, 5.0, 10.0, 20.0, 50.0]) / 100.0 * vmax
+        print("Flux density contour levels : ", levelsI)
+        ax.contour(stkI * convert_flux, levels=levelsI, colors="grey", linewidths=0.5)
     elif display.lower() in ["pf", "pol_flux"]:
         # Display polarization flux
         display = "pf"
```
```diff
@@ -475,36 +435,34 @@ def polarization_map(
         else:
             vmin, vmax = 1.0 / 2.0 * np.median(np.sqrt(stk_cov[0, 0][stkI > 0.0]) * convert_flux), np.max(stkI[stkI > 0.0] * convert_flux)
         pfmax = (stkI[stkI > 0.0] * pol[stkI > 0.0] * convert_flux).max()
-        im = ax.imshow(stkI * convert_flux * pol, norm=LogNorm(vmin, vmax), aspect="equal", cmap=kwargs["cmap"], alpha=1.0)
-        fig.colorbar(
-            im, ax=ax, aspect=50, shrink=0.60, pad=0.015, fraction=0.03, label=r"$F_{\lambda} \cdot P$ [$ergs \cdot cm^{-2} \cdot s^{-1} \cdot \AA^{-1}$]"
-        )
+        im = ax.imshow(stkI * convert_flux * pol, norm=LogNorm(vmin, vmax), aspect="equal", cmap=kwargs["cmap"], alpha=1.0 - 0.75 * (pol < pol_err))
+        fig.colorbar(im, ax=ax, aspect=50, shrink=0.60, pad=0.025, label=r"$F_{\lambda} \cdot P$ [$ergs \cdot cm^{-2} \cdot s^{-1} \cdot \AA^{-1}$]")
         # levelsPf = np.linspace(0.0.60, 0.50, 5) * pfmax
-        levelsPf = np.array([1.73, 13.0, 33.0, 66.0]) / 100.0 * pfmax
+        levelsPf = np.array([13.0, 33.0, 66.0]) / 100.0 * pfmax
         print("Polarized flux density contour levels : ", levelsPf)
         ax.contour(stkI * convert_flux * pol, levels=levelsPf, colors="grey", linewidths=0.5)
     elif display.lower() in ["p", "pol", "pol_deg"]:
         # Display polarization degree map
         display = "p"
-        vmin, vmax = 0.0, min(pol[np.isfinite(pol)].max(), 1.0) * 100.0
-        im = ax.imshow(pol * 100.0, vmin=vmin, vmax=vmax, aspect="equal", cmap=kwargs["cmap"], alpha=1.0)
-        fig.colorbar(im, ax=ax, aspect=50, shrink=0.60, pad=0.015, fraction=0.03, label=r"$P$ [%]")
+        vmin, vmax = 0.0, min(pol[pol > pol_err].max(), 1.0) * 100.0
+        im = ax.imshow(pol * 100.0, vmin=vmin, vmax=vmax, aspect="equal", cmap=kwargs["cmap"], alpha=1.0 - 0.75 * (pol < pol_err))
+        fig.colorbar(im, ax=ax, aspect=50, shrink=0.60, pad=0.025, label=r"$P$ [%]")
     elif display.lower() in ["pa", "pang", "pol_ang"]:
         # Display polarization degree map
         display = "pa"
         vmin, vmax = 0.0, 180.0
-        im = ax.imshow(princ_angle(pang), vmin=vmin, vmax=vmax, aspect="equal", cmap=kwargs["cmap"], alpha=1.0)
-        fig.colorbar(im, ax=ax, aspect=50, shrink=0.60, pad=0.015, fraction=0.03, label=r"$\theta_P$ [°]")
+        im = ax.imshow(princ_angle(pang), vmin=vmin, vmax=vmax, aspect="equal", cmap=kwargs["cmap"], alpha=1.0 - 0.75 * (pol < pol_err))
+        fig.colorbar(im, ax=ax, aspect=50, shrink=0.60, pad=0.025, label=r"$\theta_P$ [°]")
     elif display.lower() in ["s_p", "pol_err", "pol_deg_err"]:
         # Display polarization degree error map
         display = "s_p"
         if (SNRp > P_cut).any():
             vmin, vmax = 0.0, np.max([pol_err[SNRp > P_cut].max(), 1.0]) * 100.0
-            im = ax.imshow(pol_err * 100.0, vmin=vmin, vmax=vmax, aspect="equal", cmap="inferno_r", alpha=1.0)
+            im = ax.imshow(pol_err * 100.0, vmin=vmin, vmax=vmax, aspect="equal", cmap="inferno_r", alpha=1.0 - 0.75 * (pol < pol_err))
         else:
             vmin, vmax = 0.0, 100.0
-            im = ax.imshow(pol_err * 100.0, vmin=vmin, vmax=vmax, aspect="equal", cmap="inferno_r", alpha=1.0)
-        fig.colorbar(im, ax=ax, aspect=50, shrink=0.60, pad=0.015, fraction=0.03, label=r"$\sigma_P$ [%]")
+            im = ax.imshow(pol_err * 100.0, vmin=vmin, vmax=vmax, aspect="equal", cmap="inferno_r", alpha=1.0 - 0.75 * (pol < pol_err))
+        fig.colorbar(im, ax=ax, aspect=50, shrink=0.60, pad=0.025, label=r"$\sigma_P$ [%]")
     elif display.lower() in ["s_i", "i_err"]:
         # Display intensity error map
         display = "s_i"
```
```diff
@@ -513,59 +471,65 @@ def polarization_map(
                 1.0 / 2.0 * np.median(np.sqrt(stk_cov[0, 0][stk_cov[0, 0] > 0.0]) * convert_flux),
                 np.max(np.sqrt(stk_cov[0, 0][stk_cov[0, 0] > 0.0]) * convert_flux),
             )
-            im = ax.imshow(np.sqrt(stk_cov[0, 0]) * convert_flux, norm=LogNorm(vmin, vmax), aspect="equal", cmap="inferno_r", alpha=1.0)
+            im = ax.imshow(
+                np.sqrt(stk_cov[0, 0]) * convert_flux, norm=LogNorm(vmin, vmax), aspect="equal", cmap="inferno_r", alpha=1.0 - 0.75 * (pol < pol_err)
+            )
         else:
-            im = ax.imshow(np.sqrt(stk_cov[0, 0]) * convert_flux, aspect="equal", cmap=kwargs["cmap"], alpha=1.0)
-        fig.colorbar(im, ax=ax, aspect=50, shrink=0.60, pad=0.015, fraction=0.03, label=r"$\sigma_I$ [$ergs \cdot cm^{-2} \cdot s^{-1} \cdot \AA^{-1}$]")
+            im = ax.imshow(np.sqrt(stk_cov[0, 0]) * convert_flux, aspect="equal", cmap=kwargs["cmap"], alpha=1.0 - 0.75 * (pol < pol_err))
+        fig.colorbar(im, ax=ax, aspect=50, shrink=0.60, pad=0.025, label=r"$\sigma_I$ [$ergs \cdot cm^{-2} \cdot s^{-1} \cdot \AA^{-1}$]")
     elif display.lower() in ["snri"]:
         # Display I_stokes signal-to-noise map
         display = "snri"
         vmin, vmax = 0.0, np.max(SNRi[np.isfinite(SNRi)])
-        if vmax * 0.99 > SNRi_cut:
-            im = ax.imshow(SNRi, vmin=vmin, vmax=vmax, aspect="equal", cmap=kwargs["cmap"], alpha=1.0)
-            levelsSNRi = np.linspace(SNRi_cut, vmax * 0.99, 5).astype(int)
+        if vmax * 0.99 > SNRi_cut + 3:
+            im = ax.imshow(SNRi, vmin=vmin, vmax=vmax, aspect="equal", cmap=kwargs["cmap"])
+            levelsSNRi = np.linspace(SNRi_cut, vmax * 0.99, 3).astype(int)
             print("SNRi contour levels : ", levelsSNRi)
             ax.contour(SNRi, levels=levelsSNRi, colors="grey", linewidths=0.5)
         else:
-            im = ax.imshow(SNRi, aspect="equal", cmap=kwargs["cmap"], alpha=1.0)
-        fig.colorbar(im, ax=ax, aspect=50, shrink=0.60, pad=0.015, fraction=0.03, label=r"$I_{Stokes}/\sigma_{I}$")
+            im = ax.imshow(SNRi, aspect="equal", cmap=kwargs["cmap"])
+        fig.colorbar(im, ax=ax, aspect=50, shrink=0.60, pad=0.025, label=r"$I_{Stokes}/\sigma_{I}$")
     elif display.lower() in ["snr", "snrp"]:
         # Display polarization degree signal-to-noise map
         display = "snrp"
         vmin, vmax = 0.0, np.max(SNRp[np.isfinite(SNRp)])
-        if vmax * 0.99 > SNRp_cut:
-            im = ax.imshow(SNRp, vmin=vmin, vmax=vmax, aspect="equal", cmap=kwargs["cmap"], alpha=1.0)
-            levelsSNRp = np.linspace(P_cut, vmax * 0.99, 5).astype(int)
+        if vmax * 0.99 > SNRp_cut + 3:
+            im = ax.imshow(SNRp, vmin=vmin, vmax=vmax, aspect="equal", cmap=kwargs["cmap"])
+            levelsSNRp = np.linspace(SNRp_cut, vmax * 0.99, 3).astype(int)
             print("SNRp contour levels : ", levelsSNRp)
             ax.contour(SNRp, levels=levelsSNRp, colors="grey", linewidths=0.5)
         else:
-            im = ax.imshow(SNRp, aspect="equal", cmap=kwargs["cmap"], alpha=1.0)
-        fig.colorbar(im, ax=ax, aspect=50, shrink=0.60, pad=0.015, fraction=0.03, label=r"$P/\sigma_{P}$")
+            im = ax.imshow(SNRp, aspect="equal", cmap=kwargs["cmap"])
+        fig.colorbar(im, ax=ax, aspect=50, shrink=0.60, pad=0.025, label=r"$P/\sigma_{P}$")
     elif display.lower() in ["conf", "confp"]:
         # Display polarization degree signal-to-noise map
         display = "confp"
         vmin, vmax = 0.0, 1.0
-        im = ax.imshow(confP, vmin=vmin, vmax=vmax, aspect="equal", cmap=kwargs["cmap"], alpha=1.0)
+        im = ax.imshow(confP, vmin=vmin, vmax=vmax, aspect="equal", cmap=kwargs["cmap"])
         levelsconfp = np.array([0.500, 0.900, 0.990, 0.999])
         print("confp contour levels : ", levelsconfp)
         ax.contour(confP, levels=levelsconfp, colors="grey", linewidths=0.5)
-        fig.colorbar(im, ax=ax, aspect=50, shrink=0.60, pad=0.015, fraction=0.03, label=r"$Conf_{P}$")
+        fig.colorbar(im, ax=ax, aspect=50, shrink=0.60, pad=0.025, label=r"$Conf_{P}$")
     else:
         # Defaults to intensity map
         if flux_lim is not None:
             vmin, vmax = flux_lim
-        elif mask.sum() > 0.0:
-            vmin, vmax = 1.0 * np.mean(np.sqrt(stk_cov[0, 0][mask]) * convert_flux), np.max(stkI[stkI > 0.0] * convert_flux)
         else:
-            vmin, vmax = np.max(flux[flux > 0.0] * convert_flux) / 2e3, np.max(flux[flux > 0.0] * convert_flux)
-        im = ax.imshow(
-            flux * Stokes[0].header["PHOTFLAM"],
-            transform=ax.get_transform(WCS(Stokes[0].header).celestial),
-            norm=LogNorm(vmin, vmax),
-            aspect="equal",
-            cmap=kwargs["cmap"],
-            alpha=1.0,
-        )
-        fig.colorbar(im, ax=ax, aspect=50, shrink=0.60, pad=0.015, fraction=0.03, label=r"$F_{\lambda}$ [$ergs \cdot cm^{-2} \cdot s^{-1} \cdot \AA$]")
-        I_diluted, I_diluted_err = np.sum(flux[flux_mask]), np.sqrt(np.sum(flux_error[flux_mask] ** 2))
+            vmin, vmax = 1.0 * np.mean(np.sqrt(stk_cov[0, 0][stkI > 0.0]) * convert_flux), np.max(stkI[stkI > 0.0] * convert_flux)
+        im = ax.imshow(stkI * convert_flux, norm=LogNorm(vmin, vmax), aspect="equal", cmap=kwargs["cmap"], alpha=1.0)
+        fig.colorbar(im, ax=ax, aspect=50, shrink=0.60, pad=0.025, label=r"$F_{\lambda}$ [$ergs \cdot cm^{-2} \cdot s^{-1} \cdot \AA$]")

+    # Get integrated flux values from sum
+    I_diluted = stkI[data_mask].sum()
+    I_diluted_err = np.sqrt(np.sum(stk_cov[0, 0][data_mask]))
+
+    # Get integrated polarization values from header
+    P_diluted = Stokes[0].header["P_int"]
+    P_diluted_err = Stokes[0].header["sP_int"]
+    PA_diluted = Stokes[0].header["PA_int"]
+    PA_diluted_err = Stokes[0].header["sPA_int"]

     plt.rcParams.update({"font.size": 12})
     px_size = wcs.wcs.get_cdelt()[0] * 3600.0
```
```diff
@@ -583,12 +547,12 @@ def polarization_map(
             back_length=0.0,
             head_length=7.0,
             head_width=7.0,
-            angle=-Stokes["I_STOKES"].header["orientat"],
+            angle=-Stokes[0].header["orientat"],
             text_props={"ec": "k", "fc": font_color, "alpha": 1, "lw": 0.5},
             arrow_props={"ec": "k", "fc": font_color, "alpha": 1, "lw": 1},
         )

-    if display.lower() in ["f", "i", "s_i", "snri", "pf", "p", "pa", "s_p", "snrp", "confp"] and step_vec != 0:
+    if display.lower() in ["i", "s_i", "snri", "pf", "p", "pa", "s_p", "snrp", "confp"] and step_vec != 0:
         if scale_vec == -1:
             poldata[np.isfinite(poldata)] = 1.0 / 2.0
             step_vec = 1
```
```diff
@@ -1212,8 +1176,6 @@ class overplot_chandra(align_maps):
         other_data = deepcopy(self.other_data)
         other_wcs = self.other_wcs.deepcopy()
         if zoom != 1:
-            from scipy.ndimage import zoom as sc_zoom
-
             other_data = sc_zoom(other_data, zoom)
             other_wcs.wcs.crpix *= zoom
             other_wcs.wcs.cdelt /= zoom
```
```diff
@@ -1935,7 +1897,7 @@ class crop_map(object):
         else:
             self.ax = ax
             self.mask_alpha = 0.75
-            self.rect_selector = RectangleSelector(self.ax, self.onselect_crop, button=[1])
+            self.rect_selector = RectangleSelector(self.ax, self.onselect_crop, button=[1], spancoords="pixels", useblit=True)
             self.embedded = True
         self.ax.set(xlabel="Right Ascension (J2000)", ylabel="Declination (J2000)")
         self.display(self.data, self.wcs, self.map_convert, **self.kwargs)
```
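In the new constructor calls, `spancoords="pixels"` makes the selector's minimum-span test work in screen pixels rather than data units, and `useblit=True` turns on blitting so dragging the rectangle does not force a full canvas redraw. A minimal standalone example of the same construction (illustrative, not the class's code):

```python
import matplotlib.pyplot as plt
from matplotlib.widgets import RectangleSelector

fig, ax = plt.subplots()
ax.imshow([[0, 1], [2, 3]])

def onselect(eclick, erelease):
    # press/release events carry the corners of the dragged rectangle in data coordinates
    print(eclick.xdata, eclick.ydata, "->", erelease.xdata, erelease.ydata)

selector = RectangleSelector(ax, onselect, button=[1], spancoords="pixels", useblit=True)
plt.show()
```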
```diff
@@ -1996,7 +1958,7 @@ class crop_map(object):
             self.display()

         if self.fig.canvas.manager.toolbar.mode == "":
-            self.rect_selector = RectangleSelector(self.ax, self.onselect_crop, button=[1])
+            self.rect_selector = RectangleSelector(self.ax, self.onselect_crop, button=[1], spancoords="pixels", useblit=True)

         self.RSextent = deepcopy(self.extent)
         self.RScenter = deepcopy(self.center)
```
```diff
@@ -2056,7 +2018,7 @@ class crop_map(object):
         self.ax.set_ylim(0, ylim)

         if self.fig.canvas.manager.toolbar.mode == "":
-            self.rect_selector = RectangleSelector(self.ax, self.onselect_crop, button=[1])
+            self.rect_selector = RectangleSelector(self.ax, self.onselect_crop, button=[1], spancoords="pixels", useblit=True)

         self.fig.canvas.draw_idle()
```
```diff
@@ -2068,7 +2030,7 @@ class crop_map(object):

     def crop(self) -> None:
         if self.fig.canvas.manager.toolbar.mode == "":
-            self.rect_selector = RectangleSelector(self.ax, self.onselect_crop, button=[1])
+            self.rect_selector = RectangleSelector(self.ax, self.onselect_crop, button=[1], spancoords="pixels", useblit=True)
         self.bapply.on_clicked(self.apply_crop)
         self.breset.on_clicked(self.reset_crop)
         self.fig.canvas.mpl_connect("close_event", self.on_close)
```
```diff
@@ -2117,7 +2079,7 @@ class crop_Stokes(crop_map):

         # Crop dataset
         for dataset in self.hdul_crop:
-            if dataset.header["datatype"] == "IQU_cov_matrix":
+            if dataset.header["datatype"][-10:] == "cov_matrix":
                 stokes_cov = np.zeros((3, 3, shape[1], shape[0]))
                 for i in range(3):
                     for j in range(3):
```
```diff
@@ -2140,18 +2102,24 @@ class crop_Stokes(crop_map):
             self.on_close(event)

         if self.fig.canvas.manager.toolbar.mode == "":
-            self.rect_selector = RectangleSelector(self.ax, self.onselect_crop, button=[1])
+            self.rect_selector = RectangleSelector(self.ax, self.onselect_crop, button=[1], spancoords="pixels", useblit=True)
         # Update integrated values
-        mask = np.logical_and(self.hdul_crop["data_mask"].data.astype(bool), self.hdul_crop[0].data > 0)
-        I_diluted = self.hdul_crop["i_stokes"].data[mask].sum()
-        Q_diluted = self.hdul_crop["q_stokes"].data[mask].sum()
-        U_diluted = self.hdul_crop["u_stokes"].data[mask].sum()
-        I_diluted_err = np.sqrt(np.sum(self.hdul_crop["iqu_cov_matrix"].data[0, 0][mask]))
-        Q_diluted_err = np.sqrt(np.sum(self.hdul_crop["iqu_cov_matrix"].data[1, 1][mask]))
-        U_diluted_err = np.sqrt(np.sum(self.hdul_crop["iqu_cov_matrix"].data[2, 2][mask]))
-        IQ_diluted_err = np.sqrt(np.sum(self.hdul_crop["iqu_cov_matrix"].data[0, 1][mask] ** 2))
-        IU_diluted_err = np.sqrt(np.sum(self.hdul_crop["iqu_cov_matrix"].data[0, 2][mask] ** 2))
-        QU_diluted_err = np.sqrt(np.sum(self.hdul_crop["iqu_cov_matrix"].data[1, 2][mask] ** 2))
+        mask = np.logical_and(self.hdul_crop["DATA_MASK"].data.astype(bool), self.hdul_crop[0].data > 0)
+        I_diluted = self.hdul_crop["I_STOKES"].data[mask].sum()
+        Q_diluted = self.hdul_crop["Q_STOKES"].data[mask].sum()
+        U_diluted = self.hdul_crop["U_STOKES"].data[mask].sum()
+        I_diluted_err = np.sqrt(np.sum(self.hdul_crop["IQU_COV_MATRIX"].data[0, 0][mask]))
+        Q_diluted_err = np.sqrt(np.sum(self.hdul_crop["IQU_COV_MATRIX"].data[1, 1][mask]))
+        U_diluted_err = np.sqrt(np.sum(self.hdul_crop["IQU_COV_MATRIX"].data[2, 2][mask]))
+        IQ_diluted_err = np.sqrt(np.sum(self.hdul_crop["IQU_COV_MATRIX"].data[0, 1][mask] ** 2))
+        IU_diluted_err = np.sqrt(np.sum(self.hdul_crop["IQU_COV_MATRIX"].data[0, 2][mask] ** 2))
+        QU_diluted_err = np.sqrt(np.sum(self.hdul_crop["IQU_COV_MATRIX"].data[1, 2][mask] ** 2))
+        I_diluted_stat_err = np.sqrt(np.sum(self.hdul_crop["IQU_STAT_COV_MATRIX"].data[0, 0][mask]))
+        Q_diluted_stat_err = np.sqrt(np.sum(self.hdul_crop["IQU_STAT_COV_MATRIX"].data[1, 1][mask]))
+        U_diluted_stat_err = np.sqrt(np.sum(self.hdul_crop["IQU_STAT_COV_MATRIX"].data[2, 2][mask]))
+        IQ_diluted_stat_err = np.sqrt(np.sum(self.hdul_crop["IQU_STAT_COV_MATRIX"].data[0, 1][mask] ** 2))
+        IU_diluted_stat_err = np.sqrt(np.sum(self.hdul_crop["IQU_STAT_COV_MATRIX"].data[0, 2][mask] ** 2))
+        QU_diluted_stat_err = np.sqrt(np.sum(self.hdul_crop["IQU_STAT_COV_MATRIX"].data[1, 2][mask] ** 2))

         P_diluted = np.sqrt(Q_diluted**2 + U_diluted**2) / I_diluted
         P_diluted_err = (1.0 / I_diluted) * np.sqrt(
```
```diff
@@ -2160,6 +2128,18 @@ class crop_Stokes(crop_map):
             - 2.0 * (Q_diluted / I_diluted) * IQ_diluted_err
             - 2.0 * (U_diluted / I_diluted) * IU_diluted_err
         )
+        P_diluted_stat_err = (
+            P_diluted
+            / I_diluted
+            * np.sqrt(
+                I_diluted_stat_err
+                - 2.0 / (I_diluted * P_diluted**2) * (Q_diluted * IQ_diluted_stat_err + U_diluted * IU_diluted_stat_err)
+                + 1.0
+                / (I_diluted**2 * P_diluted**4)
+                * (Q_diluted**2 * Q_diluted_stat_err + U_diluted**2 * U_diluted_stat_err + 2.0 * Q_diluted * U_diluted * QU_diluted_stat_err)
+            )
+        )
+        debiased_P_diluted = np.sqrt(P_diluted**2 - P_diluted_stat_err**2) if P_diluted**2 > P_diluted_stat_err**2 else 0.0

         PA_diluted = princ_angle((90.0 / np.pi) * np.arctan2(U_diluted, Q_diluted))
         PA_diluted_err = (90.0 / (np.pi * (Q_diluted**2 + U_diluted**2))) * np.sqrt(
```
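For reference, the integrated quantities updated here follow the usual Stokes relations; in the notation of this hunk (reconstructed from the expressions above, not quoted from the repository's documentation):

```latex
P = \frac{\sqrt{Q^2 + U^2}}{I}, \qquad
\theta_P = \frac{90}{\pi}\,\operatorname{arctan2}(U, Q), \qquad
P_{\mathrm{debiased}} =
\begin{cases}
  \sqrt{P^2 - \sigma_{P,\mathrm{stat}}^2} & \text{if } P^2 > \sigma_{P,\mathrm{stat}}^2, \\
  0 & \text{otherwise,}
\end{cases}
```

where the statistical covariance terms are only used to debias P; the header's `sP_int` still comes from the full covariance matrix (see the next hunk).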
```diff
@@ -2169,7 +2149,7 @@ class crop_Stokes(crop_map):
         for dataset in self.hdul_crop:
             if dataset.header["FILENAME"][-4:] != "crop":
                 dataset.header["FILENAME"] += "_crop"
-            dataset.header["P_int"] = (P_diluted, "Integrated polarization degree")
+            dataset.header["P_int"] = (debiased_P_diluted, "Integrated polarization degree")
             dataset.header["sP_int"] = (np.ceil(P_diluted_err * 1000.0) / 1000.0, "Integrated polarization degree error")
             dataset.header["PA_int"] = (PA_diluted, "Integrated polarization angle")
             dataset.header["sPA_int"] = (np.ceil(PA_diluted_err * 10.0) / 10.0, "Integrated polarization angle error")
```
```diff
@@ -2479,9 +2459,9 @@ class pol_map(object):
         self.conf = PCconf(self.QN, self.UN, self.QN_ERR, self.UN_ERR)

         # Get data
-        self.targ = self.Stokes["I_STOKES"].header["targname"]
-        self.pivot_wav = self.Stokes["I_STOKES"].header["photplam"]
-        self.map_convert = self.Stokes["I_STOKES"].header["photflam"]
+        self.targ = self.Stokes[0].header["targname"]
+        self.pivot_wav = self.Stokes[0].header["photplam"]
+        self.map_convert = self.Stokes[0].header["photflam"]

         # Create figure
         plt.rcParams.update({"font.size": 10})
```
```diff
@@ -2503,9 +2483,11 @@ class pol_map(object):
         ax_snr_reset = self.fig.add_axes([0.060, 0.020, 0.05, 0.02])
         ax_snr_conf = self.fig.add_axes([0.115, 0.020, 0.05, 0.02])
         SNRi_max = np.max(self.I[self.IQU_cov[0, 0] > 0.0] / np.sqrt(self.IQU_cov[0, 0][self.IQU_cov[0, 0] > 0.0]))
-        SNRp_max = np.max(self.P[self.s_P > 0.0] / self.s_P[self.s_P > 0.0])
+        SNRp_max = np.max(self.P[self.P_ERR > 0.0] / self.P_ERR[self.P_ERR > 0.0])
         s_I_cut = Slider(ax_I_cut, r"$SNR^{I}_{cut}$", 1.0, int(SNRi_max * 0.95), valstep=1, valinit=self.SNRi_cut)
-        self.s_P_cut = Slider(self.ax_P_cut, r"$Conf^{P}_{cut}$", 0.50, 1.0, valstep=[0.500, 0.900, 0.990, 0.999], valinit=self.P_cut if P_cut <= 1.0 else 0.99)
+        self.P_ERR_cut = Slider(
+            self.ax_P_cut, r"$Conf^{P}_{cut}$", 0.50, 1.0, valstep=[0.500, 0.900, 0.990, 0.999], valinit=self.P_cut if P_cut <= 1.0 else 0.99
+        )
         s_vec_sc = Slider(ax_vec_sc, r"Vec scale", 0.0, 10.0, valstep=1, valinit=self.scale_vec)
         b_snr_reset = Button(ax_snr_reset, "Reset")
         b_snr_reset.label.set_fontsize(8)
```
```diff
@@ -2533,7 +2515,7 @@ class pol_map(object):

         def reset_snr(event):
             s_I_cut.reset()
-            self.s_P_cut.reset()
+            self.P_ERR_cut.reset()
             s_vec_sc.reset()

         def toggle_snr_conf(event=None):
```
```diff
@@ -2542,21 +2524,21 @@ class pol_map(object):
             if self.snr_conf:
                 self.snr_conf = 0
                 b_snr_conf.label.set_text("Conf")
-                self.s_P_cut = Slider(
+                self.P_ERR_cut = Slider(
                     self.ax_P_cut, r"$SNR^{P}_{cut}$", 1.0, max(int(SNRp_max * 0.95), 3), valstep=1, valinit=self.P_cut if P_cut >= 1.0 else 3
                 )
             else:
                 self.snr_conf = 1
                 b_snr_conf.label.set_text("SNR")
-                self.s_P_cut = Slider(
+                self.P_ERR_cut = Slider(
                     self.ax_P_cut, r"$Conf^{P}_{cut}$", 0.50, 1.0, valstep=[0.500, 0.900, 0.990, 0.999], valinit=self.P_cut if P_cut <= 1.0 else 0.99
                 )
-            self.s_P_cut.on_changed(update_snrp)
-            update_snrp(self.s_P_cut.val)
+            self.P_ERR_cut.on_changed(update_snrp)
+            update_snrp(self.P_ERR_cut.val)
             self.fig.canvas.draw_idle()

         s_I_cut.on_changed(update_snri)
-        self.s_P_cut.on_changed(update_snrp)
+        self.P_ERR_cut.on_changed(update_snrp)
         s_vec_sc.on_changed(update_vecsc)
         b_snr_reset.on_clicked(reset_snr)
         b_snr_conf.on_clicked(toggle_snr_conf)
```
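matplotlib's `Slider` offers no public way to change its range or step set in place, so toggling between an SNR cut and a confidence cut rebuilds the widget on the same axes, as above. A stripped-down sketch of that pattern (hypothetical callback, not the class's code; `valstep` taking a list of allowed values needs a reasonably recent matplotlib):

```python
import matplotlib.pyplot as plt
from matplotlib.widgets import Slider

fig = plt.figure()
ax_cut = fig.add_axes([0.25, 0.05, 0.50, 0.03])
cut = Slider(ax_cut, r"$SNR^{P}_{cut}$", 1.0, 30.0, valstep=1, valinit=3)

def switch_to_confidence(event=None):
    global cut
    ax_cut.clear()  # drop the old slider's artists before re-creating it on the same axes
    cut = Slider(ax_cut, r"$Conf^{P}_{cut}$", 0.50, 1.0, valstep=[0.500, 0.900, 0.990, 0.999], valinit=0.99)
    cut.on_changed(lambda val: print("new cut:", val))
    fig.canvas.draw_idle()
```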
```diff
@@ -2575,7 +2557,7 @@ class pol_map(object):

         def select_roi(event):
             if self.data is None:
-                self.data = self.Stokes["I_STOKES"].data
+                self.data = self.Stokes[0].data
             if self.selected:
                 self.selected = False
                 self.region = deepcopy(self.select_instance.mask.astype(bool))
```
```diff
@@ -2617,7 +2599,7 @@ class pol_map(object):

         def select_aperture(event):
             if self.data is None:
-                self.data = self.Stokes["I_STOKES"].data
+                self.data = self.Stokes[0].data
             if self.selected:
                 self.selected = False
                 self.select_instance.update_mask()
```
```diff
@@ -2674,7 +2656,7 @@ class pol_map(object):

         def select_slit(event):
             if self.data is None:
-                self.data = self.Stokes["I_STOKES"].data
+                self.data = self.Stokes[0].data
             if self.selected:
                 self.selected = False
                 self.select_instance.update_mask()
```
```diff
@@ -2951,31 +2933,7 @@ class pol_map(object):

     @property
     def wcs(self):
-        return WCS(self.Stokes["I_STOKES"].header).celestial.deepcopy()
-
-    @property
-    def Flux(self):
-        return self.Stokes[0].data[0] * self.Stokes[0].header["PHOTFLAM"]
-
-    @property
-    def Flux_err(self):
-        return self.Stokes[0].data[1] * self.Stokes[0].header["PHOTFLAM"]
-
-    @property
-    def Flux_mask(self):
-        return self.Stokes[0].data[2].astype(bool)
-
-    @property
-    def Flux(self):
-        return self.Stokes[0].data[0] * self.Stokes[0].header["PHOTFLAM"]
-
-    @property
-    def Flux_err(self):
-        return self.Stokes[0].data[1] * self.Stokes[0].header["PHOTFLAM"]
-
-    @property
-    def Flux_mask(self):
-        return self.Stokes[0].data[2].astype(bool)
+        return WCS(self.Stokes[0].header).celestial.deepcopy()

     @property
     def I(self):
```
```diff
@@ -3021,12 +2979,16 @@ class pol_map(object):
     def IQU_cov(self):
         return self.Stokes["IQU_COV_MATRIX"].data

+    @property
+    def IQU_stat_cov(self):
+        return self.Stokes["IQU_STAT_COV_MATRIX"].data
+
     @property
     def P(self):
         return self.Stokes["POL_DEG_DEBIASED"].data

     @property
-    def s_P(self):
+    def P_ERR(self):
         return self.Stokes["POL_DEG_ERR"].data

     @property
```
```diff
@@ -3034,12 +2996,12 @@ class pol_map(object):
         return self.Stokes["POL_ANG"].data

     @property
-    def s_PA(self):
+    def PA_ERR(self):
         return self.Stokes["POL_ANG_ERR"].data

     @property
     def data_mask(self):
-        return self.Stokes["DATA_MASK"].data.astype(bool)
+        return self.Stokes["DATA_MASK"].data

     def set_data_mask(self, mask):
         self.Stokes["DATA_MASK"].data = mask.astype(float)
```
```diff
@@ -3050,7 +3012,7 @@ class pol_map(object):
         SNRp_mask, SNRi_mask = (np.zeros(self.P.shape).astype(bool), np.zeros(self.I.shape).astype(bool))
         SNRi_mask[s_I > 0.0] = self.I[s_I > 0.0] / s_I[s_I > 0.0] > self.SNRi
         if self.SNRp >= 1.0:
-            SNRp_mask[self.s_P > 0.0] = self.P[self.s_P > 0.0] / self.s_P[self.s_P > 0.0] > self.SNRp
+            SNRp_mask[self.P_ERR > 0.0] = self.P[self.P_ERR > 0.0] / self.P_ERR[self.P_ERR > 0.0] > self.SNRp
         else:
             SNRp_mask = self.conf > self.SNRp
         return np.logical_and(SNRi_mask, SNRp_mask)
```
```diff
@@ -3110,7 +3072,7 @@ class pol_map(object):
             back_length=0.0,
             head_length=7.5,
             head_width=7.5,
-            angle=-self.Stokes["I_STOKES"].header["orientat"],
+            angle=-self.Stokes[0].header["orientat"],
             color="white",
             text_props={"ec": None, "fc": "w", "alpha": 1, "lw": 0.4},
             arrow_props={"ec": None, "fc": "w", "alpha": 1, "lw": 1},
```
```diff
@@ -3119,19 +3081,17 @@ class pol_map(object):

     def display(self, fig=None, ax=None, flux_lim=None):
         kwargs = dict([])
         if self.display_selection is None:
             self.display_selection = "total_flux"
+        if flux_lim is None:
+            flux_lim = self.flux_lim
-        if self.display_selection is None or self.display_selection.lower() in ["total_flux"]:
-            self.data = self.Flux  # self.I * self.map_convert
+        if self.display_selection.lower() in ["total_flux"]:
+            self.data = self.I * self.map_convert
             if flux_lim is None:
                 vmin, vmax = (1.0 / 2.0 * np.median(self.data[self.data > 0.0]), np.max(self.data[self.data > 0.0]))
             else:
                 vmin, vmax = flux_lim
             kwargs["norm"] = LogNorm(vmin, vmax)
-            if ax is None:
-                kwargs["transform"] = self.ax.get_transform(WCS(self.Stokes[0].header).celestial)
-            else:
-                kwargs["transform"] = ax.get_transform(WCS(self.Stokes[0].header).celestial)
             label = r"$F_{\lambda}$ [$ergs \cdot cm^{-2} \cdot s^{-1} \cdot \AA^{-1}$]"
         elif self.display_selection.lower() in ["pol_flux"]:
             self.data = self.I * self.map_convert * self.P
```
```diff
@@ -3143,11 +3103,13 @@ class pol_map(object):
             label = r"$P \cdot F_{\lambda}$ [$ergs \cdot cm^{-2} \cdot s^{-1} \cdot \AA^{-1}$]"
         elif self.display_selection.lower() in ["pol_deg"]:
             self.data = self.P * 100.0
-            kwargs["vmin"], kwargs["vmax"] = 0.0, np.max(self.data[self.P > self.s_P])
+            kwargs["vmin"], kwargs["vmax"] = 0.0, min(np.max(self.data[self.P > self.P_ERR]), 100.0)
+            kwargs["alpha"] = 1.0 - 0.75 * (self.P < self.P_ERR)
             label = r"$P$ [%]"
         elif self.display_selection.lower() in ["pol_ang"]:
             self.data = princ_angle(self.PA)
             kwargs["vmin"], kwargs["vmax"] = 0, 180.0
+            kwargs["alpha"] = 1.0 - 0.75 * (self.P < self.P_ERR)
             label = r"$\theta_{P}$ [°]"
         elif self.display_selection.lower() in ["snri"]:
             s_I = np.sqrt(self.IQU_cov[0, 0])
```
```diff
@@ -3158,7 +3120,7 @@ class pol_map(object):
             label = r"$I_{Stokes}/\sigma_{I}$"
         elif self.display_selection.lower() in ["snrp"]:
             SNRp = np.zeros(self.P.shape)
-            SNRp[self.s_P > 0.0] = self.P[self.s_P > 0.0] / self.s_P[self.s_P > 0.0]
+            SNRp[self.P_ERR > 0.0] = self.P[self.P_ERR > 0.0] / self.P_ERR[self.P_ERR > 0.0]
             self.data = SNRp
             kwargs["vmin"], kwargs["vmax"] = 0.0, np.max(self.data[self.data > 0.0])
             label = r"$P/\sigma_{P}$"
```
```diff
@@ -3172,7 +3134,6 @@ class pol_map(object):
             if hasattr(self, "im"):
                 self.im.remove()
             self.im = ax.imshow(self.data, aspect="equal", cmap="inferno", **kwargs)
-            ax.set(xlim=(0, self.I.shape[1]), ylim=(0, self.I.shape[0]))
             plt.rcParams.update({"font.size": 14})
             self.cbar = fig.colorbar(self.im, ax=ax, aspect=50, shrink=0.75, pad=0.025, label=label)
             plt.rcParams.update({"font.size": 10})
```
```diff
@@ -3180,8 +3141,8 @@ class pol_map(object):
             return self.im
         else:
             im = ax.imshow(self.data, aspect="equal", cmap="inferno", **kwargs)
-            ax.set_xlim(0, self.I.shape[1])
-            ax.set_ylim(0, self.I.shape[0])
+            ax.set_xlim(0, self.data.shape[1])
+            ax.set_ylim(0, self.data.shape[0])
             plt.rcParams.update({"font.size": 14})
             fig.colorbar(im, ax=ax, aspect=50, shrink=0.75, pad=0.025, label=label)
             plt.rcParams.update({"font.size": 10})
```
```diff
@@ -3205,31 +3166,31 @@ class pol_map(object):
             ax = self.ax
             if hasattr(self, "quiver"):
                 self.quiver.remove()
-            self.quiver = ax.quiver(
-                X[:: self.step_vec, :: self.step_vec],
-                Y[:: self.step_vec, :: self.step_vec],
-                XY_U[:: self.step_vec, :: self.step_vec],
-                XY_V[:: self.step_vec, :: self.step_vec],
-                units="xy",
-                scale=1.0 / scale_vec,
-                scale_units="xy",
-                pivot="mid",
-                headwidth=0.0,
-                headlength=0.0,
-                headaxislength=0.0,
-                width=0.3,
-                linewidth=0.6,
-                color="white",
-                edgecolor="black",
-            )
             if self.pa_err:
+                self.quiver = ax.quiver(
+                    X[:: self.step_vec, :: self.step_vec],
+                    Y[:: self.step_vec, :: self.step_vec],
+                    XY_U[:: self.step_vec, :: self.step_vec],
+                    XY_V[:: self.step_vec, :: self.step_vec],
+                    units="xy",
+                    scale=1.0 / scale_vec,
+                    scale_units="xy",
+                    pivot="mid",
+                    headwidth=0.0,
+                    headlength=0.0,
+                    headaxislength=0.0,
+                    width=0.1,
+                    # linewidth=0.6,
+                    color="black",
+                    edgecolor="black",
+                )
                 XY_U_err1, XY_V_err1 = (
-                    P_cut * np.cos(np.pi / 2.0 + (self.PA + 3.0 * self.s_PA) * np.pi / 180.0),
-                    P_cut * np.sin(np.pi / 2.0 + (self.PA + 3.0 * self.s_PA) * np.pi / 180.0),
+                    P_cut * np.cos(np.pi / 2.0 + (self.PA + 3.0 * self.P_ERRA) * np.pi / 180.0),
+                    P_cut * np.sin(np.pi / 2.0 + (self.PA + 3.0 * self.P_ERRA) * np.pi / 180.0),
                 )
                 XY_U_err2, XY_V_err2 = (
-                    P_cut * np.cos(np.pi / 2.0 + (self.PA - 3.0 * self.s_PA) * np.pi / 180.0),
-                    P_cut * np.sin(np.pi / 2.0 + (self.PA - 3.0 * self.s_PA) * np.pi / 180.0),
+                    P_cut * np.cos(np.pi / 2.0 + (self.PA - 3.0 * self.P_ERRA) * np.pi / 180.0),
+                    P_cut * np.sin(np.pi / 2.0 + (self.PA - 3.0 * self.P_ERRA) * np.pi / 180.0),
                 )
                 if hasattr(self, "quiver_err1"):
                     self.quiver_err1.remove()
```
@@ -3271,25 +3232,6 @@ class pol_map(object):
|
||||
edgecolor="black",
|
||||
ls="dashed",
|
||||
)
|
||||
else:
|
||||
self.quiver = ax.quiver(
|
||||
X[:: self.step_vec, :: self.step_vec],
|
||||
Y[:: self.step_vec, :: self.step_vec],
|
||||
XY_U[:: self.step_vec, :: self.step_vec],
|
||||
XY_V[:: self.step_vec, :: self.step_vec],
|
||||
units="xy",
|
||||
scale=1.0 / scale_vec,
|
||||
scale_units="xy",
|
||||
pivot="mid",
|
||||
headwidth=0.0,
|
||||
headlength=0.0,
|
||||
headaxislength=0.0,
|
||||
width=0.3,
|
||||
linewidth=0.6,
|
||||
color="white",
|
||||
edgecolor="black",
|
||||
)
|
||||
|
||||
fig.canvas.draw_idle()
|
||||
return self.quiver
|
||||
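
The headless quiver calls above draw polarization segments whose (U, V) components are derived from the degree P and angle PA, with a pi/2 offset so that PA = 0 points North (up). A minimal sketch with assumed example arrays:

import numpy as np

P_cut = np.array([[0.10, 0.20]])        # polarization degree (example values)
PA = np.array([[30.0, 60.0]])           # polarization angle in degrees
ang = np.pi / 2.0 + PA * np.pi / 180.0  # rotate so PA = 0 points up (North)
XY_U, XY_V = P_cut * np.cos(ang), P_cut * np.sin(ang)
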
        else:
@@ -3312,12 +3254,12 @@ class pol_map(object):
            )
            if self.pa_err:
                XY_U_err1, XY_V_err1 = (
                    P_cut * np.cos(np.pi / 2.0 + (self.PA + 3.0 * self.s_PA) * np.pi / 180.0),
                    P_cut * np.sin(np.pi / 2.0 + (self.PA + 3.0 * self.s_PA) * np.pi / 180.0),
                    P_cut * np.cos(np.pi / 2.0 + (self.PA + 3.0 * self.P_ERRA) * np.pi / 180.0),
                    P_cut * np.sin(np.pi / 2.0 + (self.PA + 3.0 * self.P_ERRA) * np.pi / 180.0),
                )
                XY_U_err2, XY_V_err2 = (
                    P_cut * np.cos(np.pi / 2.0 + (self.PA - 3.0 * self.s_PA) * np.pi / 180.0),
                    P_cut * np.sin(np.pi / 2.0 + (self.PA - 3.0 * self.s_PA) * np.pi / 180.0),
                    P_cut * np.cos(np.pi / 2.0 + (self.PA - 3.0 * self.P_ERRA) * np.pi / 180.0),
                    P_cut * np.sin(np.pi / 2.0 + (self.PA - 3.0 * self.P_ERRA) * np.pi / 180.0),
                )
                ax.quiver(
                    X[:: self.step_vec, :: self.step_vec],
@@ -3361,37 +3303,28 @@ class pol_map(object):
        str_conf = ""
        if self.region is None:
            s_I = np.sqrt(self.IQU_cov[0, 0])
            I_reg = (
                np.sum(self.Flux[self.Flux_mask]) / self.map_convert
                if self.display_selection is None or self.display_selection.lower() in ["total_flux"]
                else np.sum(self.I[self.data_mask])
            )
            I_reg_err = (
                np.sqrt(np.sum(self.Flux_err[self.Flux_mask] ** 2)) / self.map_convert
                if self.display_selection is None or self.display_selection.lower() in ["total_flux"]
                else np.sqrt(np.sum(s_I[self.data_mask] ** 2))
            )
            P_reg = self.Stokes["I_STOKES"].header["P_int"]
            P_reg_err = self.Stokes["I_STOKES"].header["sP_int"]
            PA_reg = self.Stokes["I_STOKES"].header["PA_int"]
            PA_reg_err = self.Stokes["I_STOKES"].header["sPA_int"]

            s_I = np.sqrt(self.IQU_cov[0, 0])
            s_Q = np.sqrt(self.IQU_cov[1, 1])
            s_U = np.sqrt(self.IQU_cov[2, 2])
            s_IQ = self.IQU_cov[0, 1]
            s_IU = self.IQU_cov[0, 2]
            s_QU = self.IQU_cov[1, 2]
            I_reg = self.I.sum()
            I_reg_err = np.sqrt(np.sum(s_I**2))
            debiased_P_reg = self.Stokes[0].header["P_int"]
            P_reg_err = self.Stokes[0].header["sP_int"]
            PA_reg = self.Stokes[0].header["PA_int"]
            PA_reg_err = self.Stokes[0].header["sPA_int"]

            I_cut = self.I[self.cut].sum()
            Q_cut = self.Q[self.cut].sum()
            U_cut = self.U[self.cut].sum()
            I_cut_err = np.sqrt(np.sum(s_I[self.cut] ** 2))
            Q_cut_err = np.sqrt(np.sum(s_Q[self.cut] ** 2))
            U_cut_err = np.sqrt(np.sum(s_U[self.cut] ** 2))
            IQ_cut_err = np.sqrt(np.sum(s_IQ[self.cut] ** 2))
            IU_cut_err = np.sqrt(np.sum(s_IU[self.cut] ** 2))
            QU_cut_err = np.sqrt(np.sum(s_QU[self.cut] ** 2))
            I_cut_err = np.sqrt(np.sum(self.IQU_cov[0, 0][self.cut]))
            Q_cut_err = np.sqrt(np.sum(self.IQU_cov[1, 1][self.cut]))
            U_cut_err = np.sqrt(np.sum(self.IQU_cov[2, 2][self.cut]))
            IQ_cut_err = np.sqrt(np.sum(self.IQU_cov[0, 1][self.cut] ** 2))
            IU_cut_err = np.sqrt(np.sum(self.IQU_cov[0, 2][self.cut] ** 2))
            QU_cut_err = np.sqrt(np.sum(self.IQU_cov[1, 2][self.cut] ** 2))
            I_cut_stat_err = np.sqrt(np.sum(self.IQU_stat_cov[0, 0][self.cut]))
            Q_cut_stat_err = np.sqrt(np.sum(self.IQU_stat_cov[1, 1][self.cut]))
            U_cut_stat_err = np.sqrt(np.sum(self.IQU_stat_cov[2, 2][self.cut]))
            IQ_cut_stat_err = np.sqrt(np.sum(self.IQU_stat_cov[0, 1][self.cut] ** 2))
            IU_cut_stat_err = np.sqrt(np.sum(self.IQU_stat_cov[0, 2][self.cut] ** 2))
            QU_cut_stat_err = np.sqrt(np.sum(self.IQU_stat_cov[1, 2][self.cut] ** 2))

            with np.errstate(divide="ignore", invalid="ignore"):
                P_cut = np.sqrt(Q_cut**2 + U_cut**2) / I_cut
@@ -3404,6 +3337,16 @@ class pol_map(object):
                    )
                    / I_cut
                )
                P_cut_stat_err = (
                    P_cut
                    / I_cut
                    * np.sqrt(
                        I_cut_stat_err
                        - 2.0 / (I_cut * P_cut**2) * (Q_cut * IQ_cut_stat_err + U_cut * IU_cut_stat_err)
                        + 1.0 / (I_cut**2 * P_cut**4) * (Q_cut**2 * Q_cut_stat_err + U_cut**2 * U_cut_stat_err + 2.0 * Q_cut * U_cut * QU_cut_stat_err)
                    )
                )
                debiased_P_cut = np.sqrt(P_cut**2 - P_cut_stat_err**2) if P_cut**2 > P_cut_stat_err**2 else 0.0

            PA_cut = princ_angle((90.0 / np.pi) * np.arctan2(U_cut, Q_cut))
            PA_cut_err = (90.0 / (np.pi * (Q_cut**2 + U_cut**2))) * np.sqrt(
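
The new P_cut_stat_err / debiased_P_cut pair applies the usual quadratic debiasing of the polarization degree against its statistical error (in the spirit of the Wardle & Kronberg 1974 estimator). Schematically, for scalars:

import numpy as np

def debias(P, s_P_stat):
    # Clip to zero when noise dominates the measured degree.
    return np.sqrt(P**2 - s_P_stat**2) if P**2 > s_P_stat**2 else 0.0
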
@@ -3411,22 +3354,21 @@ class pol_map(object):
            )

        else:
            s_I = np.sqrt(self.IQU_cov[0, 0])
            s_Q = np.sqrt(self.IQU_cov[1, 1])
            s_U = np.sqrt(self.IQU_cov[2, 2])
            s_IQ = self.IQU_cov[0, 1]
            s_IU = self.IQU_cov[0, 2]
            s_QU = self.IQU_cov[1, 2]

            I_reg = self.I[self.region].sum()
            Q_reg = self.Q[self.region].sum()
            U_reg = self.U[self.region].sum()
            I_reg_err = np.sqrt(np.sum(s_I[self.region] ** 2))
            Q_reg_err = np.sqrt(np.sum(s_Q[self.region] ** 2))
            U_reg_err = np.sqrt(np.sum(s_U[self.region] ** 2))
            IQ_reg_err = np.sqrt(np.sum(s_IQ[self.region] ** 2))
            IU_reg_err = np.sqrt(np.sum(s_IU[self.region] ** 2))
            QU_reg_err = np.sqrt(np.sum(s_QU[self.region] ** 2))
            I_reg_err = np.sqrt(np.sum(self.IQU_cov[0, 0][self.region]))
            Q_reg_err = np.sqrt(np.sum(self.IQU_cov[1, 1][self.region]))
            U_reg_err = np.sqrt(np.sum(self.IQU_cov[2, 2][self.region]))
            IQ_reg_err = np.sqrt(np.sum(self.IQU_cov[0, 1][self.region] ** 2))
            IU_reg_err = np.sqrt(np.sum(self.IQU_cov[0, 2][self.region] ** 2))
            QU_reg_err = np.sqrt(np.sum(self.IQU_cov[1, 2][self.region] ** 2))
            I_reg_stat_err = np.sqrt(np.sum(self.IQU_stat_cov[0, 0][self.region]))
            Q_reg_stat_err = np.sqrt(np.sum(self.IQU_stat_cov[1, 1][self.region]))
            U_reg_stat_err = np.sqrt(np.sum(self.IQU_stat_cov[2, 2][self.region]))
            IQ_reg_stat_err = np.sqrt(np.sum(self.IQU_stat_cov[0, 1][self.region] ** 2))
            IU_reg_stat_err = np.sqrt(np.sum(self.IQU_stat_cov[0, 2][self.region] ** 2))
            QU_reg_stat_err = np.sqrt(np.sum(self.IQU_stat_cov[1, 2][self.region] ** 2))

            conf = PCconf(QN=Q_reg / I_reg, QN_ERR=Q_reg_err / I_reg, UN=U_reg / I_reg, UN_ERR=U_reg_err / I_reg)
            if 1.0 - conf > 1e-3:
@@ -3443,6 +3385,16 @@ class pol_map(object):
                )
                / I_reg
            )
            P_reg_stat_err = (
                P_reg
                / I_reg
                * np.sqrt(
                    I_reg_stat_err
                    - 2.0 / (I_reg * P_reg**2) * (Q_reg * IQ_reg_stat_err + U_reg * IU_reg_stat_err)
                    + 1.0 / (I_reg**2 * P_reg**4) * (Q_reg**2 * Q_reg_stat_err + U_reg**2 * U_reg_stat_err + 2.0 * Q_reg * U_reg * QU_reg_stat_err)
                )
            )
            debiased_P_reg = np.sqrt(P_reg**2 - P_reg_stat_err**2) if P_reg**2 > P_reg_stat_err**2 else 0.0

            PA_reg = princ_angle((90.0 / np.pi) * np.arctan2(U_reg, Q_reg))
            PA_reg_err = (90.0 / (np.pi * (Q_reg**2 + U_reg**2))) * np.sqrt(
@@ -3453,12 +3405,18 @@ class pol_map(object):
            I_cut = self.I[new_cut].sum()
            Q_cut = self.Q[new_cut].sum()
            U_cut = self.U[new_cut].sum()
            I_cut_err = np.sqrt(np.sum(s_I[new_cut] ** 2))
            Q_cut_err = np.sqrt(np.sum(s_Q[new_cut] ** 2))
            U_cut_err = np.sqrt(np.sum(s_U[new_cut] ** 2))
            IQ_cut_err = np.sqrt(np.sum(s_IQ[new_cut] ** 2))
            IU_cut_err = np.sqrt(np.sum(s_IU[new_cut] ** 2))
            QU_cut_err = np.sqrt(np.sum(s_QU[new_cut] ** 2))
            I_cut_err = np.sqrt(np.sum(self.IQU_cov[0, 0][new_cut]))
            Q_cut_err = np.sqrt(np.sum(self.IQU_cov[1, 1][new_cut]))
            U_cut_err = np.sqrt(np.sum(self.IQU_cov[2, 2][new_cut]))
            IQ_cut_err = np.sqrt(np.sum(self.IQU_cov[0, 1][new_cut] ** 2))
            IU_cut_err = np.sqrt(np.sum(self.IQU_cov[0, 2][new_cut] ** 2))
            QU_cut_err = np.sqrt(np.sum(self.IQU_cov[1, 2][new_cut] ** 2))
            I_cut_stat_err = np.sqrt(np.sum(self.IQU_stat_cov[0, 0][new_cut]))
            Q_cut_stat_err = np.sqrt(np.sum(self.IQU_stat_cov[1, 1][new_cut]))
            U_cut_stat_err = np.sqrt(np.sum(self.IQU_stat_cov[2, 2][new_cut]))
            IQ_cut_stat_err = np.sqrt(np.sum(self.IQU_stat_cov[0, 1][new_cut] ** 2))
            IU_cut_stat_err = np.sqrt(np.sum(self.IQU_stat_cov[0, 2][new_cut] ** 2))
            QU_cut_stat_err = np.sqrt(np.sum(self.IQU_stat_cov[1, 2][new_cut] ** 2))

            with np.errstate(divide="ignore", invalid="ignore"):
                P_cut = np.sqrt(Q_cut**2 + U_cut**2) / I_cut
@@ -3471,6 +3429,16 @@ class pol_map(object):
                    )
                    / I_cut
                )
                P_cut_stat_err = (
                    P_cut
                    / I_cut
                    * np.sqrt(
                        I_cut_stat_err
                        - 2.0 / (I_cut * P_cut**2) * (Q_cut * IQ_cut_stat_err + U_cut * IU_cut_stat_err)
                        + 1.0 / (I_cut**2 * P_cut**4) * (Q_cut**2 * Q_cut_stat_err + U_cut**2 * U_cut_stat_err + 2.0 * Q_cut * U_cut * QU_cut_stat_err)
                    )
                )
                debiased_P_cut = np.sqrt(P_cut**2 - P_cut_stat_err**2) if P_cut**2 > P_cut_stat_err**2 else 0.0

            PA_cut = princ_angle((90.0 / np.pi) * np.arctan2(U_cut, Q_cut))
            PA_cut_err = (90.0 / (np.pi * (Q_cut**2 + U_cut**2))) * np.sqrt(
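
The PA_cut_err expression truncated by the hunk boundary above is the standard first-order propagation for theta = 0.5 * arctan2(U, Q) expressed in degrees; written out for scalar sums (a sketch, not the diff's exact code):

import numpy as np

def pa_error_deg(Q, U, s_Q, s_U, s_QU):
    # dtheta/dQ = -U / (2 (Q^2 + U^2)), dtheta/dU = Q / (2 (Q^2 + U^2)), converted to degrees
    return (90.0 / (np.pi * (Q**2 + U**2))) * np.sqrt(U**2 * s_Q**2 + Q**2 * s_U**2 - 2.0 * Q * U * s_QU)
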
@@ -3491,7 +3459,7 @@ class pol_map(object):
                self.pivot_wav, sci_not(I_reg * self.map_convert, I_reg_err * self.map_convert, 2)
            )
            + "\n"
            + r"$P^{{int}}$ = {0:.1f} $\pm$ {1:.1f} %".format(P_reg * 100.0, np.ceil(P_reg_err * 1000.0) / 10.0)
            + r"$P^{{int}}$ = {0:.1f} $\pm$ {1:.1f} %".format(debiased_P_reg * 100.0, np.ceil(P_reg_err * 1000.0) / 10.0)
            + "\n"
            + r"$\theta_{{P}}^{{int}}$ = {0:.1f} $\pm$ {1:.1f} °".format(PA_reg, np.ceil(PA_reg_err * 10.0) / 10.0)
            + str_conf
@@ -3503,7 +3471,7 @@ class pol_map(object):
            # self.pivot_wav, sci_not(I_cut * self.map_convert, I_cut_err * self.map_convert, 2)
            # )
            # + "\n"
            # + r"$P^{{cut}}$ = {0:.1f} $\pm$ {1:.1f} %".format(P_cut * 100.0, np.ceil(P_cut_err * 1000.0) / 10.0)
            # + r"$P^{{cut}}$ = {0:.1f} $\pm$ {1:.1f} %".format(debiased_P_cut * 100.0, np.ceil(P_cut_err * 1000.0) / 10.0)
            # + "\n"
            # + r"$\theta_{{P}}^{{cut}}$ = {0:.1f} $\pm$ {1:.1f} °".format(PA_cut, np.ceil(PA_cut_err * 10.0) / 10.0)
            # )
@@ -3527,7 +3495,7 @@ class pol_map(object):
                self.pivot_wav, sci_not(I_reg * self.map_convert, I_reg_err * self.map_convert, 2)
            )
            + "\n"
            + r"$P^{{int}}$ = {0:.1f} $\pm$ {1:.1f} %".format(P_reg * 100.0, np.ceil(P_reg_err * 1000.0) / 10.0)
            + r"$P^{{int}}$ = {0:.1f} $\pm$ {1:.1f} %".format(debiased_P_reg * 100.0, np.ceil(P_reg_err * 1000.0) / 10.0)
            + "\n"
            + r"$\theta_{{P}}^{{int}}$ = {0:.1f} $\pm$ {1:.1f} °".format(PA_reg, np.ceil(PA_reg_err * 10.0) / 10.0)
            + str_conf
@@ -3539,7 +3507,7 @@ class pol_map(object):
            # self.pivot_wav, sci_not(I_cut * self.map_convert, I_cut_err * self.map_convert, 2)
            # )
            # + "\n"
            # + r"$P^{{cut}}$ = {0:.1f} $\pm$ {1:.1f} %".format(P_cut * 100.0, np.ceil(P_cut_err * 1000.0) / 10.0)
            # + r"$P^{{cut}}$ = {0:.1f} $\pm$ {1:.1f} %".format(debiased_P_cut * 100.0, np.ceil(P_cut_err * 1000.0) / 10.0)
            # + "\n"
            # + r"$\theta_{{P}}^{{cut}}$ = {0:.1f} $\pm$ {1:.1f} °".format(PA_cut, np.ceil(PA_cut_err * 10.0) / 10.0)
            # )
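
One detail of the label strings worth noting: errors are rounded up rather than to nearest, so the quoted uncertainty is never optimistic. np.ceil(err * 1000.0) / 10.0 turns a fractional error into a percentage rounded up to the next 0.1 %:

import numpy as np

err = 0.01234                        # fractional polarization error
print(np.ceil(err * 1000.0) / 10.0)  # 1.3 -> quoted as "1.3 %"
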
@@ -52,6 +52,7 @@ from scipy.ndimage import rotate as sc_rotate
from scipy.ndimage import shift as sc_shift
from scipy.signal import fftconvolve

from .background import bkg_fit, bkg_hist, bkg_mini
from .convex_hull import image_hull
from .cross_correlation import phase_cross_correlation
from .deconvolve import deconvolve_im, gaussian2d, gaussian_psf, zeropad
@@ -223,7 +224,9 @@ def bin_ndarray(ndarray, new_shape, operation="sum"):
    return ndarray


def crop_array(data_array, headers, error_array=None, data_mask=None, step=5, null_val=None, inside=False, display=False, savename=None, plots_folder=""):
def crop_array(
    data_array, headers, error_array=None, data_mask=None, step=5, null_val=None, crop=True, inside=False, display=False, savename=None, plots_folder=""
):
    """
    Homogeneously crop an array: all contained images will have the same shape.
    'inside' parameter will decide how much should be cropped.
@@ -255,6 +258,10 @@ def crop_array(data_array, headers, error_array=None, data_mask=None, step=5, nu
        If None, will be put to 75% of the mean value of the associated error
        array.
        Defaults to None.
    crop : boolean, optional
        If True, data_array will be cropped down to contain only relevant data.
        If False, this information will be kept in the crop_mask output.
        Defaults to True.
    inside : boolean, optional
        If True, the cropped image will be the maximum rectangle included
        inside the image. If False, the cropped image will be the minimum
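
With the crop keyword introduced above, crop_array returns four values in both modes; a hypothetical call following the updated signature:

# Sketch only: mirrors the signature introduced above.
data, errors, mask, heads = crop_array(data, heads, step=5, null_val=0.0, crop=True, inside=True)
# With crop=False the images keep their original shape and the relevant-data
# information is carried in the returned mask instead.
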
@@ -294,6 +301,9 @@ def crop_array(data_array, headers, error_array=None, data_mask=None, step=5, nu
    v_array[1] = np.max(vertex[:, 1]).astype(int)
    v_array[2] = np.min(vertex[:, 2]).astype(int)
    v_array[3] = np.max(vertex[:, 3]).astype(int)
    if data_mask is None:
        data_mask = np.zeros(data_array[0].shape).astype(bool)
        data_mask[v_array[0] : v_array[1], v_array[2] : v_array[3]] = True

    new_shape = np.array([v_array[1] - v_array[0], v_array[3] - v_array[2]])
    rectangle = [v_array[2], v_array[0], new_shape[1], new_shape[0], 0.0, "b"]
@@ -307,8 +317,6 @@ def crop_array(data_array, headers, error_array=None, data_mask=None, step=5, nu
        # Update CRPIX value in the associated header
        curr_wcs = WCS(crop_headers[i]).celestial.deepcopy()
        curr_wcs.wcs.crpix[:2] = curr_wcs.wcs.crpix[:2] - np.array([v_array[2], v_array[0]])
        curr_wcs.array_shape = crop_array[i].shape
        curr_wcs.wcs.set()
        crop_headers[i].update(curr_wcs.to_header())
        crop_headers[i]["naxis1"], crop_headers[i]["naxis2"] = crop_array[i].shape

@@ -353,11 +361,11 @@ def crop_array(data_array, headers, error_array=None, data_mask=None, step=5, nu
        )
        plt.show()

    if data_mask is not None:
    if crop:
        crop_mask = data_mask[v_array[0] : v_array[1], v_array[2] : v_array[3]]
        return crop_array, crop_error_array, crop_mask, crop_headers
    else:
        return crop_array, crop_error_array, crop_headers
    return data_array, error_array, data_mask, headers
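
The CRPIX update in the loop above is what keeps the WCS anchored to the sky after a cut; in isolation the operation looks like this (a hypothetical helper built on astropy, not the project's code):

import numpy as np
from astropy.wcs import WCS

def crop_with_wcs(image, header, ymin, ymax, xmin, xmax):
    cropped = image[ymin:ymax, xmin:xmax]
    w = WCS(header).celestial.deepcopy()
    w.wcs.crpix[:2] -= np.array([xmin, ymin])  # reference pixel moves with the cut
    w.array_shape = cropped.shape
    w.wcs.set()
    header.update(w.to_header())
    return cropped, header
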


def deconvolve_array(data_array, headers, psf="gaussian", FWHM=1.0, scale="px", shape=None, iterations=20, algo="richardson"):
@@ -524,22 +532,20 @@ def get_error(
    # estimated to less than 3%
    err_flat = data * 0.03

    from .background import bkg_fit, bkg_hist, bkg_mini

    if sub_type is None:
        n_data_array, c_error_bkg, headers, background = bkg_hist(
            data, error, mask, headers, subtract_error=subtract_error, display=display, savename=savename, plots_folder=plots_folder
        )
        sub_type, subtract_error = "histogram ", str(int(subtract_error > 0.0))
    elif isinstance(sub_type, str):
        if sub_type.lower() in ["fit"]:
        if sub_type.lower() in ["auto"]:
            n_data_array, c_error_bkg, headers, background = bkg_fit(
                data, error, mask, headers, subtract_error=subtract_error, display=display, savename=savename, plots_folder=plots_folder
            )
            sub_type, subtract_error = "histogram fit ", "mean+%.1fsigma" % subtract_error if subtract_error != 0.0 else 0.0
        else:
            n_data_array, c_error_bkg, headers, background = bkg_hist(
                data, error, mask, headers, n_bins=sub_type, subtract_error=subtract_error, display=display, savename=savename, plots_folder=plots_folder
                data, error, mask, headers, sub_type=sub_type, subtract_error=subtract_error, display=display, savename=savename, plots_folder=plots_folder
            )
            sub_type, subtract_error = "histogram ", "mean+%.1fsigma" % subtract_error if subtract_error != 0.0 else 0.0
    elif isinstance(sub_type, tuple):
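
The branch above now reserves the fitted background for sub_type == "auto" and forwards any other string (a histogram binning rule) to bkg_hist. The underlying idea, sketched independently of the project's bkg_* helpers:

import numpy as np

def sketch_background_level(data, subtract_error=1.0):
    # Take the histogram mode of the positive pixels as the sky level,
    # then add subtract_error standard deviations as the level to subtract.
    values = data[data > 0.0]
    counts, edges = np.histogram(values, bins="fd")  # Freedman-Diaconis rule
    k = np.argmax(counts)
    mode = 0.5 * (edges[k] + edges[k + 1])
    return mode + subtract_error * np.std(values)
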
@@ -637,8 +643,7 @@ def rebin_array(data_array, error_array, headers, pxsize=2, scale="px", operatio
        pxsize, scale = "", "full"
    else:
        raise ValueError("'{0:s}' invalid scale for binning.".format(scale))
    new_shape_float = min(image.shape / Dxy_arr, key=lambda x: x[0] + x[1])
    new_shape = np.ceil(new_shape_float).astype(int)
    new_shape = np.ceil(min(image.shape / Dxy_arr, key=lambda x: x[0] + x[1])).astype(int)

    for i, (image, error, header) in enumerate(list(zip(data_array, error_array, headers))):
        # Get current pixel size
@@ -667,10 +672,8 @@ def rebin_array(data_array, error_array, headers, pxsize=2, scale="px", operatio
        # Update header
        nw = w.deepcopy()
        nw.wcs.cdelt *= Dxy
        # nw.wcs.crpix += np.abs(new_shape_float - new_shape) * np.array(new_shape) / Dxy
        nw.wcs.crpix /= Dxy
        nw.array_shape = new_shape
        nw.wcs.set()
        new_header["NAXIS1"], new_header["NAXIS2"] = nw.array_shape
        new_header["PXAREA"] *= Dxy[0] * Dxy[1]
        for key, val in nw.to_header().items():
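
The one-liner above merges two lines computing the target grid: the largest shape compatible with the binning factors Dxy. The block-summing it feeds (bin_ndarray) behaves like this reshape trick (illustrative only, not the project's implementation):

import numpy as np

def rebin_sum(a, fy, fx):
    # Sum fy x fx blocks; trailing rows/columns that do not fill a block are dropped.
    ny, nx = a.shape[0] // fy, a.shape[1] // fx
    return a[: ny * fy, : nx * fx].reshape(ny, fy, nx, fx).sum(axis=(1, 3))

img = np.arange(16.0).reshape(4, 4)
print(rebin_sum(img, 2, 2))  # [[10. 18.] [42. 50.]]
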
@@ -850,10 +853,7 @@ def align_data(
    new_crpix = np.array([wcs.wcs.crpix for wcs in headers_wcs]) + shifts[:, ::-1] + res_shift[::-1]
    for i in range(len(headers_wcs)):
        headers_wcs[i].wcs.crpix = new_crpix[0]
        headers_wcs[i].array_shape = (res_shape, res_shape)
        headers_wcs[i].wcs.set()
        headers[i].update(headers_wcs[i].to_header())
        headers[i]["NAXIS1"], headers[i]["NAXIS2"] = res_shape, res_shape

    data_mask = rescaled_mask.all(axis=0)
    data_array, error_array, data_mask, headers = crop_array(rescaled_image, headers, rescaled_error, data_mask, null_val=0.01 * background)
@@ -1252,6 +1252,7 @@ def compute_Stokes(data_array, error_array, data_mask, headers, FWHM=None, scale
    # Orientation and error for each polarizer
    # fmax = np.finfo(np.float64).max
    pol_flux = np.array([corr[0] * pol0, corr[1] * pol60, corr[2] * pol120])
    pol_flux_corr = np.array([pf * 2.0 / t for (pf, t) in zip(pol_flux, transmit)])

    coeff_stokes = np.zeros((3, 3))
    # Coefficients linking each polarizer flux to each Stokes parameter
@@ -1267,6 +1268,7 @@ def compute_Stokes(data_array, error_array, data_mask, headers, FWHM=None, scale
    # Normalization parameter for Stokes parameters computation
    N = (coeff_stokes[0, :] * transmit / 2.0).sum()
    coeff_stokes = coeff_stokes / N
    coeff_stokes_corr = np.array([cs * t / 2.0 for (cs, t) in zip(coeff_stokes.T, transmit)]).T
    I_stokes = np.zeros(pol_array[0].shape)
    Q_stokes = np.zeros(pol_array[0].shape)
    U_stokes = np.zeros(pol_array[0].shape)
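
coeff_stokes inverts the linear system tying the three polarizer fluxes to (I, Q, U). For ideal polarizers at 0/60/120 degrees the same inversion reduces to the sketch below; the pipeline generalizes it with per-polarizer efficiencies and transmittances:

import numpy as np

theta = np.deg2rad([0.0, 60.0, 120.0])
A = 0.5 * np.array([[1.0, np.cos(2 * t), np.sin(2 * t)] for t in theta])  # f_k = A @ (I, Q, U)
coeff = np.linalg.inv(A)                                                  # (I, Q, U) = coeff @ f

f = np.array([1.2, 0.9, 0.9])  # example polarizer fluxes
I, Q, U = coeff @ f            # -> 2.0, 0.4, 0.0
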
@@ -1308,121 +1310,81 @@ def compute_Stokes(data_array, error_array, data_mask, headers, FWHM=None, scale

    # Statistical error: Poisson noise is assumed
    sigma_flux = np.array([np.sqrt(flux / head["exptime"]) for flux, head in zip(pol_flux, pol_headers)])
    s_I2_stat = np.sum([coeff_stokes[0, i] ** 2 * sigma_flux[i] ** 2 for i in range(len(sigma_flux))], axis=0)
    s_Q2_stat = np.sum([coeff_stokes[1, i] ** 2 * sigma_flux[i] ** 2 for i in range(len(sigma_flux))], axis=0)
    s_U2_stat = np.sum([coeff_stokes[2, i] ** 2 * sigma_flux[i] ** 2 for i in range(len(sigma_flux))], axis=0)
    Stokes_stat_cov = np.zeros(Stokes_cov.shape)
    for i in range(Stokes_cov.shape[0]):
        Stokes_stat_cov[i, i] = np.sum([coeff_stokes[i, k] ** 2 * sigma_flux[k] ** 2 for k in range(len(sigma_flux))], axis=0)
        for j in [k for k in range(3) if k > i]:
            Stokes_stat_cov[i, j] = np.sum([abs(coeff_stokes[i, k] * coeff_stokes[j, k]) * sigma_flux[k] ** 2 for k in range(len(sigma_flux))], axis=0)
            Stokes_stat_cov[j, i] = np.sum([abs(coeff_stokes[i, k] * coeff_stokes[j, k]) * sigma_flux[k] ** 2 for k in range(len(sigma_flux))], axis=0)

    pol_flux_corr = np.array([pf * 2.0 / t for (pf, t) in zip(pol_flux, transmit)])
    coeff_stokes_corr = np.array([cs * t / 2.0 for (cs, t) in zip(coeff_stokes.T, transmit)]).T
    # Compute the derivative of each Stokes parameter with respect to the polarizer orientation
    dI_dtheta1 = (
        2.0
        * pol_eff[0]
        / N
        * (
            pol_eff[2] * np.cos(-2.0 * theta[2] + 2.0 * theta[0]) * (pol_flux_corr[1] - I_stokes)
            - pol_eff[1] * np.cos(-2.0 * theta[0] + 2.0 * theta[1]) * (pol_flux_corr[2] - I_stokes)
            + coeff_stokes_corr[0, 0] * (np.sin(2.0 * theta[0]) * Q_stokes - np.cos(2 * theta[0]) * U_stokes)
        )
    )
    dI_dtheta2 = (
        2.0
        * pol_eff[1]
        / N
        * (
            pol_eff[0] * np.cos(-2.0 * theta[0] + 2.0 * theta[1]) * (pol_flux_corr[2] - I_stokes)
            - pol_eff[2] * np.cos(-2.0 * theta[1] + 2.0 * theta[2]) * (pol_flux_corr[0] - I_stokes)
            + coeff_stokes_corr[0, 1] * (np.sin(2.0 * theta[1]) * Q_stokes - np.cos(2 * theta[1]) * U_stokes)
        )
    )
    dI_dtheta3 = (
        2.0
        * pol_eff[2]
        / N
        * (
            pol_eff[1] * np.cos(-2.0 * theta[1] + 2.0 * theta[2]) * (pol_flux_corr[0] - I_stokes)
            - pol_eff[0] * np.cos(-2.0 * theta[2] + 2.0 * theta[0]) * (pol_flux_corr[1] - I_stokes)
            + coeff_stokes_corr[0, 2] * (np.sin(2.0 * theta[2]) * Q_stokes - np.cos(2 * theta[2]) * U_stokes)
        )
    )
    dI_dtheta = np.array([dI_dtheta1, dI_dtheta2, dI_dtheta3])
    dIQU_dtheta = np.zeros(Stokes_cov.shape)

    dQ_dtheta1 = (
        2.0
        * pol_eff[0]
        / N
        * (
            np.cos(2.0 * theta[0]) * (pol_flux_corr[1] - pol_flux_corr[2])
            - (pol_eff[2] * np.cos(-2.0 * theta[2] + 2.0 * theta[0]) - pol_eff[1] * np.cos(-2.0 * theta[0] + 2.0 * theta[1])) * Q_stokes
            + coeff_stokes_corr[1, 0] * (np.sin(2.0 * theta[0]) * Q_stokes - np.cos(2 * theta[0]) * U_stokes)
    # Derivative of I_stokes wrt theta_1, 2, 3
    for j in range(3):
        dIQU_dtheta[0, j] = (
            2.0
            * pol_eff[j]
            / N
            * (
                pol_eff[(j + 2) % 3] * np.cos(-2.0 * theta[(j + 2) % 3] + 2.0 * theta[j]) * (pol_flux_corr[(j + 1) % 3] - I_stokes)
                - pol_eff[(j + 1) % 3] * np.cos(-2.0 * theta[j] + 2.0 * theta[(j + 1) % 3]) * (pol_flux_corr[(j + 2) % 3] - I_stokes)
                + coeff_stokes_corr[0, j] * (np.sin(2.0 * theta[j]) * Q_stokes - np.cos(2 * theta[j]) * U_stokes)
            )
        )
    )
    dQ_dtheta2 = (
        2.0
        * pol_eff[1]
        / N
        * (
            np.cos(2.0 * theta[1]) * (pol_flux_corr[2] - pol_flux_corr[0])
            - (pol_eff[0] * np.cos(-2.0 * theta[0] + 2.0 * theta[1]) - pol_eff[2] * np.cos(-2.0 * theta[1] + 2.0 * theta[2])) * Q_stokes
            + coeff_stokes_corr[1, 1] * (np.sin(2.0 * theta[1]) * Q_stokes - np.cos(2 * theta[1]) * U_stokes)
        )
    )
    dQ_dtheta3 = (
        2.0
        * pol_eff[2]
        / N
        * (
            np.cos(2.0 * theta[2]) * (pol_flux_corr[0] - pol_flux_corr[1])
            - (pol_eff[1] * np.cos(-2.0 * theta[1] + 2.0 * theta[2]) - pol_eff[0] * np.cos(-2.0 * theta[2] + 2.0 * theta[0])) * Q_stokes
            + coeff_stokes_corr[1, 2] * (np.sin(2.0 * theta[2]) * Q_stokes - np.cos(2 * theta[2]) * U_stokes)
        )
    )
    dQ_dtheta = np.array([dQ_dtheta1, dQ_dtheta2, dQ_dtheta3])

    dU_dtheta1 = (
        2.0
        * pol_eff[0]
        / N
        * (
            np.sin(2.0 * theta[0]) * (pol_flux_corr[1] - pol_flux_corr[2])
            - (pol_eff[2] * np.cos(-2.0 * theta[2] + 2.0 * theta[0]) - pol_eff[1] * np.cos(-2.0 * theta[0] + 2.0 * theta[1])) * U_stokes
            + coeff_stokes_corr[2, 0] * (np.sin(2.0 * theta[0]) * Q_stokes - np.cos(2 * theta[0]) * U_stokes)
    # Derivative of Q_stokes wrt theta_1, 2, 3
    for j in range(3):
        dIQU_dtheta[1, j] = (
            2.0
            * pol_eff[j]
            / N
            * (
                np.cos(2.0 * theta[j]) * (pol_flux_corr[(j + 1) % 3] - pol_flux_corr[(j + 2) % 3])
                - (
                    pol_eff[(j + 2) % 3] * np.cos(-2.0 * theta[(j + 2) % 3] + 2.0 * theta[j])
                    - pol_eff[(j + 1) % 3] * np.cos(-2.0 * theta[j] + 2.0 * theta[(j + 1) % 3])
                )
                * Q_stokes
                + coeff_stokes_corr[1, j] * (np.sin(2.0 * theta[j]) * Q_stokes - np.cos(2 * theta[j]) * U_stokes)
            )
        )
    )
    dU_dtheta2 = (
        2.0
        * pol_eff[1]
        / N
        * (
            np.sin(2.0 * theta[1]) * (pol_flux_corr[2] - pol_flux_corr[0])
            - (pol_eff[0] * np.cos(-2.0 * theta[0] + 2.0 * theta[1]) - pol_eff[2] * np.cos(-2.0 * theta[1] + 2.0 * theta[2])) * U_stokes
            + coeff_stokes_corr[2, 1] * (np.sin(2.0 * theta[1]) * Q_stokes - np.cos(2 * theta[1]) * U_stokes)

    # Derivative of U_stokes wrt theta_1, 2, 3
    for j in range(3):
        dIQU_dtheta[2, j] = (
            2.0
            * pol_eff[j]
            / N
            * (
                np.sin(2.0 * theta[j]) * (pol_flux_corr[(j + 1) % 3] - pol_flux_corr[(j + 2) % 3])
                - (
                    pol_eff[(j + 2) % 3] * np.cos(-2.0 * theta[(j + 2) % 3] + 2.0 * theta[j])
                    - pol_eff[(j + 1) % 3] * np.cos(-2.0 * theta[j] + 2.0 * theta[(j + 1) % 3])
                )
                * U_stokes
                + coeff_stokes_corr[2, j] * (np.sin(2.0 * theta[j]) * Q_stokes - np.cos(2 * theta[j]) * U_stokes)
            )
        )
    )
    dU_dtheta3 = (
        2.0
        * pol_eff[2]
        / N
        * (
            np.sin(2.0 * theta[2]) * (pol_flux_corr[0] - pol_flux_corr[1])
            - (pol_eff[1] * np.cos(-2.0 * theta[1] + 2.0 * theta[2]) - pol_eff[0] * np.cos(-2.0 * theta[2] + 2.0 * theta[0])) * U_stokes
            + coeff_stokes_corr[2, 2] * (np.sin(2.0 * theta[2]) * Q_stokes - np.cos(2 * theta[2]) * U_stokes)
        )
    )
    dU_dtheta = np.array([dU_dtheta1, dU_dtheta2, dU_dtheta3])
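
The nine hand-written dI/dQ/dU blocks collapse into the three dIQU_dtheta loops because the partners of polarizer j follow a fixed cycle: (j + 1) % 3 and (j + 2) % 3. The indexing pattern in isolation:

# Cyclic neighbours used by the refactored derivative loops (sketch).
for j in range(3):
    nxt, prv = (j + 1) % 3, (j + 2) % 3
    print(j, nxt, prv)  # 0 1 2 / 1 2 0 / 2 0 1
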

    # Compute the uncertainty associated with the polarizers' orientation (see Kishimoto 1999)
    s_I2_axis = np.sum([dI_dtheta[i] ** 2 * globals()["sigma_theta"][i] ** 2 for i in range(len(globals()["sigma_theta"]))], axis=0)
    s_Q2_axis = np.sum([dQ_dtheta[i] ** 2 * globals()["sigma_theta"][i] ** 2 for i in range(len(globals()["sigma_theta"]))], axis=0)
    s_U2_axis = np.sum([dU_dtheta[i] ** 2 * globals()["sigma_theta"][i] ** 2 for i in range(len(globals()["sigma_theta"]))], axis=0)
    # np.savetxt("output/sI_dir.txt", np.sqrt(s_I2_axis))
    # np.savetxt("output/sQ_dir.txt", np.sqrt(s_Q2_axis))
    # np.savetxt("output/sU_dir.txt", np.sqrt(s_U2_axis))
    Stokes_axis_cov = np.zeros(Stokes_cov.shape)
    for i in range(Stokes_cov.shape[0]):
        Stokes_axis_cov[i, i] = np.sum([dIQU_dtheta[i, k] ** 2 * globals()["sigma_theta"][k] ** 2 for k in range(len(globals()["sigma_theta"]))], axis=0)
        for j in [k for k in range(3) if k > i]:
            Stokes_axis_cov[i, j] = np.sum(
                [abs(dIQU_dtheta[i, k] * dIQU_dtheta[j, k]) * globals()["sigma_theta"][k] ** 2 for k in range(len(globals()["sigma_theta"]))], axis=0
            )
            Stokes_axis_cov[j, i] = np.sum(
                [abs(dIQU_dtheta[i, k] * dIQU_dtheta[j, k]) * globals()["sigma_theta"][k] ** 2 for k in range(len(globals()["sigma_theta"]))], axis=0
            )

    # Add quadratically the uncertainty to the Stokes covariance matrix
    Stokes_cov[0, 0] += s_I2_axis + s_I2_stat
    Stokes_cov[1, 1] += s_Q2_axis + s_Q2_stat
    Stokes_cov[2, 2] += s_U2_axis + s_U2_stat
    for i in range(Stokes_cov.shape[0]):
        Stokes_cov[i, i] += Stokes_axis_cov[i, i] + Stokes_stat_cov[i, i]
        for j in [k for k in range(Stokes_cov.shape[0]) if k > i]:
            Stokes_cov[i, j] = np.sqrt(Stokes_cov[i, j] ** 2 + Stokes_axis_cov[i, j] ** 2 + Stokes_stat_cov[i, j] ** 2)
            Stokes_cov[j, i] = np.sqrt(Stokes_cov[j, i] ** 2 + Stokes_axis_cov[j, i] ** 2 + Stokes_stat_cov[j, i] ** 2)

    # Save values to single header
    header_stokes = pol_headers[0]
@@ -1456,8 +1418,8 @@ def compute_Stokes(data_array, error_array, data_mask, headers, FWHM=None, scale
        for i in range(3):
            Stokes_cov[i, i] = np.sum([exp**2 * cov for exp, cov in zip(all_exp, all_Stokes_cov[:, i, i])], axis=0) / all_exp.sum() ** 2
            for j in [x for x in range(3) if x != i]:
                Stokes_cov[i, j] = np.sqrt(np.sum([exp**2 * cov**2 for exp, cov in zip(all_exp, all_Stokes_cov[:, i, j])], axis=0) / all_exp.sum() ** 2)
                Stokes_cov[j, i] = np.sqrt(np.sum([exp**2 * cov**2 for exp, cov in zip(all_exp, all_Stokes_cov[:, j, i])], axis=0) / all_exp.sum() ** 2)
                Stokes_cov[i, j] = np.sum([exp**2 * cov**2 for exp, cov in zip(all_exp, all_Stokes_cov[:, i, j])], axis=0) / all_exp.sum() ** 2
                Stokes_cov[j, i] = np.sum([exp**2 * cov**2 for exp, cov in zip(all_exp, all_Stokes_cov[:, j, i])], axis=0) / all_exp.sum() ** 2

        # Save values to single header
        header_stokes = all_header_stokes[0]
@@ -1472,6 +1434,7 @@ def compute_Stokes(data_array, error_array, data_mask, headers, FWHM=None, scale
    Q_stokes[np.isnan(Q_stokes)] = 0.0
    U_stokes[np.isnan(U_stokes)] = 0.0
    Stokes_cov[np.isnan(Stokes_cov)] = fmax
    Stokes_stat_cov[np.isnan(Stokes_cov)] = fmax

    if integrate:
        # Compute integrated values for P, PA before any rotation
@@ -1485,29 +1448,47 @@ def compute_Stokes(data_array, error_array, data_mask, headers, FWHM=None, scale
        IQ_diluted_err = np.sqrt(np.sum(Stokes_cov[0, 1][mask] ** 2))
        IU_diluted_err = np.sqrt(np.sum(Stokes_cov[0, 2][mask] ** 2))
        QU_diluted_err = np.sqrt(np.sum(Stokes_cov[1, 2][mask] ** 2))
        I_diluted_stat_err = np.sqrt(np.sum(Stokes_stat_cov[0, 0][mask]))
        Q_diluted_stat_err = np.sqrt(np.sum(Stokes_stat_cov[1, 1][mask]))
        U_diluted_stat_err = np.sqrt(np.sum(Stokes_stat_cov[2, 2][mask]))
        IQ_diluted_stat_err = np.sqrt(np.sum(Stokes_stat_cov[0, 1][mask] ** 2))
        IU_diluted_stat_err = np.sqrt(np.sum(Stokes_stat_cov[0, 2][mask] ** 2))
        QU_diluted_stat_err = np.sqrt(np.sum(Stokes_stat_cov[1, 2][mask] ** 2))

        P_diluted = np.sqrt(Q_diluted**2 + U_diluted**2) / I_diluted
        P_diluted_err = np.sqrt(
        P_diluted_err = (1.0 / I_diluted) * np.sqrt(
            (Q_diluted**2 * Q_diluted_err**2 + U_diluted**2 * U_diluted_err**2 + 2.0 * Q_diluted * U_diluted * QU_diluted_err) / (Q_diluted**2 + U_diluted**2)
            + ((Q_diluted / I_diluted) ** 2 + (U_diluted / I_diluted) ** 2) * I_diluted_err**2
            - 2.0 * (Q_diluted / I_diluted) * IQ_diluted_err
            - 2.0 * (U_diluted / I_diluted) * IU_diluted_err
        )
        P_diluted_stat_err = (
            P_diluted
            / I_diluted
            * np.sqrt(
                I_diluted_stat_err
                - 2.0 / (I_diluted * P_diluted**2) * (Q_diluted * IQ_diluted_stat_err + U_diluted * IU_diluted_stat_err)
                + 1.0
                / (I_diluted**2 * P_diluted**4)
                * (Q_diluted**2 * Q_diluted_stat_err + U_diluted**2 * U_diluted_stat_err + 2.0 * Q_diluted * U_diluted * QU_diluted_stat_err)
            )
        )
        debiased_P_diluted = np.sqrt(P_diluted**2 - P_diluted_stat_err**2) if P_diluted**2 > P_diluted_stat_err**2 else 0.0

        PA_diluted = princ_angle((90.0 / np.pi) * np.arctan2(U_diluted, Q_diluted))
        PA_diluted_err = (90.0 / (np.pi * (Q_diluted**2 + U_diluted**2))) * np.sqrt(
            U_diluted**2 * Q_diluted_err**2 + Q_diluted**2 * U_diluted_err**2 - 2.0 * Q_diluted * U_diluted * QU_diluted_err
        )

        header_stokes["P_int"] = (P_diluted, "Integrated polarization degree")
        header_stokes["P_int"] = (debiased_P_diluted, "Integrated polarization degree")
        header_stokes["sP_int"] = (np.ceil(P_diluted_err * 1000.0) / 1000.0, "Integrated polarization degree error")
        header_stokes["PA_int"] = (PA_diluted, "Integrated polarization angle")
        header_stokes["sPA_int"] = (np.ceil(PA_diluted_err * 10.0) / 10.0, "Integrated polarization angle error")

    return I_stokes, Q_stokes, U_stokes, Stokes_cov, header_stokes
    return I_stokes, Q_stokes, U_stokes, Stokes_cov, Stokes_stat_cov, header_stokes

def compute_pol(I_stokes, Q_stokes, U_stokes, Stokes_cov, header_stokes):
def compute_pol(I_stokes, Q_stokes, U_stokes, Stokes_cov, Stokes_stat_cov, header_stokes):
    """
    Compute the polarization degree (in %) and angle (in deg) and their
    respective errors from given Stokes parameters.
@@ -1582,27 +1563,44 @@ def compute_pol(I_stokes, Q_stokes, U_stokes, Stokes_cov, header_stokes):
    s_P[np.isnan(s_P)] = fmax
    s_PA[np.isnan(s_PA)] = fmax

    # Errors on P, PA supposing Poisson noise
    s_P_P = np.ones(I_stokes.shape) * fmax
    s_PA_P = np.ones(I_stokes.shape) * fmax
    maskP = np.logical_and(mask, P > 0.0)
    s_P_P[maskP] = (
        P[maskP]
        / I_stokes[maskP]
        * np.sqrt(
            Stokes_stat_cov[0, 0][maskP]
            - 2.0 / (I_stokes[maskP] * P[maskP] ** 2) * (Q_stokes[maskP] * Stokes_stat_cov[0, 1][maskP] + U_stokes[maskP] * Stokes_stat_cov[0, 2][maskP])
            + 1.0
            / (I_stokes[maskP] ** 2 * P[maskP] ** 4)
            * (
                Q_stokes[maskP] ** 2 * Stokes_stat_cov[1, 1][maskP]
                + U_stokes[maskP] ** 2 * Stokes_stat_cov[2, 2][maskP]
                + 2.0 * Q_stokes[maskP] * U_stokes[maskP] * Stokes_stat_cov[1, 2][maskP]
            )
        )
    )
    s_PA_P[maskP] = (
        90.0
        / (np.pi * I_stokes[maskP] ** 2 * P[maskP] ** 2)
        * (
            Q_stokes[maskP] ** 2 * Stokes_stat_cov[2, 2][maskP]
            + U_stokes[maskP] * Stokes_stat_cov[1, 1][maskP]
            - 2.0 * Q_stokes[maskP] * U_stokes[maskP] * Stokes_stat_cov[1, 2][maskP]
        )
    )

    # Catch expected "OverflowWarning" as wrong pixel have an overflowing error
    with warnings.catch_warnings(record=True) as _:
        mask2 = P**2 >= s_P**2
        mask2 = P**2 >= s_P_P**2
        debiased_P = np.zeros(I_stokes.shape)
        debiased_P[mask2] = np.sqrt(P[mask2] ** 2 - s_P[mask2] ** 2)
        debiased_P[mask2] = np.sqrt(P[mask2] ** 2 - s_P_P[mask2] ** 2)

    if (debiased_P > 1.0).any():
        print("WARNING : found {0:d} pixels for which debiased_P > 100%".format(debiased_P[debiased_P > 1.0].size))

    # Compute the total exposure time so that
    # I_stokes*exp_tot = N_tot the total number of events
    exp_tot = header_stokes["exptime"]
    # print("Total exposure time : {} sec".format(exp_tot))
    N_obs = I_stokes * exp_tot

    # Errors on P, PA supposing Poisson noise
    s_P_P = np.ones(I_stokes.shape) * fmax
    s_P_P[mask] = np.sqrt(2.0) / np.sqrt(N_obs[mask]) * 100.0
    s_PA_P = np.ones(I_stokes.shape) * fmax
    s_PA_P[mask2] = s_P_P[mask2] / (2.0 * P[mask2]) * 180.0 / np.pi
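
For comparison, the block replaced above estimated the Poisson-limit errors directly from the expected number of events rather than from Stokes_stat_cov; the shortcut, for a single pixel (sketch):

import numpy as np

N_obs = 1.0e4                           # photo-events in the pixel (example)
s_P = np.sqrt(2.0) / np.sqrt(N_obs)     # shot-noise floor on P (fractional)
P = 0.05
s_PA = s_P / (2.0 * P) * 180.0 / np.pi  # matching angle error in degrees
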

    # Nan handling :
    P[np.isnan(P)] = 0.0
    s_P[np.isnan(s_P)] = fmax
@@ -1614,7 +1612,7 @@ def compute_pol(I_stokes, Q_stokes, U_stokes, Stokes_cov, header_stokes):
    return P, debiased_P, s_P, s_P_P, PA, s_PA, s_PA_P


def rotate_Stokes(I_stokes, Q_stokes, U_stokes, Stokes_cov, data_mask, header_stokes, SNRi_cut=None):
def rotate_Stokes(I_stokes, Q_stokes, U_stokes, Stokes_cov, Stokes_stat_cov, data_mask, header_stokes, SNRi_cut=None):
    """
    Use scipy.ndimage.rotate to rotate I_stokes to an angle, and a rotation
    matrix to rotate Q, U of a given angle in degrees and update header
@@ -1631,7 +1629,11 @@ def rotate_Stokes(I_stokes, Q_stokes, U_stokes, Stokes_cov, data_mask, header_st
        Image (2D floats) containing the Stokes parameters accounting for
        +45/-45deg linear polarization intensity
    Stokes_cov : numpy.ndarray
        Covariance matrix of the Stokes parameters I, Q, U.
        Covariance matrix containing all uncertainties of the Stokes
        parameters I, Q, U.
    Stokes_stat_cov : numpy.ndarray
        Covariance matrix containing statistical uncertainty of the Stokes
        parameters I, Q, U.
    data_mask : numpy.ndarray
        2D boolean array delimiting the data to work on.
    header_stokes : astropy.fits.header.Header
@@ -1653,6 +1655,8 @@ def rotate_Stokes(I_stokes, Q_stokes, U_stokes, Stokes_cov, data_mask, header_st
        accounting for +45/-45deg linear polarization intensity.
    new_Stokes_cov : numpy.ndarray
        Updated covariance matrix of the Stokes parameters I, Q, U.
    new_Stokes_stat_cov : numpy.ndarray
        Updated statistical covariance matrix of the Stokes parameters I, Q, U.
    new_header_stokes : astropy.fits.header.Header
        Updated Header file associated with the Stokes fluxes accounting
        for the new orientation angle.
@@ -1684,11 +1688,9 @@ def rotate_Stokes(I_stokes, Q_stokes, U_stokes, Stokes_cov, data_mask, header_st
    Q_stokes = zeropad(Q_stokes, shape)
    U_stokes = zeropad(U_stokes, shape)
    data_mask = zeropad(data_mask, shape)
    Stokes_cov = zeropad(Stokes_cov, [*Stokes_cov.shape[:-2], *shape])
    new_I_stokes = np.zeros(shape)
    new_Q_stokes = np.zeros(shape)
    new_U_stokes = np.zeros(shape)
    new_Stokes_cov = np.zeros((*Stokes_cov.shape[:-2], *shape))

    # Rotate original images using scipy.ndimage.rotate
    new_I_stokes = sc_rotate(I_stokes, ang, order=1, reshape=False, cval=0.0)
@@ -1697,6 +1699,10 @@ def rotate_Stokes(I_stokes, Q_stokes, U_stokes, Stokes_cov, data_mask, header_st
    new_data_mask = sc_rotate(data_mask.astype(float) * 10.0, ang, order=1, reshape=False, cval=0.0)
    new_data_mask[new_data_mask < 1.0] = 0.0
    new_data_mask = new_data_mask.astype(bool)

    # Rotate covariance matrix
    Stokes_cov = zeropad(Stokes_cov, [*Stokes_cov.shape[:-2], *shape])
    new_Stokes_cov = np.zeros((*Stokes_cov.shape[:-2], *shape))
    for i in range(3):
        for j in range(3):
            new_Stokes_cov[i, j] = sc_rotate(Stokes_cov[i, j], ang, order=1, reshape=False, cval=0.0)
@@ -1707,6 +1713,17 @@ def rotate_Stokes(I_stokes, Q_stokes, U_stokes, Stokes_cov, data_mask, header_st
            new_I_stokes[i, j], new_Q_stokes[i, j], new_U_stokes[i, j] = np.dot(mrot, np.array([new_I_stokes[i, j], new_Q_stokes[i, j], new_U_stokes[i, j]])).T
            new_Stokes_cov[:, :, i, j] = np.dot(mrot, np.dot(new_Stokes_cov[:, :, i, j], mrot.T))

    # Rotate statistical covariance matrix
    Stokes_stat_cov = zeropad(Stokes_stat_cov, [*Stokes_stat_cov.shape[:-2], *shape])
    new_Stokes_stat_cov = np.zeros((*Stokes_stat_cov.shape[:-2], *shape))
    for i in range(3):
        for j in range(3):
            new_Stokes_stat_cov[i, j] = sc_rotate(Stokes_stat_cov[i, j], ang, order=1, reshape=False, cval=0.0)
        new_Stokes_stat_cov[i, i] = np.abs(new_Stokes_stat_cov[i, i])
    for i in range(shape[0]):
        for j in range(shape[1]):
            new_Stokes_stat_cov[:, :, i, j] = np.dot(mrot, np.dot(new_Stokes_stat_cov[:, :, i, j], mrot.T))

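
Each per-pixel 3x3 covariance is rotated as a congruence R C R^T, which is how a covariance transforms under the linear map (I, Q, U) -> R (I, Q, U); note that rotating the frame by ang rotates (Q, U) by 2 * ang. Minimal sketch for one pixel (example values):

import numpy as np

ang = np.deg2rad(30.0)
R = np.array([
    [1.0, 0.0, 0.0],
    [0.0, np.cos(2 * ang), np.sin(2 * ang)],
    [0.0, -np.sin(2 * ang), np.cos(2 * ang)],
])
C = np.diag([1.0, 0.5, 0.25])  # example covariance at one pixel
C_rot = R @ C @ R.T            # rotated covariance, still symmetric
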
    # Update headers to new angle
    mrot = np.array([[np.cos(-alpha), -np.sin(-alpha)], [np.sin(-alpha), np.cos(-alpha)]])

@@ -1715,11 +1732,9 @@ def rotate_Stokes(I_stokes, Q_stokes, U_stokes, Stokes_cov, data_mask, header_st

    new_wcs.wcs.pc = np.dot(mrot, new_wcs.wcs.pc)
    new_wcs.wcs.crpix = np.dot(mrot, new_wcs.wcs.crpix - old_center[::-1]) + new_center[::-1]
    new_wcs.array_shape = shape
    new_wcs.wcs.set()
    for key, val in new_wcs.to_header().items():
        new_header_stokes.set(key, val)
    new_header_stokes["NAXIS1"], new_header_stokes["NAXIS2"] = new_wcs.array_shape
    new_header_stokes["ORIENTAT"] += ang

    # Nan handling :
@@ -1737,12 +1752,18 @@ def rotate_Stokes(I_stokes, Q_stokes, U_stokes, Stokes_cov, data_mask, header_st
        I_diluted = new_I_stokes[mask].sum()
        Q_diluted = new_Q_stokes[mask].sum()
        U_diluted = new_U_stokes[mask].sum()
        I_diluted_err = np.sqrt(np.sum(new_Stokes_cov[0, 0][mask]))
        Q_diluted_err = np.sqrt(np.sum(new_Stokes_cov[1, 1][mask]))
        U_diluted_err = np.sqrt(np.sum(new_Stokes_cov[2, 2][mask]))
        IQ_diluted_err = np.sqrt(np.sum(new_Stokes_cov[0, 1][mask] ** 2))
        IU_diluted_err = np.sqrt(np.sum(new_Stokes_cov[0, 2][mask] ** 2))
        QU_diluted_err = np.sqrt(np.sum(new_Stokes_cov[1, 2][mask] ** 2))
        I_diluted_err = np.sqrt(np.sum(Stokes_cov[0, 0][mask]))
        Q_diluted_err = np.sqrt(np.sum(Stokes_cov[1, 1][mask]))
        U_diluted_err = np.sqrt(np.sum(Stokes_cov[2, 2][mask]))
        IQ_diluted_err = np.sqrt(np.sum(Stokes_cov[0, 1][mask] ** 2))
        IU_diluted_err = np.sqrt(np.sum(Stokes_cov[0, 2][mask] ** 2))
        QU_diluted_err = np.sqrt(np.sum(Stokes_cov[1, 2][mask] ** 2))
        I_diluted_stat_err = np.sqrt(np.sum(Stokes_stat_cov[0, 0][mask]))
        Q_diluted_stat_err = np.sqrt(np.sum(Stokes_stat_cov[1, 1][mask]))
        U_diluted_stat_err = np.sqrt(np.sum(Stokes_stat_cov[2, 2][mask]))
        IQ_diluted_stat_err = np.sqrt(np.sum(Stokes_stat_cov[0, 1][mask] ** 2))
        IU_diluted_stat_err = np.sqrt(np.sum(Stokes_stat_cov[0, 2][mask] ** 2))
        QU_diluted_stat_err = np.sqrt(np.sum(Stokes_stat_cov[1, 2][mask] ** 2))

        P_diluted = np.sqrt(Q_diluted**2 + U_diluted**2) / I_diluted
        P_diluted_err = (1.0 / I_diluted) * np.sqrt(
@@ -1751,18 +1772,30 @@ def rotate_Stokes(I_stokes, Q_stokes, U_stokes, Stokes_cov, data_mask, header_st
            - 2.0 * (Q_diluted / I_diluted) * IQ_diluted_err
            - 2.0 * (U_diluted / I_diluted) * IU_diluted_err
        )
        P_diluted_stat_err = (
            P_diluted
            / I_diluted
            * np.sqrt(
                I_diluted_stat_err
                - 2.0 / (I_diluted * P_diluted**2) * (Q_diluted * IQ_diluted_stat_err + U_diluted * IU_diluted_stat_err)
                + 1.0
                / (I_diluted**2 * P_diluted**4)
                * (Q_diluted**2 * Q_diluted_stat_err + U_diluted**2 * U_diluted_stat_err + 2.0 * Q_diluted * U_diluted * QU_diluted_stat_err)
            )
        )
        debiased_P_diluted = np.sqrt(P_diluted**2 - P_diluted_stat_err**2) if P_diluted**2 > P_diluted_stat_err**2 else 0.0

        PA_diluted = princ_angle((90.0 / np.pi) * np.arctan2(U_diluted, Q_diluted))
        PA_diluted_err = (90.0 / (np.pi * (Q_diluted**2 + U_diluted**2))) * np.sqrt(
            U_diluted**2 * Q_diluted_err**2 + Q_diluted**2 * U_diluted_err**2 - 2.0 * Q_diluted * U_diluted * QU_diluted_err
        )

        new_header_stokes["P_int"] = (P_diluted, "Integrated polarization degree")
        new_header_stokes["P_int"] = (debiased_P_diluted, "Integrated polarization degree")
        new_header_stokes["sP_int"] = (np.ceil(P_diluted_err * 1000.0) / 1000.0, "Integrated polarization degree error")
        new_header_stokes["PA_int"] = (PA_diluted, "Integrated polarization angle")
        new_header_stokes["sPA_int"] = (np.ceil(PA_diluted_err * 10.0) / 10.0, "Integrated polarization angle error")

    return new_I_stokes, new_Q_stokes, new_U_stokes, new_Stokes_cov, new_data_mask, new_header_stokes
    return new_I_stokes, new_Q_stokes, new_U_stokes, new_Stokes_cov, new_Stokes_stat_cov, new_data_mask, new_header_stokes


def rotate_data(data_array, error_array, data_mask, headers):
@@ -1820,11 +1853,9 @@ def rotate_data(data_array, error_array, data_mask, headers):
    new_wcs = WCS(header).celestial.deepcopy()
    new_wcs.wcs.pc[:2, :2] = np.dot(mrot, new_wcs.wcs.pc[:2, :2])
    new_wcs.wcs.crpix[:2] = np.dot(mrot, new_wcs.wcs.crpix[:2] - old_center[::-1]) + new_center[::-1]
    new_wcs.array_shape = shape
    new_wcs.wcs.set()
    for key, val in new_wcs.to_header().items():
        new_header[key] = val
    new_header["NAXIS1"], new_header["NAXIS2"] = new_wcs.array_shape
    new_header["ORIENTAT"] = np.arccos(new_wcs.celestial.wcs.pc[0, 0]) * 180.0 / np.pi
    new_header["ROTATE"] = ang
    new_headers.append(new_header)

@@ -43,7 +43,9 @@ def main(infile, P_cut=0.99, target=None, display="pf", output_dir=None):
    if target is None:
        target = Stokes[0].header["TARGNAME"]

    fig = figure(figsize=(8, 8.5), layout="constrained")
    ratiox = max(int(stkI.shape[1] / (stkI.shape[0])), 1)
    ratioy = max(int((stkI.shape[0]) / stkI.shape[1]), 1)
    fig = figure(figsize=(8 * ratiox, 8 * ratioy), layout="constrained")
    fig, ax = polarization_map(Stokes, P_cut=P_cut, step_vec=1, scale_vec=5, display=display, fig=fig, width=0.33, linewidth=0.5)

    ax.plot(*Stokescenter, marker="+", color="k", lw=3)
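
The ratiox/ratioy change sizes the canvas to the Stokes map's aspect ratio instead of the fixed 8 x 8.5 figure; in short (example shape assumed):

ny, nx = 512, 1024                  # example Stokes I map shape
ratiox = max(int(nx / ny), 1)       # wide maps get a wider canvas
ratioy = max(int(ny / nx), 1)       # tall maps get a taller canvas
figsize = (8 * ratiox, 8 * ratioy)  # -> (16, 8) here
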