Merge branch 'testing'
Correction for observation orientation and plot improvements
@@ -157,8 +157,8 @@ def combine_Stokes(infiles):
 def main(infiles, target=None, output_dir="./data/"):
     """ """
     from lib.fits import save_Stokes
-    from lib.reduction import compute_pol
     from lib.plots import pol_map
+    from lib.reduction import compute_pol, rotate_Stokes

     if target is None:
         target = input("Target name:\n>")
@@ -167,48 +167,38 @@ def main(infiles, target=None, output_dir="./data/"):
     data_folder = prod[0][0]
     files = [p[1] for p in prod]

+    # Reduction parameters
+    kwargs = {}
+    # Polarization map output
+    kwargs["SNRp_cut"] = 3.0
+    kwargs["SNRi_cut"] = 1.0
+    kwargs["flux_lim"] = 1e-19, 3e-17
+    kwargs["scale_vec"] = 5
+    kwargs["step_vec"] = 1
+
     if not same_reduction(infiles):
         from FOC_reduction import main as FOC_reduction

-        # Reduction parameters
-        kwargs = {}
-        # Background estimation
-        kwargs["error_sub_type"] = "freedman-diaconis"  # sqrt, sturges, rice, scott, freedman-diaconis (default) or shape (example (51, 51))
-        kwargs["subtract_error"] = 0.7
-
-        # Data binning
-        kwargs["pxsize"] = 0.1
-        kwargs["pxscale"] = "arcsec"  # pixel, arcsec or full
-
-        # Smoothing
-        kwargs["smoothing_function"] = "combine"  # gaussian_after, weighted_gaussian_after, gaussian, weighted_gaussian or combine
-        kwargs["smoothing_FWHM"] = 0.2  # If None, no smoothing is done
-        kwargs["smoothing_scale"] = "arcsec"  # pixel or arcsec
-
-        # Polarization map output
-        kwargs["SNRp_cut"] = 3.0  # P measurements with SNR>3
-        kwargs["SNRi_cut"] = 1.0  # I measurements with SNR>30, which implies an uncertainty in P of 4.7%
-        kwargs["flux_lim"] = 1e-19, 3e-17  # lowest and highest flux displayed on plot, defaults to bkg and maximum in cut if None
-        kwargs["scale_vec"] = 5
-        kwargs["step_vec"] = (
-            1  # plot all vectors in the array; if step_vec = 2, every other vector is plotted; if step_vec = 0, all vectors are displayed at full length
-        )
         grouped_infiles = same_obs(files, data_folder)

         new_infiles = []
         for i, group in enumerate(grouped_infiles):
             new_infiles.append(
-                FOC_reduction(target=target + "-" + str(i + 1), infiles=["/".join([data_folder, file]) for file in group], interactive=True, **kwargs)
+                FOC_reduction(target=target + "-" + str(i + 1), infiles=["/".join([data_folder, file]) for file in group], interactive=True)[0]
             )

         infiles = new_infiles

-    I_combined, Q_combined, U_combined, IQU_cov_combined, data_mask_combined, header_combined = combine_Stokes(infiles)
+    I_combined, Q_combined, U_combined, IQU_cov_combined, data_mask_combined, header_combined = combine_Stokes(infiles=infiles)
+    I_combined, Q_combined, U_combined, IQU_cov_combined, data_mask_combined, header_combined = rotate_Stokes(
+        I_stokes=I_combined, Q_stokes=Q_combined, U_stokes=U_combined, Stokes_cov=IQU_cov_combined, data_mask=data_mask_combined, header_stokes=header_combined
+    )

     P, debiased_P, s_P, s_P_P, PA, s_PA, s_PA_P = compute_pol(
         I_stokes=I_combined, Q_stokes=Q_combined, U_stokes=U_combined, Stokes_cov=IQU_cov_combined, header_stokes=header_combined
     )
     filename = header_combined["FILENAME"]
-    figname = "_".join([target, filename[filename.find("FOC_"):], "combined"])
+    figname = "_".join([target, filename[filename.find("FOC_") :], "combined"])
     Stokes_combined = save_Stokes(
         I_stokes=I_combined,
         Q_stokes=Q_combined,
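The reduction overrides are gone from the FOC_reduction call (which now returns a list of output files, hence the `[0]`), and only the plotting options survive in `kwargs`, forwarded later to `pol_map`. A minimal sketch of that forwarding pattern, with an illustrative stand-in for `pol_map` (not the library's actual signature):

```python
# Sketch of dict-based option forwarding, assuming these keyword names.
def pol_map_sketch(stokes, SNRp_cut=3.0, SNRi_cut=30.0, flux_lim=None, scale_vec=5, step_vec=1):
    print(SNRp_cut, SNRi_cut, flux_lim, scale_vec, step_vec)

kwargs = {"SNRp_cut": 3.0, "SNRi_cut": 1.0, "flux_lim": (1e-19, 3e-17), "scale_vec": 5, "step_vec": 1}
pol_map_sketch(stokes=None, **kwargs)  # each key must match a keyword of the callee
```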
@@ -228,9 +218,9 @@ def main(infiles, target=None, output_dir="./data/"):
         return_hdul=True,
     )

-    pol_map(Stokes_combined)
+    pol_map(Stokes_combined, **kwargs)

-    return "/".join([data_folder, figname+".fits"])
+    return "/".join([data_folder, figname + ".fits"])


 if __name__ == "__main__":
@@ -17,7 +17,7 @@ from lib.utils import princ_angle, sci_not
 from matplotlib.colors import LogNorm


-def main(target=None, proposal_id=None, infiles=None, output_dir="./data", crop=False, interactive=False, **kwargs):
+def main(target=None, proposal_id=None, infiles=None, output_dir="./data", crop=False, interactive=False):
     # Reduction parameters
     # Deconvolution
     deconvolve = False
@@ -36,12 +36,12 @@ def main(target=None, proposal_id=None, infiles=None, output_dir="./data", crop=

     # Background estimation
     error_sub_type = "freedman-diaconis"  # sqrt, sturges, rice, scott, freedman-diaconis (default) or shape (example (51, 51))
-    subtract_error = 0.7
+    subtract_error = 1.0
     display_bkg = False

     # Data binning
-    pxsize = 0.1
-    pxscale = "arcsec"  # pixel, arcsec or full
+    pxsize = 2
+    pxscale = "px"  # pixel, arcsec or full
     rebin_operation = "sum"  # sum or average

     # Alignment
@@ -54,8 +54,8 @@ def main(target=None, proposal_id=None, infiles=None, output_dir="./data", crop=

     # Smoothing
     smoothing_function = "combine"  # gaussian_after, weighted_gaussian_after, gaussian, weighted_gaussian or combine
-    smoothing_FWHM = 0.2  # If None, no smoothing is done
-    smoothing_scale = "arcsec"  # pixel or arcsec
+    smoothing_FWHM = 2.0  # If None, no smoothing is done
+    smoothing_scale = "px"  # pixel or arcsec

     # Rotation
     rotate_North = True
@@ -64,31 +64,10 @@ def main(target=None, proposal_id=None, infiles=None, output_dir="./data", crop=
     SNRp_cut = 3.0  # P measurements with SNR>3
     SNRi_cut = 1.0  # I measurements with SNR>30, which implies an uncertainty in P of 4.7%
     flux_lim = None  # lowest and highest flux displayed on plot, defaults to bkg and maximum in cut if None
-    scale_vec = 3
+    scale_vec = 5
     step_vec = 1  # plot all vectors in the array; if step_vec = 2, every other vector is plotted; if step_vec = 0, all vectors are displayed at full length

     # Pipeline start
-    # Step 0:
-    # Get parameters from kwargs
-    for key, value in [
-        ["error_sub_type", error_sub_type],
-        ["subtract_error", subtract_error],
-        ["pxsize", pxsize],
-        ["pxscale", pxscale],
-        ["smoothing_function", smoothing_function],
-        ["smoothing_FWHM", smoothing_FWHM],
-        ["smoothing_scale", smoothing_scale],
-        ["SNRp_cut", SNRp_cut],
-        ["SNRi_cut", SNRi_cut],
-        ["flux_lim", flux_lim],
-        ["scale_vec", scale_vec],
-        ["step_vec", step_vec],
-    ]:
-        try:
-            value = kwargs[key]
-        except KeyError:
-            pass
-    rebin = True if pxsize is not None else False

     # Step 1:
     # Get data from fits files and translate to flux in erg/cm²/s/Angstrom.
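The deleted "Step 0" block was dead code: `value = kwargs[key]` rebinds the loop variable `value`, never the module parameter named by `key`, so the overrides passed through `**kwargs` were silently ignored. Dropping the loop together with the `**kwargs` parameter in the signature above is therefore consistent. A minimal reproduction:

```python
# Why the deleted "Step 0" loop had no effect (illustrative reconstruction).
pxsize = 0.1
kwargs = {"pxsize": 2}
for key, value in [["pxsize", pxsize]]:
    try:
        value = kwargs[key]  # rebinds the local name `value` only
    except KeyError:
        pass
print(pxsize)  # still 0.1: the parameter never changed

# A working variant assigns the target name explicitly:
pxsize = kwargs.get("pxsize", pxsize)
print(pxsize)  # 2
```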
@@ -119,19 +98,18 @@ def main(target=None, proposal_id=None, infiles=None, output_dir="./data", crop=

     figname = "_".join([target, "FOC"])
     figtype = ""
-    if rebin:
+    if (pxsize is not None) and not (pxsize == 1 and pxscale.lower() in ["px", "pixel", "pixels"]):
         if pxscale not in ["full"]:
             figtype = "".join(["b", "{0:.2f}".format(pxsize), pxscale])  # additional information
         else:
             figtype = "full"
-    if smoothing_FWHM is not None:
-        figtype += "_" + "".join(
-            ["".join([s[0] for s in smoothing_function.split("_")]), "{0:.2f}".format(smoothing_FWHM), smoothing_scale]
-        )  # additional information
+    if smoothing_FWHM is not None and smoothing_scale is not None:
+        smoothstr = "".join([*[s[0] for s in smoothing_function.split("_")], "{0:.2f}".format(smoothing_FWHM), smoothing_scale])
+        figtype = "_".join([figtype, smoothstr] if figtype != "" else [smoothstr])
     if deconvolve:
-        figtype += "_deconv"
+        figtype = "_".join([figtype, "deconv"] if figtype != "" else ["deconv"])
     if align_center is None:
-        figtype += "_not_aligned"
+        figtype = "_".join([figtype, "not_aligned"] if figtype != "" else ["not_aligned"])

     # Crop data to remove outside blank margins.
     data_array, error_array, headers = proj_red.crop_array(
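The rewritten figtype logic joins each suffix with "_" only when something is already in `figtype`, so output names can no longer start with a stray underscore when rebinning is skipped. The pattern in isolation:

```python
# Suffix joining as adopted above: no leading underscore on an empty base.
def add_suffix(figtype, part):
    return "_".join([figtype, part] if figtype != "" else [part])

assert add_suffix("", "deconv") == "deconv"
assert add_suffix("b2.00px", "deconv") == "b2.00px_deconv"
```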
@@ -159,7 +137,7 @@ def main(target=None, proposal_id=None, infiles=None, output_dir="./data", crop=
     )

     # Rotate data to have same orientation
-    rotate_data = np.unique([float(head["ORIENTAT"]) for head in headers]).size != 1
+    rotate_data = np.unique([np.round(float(head["ORIENTAT"]), 3) for head in headers]).size != 1
     if rotate_data:
         ang = np.mean([head["ORIENTAT"] for head in headers])
         for head in headers:
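Rounding ORIENTAT to three decimals before `np.unique` stops floating-point noise between otherwise identical headers from triggering a spurious rotation pass:

```python
import numpy as np

orientat = [45.0000001, 44.9999999]  # same orientation up to float noise
print(np.unique([float(a) for a in orientat]).size != 1)               # True: would rotate
print(np.unique([np.round(float(a), 3) for a in orientat]).size != 1)  # False: no rotation
```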
@@ -199,7 +177,7 @@ def main(target=None, proposal_id=None, infiles=None, output_dir="./data", crop=
     )

     # Rebin data to desired pixel size.
-    if rebin:
+    if (pxsize is not None) and not (pxsize == 1 and pxscale.lower() in ["px", "pixel", "pixels"]):
         data_array, error_array, headers, Dxy, data_mask = proj_red.rebin_array(
             data_array, error_array, headers, pxsize=pxsize, scale=pxscale, operation=rebin_operation, data_mask=data_mask
         )
@@ -246,7 +224,9 @@ def main(target=None, proposal_id=None, infiles=None, output_dir="./data", crop=
     I_stokes, Q_stokes, U_stokes, Stokes_cov, data_mask, header_stokes = proj_red.rotate_Stokes(
         I_stokes, Q_stokes, U_stokes, Stokes_cov, data_mask, header_stokes, SNRi_cut=None
     )
-    I_bkg, Q_bkg, U_bkg, S_cov_bkg, _, _ = proj_red.rotate_Stokes(I_bkg, Q_bkg, U_bkg, S_cov_bkg, np.array(True).reshape(1, 1), header_bkg, SNRi_cut=None)
+    I_bkg, Q_bkg, U_bkg, S_cov_bkg, data_mask_bkg, header_bkg = proj_red.rotate_Stokes(
+        I_bkg, Q_bkg, U_bkg, S_cov_bkg, np.array(True).reshape(1, 1), header_bkg, SNRi_cut=None
+    )

     # Compute polarimetric parameters (polarization degree and angle).
     P, debiased_P, s_P, s_P_P, PA, s_PA, s_PA_P = proj_red.compute_pol(I_stokes, Q_stokes, U_stokes, Stokes_cov, header_stokes)
@@ -273,6 +253,7 @@ def main(target=None, proposal_id=None, infiles=None, output_dir="./data", crop=
         data_folder=data_folder,
         return_hdul=True,
     )
+    outfiles.append("/".join([data_folder, Stokes_hdul[0].header["FILENAME"] + ".fits"]))

     # Step 5:
     # crop to desired region of interest (roi)
@@ -281,15 +262,16 @@ def main(target=None, proposal_id=None, infiles=None, output_dir="./data", crop=
         stokescrop = proj_plots.crop_Stokes(deepcopy(Stokes_hdul), norm=LogNorm())
         stokescrop.crop()
         stokescrop.write_to("/".join([data_folder, figname + ".fits"]))
-        Stokes_hdul, headers = stokescrop.hdul_crop, [dataset.header for dataset in stokescrop.hdul_crop]
+        Stokes_hdul, header_stokes = stokescrop.hdul_crop, stokescrop.hdul_crop[0].header
+        outfiles.append("/".join([data_folder, Stokes_hdul[0].header["FILENAME"] + ".fits"]))

     data_mask = Stokes_hdul["data_mask"].data.astype(bool)
     print(
         "F_int({0:.0f} Angs) = ({1} ± {2})e{3} ergs.cm^-2.s^-1.Angs^-1".format(
-            header_stokes["photplam"],
+            header_stokes["PHOTPLAM"],
             *sci_not(
-                Stokes_hdul[0].data[data_mask].sum() * header_stokes["photflam"],
-                np.sqrt(Stokes_hdul[3].data[0, 0][data_mask].sum()) * header_stokes["photflam"],
+                Stokes_hdul[0].data[data_mask].sum() * header_stokes["PHOTFLAM"],
+                np.sqrt(Stokes_hdul[3].data[0, 0][data_mask].sum()) * header_stokes["PHOTFLAM"],
                 2,
                 out=int,
             ),
@@ -421,8 +403,6 @@ def main(target=None, proposal_id=None, infiles=None, output_dir="./data", crop=
     elif pxscale.lower() not in ["full", "integrate"]:
         proj_plots.pol_map(Stokes_hdul, SNRp_cut=SNRp_cut, SNRi_cut=SNRi_cut, flux_lim=flux_lim)

-    outfiles.append("/".join([data_folder, Stokes_hdul[0].header["FILENAME"]+".fits"]))
-
     return outfiles

@@ -258,7 +258,7 @@ def bkg_fit(data, error, mask, headers, subtract_error=True, display=False, save
         n_error_array[i] = np.sqrt(n_error_array[i] ** 2 + error_bkg[i] ** 2)

         # Subtract background
-        if subtract_error > 0:
+        if np.abs(subtract_error) > 0:
             n_data_array[i][mask] = n_data_array[i][mask] - bkg
             n_data_array[i][np.logical_and(mask, n_data_array[i] <= 1e-3 * bkg)] = 1e-3 * bkg

@@ -367,7 +367,7 @@ def bkg_hist(data, error, mask, headers, sub_type=None, subtract_error=True, dis
         n_error_array[i] = np.sqrt(n_error_array[i] ** 2 + error_bkg[i] ** 2)

         # Subtract background
-        if subtract_error > 0:
+        if np.abs(subtract_error) > 0:
             n_data_array[i][mask] = n_data_array[i][mask] - bkg
             n_data_array[i][np.logical_and(mask, n_data_array[i] <= 1e-3 * bkg)] = 1e-3 * bkg

@@ -464,7 +464,7 @@ def bkg_mini(data, error, mask, headers, sub_shape=(15, 15), subtract_error=True
         n_error_array[i] = np.sqrt(n_error_array[i] ** 2 + error_bkg[i] ** 2)

         # Subtract background
-        if subtract_error > 0.0:
+        if np.abs(subtract_error) > 0:
             n_data_array[i][mask] = n_data_array[i][mask] - bkg
             n_data_array[i][np.logical_and(mask, n_data_array[i] <= 1e-3 * bkg)] = 1e-3 * bkg

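All three background estimators now share the same guard. With `np.abs`, a negative `subtract_error` (presumably meaningful as a mode flag elsewhere) still enters the subtraction branch; only exactly zero disables it:

```python
import numpy as np

for subtract_error in (0.7, -0.7, 0.0):
    print(subtract_error, np.abs(subtract_error) > 0)  # True, True, False
# The old test `subtract_error > 0` would have skipped the branch for -0.7.
```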
@@ -16,6 +16,7 @@ from astropy.io import fits
 from astropy.wcs import WCS

 from .convex_hull import clean_ROI
+from .utils import wcs_PA


 def get_obs_data(infiles, data_folder="", compute_flux=False):
@@ -57,22 +58,20 @@ def get_obs_data(infiles, data_folder="", compute_flux=False):
         if new_wcs.wcs.has_cd() or (new_wcs.wcs.cdelt[:2] == np.array([1.0, 1.0])).all():
             # Update WCS with relevant information
             if new_wcs.wcs.has_cd():
-                old_cd = new_wcs.wcs.cd
                 del new_wcs.wcs.cd
                 keys = list(new_wcs.to_header().keys()) + ["CD1_1", "CD1_2", "CD1_3", "CD2_1", "CD2_2", "CD2_3", "CD3_1", "CD3_2", "CD3_3"]
                 for key in keys:
                     header.remove(key, ignore_missing=True)
-                new_cdelt = np.linalg.eig(old_cd)[0]
-            elif (new_wcs.wcs.cdelt == np.array([1.0, 1.0])).all() and (new_wcs.array_shape in [(512, 512), (1024, 512), (512, 1024), (1024, 1024)]):
-                old_cd = new_wcs.wcs.pc
-            new_wcs.wcs.pc = np.dot(old_cd, np.diag(1.0 / new_cdelt))
+                new_cdelt = np.linalg.eigvals(wcs.wcs.cd)
+                new_cdelt.sort()
+                new_wcs.wcs.pc = wcs.wcs.cd.dot(np.diag(1.0 / new_cdelt))
             new_wcs.wcs.cdelt = new_cdelt
             for key, val in new_wcs.to_header().items():
                 header[key] = val
             try:
                 _ = header["ORIENTAT"]
             except KeyError:
-                header["ORIENTAT"] = -np.arccos(new_wcs.wcs.pc[0, 0]) * 180.0 / np.pi
+                header["ORIENTAT"] = wcs_PA(new_wcs.wcs.pc[1, 0], np.diag(new_wcs.wcs.pc).mean())

         # force WCS for POL60 to have same pixel size as POL0 and POL120
         is_pol60 = np.array([head["filtnam1"].lower() == "pol60" for head in headers], dtype=bool)
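The rewritten branch derives CDELT from the eigenvalues of the CD matrix and then factors out PC = CD · diag(1/CDELT); the fallback angle now goes through the project's own `wcs_PA` helper (imported above) instead of an arccos that lost the sign. The numpy part can be sketched in isolation:

```python
import numpy as np

# Sketch of the CD-matrix decomposition, assuming the usual FITS convention
# of one mirrored axis (det(CD) < 0), in which case the eigenvalues of CD
# are exactly the two pixel scales.
ang = np.deg2rad(30.0)
cdelt_true = np.array([-2.8e-5, 2.8e-5])  # deg/px, RA axis flipped
rot = np.array([[np.cos(ang), -np.sin(ang)], [np.sin(ang), np.cos(ang)]])
cd = rot @ np.diag(cdelt_true)

new_cdelt = np.linalg.eigvals(cd).real  # real-valued in the mirrored-axis case
new_cdelt.sort()
pc = cd @ np.diag(1.0 / new_cdelt)      # PC = CD . diag(1 / CDELT)
print(np.allclose(new_cdelt, cdelt_true))  # True
print(np.allclose(pc @ pc.T, np.eye(2)))   # True: PC is a pure rotation
```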
@@ -130,7 +129,6 @@ def save_Stokes(
         Only returned if return_hdul is True.
     """
     # Create new WCS object given the modified images
-    exp_tot = header_stokes['exptime']
     new_wcs = WCS(header_stokes).deepcopy()

     if data_mask.shape != (1, 1):
@@ -140,23 +138,23 @@ def save_Stokes(
         new_wcs.wcs.crpix = np.array(new_wcs.wcs.crpix) - vertex[0::-2]

     header = new_wcs.to_header()
-    header["TELESCOP"] = (header_stokes["telescop"] if "TELESCOP" in list(header_stokes.keys()) else "HST", "telescope used to acquire data")
-    header["INSTRUME"] = (header_stokes["instrume"] if "INSTRUME" in list(header_stokes.keys()) else "FOC", "identifier for instrument used to acquire data")
-    header["PHOTPLAM"] = (header_stokes["photplam"], "Pivot Wavelength")
-    header["PHOTFLAM"] = (header_stokes["photflam"], "Inverse Sensitivity in DN/sec/cm**2/Angst")
-    header["EXPTIME"] = (exp_tot, "Total exposure time in sec")
-    header["PROPOSID"] = (header_stokes["proposid"], "PEP proposal identifier for observation")
-    header["TARGNAME"] = (header_stokes["targname"], "Target name")
-    header["ORIENTAT"] = (np.arccos(new_wcs.wcs.pc[0, 0]) * 180.0 / np.pi, "Angle between North and the y-axis of the image")
-    header["FILENAME"] = (filename, "Original filename")
+    header["TELESCOP"] = (header_stokes["TELESCOP"] if "TELESCOP" in list(header_stokes.keys()) else "HST", "telescope used to acquire data")
+    header["INSTRUME"] = (header_stokes["INSTRUME"] if "INSTRUME" in list(header_stokes.keys()) else "FOC", "identifier for instrument used to acquire data")
+    header["PHOTPLAM"] = (header_stokes["PHOTPLAM"], "Pivot Wavelength")
+    header["PHOTFLAM"] = (header_stokes["PHOTFLAM"], "Inverse Sensitivity in DN/sec/cm**2/Angst")
+    header["EXPTIME"] = (header_stokes["EXPTIME"], "Total exposure time in sec")
+    header["PROPOSID"] = (header_stokes["PROPOSID"], "PEP proposal identifier for observation")
+    header["TARGNAME"] = (header_stokes["TARGNAME"], "Target name")
+    header["ORIENTAT"] = (header_stokes["ORIENTAT"], "Angle between North and the y-axis of the image")
+    header["FILENAME"] = (filename, "Original filename")
     header["BKG_TYPE"] = (header_stokes["BKG_TYPE"], "Bkg estimation method used during reduction")
     header["BKG_SUB"] = (header_stokes["BKG_SUB"], "Amount of bkg subtracted from images")
-    header["SMOOTH"] = (header_stokes["SMOOTH"], "Smoothing method used during reduction")
-    header["SAMPLING"] = (header_stokes["SAMPLING"], "Resampling performed during reduction")
-    header["P_INT"] = (header_stokes["P_int"], "Integrated polarization degree")
-    header["sP_INT"] = (header_stokes["sP_int"], "Integrated polarization degree error")
-    header["PA_INT"] = (header_stokes["PA_int"], "Integrated polarization angle")
-    header["sPA_INT"] = (header_stokes["sPA_int"], "Integrated polarization angle error")
+    header["SMOOTH"] = (header_stokes["SMOOTH"] if "SMOOTH" in list(header_stokes.keys()) else "None", "Smoothing method used during reduction")
+    header["SAMPLING"] = (header_stokes["SAMPLING"] if "SAMPLING" in list(header_stokes.keys()) else "None", "Resampling performed during reduction")
+    header["P_INT"] = (header_stokes["P_INT"], "Integrated polarization degree")
+    header["sP_INT"] = (header_stokes["sP_INT"], "Integrated polarization degree error")
+    header["PA_INT"] = (header_stokes["PA_INT"], "Integrated polarization angle")
+    header["sPA_INT"] = (header_stokes["sPA_INT"], "Integrated polarization angle error")

     # Crop Data to mask
     if data_mask.shape != (1, 1):
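Note that `astropy.io.fits` headers are case-insensitive, so the uppercase keys are a consistency change rather than a behavioral one; the real fixes here are EXPTIME and ORIENTAT now propagating from `header_stokes`, plus the "None" fallbacks for reductions that skipped smoothing or resampling. The fallback pattern in isolation (requires astropy):

```python
from astropy.io import fits

header_stokes = fits.Header()
header_stokes["TELESCOP"] = "HST"
print(header_stokes["telescop"])  # 'HST': fits.Header lookups ignore case

# The SMOOTH/SAMPLING fallback, equivalently written with .get:
smooth = header_stokes["SMOOTH"] if "SMOOTH" in list(header_stokes.keys()) else "None"
assert smooth == header_stokes.get("SMOOTH", "None")
```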
@@ -182,21 +182,23 @@ def plot_Stokes(Stokes, savename=None, plots_folder=""):
     wcs = WCS(Stokes[0]).deepcopy()

     # Plot figure
-    plt.rcParams.update({"font.size": 10})
-    fig, (axI, axQ, axU) = plt.subplots(ncols=3, figsize=(20, 6), subplot_kw=dict(projection=wcs))
-    fig.subplots_adjust(hspace=0, wspace=0.75, bottom=0.01, top=0.99, left=0.08, right=0.95)
+    plt.rcParams.update({"font.size": 14})
+    ratiox = max(int(stkI.shape[1]/stkI.shape[0]),1)
+    ratioy = max(int(stkI.shape[0]/stkI.shape[1]),1)
+    fig, (axI, axQ, axU) = plt.subplots(ncols=3, figsize=(15*ratiox, 6*ratioy), subplot_kw=dict(projection=wcs))
+    fig.subplots_adjust(hspace=0, wspace=0.50, bottom=0.01, top=0.99, left=0.07, right=0.97)
     fig.suptitle("I, Q, U Stokes parameters")

     imI = axI.imshow(stkI, origin="lower", cmap="inferno")
-    fig.colorbar(imI, ax=axI, aspect=50, shrink=0.50, pad=0.025, label="counts/sec")
+    fig.colorbar(imI, ax=axI, aspect=30, shrink=0.50, pad=0.025, label="counts/sec")
     axI.set(xlabel="RA", ylabel="DEC", title=r"$I_{stokes}$")

     imQ = axQ.imshow(stkQ, origin="lower", cmap="inferno")
-    fig.colorbar(imQ, ax=axQ, aspect=50, shrink=0.50, pad=0.025, label="counts/sec")
+    fig.colorbar(imQ, ax=axQ, aspect=30, shrink=0.50, pad=0.025, label="counts/sec")
     axQ.set(xlabel="RA", ylabel="DEC", title=r"$Q_{stokes}$")

     imU = axU.imshow(stkU, origin="lower", cmap="inferno")
-    fig.colorbar(imU, ax=axU, aspect=50, shrink=0.50, pad=0.025, label="counts/sec")
+    fig.colorbar(imU, ax=axU, aspect=30, shrink=0.50, pad=0.025, label="counts/sec")
     axU.set(xlabel="RA", ylabel="DEC", title=r"$U_{stokes}$")

     if savename is not None:
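Both plotting routines now size the figure from the image aspect ratio: `ratiox`/`ratioy` are integer multipliers clamped to at least 1, so elongated maps get a wider or taller canvas while square maps keep the base size. The computation in isolation:

```python
def fig_ratios(shape):
    # shape = (ny, nx); mirrors the ratiox/ratioy lines above
    ratiox = max(int(shape[1] / shape[0]), 1)
    ratioy = max(int(shape[0] / shape[1]), 1)
    return ratiox, ratioy

print(fig_ratios((512, 512)))   # (1, 1): square map, base figsize
print(fig_ratios((256, 1024)))  # (4, 1): wide map gets a 4x wider canvas
```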
@@ -320,12 +322,20 @@ def polarization_map(
         print("No pixel with polarization information above requested SNR.")

     # Plot the map
-    plt.rcParams.update({"font.size": 10})
+    plt.rcParams.update({"font.size": 14})
     plt.rcdefaults()
-    fig, ax = plt.subplots(figsize=(10, 10), layout="constrained", subplot_kw=dict(projection=wcs))
-    ax.set(aspect="equal", fc="k")
+    ratiox = max(int(stkI.shape[1]/(stkI.shape[0])),1)
+    ratioy = max(int((stkI.shape[0])/stkI.shape[1]),1)
+    fig, ax = plt.subplots(figsize=(6*ratiox, 6*ratioy), layout="compressed", subplot_kw=dict(projection=wcs))
+    ax.set(aspect="equal", fc="k", ylim=[-stkI.shape[0]*0.10,stkI.shape[0]*1.15])
     # fig.subplots_adjust(hspace=0, wspace=0, left=0.102, right=1.02)

+    # ax.coords.grid(True, color='white', ls='dotted', alpha=0.5)
+    ax.coords[0].set_axislabel("Right Ascension (J2000)")
+    ax.coords[0].set_axislabel_position("t")
+    ax.coords[0].set_ticklabel_position("t")
+    ax.set_ylabel("Declination (J2000)", labelpad=-1)

     if display.lower() in ["intensity"]:
         # If no display selected, show intensity map
         display = "i"
@@ -337,7 +347,7 @@ def polarization_map(
     else:
         vmin, vmax = flux_lim
     im = ax.imshow(stkI * convert_flux, norm=LogNorm(vmin, vmax), aspect="equal", cmap="inferno", alpha=1.0)
-    fig.colorbar(im, ax=ax, aspect=50, shrink=0.75, pad=0.025, label=r"$F_{\lambda}$ [$ergs \cdot cm^{-2} \cdot s^{-1} \cdot \AA^{-1}$]")
+    fig.colorbar(im, ax=ax, aspect=30, shrink=0.75, pad=0.025, label=r"$F_{\lambda}$ [$ergs \cdot cm^{-2} \cdot s^{-1} \cdot \AA^{-1}$]")
     levelsI = np.array([0.8, 2.0, 5.0, 10.0, 20.0, 50.0]) / 100.0 * vmax
     print("Total flux contour levels : ", levelsI)
     ax.contour(stkI * convert_flux, levels=levelsI, colors="grey", linewidths=0.5)
@@ -432,24 +442,24 @@ def polarization_map(
     PA_diluted = Stokes[0].header["PA_int"]
     PA_diluted_err = Stokes[0].header["sPA_int"]

-    plt.rcParams.update({"font.size": 12})
+    plt.rcParams.update({"font.size": 10})
     px_size = wcs.wcs.get_cdelt()[0] * 3600.0
-    px_sc = AnchoredSizeBar(ax.transData, 1.0 / px_size, "1 arcsec", 3, pad=0.5, sep=5, borderpad=0.5, frameon=False, size_vertical=0.005, color="w")
+    px_sc = AnchoredSizeBar(ax.transData, 1.0 / px_size, "1 arcsec", 3, pad=0.25, sep=5, borderpad=0.25, frameon=False, size_vertical=0.005, color="w")
     north_dir = AnchoredDirectionArrows(
         ax.transAxes,
         "E",
         "N",
-        length=-0.08,
-        fontsize=0.025,
+        length=-0.05,
+        fontsize=0.02,
         loc=1,
-        aspect_ratio=-1,
+        aspect_ratio=-(stkI.shape[1]/(stkI.shape[0]*1.25)),
         sep_y=0.01,
         sep_x=0.01,
         back_length=0.0,
         head_length=10.0,
         head_width=10.0,
         angle=-Stokes[0].header["orientat"],
-        text_props={"ec": "k", "fc": "w", "alpha": 1, "lw": -0.2},
+        text_props={"ec": "k", "fc": "w", "alpha": 1, "lw": 0.4},
         arrow_props={"ec": "k", "fc": "w", "alpha": 1, "lw": 1},
     )
@@ -478,7 +488,7 @@ def polarization_map(
         color="w",
         edgecolor="k",
     )
-    pol_sc = AnchoredSizeBar(ax.transData, scale_vec, r"$P$= 100 %", 4, pad=0.5, sep=5, borderpad=0.5, frameon=False, size_vertical=0.005, color="w")
+    pol_sc = AnchoredSizeBar(ax.transData, scale_vec, r"$P$= 100 %", 4, pad=0.25, sep=5, borderpad=0.25, frameon=False, size_vertical=0.005, color="w")

     ax.add_artist(pol_sc)
     ax.add_artist(px_sc)
@@ -521,12 +531,6 @@ def polarization_map(
         x, y = np.array([x, y]) - np.array(stkI.shape) / 2.0
         ax.add_patch(Rectangle((x, y), width, height, angle=angle, edgecolor=color, fill=False))

-    # ax.coords.grid(True, color='white', ls='dotted', alpha=0.5)
-    ax.coords[0].set_axislabel("Right Ascension (J2000)")
-    ax.coords[0].set_axislabel_position("t")
-    ax.coords[0].set_ticklabel_position("t")
-    ax.set_ylabel("Declination (J2000)", labelpad=-1)
-
     if savename is not None:
         if savename[-4:] not in [".png", ".jpg", ".pdf"]:
             savename += ".pdf"
@@ -666,7 +670,7 @@ class align_maps(object):
             length=-0.08,
             fontsize=0.03,
             loc=1,
-            aspect_ratio=-1,
+            aspect_ratio=-(self.map_data.shape[1]/self.map_data.shape[0]),
             sep_y=0.01,
             sep_x=0.01,
             angle=-self.map_header["orientat"],
@@ -724,7 +728,7 @@ class align_maps(object):
             length=-0.08,
             fontsize=0.03,
             loc=1,
-            aspect_ratio=-1,
+            aspect_ratio=-(self.other_data.shape[1]/self.other_data.shape[0]),
             sep_y=0.01,
             sep_x=0.01,
             angle=-self.other_header["orientat"],
@@ -988,7 +992,7 @@ class overplot_radio(align_maps):
             length=-0.08,
             fontsize=0.03,
             loc=1,
-            aspect_ratio=-1,
+            aspect_ratio=-(stkI.shape[1]/stkI.shape[0]),
             sep_y=0.01,
             sep_x=0.01,
             angle=-self.Stokes_UV[0].header["orientat"],
@@ -1190,7 +1194,7 @@ class overplot_chandra(align_maps):
             length=-0.08,
             fontsize=0.03,
             loc=1,
-            aspect_ratio=-1,
+            aspect_ratio=-(stkI.shape[1]/stkI.shape[0]),
             sep_y=0.01,
             sep_x=0.01,
             angle=-self.Stokes_UV[0].header["orientat"],
@@ -1329,7 +1333,6 @@ class overplot_pol(align_maps):
         else:
             self.scale_vec = scale_vec
             step_vec = 1
-        px_scale = np.abs(self.wcs_UV.wcs.get_cdelt()[0] / self.other_wcs.wcs.get_cdelt()[0])
         self.X, self.Y = np.meshgrid(np.arange(stkI.shape[1]), np.arange(stkI.shape[0]))
         self.U, self.V = pol * np.cos(np.pi / 2.0 + pang * np.pi / 180.0), pol * np.sin(np.pi / 2.0 + pang * np.pi / 180.0)
         self.Q = self.ax_overplot.quiver(
@@ -1339,7 +1342,7 @@ class overplot_pol(align_maps):
             self.V[::step_vec, ::step_vec],
             units="xy",
             angles="uv",
-            scale=px_scale / self.scale_vec,
+            scale=1. / self.scale_vec,
             scale_units="xy",
             pivot="mid",
             headwidth=0.0,
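With `scale_units="xy"` and `scale = 1/scale_vec`, a vector of P = 1 (100 % polarization) spans `scale_vec` data pixels on the overplot axes, independent of the ratio between the two maps' WCS pixel scales; that is why `px_scale` could be dropped both here and in the matching AnchoredSizeBar below. A self-contained sketch of that quiver convention:

```python
import numpy as np
import matplotlib.pyplot as plt

scale_vec = 5
X, Y = np.meshgrid(np.arange(10), np.arange(10))
U = np.full(X.shape, 0.5)  # P = 50 % everywhere
V = np.zeros(X.shape)

fig, ax = plt.subplots()
# With scale_units="xy", drawn length = |(u, v)| / scale in data units,
# so P = 1 spans scale_vec pixels and P = 0.5 spans 2.5 pixels.
ax.quiver(X, Y, U, V, units="xy", angles="uv", scale=1.0 / scale_vec, scale_units="xy", pivot="mid", headwidth=0.0)
plt.close(fig)
```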
@@ -1385,7 +1388,7 @@ class overplot_pol(align_maps):
             length=-0.08,
             fontsize=0.03,
             loc=1,
-            aspect_ratio=-1,
+            aspect_ratio=-(stkI.shape[1]/stkI.shape[0]),
             sep_y=0.01,
             sep_x=0.01,
             angle=-self.Stokes_UV[0].header["orientat"],
@@ -1395,7 +1398,7 @@ class overplot_pol(align_maps):
         self.ax_overplot.add_artist(north_dir)
         pol_sc = AnchoredSizeBar(
             self.ax_overplot.transData,
-            self.scale_vec / px_scale,
+            self.scale_vec,
             r"$P$= 100%",
             4,
             pad=0.5,
@@ -1550,7 +1553,7 @@ class align_pol(object):
             length=-0.08,
             fontsize=0.025,
             loc=1,
-            aspect_ratio=-1,
+            aspect_ratio=-(stkI.shape[1]/stkI.shape[0]),
             sep_y=0.01,
             sep_x=0.01,
             back_length=0.0,
@@ -1814,6 +1817,8 @@ class crop_map(object):
         # Write cropped map to new HDUList
         self.header_crop = deepcopy(header)
         self.header_crop.update(self.wcs_crop.to_header())
+        if self.header_crop["FILENAME"][-4:] != "crop":
+            self.header_crop["FILENAME"] += "_crop"
         self.hdul_crop = fits.HDUList([fits.PrimaryHDU(self.data_crop, self.header_crop)])

         self.rect_selector.clear()
@@ -1936,6 +1941,8 @@ class crop_Stokes(crop_map):
         )

         for dataset in self.hdul_crop:
+            if dataset.header["FILENAME"][-4:] != "crop":
+                dataset.header["FILENAME"] += "_crop"
             dataset.header["P_int"] = (P_diluted, "Integrated polarization degree")
             dataset.header["sP_int"] = (np.ceil(P_diluted_err * 1000.0) / 1000.0, "Integrated polarization degree error")
             dataset.header["PA_int"] = (PA_diluted, "Integrated polarization angle")
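The new guard keeps repeated crops from stacking "_crop" suffixes onto FILENAME: it tests the last four characters, the tail of "_crop", so the append is idempotent. In isolation:

```python
def mark_cropped(filename):
    # Idempotent suffix, as in crop_map/crop_Stokes above.
    if filename[-4:] != "crop":
        filename += "_crop"
    return filename

name = mark_cropped("NGC1068_FOC_b0.10arcsec")
assert mark_cropped(name) == name  # a second crop leaves the name unchanged
```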
@@ -2797,10 +2804,10 @@ class pol_map(object):
             ax.transAxes,
             "E",
             "N",
-            length=-0.08,
-            fontsize=0.025,
+            length=-0.05,
+            fontsize=0.02,
             loc=1,
-            aspect_ratio=-1,
+            aspect_ratio=-(self.I.shape[1]/self.I.shape[0]),
             sep_y=0.01,
             sep_x=0.01,
             back_length=0.0,
@@ -433,7 +433,18 @@ def deconvolve_array(data_array, headers, psf="gaussian", FWHM=1.0, scale="px",
     return deconv_array


-def get_error(data_array, headers, error_array=None, data_mask=None, sub_type=None, subtract_error=0.5, display=False, savename=None, plots_folder="", return_background=False):
+def get_error(
+    data_array,
+    headers,
+    error_array=None,
+    data_mask=None,
+    sub_type=None,
+    subtract_error=0.5,
+    display=False,
+    savename=None,
+    plots_folder="",
+    return_background=False,
+):
     """
     Look for the sub-image of shape sub_shape that has the smallest integrated
     flux (no source assumption) and define the background on the image by the
@@ -521,29 +532,29 @@ def get_error(data_array, headers, error_array=None, data_mask=None, sub_type=No
         n_data_array, c_error_bkg, headers, background = bkg_hist(
             data, error, mask, headers, subtract_error=subtract_error, display=display, savename=savename, plots_folder=plots_folder
         )
-        sub_type, subtract_error = "histogram ", str(int(subtract_error>0.))
+        sub_type, subtract_error = "histogram ", str(int(subtract_error > 0.0))
     elif isinstance(sub_type, str):
         if sub_type.lower() in ["auto"]:
             n_data_array, c_error_bkg, headers, background = bkg_fit(
                 data, error, mask, headers, subtract_error=subtract_error, display=display, savename=savename, plots_folder=plots_folder
             )
-            sub_type, subtract_error = "histogram fit ", "mean+%.1fsigma"%subtract_error if subtract_error != 0. else 0.
+            sub_type, subtract_error = "histogram fit ", "mean+%.1fsigma" % subtract_error if subtract_error != 0.0 else 0.0
         else:
             n_data_array, c_error_bkg, headers, background = bkg_hist(
                 data, error, mask, headers, sub_type=sub_type, subtract_error=subtract_error, display=display, savename=savename, plots_folder=plots_folder
             )
-            sub_type, subtract_error = "histogram ", "mean+%.1fsigma"%subtract_error if subtract_error != 0. else 0.
+            sub_type, subtract_error = "histogram ", "mean+%.1fsigma" % subtract_error if subtract_error != 0.0 else 0.0
     elif isinstance(sub_type, tuple):
         n_data_array, c_error_bkg, headers, background = bkg_mini(
             data, error, mask, headers, sub_shape=sub_type, subtract_error=subtract_error, display=display, savename=savename, plots_folder=plots_folder
         )
-        sub_type, subtract_error = "minimal flux ", "mean+%.1fsigma"%subtract_error if subtract_error != 0. else 0.
+        sub_type, subtract_error = "minimal flux ", "mean+%.1fsigma" % subtract_error if subtract_error != 0.0 else 0.0
     else:
         print("Warning: Invalid subtype.")

     for header in headers:
-        header["BKG_TYPE"] = (sub_type,"Bkg estimation method used during reduction")
-        header["BKG_SUB"] = (subtract_error,"Amount of bkg subtracted from images")
+        header["BKG_TYPE"] = (sub_type, "Bkg estimation method used during reduction")
+        header["BKG_SUB"] = (subtract_error, "Amount of bkg subtracted from images")

     # Quadratically add uncertainties in the "correction factors" (see Kishimoto 1999)
     n_error_array = np.sqrt(err_wav**2 + err_psf**2 + err_flat**2 + c_error_bkg**2)
@@ -618,7 +629,12 @@ def rebin_array(data_array, error_array, headers, pxsize=2, scale="px", operatio

         # Compute binning ratio
         if scale.lower() in ["px", "pixel"]:
-            Dxy_arr[i] = np.array( [ pxsize, ] * 2)
+            Dxy_arr[i] = np.array(
+                [
+                    pxsize,
+                ]
+                * 2
+            )
             scale = "px"
         elif scale.lower() in ["arcsec", "arcseconds"]:
             Dxy_arr[i] = np.array(pxsize / np.abs(w.wcs.cdelt) / 3600.0)
@@ -662,7 +678,7 @@ def rebin_array(data_array, error_array, headers, pxsize=2, scale="px", operatio
         new_header["NAXIS1"], new_header["NAXIS2"] = nw.array_shape
         for key, val in nw.to_header().items():
             new_header.set(key, val)
-        new_header["SAMPLING"] = (str(pxsize)+scale, "Resampling performed during reduction")
+        new_header["SAMPLING"] = (str(pxsize) + scale, "Resampling performed during reduction")
         rebinned_headers.append(new_header)
         if data_mask is not None:
             data_mask = bin_ndarray(data_mask, new_shape=new_shape, operation="average") > 0.80
@@ -676,7 +692,9 @@ def rebin_array(data_array, error_array, headers, pxsize=2, scale="px", operatio
     return rebinned_data, rebinned_error, rebinned_headers, Dxy, data_mask


-def align_data(data_array, headers, error_array=None, data_mask=None, background=None, upsample_factor=1.0, ref_data=None, ref_center=None, return_shifts=False):
+def align_data(
+    data_array, headers, error_array=None, data_mask=None, background=None, upsample_factor=1.0, ref_data=None, ref_center=None, return_shifts=False
+):
     """
     Align images in data_array using cross correlation, and rescale them to
     wider images able to contain any rotation of the reference image.
@@ -757,7 +775,9 @@ def align_data(data_array, headers, error_array=None, data_mask=None, background
     if data_mask is None:
         full_array, err_array, full_headers = crop_array(full_array, full_headers, err_array, step=5, inside=False, null_val=0.0)
     else:
-        full_array, err_array, data_mask, full_headers = crop_array(full_array, full_headers, err_array, data_mask=data_mask, step=5, inside=False, null_val=0.0)
+        full_array, err_array, data_mask, full_headers = crop_array(
+            full_array, full_headers, err_array, data_mask=data_mask, step=5, inside=False, null_val=0.0
+        )

     data_array, ref_data, headers = full_array[:-1], full_array[-1], full_headers[:-1]
     error_array = err_array[:-1]
@@ -787,7 +807,7 @@ def align_data(data_array, headers, error_array=None, data_mask=None, background
     res_mask = np.zeros((res_shape, res_shape), dtype=bool)
     res_mask[res_shift[0] : res_shift[0] + shape[1], res_shift[1] : res_shift[1] + shape[2]] = True
     if data_mask is not None:
-        res_mask = np.logical_and(res_mask,zeropad(data_mask, (res_shape, res_shape)).astype(bool))
+        res_mask = np.logical_and(res_mask, zeropad(data_mask, (res_shape, res_shape)).astype(bool))

     shifts, errors = [], []
     for i, image in enumerate(data_array):
@@ -806,8 +826,8 @@ def align_data(data_array, headers, error_array=None, data_mask=None, background
         rescaled_image[i] = sc_shift(rescaled_image[i], shift, order=1, cval=0.0)
         rescaled_error[i] = sc_shift(rescaled_error[i], shift, order=1, cval=background[i])

-        curr_mask = sc_shift(res_mask*10., shift, order=1, cval=0.0)
-        curr_mask[curr_mask < curr_mask.max()*2./3.] = 0.0
+        curr_mask = sc_shift(res_mask * 10.0, shift, order=1, cval=0.0)
+        curr_mask[curr_mask < curr_mask.max() * 2.0 / 3.0] = 0.0
         rescaled_mask[i] = curr_mask.astype(bool)
         # mask_vertex = clean_ROI(curr_mask)
         # rescaled_mask[i, mask_vertex[2] : mask_vertex[3], mask_vertex[0] : mask_vertex[1]] = True
@@ -964,7 +984,7 @@ def smooth_data(data_array, error_array, data_mask, headers, FWHM=1.5, scale="pi
         raise ValueError("{} is not a valid smoothing option".format(smoothing))

     for header in headers:
-        header["SMOOTH"] = (" ".join([smoothing,FWHM_size,FWHM_scale]),"Smoothing method used during reduction")
+        header["SMOOTH"] = (" ".join([smoothing, FWHM_size, FWHM_scale]), "Smoothing method used during reduction")

     return smoothed, error

@@ -1193,11 +1213,11 @@ def compute_Stokes(data_array, error_array, data_mask, headers, FWHM=None, scale
             (yet)".format(instr)
         )
     rotate = np.zeros(len(headers))
-    for i,head in enumerate(headers):
+    for i, head in enumerate(headers):
         try:
-            rotate[i] = head['ROTATE']
+            rotate[i] = head["ROTATE"]
         except KeyError:
-            rotate[i] = 0.
+            rotate[i] = 0.0

     if (np.unique(rotate) == rotate[0]).all():
         theta = globals()["theta"] - rotate[0] * np.pi / 180.0
@@ -1231,8 +1251,8 @@ def compute_Stokes(data_array, error_array, data_mask, headers, FWHM=None, scale

     # Calculating correction factor: allows all pol_filt to share same exptime and inverse sensitivity (taken to be the one from POL0)
     corr = np.array([1.0 * h["photflam"] / h["exptime"] for h in pol_headers]) * pol_headers[0]["exptime"] / pol_headers[0]["photflam"]
-    pol_headers[1]['photflam'], pol_headers[1]['exptime'] = pol_headers[0]['photflam'], pol_headers[1]['exptime']
-    pol_headers[2]['photflam'], pol_headers[2]['exptime'] = pol_headers[0]['photflam'], pol_headers[2]['exptime']
+    pol_headers[1]["photflam"], pol_headers[1]["exptime"] = pol_headers[0]["photflam"], pol_headers[1]["exptime"]
+    pol_headers[2]["photflam"], pol_headers[2]["exptime"] = pol_headers[0]["photflam"], pol_headers[2]["exptime"]

     # Orientation and error for each polarizer
     # fmax = np.finfo(np.float64).max
@@ -1241,22 +1261,12 @@ def compute_Stokes(data_array, error_array, data_mask, headers, FWHM=None, scale
     coeff_stokes = np.zeros((3, 3))
     # Coefficients linking each polarizer flux to each Stokes parameter
     for i in range(3):
-        coeff_stokes[0, i] = (
-            pol_eff[(i + 1) % 3]
-            * pol_eff[(i + 2) % 3]
-            * np.sin(-2.0 * theta[(i + 1) % 3] + 2.0 * theta[(i + 2) % 3])
-            * 2.0
-            / transmit[i]
-        )
+        coeff_stokes[0, i] = pol_eff[(i + 1) % 3] * pol_eff[(i + 2) % 3] * np.sin(-2.0 * theta[(i + 1) % 3] + 2.0 * theta[(i + 2) % 3]) * 2.0 / transmit[i]
         coeff_stokes[1, i] = (
-            (-pol_eff[(i + 1) % 3] * np.sin(2.0 * theta[(i + 1) % 3]) + pol_eff[(i + 2) % 3] * np.sin(2.0 * theta[(i + 2) % 3]))
-            * 2.0
-            / transmit[i]
+            (-pol_eff[(i + 1) % 3] * np.sin(2.0 * theta[(i + 1) % 3]) + pol_eff[(i + 2) % 3] * np.sin(2.0 * theta[(i + 2) % 3])) * 2.0 / transmit[i]
         )
         coeff_stokes[2, i] = (
-            (pol_eff[(i + 1) % 3] * np.cos(2.0 * theta[(i + 1) % 3]) - pol_eff[(i + 2) % 3] * np.cos(2.0 * theta[(i + 2) % 3]))
-            * 2.0
-            / transmit[i]
+            (pol_eff[(i + 1) % 3] * np.cos(2.0 * theta[(i + 1) % 3]) - pol_eff[(i + 2) % 3] * np.cos(2.0 * theta[(i + 2) % 3])) * 2.0 / transmit[i]
         )

     # Normalization parameter for Stokes parameters computation
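The collapsed one-liners implement the usual three-polarizer Stokes inversion. As a sanity check they can be evaluated for ideal polarizers (efficiency 1, transmission 1) at 0, 60 and 120 degrees, where the intensity row reduces to equal weights of 2/3 per filter; the normalization `N` below is an assumption chosen here so that unpolarized flux is recovered, the pipeline computes its own normalization separately:

```python
import numpy as np

# Assumed ideal-polarizer values for illustration only.
pol_eff = np.ones(3)
transmit = np.ones(3)
theta = np.deg2rad([0.0, 60.0, 120.0])

coeff = np.zeros((3, 3))
for i in range(3):
    coeff[0, i] = pol_eff[(i + 1) % 3] * pol_eff[(i + 2) % 3] * np.sin(-2.0 * theta[(i + 1) % 3] + 2.0 * theta[(i + 2) % 3]) * 2.0 / transmit[i]
    coeff[1, i] = (-pol_eff[(i + 1) % 3] * np.sin(2.0 * theta[(i + 1) % 3]) + pol_eff[(i + 2) % 3] * np.sin(2.0 * theta[(i + 2) % 3])) * 2.0 / transmit[i]
    coeff[2, i] = (pol_eff[(i + 1) % 3] * np.cos(2.0 * theta[(i + 1) % 3]) - pol_eff[(i + 2) % 3] * np.cos(2.0 * theta[(i + 2) % 3])) * 2.0 / transmit[i]

N = coeff[0].sum() / 2.0  # normalization such that I = sum(coeff[0] * f) / N recovers unpolarized flux
print(coeff[0] / N)       # ~ [0.667, 0.667, 0.667]: I = (2/3) * (f0 + f60 + f120)
```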
@@ -1348,11 +1358,7 @@ def compute_Stokes(data_array, error_array, data_mask, headers, FWHM=None, scale
         / N
         * (
             np.cos(2.0 * theta[0]) * (pol_flux_corr[1] - pol_flux_corr[2])
-            - (
-                pol_eff[2] * np.cos(-2.0 * theta[2] + 2.0 * theta[0])
-                - pol_eff[1] * np.cos(-2.0 * theta[0] + 2.0 * theta[1])
-            )
-            * Q_stokes
+            - (pol_eff[2] * np.cos(-2.0 * theta[2] + 2.0 * theta[0]) - pol_eff[1] * np.cos(-2.0 * theta[0] + 2.0 * theta[1])) * Q_stokes
             + coeff_stokes_corr[1, 0] * (np.sin(2.0 * theta[0]) * Q_stokes - np.cos(2 * theta[0]) * U_stokes)
         )
     )
@@ -1362,11 +1368,7 @@ def compute_Stokes(data_array, error_array, data_mask, headers, FWHM=None, scale
         / N
         * (
             np.cos(2.0 * theta[1]) * (pol_flux_corr[2] - pol_flux_corr[0])
-            - (
-                pol_eff[0] * np.cos(-2.0 * theta[0] + 2.0 * theta[1])
-                - pol_eff[2] * np.cos(-2.0 * theta[1] + 2.0 * theta[2])
-            )
-            * Q_stokes
+            - (pol_eff[0] * np.cos(-2.0 * theta[0] + 2.0 * theta[1]) - pol_eff[2] * np.cos(-2.0 * theta[1] + 2.0 * theta[2])) * Q_stokes
             + coeff_stokes_corr[1, 1] * (np.sin(2.0 * theta[1]) * Q_stokes - np.cos(2 * theta[1]) * U_stokes)
         )
     )
@@ -1376,11 +1378,7 @@ def compute_Stokes(data_array, error_array, data_mask, headers, FWHM=None, scale
         / N
         * (
             np.cos(2.0 * theta[2]) * (pol_flux_corr[0] - pol_flux_corr[1])
-            - (
-                pol_eff[1] * np.cos(-2.0 * theta[1] + 2.0 * theta[2])
-                - pol_eff[0] * np.cos(-2.0 * theta[2] + 2.0 * theta[0])
-            )
-            * Q_stokes
+            - (pol_eff[1] * np.cos(-2.0 * theta[1] + 2.0 * theta[2]) - pol_eff[0] * np.cos(-2.0 * theta[2] + 2.0 * theta[0])) * Q_stokes
             + coeff_stokes_corr[1, 2] * (np.sin(2.0 * theta[2]) * Q_stokes - np.cos(2 * theta[2]) * U_stokes)
         )
     )
@@ -1392,11 +1390,7 @@ def compute_Stokes(data_array, error_array, data_mask, headers, FWHM=None, scale
         / N
         * (
             np.sin(2.0 * theta[0]) * (pol_flux_corr[1] - pol_flux_corr[2])
-            - (
-                pol_eff[2] * np.cos(-2.0 * theta[2] + 2.0 * theta[0])
-                - pol_eff[1] * np.cos(-2.0 * theta[0] + 2.0 * theta[1])
-            )
-            * U_stokes
+            - (pol_eff[2] * np.cos(-2.0 * theta[2] + 2.0 * theta[0]) - pol_eff[1] * np.cos(-2.0 * theta[0] + 2.0 * theta[1])) * U_stokes
             + coeff_stokes_corr[2, 0] * (np.sin(2.0 * theta[0]) * Q_stokes - np.cos(2 * theta[0]) * U_stokes)
         )
     )
@@ -1406,11 +1400,7 @@ def compute_Stokes(data_array, error_array, data_mask, headers, FWHM=None, scale
             / N
             * (
                 np.sin(2.0 * theta[1]) * (pol_flux_corr[2] - pol_flux_corr[0])
-                - (
-                    pol_eff[0] * np.cos(-2.0 * theta[0] + 2.0 * theta[1])
-                    - pol_eff[2] * np.cos(-2.0 * theta[1] + 2.0 * theta[2])
-                )
-                * U_stokes
+                - (pol_eff[0] * np.cos(-2.0 * theta[0] + 2.0 * theta[1]) - pol_eff[2] * np.cos(-2.0 * theta[1] + 2.0 * theta[2])) * U_stokes
                 + coeff_stokes_corr[2, 1] * (np.sin(2.0 * theta[1]) * Q_stokes - np.cos(2 * theta[1]) * U_stokes)
             )
         )
@@ -1420,11 +1410,7 @@ def compute_Stokes(data_array, error_array, data_mask, headers, FWHM=None, scale
             / N
             * (
                 np.sin(2.0 * theta[2]) * (pol_flux_corr[0] - pol_flux_corr[1])
-                - (
-                    pol_eff[1] * np.cos(-2.0 * theta[1] + 2.0 * theta[2])
-                    - pol_eff[0] * np.cos(-2.0 * theta[2] + 2.0 * theta[0])
-                )
-                * U_stokes
+                - (pol_eff[1] * np.cos(-2.0 * theta[1] + 2.0 * theta[2]) - pol_eff[0] * np.cos(-2.0 * theta[2] + 2.0 * theta[0])) * U_stokes
                 + coeff_stokes_corr[2, 2] * (np.sin(2.0 * theta[2]) * Q_stokes - np.cos(2 * theta[2]) * U_stokes)
             )
         )
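The six hunks above only reflow the Q- and U-covariance correction terms; the algebra underneath is the usual inversion of three polarizer fluxes into Stokes I, Q, U, with the (i + 1) % 3 and (i + 2) % 3 indices cycling through the three filters. For reference, a minimal sketch of the ideal case that the pol_eff, theta and transmit factors generalize (perfect polarizers nominally at 0°, 60° and 120° with unit transmission; names and angles here are illustrative, not the pipeline's calibration values):

    import numpy as np

    def ideal_three_pol_stokes(f0, f60, f120):
        """Invert fluxes through ideal polarizers at 0, 60 and 120 degrees.

        Each flux is f(theta) = (I + Q * cos(2 * theta) + U * sin(2 * theta)) / 2,
        so three measurements determine I, Q and U exactly.
        """
        I = 2.0 / 3.0 * (f0 + f60 + f120)
        Q = 2.0 / 3.0 * (2.0 * f0 - f60 - f120)
        U = 2.0 / np.sqrt(3.0) * (f60 - f120)
        return I, Q, U

The correction terms in the hunks account for the filters' real efficiencies and angles, which is why each row mixes Q_stokes and U_stokes back into the estimate.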
@@ -1451,26 +1437,38 @@ def compute_Stokes(data_array, error_array, data_mask, headers, FWHM=None, scale
         all_Q_stokes = np.zeros((np.unique(rotate).size, data_array.shape[1], data_array.shape[2]))
         all_U_stokes = np.zeros((np.unique(rotate).size, data_array.shape[1], data_array.shape[2]))
         all_Stokes_cov = np.zeros((np.unique(rotate).size, 3, 3, data_array.shape[1], data_array.shape[2]))
-        all_header_stokes = [{},]*np.unique(rotate).size
+        all_header_stokes = [
+            {},
+        ] * np.unique(rotate).size

-        for i,rot in enumerate(np.unique(rotate)):
-            rot_mask = (rotate == rot)
-            all_I_stokes[i], all_Q_stokes[i], all_U_stokes[i], all_Stokes_cov[i], all_header_stokes[i] = compute_Stokes(data_array[rot_mask], error_array[rot_mask], data_mask, [headers[i] for i in np.arange(len(headers))[rot_mask]], FWHM=FWHM, scale=scale, smoothing=smoothing, transmitcorr=transmitcorr, integrate=False)
-        all_exp = np.array([float(h['exptime']) for h in all_header_stokes])
+        for i, rot in enumerate(np.unique(rotate)):
+            rot_mask = rotate == rot
+            all_I_stokes[i], all_Q_stokes[i], all_U_stokes[i], all_Stokes_cov[i], all_header_stokes[i] = compute_Stokes(
+                data_array[rot_mask],
+                error_array[rot_mask],
+                data_mask,
+                [headers[i] for i in np.arange(len(headers))[rot_mask]],
+                FWHM=FWHM,
+                scale=scale,
+                smoothing=smoothing,
+                transmitcorr=transmitcorr,
+                integrate=False,
+            )
+        all_exp = np.array([float(h["exptime"]) for h in all_header_stokes])

-        I_stokes = np.sum([exp*I for exp, I in zip(all_exp, all_I_stokes)],axis=0) / all_exp.sum()
-        Q_stokes = np.sum([exp*Q for exp, Q in zip(all_exp, all_Q_stokes)],axis=0) / all_exp.sum()
-        U_stokes = np.sum([exp*U for exp, U in zip(all_exp, all_U_stokes)],axis=0) / all_exp.sum()
+        I_stokes = np.sum([exp * I for exp, I in zip(all_exp, all_I_stokes)], axis=0) / all_exp.sum()
+        Q_stokes = np.sum([exp * Q for exp, Q in zip(all_exp, all_Q_stokes)], axis=0) / all_exp.sum()
+        U_stokes = np.sum([exp * U for exp, U in zip(all_exp, all_U_stokes)], axis=0) / all_exp.sum()
         Stokes_cov = np.zeros((3, 3, I_stokes.shape[0], I_stokes.shape[1]))
         for i in range(3):
-            Stokes_cov[i,i] = np.sum([exp**2*cov for exp, cov in zip(all_exp, all_Stokes_cov[:,i,i])], axis=0) / all_exp.sum()**2
-            for j in [x for x in range(3) if x!=i]:
-                Stokes_cov[i,j] = np.sqrt(np.sum([exp**2*cov**2 for exp, cov in zip(all_exp, all_Stokes_cov[:,i,j])], axis=0) / all_exp.sum()**2)
-                Stokes_cov[j,i] = np.sqrt(np.sum([exp**2*cov**2 for exp, cov in zip(all_exp, all_Stokes_cov[:,j,i])], axis=0) / all_exp.sum()**2)
+            Stokes_cov[i, i] = np.sum([exp**2 * cov for exp, cov in zip(all_exp, all_Stokes_cov[:, i, i])], axis=0) / all_exp.sum() ** 2
+            for j in [x for x in range(3) if x != i]:
+                Stokes_cov[i, j] = np.sqrt(np.sum([exp**2 * cov**2 for exp, cov in zip(all_exp, all_Stokes_cov[:, i, j])], axis=0) / all_exp.sum() ** 2)
+                Stokes_cov[j, i] = np.sqrt(np.sum([exp**2 * cov**2 for exp, cov in zip(all_exp, all_Stokes_cov[:, j, i])], axis=0) / all_exp.sum() ** 2)

         # Save values to single header
         header_stokes = all_header_stokes[0]
-        header_stokes['exptime'] = all_exp.sum()
+        header_stokes["exptime"] = all_exp.sum()

         # Nan handling :
         fmax = np.finfo(np.float64).max
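The per-rotation Stokes maps are combined here as an exposure-time weighted mean, with the diagonal covariance terms following the standard propagation Var(sum(w_i * x_i) / sum(w)) = sum(w_i**2 * var_i) / sum(w)**2; the off-diagonal terms are combined in quadrature (root of the weighted sum of squares) rather than linearly. A condensed sketch of the same weighting on plain arrays (the function name and shapes are illustrative, not the pipeline's API):

    import numpy as np

    def expweight_combine(maps, variances, exptimes):
        """Exposure-weighted mean of per-observation maps with variance propagation.

        maps, variances: arrays of shape (n_obs, ny, nx); exptimes: shape (n_obs,).
        """
        w = np.asarray(exptimes, dtype=float)
        mean = np.tensordot(w, maps, axes=1) / w.sum()
        # Var(sum(w_i * x_i) / sum(w)) = sum(w_i**2 * var_i) / sum(w)**2
        var = np.tensordot(w**2, variances, axes=1) / w.sum() ** 2
        return mean, var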
@@ -1681,12 +1679,7 @@ def rotate_Stokes(I_stokes, Q_stokes, U_stokes, Stokes_cov, data_mask, header_st
                 U_stokes[i, j] = eps * np.sqrt(Stokes_cov[2, 2][i, j])

     # Rotate I_stokes, Q_stokes, U_stokes using rotation matrix
-    # ang = np.zeros((len(headers),))
-    # for i, head in enumerate(headers):
-    #     pc = WCS(head).celestial.wcs.pc[0,0]
-    #     ang[i] = -np.arccos(WCS(head).celestial.wcs.pc[0,0]) * 180.0 / np.pi if np.abs(pc) < 1. else 0.
-    pc = WCS(header_stokes).celestial.wcs.pc[0,0]
-    ang = -np.arccos(WCS(header_stokes).celestial.wcs.pc[0,0]) * 180.0 / np.pi if np.abs(pc) < 1. else 0.
+    ang = -float(header_stokes["ORIENTAT"])
     alpha = np.pi / 180.0 * ang
     mrot = np.array([[1.0, 0.0, 0.0], [0.0, np.cos(2.0 * alpha), np.sin(2.0 * alpha)], [0, -np.sin(2.0 * alpha), np.cos(2.0 * alpha)]])
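Reading the angle directly from the ORIENTAT keyword replaces the arccos-of-PC heuristic, which was only defined for |pc[0, 0]| < 1 and could not recover the sign of the angle. The mrot built here is the standard Stokes rotation: I is invariant while Q and U mix through twice the frame angle, Q' = Q cos 2α + U sin 2α and U' = −Q sin 2α + U cos 2α. A standalone sketch with an illustrative angle:

    import numpy as np

    def rotate_stokes_vector(I, Q, U, ang_deg):
        """Rotate the (Q, U) reference frame by ang_deg degrees; I is unchanged."""
        a = np.deg2rad(ang_deg)
        mrot = np.array(
            [
                [1.0, 0.0, 0.0],
                [0.0, np.cos(2.0 * a), np.sin(2.0 * a)],
                [0.0, -np.sin(2.0 * a), np.cos(2.0 * a)],
            ]
        )
        return mrot @ np.array([I, Q, U])

    # A 90 degree frame rotation flips the sign of both Q and U:
    print(rotate_stokes_vector(1.0, 0.1, 0.05, 90.0))  # ~[1.0, -0.1, -0.05]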
@@ -1709,7 +1702,7 @@ def rotate_Stokes(I_stokes, Q_stokes, U_stokes, Stokes_cov, data_mask, header_st
     new_Q_stokes = sc_rotate(Q_stokes, ang, order=1, reshape=False, cval=0.0)
     new_U_stokes = sc_rotate(U_stokes, ang, order=1, reshape=False, cval=0.0)
     new_data_mask = sc_rotate(data_mask.astype(float) * 10.0, ang, order=1, reshape=False, cval=0.0)
-    new_data_mask[new_data_mask < 2] = 0.0
+    new_data_mask[new_data_mask < 1.0] = 0.0
     new_data_mask = new_data_mask.astype(bool)
     for i in range(3):
         for j in range(3):
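Scaling the boolean mask by 10 before the bilinear rotation turns it into a weight map, so the cut applied afterwards sets how much of each rotated pixel must come from valid data: the previous threshold of 2 demanded at least 20 % valid support, the new 1.0 keeps anything above 10 %. A small demonstration of the same trick (assuming, as the call signature suggests, that sc_rotate is scipy.ndimage.rotate):

    import numpy as np
    from scipy.ndimage import rotate as sc_rotate

    mask = np.zeros((8, 8), dtype=bool)
    mask[2:6, 2:6] = True  # a square patch of valid pixels

    rotated = sc_rotate(mask.astype(float) * 10.0, 30.0, order=1, reshape=False, cval=0.0)
    # Bilinear interpolation smears the mask edge; keep >= 10 % valid support.
    rotated[rotated < 1.0] = 0.0
    new_mask = rotated.astype(bool)
    print(new_mask.sum(), "valid pixels after a 30 degree rotation")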
@@ -1725,7 +1718,6 @@ def rotate_Stokes(I_stokes, Q_stokes, U_stokes, Stokes_cov, data_mask, header_st
     mrot = np.array([[np.cos(-alpha), -np.sin(-alpha)], [np.sin(-alpha), np.cos(-alpha)]])

     new_header_stokes = deepcopy(header_stokes)
-    new_header_stokes["orientat"] = header_stokes["orientat"] + ang
     new_wcs = WCS(header_stokes).celestial.deepcopy()

     new_wcs.wcs.pc = np.dot(mrot, new_wcs.wcs.pc)
@@ -1737,7 +1729,7 @@ def rotate_Stokes(I_stokes, Q_stokes, U_stokes, Stokes_cov, data_mask, header_st
         new_header_stokes.set("PC1_1", 1.0)
     if new_wcs.wcs.pc[1, 1] == 1.0:
         new_header_stokes.set("PC2_2", 1.0)
-    new_header_stokes["orientat"] = header_stokes["orientat"] + ang
+    new_header_stokes["ORIENTAT"] += ang

     # Nan handling :
     fmax = np.finfo(np.float64).max
@@ -1824,7 +1816,7 @@ def rotate_data(data_array, error_array, data_mask, headers):
     new_data_array = []
     new_error_array = []
     new_data_mask = []
-    for i,header in zip(range(data_array.shape[0]),headers):
+    for i, header in zip(range(data_array.shape[0]), headers):
         ang = -float(header["ORIENTAT"])
         alpha = ang * np.pi / 180.0
@@ -1842,14 +1834,14 @@ def rotate_data(data_array, error_array, data_mask, headers):
         new_wcs.wcs.set()
         for key, val in new_wcs.to_header().items():
             new_header[key] = val
-        new_header["ORIENTAT"] = np.arccos(new_wcs.celestial.wcs.pc[0,0]) * 180.0 / np.pi
+        new_header["ORIENTAT"] = np.arccos(new_wcs.celestial.wcs.pc[0, 0]) * 180.0 / np.pi
         new_header["ROTATE"] = ang
         new_headers.append(new_header)

     new_data_array = np.array(new_data_array)
     new_error_array = np.array(new_error_array)
     new_data_mask = np.array(new_data_mask).sum(axis=0)
-    new_data_mask[new_data_mask < new_data_mask.max()*2./3.] = 0.0
+    new_data_mask[new_data_mask < 1.0] = 0.0
     new_data_mask = new_data_mask.astype(bool)

     for i in range(new_data_array.shape[0]):
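The ORIENTAT bookkeeping in this hunk recovers the rotation angle from arccos(pc[0, 0]), which is always non-negative. A quick sketch of composing a rotation into a WCS PC matrix and reading it back, mirroring the mrot update in rotate_Stokes and rotate_data (values are illustrative):

    import numpy as np
    from astropy.wcs import WCS

    w = WCS(naxis=2)  # PC matrix starts as the identity
    alpha = np.deg2rad(-35.0)
    mrot = np.array([[np.cos(-alpha), -np.sin(-alpha)], [np.sin(-alpha), np.cos(-alpha)]])
    w.wcs.pc = np.dot(mrot, w.wcs.pc)
    w.wcs.set()
    print(np.degrees(np.arccos(w.wcs.pc[0, 0])))  # 35.0 -- the sign is lost

Since arccos alone cannot tell +35° from −35°, the next hunk adds a wcs_PA helper that uses a second PC coefficient to recover the sign.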
@@ -45,3 +45,18 @@ def sci_not(v, err, rnd=1, out=str):
             return output[0] + r")e{0}".format(-power)
         else:
             return *output[1:], -power
+
+
+def wcs_PA(PC21, PC22):
+    """
+    Return the position angle in degrees relative to North of a WCS
+    from the coefficients of its transformation matrix.
+    """
+    if (abs(PC21) > abs(PC22)) and (PC21 >= 0):
+        orient = -np.arccos(PC22) * 180.0 / np.pi
+    elif (abs(PC21) > abs(PC22)) and (PC21 < 0):
+        orient = np.arccos(PC22) * 180.0 / np.pi
+    elif (abs(PC21) < abs(PC22)) and (PC22 >= 0):
+        orient = np.arccos(PC22) * 180.0 / np.pi
+    elif (abs(PC21) < abs(PC22)) and (PC22 < 0):
+        orient = -np.arccos(PC22) * 180.0 / np.pi
+    return orient
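A quick check of wcs_PA with a pure rotation matrix (the import path is an assumption based on the hunk placing it next to sci_not; the sign follows this module's convention):

    import numpy as np
    from lib.utils import wcs_PA  # assumed module path for the helper above

    a = np.deg2rad(70.0)
    pc = np.array([[np.cos(a), -np.sin(a)], [np.sin(a), np.cos(a)]])  # rotation by +70 deg
    print(wcs_PA(pc[1, 0], pc[1, 1]))  # -70.0: |PC2_1| > |PC2_2| and PC2_1 >= 0

Note that an exact ±45° rotation makes the two coefficients equal in magnitude, matches none of the four branches, and would leave orient unset.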