Merge branch 'test' into main
package/Combine.py (new executable file, 237 lines)
@@ -0,0 +1,237 @@
#!/usr/bin/python
# -*- coding:utf-8 -*-
# Project libraries

import numpy as np


def same_reduction(infiles):
    """
    Test whether infiles are pipeline products reduced with the same parameters.
    """
    from astropy.io.fits import open as fits_open
    from astropy.wcs import WCS

    params = {"IQU": [], "ROT": [], "SIZE": [], "TARGNAME": [], "BKG_SUB": [], "SAMPLING": [], "SMOOTH": []}
    for file in infiles:
        with fits_open(file) as f:
            # test for presence of I, Q, U images
            datatype = []
            for hdu in f:
                try:
                    datatype.append(hdu.header["datatype"])
                except KeyError:
                    pass
            test_IQU = True
            for look in ["I_stokes", "Q_stokes", "U_stokes", "IQU_cov_matrix"]:
                test_IQU *= look in datatype
            params["IQU"].append(test_IQU)
            # test for orientation and pixel size
            wcs = WCS(f[0].header).celestial
            if wcs.wcs.has_cd() or (wcs.wcs.cdelt[:2] == np.array([1.0, 1.0])).all():
                cdelt = np.linalg.eig(wcs.wcs.cd)[0]
                pc = np.dot(wcs.wcs.cd, np.diag(1.0 / cdelt))
            else:
                cdelt = wcs.wcs.cdelt
                pc = wcs.wcs.pc
            params["ROT"].append(np.round(np.arccos(pc[0, 0]), 2) if np.abs(pc[0, 0]) < 1.0 else 0.0)
            params["SIZE"].append(np.round(np.max(np.abs(cdelt * 3600.0)), 2))
            # look for information on reduction procedure
            for key in [k for k in params.keys() if k not in ["IQU", "ROT", "SIZE"]]:
                try:
                    params[key].append(f[0].header[key])
                except KeyError:
                    params[key].append("null")
    result = np.all(params["IQU"])
    for key in [k for k in params.keys() if k != "IQU"]:
        result *= np.unique(params[key]).size == 1
    if np.all(params["IQU"]) and not result:
        print(np.unique(params["SIZE"]))
        raise ValueError("Not all observations were reduced with the same parameters, please provide the raw files.")

    return result


def same_obs(infiles, data_folder):
    """
    Group infiles by observation: same proposal ID and exposure start dates within a 7-day window.
    """

    import astropy.units as u
    from astropy.io.fits import getheader
    from astropy.table import Table
    from astropy.time import Time, TimeDelta

    headers = [getheader("/".join([data_folder, file])) for file in infiles]
    files = {}
    files["PROPOSID"] = np.array([str(head["PROPOSID"]) for head in headers], dtype=str)
    files["ROOTNAME"] = np.array([head["ROOTNAME"].lower() + "_c0f.fits" for head in headers], dtype=str)
    files["EXPSTART"] = np.array([Time(head["EXPSTART"], format="mjd") for head in headers])
    products = Table(files)

    new_infiles = []
    for pid in np.unique(products["PROPOSID"]):
        obs = products[products["PROPOSID"] == pid].copy()
        close_date = np.unique(
            [[np.abs(TimeDelta(obs["EXPSTART"][i].unix - date.unix, format="sec")) < 7.0 * u.d for i in range(len(obs))] for date in obs["EXPSTART"]], axis=0
        )
        if len(close_date) > 1:
            for date in close_date:
                new_infiles.append(list(products["ROOTNAME"][np.any([products["ROOTNAME"] == dataset for dataset in obs["ROOTNAME"][date]], axis=0)]))
        else:
            new_infiles.append(list(products["ROOTNAME"][products["PROPOSID"] == pid]))
    return new_infiles


def combine_Stokes(infiles):
    """
    Combine I, Q, U from different observations of the same object.
    """
    from astropy.io.fits import open as fits_open
    from lib.reduction import align_data, zeropad
    from scipy.ndimage import shift as sc_shift

    I_array, Q_array, U_array, IQU_cov_array, data_mask, headers = [], [], [], [], [], []
    shape = np.array([0, 0])
    for file in infiles:
        with fits_open(file) as f:
            headers.append(f[0].header)
            I_array.append(f["I_stokes"].data)
            Q_array.append(f["Q_stokes"].data)
            U_array.append(f["U_stokes"].data)
            IQU_cov_array.append(f["IQU_cov_matrix"].data)
            data_mask.append(f["data_mask"].data.astype(bool))
            shape[0] = np.max([shape[0], f["I_stokes"].data.shape[0]])
            shape[1] = np.max([shape[1], f["I_stokes"].data.shape[1]])

    exposure_array = np.array([float(head["EXPTIME"]) for head in headers])

    shape += np.array([5, 5])
    data_mask = np.sum([zeropad(mask, shape) for mask in data_mask], axis=0).astype(bool)
    I_array = np.array([zeropad(I, shape) for I in I_array])
    Q_array = np.array([zeropad(Q, shape) for Q in Q_array])
    U_array = np.array([zeropad(U, shape) for U in U_array])
    IQU_cov_array = np.array([[[zeropad(cov[i, j], shape) for j in range(3)] for i in range(3)] for cov in IQU_cov_array])

    sI_array = np.sqrt(IQU_cov_array[:, 0, 0])
    sQ_array = np.sqrt(IQU_cov_array[:, 1, 1])
    sU_array = np.sqrt(IQU_cov_array[:, 2, 2])

    _, _, _, _, shifts, errors = align_data(I_array, headers, error_array=sI_array, data_mask=data_mask, ref_center="center", return_shifts=True)
    data_mask_aligned = np.sum([sc_shift(data_mask, s, order=1, cval=0.0) for s in shifts], axis=0).astype(bool)
    I_aligned, sI_aligned = (
        np.array([sc_shift(I, s, order=1, cval=0.0) for I, s in zip(I_array, shifts)]),
        np.array([sc_shift(sI, s, order=1, cval=0.0) for sI, s in zip(sI_array, shifts)]),
    )
    Q_aligned, sQ_aligned = (
        np.array([sc_shift(Q, s, order=1, cval=0.0) for Q, s in zip(Q_array, shifts)]),
        np.array([sc_shift(sQ, s, order=1, cval=0.0) for sQ, s in zip(sQ_array, shifts)]),
    )
    U_aligned, sU_aligned = (
        np.array([sc_shift(U, s, order=1, cval=0.0) for U, s in zip(U_array, shifts)]),
        np.array([sc_shift(sU, s, order=1, cval=0.0) for sU, s in zip(sU_array, shifts)]),
    )
    IQU_cov_aligned = np.array([[[sc_shift(cov[i, j], s, order=1, cval=0.0) for j in range(3)] for i in range(3)] for cov, s in zip(IQU_cov_array, shifts)])

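    # The combination below is an exposure-time-weighted mean of the aligned maps.
    # A sketch of the propagation it implies, assuming independent observations k
    # with exposure times t_k:
    #     X_combined = sum_k(t_k * X_k) / sum_k(t_k)              for X in (I, Q, U)
    #     Var[X_combined] = sum_k(t_k**2 * Var[X_k]) / (sum_k(t_k))**2
    # which is what the diagonal covariance terms implement; the off-diagonal
    # covariance terms are combined in quadrature with the same weights.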
    I_combined = np.sum([exp * I for exp, I in zip(exposure_array, I_aligned)], axis=0) / exposure_array.sum()
    Q_combined = np.sum([exp * Q for exp, Q in zip(exposure_array, Q_aligned)], axis=0) / exposure_array.sum()
    U_combined = np.sum([exp * U for exp, U in zip(exposure_array, U_aligned)], axis=0) / exposure_array.sum()

    IQU_cov_combined = np.zeros((3, 3, shape[0], shape[1]))
    for i in range(3):
        IQU_cov_combined[i, i] = np.sum([exp**2 * cov for exp, cov in zip(exposure_array, IQU_cov_aligned[:, i, i])], axis=0) / exposure_array.sum() ** 2
        for j in [x for x in range(3) if x != i]:
            IQU_cov_combined[i, j] = np.sqrt(
                np.sum([exp**2 * cov**2 for exp, cov in zip(exposure_array, IQU_cov_aligned[:, i, j])], axis=0) / exposure_array.sum() ** 2
            )
            IQU_cov_combined[j, i] = np.sqrt(
                np.sum([exp**2 * cov**2 for exp, cov in zip(exposure_array, IQU_cov_aligned[:, j, i])], axis=0) / exposure_array.sum() ** 2
            )

    header_combined = headers[0]
    header_combined["EXPTIME"] = exposure_array.sum()

    return I_combined, Q_combined, U_combined, IQU_cov_combined, data_mask_aligned, header_combined


def main(infiles, target=None, output_dir="./data/"):
    """
    Combine the reduced Stokes products in infiles into a single polarization map of the target.
    """
    from lib.fits import save_Stokes
    from lib.plots import pol_map
    from lib.reduction import compute_pol, rotate_Stokes

    if target is None:
        target = input("Target name:\n>")

    prod = np.array([["/".join(filepath.split("/")[:-1]), filepath.split("/")[-1]] for filepath in infiles], dtype=str)
    data_folder = prod[0][0]
    files = [p[1] for p in prod]

    # Reduction parameters
    kwargs = {}
    # Polarization map output
    kwargs["SNRp_cut"] = 3.0
    kwargs["SNRi_cut"] = 1.0
    kwargs["flux_lim"] = 1e-19, 3e-17
    kwargs["scale_vec"] = 5
    kwargs["step_vec"] = 1

    if not same_reduction(infiles):
        from FOC_reduction import main as FOC_reduction

        grouped_infiles = same_obs(files, data_folder)

        new_infiles = []
        for i, group in enumerate(grouped_infiles):
            new_infiles.append(
                FOC_reduction(target=target + "-" + str(i + 1), infiles=["/".join([data_folder, file]) for file in group], interactive=True)[0]
            )

        infiles = new_infiles

    I_combined, Q_combined, U_combined, IQU_cov_combined, data_mask_combined, header_combined = combine_Stokes(infiles=infiles)
    I_combined, Q_combined, U_combined, IQU_cov_combined, data_mask_combined, header_combined = rotate_Stokes(
        I_stokes=I_combined, Q_stokes=Q_combined, U_stokes=U_combined, Stokes_cov=IQU_cov_combined, data_mask=data_mask_combined, header_stokes=header_combined
    )

    P, debiased_P, s_P, s_P_P, PA, s_PA, s_PA_P = compute_pol(
        I_stokes=I_combined, Q_stokes=Q_combined, U_stokes=U_combined, Stokes_cov=IQU_cov_combined, header_stokes=header_combined
    )
    filename = header_combined["FILENAME"]
    figname = "_".join([target, filename[filename.find("FOC_") :], "combined"])
    Stokes_combined = save_Stokes(
        I_stokes=I_combined,
        Q_stokes=Q_combined,
        U_stokes=U_combined,
        Stokes_cov=IQU_cov_combined,
        P=P,
        debiased_P=debiased_P,
        s_P=s_P,
        s_P_P=s_P_P,
        PA=PA,
        s_PA=s_PA,
        s_PA_P=s_PA_P,
        header_stokes=header_combined,
        data_mask=data_mask_combined,
        filename=figname,
        data_folder=data_folder,
        return_hdul=True,
    )

    pol_map(Stokes_combined, **kwargs)

    return "/".join([data_folder, figname + ".fits"])


if __name__ == "__main__":
    import argparse

    parser = argparse.ArgumentParser(description="Combine different observations of a single object")
    parser.add_argument("-t", "--target", metavar="targetname", required=False, help="the name of the target", type=str, default=None)
    parser.add_argument("-f", "--files", metavar="path", required=False, nargs="*", help="the full or relative path to the data products", default=None)
    parser.add_argument(
        "-o", "--output_dir", metavar="directory_path", required=False, help="output directory path for the data products", type=str, default="./data"
    )
    args = parser.parse_args()
    exitcode = main(target=args.target, infiles=args.files, output_dir=args.output_dir)
    print("Written to: ", exitcode)
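# Example invocation (hypothetical target and file names, assuming the reduced
# Stokes products already sit in ./data):
#     python package/Combine.py -t "NGC1068" -f ./data/obs1_FOC_combined.fits ./data/obs2_FOC_combined.fits -o ./data
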
@@ -22,16 +22,17 @@ from lib.utils import sci_not, princ_angle


 def main(target=None, proposal_id=None, data_dir=None, infiles=None, output_dir="./data", crop=False, interactive=False):
     # Reduction parameters
     # Deconvolution
     deconvolve = False
     if deconvolve:
         # from lib.deconvolve import from_file_psf
-        psf = 'gaussian' # Can be user-defined as well
+        psf = "gaussian" # Can be user-defined as well
         # psf = from_file_psf(data_folder+psf_file)
         psf_FWHM = 3.1
-        psf_scale = 'px'
+        psf_scale = "px"
         psf_shape = None # (151, 151)
         iterations = 1
         algo = "conjgrad"
@@ -40,18 +41,20 @@ def main(target=None, proposal_id=None, data_dir=None, infiles=None, output_dir=
     display_crop = False

     # Background estimation
-    error_sub_type = 'freedman-diaconis' # sqrt, sturges, rice, scott, freedman-diaconis (default) or shape (example (51, 51))
-    subtract_error = 0.01
+    error_sub_type = "freedman-diaconis" # sqrt, sturges, rice, scott, freedman-diaconis (default) or shape (example (51, 51))
+    subtract_error = 1.0

     display_bkg = False

     # Data binning
-    rebin = True
     pxsize = 2
-    px_scale = 'px' # pixel, arcsec or full
-    rebin_operation = 'sum' # sum or average
+    pxscale = "px" # pixel, arcsec or full
+    rebin_operation = "sum" # sum or average

     # Alignement
-    align_center = 'center' # If None will not align the images
+    align_center = "center" # If None will not align the images

     display_align = False
     display_data = False
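For reference, the histogram rules named in the error_sub_type comment above fix the bin count from the sample size n; assuming the standard definitions (they are spelled out further down, in the background-estimation library hunks): square-root k = ceil(sqrt(n)); Sturges k = ceil(log2 n) + 1; Rice k = 2 * ceil(n^(1/3)); Scott bin width h = 3.5 * sigma / n^(1/3); Freedman-Diaconis bin width h = 2 * IQR / n^(1/3), with k = (max - min) / h for the last two.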
@@ -59,19 +62,18 @@ def main(target=None, proposal_id=None, data_dir=None, infiles=None, output_dir=
     transmitcorr = True

     # Smoothing
-    smoothing_function = 'combine' # gaussian_after, weighted_gaussian_after, gaussian, weighted_gaussian or combine
-    smoothing_FWHM = None # If None, no smoothing is done
-    smoothing_scale = 'px' # pixel or arcsec
+    smoothing_function = "combine" # gaussian_after, weighted_gaussian_after, gaussian, weighted_gaussian or combine
+    smoothing_FWHM = 2.0 # If None, no smoothing is done
+    smoothing_scale = "px" # pixel or arcsec

     # Rotation
-    rotate_data = False # rotation to North convention can give erroneous results
-    rotate_stokes = True
+    rotate_North = True

     # Polarization map output
-    SNRp_cut = 3. # P measurments with SNR>3
-    SNRi_cut = 3. # I measurments with SNR>30, which implies an uncertainty in P of 4.7%.
+    SNRp_cut = 3.0 # P measurments with SNR>3
+    SNRi_cut = 1.0 # I measurments with SNR>30, which implies an uncertainty in P of 4.7%.
     flux_lim = None # lowest and highest flux displayed on plot, defaults to bkg and maximum in cut if None
-    vec_scale = 5
+    scale_vec = 5
     step_vec = 1 # plot all vectors in the array. if step_vec = 2, then every other vector will be plotted if step_vec = 0 then all vectors are displayed at full length

     # Adaptive binning
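The 4.7 % quoted next to SNRi_cut is consistent with the commonly used approximation sigma_P ≈ sqrt(2) / SNR_I for the polarization-degree uncertainty (sqrt(2)/30 ≈ 0.047); the exact prefactor depends on how the Q and U uncertainties compare to the intensity uncertainty, so this is an order-of-magnitude check rather than an exact relation.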
@@ -85,6 +87,7 @@ def main(target=None, proposal_id=None, data_dir=None, infiles=None, output_dir=
     optimize = False

     # Pipeline start

     # Step 1:
     # Get data from fits files and translate to flux in erg/cm²/s/Angstrom.
@@ -114,6 +117,7 @@ def main(target=None, proposal_id=None, data_dir=None, infiles=None, output_dir=
         target = input("Target name:\n>")

     data_array, headers = proj_fits.get_obs_data(infiles, data_folder=data_folder, compute_flux=True)

     try:
         plots_folder = data_folder.replace("data", "plots")
     except ValueError:
@@ -123,18 +127,20 @@ def main(target=None, proposal_id=None, data_dir=None, infiles=None, output_dir=

     figname = "_".join([target, "FOC"])
     figtype = ""
-    if rebin:
-        if px_scale not in ['full']:
-            figtype = "".join(["b", "{0:.2f}".format(pxsize), px_scale]) # additionnal informations
+    if (pxsize is not None) and not (pxsize == 1 and pxscale.lower() in ["px", "pixel", "pixels"]):
+        if pxscale not in ["full"]:
+            figtype = "".join(["b", "{0:.2f}".format(pxsize), pxscale]) # additionnal informations
         else:
             figtype = "full"
-    if smoothing_FWHM is not None:
-        figtype += "_"+"".join(["".join([s[0] for s in smoothing_function.split("_")]),
-                                "{0:.2f}".format(smoothing_FWHM), smoothing_scale]) # additionnal informations
+    if smoothing_FWHM is not None and smoothing_scale is not None:
+        smoothstr = "".join([*[s[0] for s in smoothing_function.split("_")], "{0:.2f}".format(smoothing_FWHM), smoothing_scale])
+        figtype = "_".join([figtype, smoothstr] if figtype != "" else [smoothstr])

     if deconvolve:
-        figtype += "_deconv"
+        figtype = "_".join([figtype, "deconv"] if figtype != "" else ["deconv"])
     if align_center is None:
-        figtype += "_not_aligned"
+        figtype = "_".join([figtype, "not_aligned"] if figtype != "" else ["not_aligned"])

     if optimal_binning:
         options = {'optimize': optimize, 'optimal_binning': True}
@@ -337,12 +343,14 @@ def main(target=None, proposal_id=None, data_dir=None, infiles=None, output_dir=
     elif px_scale.lower() not in ['full', 'integrate']:
         proj_plots.pol_map(Stokes_test, SNRp_cut=SNRp_cut, SNRi_cut=SNRi_cut, flux_lim=flux_lim)

-    return 0
+    return outfiles


 if __name__ == "__main__":
     import argparse

     parser = argparse.ArgumentParser(description='Query MAST for target products')
     parser.add_argument('-t', '--target', metavar='targetname', required=False, help='the name of the target', type=str, default=None)
     parser.add_argument('-p', '--proposal_id', metavar='proposal_id', required=False, help='the proposal id of the data products', type=int, default=None)

@@ -1,2 +1,3 @@
 from . import lib
 from . import src
+from . import FOC_reduction

@@ -9,128 +9,144 @@ prototypes :
|
|||||||
- bkg_mini(data, error, mask, headers, sub_shape, display, savename, plots_folder) -> n_data_array, n_error_array, headers, background)
|
- bkg_mini(data, error, mask, headers, sub_shape, display, savename, plots_folder) -> n_data_array, n_error_array, headers, background)
|
||||||
Compute the error (noise) of the input array by looking at the sub-region of minimal flux in every image and of shape sub_shape.
|
Compute the error (noise) of the input array by looking at the sub-region of minimal flux in every image and of shape sub_shape.
|
||||||
"""
|
"""
|
||||||
from os.path import join as path_join
|
|
||||||
from copy import deepcopy
|
from copy import deepcopy
|
||||||
import numpy as np
|
|
||||||
import matplotlib.pyplot as plt
|
|
||||||
import matplotlib.dates as mdates
|
|
||||||
from matplotlib.colors import LogNorm
|
|
||||||
from matplotlib.patches import Rectangle
|
|
||||||
from datetime import datetime, timedelta
|
from datetime import datetime, timedelta
|
||||||
|
from os.path import join as path_join
|
||||||
|
|
||||||
|
import matplotlib.dates as mdates
|
||||||
|
import matplotlib.pyplot as plt
|
||||||
|
import numpy as np
|
||||||
from astropy.time import Time
|
from astropy.time import Time
|
||||||
from lib.plots import plot_obs
|
from lib.plots import plot_obs
|
||||||
|
from matplotlib.colors import LogNorm
|
||||||
|
from matplotlib.patches import Rectangle
|
||||||
from scipy.optimize import curve_fit
|
from scipy.optimize import curve_fit
|
||||||
|
|
||||||
|
|
||||||
def gauss(x, *p):
|
def gauss(x, *p):
|
||||||
N, mu, sigma = p
|
N, mu, sigma = p
|
||||||
return N*np.exp(-(x-mu)**2/(2.*sigma**2))
|
return N * np.exp(-((x - mu) ** 2) / (2.0 * sigma**2))
|
||||||
|
|
||||||
|
|
||||||
def gausspol(x, *p):
|
def gausspol(x, *p):
|
||||||
N, mu, sigma, a, b, c, d = p
|
N, mu, sigma, a, b, c, d = p
|
||||||
return N*np.exp(-(x-mu)**2/(2.*sigma**2)) + a*np.log(x) + b/x + c*x + d
|
return N * np.exp(-((x - mu) ** 2) / (2.0 * sigma**2)) + a * np.log(x) + b / x + c * x + d
|
||||||
|
|
||||||
|
|
||||||
def bin_centers(edges):
|
def bin_centers(edges):
|
||||||
return (edges[1:]+edges[:-1])/2.
|
return (edges[1:] + edges[:-1]) / 2.0
|
||||||
|
|
||||||
|
|
||||||
def display_bkg(data, background, std_bkg, headers, histograms=None, binning=None, coeff=None, rectangle=None, savename=None, plots_folder="./"):
|
def display_bkg(data, background, std_bkg, headers, histograms=None, binning=None, coeff=None, rectangle=None, savename=None, plots_folder="./"):
|
||||||
plt.rcParams.update({'font.size': 15})
|
plt.rcParams.update({"font.size": 15})
|
||||||
convert_flux = np.array([head['photflam'] for head in headers])
|
convert_flux = np.array([head["photflam"] for head in headers])
|
||||||
date_time = np.array([Time((headers[i]['expstart']+headers[i]['expend'])/2., format='mjd', precision=0).iso for i in range(len(headers))])
|
date_time = np.array([Time((headers[i]["expstart"] + headers[i]["expend"]) / 2.0, format="mjd", precision=0).iso for i in range(len(headers))])
|
||||||
date_time = np.array([datetime.strptime(d, '%Y-%m-%d %H:%M:%S') for d in date_time])
|
date_time = np.array([datetime.strptime(d, "%Y-%m-%d %H:%M:%S") for d in date_time])
|
||||||
date_err = np.array([timedelta(seconds=headers[i]['exptime']/2.) for i in range(len(headers))])
|
date_err = np.array([timedelta(seconds=headers[i]["exptime"] / 2.0) for i in range(len(headers))])
|
||||||
filt = np.array([headers[i]['filtnam1'] for i in range(len(headers))])
|
filt = np.array([headers[i]["filtnam1"] for i in range(len(headers))])
|
||||||
dict_filt = {"POL0": 'r', "POL60": 'g', "POL120": 'b'}
|
dict_filt = {"POL0": "r", "POL60": "g", "POL120": "b"}
|
||||||
c_filt = np.array([dict_filt[f] for f in filt])
|
c_filt = np.array([dict_filt[f] for f in filt])
|
||||||
|
|
||||||
fig, ax = plt.subplots(figsize=(10, 6), constrained_layout=True)
|
fig, ax = plt.subplots(figsize=(10, 6), constrained_layout=True)
|
||||||
for f in np.unique(filt):
|
for f in np.unique(filt):
|
||||||
mask = [fil == f for fil in filt]
|
mask = [fil == f for fil in filt]
|
||||||
ax.scatter(date_time[mask], background[mask]*convert_flux[mask], color=dict_filt[f],
|
ax.scatter(date_time[mask], background[mask] * convert_flux[mask], color=dict_filt[f], label="{0:s}".format(f))
|
||||||
label="{0:s}".format(f))
|
ax.errorbar(date_time, background * convert_flux, xerr=date_err, yerr=std_bkg * convert_flux, fmt="+k", markersize=0, ecolor=c_filt)
|
||||||
ax.errorbar(date_time, background*convert_flux, xerr=date_err, yerr=std_bkg*convert_flux, fmt='+k',
|
|
||||||
markersize=0, ecolor=c_filt)
|
|
||||||
# Date handling
|
# Date handling
|
||||||
locator = mdates.AutoDateLocator()
|
locator = mdates.AutoDateLocator()
|
||||||
formatter = mdates.ConciseDateFormatter(locator)
|
formatter = mdates.ConciseDateFormatter(locator)
|
||||||
ax.xaxis.set_major_locator(locator)
|
ax.xaxis.set_major_locator(locator)
|
||||||
ax.xaxis.set_major_formatter(formatter)
|
ax.xaxis.set_major_formatter(formatter)
|
||||||
# ax.set_ylim(bottom=0.)
|
# ax.set_ylim(bottom=0.)
|
||||||
ax.set_yscale('log')
|
ax.set_yscale("log")
|
||||||
ax.set_xlabel("Observation date and time")
|
ax.set_xlabel("Observation date and time")
|
||||||
ax.set_ylabel(r"Flux [$ergs \cdot cm^{-2} \cdot s^{-1} \cdot \AA^{-1}$]")
|
ax.set_ylabel(r"Flux [$ergs \cdot cm^{-2} \cdot s^{-1} \cdot \AA^{-1}$]")
|
||||||
plt.legend()
|
plt.legend()
|
||||||
if not (savename is None):
|
if savename is not None:
|
||||||
this_savename = deepcopy(savename)
|
this_savename = deepcopy(savename)
|
||||||
if not savename[-4:] in ['.png', '.jpg', '.pdf']:
|
if savename[-4:] not in [".png", ".jpg", ".pdf"]:
|
||||||
this_savename += '_background_flux.pdf'
|
this_savename += "_background_flux.pdf"
|
||||||
else:
|
else:
|
||||||
this_savename = savename[:-4] + "_background_flux" + savename[-4:]
|
this_savename = savename[:-4] + "_background_flux" + savename[-4:]
|
||||||
fig.savefig(path_join(plots_folder, this_savename), bbox_inches='tight')
|
fig.savefig(path_join(plots_folder, this_savename), bbox_inches="tight")
|
||||||
|
|
||||||
if not (histograms is None):
|
if histograms is not None:
|
||||||
filt_obs = {"POL0": 0, "POL60": 0, "POL120": 0}
|
filt_obs = {"POL0": 0, "POL60": 0, "POL120": 0}
|
||||||
fig_h, ax_h = plt.subplots(figsize=(10, 6), constrained_layout=True)
|
fig_h, ax_h = plt.subplots(figsize=(10, 8), constrained_layout=True)
|
||||||
for i, (hist, bins) in enumerate(zip(histograms, binning)):
|
for i, (hist, bins) in enumerate(zip(histograms, binning)):
|
||||||
filt_obs[headers[i]['filtnam1']] += 1
|
filt_obs[headers[i]["filtnam1"]] += 1
|
||||||
ax_h.plot(bins*convert_flux[i], hist, '+', color="C{0:d}".format(i), alpha=0.8,
|
ax_h.plot(
|
||||||
label=headers[i]['filtnam1']+' (Obs '+str(filt_obs[headers[i]['filtnam1']])+')')
|
bins * convert_flux[i],
|
||||||
ax_h.plot([background[i]*convert_flux[i], background[i]*convert_flux[i]], [hist.min(), hist.max()], 'x--', color="C{0:d}".format(i), alpha=0.8)
|
hist,
|
||||||
if not (coeff is None):
|
"+",
|
||||||
|
color="C{0:d}".format(i),
|
||||||
|
alpha=0.8,
|
||||||
|
label=headers[i]["filtnam1"] + " (Obs " + str(filt_obs[headers[i]["filtnam1"]]) + ")",
|
||||||
|
)
|
||||||
|
ax_h.plot([background[i] * convert_flux[i], background[i] * convert_flux[i]], [hist.min(), hist.max()], "x--", color="C{0:d}".format(i), alpha=0.8)
|
||||||
|
if coeff is not None:
|
||||||
# ax_h.plot(bins*convert_flux[i], gausspol(bins, *coeff[i]), '--', color="C{0:d}".format(i), alpha=0.8)
|
# ax_h.plot(bins*convert_flux[i], gausspol(bins, *coeff[i]), '--', color="C{0:d}".format(i), alpha=0.8)
|
||||||
ax_h.plot(bins*convert_flux[i], gauss(bins, *coeff[i]), '--', color="C{0:d}".format(i), alpha=0.8)
|
ax_h.plot(bins * convert_flux[i], gauss(bins, *coeff[i]), "--", color="C{0:d}".format(i), alpha=0.8)
|
||||||
ax_h.set_xscale('log')
|
ax_h.set_xscale("log")
|
||||||
ax_h.set_ylim([0., np.max([hist.max() for hist in histograms])])
|
ax_h.set_ylim([0.0, np.max([hist.max() for hist in histograms])])
|
||||||
ax_h.set_xlim([np.min(background * convert_flux) * 1e-2, np.max(background * convert_flux) * 1e2])
|
ax_h.set_xlim([np.min(background * convert_flux) * 1e-2, np.max(background * convert_flux) * 1e2])
|
||||||
ax_h.set_xlabel(r"Flux [$ergs \cdot cm^{-2} \cdot s^{-1} \cdot \AA^{-1}$]")
|
ax_h.set_xlabel(r"Flux [$ergs \cdot cm^{-2} \cdot s^{-1} \cdot \AA^{-1}$]")
|
||||||
ax_h.set_ylabel(r"Number of pixels in bin")
|
ax_h.set_ylabel(r"Number of pixels in bin")
|
||||||
ax_h.set_title("Histogram for each observation")
|
ax_h.set_title("Histogram for each observation")
|
||||||
plt.legend()
|
plt.legend()
|
||||||
if not (savename is None):
|
if savename is not None:
|
||||||
this_savename = deepcopy(savename)
|
this_savename = deepcopy(savename)
|
||||||
if not savename[-4:] in ['.png', '.jpg', '.pdf']:
|
if savename[-4:] not in [".png", ".jpg", ".pdf"]:
|
||||||
this_savename += '_histograms.pdf'
|
this_savename += "_histograms.pdf"
|
||||||
else:
|
else:
|
||||||
this_savename = savename[:-4] + "_histograms" + savename[-4:]
|
this_savename = savename[:-4] + "_histograms" + savename[-4:]
|
||||||
fig_h.savefig(path_join(plots_folder, this_savename), bbox_inches='tight')
|
fig_h.savefig(path_join(plots_folder, this_savename), bbox_inches="tight")
|
||||||
|
|
||||||
fig2, ax2 = plt.subplots(figsize=(10, 10))
|
fig2, ax2 = plt.subplots(figsize=(10, 10))
|
||||||
data0 = data[0] * convert_flux[0]
|
data0 = data[0] * convert_flux[0]
|
||||||
bkg_data0 = data0 <= background[0] * convert_flux[0]
|
bkg_data0 = data0 <= background[0] * convert_flux[0]
|
||||||
instr = headers[0]['instrume']
|
instr = headers[0]["instrume"]
|
||||||
rootname = headers[0]['rootname']
|
rootname = headers[0]["rootname"]
|
||||||
exptime = headers[0]['exptime']
|
exptime = headers[0]["exptime"]
|
||||||
filt = headers[0]['filtnam1']
|
filt = headers[0]["filtnam1"]
|
||||||
# plots
|
# plots
|
||||||
im2 = ax2.imshow(data0, norm=LogNorm(data0[data0 > 0.].mean()/10., data0.max()), origin='lower', cmap='gray')
|
im2 = ax2.imshow(data0, norm=LogNorm(data0[data0 > 0.0].mean() / 10.0, data0.max()), origin="lower", cmap="gray")
|
||||||
ax2.imshow(bkg_data0, origin='lower', cmap='Reds', alpha=0.5)
|
ax2.imshow(bkg_data0, origin="lower", cmap="Reds", alpha=0.5)
|
||||||
if not (rectangle is None):
|
if rectangle is not None:
|
||||||
x, y, width, height, angle, color = rectangle[0]
|
x, y, width, height, angle, color = rectangle[0]
|
||||||
ax2.add_patch(Rectangle((x, y), width, height, edgecolor=color, fill=False, lw=2))
|
ax2.add_patch(Rectangle((x, y), width, height, edgecolor=color, fill=False, lw=2))
|
||||||
ax2.annotate(instr+":"+rootname, color='white', fontsize=10, xy=(0.01, 1.00), xycoords='axes fraction', verticalalignment='top', horizontalalignment='left')
|
ax2.annotate(
|
||||||
ax2.annotate(filt, color='white', fontsize=14, xy=(0.01, 0.01), xycoords='axes fraction', verticalalignment='bottom', horizontalalignment='left')
|
instr + ":" + rootname, color="white", fontsize=10, xy=(0.01, 1.00), xycoords="axes fraction", verticalalignment="top", horizontalalignment="left"
|
||||||
ax2.annotate(str(exptime)+" s", color='white', fontsize=10, xy=(1.00, 0.01),
|
)
|
||||||
xycoords='axes fraction', verticalalignment='bottom', horizontalalignment='right')
|
ax2.annotate(filt, color="white", fontsize=14, xy=(0.01, 0.01), xycoords="axes fraction", verticalalignment="bottom", horizontalalignment="left")
|
||||||
ax2.set(xlabel='pixel offset', ylabel='pixel offset', aspect='equal')
|
ax2.annotate(
|
||||||
|
str(exptime) + " s", color="white", fontsize=10, xy=(1.00, 0.01), xycoords="axes fraction", verticalalignment="bottom", horizontalalignment="right"
|
||||||
|
)
|
||||||
|
ax2.set(xlabel="pixel offset", ylabel="pixel offset", aspect="equal")
|
||||||
|
|
||||||
fig2.subplots_adjust(hspace=0, wspace=0, right=1.0)
|
fig2.subplots_adjust(hspace=0, wspace=0, right=1.0)
|
||||||
fig2.colorbar(im2, ax=ax2, location='right', aspect=50, pad=0.025, label=r"Flux [$ergs \cdot cm^{-2} \cdot s^{-1} \cdot \AA^{-1}$]")
|
fig2.colorbar(im2, ax=ax2, location="right", aspect=50, pad=0.025, label=r"Flux [$ergs \cdot cm^{-2} \cdot s^{-1} \cdot \AA^{-1}$]")
|
||||||
|
|
||||||
if not (savename is None):
|
if savename is not None:
|
||||||
this_savename = deepcopy(savename)
|
this_savename = deepcopy(savename)
|
||||||
if not savename[-4:] in ['.png', '.jpg', '.pdf']:
|
if savename[-4:] not in [".png", ".jpg", ".pdf"]:
|
||||||
this_savename += '_'+filt+'_background_location.pdf'
|
this_savename += "_" + filt + "_background_location.pdf"
|
||||||
else:
|
else:
|
||||||
this_savename = savename[:-4]+'_'+filt+'_background_location'+savename[-4:]
|
this_savename = savename[:-4] + "_" + filt + "_background_location" + savename[-4:]
|
||||||
fig2.savefig(path_join(plots_folder, this_savename), bbox_inches='tight')
|
fig2.savefig(path_join(plots_folder, this_savename), bbox_inches="tight")
|
||||||
if not (rectangle is None):
|
if rectangle is not None:
|
||||||
plot_obs(data, headers, vmin=data[data > 0.].min()*convert_flux.mean(), vmax=data[data > 0.].max()*convert_flux.mean(), rectangle=rectangle,
|
plot_obs(
|
||||||
savename=savename+"_background_location", plots_folder=plots_folder)
|
data,
|
||||||
elif not (rectangle is None):
|
headers,
|
||||||
plot_obs(data, headers, vmin=data[data > 0.].min(), vmax=data[data > 0.].max(), rectangle=rectangle)
|
vmin=data[data > 0.0].min() * convert_flux.mean(),
|
||||||
|
vmax=data[data > 0.0].max() * convert_flux.mean(),
|
||||||
|
rectangle=rectangle,
|
||||||
|
savename=savename + "_background_location",
|
||||||
|
plots_folder=plots_folder,
|
||||||
|
)
|
||||||
|
elif rectangle is not None:
|
||||||
|
plot_obs(data, headers, vmin=data[data > 0.0].min(), vmax=data[data > 0.0].max(), rectangle=rectangle)
|
||||||
|
|
||||||
plt.show()
|
plt.show()
|
||||||
|
|
||||||
@@ -141,7 +157,7 @@ def sky_part(img):
|
|||||||
# Intensity range
|
# Intensity range
|
||||||
sky_med = np.median(rand_pix)
|
sky_med = np.median(rand_pix)
|
||||||
sig = np.min([img[img < sky_med].std(), img[img > sky_med].std()])
|
sig = np.min([img[img < sky_med].std(), img[img > sky_med].std()])
|
||||||
sky_range = [sky_med-2.*sig, np.max([sky_med+sig, 7e-4])] # Detector background average FOC Data Handbook Sec. 7.6
|
sky_range = [sky_med - 2.0 * sig, np.max([sky_med + sig, 7e-4])] # Detector background average FOC Data Handbook Sec. 7.6
|
||||||
|
|
||||||
sky = img[np.logical_and(img >= sky_range[0], img <= sky_range[1])]
|
sky = img[np.logical_and(img >= sky_range[0], img <= sky_range[1])]
|
||||||
return sky, sky_range
|
return sky, sky_range
|
||||||
@@ -152,13 +168,13 @@ def bkg_estimate(img, bins=None, chi2=None, coeff=None):
|
|||||||
bins, chi2, coeff = [8], [], []
|
bins, chi2, coeff = [8], [], []
|
||||||
else:
|
else:
|
||||||
try:
|
try:
|
||||||
bins.append(int(3./2.*bins[-1]))
|
bins.append(int(3.0 / 2.0 * bins[-1]))
|
||||||
except IndexError:
|
except IndexError:
|
||||||
bins, chi2, coeff = [8], [], []
|
bins, chi2, coeff = [8], [], []
|
||||||
hist, bin_edges = np.histogram(img[img > 0], bins=bins[-1])
|
hist, bin_edges = np.histogram(img[img > 0], bins=bins[-1])
|
||||||
binning = bin_centers(bin_edges)
|
binning = bin_centers(bin_edges)
|
||||||
peak = binning[np.argmax(hist)]
|
peak = binning[np.argmax(hist)]
|
||||||
bins_stdev = binning[hist > hist.max()/2.]
|
bins_stdev = binning[hist > hist.max() / 2.0]
|
||||||
stdev = bins_stdev[-1] - bins_stdev[0]
|
stdev = bins_stdev[-1] - bins_stdev[0]
|
||||||
# p0 = [hist.max(), peak, stdev, 1e-3, 1e-3, 1e-3, 1e-3]
|
# p0 = [hist.max(), peak, stdev, 1e-3, 1e-3, 1e-3, 1e-3]
|
||||||
p0 = [hist.max(), peak, stdev]
|
p0 = [hist.max(), peak, stdev]
|
||||||
@@ -223,7 +239,7 @@ def bkg_fit(data, error, mask, headers, subtract_error=True, display=False, save
|
|||||||
|
|
||||||
for i, image in enumerate(data):
|
for i, image in enumerate(data):
|
||||||
# Compute the Count-rate histogram for the image
|
# Compute the Count-rate histogram for the image
|
||||||
sky, sky_range = sky_part(image[image > 0.])
|
sky, sky_range = sky_part(image[image > 0.0])
|
||||||
|
|
||||||
bins, chi2, coeff = bkg_estimate(sky)
|
bins, chi2, coeff = bkg_estimate(sky)
|
||||||
while bins[-1] < 256:
|
while bins[-1] < 256:
|
||||||
@@ -235,6 +251,7 @@ def bkg_fit(data, error, mask, headers, subtract_error=True, display=False, save
|
|||||||
weights = 1 / chi2**2
|
weights = 1 / chi2**2
|
||||||
weights /= weights.sum()
|
weights /= weights.sum()
|
||||||
|
|
||||||
|
|
||||||
bkg = np.sum(weights*(coeff[:, 1]+np.abs(coeff[:, 2]) * 0.01)) # why not just use 0.01
|
bkg = np.sum(weights*(coeff[:, 1]+np.abs(coeff[:, 2]) * 0.01)) # why not just use 0.01
|
||||||
|
|
||||||
error_bkg[i] *= bkg
|
error_bkg[i] *= bkg
|
||||||
@@ -246,7 +263,8 @@ def bkg_fit(data, error, mask, headers, subtract_error=True, display=False, save
|
|||||||
# n_data_array[i][mask] = n_data_array[i][mask] - bkg
|
# n_data_array[i][mask] = n_data_array[i][mask] - bkg
|
||||||
# n_data_array[i][np.logical_and(mask, n_data_array[i] <= 1e-3*bkg)] = 1e-3*bkg
|
# n_data_array[i][np.logical_and(mask, n_data_array[i] <= 1e-3*bkg)] = 1e-3*bkg
|
||||||
|
|
||||||
std_bkg[i] = image[np.abs(image-bkg)/bkg < 1.].std()
|
|
||||||
|
std_bkg[i] = image[np.abs(image - bkg) / bkg < 1.0].std()
|
||||||
background[i] = bkg
|
background[i] = bkg
|
||||||
|
|
||||||
if subtract_error > 0:
|
if subtract_error > 0:
|
||||||
@@ -311,37 +329,43 @@ def bkg_hist(data, error, mask, headers, sub_type=None, subtract_error=True, dis
|
|||||||
|
|
||||||
for i, image in enumerate(data):
|
for i, image in enumerate(data):
|
||||||
# Compute the Count-rate histogram for the image
|
# Compute the Count-rate histogram for the image
|
||||||
n_mask = np.logical_and(mask, image > 0.)
|
n_mask = np.logical_and(mask, image > 0.0)
|
||||||
if not (sub_type is None):
|
if sub_type is not None:
|
||||||
if isinstance(sub_type, int):
|
if isinstance(sub_type, int):
|
||||||
n_bins = sub_type
|
n_bins = sub_type
|
||||||
elif sub_type.lower() in ['sqrt']:
|
elif sub_type.lower() in ["sqrt"]:
|
||||||
n_bins = np.fix(np.sqrt(image[n_mask].size)).astype(int) # Square-root
|
n_bins = np.fix(np.sqrt(image[n_mask].size)).astype(int) # Square-root
|
||||||
elif sub_type.lower() in ['sturges']:
|
elif sub_type.lower() in ["sturges"]:
|
||||||
n_bins = np.ceil(np.log2(image[n_mask].size)).astype(int) + 1 # Sturges
|
n_bins = np.ceil(np.log2(image[n_mask].size)).astype(int) + 1 # Sturges
|
||||||
elif sub_type.lower() in ['rice']:
|
elif sub_type.lower() in ["rice"]:
|
||||||
n_bins = 2 * np.fix(np.power(image[n_mask].size, 1 / 3)).astype(int) # Rice
|
n_bins = 2 * np.fix(np.power(image[n_mask].size, 1 / 3)).astype(int) # Rice
|
||||||
elif sub_type.lower() in ['scott']:
|
elif sub_type.lower() in ["scott"]:
|
||||||
n_bins = np.fix((image[n_mask].max()-image[n_mask].min())/(3.5*image[n_mask].std()/np.power(image[n_mask].size, 1/3))).astype(int) # Scott
|
n_bins = np.fix((image[n_mask].max() - image[n_mask].min()) / (3.5 * image[n_mask].std() / np.power(image[n_mask].size, 1 / 3))).astype(
|
||||||
|
int
|
||||||
|
) # Scott
|
||||||
else:
|
else:
|
||||||
n_bins = np.fix((image[n_mask].max()-image[n_mask].min())/(2*np.subtract(*np.percentile(image[n_mask], [75, 25])) /
|
n_bins = np.fix(
|
||||||
np.power(image[n_mask].size, 1/3))).astype(int) # Freedman-Diaconis
|
(image[n_mask].max() - image[n_mask].min())
|
||||||
|
/ (2 * np.subtract(*np.percentile(image[n_mask], [75, 25])) / np.power(image[n_mask].size, 1 / 3))
|
||||||
|
).astype(int) # Freedman-Diaconis
|
||||||
else:
|
else:
|
||||||
n_bins = np.fix((image[n_mask].max()-image[n_mask].min())/(2*np.subtract(*np.percentile(image[n_mask], [75, 25])) /
|
n_bins = np.fix(
|
||||||
np.power(image[n_mask].size, 1/3))).astype(int) # Freedman-Diaconis
|
(image[n_mask].max() - image[n_mask].min()) / (2 * np.subtract(*np.percentile(image[n_mask], [75, 25])) / np.power(image[n_mask].size, 1 / 3))
|
||||||
|
).astype(int) # Freedman-Diaconis
|
||||||
|
|
||||||
hist, bin_edges = np.histogram(np.log(image[n_mask]), bins=n_bins)
|
hist, bin_edges = np.histogram(np.log(image[n_mask]), bins=n_bins)
|
||||||
histograms.append(hist)
|
histograms.append(hist)
|
||||||
binning.append(np.exp(bin_centers(bin_edges)))
|
binning.append(np.exp(bin_centers(bin_edges)))
|
||||||
|
|
||||||
# Fit a gaussian to the log-intensity histogram
|
# Fit a gaussian to the log-intensity histogram
|
||||||
bins_stdev = binning[-1][hist > hist.max()/2.]
|
bins_stdev = binning[-1][hist > hist.max() / 2.0]
|
||||||
stdev = bins_stdev[-1] - bins_stdev[0]
|
stdev = bins_stdev[-1] - bins_stdev[0]
|
||||||
# p0 = [hist.max(), binning[-1][np.argmax(hist)], stdev, 1e-3, 1e-3, 1e-3, 1e-3]
|
# p0 = [hist.max(), binning[-1][np.argmax(hist)], stdev, 1e-3, 1e-3, 1e-3, 1e-3]
|
||||||
p0 = [hist.max(), binning[-1][np.argmax(hist)], stdev]
|
p0 = [hist.max(), binning[-1][np.argmax(hist)], stdev]
|
||||||
# popt, pcov = curve_fit(gausspol, binning[-1], hist, p0=p0)
|
# popt, pcov = curve_fit(gausspol, binning[-1], hist, p0=p0)
|
||||||
popt, pcov = curve_fit(gauss, binning[-1], hist, p0=p0)
|
popt, pcov = curve_fit(gauss, binning[-1], hist, p0=p0)
|
||||||
coeff.append(popt)
|
coeff.append(popt)
|
||||||
|
|
||||||
bkg = popt[1]+np.abs(popt[2]) * 0.01 # why not just use 0.01
|
bkg = popt[1]+np.abs(popt[2]) * 0.01 # why not just use 0.01
|
||||||
|
|
||||||
error_bkg[i] *= bkg
|
error_bkg[i] *= bkg
|
||||||
@@ -353,7 +377,8 @@ def bkg_hist(data, error, mask, headers, sub_type=None, subtract_error=True, dis
|
|||||||
# n_data_array[i][mask] = n_data_array[i][mask] - bkg
|
# n_data_array[i][mask] = n_data_array[i][mask] - bkg
|
||||||
# n_data_array[i][np.logical_and(mask, n_data_array[i] <= 1e-3*bkg)] = 1e-3*bkg
|
# n_data_array[i][np.logical_and(mask, n_data_array[i] <= 1e-3*bkg)] = 1e-3*bkg
|
||||||
|
|
||||||
std_bkg[i] = image[np.abs(image-bkg)/bkg < 1.].std()
|
|
||||||
|
std_bkg[i] = image[np.abs(image - bkg) / bkg < 1.0].std()
|
||||||
background[i] = bkg
|
background[i] = bkg
|
||||||
|
|
||||||
if subtract_error > 0:
|
if subtract_error > 0:
|
||||||
@@ -439,10 +464,11 @@ def bkg_mini(data, error, mask, headers, sub_shape=(15, 15), subtract_error=True
|
|||||||
minima = np.unravel_index(np.argmin(temp.sum(axis=0)), temp.shape[1:])
|
minima = np.unravel_index(np.argmin(temp.sum(axis=0)), temp.shape[1:])
|
||||||
|
|
||||||
for i, image in enumerate(data):
|
for i, image in enumerate(data):
|
||||||
rectangle.append([minima[1], minima[0], sub_shape[1], sub_shape[0], 0., 'r'])
|
rectangle.append([minima[1], minima[0], sub_shape[1], sub_shape[0], 0.0, "r"])
|
||||||
# Compute error : root mean square of the background
|
# Compute error : root mean square of the background
|
||||||
sub_image = image[minima[0] : minima[0] + sub_shape[0], minima[1] : minima[1] + sub_shape[1]]
|
sub_image = image[minima[0] : minima[0] + sub_shape[0], minima[1] : minima[1] + sub_shape[1]]
|
||||||
# bkg = np.std(sub_image) # Previously computed using standard deviation over the background
|
# bkg = np.std(sub_image) # Previously computed using standard deviation over the background
|
||||||
|
|
||||||
bkg = np.sqrt(np.sum(sub_image**2)/sub_image.size)*0.01 if subtract_error > 0 else np.sqrt(np.sum(sub_image**2)/sub_image.size)
|
bkg = np.sqrt(np.sum(sub_image**2)/sub_image.size)*0.01 if subtract_error > 0 else np.sqrt(np.sum(sub_image**2)/sub_image.size)
|
||||||
error_bkg[i] *= bkg
|
error_bkg[i] *= bkg
|
||||||
|
|
||||||
@@ -453,7 +479,8 @@ def bkg_mini(data, error, mask, headers, sub_shape=(15, 15), subtract_error=True
|
|||||||
# n_data_array[i][mask] = n_data_array[i][mask] - bkg
|
# n_data_array[i][mask] = n_data_array[i][mask] - bkg
|
||||||
# n_data_array[i][np.logical_and(mask, n_data_array[i] <= 1e-3*bkg)] = 1e-3*bkg
|
# n_data_array[i][np.logical_and(mask, n_data_array[i] <= 1e-3*bkg)] = 1e-3*bkg
|
||||||
|
|
||||||
std_bkg[i] = image[np.abs(image-bkg)/bkg < 1.].std()
|
|
||||||
|
std_bkg[i] = image[np.abs(image - bkg) / bkg < 1.0].std()
|
||||||
background[i] = bkg
|
background[i] = bkg
|
||||||
|
|
||||||
if subtract_error > 0:
|
if subtract_error > 0:
|
||||||
|
|||||||
@@ -3,6 +3,7 @@ Library functions for graham algorithm implementation (find the convex hull of a
|
|||||||
"""
|
"""
|
||||||
|
|
||||||
from copy import deepcopy
|
from copy import deepcopy
|
||||||
|
|
||||||
import numpy as np
|
import numpy as np
|
||||||
|
|
||||||
|
|
||||||
@@ -16,15 +17,15 @@ def clean_ROI(image):
|
|||||||
row, col = np.indices(shape)
|
row, col = np.indices(shape)
|
||||||
|
|
||||||
for i in range(0, shape[0]):
|
for i in range(0, shape[0]):
|
||||||
r = row[i, :][image[i, :] > 0.]
|
r = row[i, :][image[i, :] > 0.0]
|
||||||
c = col[i, :][image[i, :] > 0.]
|
c = col[i, :][image[i, :] > 0.0]
|
||||||
if len(r) > 1 and len(c) > 1:
|
if len(r) > 1 and len(c) > 1:
|
||||||
H.append((r[0], c[0]))
|
H.append((r[0], c[0]))
|
||||||
H.append((r[-1], c[-1]))
|
H.append((r[-1], c[-1]))
|
||||||
H = np.array(H)
|
H = np.array(H)
|
||||||
for j in range(0, shape[1]):
|
for j in range(0, shape[1]):
|
||||||
r = row[:, j][image[:, j] > 0.]
|
r = row[:, j][image[:, j] > 0.0]
|
||||||
c = col[:, j][image[:, j] > 0.]
|
c = col[:, j][image[:, j] > 0.0]
|
||||||
if len(r) > 1 and len(c) > 1:
|
if len(r) > 1 and len(c) > 1:
|
||||||
J.append((r[0], c[0]))
|
J.append((r[0], c[0]))
|
||||||
J.append((r[-1], c[-1]))
|
J.append((r[-1], c[-1]))
|
||||||
@@ -206,16 +207,32 @@ def sort_angles_distances(Omega, s):
|
|||||||
Sort the list of points 's' for the composition order given reference point
|
Sort the list of points 's' for the composition order given reference point
|
||||||
Omega.
|
Omega.
|
||||||
"""
|
"""
|
||||||
def order(A, B): return comp(Omega, A, B)
|
|
||||||
|
def order(A, B):
|
||||||
|
return comp(Omega, A, B)
|
||||||
|
|
||||||
quicksort(s, order)
|
quicksort(s, order)
|
||||||
|
|
||||||
|
|
||||||
# Define fuction for stacks (use here python lists with stack operations).
|
# Define fuction for stacks (use here python lists with stack operations).
|
||||||
def empty_stack(): return []
|
def empty_stack():
|
||||||
def stack(S, A): S.append(A)
|
return []
|
||||||
def unstack(S): S.pop()
|
|
||||||
def stack_top(S): return S[-1]
|
|
||||||
def stack_sub_top(S): return S[-2]
|
def stack(S, A):
|
||||||
|
S.append(A)
|
||||||
|
|
||||||
|
|
||||||
|
def unstack(S):
|
||||||
|
S.pop()
|
||||||
|
|
||||||
|
|
||||||
|
def stack_top(S):
|
||||||
|
return S[-1]
|
||||||
|
|
||||||
|
|
||||||
|
def stack_sub_top(S):
|
||||||
|
return S[-2]
|
||||||
|
|
||||||
|
|
||||||
# Alignement handling
|
# Alignement handling
|
||||||
@@ -299,7 +316,7 @@ def convex_hull(H):
|
|||||||
return S
|
return S
|
||||||
|
|
||||||
|
|
||||||
def image_hull(image, step=5, null_val=0., inside=True):
|
def image_hull(image, step=5, null_val=0.0, inside=True):
|
||||||
"""
|
"""
|
||||||
Compute the convex hull of a 2D image and return the 4 relevant coordinates
|
Compute the convex hull of a 2D image and return the 4 relevant coordinates
|
||||||
of the maximum included rectangle (ie. crop image to maximum rectangle).
|
of the maximum included rectangle (ie. crop image to maximum rectangle).
|
||||||
|
|||||||
@@ -1,6 +1,7 @@
|
|||||||
"""
|
"""
|
||||||
Library functions for phase cross-correlation computation.
|
Library functions for phase cross-correlation computation.
|
||||||
"""
|
"""
|
||||||
|
|
||||||
# Prefer FFTs via the new scipy.fft module when available (SciPy 1.4+)
|
# Prefer FFTs via the new scipy.fft module when available (SciPy 1.4+)
|
||||||
# Otherwise fall back to numpy.fft.
|
# Otherwise fall back to numpy.fft.
|
||||||
# Like numpy 1.15+ scipy 1.3+ is also using pocketfft, but a newer
|
# Like numpy 1.15+ scipy 1.3+ is also using pocketfft, but a newer
|
||||||
@@ -13,8 +14,7 @@ except ImportError:
|
|||||||
import numpy as np
|
import numpy as np
|
||||||
|
|
||||||
|
|
||||||
def _upsampled_dft(data, upsampled_region_size, upsample_factor=1,
|
def _upsampled_dft(data, upsampled_region_size, upsample_factor=1, axis_offsets=None):
|
||||||
axis_offsets=None):
|
|
||||||
"""
|
"""
|
||||||
Upsampled DFT by matrix multiplication.
|
Upsampled DFT by matrix multiplication.
|
||||||
This code is intended to provide the same result as if the following
|
This code is intended to provide the same result as if the following
|
||||||
@@ -48,26 +48,27 @@ def _upsampled_dft(data, upsampled_region_size, upsample_factor=1,
|
|||||||
"""
|
"""
|
||||||
# if people pass in an integer, expand it to a list of equal-sized sections
|
# if people pass in an integer, expand it to a list of equal-sized sections
|
||||||
if not hasattr(upsampled_region_size, "__iter__"):
|
if not hasattr(upsampled_region_size, "__iter__"):
|
||||||
upsampled_region_size = [upsampled_region_size, ] * data.ndim
|
upsampled_region_size = [
|
||||||
|
upsampled_region_size,
|
||||||
|
] * data.ndim
|
||||||
else:
|
else:
|
||||||
if len(upsampled_region_size) != data.ndim:
|
if len(upsampled_region_size) != data.ndim:
|
||||||
raise ValueError("shape of upsampled region sizes must be equal "
|
raise ValueError("shape of upsampled region sizes must be equal " "to input data's number of dimensions.")
|
||||||
"to input data's number of dimensions.")
|
|
||||||
|
|
||||||
if axis_offsets is None:
|
if axis_offsets is None:
|
||||||
axis_offsets = [0, ] * data.ndim
|
axis_offsets = [
|
||||||
|
0,
|
||||||
|
] * data.ndim
|
||||||
else:
|
else:
|
||||||
if len(axis_offsets) != data.ndim:
|
if len(axis_offsets) != data.ndim:
|
||||||
raise ValueError("number of axis offsets must be equal to input "
|
raise ValueError("number of axis offsets must be equal to input " "data's number of dimensions.")
|
||||||
"data's number of dimensions.")
|
|
||||||
|
|
||||||
im2pi = 1j * 2 * np.pi
|
im2pi = 1j * 2 * np.pi
|
||||||
|
|
||||||
dim_properties = list(zip(data.shape, upsampled_region_size, axis_offsets))
|
dim_properties = list(zip(data.shape, upsampled_region_size, axis_offsets))
|
||||||
|
|
||||||
for (n_items, ups_size, ax_offset) in dim_properties[::-1]:
|
for n_items, ups_size, ax_offset in dim_properties[::-1]:
|
||||||
kernel = ((np.arange(ups_size) - ax_offset)[:, None]
|
kernel = (np.arange(ups_size) - ax_offset)[:, None] * fft.fftfreq(n_items, upsample_factor)
|
||||||
* fft.fftfreq(n_items, upsample_factor))
|
|
||||||
kernel = np.exp(-im2pi * kernel)
|
kernel = np.exp(-im2pi * kernel)
|
||||||
|
|
||||||
# Equivalent to:
|
# Equivalent to:
|
||||||
@@ -100,14 +101,11 @@ def _compute_error(cross_correlation_max, src_amp, target_amp):
|
|||||||
target_amp : float
|
target_amp : float
|
||||||
The normalized average image intensity of the target image
|
The normalized average image intensity of the target image
|
||||||
"""
|
"""
|
||||||
error = 1.0 - cross_correlation_max * cross_correlation_max.conj() /\
|
error = 1.0 - cross_correlation_max * cross_correlation_max.conj() / (src_amp * target_amp)
|
||||||
(src_amp * target_amp)
|
|
||||||
return np.sqrt(np.abs(error))
|
return np.sqrt(np.abs(error))
|
||||||
|
|
||||||
|
|
||||||
def phase_cross_correlation(reference_image, moving_image, *,
|
def phase_cross_correlation(reference_image, moving_image, *, upsample_factor=1, space="real", return_error=True, overlap_ratio=0.3):
|
||||||
upsample_factor=1, space="real",
|
|
||||||
return_error=True, overlap_ratio=0.3):
|
|
||||||
"""
|
"""
|
||||||
Efficient subpixel image translation registration by cross-correlation.
|
Efficient subpixel image translation registration by cross-correlation.
|
||||||
This code gives the same precision as the FFT upsampled cross-correlation
|
This code gives the same precision as the FFT upsampled cross-correlation
|
||||||
@@ -174,11 +172,11 @@ def phase_cross_correlation(reference_image, moving_image, *,
|
|||||||
raise ValueError("images must be same shape")
|
raise ValueError("images must be same shape")
|
||||||
|
|
||||||
# assume complex data is already in Fourier space
|
# assume complex data is already in Fourier space
|
||||||
if space.lower() == 'fourier':
|
if space.lower() == "fourier":
|
||||||
src_freq = reference_image
|
src_freq = reference_image
|
||||||
target_freq = moving_image
|
target_freq = moving_image
|
||||||
# real data needs to be fft'd.
|
# real data needs to be fft'd.
|
||||||
elif space.lower() == 'real':
|
elif space.lower() == "real":
|
||||||
src_freq = fft.fftn(reference_image)
|
src_freq = fft.fftn(reference_image)
|
||||||
target_freq = fft.fftn(moving_image)
|
target_freq = fft.fftn(moving_image)
|
||||||
else:
|
else:
|
||||||
@@ -190,8 +188,7 @@ def phase_cross_correlation(reference_image, moving_image, *,
|
|||||||
cross_correlation = fft.ifftn(image_product)
|
cross_correlation = fft.ifftn(image_product)
|
||||||
|
|
||||||
# Locate maximum
|
# Locate maximum
|
||||||
maxima = np.unravel_index(np.argmax(np.abs(cross_correlation)),
|
maxima = np.unravel_index(np.argmax(np.abs(cross_correlation)), cross_correlation.shape)
|
||||||
cross_correlation.shape)
|
|
||||||
midpoints = np.array([np.fix(axis_size / 2) for axis_size in shape])
|
midpoints = np.array([np.fix(axis_size / 2) for axis_size in shape])
|
||||||
|
|
||||||
shifts = np.stack(maxima).astype(np.float64)
|
shifts = np.stack(maxima).astype(np.float64)
|
||||||
@@ -214,13 +211,9 @@ def phase_cross_correlation(reference_image, moving_image, *,
|
|||||||
upsample_factor = np.array(upsample_factor, dtype=np.float64)
|
upsample_factor = np.array(upsample_factor, dtype=np.float64)
|
||||||
# Matrix multiply DFT around the current shift estimate
|
# Matrix multiply DFT around the current shift estimate
|
||||||
sample_region_offset = dftshift - shifts * upsample_factor
|
sample_region_offset = dftshift - shifts * upsample_factor
|
||||||
cross_correlation = _upsampled_dft(image_product.conj(),
|
cross_correlation = _upsampled_dft(image_product.conj(), upsampled_region_size, upsample_factor, sample_region_offset).conj()
|
||||||
upsampled_region_size,
|
|
||||||
upsample_factor,
|
|
||||||
sample_region_offset).conj()
|
|
||||||
# Locate maximum and map back to original pixel grid
|
# Locate maximum and map back to original pixel grid
|
||||||
maxima = np.unravel_index(np.argmax(np.abs(cross_correlation)),
|
maxima = np.unravel_index(np.argmax(np.abs(cross_correlation)), cross_correlation.shape)
|
||||||
cross_correlation.shape)
|
|
||||||
CCmax = cross_correlation[maxima]
|
CCmax = cross_correlation[maxima]
|
||||||
|
|
||||||
maxima = np.stack(maxima).astype(np.float64) - dftshift
|
maxima = np.stack(maxima).astype(np.float64) - dftshift
|
||||||
@@ -240,10 +233,8 @@ def phase_cross_correlation(reference_image, moving_image, *,
|
|||||||
if return_error:
|
if return_error:
|
||||||
# Redirect user to masked_phase_cross_correlation if NaNs are observed
|
# Redirect user to masked_phase_cross_correlation if NaNs are observed
|
||||||
if np.isnan(CCmax) or np.isnan(src_amp) or np.isnan(target_amp):
|
if np.isnan(CCmax) or np.isnan(src_amp) or np.isnan(target_amp):
|
||||||
raise ValueError(
|
raise ValueError("NaN values found, please remove NaNs from your input data")
|
||||||
"NaN values found, please remove NaNs from your input data")
|
|
||||||
|
|
||||||
return shifts, _compute_error(CCmax, src_amp, target_amp), \
|
return shifts, _compute_error(CCmax, src_amp, target_amp), _compute_phasediff(CCmax)
|
||||||
_compute_phasediff(CCmax)
|
|
||||||
else:
|
else:
|
||||||
return shifts
|
return shifts
|
||||||
|
|||||||
@@ -28,8 +28,8 @@ prototypes :
|
|||||||
"""
|
"""
|
||||||
|
|
||||||
import numpy as np
|
import numpy as np
|
||||||
from scipy.signal import convolve
|
|
||||||
from astropy.io import fits
|
from astropy.io import fits
|
||||||
|
from scipy.signal import convolve
|
||||||
|
|
||||||
|
|
||||||
def abs2(x):
|
def abs2(x):
|
||||||
@@ -118,7 +118,7 @@ def gaussian2d(x, y, sigma):
return np.exp(-(x**2 + y**2) / (2 * sigma**2)) / (2 * np.pi * sigma**2)


def gaussian_psf(FWHM=1.0, shape=(5, 5)):
"""
Define the gaussian Point-Spread-Function of chosen shape and FWHM.
----------
@@ -136,7 +136,7 @@ def gaussian_psf(FWHM=1., shape=(5, 5)):
Kernel containing the weights of the desired gaussian PSF.
"""
# Compute standard deviation from FWHM
stdev = FWHM / (2.0 * np.sqrt(2.0 * np.log(2.0)))

# Create kernel of desired shape
x, y = np.meshgrid(np.arange(-shape[0] / 2, shape[0] / 2), np.arange(-shape[1] / 2, shape[1] / 2))
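To see these two lines in context, here is a minimal sketch of the full kernel construction; the final normalisation to unit sum is an assumption, since that part of gaussian_psf lies outside this hunk:

```python
import numpy as np

def gaussian_psf_sketch(FWHM=1.0, shape=(5, 5)):
    # Standard deviation from FWHM: sigma = FWHM / (2 * sqrt(2 * ln 2))
    stdev = FWHM / (2.0 * np.sqrt(2.0 * np.log(2.0)))
    # Pixel grid centred on the kernel
    x, y = np.meshgrid(np.arange(-shape[0] / 2, shape[0] / 2), np.arange(-shape[1] / 2, shape[1] / 2))
    kernel = np.exp(-(x**2 + y**2) / (2 * stdev**2)) / (2 * np.pi * stdev**2)
    # Assumed normalisation so the kernel weights sum to one
    return kernel / kernel.sum()

psf = gaussian_psf_sketch(FWHM=2.0, shape=(7, 7))
```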
@@ -241,7 +241,7 @@ def van_cittert(image, psf, alpha=0.1, iterations=20, clip=True, filter_epsilon=
im_deconv = image.copy()

for _ in range(iterations):
conv = convolve(im_deconv, psf, mode="same")
if filter_epsilon:
relative_blur = np.where(conv < filter_epsilon, 0, image - conv)
else:
@@ -290,12 +290,12 @@ def richardson_lucy(image, psf, iterations=20, clip=True, filter_epsilon=None):
psf_mirror = np.flip(psf)

for _ in range(iterations):
conv = convolve(im_deconv, psf, mode="same")
if filter_epsilon:
relative_blur = np.where(conv < filter_epsilon, 0, image / conv)
else:
relative_blur = image / conv
im_deconv *= convolve(relative_blur, psf_mirror, mode="same")

if clip:
im_deconv[im_deconv > 1] = 1
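For readers unfamiliar with the algorithm, a self-contained sketch of the Richardson-Lucy loop shown above; the flat initial estimate and the toy image are assumptions made only for this example:

```python
import numpy as np
from scipy.signal import convolve

def richardson_lucy_sketch(image, psf, iterations=20, filter_epsilon=None):
    im_deconv = np.full(image.shape, 0.5)  # assumed flat first guess
    psf_mirror = np.flip(psf)
    for _ in range(iterations):
        conv = convolve(im_deconv, psf, mode="same")
        if filter_epsilon:
            relative_blur = np.where(conv < filter_epsilon, 0, image / conv)
        else:
            relative_blur = image / conv
        im_deconv *= convolve(relative_blur, psf_mirror, mode="same")
    return im_deconv

image = np.clip(np.random.default_rng(1).random((32, 32)), 1e-3, 1.0)
psf = np.ones((3, 3)) / 9.0
restored = richardson_lucy_sketch(image, psf, iterations=10)
```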
@@ -335,12 +335,12 @@ def one_step_gradient(image, psf, iterations=20, clip=True, filter_epsilon=None)
psf_mirror = np.flip(psf)

for _ in range(iterations):
conv = convolve(im_deconv, psf, mode="same")
if filter_epsilon:
relative_blur = np.where(conv < filter_epsilon, 0, image - conv)
else:
relative_blur = image - conv
im_deconv += convolve(relative_blur, psf_mirror, mode="same")

if clip:
im_deconv[im_deconv > 1] = 1
@@ -458,7 +458,7 @@ def conjgrad(image, psf, alpha=0.1, error=None, iterations=20):
r = np.copy(b)
x = np.zeros(b.shape, dtype=b.dtype)
rho = inner(r, r)
epsilon = np.max([0.0, 1e-5 * np.sqrt(rho)])

# Conjugate gradient iterations.
beta = 0.0
@@ -494,8 +494,7 @@ def conjgrad(image, psf, alpha=0.1, error=None, iterations=20):
return im_deconv


def deconvolve_im(image, psf, alpha=0.1, error=None, iterations=20, clip=True, filter_epsilon=None, algo="richardson"):
"""
Prepare an image for deconvolution using a chosen algorithm and return
results.
@@ -537,25 +536,21 @@ def deconvolve_im(image, psf, alpha=0.1, error=None, iterations=20, clip=True,
"""
# Normalize image to highest pixel value
pxmax = image[np.isfinite(image)].max()
if pxmax == 0.0:
raise ValueError("Invalid image")
norm_image = image / pxmax

# Deconvolve normalized image
if algo.lower() in ["wiener", "wiener simple"]:
norm_deconv = wiener(image=norm_image, psf=psf, alpha=alpha, clip=clip)
elif algo.lower() in ["van-cittert", "vancittert", "cittert"]:
norm_deconv = van_cittert(image=norm_image, psf=psf, alpha=alpha, iterations=iterations, clip=clip, filter_epsilon=filter_epsilon)
elif algo.lower() in ["1grad", "one_step_grad", "one step grad"]:
norm_deconv = one_step_gradient(image=norm_image, psf=psf, iterations=iterations, clip=clip, filter_epsilon=filter_epsilon)
elif algo.lower() in ["conjgrad", "conj_grad", "conjugate gradient"]:
norm_deconv = conjgrad(image=norm_image, psf=psf, alpha=alpha, error=error, iterations=iterations)
else:  # Defaults to Richardson-Lucy
norm_deconv = richardson_lucy(image=norm_image, psf=psf, iterations=iterations, clip=clip, filter_epsilon=filter_epsilon)

# Output deconvolved image with original pxmax value
im_deconv = pxmax * norm_deconv
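A hedged usage sketch of the dispatcher above. The import path is a guess from the repository layout (the module name may differ), and the test image is synthetic:

```python
import numpy as np

# Hypothetical import path; adjust to the actual module name in the package
from package.lib.deconvolve import deconvolve_im, gaussian_psf

image = np.random.default_rng(2).random((128, 128))
psf = gaussian_psf(FWHM=2.0, shape=(9, 9))

# Any unrecognised algo name falls back to Richardson-Lucy
restored = deconvolve_im(image, psf, iterations=20, clip=True, filter_epsilon=1e-6, algo="richardson")
```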
@@ -9,11 +9,14 @@ prototypes :
Save computed polarimetry parameters to a single fits file (and return HDUList)
"""

from os.path import join as path_join

import numpy as np
from astropy.io import fits
from astropy.wcs import WCS

from .convex_hull import clean_ROI
from .utils import wcs_PA


def get_obs_data(infiles, data_folder="", compute_flux=False):
@@ -36,59 +39,61 @@ def get_obs_data(infiles, data_folder="", compute_flux=False):
headers : header list
List of headers objects corresponding to each image in data_array.
"""
data_array, headers, wcs_array = [], [], []
for i in range(len(infiles)):
with fits.open(path_join(data_folder, infiles[i]), mode="update") as f:
headers.append(f[0].header)
data_array.append(f[0].data)
wcs_array.append(WCS(header=f[0].header, fobj=f).celestial)
f.flush()
data_array = np.array(data_array, dtype=np.double)

# Prevent negative count value in imported data
for i in range(len(data_array)):
data_array[i][data_array[i] < 0.0] = 0.0

# force WCS to convention PCi_ja unitary, cdelt in deg
for wcs, header in zip(wcs_array, headers):
new_wcs = wcs.deepcopy()
if new_wcs.wcs.has_cd() or (new_wcs.wcs.cdelt[:2] == np.array([1.0, 1.0])).all():
# Update WCS with relevant information
if new_wcs.wcs.has_cd():
del new_wcs.wcs.cd
keys = list(new_wcs.to_header().keys()) + ["CD1_1", "CD1_2", "CD1_3", "CD2_1", "CD2_2", "CD2_3", "CD3_1", "CD3_2", "CD3_3"]
for key in keys:
header.remove(key, ignore_missing=True)
new_cdelt = np.linalg.eigvals(wcs.wcs.cd)
new_cdelt.sort()
new_wcs.wcs.pc = wcs.wcs.cd.dot(np.diag(1.0 / new_cdelt))
new_wcs.wcs.cdelt = new_cdelt
for key, val in new_wcs.to_header().items():
header[key] = val
try:
_ = header["ORIENTAT"]
except KeyError:
header["ORIENTAT"] = wcs_PA(new_wcs.wcs.pc[1, 0], np.diag(new_wcs.wcs.pc).mean())

# force WCS for POL60 to have same pixel size as POL0 and POL120
is_pol60 = np.array([head["filtnam1"].lower() == "pol60" for head in headers], dtype=bool)
cdelt = np.round(np.array([WCS(head).wcs.cdelt[:2] for head in headers]), 10)
if np.unique(cdelt[np.logical_not(is_pol60)], axis=0).size != 2:
print(np.unique(cdelt[np.logical_not(is_pol60)], axis=0))
raise ValueError("Not all images have same pixel size")
else:
for i in np.arange(len(headers))[is_pol60]:
headers[i]["cdelt1"], headers[i]["cdelt2"] = np.unique(cdelt[np.logical_not(is_pol60)], axis=0)[0]

if compute_flux:
for i in range(len(infiles)):
# Compute the flux in counts/sec
data_array[i] /= headers[i]["EXPTIME"]

return data_array, headers


def save_Stokes(
I_stokes, Q_stokes, U_stokes, Stokes_cov, P, debiased_P, s_P, s_P_P, PA, s_PA, s_PA_P, header_stokes, data_mask, filename, data_folder="", return_hdul=False
):
"""
Save computed polarimetry parameters to a single fits file,
updating header accordingly.
@@ -124,9 +129,7 @@ def save_Stokes(I_stokes, Q_stokes, U_stokes, Stokes_cov, P, debiased_P, s_P,
Only returned if return_hdul is True.
"""
# Create new WCS object given the modified images
new_wcs = WCS(header_stokes).deepcopy()

if data_mask.shape != (1, 1):
vertex = clean_ROI(data_mask)
@@ -135,19 +138,23 @@ def save_Stokes(I_stokes, Q_stokes, U_stokes, Stokes_cov, P, debiased_P, s_P,
new_wcs.wcs.crpix = np.array(new_wcs.wcs.crpix) - vertex[0::-2]

header = new_wcs.to_header()
header["TELESCOP"] = (header_stokes["TELESCOP"] if "TELESCOP" in list(header_stokes.keys()) else "HST", "telescope used to acquire data")
header["INSTRUME"] = (header_stokes["INSTRUME"] if "INSTRUME" in list(header_stokes.keys()) else "FOC", "identifier for instrument used to acuire data")
header["PHOTPLAM"] = (header_stokes["PHOTPLAM"], "Pivot Wavelength")
header["PHOTFLAM"] = (header_stokes["PHOTFLAM"], "Inverse Sensitivity in DN/sec/cm**2/Angst")
header["EXPTIME"] = (header_stokes["EXPTIME"], "Total exposure time in sec")
header["PROPOSID"] = (header_stokes["PROPOSID"], "PEP proposal identifier for observation")
header["TARGNAME"] = (header_stokes["TARGNAME"], "Target name")
header["ORIENTAT"] = (header_stokes["ORIENTAT"], "Angle between North and the y-axis of the image")
header["FILENAME"] = (filename, "ORIGINAL FILENAME")
header["BKG_TYPE"] = (header_stokes["BKG_TYPE"], "Bkg estimation method used during reduction")
header["BKG_SUB"] = (header_stokes["BKG_SUB"], "Amount of bkg subtracted from images")
header["SMOOTH"] = (header_stokes["SMOOTH"] if "SMOOTH" in list(header_stokes.keys()) else "None", "Smoothing method used during reduction")
header["SAMPLING"] = (header_stokes["SAMPLING"] if "SAMPLING" in list(header_stokes.keys()) else "None", "Resampling performed during reduction")
header["P_INT"] = (header_stokes["P_INT"], "Integrated polarization degree")
header["sP_INT"] = (header_stokes["sP_INT"], "Integrated polarization degree error")
header["PA_INT"] = (header_stokes["PA_INT"], "Integrated polarization angle")
header["sPA_INT"] = (header_stokes["sPA_INT"], "Integrated polarization angle error")

# Crop Data to mask
if data_mask.shape != (1, 1):
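The keyword block above relies on astropy's Header convention that assigning a (value, comment) tuple stores both the value and its comment; a small check with made-up values:

```python
from astropy.io import fits

header = fits.Header()
header["TELESCOP"] = ("HST", "telescope used to acquire data")
header["EXPTIME"] = (1196.0, "Total exposure time in sec")   # illustrative value
header["P_INT"] = (0.12, "Integrated polarization degree")   # illustrative value

print(header["EXPTIME"])            # 1196.0
print(header.comments["EXPTIME"])   # Total exposure time in sec
```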
@@ -165,7 +172,7 @@ def save_Stokes(I_stokes, Q_stokes, U_stokes, Stokes_cov, P, debiased_P, s_P,
new_Stokes_cov = np.zeros((*Stokes_cov.shape[:-2], *shape[::-1]))
for i in range(3):
for j in range(3):
Stokes_cov[i, j][(1 - data_mask).astype(bool)] = 0.0
new_Stokes_cov[i, j] = Stokes_cov[i, j][vertex[2] : vertex[3], vertex[0] : vertex[1]]
Stokes_cov = new_Stokes_cov
@@ -176,23 +183,30 @@ def save_Stokes(I_stokes, Q_stokes, U_stokes, Stokes_cov, P, debiased_P, s_P,
hdul = fits.HDUList([])

# Add I_stokes as PrimaryHDU
header["datatype"] = ("I_stokes", "type of data stored in the HDU")
I_stokes[(1 - data_mask).astype(bool)] = 0.0
primary_hdu = fits.PrimaryHDU(data=I_stokes, header=header)
primary_hdu.name = "I_stokes"
hdul.append(primary_hdu)

# Add Q, U, Stokes_cov, P, s_P, PA, s_PA to the HDUList
for data, name in [
[Q_stokes, "Q_stokes"],
[U_stokes, "U_stokes"],
[Stokes_cov, "IQU_cov_matrix"],
[P, "Pol_deg"],
[debiased_P, "Pol_deg_debiased"],
[s_P, "Pol_deg_err"],
[s_P_P, "Pol_deg_err_Poisson_noise"],
[PA, "Pol_ang"],
[s_PA, "Pol_ang_err"],
[s_PA_P, "Pol_ang_err_Poisson_noise"],
[data_mask, "Data_mask"],
]:
hdu_header = header.copy()
hdu_header["datatype"] = name
if not name == "IQU_cov_matrix":
data[(1 - data_mask).astype(bool)] = 0.0
hdu = fits.ImageHDU(data=data, header=hdu_header)
hdu.name = name
hdul.append(hdu)
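A condensed sketch of the HDUList pattern used above, with placeholder arrays: I_stokes goes into the PrimaryHDU and every other product becomes a named ImageHDU carrying a 'datatype' keyword:

```python
import numpy as np
from astropy.io import fits

I_stokes = np.zeros((10, 10))
Q_stokes = np.zeros((10, 10))
header = fits.Header()

header["datatype"] = ("I_stokes", "type of data stored in the HDU")
primary_hdu = fits.PrimaryHDU(data=I_stokes, header=header)
primary_hdu.name = "I_stokes"
hdul = fits.HDUList([primary_hdu])

for data, name in [[Q_stokes, "Q_stokes"]]:
    hdu_header = header.copy()
    hdu_header["datatype"] = name
    hdu = fits.ImageHDU(data=data, header=hdu_header)
    hdu.name = name
    hdul.append(hdu)

# hdul.writeto("stokes.fits", overwrite=True)  # output path left to the caller
```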
2149
package/lib/plots.py
File diff suppressed because it is too large
@@ -3,34 +3,44 @@
"""
Library function to query and download datatsets from MAST api.
"""

from os import system
from os.path import exists as path_exists
from os.path import join as path_join
from warnings import filterwarnings

import astropy.units as u
import numpy as np
from astropy.table import Column, unique
from astropy.time import Time, TimeDelta
from astroquery.exceptions import NoResultsWarning
from astroquery.mast import MastMissions, Observations

filterwarnings("error", category=NoResultsWarning)


def divide_proposal(products):
"""
Divide observation in proposals by time or filter
"""
for pid in np.unique(products["Proposal ID"]):
obs = products[products["Proposal ID"] == pid].copy()
same_filt = np.unique(np.array(np.sum([obs["Filters"][:, 1:] == filt[1:] for filt in obs["Filters"]], axis=2) < 3, dtype=bool), axis=0)
if len(same_filt) > 1:
for filt in same_filt:
products["Proposal ID"][np.any([products["Dataset"] == dataset for dataset in obs["Dataset"][filt]], axis=0)] = "_".join(
[obs["Proposal ID"][filt][0], "_".join([fi for fi in obs["Filters"][filt][0][1:] if fi[:-1] != "CLEAR"])]
)
for pid in np.unique(products["Proposal ID"]):
obs = products[products["Proposal ID"] == pid].copy()
close_date = np.unique(
[[np.abs(TimeDelta(obs["Start"][i].unix - date.unix, format="sec")) < 7.0 * u.d for i in range(len(obs))] for date in obs["Start"]], axis=0
)
if len(close_date) > 1:
for date in close_date:
products["Proposal ID"][np.any([products["Dataset"] == dataset for dataset in obs["Dataset"][date]], axis=0)] = "_".join(
[obs["Proposal ID"][date][0], str(obs["Start"][date][0])[:10]]
)
return products
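The close_date grouping above boils down to testing whether two exposure start times fall within seven days of each other. A self-contained check of that comparison with made-up dates, written with plain Quantity arithmetic rather than TimeDelta (the resulting boolean matrix is the same):

```python
import astropy.units as u
import numpy as np
from astropy.time import Time

starts = Time(["2024-01-01", "2024-01-05", "2024-03-01"])  # illustrative dates

# Boolean matrix: True where two start times are less than 7 days apart
close_date = np.unique(
    [[abs(starts[i].unix - date.unix) * u.s < 7.0 * u.d for i in range(len(starts))] for date in starts], axis=0
)
print(close_date)  # two groups: the January pair and the lone March observation
```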
@@ -38,53 +48,36 @@ def get_product_list(target=None, proposal_id=None):
"""
Retrieve products list for a given target from the MAST archive
"""
mission = MastMissions(mission="hst")
radius = "3"
select_cols = [
"sci_data_set_name",
"sci_spec_1234",
"sci_actual_duration",
"sci_start_time",
"sci_stop_time",
"sci_central_wavelength",
"sci_instrume",
"sci_aper_1234",
"sci_targname",
"sci_pep_id",
"sci_pi_last_name",
]

cols = ["Dataset", "Filters", "Exptime", "Start", "Stop", "Central wavelength", "Instrument", "Size", "Target name", "Proposal ID", "PI last name"]

if target is None:
target = input("Target name:\n>")

# Use query_object method to resolve the object name into coordinates
results = mission.query_object(target, radius=radius, select_cols=select_cols, sci_spec_1234="POL*", sci_obs_type="image", sci_aec="S", sci_instrume="foc")

for c, n_c in zip(select_cols, cols):
results.rename_column(c, n_c)
results["Proposal ID"] = Column(results["Proposal ID"], dtype="U35")
results["Filters"] = Column(np.array([filt.split(";") for filt in results["Filters"]], dtype=str))
results["Start"] = Column(Time(results["Start"]))
results["Stop"] = Column(Time(results["Stop"]))

results = divide_proposal(results)
obs = results.copy()
@@ -92,67 +85,70 @@ def get_product_list(target=None, proposal_id=None):
# Remove single observations for which a FIND filter is used
to_remove = []
for i in range(len(obs)):
if "F1ND" in obs[i]["Filters"]:
to_remove.append(i)
obs.remove_rows(to_remove)
# Remove observations for which a polarization filter is missing
polfilt = {"POL0": 0, "POL60": 1, "POL120": 2}
for pid in np.unique(obs["Proposal ID"]):
used_pol = np.zeros(3)
for dataset in obs[obs["Proposal ID"] == pid]:
used_pol[polfilt[dataset["Filters"][0]]] += 1
if np.any(used_pol < 1):
obs.remove_rows(np.arange(len(obs))[obs["Proposal ID"] == pid])

tab = unique(obs, ["Target name", "Proposal ID"])
obs["Obs"] = [np.argmax(np.logical_and(tab["Proposal ID"] == data["Proposal ID"], tab["Target name"] == data["Target name"])) + 1 for data in obs]
try:
n_obs = unique(obs[["Obs", "Filters", "Start", "Central wavelength", "Instrument", "Size", "Target name", "Proposal ID", "PI last name"]], "Obs")
except IndexError:
raise ValueError("There is no observation with POL0, POL60 and POL120 for {0:s} in HST/FOC Legacy Archive".format(target))

b = np.zeros(len(results), dtype=bool)
if proposal_id is not None and str(proposal_id) in obs["Proposal ID"]:
b[results["Proposal ID"] == str(proposal_id)] = True
else:
n_obs.pprint(len(n_obs) + 2)
a = [
np.array(i.split(":"), dtype=str)
for i in input("select observations to be downloaded ('1,3,4,5' or '1,3:5' or 'all','*' default to 1)\n>").split(",")
]
if a[0][0] == "":
a = [[1]]
if a[0][0] in ["a", "all", "*"]:
b = np.ones(len(results), dtype=bool)
else:
a = [np.array(i, dtype=int) for i in a]
for i in a:
if len(i) > 1:
for j in range(i[0], i[1] + 1):
b[np.array([dataset in obs["Dataset"][obs["Obs"] == j] for dataset in results["Dataset"]])] = True
else:
b[np.array([dataset in obs["Dataset"][obs["Obs"] == i[0]] for dataset in results["Dataset"]])] = True

observations = Observations.query_criteria(obs_id=list(results["Dataset"][b]))
products = Observations.filter_products(
Observations.get_product_list(observations),
productType=["SCIENCE"],
dataproduct_type=["image"],
calib_level=[2],
description="DADS C0F file - Calibrated exposure WFPC/WFPC2/FOC/FOS/GHRS/HSP",
)
products["proposal_id"] = Column(products["proposal_id"], dtype="U35")
products["target_name"] = Column(observations["target_name"])

for prod in products:
prod["proposal_id"] = results["Proposal ID"][results["Dataset"] == prod["productFilename"][: len(results["Dataset"][0])].upper()][0]

for prod in products:
prod["target_name"] = observations["target_name"][observations["obsid"] == prod["obsID"]][0]
tab = unique(products, ["target_name", "proposal_id"])

products["Obs"] = [np.argmax(np.logical_and(tab["proposal_id"] == data["proposal_id"], tab["target_name"] == data["target_name"])) + 1 for data in products]
return target, products


def retrieve_products(target=None, proposal_id=None, output_dir="./data"):
"""
Given a target name and a proposal_id, create the local directories and retrieve the fits files from the MAST Archive
"""
@@ -160,18 +156,19 @@ def retrieve_products(target=None, proposal_id=None, output_dir="./data"):
prodpaths = []
# data_dir = path_join(output_dir, target)
out = ""
for obs in unique(products, "Obs"):
filepaths = []
# obs_dir = path_join(data_dir, obs['prodposal_id'])
# if obs['target_name']!=target:
obs_dir = path_join(path_join(output_dir, target), obs["proposal_id"])
if not path_exists(obs_dir):
system("mkdir -p {0:s} {1:s}".format(obs_dir, obs_dir.replace("data", "plots")))
for file in products["productFilename"][products["Obs"] == obs["Obs"]]:
fpath = path_join(obs_dir, file)
if not path_exists(fpath):
out += "{0:s} : {1:s}\n".format(
file, Observations.download_file(products["dataURI"][products["productFilename"] == file][0], local_path=fpath)[0]
)
else:
out += "{0:s} : Exists\n".format(file)
filepaths.append([obs_dir, file])
@@ -183,13 +180,13 @@ def retrieve_products(target=None, proposal_id=None, output_dir="./data"):
if __name__ == "__main__":
import argparse

parser = argparse.ArgumentParser(description="Query MAST for target products")
parser.add_argument("-t", "--target", metavar="targetname", required=False, help="the name of the target", type=str, default=None)
parser.add_argument("-p", "--proposal_id", metavar="proposal_id", required=False, help="the proposal id of the data products", type=int, default=None)
parser.add_argument(
"-o", "--output_dir", metavar="directory_path", required=False, help="output directory path for the data products", type=str, default="./data"
)
args = parser.parse_args()
print(args.target)
prodpaths = retrieve_products(target=args.target, proposal_id=args.proposal_id)
print(prodpaths)
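For reference, a pared-down sketch of the same astroquery calls used in retrieve_products above; the obs_id is a placeholder and the local path is illustrative:

```python
from astroquery.mast import Observations

observations = Observations.query_criteria(obs_id=["x14h0101t"])  # placeholder dataset name
products = Observations.filter_products(
    Observations.get_product_list(observations),
    productType=["SCIENCE"],
    dataproduct_type=["image"],
    calib_level=[2],
)
for uri, name in zip(products["dataURI"], products["productFilename"]):
    result = Observations.download_file(uri, local_path="./data/{0}".format(name))
    print(name, result[0])
```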
File diff suppressed because it is too large
@@ -1,5 +1,6 @@
import numpy as np


def rot2D(ang):
"""
Return the 2D rotation matrix of given angle in degrees
@@ -17,10 +18,10 @@ def princ_angle(ang):
A = np.array([ang])
else:
A = np.array(ang)
while np.any(A < 0.0):
A[A < 0.0] = A[A < 0.0] + 360.0
while np.any(A >= 180.0):
A[A >= 180.0] = A[A >= 180.0] - 180.0
if type(ang) is type(A):
return A
else:
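The two while loops above wrap angles into the [0, 180) range; the same arithmetic can be written with a single modulo, shown here purely as an illustration (princ_angle additionally preserves the scalar or array type of its input):

```python
import numpy as np

def princ_angle_mod(ang):
    # Wrap angles in degrees into [0, 180)
    return np.mod(np.asarray(ang, dtype=float), 180.0)

print(princ_angle_mod([-30.0, 190.0, 365.0]))  # [150.  10.   5.]
```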
@@ -31,7 +32,7 @@ def sci_not(v, err, rnd=1, out=str):
"""
Return the scientifque error notation as a string.
"""
power = -int(("%E" % v)[-3:]) + 1
output = [r"({0}".format(round(v * 10**power, rnd)), round(v * 10**power, rnd)]
if isinstance(err, list):
for error in err:
@@ -40,7 +41,22 @@ def sci_not(v, err, rnd=1, out=str):
else:
output[0] += r" $\pm$ {0}".format(round(err * 10**power, rnd))
output.append(round(err * 10**power, rnd))
if out is str:
return output[0] + r")e{0}".format(-power)
else:
return *output[1:], -power


def wcs_PA(PC21, PC22):
"""
Return the position angle in degrees to the North direction of a wcs
from the values of coefficient of its transformation matrix.
"""
if (abs(PC21) > abs(PC22)) and (PC21 >= 0):
orient = -np.arccos(PC22) * 180.0 / np.pi
elif (abs(PC21) > abs(PC22)) and (PC21 < 0):
orient = np.arccos(PC22) * 180.0 / np.pi
elif (abs(PC21) < abs(PC22)) and (PC22 >= 0):
orient = np.arccos(PC22) * 180.0 / np.pi
elif (abs(PC21) < abs(PC22)) and (PC22 < 0):
orient = -np.arccos(PC22) * 180.0 / np.pi
return orient
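A quick numerical check of the branch logic in wcs_PA, on a synthetic PC matrix built from a known 30-degree rotation (the angle and matrix are made up for the example; the call pattern mirrors the one in get_obs_data):

```python
import numpy as np

# PC matrix of a WCS rotated by 30 degrees
theta = np.deg2rad(30.0)
pc = np.array([[np.cos(theta), -np.sin(theta)],
               [np.sin(theta), np.cos(theta)]])

PC21, PC22 = pc[1, 0], np.diag(pc).mean()
# Here |PC21| < |PC22| and PC22 >= 0, so wcs_PA returns +arccos(PC22)
orient = np.arccos(PC22) * 180.0 / np.pi
print(orient)  # ~30.0
```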
@@ -1,7 +1,7 @@
#!/usr/bin/python3
import numpy as np
from astropy.io import fits
from lib.plots import overplot_pol, overplot_radio
from matplotlib.colors import LogNorm

Stokes_UV = fits.open("./data/IC5063/5918/IC5063_FOC_b0.10arcsec_c0.20arcsec.fits")
@@ -14,31 +14,37 @@ Stokes_357GHz = fits.open("./data/IC5063/radio/IC5063_357GHz.fits")
Stokes_IR = fits.open("./data/IC5063/IR/u2e65g01t_c0f_rot.fits")

# levelsMorganti = np.array([1.,2.,3.,8.,16.,32.,64.,128.])
levelsMorganti = np.logspace(-0.1249, 1.97, 7) / 100.0

levels18GHz = levelsMorganti * Stokes_18GHz[0].data.max()
A = overplot_radio(Stokes_UV, Stokes_18GHz)
A.plot(levels=levels18GHz, SNRp_cut=2.0, SNRi_cut=10.0, savename="./plots/IC5063/18GHz_overplot.pdf", vec_scale=None)

levels24GHz = levelsMorganti * Stokes_24GHz[0].data.max()
B = overplot_radio(Stokes_UV, Stokes_24GHz)
B.plot(levels=levels24GHz, SNRp_cut=2.0, SNRi_cut=10.0, savename="./plots/IC5063/24GHz_overplot.pdf", vec_scale=None)

levels103GHz = levelsMorganti * Stokes_103GHz[0].data.max()
C = overplot_radio(Stokes_UV, Stokes_103GHz)
C.plot(levels=levels103GHz, SNRp_cut=2.0, SNRi_cut=10.0, savename="./plots/IC5063/103GHz_overplot.pdf", vec_scale=None)

levels229GHz = levelsMorganti * Stokes_229GHz[0].data.max()
D = overplot_radio(Stokes_UV, Stokes_229GHz)
D.plot(levels=levels229GHz, SNRp_cut=2.0, SNRi_cut=10.0, savename="./plots/IC5063/229GHz_overplot.pdf", vec_scale=None)

levels357GHz = levelsMorganti * Stokes_357GHz[0].data.max()
E = overplot_radio(Stokes_UV, Stokes_357GHz)
E.plot(levels=levels357GHz, SNRp_cut=2.0, SNRi_cut=10.0, savename="./plots/IC5063/357GHz_overplot.pdf", vec_scale=None)

# F = overplot_pol(Stokes_UV, Stokes_S2)
# F.plot(SNRp_cut=3.0, SNRi_cut=80.0, savename='./plots/IC5063/S2_overplot.pdf', norm=LogNorm(vmin=5e-20,vmax=5e-18))

G = overplot_pol(Stokes_UV, Stokes_IR, cmap="inferno")
G.plot(
SNRp_cut=2.0,
SNRi_cut=10.0,
savename="./plots/IC5063/IR_overplot.pdf",
vec_scale=None,
norm=LogNorm(Stokes_IR[0].data.max() * Stokes_IR[0].header["photflam"] / 1e3, Stokes_IR[0].data.max() * Stokes_IR[0].header["photflam"]),
cmap="inferno_r",
)
@@ -1,6 +1,6 @@
#!/usr/bin/python3
import numpy as np
from astropy.io import fits
from lib.plots import overplot_chandra, overplot_pol
from matplotlib.colors import LogNorm

@@ -8,13 +8,13 @@ Stokes_UV = fits.open("./data/MRK463E/5960/MRK463E_FOC_b0.05arcsec_c0.10arcsec.f
Stokes_IR = fits.open("./data/MRK463E/WFPC2/IR_rot_crop.fits")
Stokes_Xr = fits.open("./data/MRK463E/Chandra/X_ray_crop.fits")

levels = np.geomspace(1.0, 99.0, 7)

A = overplot_chandra(Stokes_UV, Stokes_Xr, norm=LogNorm())
A.plot(levels=levels, SNRp_cut=3.0, SNRi_cut=3.0, vec_scale=5, zoom=1, savename="./plots/MRK463E/Chandra_overplot.pdf")
A.write_to(path1="./data/MRK463E/FOC_data_Chandra.fits", path2="./data/MRK463E/Chandra_data.fits", suffix="aligned")

levels = np.array([0.8, 2, 5, 10, 20, 50]) / 100.0 * Stokes_UV[0].header["photflam"]
B = overplot_pol(Stokes_UV, Stokes_IR, norm=LogNorm())
B.plot(levels=levels, SNRp_cut=3.0, SNRi_cut=3.0, vec_scale=5, norm=LogNorm(8.5e-18, 2.5e-15), savename="./plots/MRK463E/IR_overplot.pdf")
B.write_to(path1="./data/MRK463E/FOC_data_WFPC.fits", path2="./data/MRK463E/WFPC_data.fits", suffix="aligned")
@@ -1,5 +1,6 @@
#!/usr/bin/python
from getopt import error as get_error
from getopt import getopt
from sys import argv

arglist = argv[1:]
@@ -24,7 +25,7 @@ try:
elif curr_arg in ("-i", "--snri"):
SNRi_cut = int(curr_val)
elif curr_arg in ("-l", "--lim"):
flux_lim = list("".join(curr_val).split(","))
except get_error as err:
print(str(err))
@@ -1,19 +1,21 @@
#!/usr/bin/python


def main(infiles=None):
"""
Retrieve native spatial resolution from given observation.
"""
from os.path import join as path_join
from warnings import catch_warnings, filterwarnings

from astropy.io.fits import getheader
from astropy.wcs import WCS, FITSFixedWarning
from numpy.linalg import eig

if infiles is None:
print('Usage: "python get_cdelt.py -f infiles"')
return 1
prod = [["/".join(filepath.split("/")[:-1]), filepath.split("/")[-1]] for filepath in infiles]
data_folder = prod[0][0]
infiles = [p[1] for p in prod]
@@ -21,14 +23,14 @@ def main(infiles=None):
size = {}
for currfile in infiles:
with catch_warnings():
filterwarnings("ignore", message="'datfix' made the change", category=FITSFixedWarning)
wcs = WCS(getheader(path_join(data_folder, currfile))).celestial
key = currfile[:-5]
size[key] = wcs.array_shape
if wcs.wcs.has_cd():
cdelt[key] = eig(wcs.wcs.cd)[0] * 3600.0
else:
cdelt[key] = wcs.wcs.cdelt * 3600.0

print("Image name, native resolution in arcsec and shape")
for currfile in infiles:
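A self-contained version of the pixel-scale extraction above, using a toy CD matrix instead of a FITS header (the 0.014 arcsec scale is illustrative):

```python
import numpy as np
from numpy.linalg import eig

# Toy CD matrix in degrees/pixel: RA axis flipped, 0.014 arcsec pixels, no rotation
scale_deg = 0.014 / 3600.0
cd = np.array([[-scale_deg, 0.0], [0.0, scale_deg]])

cdelt_arcsec = eig(cd)[0] * 3600.0
print(cdelt_arcsec)  # [-0.014  0.014]
```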
@@ -41,7 +43,7 @@ def main(infiles=None):
if __name__ == "__main__":
import argparse

parser = argparse.ArgumentParser(description="Query MAST for target products")
parser.add_argument("-f", "--files", metavar="path", required=False, nargs="*", help="the full or relative path to the data products", default=None)
args = parser.parse_args()
exitcode = main(infiles=args.files)