reformat code using python-lsp-ruff

2024-07-01 15:21:52 +02:00
parent 271ecbb631
commit 5a62fa4983
13 changed files with 1271 additions and 860 deletions
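
The reformatting below amounts to an isort-style import sort plus a ruff formatting pass with a long line-length limit. The exact ruff configuration is not recorded in this commit, so the following is only a rough sketch of how an equivalent pass could be reproduced (paths and line length are assumptions):

# Hypothetical reproduction of a formatting pass like this one; assumes the
# ruff CLI is installed. The line length is a guess based on the width of the
# reformatted lines below (roughly 150 characters).
import subprocess

def run_ruff(paths=(".",), line_length=150):
    # Sort imports with ruff's isort-compatible "I" rules, then run the formatter.
    subprocess.run(["ruff", "check", "--select", "I", "--fix", *paths], check=True)
    subprocess.run(["ruff", "format", "--line-length", str(line_length), *paths], check=True)

run_ruff()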


@@ -5,14 +5,15 @@ Main script where are progressively added the steps for the FOC pipeline reducti
"""
# Project libraries
import numpy as np
from copy import deepcopy
from os import system
from os.path import exists as path_exists
import lib.fits as proj_fits # Functions to handle fits files
import lib.reduction as proj_red # Functions used in reduction pipeline
import lib.plots as proj_plots # Functions for plotting data
from lib.utils import sci_not, princ_angle
import lib.fits as proj_fits # Functions to handle fits files
import lib.plots as proj_plots # Functions for plotting data
import lib.reduction as proj_red # Functions used in reduction pipeline
import numpy as np
from lib.utils import princ_angle, sci_not
from matplotlib.colors import LogNorm
@@ -22,10 +23,10 @@ def main(target=None, proposal_id=None, infiles=None, output_dir="./data", crop=
deconvolve = False
if deconvolve:
# from lib.deconvolve import from_file_psf
psf = 'gaussian' # Can be user-defined as well
psf = "gaussian" # Can be user-defined as well
# psf = from_file_psf(data_folder+psf_file)
psf_FWHM = 3.1
psf_scale = 'px'
psf_scale = "px"
psf_shape = None # (151, 151)
iterations = 1
algo = "conjgrad"
@@ -34,45 +35,45 @@ def main(target=None, proposal_id=None, infiles=None, output_dir="./data", crop=
display_crop = False
# Background estimation
error_sub_type = 'freedman-diaconis' # sqrt, sturges, rice, scott, freedman-diaconis (default) or shape (example (51, 51))
subtract_error = 0.01
display_bkg = True
error_sub_type = "freedman-diaconis" # sqrt, sturges, rice, scott, freedman-diaconis (default) or shape (example (51, 51))
subtract_error = 1.0
display_bkg = False
# Data binning
rebin = True
pxsize = 2
px_scale = 'px' # pixel, arcsec or full
rebin_operation = 'sum' # sum or average
px_scale = "px" # pixel, arcsec or full
rebin_operation = "sum" # sum or average
# Alignment
align_center = 'center' # If None will not align the images
display_align = True
align_center = "center" # If None will not align the images
display_align = False
display_data = False
# Transmittance correction
transmitcorr = True
# Smoothing
smoothing_function = 'combine' # gaussian_after, weighted_gaussian_after, gaussian, weighted_gaussian or combine
smoothing_FWHM = None # If None, no smoothing is done
smoothing_scale = 'px' # pixel or arcsec
smoothing_function = "combine" # gaussian_after, weighted_gaussian_after, gaussian, weighted_gaussian or combine
smoothing_FWHM = 1.5 # If None, no smoothing is done
smoothing_scale = "px" # pixel or arcsec
# Rotation
rotate_data = False # rotation to North convention can give erroneous results
rotate_data = False # rotation to North convention can give erroneous results
rotate_stokes = True
# Polarization map output
SNRp_cut = 3. # P measurements with SNR>3
SNRi_cut = 3. # I measurements with SNR>30, which implies an uncertainty in P of 4.7%.
flux_lim = None # lowest and highest flux displayed on plot, defaults to bkg and maximum in cut if None
vec_scale = 5
step_vec = 1 # plot all vectors in the array; if step_vec = 2, every other vector is plotted; if step_vec = 0, all vectors are displayed at full length
SNRp_cut = 3.0  # P measurements with SNR>3
SNRi_cut = 3.0  # I measurements with SNR>30, which implies an uncertainty in P of 4.7%.
flux_lim = None # lowest and highest flux displayed on plot, defaults to bkg and maximum in cut if None
vec_scale = 3
step_vec = 1  # plot all vectors in the array; if step_vec = 2, every other vector is plotted; if step_vec = 0, all vectors are displayed at full length
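# Note: the 4.7 % figure in the SNRi comment above is consistent with the usual
# photon-noise approximation sigma_P ~ sqrt(2)/SNR_I (sqrt(2)/30 ~ 0.047); that
# relation is an assumption of this note and is not spelled out in the code itself.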
# Pipeline start
# Step 1:
# Get data from fits files and translate to flux in erg/cm²/s/Angstrom.
if infiles is not None:
prod = np.array([["/".join(filepath.split('/')[:-1]), filepath.split('/')[-1]] for filepath in infiles], dtype=str)
prod = np.array([["/".join(filepath.split("/")[:-1]), filepath.split("/")[-1]] for filepath in infiles], dtype=str)
obs_dir = "/".join(infiles[0].split("/")[:-1])
if not path_exists(obs_dir):
system("mkdir -p {0:s} {1:s}".format(obs_dir, obs_dir.replace("data", "plots")))
@@ -80,6 +81,7 @@ def main(target=None, proposal_id=None, infiles=None, output_dir="./data", crop=
target = input("Target name:\n>")
else:
from lib.query import retrieve_products
target, products = retrieve_products(target, proposal_id, output_dir=output_dir)
prod = products.pop()
for prods in products:
@@ -97,21 +99,23 @@ def main(target=None, proposal_id=None, infiles=None, output_dir="./data", crop=
figname = "_".join([target, "FOC"])
figtype = ""
if rebin:
if px_scale not in ['full']:
figtype = "".join(["b", "{0:.2f}".format(pxsize), px_scale]) # additional information
if px_scale not in ["full"]:
figtype = "".join(["b", "{0:.2f}".format(pxsize), px_scale])  # additional information
else:
figtype = "full"
if smoothing_FWHM is not None:
figtype += "_"+"".join(["".join([s[0] for s in smoothing_function.split("_")]),
"{0:.2f}".format(smoothing_FWHM), smoothing_scale]) # additionnal informations
figtype += "_" + "".join(
["".join([s[0] for s in smoothing_function.split("_")]), "{0:.2f}".format(smoothing_FWHM), smoothing_scale]
) # additional information
if deconvolve:
figtype += "_deconv"
if align_center is None:
figtype += "_not_aligned"
# Crop data to remove outside blank margins.
data_array, error_array, headers = proj_red.crop_array(data_array, headers, step=5, null_val=0.,
inside=True, display=display_crop, savename=figname, plots_folder=plots_folder)
data_array, error_array, headers = proj_red.crop_array(
data_array, headers, step=5, null_val=0.0, inside=True, display=display_crop, savename=figname, plots_folder=plots_folder
)
data_mask = np.ones(data_array[0].shape, dtype=bool)
# Deconvolve data using Richardson-Lucy iterative algorithm with a gaussian PSF of given FWHM.
@@ -120,36 +124,68 @@ def main(target=None, proposal_id=None, infiles=None, output_dir="./data", crop=
# Estimate error from the data background, computed over a sub-image of the desired sub_shape.
background = None
data_array, error_array, headers, background = proj_red.get_error(data_array, headers, error_array, data_mask=data_mask, sub_type=error_sub_type, subtract_error=subtract_error, display=display_bkg, savename="_".join([figname, "errors"]), plots_folder=plots_folder, return_background=True)
data_array, error_array, headers, background = proj_red.get_error(
data_array,
headers,
error_array,
data_mask=data_mask,
sub_type=error_sub_type,
subtract_error=subtract_error,
display=display_bkg,
savename="_".join([figname, "errors"]),
plots_folder=plots_folder,
return_background=True,
)
# Align and rescale images with oversampling.
data_array, error_array, headers, data_mask, shifts, error_shifts = proj_red.align_data(
data_array, headers, error_array=error_array, background=background, upsample_factor=10, ref_center=align_center, return_shifts=True)
data_array, headers, error_array=error_array, background=background, upsample_factor=10, ref_center=align_center, return_shifts=True
)
if display_align:
print("Image shifts: {} \nShifts uncertainty: {}".format(shifts, error_shifts))
proj_plots.plot_obs(data_array, headers, savename="_".join([figname, str(align_center)]), plots_folder=plots_folder, norm=LogNorm(
vmin=data_array[data_array > 0.].min()*headers[0]['photflam'], vmax=data_array[data_array > 0.].max()*headers[0]['photflam']))
proj_plots.plot_obs(
data_array,
headers,
savename="_".join([figname, str(align_center)]),
plots_folder=plots_folder,
norm=LogNorm(vmin=data_array[data_array > 0.0].min() * headers[0]["photflam"], vmax=data_array[data_array > 0.0].max() * headers[0]["photflam"]),
)
# Rebin data to desired pixel size.
if rebin:
data_array, error_array, headers, Dxy, data_mask = proj_red.rebin_array(
data_array, error_array, headers, pxsize=pxsize, scale=px_scale, operation=rebin_operation, data_mask=data_mask)
data_array, error_array, headers, pxsize=pxsize, scale=px_scale, operation=rebin_operation, data_mask=data_mask
)
# Rotate data to have North up
if rotate_data:
data_mask = np.ones(data_array.shape[1:]).astype(bool)
alpha = headers[0]['orientat']
alpha = headers[0]["orientat"]
data_array, error_array, data_mask, headers = proj_red.rotate_data(data_array, error_array, data_mask, headers, -alpha)
# Plot array for checking output
if display_data and px_scale.lower() not in ['full', 'integrate']:
proj_plots.plot_obs(data_array, headers, savename="_".join([figname, "rebin"]), plots_folder=plots_folder, norm=LogNorm(
vmin=data_array[data_array > 0.].min()*headers[0]['photflam'], vmax=data_array[data_array > 0.].max()*headers[0]['photflam']))
if display_data and px_scale.lower() not in ["full", "integrate"]:
proj_plots.plot_obs(
data_array,
headers,
savename="_".join([figname, "rebin"]),
plots_folder=plots_folder,
norm=LogNorm(vmin=data_array[data_array > 0.0].min() * headers[0]["photflam"], vmax=data_array[data_array > 0.0].max() * headers[0]["photflam"]),
)
background = np.array([np.array(bkg).reshape(1, 1) for bkg in background])
background_error = np.array([np.array(np.sqrt((bkg-background[np.array([h['filtnam1'] == head['filtnam1'] for h in headers], dtype=bool)].mean())
** 2/np.sum([h['filtnam1'] == head['filtnam1'] for h in headers]))).reshape(1, 1) for bkg, head in zip(background, headers)])
background_error = np.array(
[
np.array(
np.sqrt(
(bkg - background[np.array([h["filtnam1"] == head["filtnam1"] for h in headers], dtype=bool)].mean()) ** 2
/ np.sum([h["filtnam1"] == head["filtnam1"] for h in headers])
)
).reshape(1, 1)
for bkg, head in zip(background, headers)
]
)
# Step 2:
# Compute Stokes I, Q, U with smoothed polarized images
@@ -158,15 +194,18 @@ def main(target=None, proposal_id=None, infiles=None, output_dir="./data", crop=
# see Jedrzejewski, R.; Nota, A.; Hack, W. J., A Comparison Between FOC and WFPC2
# Bibcode : 1995chst.conf...10J
I_stokes, Q_stokes, U_stokes, Stokes_cov = proj_red.compute_Stokes(
data_array, error_array, data_mask, headers, FWHM=smoothing_FWHM, scale=smoothing_scale, smoothing=smoothing_function, transmitcorr=transmitcorr)
I_bkg, Q_bkg, U_bkg, S_cov_bkg = proj_red.compute_Stokes(background, background_error, np.array(True).reshape(
1, 1), headers, FWHM=None, scale=smoothing_scale, smoothing=smoothing_function, transmitcorr=False)
data_array, error_array, data_mask, headers, FWHM=smoothing_FWHM, scale=smoothing_scale, smoothing=smoothing_function, transmitcorr=transmitcorr
)
I_bkg, Q_bkg, U_bkg, S_cov_bkg = proj_red.compute_Stokes(
background, background_error, np.array(True).reshape(1, 1), headers, FWHM=None, scale=smoothing_scale, smoothing=smoothing_function, transmitcorr=False
)
# Step 3:
# Rotate images to have North up
if rotate_stokes:
I_stokes, Q_stokes, U_stokes, Stokes_cov, data_mask, headers = proj_red.rotate_Stokes(
I_stokes, Q_stokes, U_stokes, Stokes_cov, data_mask, headers, SNRi_cut=None)
I_stokes, Q_stokes, U_stokes, Stokes_cov, data_mask, headers, SNRi_cut=None
)
I_bkg, Q_bkg, U_bkg, S_cov_bkg, _, _ = proj_red.rotate_Stokes(I_bkg, Q_bkg, U_bkg, S_cov_bkg, np.array(True).reshape(1, 1), headers, SNRi_cut=None)
# Compute polarimetric parameters (polarization degree and angle).
@@ -176,8 +215,24 @@ def main(target=None, proposal_id=None, infiles=None, output_dir="./data", crop=
# Step 4:
# Save image to FITS.
figname = "_".join([figname, figtype]) if figtype != "" else figname
Stokes_test = proj_fits.save_Stokes(I_stokes, Q_stokes, U_stokes, Stokes_cov, P, debiased_P, s_P, s_P_P, PA, s_PA, s_PA_P,
headers, data_mask, figname, data_folder=data_folder, return_hdul=True)
Stokes_test = proj_fits.save_Stokes(
I_stokes,
Q_stokes,
U_stokes,
Stokes_cov,
P,
debiased_P,
s_P,
s_P_P,
PA,
s_PA,
s_PA_P,
headers,
data_mask,
figname,
data_folder=data_folder,
return_hdul=True,
)
# Step 5:
# crop to desired region of interest (roi)
@@ -185,43 +240,145 @@ def main(target=None, proposal_id=None, infiles=None, output_dir="./data", crop=
figname += "_crop"
stokescrop = proj_plots.crop_Stokes(deepcopy(Stokes_test), norm=LogNorm())
stokescrop.crop()
stokescrop.write_to("/".join([data_folder, figname+".fits"]))
stokescrop.write_to("/".join([data_folder, figname + ".fits"]))
Stokes_test, headers = stokescrop.hdul_crop, [dataset.header for dataset in stokescrop.hdul_crop]
data_mask = Stokes_test['data_mask'].data.astype(bool)
print("F_int({0:.0f} Angs) = ({1} ± {2})e{3} ergs.cm^-2.s^-1.Angs^-1".format(headers[0]['photplam'], *sci_not(
Stokes_test[0].data[data_mask].sum()*headers[0]['photflam'], np.sqrt(Stokes_test[3].data[0, 0][data_mask].sum())*headers[0]['photflam'], 2, out=int)))
print("P_int = {0:.1f} ± {1:.1f} %".format(headers[0]['p_int']*100., np.ceil(headers[0]['p_int_err']*1000.)/10.))
print("PA_int = {0:.1f} ± {1:.1f} °".format(princ_angle(headers[0]['pa_int']), princ_angle(np.ceil(headers[0]['pa_int_err']*10.)/10.)))
data_mask = Stokes_test["data_mask"].data.astype(bool)
print(
"F_int({0:.0f} Angs) = ({1} ± {2})e{3} ergs.cm^-2.s^-1.Angs^-1".format(
headers[0]["photplam"],
*sci_not(
Stokes_test[0].data[data_mask].sum() * headers[0]["photflam"],
np.sqrt(Stokes_test[3].data[0, 0][data_mask].sum()) * headers[0]["photflam"],
2,
out=int,
),
)
)
print("P_int = {0:.1f} ± {1:.1f} %".format(headers[0]["p_int"] * 100.0, np.ceil(headers[0]["p_int_err"] * 1000.0) / 10.0))
print("PA_int = {0:.1f} ± {1:.1f} °".format(princ_angle(headers[0]["pa_int"]), princ_angle(np.ceil(headers[0]["pa_int_err"] * 10.0) / 10.0)))
# Background values
print("F_bkg({0:.0f} Angs) = ({1} ± {2})e{3} ergs.cm^-2.s^-1.Angs^-1".format(headers[0]['photplam'], *sci_not(
I_bkg[0, 0]*headers[0]['photflam'], np.sqrt(S_cov_bkg[0, 0][0, 0])*headers[0]['photflam'], 2, out=int)))
print("P_bkg = {0:.1f} ± {1:.1f} %".format(debiased_P_bkg[0, 0]*100., np.ceil(s_P_bkg[0, 0]*1000.)/10.))
print("PA_bkg = {0:.1f} ± {1:.1f} °".format(princ_angle(PA_bkg[0, 0]), princ_angle(np.ceil(s_PA_bkg[0, 0]*10.)/10.)))
print(
"F_bkg({0:.0f} Angs) = ({1} ± {2})e{3} ergs.cm^-2.s^-1.Angs^-1".format(
headers[0]["photplam"], *sci_not(I_bkg[0, 0] * headers[0]["photflam"], np.sqrt(S_cov_bkg[0, 0][0, 0]) * headers[0]["photflam"], 2, out=int)
)
)
print("P_bkg = {0:.1f} ± {1:.1f} %".format(debiased_P_bkg[0, 0] * 100.0, np.ceil(s_P_bkg[0, 0] * 1000.0) / 10.0))
print("PA_bkg = {0:.1f} ± {1:.1f} °".format(princ_angle(PA_bkg[0, 0]), princ_angle(np.ceil(s_PA_bkg[0, 0] * 10.0) / 10.0)))
# Plot polarization map (Background is either total Flux, Polarization degree or Polarization degree error).
if px_scale.lower() not in ['full', 'integrate'] and not interactive:
proj_plots.polarization_map(deepcopy(Stokes_test), data_mask, SNRp_cut=SNRp_cut, SNRi_cut=SNRi_cut, flux_lim=flux_lim,
step_vec=step_vec, vec_scale=vec_scale, savename="_".join([figname]), plots_folder=plots_folder)
proj_plots.polarization_map(deepcopy(Stokes_test), data_mask, SNRp_cut=SNRp_cut, SNRi_cut=SNRi_cut, flux_lim=flux_lim, step_vec=step_vec,
vec_scale=vec_scale, savename="_".join([figname, "I"]), plots_folder=plots_folder, display='Intensity')
proj_plots.polarization_map(deepcopy(Stokes_test), data_mask, SNRp_cut=SNRp_cut, SNRi_cut=SNRi_cut, flux_lim=flux_lim, step_vec=step_vec,
vec_scale=vec_scale, savename="_".join([figname, "P_flux"]), plots_folder=plots_folder, display='Pol_Flux')
proj_plots.polarization_map(deepcopy(Stokes_test), data_mask, SNRp_cut=SNRp_cut, SNRi_cut=SNRi_cut, flux_lim=flux_lim, step_vec=step_vec,
vec_scale=vec_scale, savename="_".join([figname, "P"]), plots_folder=plots_folder, display='Pol_deg')
proj_plots.polarization_map(deepcopy(Stokes_test), data_mask, SNRp_cut=SNRp_cut, SNRi_cut=SNRi_cut, flux_lim=flux_lim, step_vec=step_vec,
vec_scale=vec_scale, savename="_".join([figname, "PA"]), plots_folder=plots_folder, display='Pol_ang')
proj_plots.polarization_map(deepcopy(Stokes_test), data_mask, SNRp_cut=SNRp_cut, SNRi_cut=SNRi_cut, flux_lim=flux_lim, step_vec=step_vec,
vec_scale=vec_scale, savename="_".join([figname, "I_err"]), plots_folder=plots_folder, display='I_err')
proj_plots.polarization_map(deepcopy(Stokes_test), data_mask, SNRp_cut=SNRp_cut, SNRi_cut=SNRi_cut, flux_lim=flux_lim, step_vec=step_vec,
vec_scale=vec_scale, savename="_".join([figname, "P_err"]), plots_folder=plots_folder, display='Pol_deg_err')
proj_plots.polarization_map(deepcopy(Stokes_test), data_mask, SNRp_cut=SNRp_cut, SNRi_cut=SNRi_cut, flux_lim=flux_lim, step_vec=step_vec,
vec_scale=vec_scale, savename="_".join([figname, "SNRi"]), plots_folder=plots_folder, display='SNRi')
proj_plots.polarization_map(deepcopy(Stokes_test), data_mask, SNRp_cut=SNRp_cut, SNRi_cut=SNRi_cut, flux_lim=flux_lim, step_vec=step_vec,
vec_scale=vec_scale, savename="_".join([figname, "SNRp"]), plots_folder=plots_folder, display='SNRp')
if px_scale.lower() not in ["full", "integrate"] and not interactive:
proj_plots.polarization_map(
deepcopy(Stokes_test),
data_mask,
SNRp_cut=SNRp_cut,
SNRi_cut=SNRi_cut,
flux_lim=flux_lim,
step_vec=step_vec,
vec_scale=vec_scale,
savename="_".join([figname]),
plots_folder=plots_folder,
)
proj_plots.polarization_map(
deepcopy(Stokes_test),
data_mask,
SNRp_cut=SNRp_cut,
SNRi_cut=SNRi_cut,
flux_lim=flux_lim,
step_vec=step_vec,
vec_scale=vec_scale,
savename="_".join([figname, "I"]),
plots_folder=plots_folder,
display="Intensity",
)
proj_plots.polarization_map(
deepcopy(Stokes_test),
data_mask,
SNRp_cut=SNRp_cut,
SNRi_cut=SNRi_cut,
flux_lim=flux_lim,
step_vec=step_vec,
vec_scale=vec_scale,
savename="_".join([figname, "P_flux"]),
plots_folder=plots_folder,
display="Pol_Flux",
)
proj_plots.polarization_map(
deepcopy(Stokes_test),
data_mask,
SNRp_cut=SNRp_cut,
SNRi_cut=SNRi_cut,
flux_lim=flux_lim,
step_vec=step_vec,
vec_scale=vec_scale,
savename="_".join([figname, "P"]),
plots_folder=plots_folder,
display="Pol_deg",
)
proj_plots.polarization_map(
deepcopy(Stokes_test),
data_mask,
SNRp_cut=SNRp_cut,
SNRi_cut=SNRi_cut,
flux_lim=flux_lim,
step_vec=step_vec,
vec_scale=vec_scale,
savename="_".join([figname, "PA"]),
plots_folder=plots_folder,
display="Pol_ang",
)
proj_plots.polarization_map(
deepcopy(Stokes_test),
data_mask,
SNRp_cut=SNRp_cut,
SNRi_cut=SNRi_cut,
flux_lim=flux_lim,
step_vec=step_vec,
vec_scale=vec_scale,
savename="_".join([figname, "I_err"]),
plots_folder=plots_folder,
display="I_err",
)
proj_plots.polarization_map(
deepcopy(Stokes_test),
data_mask,
SNRp_cut=SNRp_cut,
SNRi_cut=SNRi_cut,
flux_lim=flux_lim,
step_vec=step_vec,
vec_scale=vec_scale,
savename="_".join([figname, "P_err"]),
plots_folder=plots_folder,
display="Pol_deg_err",
)
proj_plots.polarization_map(
deepcopy(Stokes_test),
data_mask,
SNRp_cut=SNRp_cut,
SNRi_cut=SNRi_cut,
flux_lim=flux_lim,
step_vec=step_vec,
vec_scale=vec_scale,
savename="_".join([figname, "SNRi"]),
plots_folder=plots_folder,
display="SNRi",
)
proj_plots.polarization_map(
deepcopy(Stokes_test),
data_mask,
SNRp_cut=SNRp_cut,
SNRi_cut=SNRi_cut,
flux_lim=flux_lim,
step_vec=step_vec,
vec_scale=vec_scale,
savename="_".join([figname, "SNRp"]),
plots_folder=plots_folder,
display="SNRp",
)
elif not interactive:
proj_plots.polarization_map(deepcopy(Stokes_test), data_mask, SNRp_cut=SNRp_cut, SNRi_cut=SNRi_cut,
savename=figname, plots_folder=plots_folder, display='integrate')
elif px_scale.lower() not in ['full', 'integrate']:
proj_plots.polarization_map(
deepcopy(Stokes_test), data_mask, SNRp_cut=SNRp_cut, SNRi_cut=SNRi_cut, savename=figname, plots_folder=plots_folder, display="integrate"
)
elif px_scale.lower() not in ["full", "integrate"]:
proj_plots.pol_map(Stokes_test, SNRp_cut=SNRp_cut, SNRi_cut=SNRi_cut, flux_lim=flux_lim)
return 0
@@ -230,15 +387,17 @@ def main(target=None, proposal_id=None, infiles=None, output_dir="./data", crop=
if __name__ == "__main__":
import argparse
parser = argparse.ArgumentParser(description='Query MAST for target products')
parser.add_argument('-t', '--target', metavar='targetname', required=False, help='the name of the target', type=str, default=None)
parser.add_argument('-p', '--proposal_id', metavar='proposal_id', required=False, help='the proposal id of the data products', type=int, default=None)
parser.add_argument('-f', '--files', metavar='path', required=False, nargs='*', help='the full or relative path to the data products', default=None)
parser.add_argument('-o', '--output_dir', metavar='directory_path', required=False,
help='output directory path for the data products', type=str, default="./data")
parser.add_argument('-c', '--crop', action='store_true', required=False, help='whether to crop the analysis region')
parser.add_argument('-i', '--interactive', action='store_true', required=False, help='whether to output to the interactive analysis tool')
parser = argparse.ArgumentParser(description="Query MAST for target products")
parser.add_argument("-t", "--target", metavar="targetname", required=False, help="the name of the target", type=str, default=None)
parser.add_argument("-p", "--proposal_id", metavar="proposal_id", required=False, help="the proposal id of the data products", type=int, default=None)
parser.add_argument("-f", "--files", metavar="path", required=False, nargs="*", help="the full or relative path to the data products", default=None)
parser.add_argument(
"-o", "--output_dir", metavar="directory_path", required=False, help="output directory path for the data products", type=str, default="./data"
)
parser.add_argument("-c", "--crop", action="store_true", required=False, help="whether to crop the analysis region")
parser.add_argument("-i", "--interactive", action="store_true", required=False, help="whether to output to the interactive analysis tool")
args = parser.parse_args()
exitcode = main(target=args.target, proposal_id=args.proposal_id, infiles=args.files,
output_dir=args.output_dir, crop=args.crop, interactive=args.interactive)
exitcode = main(
target=args.target, proposal_id=args.proposal_id, infiles=args.files, output_dir=args.output_dir, crop=args.crop, interactive=args.interactive
)
print("Finished with ExitCode: ", exitcode)


@@ -9,139 +9,155 @@ prototypes :
- bkg_mini(data, error, mask, headers, sub_shape, display, savename, plots_folder) -> n_data_array, n_error_array, headers, background)
Compute the error (noise) of the input array by looking at the sub-region of minimal flux in every image and of shape sub_shape.
"""
from os.path import join as path_join
from copy import deepcopy
import numpy as np
import matplotlib.pyplot as plt
import matplotlib.dates as mdates
from matplotlib.colors import LogNorm
from matplotlib.patches import Rectangle
from datetime import datetime, timedelta
from os.path import join as path_join
import matplotlib.dates as mdates
import matplotlib.pyplot as plt
import numpy as np
from astropy.time import Time
from lib.plots import plot_obs
from matplotlib.colors import LogNorm
from matplotlib.patches import Rectangle
from scipy.optimize import curve_fit
def gauss(x, *p):
N, mu, sigma = p
return N*np.exp(-(x-mu)**2/(2.*sigma**2))
return N * np.exp(-((x - mu) ** 2) / (2.0 * sigma**2))
def gausspol(x, *p):
N, mu, sigma, a, b, c, d = p
return N*np.exp(-(x-mu)**2/(2.*sigma**2)) + a*np.log(x) + b/x + c*x + d
return N * np.exp(-((x - mu) ** 2) / (2.0 * sigma**2)) + a * np.log(x) + b / x + c * x + d
def bin_centers(edges):
return (edges[1:]+edges[:-1])/2.
return (edges[1:] + edges[:-1]) / 2.0
def display_bkg(data, background, std_bkg, headers, histograms=None, binning=None, coeff=None, rectangle=None, savename=None, plots_folder="./"):
plt.rcParams.update({'font.size': 15})
convert_flux = np.array([head['photflam'] for head in headers])
date_time = np.array([Time((headers[i]['expstart']+headers[i]['expend'])/2., format='mjd', precision=0).iso for i in range(len(headers))])
date_time = np.array([datetime.strptime(d, '%Y-%m-%d %H:%M:%S') for d in date_time])
date_err = np.array([timedelta(seconds=headers[i]['exptime']/2.) for i in range(len(headers))])
filt = np.array([headers[i]['filtnam1'] for i in range(len(headers))])
dict_filt = {"POL0": 'r', "POL60": 'g', "POL120": 'b'}
plt.rcParams.update({"font.size": 15})
convert_flux = np.array([head["photflam"] for head in headers])
date_time = np.array([Time((headers[i]["expstart"] + headers[i]["expend"]) / 2.0, format="mjd", precision=0).iso for i in range(len(headers))])
date_time = np.array([datetime.strptime(d, "%Y-%m-%d %H:%M:%S") for d in date_time])
date_err = np.array([timedelta(seconds=headers[i]["exptime"] / 2.0) for i in range(len(headers))])
filt = np.array([headers[i]["filtnam1"] for i in range(len(headers))])
dict_filt = {"POL0": "r", "POL60": "g", "POL120": "b"}
c_filt = np.array([dict_filt[f] for f in filt])
fig, ax = plt.subplots(figsize=(10, 6), constrained_layout=True)
for f in np.unique(filt):
mask = [fil == f for fil in filt]
ax.scatter(date_time[mask], background[mask]*convert_flux[mask], color=dict_filt[f],
label="{0:s}".format(f))
ax.errorbar(date_time, background*convert_flux, xerr=date_err, yerr=std_bkg*convert_flux, fmt='+k',
markersize=0, ecolor=c_filt)
ax.scatter(date_time[mask], background[mask] * convert_flux[mask], color=dict_filt[f], label="{0:s}".format(f))
ax.errorbar(date_time, background * convert_flux, xerr=date_err, yerr=std_bkg * convert_flux, fmt="+k", markersize=0, ecolor=c_filt)
# Date handling
locator = mdates.AutoDateLocator()
formatter = mdates.ConciseDateFormatter(locator)
ax.xaxis.set_major_locator(locator)
ax.xaxis.set_major_formatter(formatter)
# ax.set_ylim(bottom=0.)
ax.set_yscale('log')
ax.set_yscale("log")
ax.set_xlabel("Observation date and time")
ax.set_ylabel(r"Flux [$ergs \cdot cm^{-2} \cdot s^{-1} \cdot \AA^{-1}$]")
plt.legend()
if not (savename is None):
this_savename = deepcopy(savename)
if not savename[-4:] in ['.png', '.jpg', '.pdf']:
this_savename += '_background_flux.pdf'
if not savename[-4:] in [".png", ".jpg", ".pdf"]:
this_savename += "_background_flux.pdf"
else:
this_savename = savename[:-4]+"_background_flux"+savename[-4:]
fig.savefig(path_join(plots_folder, this_savename), bbox_inches='tight')
this_savename = savename[:-4] + "_background_flux" + savename[-4:]
fig.savefig(path_join(plots_folder, this_savename), bbox_inches="tight")
if not (histograms is None):
filt_obs = {"POL0": 0, "POL60": 0, "POL120": 0}
fig_h, ax_h = plt.subplots(figsize=(10, 6), constrained_layout=True)
for i, (hist, bins) in enumerate(zip(histograms, binning)):
filt_obs[headers[i]['filtnam1']] += 1
ax_h.plot(bins*convert_flux[i], hist, '+', color="C{0:d}".format(i), alpha=0.8,
label=headers[i]['filtnam1']+' (Obs '+str(filt_obs[headers[i]['filtnam1']])+')')
ax_h.plot([background[i]*convert_flux[i], background[i]*convert_flux[i]], [hist.min(), hist.max()], 'x--', color="C{0:d}".format(i), alpha=0.8)
filt_obs[headers[i]["filtnam1"]] += 1
ax_h.plot(
bins * convert_flux[i],
hist,
"+",
color="C{0:d}".format(i),
alpha=0.8,
label=headers[i]["filtnam1"] + " (Obs " + str(filt_obs[headers[i]["filtnam1"]]) + ")",
)
ax_h.plot([background[i] * convert_flux[i], background[i] * convert_flux[i]], [hist.min(), hist.max()], "x--", color="C{0:d}".format(i), alpha=0.8)
if not (coeff is None):
# ax_h.plot(bins*convert_flux[i], gausspol(bins, *coeff[i]), '--', color="C{0:d}".format(i), alpha=0.8)
ax_h.plot(bins*convert_flux[i], gauss(bins, *coeff[i]), '--', color="C{0:d}".format(i), alpha=0.8)
ax_h.set_xscale('log')
ax_h.set_ylim([0., np.max([hist.max() for hist in histograms])])
ax_h.set_xlim([np.min(background*convert_flux)*1e-2, np.max(background*convert_flux)*1e2])
ax_h.plot(bins * convert_flux[i], gauss(bins, *coeff[i]), "--", color="C{0:d}".format(i), alpha=0.8)
ax_h.set_xscale("log")
ax_h.set_ylim([0.0, np.max([hist.max() for hist in histograms])])
ax_h.set_xlim([np.min(background * convert_flux) * 1e-2, np.max(background * convert_flux) * 1e2])
ax_h.set_xlabel(r"Flux [$ergs \cdot cm^{-2} \cdot s^{-1} \cdot \AA^{-1}$]")
ax_h.set_ylabel(r"Number of pixels in bin")
ax_h.set_title("Histogram for each observation")
plt.legend()
if not (savename is None):
this_savename = deepcopy(savename)
if not savename[-4:] in ['.png', '.jpg', '.pdf']:
this_savename += '_histograms.pdf'
if not savename[-4:] in [".png", ".jpg", ".pdf"]:
this_savename += "_histograms.pdf"
else:
this_savename = savename[:-4]+"_histograms"+savename[-4:]
fig_h.savefig(path_join(plots_folder, this_savename), bbox_inches='tight')
this_savename = savename[:-4] + "_histograms" + savename[-4:]
fig_h.savefig(path_join(plots_folder, this_savename), bbox_inches="tight")
fig2, ax2 = plt.subplots(figsize=(10, 10))
data0 = data[0]*convert_flux[0]
bkg_data0 = data0 <= background[0]*convert_flux[0]
instr = headers[0]['instrume']
rootname = headers[0]['rootname']
exptime = headers[0]['exptime']
filt = headers[0]['filtnam1']
data0 = data[0] * convert_flux[0]
bkg_data0 = data0 <= background[0] * convert_flux[0]
instr = headers[0]["instrume"]
rootname = headers[0]["rootname"]
exptime = headers[0]["exptime"]
filt = headers[0]["filtnam1"]
# plots
im2 = ax2.imshow(data0, norm=LogNorm(data0[data0 > 0.].mean()/10., data0.max()), origin='lower', cmap='gray')
ax2.imshow(bkg_data0, origin='lower', cmap='Reds', alpha=0.5)
im2 = ax2.imshow(data0, norm=LogNorm(data0[data0 > 0.0].mean() / 10.0, data0.max()), origin="lower", cmap="gray")
ax2.imshow(bkg_data0, origin="lower", cmap="Reds", alpha=0.5)
if not (rectangle is None):
x, y, width, height, angle, color = rectangle[0]
ax2.add_patch(Rectangle((x, y), width, height, edgecolor=color, fill=False, lw=2))
ax2.annotate(instr+":"+rootname, color='white', fontsize=10, xy=(0.01, 1.00), xycoords='axes fraction', verticalalignment='top', horizontalalignment='left')
ax2.annotate(filt, color='white', fontsize=14, xy=(0.01, 0.01), xycoords='axes fraction', verticalalignment='bottom', horizontalalignment='left')
ax2.annotate(str(exptime)+" s", color='white', fontsize=10, xy=(1.00, 0.01),
xycoords='axes fraction', verticalalignment='bottom', horizontalalignment='right')
ax2.set(xlabel='pixel offset', ylabel='pixel offset', aspect='equal')
ax2.annotate(
instr + ":" + rootname, color="white", fontsize=10, xy=(0.01, 1.00), xycoords="axes fraction", verticalalignment="top", horizontalalignment="left"
)
ax2.annotate(filt, color="white", fontsize=14, xy=(0.01, 0.01), xycoords="axes fraction", verticalalignment="bottom", horizontalalignment="left")
ax2.annotate(
str(exptime) + " s", color="white", fontsize=10, xy=(1.00, 0.01), xycoords="axes fraction", verticalalignment="bottom", horizontalalignment="right"
)
ax2.set(xlabel="pixel offset", ylabel="pixel offset", aspect="equal")
fig2.subplots_adjust(hspace=0, wspace=0, right=1.0)
fig2.colorbar(im2, ax=ax2, location='right', aspect=50, pad=0.025, label=r"Flux [$ergs \cdot cm^{-2} \cdot s^{-1} \cdot \AA^{-1}$]")
fig2.colorbar(im2, ax=ax2, location="right", aspect=50, pad=0.025, label=r"Flux [$ergs \cdot cm^{-2} \cdot s^{-1} \cdot \AA^{-1}$]")
if not (savename is None):
this_savename = deepcopy(savename)
if not savename[-4:] in ['.png', '.jpg', '.pdf']:
this_savename += '_'+filt+'_background_location.pdf'
if not savename[-4:] in [".png", ".jpg", ".pdf"]:
this_savename += "_" + filt + "_background_location.pdf"
else:
this_savename = savename[:-4]+'_'+filt+'_background_location'+savename[-4:]
fig2.savefig(path_join(plots_folder, this_savename), bbox_inches='tight')
this_savename = savename[:-4] + "_" + filt + "_background_location" + savename[-4:]
fig2.savefig(path_join(plots_folder, this_savename), bbox_inches="tight")
if not (rectangle is None):
plot_obs(data, headers, vmin=data[data > 0.].min()*convert_flux.mean(), vmax=data[data > 0.].max()*convert_flux.mean(), rectangle=rectangle,
savename=savename+"_background_location", plots_folder=plots_folder)
plot_obs(
data,
headers,
vmin=data[data > 0.0].min() * convert_flux.mean(),
vmax=data[data > 0.0].max() * convert_flux.mean(),
rectangle=rectangle,
savename=savename + "_background_location",
plots_folder=plots_folder,
)
elif not (rectangle is None):
plot_obs(data, headers, vmin=data[data > 0.].min(), vmax=data[data > 0.].max(), rectangle=rectangle)
plot_obs(data, headers, vmin=data[data > 0.0].min(), vmax=data[data > 0.0].max(), rectangle=rectangle)
plt.show()
def sky_part(img):
rand_ind = np.unique((np.random.rand(np.floor(img.size/4).astype(int))*2*img.size).astype(int) % img.size)
rand_ind = np.unique((np.random.rand(np.floor(img.size / 4).astype(int)) * 2 * img.size).astype(int) % img.size)
rand_pix = img.flatten()[rand_ind]
# Intensity range
sky_med = np.median(rand_pix)
sig = np.min([img[img < sky_med].std(), img[img > sky_med].std()])
sky_range = [sky_med-2.*sig, np.max([sky_med+sig, 7e-4])] # Detector background average FOC Data Handbook Sec. 7.6
sky_range = [sky_med - 2.0 * sig, np.max([sky_med + sig, 7e-4])] # Detector background average FOC Data Handbook Sec. 7.6
sky = img[np.logical_and(img >= sky_range[0], img <= sky_range[1])]
return sky, sky_range
@@ -152,14 +168,14 @@ def bkg_estimate(img, bins=None, chi2=None, coeff=None):
bins, chi2, coeff = [8], [], []
else:
try:
bins.append(int(3./2.*bins[-1]))
bins.append(int(3.0 / 2.0 * bins[-1]))
except IndexError:
bins, chi2, coeff = [8], [], []
hist, bin_edges = np.histogram(img[img > 0], bins=bins[-1])
binning = bin_centers(bin_edges)
peak = binning[np.argmax(hist)]
bins_stdev = binning[hist > hist.max()/2.]
stdev = bins_stdev[-1]-bins_stdev[0]
bins_stdev = binning[hist > hist.max() / 2.0]
stdev = bins_stdev[-1] - bins_stdev[0]
# p0 = [hist.max(), peak, stdev, 1e-3, 1e-3, 1e-3, 1e-3]
p0 = [hist.max(), peak, stdev]
try:
@@ -168,7 +184,7 @@ def bkg_estimate(img, bins=None, chi2=None, coeff=None):
except RuntimeError:
popt = p0
# chi2.append(np.sum((hist - gausspol(binning, *popt))**2)/hist.size)
chi2.append(np.sum((hist - gauss(binning, *popt))**2)/hist.size)
chi2.append(np.sum((hist - gauss(binning, *popt)) ** 2) / hist.size)
coeff.append(popt)
return bins, chi2, coeff
@@ -223,7 +239,7 @@ def bkg_fit(data, error, mask, headers, subtract_error=True, display=False, save
for i, image in enumerate(data):
# Compute the Count-rate histogram for the image
sky, sky_range = sky_part(image[image > 0.])
sky, sky_range = sky_part(image[image > 0.0])
bins, chi2, coeff = bkg_estimate(sky)
while bins[-1] < 256:
@@ -232,21 +248,21 @@ def bkg_fit(data, error, mask, headers, subtract_error=True, display=False, save
histograms.append(hist)
binning.append(bin_centers(bin_edges))
chi2, coeff = np.array(chi2), np.array(coeff)
weights = 1/chi2**2
weights = 1 / chi2**2
weights /= weights.sum()
bkg = np.sum(weights*(coeff[:, 1]+np.abs(coeff[:, 2])*subtract_error))
bkg = np.sum(weights * (coeff[:, 1] + np.abs(coeff[:, 2]) * subtract_error))
error_bkg[i] *= bkg
n_error_array[i] = np.sqrt(n_error_array[i]**2 + error_bkg[i]**2)
n_error_array[i] = np.sqrt(n_error_array[i] ** 2 + error_bkg[i] ** 2)
# Subtract background
if subtract_error > 0:
n_data_array[i][mask] = n_data_array[i][mask] - bkg
n_data_array[i][np.logical_and(mask, n_data_array[i] <= 1e-3*bkg)] = 1e-3*bkg
n_data_array[i][np.logical_and(mask, n_data_array[i] <= 1e-3 * bkg)] = 1e-3 * bkg
std_bkg[i] = image[np.abs(image-bkg)/bkg < 1.].std()
std_bkg[i] = image[np.abs(image - bkg) / bkg < 1.0].std()
background[i] = bkg
if display:
@@ -308,49 +324,54 @@ def bkg_hist(data, error, mask, headers, sub_type=None, subtract_error=True, dis
for i, image in enumerate(data):
# Compute the Count-rate histogram for the image
n_mask = np.logical_and(mask, image > 0.)
n_mask = np.logical_and(mask, image > 0.0)
if not (sub_type is None):
if isinstance(sub_type, int):
n_bins = sub_type
elif sub_type.lower() in ['sqrt']:
elif sub_type.lower() in ["sqrt"]:
n_bins = np.fix(np.sqrt(image[n_mask].size)).astype(int) # Square-root
elif sub_type.lower() in ['sturges']:
n_bins = np.ceil(np.log2(image[n_mask].size)).astype(int)+1 # Sturges
elif sub_type.lower() in ['rice']:
n_bins = 2*np.fix(np.power(image[n_mask].size, 1/3)).astype(int) # Rice
elif sub_type.lower() in ['scott']:
n_bins = np.fix((image[n_mask].max()-image[n_mask].min())/(3.5*image[n_mask].std()/np.power(image[n_mask].size, 1/3))).astype(int) # Scott
elif sub_type.lower() in ["sturges"]:
n_bins = np.ceil(np.log2(image[n_mask].size)).astype(int) + 1 # Sturges
elif sub_type.lower() in ["rice"]:
n_bins = 2 * np.fix(np.power(image[n_mask].size, 1 / 3)).astype(int) # Rice
elif sub_type.lower() in ["scott"]:
n_bins = np.fix((image[n_mask].max() - image[n_mask].min()) / (3.5 * image[n_mask].std() / np.power(image[n_mask].size, 1 / 3))).astype(
int
) # Scott
else:
n_bins = np.fix((image[n_mask].max()-image[n_mask].min())/(2*np.subtract(*np.percentile(image[n_mask], [75, 25])) /
np.power(image[n_mask].size, 1/3))).astype(int) # Freedman-Diaconis
n_bins = np.fix(
(image[n_mask].max() - image[n_mask].min())
/ (2 * np.subtract(*np.percentile(image[n_mask], [75, 25])) / np.power(image[n_mask].size, 1 / 3))
).astype(int) # Freedman-Diaconis
else:
n_bins = np.fix((image[n_mask].max()-image[n_mask].min())/(2*np.subtract(*np.percentile(image[n_mask], [75, 25])) /
np.power(image[n_mask].size, 1/3))).astype(int) # Freedman-Diaconis
n_bins = np.fix(
(image[n_mask].max() - image[n_mask].min()) / (2 * np.subtract(*np.percentile(image[n_mask], [75, 25])) / np.power(image[n_mask].size, 1 / 3))
).astype(int) # Freedman-Diaconis
hist, bin_edges = np.histogram(np.log(image[n_mask]), bins=n_bins)
histograms.append(hist)
binning.append(np.exp(bin_centers(bin_edges)))
# Fit a gaussian to the log-intensity histogram
bins_stdev = binning[-1][hist > hist.max()/2.]
stdev = bins_stdev[-1]-bins_stdev[0]
bins_stdev = binning[-1][hist > hist.max() / 2.0]
stdev = bins_stdev[-1] - bins_stdev[0]
# p0 = [hist.max(), binning[-1][np.argmax(hist)], stdev, 1e-3, 1e-3, 1e-3, 1e-3]
p0 = [hist.max(), binning[-1][np.argmax(hist)], stdev]
# popt, pcov = curve_fit(gausspol, binning[-1], hist, p0=p0)
popt, pcov = curve_fit(gauss, binning[-1], hist, p0=p0)
coeff.append(popt)
bkg = popt[1]+np.abs(popt[2])*subtract_error
bkg = popt[1] + np.abs(popt[2]) * subtract_error
error_bkg[i] *= bkg
n_error_array[i] = np.sqrt(n_error_array[i]**2 + error_bkg[i]**2)
n_error_array[i] = np.sqrt(n_error_array[i] ** 2 + error_bkg[i] ** 2)
# Subtract background
if subtract_error > 0:
n_data_array[i][mask] = n_data_array[i][mask] - bkg
n_data_array[i][np.logical_and(mask, n_data_array[i] <= 1e-3*bkg)] = 1e-3*bkg
n_data_array[i][np.logical_and(mask, n_data_array[i] <= 1e-3 * bkg)] = 1e-3 * bkg
std_bkg[i] = image[np.abs(image-bkg)/bkg < 1.].std()
std_bkg[i] = image[np.abs(image - bkg) / bkg < 1.0].std()
background[i] = bkg
if display:
@@ -409,10 +430,10 @@ def bkg_mini(data, error, mask, headers, sub_shape=(15, 15), subtract_error=True
sub_shape = np.array(sub_shape)
# Make sub_shape of odd values
if not (np.all(sub_shape % 2)):
sub_shape += 1-sub_shape % 2
sub_shape += 1 - sub_shape % 2
shape = np.array(data.shape)
diff = (sub_shape-1).astype(int)
temp = np.zeros((shape[0], shape[1]-diff[0], shape[2]-diff[1]))
diff = (sub_shape - 1).astype(int)
temp = np.zeros((shape[0], shape[1] - diff[0], shape[2] - diff[1]))
n_data_array, n_error_array = deepcopy(data), deepcopy(error)
error_bkg = np.ones(n_data_array.shape)
@@ -425,29 +446,29 @@ def bkg_mini(data, error, mask, headers, sub_shape=(15, 15), subtract_error=True
# sub-image dominated by background
fmax = np.finfo(np.double).max
img = deepcopy(image)
img[1-mask] = fmax/(diff[0]*diff[1])
img[1 - mask] = fmax / (diff[0] * diff[1])
for r in range(temp.shape[1]):
for c in range(temp.shape[2]):
temp[i][r, c] = np.where(mask[r, c], img[r:r+diff[0], c:c+diff[1]].sum(), fmax/(diff[0]*diff[1]))
temp[i][r, c] = np.where(mask[r, c], img[r : r + diff[0], c : c + diff[1]].sum(), fmax / (diff[0] * diff[1]))
minima = np.unravel_index(np.argmin(temp.sum(axis=0)), temp.shape[1:])
for i, image in enumerate(data):
rectangle.append([minima[1], minima[0], sub_shape[1], sub_shape[0], 0., 'r'])
rectangle.append([minima[1], minima[0], sub_shape[1], sub_shape[0], 0.0, "r"])
# Compute error : root mean square of the background
sub_image = image[minima[0]:minima[0]+sub_shape[0], minima[1]:minima[1]+sub_shape[1]]
sub_image = image[minima[0] : minima[0] + sub_shape[0], minima[1] : minima[1] + sub_shape[1]]
# bkg = np.std(sub_image) # Previously computed using standard deviation over the background
bkg = np.sqrt(np.sum(sub_image**2)/sub_image.size)*subtract_error if subtract_error > 0 else np.sqrt(np.sum(sub_image**2)/sub_image.size)
bkg = np.sqrt(np.sum(sub_image**2) / sub_image.size) * subtract_error if subtract_error > 0 else np.sqrt(np.sum(sub_image**2) / sub_image.size)
error_bkg[i] *= bkg
n_error_array[i] = np.sqrt(n_error_array[i]**2 + error_bkg[i]**2)
n_error_array[i] = np.sqrt(n_error_array[i] ** 2 + error_bkg[i] ** 2)
# Subtract background
if subtract_error > 0.:
if subtract_error > 0.0:
n_data_array[i][mask] = n_data_array[i][mask] - bkg
n_data_array[i][np.logical_and(mask, n_data_array[i] <= 1e-3*bkg)] = 1e-3*bkg
n_data_array[i][np.logical_and(mask, n_data_array[i] <= 1e-3 * bkg)] = 1e-3 * bkg
std_bkg[i] = image[np.abs(image-bkg)/bkg < 1.].std()
std_bkg[i] = image[np.abs(image - bkg) / bkg < 1.0].std()
background[i] = bkg
if display:


@@ -3,6 +3,7 @@ Library functions for Graham algorithm implementation (find the convex hull of a
"""
from copy import deepcopy
import numpy as np
@@ -16,23 +17,23 @@ def clean_ROI(image):
row, col = np.indices(shape)
for i in range(0, shape[0]):
r = row[i, :][image[i, :] > 0.]
c = col[i, :][image[i, :] > 0.]
r = row[i, :][image[i, :] > 0.0]
c = col[i, :][image[i, :] > 0.0]
if len(r) > 1 and len(c) > 1:
H.append((r[0], c[0]))
H.append((r[-1], c[-1]))
H = np.array(H)
for j in range(0, shape[1]):
r = row[:, j][image[:, j] > 0.]
c = col[:, j][image[:, j] > 0.]
r = row[:, j][image[:, j] > 0.0]
c = col[:, j][image[:, j] > 0.0]
if len(r) > 1 and len(c) > 1:
J.append((r[0], c[0]))
J.append((r[-1], c[-1]))
J = np.array(J)
xmin = np.min([H[:, 1].min(), J[:, 1].min()])
xmax = np.max([H[:, 1].max(), J[:, 1].max()])+1
xmax = np.max([H[:, 1].max(), J[:, 1].max()]) + 1
ymin = np.min([H[:, 0].min(), J[:, 0].min()])
ymax = np.max([H[:, 0].max(), J[:, 0].max()])+1
ymax = np.max([H[:, 0].max(), J[:, 0].max()]) + 1
return np.array([xmin, xmax, ymin, ymax])
@@ -81,7 +82,7 @@ def distance(A, B):
Euclidean distance between A, B.
"""
x, y = vector(A, B)
return np.sqrt(x ** 2 + y ** 2)
return np.sqrt(x**2 + y**2)
# Define lexicographic and composition order
@@ -174,8 +175,8 @@ def partition(s, left, right, order):
temp = deepcopy(s[i])
s[i] = deepcopy(s[j])
s[j] = deepcopy(temp)
temp = deepcopy(s[i+1])
s[i+1] = deepcopy(s[right])
temp = deepcopy(s[i + 1])
s[i + 1] = deepcopy(s[right])
s[right] = deepcopy(temp)
return i + 1
@@ -206,16 +207,32 @@ def sort_angles_distances(Omega, s):
Sort the list of points 's' for the composition order given reference point
Omega.
"""
def order(A, B): return comp(Omega, A, B)
def order(A, B):
return comp(Omega, A, B)
quicksort(s, order)
# Define functions for stacks (using Python lists with stack operations).
def empty_stack(): return []
def stack(S, A): S.append(A)
def unstack(S): S.pop()
def stack_top(S): return S[-1]
def stack_sub_top(S): return S[-2]
def empty_stack():
return []
def stack(S, A):
S.append(A)
def unstack(S):
S.pop()
def stack_top(S):
return S[-1]
def stack_sub_top(S):
return S[-2]
# Alignment handling
@@ -299,7 +316,7 @@ def convex_hull(H):
return S
def image_hull(image, step=5, null_val=0., inside=True):
def image_hull(image, step=5, null_val=0.0, inside=True):
"""
Compute the convex hull of a 2D image and return the 4 relevant coordinates
of the maximum included rectangle (i.e. crop the image to the maximum rectangle).
@@ -331,7 +348,7 @@ def image_hull(image, step=5, null_val=0., inside=True):
H = []
shape = np.array(image.shape)
row, col = np.indices(shape)
for i in range(0, int(min(shape)/2), step):
for i in range(0, int(min(shape) / 2), step):
r1, r2 = row[i, :][image[i, :] > null_val], row[-i, :][image[-i, :] > null_val]
c1, c2 = col[i, :][image[i, :] > null_val], col[-i, :][image[-i, :] > null_val]
if r1.shape[0] > 1:
@@ -349,10 +366,10 @@ def image_hull(image, step=5, null_val=0., inside=True):
# S1 = S[x_min*y_max][np.argmax(S[x_min*y_max][:, 1])]
# S2 = S[x_max*y_min][np.argmin(S[x_max*y_min][:, 1])]
# S3 = S[x_max*y_max][np.argmax(S[x_max*y_max][:, 0])]
S0 = S[x_min*y_min][np.abs(0-S[x_min*y_min].sum(axis=1)).min() == np.abs(0-S[x_min*y_min].sum(axis=1))][0]
S1 = S[x_min*y_max][np.abs(shape[1]-S[x_min*y_max].sum(axis=1)).min() == np.abs(shape[1]-S[x_min*y_max].sum(axis=1))][0]
S2 = S[x_max*y_min][np.abs(shape[0]-S[x_max*y_min].sum(axis=1)).min() == np.abs(shape[0]-S[x_max*y_min].sum(axis=1))][0]
S3 = S[x_max*y_max][np.abs(shape.sum()-S[x_max*y_max].sum(axis=1)).min() == np.abs(shape.sum()-S[x_max*y_max].sum(axis=1))][0]
S0 = S[x_min * y_min][np.abs(0 - S[x_min * y_min].sum(axis=1)).min() == np.abs(0 - S[x_min * y_min].sum(axis=1))][0]
S1 = S[x_min * y_max][np.abs(shape[1] - S[x_min * y_max].sum(axis=1)).min() == np.abs(shape[1] - S[x_min * y_max].sum(axis=1))][0]
S2 = S[x_max * y_min][np.abs(shape[0] - S[x_max * y_min].sum(axis=1)).min() == np.abs(shape[0] - S[x_max * y_min].sum(axis=1))][0]
S3 = S[x_max * y_max][np.abs(shape.sum() - S[x_max * y_max].sum(axis=1)).min() == np.abs(shape.sum() - S[x_max * y_max].sum(axis=1))][0]
# Get the vertex of the biggest included rectangle
if inside:
f0 = np.max([S0[0], S1[0]])


@@ -1,6 +1,7 @@
"""
Library functions for phase cross-correlation computation.
"""
# Prefer FFTs via the new scipy.fft module when available (SciPy 1.4+)
# Otherwise fall back to numpy.fft.
# Like numpy 1.15+ scipy 1.3+ is also using pocketfft, but a newer
@@ -13,8 +14,7 @@ except ImportError:
import numpy as np
def _upsampled_dft(data, upsampled_region_size, upsample_factor=1,
axis_offsets=None):
def _upsampled_dft(data, upsampled_region_size, upsample_factor=1, axis_offsets=None):
"""
Upsampled DFT by matrix multiplication.
This code is intended to provide the same result as if the following
@@ -48,26 +48,27 @@ def _upsampled_dft(data, upsampled_region_size, upsample_factor=1,
"""
# if people pass in an integer, expand it to a list of equal-sized sections
if not hasattr(upsampled_region_size, "__iter__"):
upsampled_region_size = [upsampled_region_size, ] * data.ndim
upsampled_region_size = [
upsampled_region_size,
] * data.ndim
else:
if len(upsampled_region_size) != data.ndim:
raise ValueError("shape of upsampled region sizes must be equal "
"to input data's number of dimensions.")
raise ValueError("shape of upsampled region sizes must be equal " "to input data's number of dimensions.")
if axis_offsets is None:
axis_offsets = [0, ] * data.ndim
axis_offsets = [
0,
] * data.ndim
else:
if len(axis_offsets) != data.ndim:
raise ValueError("number of axis offsets must be equal to input "
"data's number of dimensions.")
raise ValueError("number of axis offsets must be equal to input " "data's number of dimensions.")
im2pi = 1j * 2 * np.pi
dim_properties = list(zip(data.shape, upsampled_region_size, axis_offsets))
for (n_items, ups_size, ax_offset) in dim_properties[::-1]:
kernel = ((np.arange(ups_size) - ax_offset)[:, None]
* fft.fftfreq(n_items, upsample_factor))
for n_items, ups_size, ax_offset in dim_properties[::-1]:
kernel = (np.arange(ups_size) - ax_offset)[:, None] * fft.fftfreq(n_items, upsample_factor)
kernel = np.exp(-im2pi * kernel)
# Equivalent to:
@@ -100,14 +101,11 @@ def _compute_error(cross_correlation_max, src_amp, target_amp):
target_amp : float
The normalized average image intensity of the target image
"""
error = 1.0 - cross_correlation_max * cross_correlation_max.conj() /\
(src_amp * target_amp)
error = 1.0 - cross_correlation_max * cross_correlation_max.conj() / (src_amp * target_amp)
return np.sqrt(np.abs(error))
def phase_cross_correlation(reference_image, moving_image, *,
upsample_factor=1, space="real",
return_error=True, overlap_ratio=0.3):
def phase_cross_correlation(reference_image, moving_image, *, upsample_factor=1, space="real", return_error=True, overlap_ratio=0.3):
"""
Efficient subpixel image translation registration by cross-correlation.
This code gives the same precision as the FFT upsampled cross-correlation
@@ -174,11 +172,11 @@ def phase_cross_correlation(reference_image, moving_image, *,
raise ValueError("images must be same shape")
# assume complex data is already in Fourier space
if space.lower() == 'fourier':
if space.lower() == "fourier":
src_freq = reference_image
target_freq = moving_image
# real data needs to be fft'd.
elif space.lower() == 'real':
elif space.lower() == "real":
src_freq = fft.fftn(reference_image)
target_freq = fft.fftn(moving_image)
else:
@@ -190,8 +188,7 @@ def phase_cross_correlation(reference_image, moving_image, *,
cross_correlation = fft.ifftn(image_product)
# Locate maximum
maxima = np.unravel_index(np.argmax(np.abs(cross_correlation)),
cross_correlation.shape)
maxima = np.unravel_index(np.argmax(np.abs(cross_correlation)), cross_correlation.shape)
midpoints = np.array([np.fix(axis_size / 2) for axis_size in shape])
shifts = np.stack(maxima).astype(np.float64)
@@ -213,14 +210,10 @@ def phase_cross_correlation(reference_image, moving_image, *,
dftshift = np.fix(upsampled_region_size / 2.0)
upsample_factor = np.array(upsample_factor, dtype=np.float64)
# Matrix multiply DFT around the current shift estimate
sample_region_offset = dftshift - shifts*upsample_factor
cross_correlation = _upsampled_dft(image_product.conj(),
upsampled_region_size,
upsample_factor,
sample_region_offset).conj()
sample_region_offset = dftshift - shifts * upsample_factor
cross_correlation = _upsampled_dft(image_product.conj(), upsampled_region_size, upsample_factor, sample_region_offset).conj()
# Locate maximum and map back to original pixel grid
maxima = np.unravel_index(np.argmax(np.abs(cross_correlation)),
cross_correlation.shape)
maxima = np.unravel_index(np.argmax(np.abs(cross_correlation)), cross_correlation.shape)
CCmax = cross_correlation[maxima]
maxima = np.stack(maxima).astype(np.float64) - dftshift
@@ -240,10 +233,8 @@ def phase_cross_correlation(reference_image, moving_image, *,
if return_error:
# Redirect user to masked_phase_cross_correlation if NaNs are observed
if np.isnan(CCmax) or np.isnan(src_amp) or np.isnan(target_amp):
raise ValueError(
"NaN values found, please remove NaNs from your input data")
raise ValueError("NaN values found, please remove NaNs from your input data")
return shifts, _compute_error(CCmax, src_amp, target_amp), \
_compute_phasediff(CCmax)
return shifts, _compute_error(CCmax, src_amp, target_amp), _compute_phasediff(CCmax)
else:
return shifts


@@ -28,8 +28,8 @@ prototypes :
"""
import numpy as np
from scipy.signal import convolve
from astropy.io import fits
from scipy.signal import convolve
def abs2(x):
@@ -37,9 +37,9 @@ def abs2(x):
if np.iscomplexobj(x):
x_re = x.real
x_im = x.imag
return x_re*x_re + x_im*x_im
return x_re * x_re + x_im * x_im
else:
return x*x
return x * x
def zeropad(arr, shape):
@@ -53,7 +53,7 @@ def zeropad(arr, shape):
diff = np.asarray(shape) - np.asarray(arr.shape)
if diff.min() < 0:
raise ValueError("output dimensions must be larger or equal input dimensions")
offset = diff//2
offset = diff // 2
z = np.zeros(shape, dtype=arr.dtype)
if rank == 1:
i0 = offset[0]
@@ -115,10 +115,10 @@ def zeropad(arr, shape):
def gaussian2d(x, y, sigma):
return np.exp(-(x**2+y**2)/(2*sigma**2))/(2*np.pi*sigma**2)
return np.exp(-(x**2 + y**2) / (2 * sigma**2)) / (2 * np.pi * sigma**2)
def gaussian_psf(FWHM=1., shape=(5, 5)):
def gaussian_psf(FWHM=1.0, shape=(5, 5)):
"""
Define the gaussian Point-Spread-Function of chosen shape and FWHM.
----------
@@ -136,13 +136,13 @@ def gaussian_psf(FWHM=1., shape=(5, 5)):
Kernel containing the weights of the desired gaussian PSF.
"""
# Compute standard deviation from FWHM
stdev = FWHM/(2.*np.sqrt(2.*np.log(2.)))
stdev = FWHM / (2.0 * np.sqrt(2.0 * np.log(2.0)))
# Create kernel of desired shape
x, y = np.meshgrid(np.arange(-shape[0]/2, shape[0]/2), np.arange(-shape[1]/2, shape[1]/2))
x, y = np.meshgrid(np.arange(-shape[0] / 2, shape[0] / 2), np.arange(-shape[1] / 2, shape[1] / 2))
kernel = gaussian2d(x, y, stdev)
return kernel/kernel.sum()
return kernel / kernel.sum()
def from_file_psf(filename):
@@ -164,7 +164,7 @@ def from_file_psf(filename):
if isinstance(psf, np.ndarray) or len(psf) != 2:
raise ValueError("Invalid PSF image in PrimaryHDU at {0:s}".format(filename))
# Return the normalized Point Spread Function
kernel = psf/psf.max()
kernel = psf / psf.max()
return kernel
@@ -199,14 +199,14 @@ def wiener(image, psf, alpha=0.1, clip=True):
ft_y = np.fft.fftn(im_deconv)
ft_h = np.fft.fftn(np.fft.ifftshift(psf))
ft_x = ft_h.conj()*ft_y / (abs2(ft_h) + alpha)
ft_x = ft_h.conj() * ft_y / (abs2(ft_h) + alpha)
im_deconv = np.fft.ifftn(ft_x).real
if clip:
im_deconv[im_deconv > 1] = 1
im_deconv[im_deconv < -1] = -1
return im_deconv/im_deconv.max()
return im_deconv / im_deconv.max()
def van_cittert(image, psf, alpha=0.1, iterations=20, clip=True, filter_epsilon=None):
@@ -241,12 +241,12 @@ def van_cittert(image, psf, alpha=0.1, iterations=20, clip=True, filter_epsilon=
im_deconv = image.copy()
for _ in range(iterations):
conv = convolve(im_deconv, psf, mode='same')
conv = convolve(im_deconv, psf, mode="same")
if filter_epsilon:
relative_blur = np.where(conv < filter_epsilon, 0, image - conv)
else:
relative_blur = image - conv
im_deconv += alpha*relative_blur
im_deconv += alpha * relative_blur
if clip:
im_deconv[im_deconv > 1] = 1
@@ -290,12 +290,12 @@ def richardson_lucy(image, psf, iterations=20, clip=True, filter_epsilon=None):
psf_mirror = np.flip(psf)
for _ in range(iterations):
conv = convolve(im_deconv, psf, mode='same')
conv = convolve(im_deconv, psf, mode="same")
if filter_epsilon:
relative_blur = np.where(conv < filter_epsilon, 0, image / conv)
else:
relative_blur = image / conv
im_deconv *= convolve(relative_blur, psf_mirror, mode='same')
im_deconv *= convolve(relative_blur, psf_mirror, mode="same")
if clip:
im_deconv[im_deconv > 1] = 1
@@ -335,12 +335,12 @@ def one_step_gradient(image, psf, iterations=20, clip=True, filter_epsilon=None)
psf_mirror = np.flip(psf)
for _ in range(iterations):
conv = convolve(im_deconv, psf, mode='same')
conv = convolve(im_deconv, psf, mode="same")
if filter_epsilon:
relative_blur = np.where(conv < filter_epsilon, 0, image - conv)
else:
relative_blur = image - conv
im_deconv += convolve(relative_blur, psf_mirror, mode='same')
im_deconv += convolve(relative_blur, psf_mirror, mode="same")
if clip:
im_deconv[im_deconv > 1] = 1
@@ -387,20 +387,20 @@ def conjgrad(image, psf, alpha=0.1, error=None, iterations=20):
if error is None:
wgt = np.ones(image.shape)
else:
wgt = image/error
wgt = image / error
wgt /= wgt.max()
def W(x):
"""Define W operator : apply weights"""
return wgt*x
return wgt * x
def H(x):
"""Define H operator : convolution with PSF"""
return np.fft.ifftn(ft_h*np.fft.fftn(x)).real
return np.fft.ifftn(ft_h * np.fft.fftn(x)).real
def Ht(x):
"""Define Ht operator : transpose of H"""
return np.fft.ifftn(ft_h.conj()*np.fft.fftn(x)).real
return np.fft.ifftn(ft_h.conj() * np.fft.fftn(x)).real
def DtD(x):
"""Returns the result of D'.D.x where D is a (multi-dimensional)
@@ -444,7 +444,7 @@ def conjgrad(image, psf, alpha=0.1, error=None, iterations=20):
def A(x):
"""Define symetric positive semi definite operator A"""
return Ht(W(H(x)))+alpha*DtD(x)
return Ht(W(H(x))) + alpha * DtD(x)
# Define obtained vector A.x = b
b = Ht(W(image))
@@ -458,7 +458,7 @@ def conjgrad(image, psf, alpha=0.1, error=None, iterations=20):
r = np.copy(b)
x = np.zeros(b.shape, dtype=b.dtype)
rho = inner(r, r)
epsilon = np.max([0., 1e-5*np.sqrt(rho)])
epsilon = np.max([0.0, 1e-5 * np.sqrt(rho)])
# Conjugate gradient iterations.
beta = 0.0
@@ -476,26 +476,25 @@ def conjgrad(image, psf, alpha=0.1, error=None, iterations=20):
if beta == 0.0:
p = r
else:
p = r + beta*p
p = r + beta * p
# Make optimal step along search direction.
q = A(p)
gamma = inner(p, q)
if gamma <= 0.0:
raise ValueError("Operator A is not positive definite")
alpha = rho/gamma
x += alpha*p
r -= alpha*q
alpha = rho / gamma
x += alpha * p
r -= alpha * q
rho_prev, rho = rho, inner(r, r)
beta = rho/rho_prev
beta = rho / rho_prev
# Return normalized solution
im_deconv = x/x.max()
im_deconv = x / x.max()
return im_deconv
def deconvolve_im(image, psf, alpha=0.1, error=None, iterations=20, clip=True,
filter_epsilon=None, algo='richardson'):
def deconvolve_im(image, psf, alpha=0.1, error=None, iterations=20, clip=True, filter_epsilon=None, algo="richardson"):
"""
Prepare an image for deconvolution using a chosen algorithm and return
results.
@@ -537,27 +536,23 @@ def deconvolve_im(image, psf, alpha=0.1, error=None, iterations=20, clip=True,
"""
# Normalize image to highest pixel value
pxmax = image[np.isfinite(image)].max()
if pxmax == 0.:
if pxmax == 0.0:
raise ValueError("Invalid image")
norm_image = image/pxmax
norm_image = image / pxmax
# Deconvolve normalized image
if algo.lower() in ['wiener', 'wiener simple']:
if algo.lower() in ["wiener", "wiener simple"]:
norm_deconv = wiener(image=norm_image, psf=psf, alpha=alpha, clip=clip)
elif algo.lower() in ['van-cittert', 'vancittert', 'cittert']:
norm_deconv = van_cittert(image=norm_image, psf=psf, alpha=alpha,
iterations=iterations, clip=clip, filter_epsilon=filter_epsilon)
elif algo.lower() in ['1grad', 'one_step_grad', 'one step grad']:
norm_deconv = one_step_gradient(image=norm_image, psf=psf,
iterations=iterations, clip=clip, filter_epsilon=filter_epsilon)
elif algo.lower() in ['conjgrad', 'conj_grad', 'conjugate gradient']:
norm_deconv = conjgrad(image=norm_image, psf=psf, alpha=alpha,
error=error, iterations=iterations)
elif algo.lower() in ["van-cittert", "vancittert", "cittert"]:
norm_deconv = van_cittert(image=norm_image, psf=psf, alpha=alpha, iterations=iterations, clip=clip, filter_epsilon=filter_epsilon)
elif algo.lower() in ["1grad", "one_step_grad", "one step grad"]:
norm_deconv = one_step_gradient(image=norm_image, psf=psf, iterations=iterations, clip=clip, filter_epsilon=filter_epsilon)
elif algo.lower() in ["conjgrad", "conj_grad", "conjugate gradient"]:
norm_deconv = conjgrad(image=norm_image, psf=psf, alpha=alpha, error=error, iterations=iterations)
else: # Defaults to Richardson-Lucy
norm_deconv = richardson_lucy(image=norm_image, psf=psf,
iterations=iterations, clip=clip, filter_epsilon=filter_epsilon)
norm_deconv = richardson_lucy(image=norm_image, psf=psf, iterations=iterations, clip=clip, filter_epsilon=filter_epsilon)
# Output deconvolved image with original pxmax value
im_deconv = pxmax*norm_deconv
im_deconv = pxmax * norm_deconv
return im_deconv
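For context, a minimal usage sketch of deconvolve_im as it reads after the reformat; the random image and Gaussian PSF below are illustrative placeholders (not the pipeline's inputs) and the import path is assumed:

import numpy as np
from lib.deconvolve import deconvolve_im  # assumed module path

rng = np.random.default_rng(0)
image = rng.poisson(50.0, size=(128, 128)).astype(float)  # placeholder data

# Illustrative normalized Gaussian PSF with an FWHM of roughly 3 px
y, x = np.mgrid[-15:16, -15:16]
sigma = 3.0 / (2.0 * np.sqrt(2.0 * np.log(2.0)))
psf = np.exp(-(x**2 + y**2) / (2.0 * sigma**2))
psf /= psf.sum()

deconv = deconvolve_im(image, psf, alpha=0.1, iterations=20, algo="conjgrad")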

View File

@@ -9,10 +9,12 @@ prototypes :
Save computed polarimetry parameters to a single fits file (and return HDUList)
"""
import numpy as np
from os.path import join as path_join
import numpy as np
from astropy.io import fits
from astropy.wcs import WCS
from .convex_hull import clean_ROI
@@ -38,7 +40,7 @@ def get_obs_data(infiles, data_folder="", compute_flux=False):
"""
data_array, headers, wcs_array = [], [], []
for i in range(len(infiles)):
with fits.open(path_join(data_folder, infiles[i]), mode='update') as f:
with fits.open(path_join(data_folder, infiles[i]), mode="update") as f:
headers.append(f[0].header)
data_array.append(f[0].data)
wcs_array.append(WCS(header=f[0].header, fobj=f).celestial)
@@ -47,53 +49,52 @@ def get_obs_data(infiles, data_folder="", compute_flux=False):
# Prevent negative count value in imported data
for i in range(len(data_array)):
data_array[i][data_array[i] < 0.] = 0.
data_array[i][data_array[i] < 0.0] = 0.0
# force WCS to convention PCi_ja unitary, cdelt in deg
for wcs, header in zip(wcs_array, headers):
new_wcs = wcs.deepcopy()
if new_wcs.wcs.has_cd() or (new_wcs.wcs.cdelt[:2] == np.array([1., 1.])).all():
if new_wcs.wcs.has_cd() or (new_wcs.wcs.cdelt[:2] == np.array([1.0, 1.0])).all():
# Update WCS with relevant information
if new_wcs.wcs.has_cd():
old_cd = new_wcs.wcs.cd
del new_wcs.wcs.cd
keys = list(new_wcs.to_header().keys())+['CD1_1', 'CD1_2', 'CD1_3', 'CD2_1', 'CD2_2', 'CD2_3', 'CD3_1', 'CD3_2', 'CD3_3']
keys = list(new_wcs.to_header().keys()) + ["CD1_1", "CD1_2", "CD1_3", "CD2_1", "CD2_2", "CD2_3", "CD3_1", "CD3_2", "CD3_3"]
for key in keys:
header.remove(key, ignore_missing=True)
new_cdelt = np.linalg.eig(old_cd)[0]
elif (new_wcs.wcs.cdelt == np.array([1., 1.])).all() and \
(new_wcs.array_shape in [(512, 512), (1024, 512), (512, 1024), (1024, 1024)]):
elif (new_wcs.wcs.cdelt == np.array([1.0, 1.0])).all() and (new_wcs.array_shape in [(512, 512), (1024, 512), (512, 1024), (1024, 1024)]):
old_cd = new_wcs.wcs.pc
new_wcs.wcs.pc = np.dot(old_cd, np.diag(1./new_cdelt))
new_wcs.wcs.pc = np.dot(old_cd, np.diag(1.0 / new_cdelt))
new_wcs.wcs.cdelt = new_cdelt
for key, val in new_wcs.to_header().items():
header[key] = val
try:
_ = header['ORIENTAT']
_ = header["ORIENTAT"]
except KeyError:
header['ORIENTAT'] = -np.arccos(new_wcs.wcs.pc[0, 0])*180./np.pi
header["ORIENTAT"] = -np.arccos(new_wcs.wcs.pc[0, 0]) * 180.0 / np.pi
# force WCS for POL60 to have same pixel size as POL0 and POL120
is_pol60 = np.array([head['filtnam1'].lower() == 'pol60' for head in headers], dtype=bool)
is_pol60 = np.array([head["filtnam1"].lower() == "pol60" for head in headers], dtype=bool)
cdelt = np.round(np.array([WCS(head).wcs.cdelt[:2] for head in headers]), 14)
if np.unique(cdelt[np.logical_not(is_pol60)], axis=0).size != 2:
print(np.unique(cdelt[np.logical_not(is_pol60)], axis=0))
raise ValueError("Not all images have same pixel size")
else:
for i in np.arange(len(headers))[is_pol60]:
headers[i]['cdelt1'], headers[i]['cdelt2'] = np.unique(cdelt[np.logical_not(is_pol60)], axis=0)[0]
headers[i]["cdelt1"], headers[i]["cdelt2"] = np.unique(cdelt[np.logical_not(is_pol60)], axis=0)[0]
if compute_flux:
for i in range(len(infiles)):
# Compute the flux in counts/sec
data_array[i] /= headers[i]['EXPTIME']
data_array[i] /= headers[i]["EXPTIME"]
return data_array, headers
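A short hedged sketch of calling get_obs_data; the file names, folder and import path are placeholders or assumptions:

from lib.fits import get_obs_data  # assumed module path

infiles = ["x3nl0101r_c0f.fits", "x3nl0102r_c0f.fits", "x3nl0103r_c0f.fits"]  # hypothetical dataset names
data_array, headers = get_obs_data(infiles, data_folder="./data/TARGET/1234", compute_flux=True)
# Counts are clipped to >= 0, converted to counts/sec, and the WCS is forced to the PCi_ja + CDELT convention.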
def save_Stokes(I_stokes, Q_stokes, U_stokes, Stokes_cov, P, debiased_P, s_P,
s_P_P, PA, s_PA, s_PA_P, headers, data_mask, filename, data_folder="",
return_hdul=False):
def save_Stokes(
I_stokes, Q_stokes, U_stokes, Stokes_cov, P, debiased_P, s_P, s_P_P, PA, s_PA, s_PA_P, headers, data_mask, filename, data_folder="", return_hdul=False
):
"""
Save computed polarimetry parameters to a single fits file,
updating header accordingly.
@@ -130,80 +131,87 @@ def save_Stokes(I_stokes, Q_stokes, U_stokes, Stokes_cov, P, debiased_P, s_P,
"""
# Create new WCS object given the modified images
ref_header = headers[0]
exp_tot = np.array([header['exptime'] for header in headers]).sum()
exp_tot = np.array([header["exptime"] for header in headers]).sum()
new_wcs = WCS(ref_header).deepcopy()
if data_mask.shape != (1, 1):
vertex = clean_ROI(data_mask)
shape = vertex[1::2]-vertex[0::2]
shape = vertex[1::2] - vertex[0::2]
new_wcs.array_shape = shape
new_wcs.wcs.crpix = np.array(new_wcs.wcs.crpix) - vertex[0::-2]
header = new_wcs.to_header()
header['telescop'] = (ref_header['telescop'] if 'TELESCOP' in list(ref_header.keys()) else 'HST', 'telescope used to acquire data')
header['instrume'] = (ref_header['instrume'] if 'INSTRUME' in list(ref_header.keys()) else 'FOC', 'identifier for instrument used to acquire data')
header['photplam'] = (ref_header['photplam'], 'Pivot Wavelength')
header['photflam'] = (ref_header['photflam'], 'Inverse Sensitivity in DN/sec/cm**2/Angst')
header['exptot'] = (exp_tot, 'Total exposure time in sec')
header['proposid'] = (ref_header['proposid'], 'PEP proposal identifier for observation')
header['targname'] = (ref_header['targname'], 'Target name')
header['orientat'] = (ref_header['orientat'], 'Angle between North and the y-axis of the image')
header['filename'] = (filename, 'Original filename')
header['P_int'] = (ref_header['P_int'], 'Integrated polarization degree')
header['P_int_err'] = (ref_header['P_int_err'], 'Integrated polarization degree error')
header['PA_int'] = (ref_header['PA_int'], 'Integrated polarization angle')
header['PA_int_err'] = (ref_header['PA_int_err'], 'Integrated polarization angle error')
header["telescop"] = (ref_header["telescop"] if "TELESCOP" in list(ref_header.keys()) else "HST", "telescope used to acquire data")
header["instrume"] = (ref_header["instrume"] if "INSTRUME" in list(ref_header.keys()) else "FOC", "identifier for instrument used to acuire data")
header["photplam"] = (ref_header["photplam"], "Pivot Wavelength")
header["photflam"] = (ref_header["photflam"], "Inverse Sensitivity in DN/sec/cm**2/Angst")
header["exptot"] = (exp_tot, "Total exposure time in sec")
header["proposid"] = (ref_header["proposid"], "PEP proposal identifier for observation")
header["targname"] = (ref_header["targname"], "Target name")
header["orientat"] = (ref_header["orientat"], "Angle between North and the y-axis of the image")
header["filename"] = (filename, "Original filename")
header["P_int"] = (ref_header["P_int"], "Integrated polarization degree")
header["P_int_err"] = (ref_header["P_int_err"], "Integrated polarization degree error")
header["PA_int"] = (ref_header["PA_int"], "Integrated polarization angle")
header["PA_int_err"] = (ref_header["PA_int_err"], "Integrated polarization angle error")
# Crop Data to mask
if data_mask.shape != (1, 1):
I_stokes = I_stokes[vertex[2]:vertex[3], vertex[0]:vertex[1]]
Q_stokes = Q_stokes[vertex[2]:vertex[3], vertex[0]:vertex[1]]
U_stokes = U_stokes[vertex[2]:vertex[3], vertex[0]:vertex[1]]
P = P[vertex[2]:vertex[3], vertex[0]:vertex[1]]
debiased_P = debiased_P[vertex[2]:vertex[3], vertex[0]:vertex[1]]
s_P = s_P[vertex[2]:vertex[3], vertex[0]:vertex[1]]
s_P_P = s_P_P[vertex[2]:vertex[3], vertex[0]:vertex[1]]
PA = PA[vertex[2]:vertex[3], vertex[0]:vertex[1]]
s_PA = s_PA[vertex[2]:vertex[3], vertex[0]:vertex[1]]
s_PA_P = s_PA_P[vertex[2]:vertex[3], vertex[0]:vertex[1]]
I_stokes = I_stokes[vertex[2] : vertex[3], vertex[0] : vertex[1]]
Q_stokes = Q_stokes[vertex[2] : vertex[3], vertex[0] : vertex[1]]
U_stokes = U_stokes[vertex[2] : vertex[3], vertex[0] : vertex[1]]
P = P[vertex[2] : vertex[3], vertex[0] : vertex[1]]
debiased_P = debiased_P[vertex[2] : vertex[3], vertex[0] : vertex[1]]
s_P = s_P[vertex[2] : vertex[3], vertex[0] : vertex[1]]
s_P_P = s_P_P[vertex[2] : vertex[3], vertex[0] : vertex[1]]
PA = PA[vertex[2] : vertex[3], vertex[0] : vertex[1]]
s_PA = s_PA[vertex[2] : vertex[3], vertex[0] : vertex[1]]
s_PA_P = s_PA_P[vertex[2] : vertex[3], vertex[0] : vertex[1]]
new_Stokes_cov = np.zeros((*Stokes_cov.shape[:-2], *shape[::-1]))
for i in range(3):
for j in range(3):
Stokes_cov[i, j][(1-data_mask).astype(bool)] = 0.
new_Stokes_cov[i, j] = Stokes_cov[i, j][vertex[2]:vertex[3], vertex[0]:vertex[1]]
Stokes_cov[i, j][(1 - data_mask).astype(bool)] = 0.0
new_Stokes_cov[i, j] = Stokes_cov[i, j][vertex[2] : vertex[3], vertex[0] : vertex[1]]
Stokes_cov = new_Stokes_cov
data_mask = data_mask[vertex[2]:vertex[3], vertex[0]:vertex[1]]
data_mask = data_mask[vertex[2] : vertex[3], vertex[0] : vertex[1]]
data_mask = data_mask.astype(float, copy=False)
# Create HDUList object
hdul = fits.HDUList([])
# Add I_stokes as PrimaryHDU
header['datatype'] = ('I_stokes', 'type of data stored in the HDU')
I_stokes[(1-data_mask).astype(bool)] = 0.
header["datatype"] = ("I_stokes", "type of data stored in the HDU")
I_stokes[(1 - data_mask).astype(bool)] = 0.0
primary_hdu = fits.PrimaryHDU(data=I_stokes, header=header)
primary_hdu.name = 'I_stokes'
primary_hdu.name = "I_stokes"
hdul.append(primary_hdu)
# Add Q, U, Stokes_cov, P, s_P, PA, s_PA to the HDUList
for data, name in [[Q_stokes, 'Q_stokes'], [U_stokes, 'U_stokes'],
[Stokes_cov, 'IQU_cov_matrix'], [P, 'Pol_deg'],
[debiased_P, 'Pol_deg_debiased'], [s_P, 'Pol_deg_err'],
[s_P_P, 'Pol_deg_err_Poisson_noise'], [PA, 'Pol_ang'],
[s_PA, 'Pol_ang_err'], [s_PA_P, 'Pol_ang_err_Poisson_noise'],
[data_mask, 'Data_mask']]:
for data, name in [
[Q_stokes, "Q_stokes"],
[U_stokes, "U_stokes"],
[Stokes_cov, "IQU_cov_matrix"],
[P, "Pol_deg"],
[debiased_P, "Pol_deg_debiased"],
[s_P, "Pol_deg_err"],
[s_P_P, "Pol_deg_err_Poisson_noise"],
[PA, "Pol_ang"],
[s_PA, "Pol_ang_err"],
[s_PA_P, "Pol_ang_err_Poisson_noise"],
[data_mask, "Data_mask"],
]:
hdu_header = header.copy()
hdu_header['datatype'] = name
if not name == 'IQU_cov_matrix':
data[(1-data_mask).astype(bool)] = 0.
hdu_header["datatype"] = name
if not name == "IQU_cov_matrix":
data[(1 - data_mask).astype(bool)] = 0.0
hdu = fits.ImageHDU(data=data, header=hdu_header)
hdu.name = name
hdul.append(hdu)
# Save fits file to designated filepath
hdul.writeto(path_join(data_folder, filename+".fits"), overwrite=True)
hdul.writeto(path_join(data_folder, filename + ".fits"), overwrite=True)
if return_hdul:
return hdul
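For orientation, the HDUList written above can be read back by the extension names set in the loop; a small sketch with a placeholder file name:

from astropy.io import fits

with fits.open("./data/TARGET/TARGET_FOC.fits") as hdul:  # hypothetical output of save_Stokes
    I_map = hdul["I_STOKES"].data
    P_map = hdul["POL_DEG_DEBIASED"].data
    PA_map = hdul["POL_ANG"].data
    cov = hdul["IQU_COV_MATRIX"].data  # (3, 3, ny, nx) covariance cube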

View File

@@ -3,15 +3,19 @@
"""
Library function to query and download datasets from the MAST API.
"""
from os import system
from os.path import join as path_join, exists as path_exists
from astroquery.mast import MastMissions, Observations
from astropy.table import unique, Column
from astropy.time import Time, TimeDelta
from os.path import exists as path_exists
from os.path import join as path_join
from warnings import filterwarnings
import astropy.units as u
import numpy as np
from astropy.table import Column, unique
from astropy.time import Time, TimeDelta
from astroquery.exceptions import NoResultsWarning
from warnings import filterwarnings
from astroquery.mast import MastMissions, Observations
filterwarnings("error", category=NoResultsWarning)
@@ -19,21 +23,24 @@ def divide_proposal(products):
"""
Divide observations into proposals by time or filter
"""
for pid in np.unique(products['Proposal ID']):
obs = products[products['Proposal ID'] == pid].copy()
same_filt = np.unique(np.array(np.sum([obs['Filters'][:, 1:] == filt[1:] for filt in obs['Filters']], axis=2) < 3, dtype=bool), axis=0)
for pid in np.unique(products["Proposal ID"]):
obs = products[products["Proposal ID"] == pid].copy()
same_filt = np.unique(np.array(np.sum([obs["Filters"][:, 1:] == filt[1:] for filt in obs["Filters"]], axis=2) < 3, dtype=bool), axis=0)
if len(same_filt) > 1:
for filt in same_filt:
products['Proposal ID'][np.any([products['Dataset'] == dataset for dataset in obs['Dataset'][filt]], axis=0)] = "_".join(
[obs['Proposal ID'][filt][0], "_".join([fi for fi in obs['Filters'][filt][0][1:] if fi[:-1] != "CLEAR"])])
for pid in np.unique(products['Proposal ID']):
obs = products[products['Proposal ID'] == pid].copy()
close_date = np.unique([[np.abs(TimeDelta(obs['Start'][i].unix-date.unix, format='sec'))
< 7.*u.d for i in range(len(obs))] for date in obs['Start']], axis=0)
products["Proposal ID"][np.any([products["Dataset"] == dataset for dataset in obs["Dataset"][filt]], axis=0)] = "_".join(
[obs["Proposal ID"][filt][0], "_".join([fi for fi in obs["Filters"][filt][0][1:] if fi[:-1] != "CLEAR"])]
)
for pid in np.unique(products["Proposal ID"]):
obs = products[products["Proposal ID"] == pid].copy()
close_date = np.unique(
[[np.abs(TimeDelta(obs["Start"][i].unix - date.unix, format="sec")) < 7.0 * u.d for i in range(len(obs))] for date in obs["Start"]], axis=0
)
if len(close_date) > 1:
for date in close_date:
products['Proposal ID'][np.any([products['Dataset'] == dataset for dataset in obs['Dataset'][date]], axis=0)
] = "_".join([obs['Proposal ID'][date][0], str(obs['Start'][date][0])[:10]])
products["Proposal ID"][np.any([products["Dataset"] == dataset for dataset in obs["Dataset"][date]], axis=0)] = "_".join(
[obs["Proposal ID"][date][0], str(obs["Start"][date][0])[:10]]
)
return products
@@ -41,53 +48,36 @@ def get_product_list(target=None, proposal_id=None):
"""
Retrieve the product list for a given target from the MAST archive
"""
mission = MastMissions(mission='hst')
radius = '3'
mission = MastMissions(mission="hst")
radius = "3"
select_cols = [
'sci_data_set_name',
'sci_spec_1234',
'sci_actual_duration',
'sci_start_time',
'sci_stop_time',
'sci_central_wavelength',
'sci_instrume',
'sci_aper_1234',
'sci_targname',
'sci_pep_id',
'sci_pi_last_name']
"sci_data_set_name",
"sci_spec_1234",
"sci_actual_duration",
"sci_start_time",
"sci_stop_time",
"sci_central_wavelength",
"sci_instrume",
"sci_aper_1234",
"sci_targname",
"sci_pep_id",
"sci_pi_last_name",
]
cols = [
'Dataset',
'Filters',
'Exptime',
'Start',
'Stop',
'Central wavelength',
'Instrument',
'Size',
'Target name',
'Proposal ID',
'PI last name']
cols = ["Dataset", "Filters", "Exptime", "Start", "Stop", "Central wavelength", "Instrument", "Size", "Target name", "Proposal ID", "PI last name"]
if target is None:
target = input("Target name:\n>")
# Use query_object method to resolve the object name into coordinates
results = mission.query_object(
target,
radius=radius,
select_cols=select_cols,
sci_spec_1234='POL*',
sci_obs_type='image',
sci_aec='S',
sci_instrume='foc')
results = mission.query_object(target, radius=radius, select_cols=select_cols, sci_spec_1234="POL*", sci_obs_type="image", sci_aec="S", sci_instrume="foc")
for c, n_c in zip(select_cols, cols):
results.rename_column(c, n_c)
results['Proposal ID'] = Column(results['Proposal ID'], dtype='U35')
results['Filters'] = Column(np.array([filt.split(";") for filt in results['Filters']], dtype=str))
results['Start'] = Column(Time(results['Start']))
results['Stop'] = Column(Time(results['Stop']))
results["Proposal ID"] = Column(results["Proposal ID"], dtype="U35")
results["Filters"] = Column(np.array([filt.split(";") for filt in results["Filters"]], dtype=str))
results["Start"] = Column(Time(results["Start"]))
results["Stop"] = Column(Time(results["Stop"]))
results = divide_proposal(results)
obs = results.copy()
@@ -95,67 +85,70 @@ def get_product_list(target=None, proposal_id=None):
# Remove single observations for which a F1ND filter is used
to_remove = []
for i in range(len(obs)):
if "F1ND" in obs[i]['Filters']:
if "F1ND" in obs[i]["Filters"]:
to_remove.append(i)
obs.remove_rows(to_remove)
# Remove observations for which a polarization filter is missing
polfilt = {"POL0": 0, "POL60": 1, "POL120": 2}
for pid in np.unique(obs['Proposal ID']):
for pid in np.unique(obs["Proposal ID"]):
used_pol = np.zeros(3)
for dataset in obs[obs['Proposal ID'] == pid]:
used_pol[polfilt[dataset['Filters'][0]]] += 1
for dataset in obs[obs["Proposal ID"] == pid]:
used_pol[polfilt[dataset["Filters"][0]]] += 1
if np.any(used_pol < 1):
obs.remove_rows(np.arange(len(obs))[obs['Proposal ID'] == pid])
obs.remove_rows(np.arange(len(obs))[obs["Proposal ID"] == pid])
tab = unique(obs, ['Target name', 'Proposal ID'])
obs["Obs"] = [np.argmax(np.logical_and(tab['Proposal ID'] == data['Proposal ID'], tab['Target name'] == data['Target name']))+1 for data in obs]
tab = unique(obs, ["Target name", "Proposal ID"])
obs["Obs"] = [np.argmax(np.logical_and(tab["Proposal ID"] == data["Proposal ID"], tab["Target name"] == data["Target name"])) + 1 for data in obs]
try:
n_obs = unique(obs[["Obs", "Filters", "Start", "Central wavelength", "Instrument", "Size", "Target name", "Proposal ID", "PI last name"]], 'Obs')
n_obs = unique(obs[["Obs", "Filters", "Start", "Central wavelength", "Instrument", "Size", "Target name", "Proposal ID", "PI last name"]], "Obs")
except IndexError:
raise ValueError(
"There is no observation with POL0, POL60 and POL120 for {0:s} in HST/FOC Legacy Archive".format(target))
raise ValueError("There is no observation with POL0, POL60 and POL120 for {0:s} in HST/FOC Legacy Archive".format(target))
b = np.zeros(len(results), dtype=bool)
if proposal_id is not None and str(proposal_id) in obs['Proposal ID']:
b[results['Proposal ID'] == str(proposal_id)] = True
if proposal_id is not None and str(proposal_id) in obs["Proposal ID"]:
b[results["Proposal ID"] == str(proposal_id)] = True
else:
n_obs.pprint(len(n_obs)+2)
a = [np.array(i.split(":"), dtype=str)
for i in input("select observations to be downloaded ('1,3,4,5' or '1,3:5' or 'all','*' default to 1)\n>").split(',')]
if a[0][0] == '':
n_obs.pprint(len(n_obs) + 2)
a = [
np.array(i.split(":"), dtype=str)
for i in input("select observations to be downloaded ('1,3,4,5' or '1,3:5' or 'all','*' default to 1)\n>").split(",")
]
if a[0][0] == "":
a = [[1]]
if a[0][0] in ['a', 'all', '*']:
if a[0][0] in ["a", "all", "*"]:
b = np.ones(len(results), dtype=bool)
else:
a = [np.array(i, dtype=int) for i in a]
for i in a:
if len(i) > 1:
for j in range(i[0], i[1]+1):
b[np.array([dataset in obs['Dataset'][obs["Obs"] == j] for dataset in results['Dataset']])] = True
for j in range(i[0], i[1] + 1):
b[np.array([dataset in obs["Dataset"][obs["Obs"] == j] for dataset in results["Dataset"]])] = True
else:
b[np.array([dataset in obs['Dataset'][obs['Obs'] == i[0]] for dataset in results['Dataset']])] = True
b[np.array([dataset in obs["Dataset"][obs["Obs"] == i[0]] for dataset in results["Dataset"]])] = True
observations = Observations.query_criteria(obs_id=list(results['Dataset'][b]))
products = Observations.filter_products(Observations.get_product_list(observations),
productType=['SCIENCE'],
dataproduct_type=['image'],
calib_level=[2],
description="DADS C0F file - Calibrated exposure WFPC/WFPC2/FOC/FOS/GHRS/HSP")
products['proposal_id'] = Column(products['proposal_id'], dtype='U35')
products['target_name'] = Column(observations['target_name'])
observations = Observations.query_criteria(obs_id=list(results["Dataset"][b]))
products = Observations.filter_products(
Observations.get_product_list(observations),
productType=["SCIENCE"],
dataproduct_type=["image"],
calib_level=[2],
description="DADS C0F file - Calibrated exposure WFPC/WFPC2/FOC/FOS/GHRS/HSP",
)
products["proposal_id"] = Column(products["proposal_id"], dtype="U35")
products["target_name"] = Column(observations["target_name"])
for prod in products:
prod['proposal_id'] = results['Proposal ID'][results['Dataset'] == prod['productFilename'][:len(results['Dataset'][0])].upper()][0]
prod["proposal_id"] = results["Proposal ID"][results["Dataset"] == prod["productFilename"][: len(results["Dataset"][0])].upper()][0]
for prod in products:
prod['target_name'] = observations['target_name'][observations['obsid'] == prod['obsID']][0]
tab = unique(products, ['target_name', 'proposal_id'])
prod["target_name"] = observations["target_name"][observations["obsid"] == prod["obsID"]][0]
tab = unique(products, ["target_name", "proposal_id"])
products["Obs"] = [np.argmax(np.logical_and(tab['proposal_id'] == data['proposal_id'], tab['target_name'] == data['target_name']))+1 for data in products]
products["Obs"] = [np.argmax(np.logical_and(tab["proposal_id"] == data["proposal_id"], tab["target_name"] == data["target_name"])) + 1 for data in products]
return target, products
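Hedged usage sketch: when both target and proposal_id are supplied and match an archived observation, get_product_list runs without the interactive prompt (the values below are placeholders):

target, products = get_product_list(target="NGC1068", proposal_id=1234)  # hypothetical target and proposal
print(products["productFilename", "proposal_id", "target_name", "Obs"])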
def retrieve_products(target=None, proposal_id=None, output_dir='./data'):
def retrieve_products(target=None, proposal_id=None, output_dir="./data"):
"""
Given a target name and a proposal_id, create the local directories and retrieve the fits files from the MAST Archive
"""
@@ -163,18 +156,19 @@ def retrieve_products(target=None, proposal_id=None, output_dir='./data'):
prodpaths = []
# data_dir = path_join(output_dir, target)
out = ""
for obs in unique(products, 'Obs'):
for obs in unique(products, "Obs"):
filepaths = []
# obs_dir = path_join(data_dir, obs['prodposal_id'])
# if obs['target_name']!=target:
obs_dir = path_join(path_join(output_dir, target), obs['proposal_id'])
obs_dir = path_join(path_join(output_dir, target), obs["proposal_id"])
if not path_exists(obs_dir):
system("mkdir -p {0:s} {1:s}".format(obs_dir, obs_dir.replace("data", "plots")))
for file in products['productFilename'][products['Obs'] == obs['Obs']]:
for file in products["productFilename"][products["Obs"] == obs["Obs"]]:
fpath = path_join(obs_dir, file)
if not path_exists(fpath):
out += "{0:s} : {1:s}\n".format(file, Observations.download_file(
products['dataURI'][products['productFilename'] == file][0], local_path=fpath)[0])
out += "{0:s} : {1:s}\n".format(
file, Observations.download_file(products["dataURI"][products["productFilename"] == file][0], local_path=fpath)[0]
)
else:
out += "{0:s} : Exists\n".format(file)
filepaths.append([obs_dir, file])
@@ -186,13 +180,12 @@ def retrieve_products(target=None, proposal_id=None, output_dir='./data'):
if __name__ == "__main__":
import argparse
parser = argparse.ArgumentParser(description='Query MAST for target products')
parser.add_argument('-t', '--target', metavar='targetname', required=False,
help='the name of the target', type=str, default=None)
parser.add_argument('-p', '--proposal_id', metavar='proposal_id', required=False,
help='the proposal id of the data products', type=int, default=None)
parser.add_argument('-o', '--output_dir', metavar='directory_path', required=False,
help='output directory path for the data products', type=str, default="./data")
parser = argparse.ArgumentParser(description="Query MAST for target products")
parser.add_argument("-t", "--target", metavar="targetname", required=False, help="the name of the target", type=str, default=None)
parser.add_argument("-p", "--proposal_id", metavar="proposal_id", required=False, help="the proposal id of the data products", type=int, default=None)
parser.add_argument(
"-o", "--output_dir", metavar="directory_path", required=False, help="output directory path for the data products", type=str, default="./data"
)
args = parser.parse_args()
print(args.target)
prodpaths = retrieve_products(target=args.target, proposal_id=args.proposal_id)

File diff suppressed because it is too large

View File

@@ -1,10 +1,11 @@
import numpy as np
def rot2D(ang):
"""
Return the 2D rotation matrix of given angle in degrees
"""
alpha = np.pi*ang/180
alpha = np.pi * ang / 180
return np.array([[np.cos(alpha), np.sin(alpha)], [-np.sin(alpha), np.cos(alpha)]])
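A quick sanity check of rot2D (illustrative only):

import numpy as np
assert np.allclose(rot2D(90.0), np.array([[0.0, 1.0], [-1.0, 0.0]]))  # matches the [[cos, sin], [-sin, cos]] layout above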
@@ -17,10 +18,10 @@ def princ_angle(ang):
A = np.array([ang])
else:
A = np.array(ang)
while np.any(A < 0.):
A[A < 0.] = A[A < 0.]+360.
while np.any(A >= 180.):
A[A >= 180.] = A[A >= 180.]-180.
while np.any(A < 0.0):
A[A < 0.0] = A[A < 0.0] + 360.0
while np.any(A >= 180.0):
A[A >= 180.0] = A[A >= 180.0] - 180.0
if type(ang) is type(A):
return A
else:
@@ -31,16 +32,16 @@ def sci_not(v, err, rnd=1, out=str):
"""
Return the scientific error notation as a string.
"""
power = - int(('%E' % v)[-3:])+1
output = [r"({0}".format(round(v*10**power, rnd)), round(v*10**power, rnd)]
power = -int(("%E" % v)[-3:]) + 1
output = [r"({0}".format(round(v * 10**power, rnd)), round(v * 10**power, rnd)]
if isinstance(err, list):
for error in err:
output[0] += r" $\pm$ {0}".format(round(error*10**power, rnd))
output.append(round(error*10**power, rnd))
output[0] += r" $\pm$ {0}".format(round(error * 10**power, rnd))
output.append(round(error * 10**power, rnd))
else:
output[0] += r" $\pm$ {0}".format(round(err*10**power, rnd))
output.append(round(err*10**power, rnd))
output[0] += r" $\pm$ {0}".format(round(err * 10**power, rnd))
output.append(round(err * 10**power, rnd))
if out == str:
return output[0]+r")e{0}".format(-power)
return output[0] + r")e{0}".format(-power)
else:
return *output[1:], -power
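Worked example of sci_not with illustrative numbers:

sci_not(0.0123, 0.0005, rnd=1)             # -> "(12.3 $\pm$ 0.5)e-3"
sci_not(0.0123, 0.0005, rnd=1, out=float)  # -> (12.3, 0.5, -3)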

View File

@@ -1,7 +1,7 @@
#!/usr/bin/python3
from astropy.io import fits
import numpy as np
from lib.plots import overplot_radio, overplot_pol
from astropy.io import fits
from lib.plots import overplot_pol, overplot_radio
from matplotlib.colors import LogNorm
Stokes_UV = fits.open("./data/IC5063/5918/IC5063_FOC_b0.10arcsec_c0.20arcsec.fits")
@@ -14,31 +14,37 @@ Stokes_357GHz = fits.open("./data/IC5063/radio/IC5063_357GHz.fits")
Stokes_IR = fits.open("./data/IC5063/IR/u2e65g01t_c0f_rot.fits")
# levelsMorganti = np.array([1.,2.,3.,8.,16.,32.,64.,128.])
levelsMorganti = np.logspace(-0.1249, 1.97, 7)/100.
levelsMorganti = np.logspace(-0.1249, 1.97, 7) / 100.0
levels18GHz = levelsMorganti*Stokes_18GHz[0].data.max()
levels18GHz = levelsMorganti * Stokes_18GHz[0].data.max()
A = overplot_radio(Stokes_UV, Stokes_18GHz)
A.plot(levels=levels18GHz, SNRp_cut=2.0, SNRi_cut=10.0, savename='./plots/IC5063/18GHz_overplot.pdf', vec_scale=None)
A.plot(levels=levels18GHz, SNRp_cut=2.0, SNRi_cut=10.0, savename="./plots/IC5063/18GHz_overplot.pdf", vec_scale=None)
levels24GHz = levelsMorganti*Stokes_24GHz[0].data.max()
levels24GHz = levelsMorganti * Stokes_24GHz[0].data.max()
B = overplot_radio(Stokes_UV, Stokes_24GHz)
B.plot(levels=levels24GHz, SNRp_cut=2.0, SNRi_cut=10.0, savename='./plots/IC5063/24GHz_overplot.pdf', vec_scale=None)
B.plot(levels=levels24GHz, SNRp_cut=2.0, SNRi_cut=10.0, savename="./plots/IC5063/24GHz_overplot.pdf", vec_scale=None)
levels103GHz = levelsMorganti*Stokes_103GHz[0].data.max()
levels103GHz = levelsMorganti * Stokes_103GHz[0].data.max()
C = overplot_radio(Stokes_UV, Stokes_103GHz)
C.plot(levels=levels103GHz, SNRp_cut=2.0, SNRi_cut=10.0, savename='./plots/IC5063/103GHz_overplot.pdf', vec_scale=None)
C.plot(levels=levels103GHz, SNRp_cut=2.0, SNRi_cut=10.0, savename="./plots/IC5063/103GHz_overplot.pdf", vec_scale=None)
levels229GHz = levelsMorganti*Stokes_229GHz[0].data.max()
levels229GHz = levelsMorganti * Stokes_229GHz[0].data.max()
D = overplot_radio(Stokes_UV, Stokes_229GHz)
D.plot(levels=levels229GHz, SNRp_cut=2.0, SNRi_cut=10.0, savename='./plots/IC5063/229GHz_overplot.pdf', vec_scale=None)
D.plot(levels=levels229GHz, SNRp_cut=2.0, SNRi_cut=10.0, savename="./plots/IC5063/229GHz_overplot.pdf", vec_scale=None)
levels357GHz = levelsMorganti*Stokes_357GHz[0].data.max()
levels357GHz = levelsMorganti * Stokes_357GHz[0].data.max()
E = overplot_radio(Stokes_UV, Stokes_357GHz)
E.plot(levels=levels357GHz, SNRp_cut=2.0, SNRi_cut=10.0, savename='./plots/IC5063/357GHz_overplot.pdf', vec_scale=None)
E.plot(levels=levels357GHz, SNRp_cut=2.0, SNRi_cut=10.0, savename="./plots/IC5063/357GHz_overplot.pdf", vec_scale=None)
# F = overplot_pol(Stokes_UV, Stokes_S2)
# F.plot(SNRp_cut=3.0, SNRi_cut=80.0, savename='./plots/IC5063/S2_overplot.pdf', norm=LogNorm(vmin=5e-20,vmax=5e-18))
G = overplot_pol(Stokes_UV, Stokes_IR, cmap='inferno')
G.plot(SNRp_cut=2.0, SNRi_cut=10.0, savename='./plots/IC5063/IR_overplot.pdf', vec_scale=None,
norm=LogNorm(Stokes_IR[0].data.max()*Stokes_IR[0].header['photflam']/1e3, Stokes_IR[0].data.max()*Stokes_IR[0].header['photflam']), cmap='inferno_r')
G = overplot_pol(Stokes_UV, Stokes_IR, cmap="inferno")
G.plot(
SNRp_cut=2.0,
SNRi_cut=10.0,
savename="./plots/IC5063/IR_overplot.pdf",
vec_scale=None,
norm=LogNorm(Stokes_IR[0].data.max() * Stokes_IR[0].header["photflam"] / 1e3, Stokes_IR[0].data.max() * Stokes_IR[0].header["photflam"]),
cmap="inferno_r",
)

View File

@@ -1,6 +1,6 @@
#!/usr/bin/python3
from astropy.io import fits
import numpy as np
from astropy.io import fits
from lib.plots import overplot_chandra, overplot_pol
from matplotlib.colors import LogNorm
@@ -8,13 +8,13 @@ Stokes_UV = fits.open("./data/MRK463E/5960/MRK463E_FOC_b0.05arcsec_c0.10arcsec.f
Stokes_IR = fits.open("./data/MRK463E/WFPC2/IR_rot_crop.fits")
Stokes_Xr = fits.open("./data/MRK463E/Chandra/X_ray_crop.fits")
levels = np.geomspace(1., 99., 7)
levels = np.geomspace(1.0, 99.0, 7)
A = overplot_chandra(Stokes_UV, Stokes_Xr, norm=LogNorm())
A.plot(levels=levels, SNRp_cut=3.0, SNRi_cut=3.0, vec_scale=5, zoom=1, savename='./plots/MRK463E/Chandra_overplot.pdf')
A.plot(levels=levels, SNRp_cut=3.0, SNRi_cut=3.0, vec_scale=5, zoom=1, savename="./plots/MRK463E/Chandra_overplot.pdf")
A.write_to(path1="./data/MRK463E/FOC_data_Chandra.fits", path2="./data/MRK463E/Chandra_data.fits", suffix="aligned")
levels = np.array([0.8, 2, 5, 10, 20, 50])/100.*Stokes_UV[0].header['photflam']
levels = np.array([0.8, 2, 5, 10, 20, 50]) / 100.0 * Stokes_UV[0].header["photflam"]
B = overplot_pol(Stokes_UV, Stokes_IR, norm=LogNorm())
B.plot(levels=levels, SNRp_cut=3.0, SNRi_cut=3.0, vec_scale=5, norm=LogNorm(8.5e-18, 2.5e-15), savename='./plots/MRK463E/IR_overplot.pdf')
B.plot(levels=levels, SNRp_cut=3.0, SNRi_cut=3.0, vec_scale=5, norm=LogNorm(8.5e-18, 2.5e-15), savename="./plots/MRK463E/IR_overplot.pdf")
B.write_to(path1="./data/MRK463E/FOC_data_WFPC.fits", path2="./data/MRK463E/WFPC_data.fits", suffix="aligned")

View File

@@ -1,5 +1,6 @@
#!/usr/bin/python
from getopt import getopt, error as get_error
from getopt import error as get_error
from getopt import getopt
from sys import argv
arglist = argv[1:]
@@ -24,7 +25,7 @@ try:
elif curr_arg in ("-i", "--snri"):
SNRi_cut = int(curr_val)
elif curr_arg in ("-l", "--lim"):
flux_lim = list("".join(curr_val).split(','))
flux_lim = list("".join(curr_val).split(","))
except get_error as err:
print(str(err))

View File

@@ -1,19 +1,21 @@
#!/usr/bin/python
def main(infiles=None):
"""
Retrieve the native spatial resolution from a given observation.
"""
from os.path import join as path_join
from warnings import catch_warnings, filterwarnings
from astropy.io.fits import getheader
from astropy.wcs import WCS, FITSFixedWarning
from numpy.linalg import eig
if infiles is None:
print("Usage: \"python get_cdelt.py -f infiles\"")
print('Usage: "python get_cdelt.py -f infiles"')
return 1
prod = [["/".join(filepath.split('/')[:-1]), filepath.split('/')[-1]] for filepath in infiles]
prod = [["/".join(filepath.split("/")[:-1]), filepath.split("/")[-1]] for filepath in infiles]
data_folder = prod[0][0]
infiles = [p[1] for p in prod]
@@ -21,14 +23,14 @@ def main(infiles=None):
size = {}
for currfile in infiles:
with catch_warnings():
filterwarnings('ignore', message="'datfix' made the change", category=FITSFixedWarning)
filterwarnings("ignore", message="'datfix' made the change", category=FITSFixedWarning)
wcs = WCS(getheader(path_join(data_folder, currfile))).celestial
key = currfile[:-5]
size[key] = wcs.array_shape
if wcs.wcs.has_cd():
cdelt[key] = eig(wcs.wcs.cd)[0]*3600.
cdelt[key] = eig(wcs.wcs.cd)[0] * 3600.0
else:
cdelt[key] = wcs.wcs.cdelt*3600.
cdelt[key] = wcs.wcs.cdelt * 3600.0
print("Image name, native resolution in arcsec and shape")
for currfile in infiles:
@@ -41,7 +43,7 @@ def main(infiles=None):
if __name__ == "__main__":
import argparse
parser = argparse.ArgumentParser(description='Retrieve native spatial resolution from data products')
parser.add_argument('-f', '--files', metavar='path', required=False, nargs='*', help='the full or relative path to the data products', default=None)
parser = argparse.ArgumentParser(description="Query MAST for target products")
parser.add_argument("-f", "--files", metavar="path", required=False, nargs="*", help="the full or relative path to the data products", default=None)
args = parser.parse_args()
exitcode = main(infiles=args.files)