reformat code using python-lsp-ruff

2024-07-01 15:21:52 +02:00
parent 271ecbb631
commit 5a62fa4983
13 changed files with 1271 additions and 860 deletions

View File

@@ -5,14 +5,15 @@ Main script where are progressively added the steps for the FOC pipeline reducti
"""
# Project libraries
from copy import deepcopy
from os import system
from os.path import exists as path_exists

import lib.fits as proj_fits  # Functions to handle fits files
import lib.plots as proj_plots  # Functions for plotting data
import lib.reduction as proj_red  # Functions used in reduction pipeline
import numpy as np
from lib.utils import princ_angle, sci_not
from matplotlib.colors import LogNorm
@@ -22,10 +23,10 @@ def main(target=None, proposal_id=None, infiles=None, output_dir="./data", crop=
    deconvolve = False
    if deconvolve:
        # from lib.deconvolve import from_file_psf
        psf = "gaussian"  # Can be user-defined as well
        # psf = from_file_psf(data_folder+psf_file)
        psf_FWHM = 3.1
        psf_scale = "px"
        psf_shape = None  # (151, 151)
        iterations = 1
        algo = "conjgrad"
@@ -34,45 +35,45 @@ def main(target=None, proposal_id=None, infiles=None, output_dir="./data", crop=
    display_crop = False
    # Background estimation
    error_sub_type = "freedman-diaconis"  # sqrt, sturges, rice, scott, freedman-diaconis (default) or shape (example (51, 51))
    subtract_error = 1.0
    display_bkg = False
    # Data binning
    rebin = True
    pxsize = 2
    px_scale = "px"  # pixel, arcsec or full
    rebin_operation = "sum"  # sum or average
    # Alignment
    align_center = "center"  # If None will not align the images
    display_align = False
    display_data = False
    # Transmittance correction
    transmitcorr = True
    # Smoothing
    smoothing_function = "combine"  # gaussian_after, weighted_gaussian_after, gaussian, weighted_gaussian or combine
    smoothing_FWHM = 1.5  # If None, no smoothing is done
    smoothing_scale = "px"  # pixel or arcsec
    # Rotation
    rotate_data = False  # rotation to North convention can give erroneous results
    rotate_stokes = True
    # Polarization map output
    SNRp_cut = 3.0  # P measurements with SNR > 3
    SNRi_cut = 3.0  # I measurements with SNR > 30, which implies an uncertainty in P of 4.7%.
    flux_lim = None  # lowest and highest flux displayed on plot, defaults to bkg and maximum in cut if None
    vec_scale = 3
    step_vec = 1  # plot all vectors in the array; if step_vec = 2, every other vector is plotted; if step_vec = 0, all vectors are displayed at full length

    # Pipeline start
    # Step 1:
    # Get data from fits files and translate to flux in erg/cm²/s/Angstrom.
    if infiles is not None:
        prod = np.array([["/".join(filepath.split("/")[:-1]), filepath.split("/")[-1]] for filepath in infiles], dtype=str)
        obs_dir = "/".join(infiles[0].split("/")[:-1])
        if not path_exists(obs_dir):
            system("mkdir -p {0:s} {1:s}".format(obs_dir, obs_dir.replace("data", "plots")))
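The 4.7% figure quoted next to SNRi_cut is consistent with the small-polarization error estimate σ_P ≈ √2 / SNR_I; a quick check of that arithmetic (illustration only, not part of the pipeline):

```python
import numpy as np

# sigma_P ~ sqrt(2) / SNR_I for photon-noise-limited Stokes measurements,
# so an intensity SNR of 30 corresponds to roughly a 4.7% uncertainty on P.
snr_i = 30
sigma_p = np.sqrt(2.0) / snr_i
print("{0:.1%}".format(sigma_p))  # -> 4.7%
```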
@@ -80,6 +81,7 @@ def main(target=None, proposal_id=None, infiles=None, output_dir="./data", crop=
            target = input("Target name:\n>")
    else:
        from lib.query import retrieve_products

        target, products = retrieve_products(target, proposal_id, output_dir=output_dir)
        prod = products.pop()
        for prods in products:
@@ -97,21 +99,23 @@ def main(target=None, proposal_id=None, infiles=None, output_dir="./data", crop=
    figname = "_".join([target, "FOC"])
    figtype = ""
    if rebin:
        if px_scale not in ["full"]:
            figtype = "".join(["b", "{0:.2f}".format(pxsize), px_scale])  # additional information
        else:
            figtype = "full"
    if smoothing_FWHM is not None:
        figtype += "_" + "".join(
            ["".join([s[0] for s in smoothing_function.split("_")]), "{0:.2f}".format(smoothing_FWHM), smoothing_scale]
        )  # additional information
    if deconvolve:
        figtype += "_deconv"
    if align_center is None:
        figtype += "_not_aligned"

    # Crop data to remove outside blank margins.
    data_array, error_array, headers = proj_red.crop_array(
        data_array, headers, step=5, null_val=0.0, inside=True, display=display_crop, savename=figname, plots_folder=plots_folder
    )
    data_mask = np.ones(data_array[0].shape, dtype=bool)

    # Deconvolve data using Richardson-Lucy iterative algorithm with a gaussian PSF of given FWHM.
@@ -120,36 +124,68 @@ def main(target=None, proposal_id=None, infiles=None, output_dir="./data", crop=
    # Estimate error from data background, estimated from sub-image of desired sub_shape.
    background = None
    data_array, error_array, headers, background = proj_red.get_error(
        data_array,
        headers,
        error_array,
        data_mask=data_mask,
        sub_type=error_sub_type,
        subtract_error=subtract_error,
        display=display_bkg,
        savename="_".join([figname, "errors"]),
        plots_folder=plots_folder,
        return_background=True,
    )

    # Align and rescale images with oversampling.
    data_array, error_array, headers, data_mask, shifts, error_shifts = proj_red.align_data(
        data_array, headers, error_array=error_array, background=background, upsample_factor=10, ref_center=align_center, return_shifts=True
    )
    if display_align:
        print("Image shifts: {} \nShifts uncertainty: {}".format(shifts, error_shifts))
        proj_plots.plot_obs(
            data_array,
            headers,
            savename="_".join([figname, str(align_center)]),
            plots_folder=plots_folder,
            norm=LogNorm(vmin=data_array[data_array > 0.0].min() * headers[0]["photflam"], vmax=data_array[data_array > 0.0].max() * headers[0]["photflam"]),
        )

    # Rebin data to desired pixel size.
    if rebin:
        data_array, error_array, headers, Dxy, data_mask = proj_red.rebin_array(
            data_array, error_array, headers, pxsize=pxsize, scale=px_scale, operation=rebin_operation, data_mask=data_mask
        )

    # Rotate data to have North up
    if rotate_data:
        data_mask = np.ones(data_array.shape[1:]).astype(bool)
        alpha = headers[0]["orientat"]
        data_array, error_array, data_mask, headers = proj_red.rotate_data(data_array, error_array, data_mask, headers, -alpha)

    # Plot array for checking output
    if display_data and px_scale.lower() not in ["full", "integrate"]:
        proj_plots.plot_obs(
            data_array,
            headers,
            savename="_".join([figname, "rebin"]),
            plots_folder=plots_folder,
            norm=LogNorm(vmin=data_array[data_array > 0.0].min() * headers[0]["photflam"], vmax=data_array[data_array > 0.0].max() * headers[0]["photflam"]),
        )

    background = np.array([np.array(bkg).reshape(1, 1) for bkg in background])
    background_error = np.array(
        [
            np.array(
                np.sqrt(
                    (bkg - background[np.array([h["filtnam1"] == head["filtnam1"] for h in headers], dtype=bool)].mean()) ** 2
                    / np.sum([h["filtnam1"] == head["filtnam1"] for h in headers])
                )
            ).reshape(1, 1)
            for bkg, head in zip(background, headers)
        ]
    )

    # Step 2:
    # Compute Stokes I, Q, U with smoothed polarized images
@@ -158,15 +194,18 @@ def main(target=None, proposal_id=None, infiles=None, output_dir="./data", crop=
    # see Jedrzejewski, R.; Nota, A.; Hack, W. J., A Comparison Between FOC and WFPC2
    # Bibcode : 1995chst.conf...10J
    I_stokes, Q_stokes, U_stokes, Stokes_cov = proj_red.compute_Stokes(
        data_array, error_array, data_mask, headers, FWHM=smoothing_FWHM, scale=smoothing_scale, smoothing=smoothing_function, transmitcorr=transmitcorr
    )
    I_bkg, Q_bkg, U_bkg, S_cov_bkg = proj_red.compute_Stokes(
        background, background_error, np.array(True).reshape(1, 1), headers, FWHM=None, scale=smoothing_scale, smoothing=smoothing_function, transmitcorr=False
    )

    # Step 3:
    # Rotate images to have North up
    if rotate_stokes:
        I_stokes, Q_stokes, U_stokes, Stokes_cov, data_mask, headers = proj_red.rotate_Stokes(
            I_stokes, Q_stokes, U_stokes, Stokes_cov, data_mask, headers, SNRi_cut=None
        )
        I_bkg, Q_bkg, U_bkg, S_cov_bkg, _, _ = proj_red.rotate_Stokes(I_bkg, Q_bkg, U_bkg, S_cov_bkg, np.array(True).reshape(1, 1), headers, SNRi_cut=None)

    # Compute polarimetric parameters (polarization degree and angle).
@@ -176,8 +215,24 @@ def main(target=None, proposal_id=None, infiles=None, output_dir="./data", crop=
    # Step 4:
    # Save image to FITS.
    figname = "_".join([figname, figtype]) if figtype != "" else figname
    Stokes_test = proj_fits.save_Stokes(
        I_stokes,
        Q_stokes,
        U_stokes,
        Stokes_cov,
        P,
        debiased_P,
        s_P,
        s_P_P,
        PA,
        s_PA,
        s_PA_P,
        headers,
        data_mask,
        figname,
        data_folder=data_folder,
        return_hdul=True,
    )

    # Step 5:
    # crop to desired region of interest (roi)
@@ -188,40 +243,142 @@ def main(target=None, proposal_id=None, infiles=None, output_dir="./data", crop=
        stokescrop.write_to("/".join([data_folder, figname + ".fits"]))
        Stokes_test, headers = stokescrop.hdul_crop, [dataset.header for dataset in stokescrop.hdul_crop]
        data_mask = Stokes_test["data_mask"].data.astype(bool)

    print(
        "F_int({0:.0f} Angs) = ({1} ± {2})e{3} ergs.cm^-2.s^-1.Angs^-1".format(
            headers[0]["photplam"],
            *sci_not(
                Stokes_test[0].data[data_mask].sum() * headers[0]["photflam"],
                np.sqrt(Stokes_test[3].data[0, 0][data_mask].sum()) * headers[0]["photflam"],
                2,
                out=int,
            ),
        )
    )
    print("P_int = {0:.1f} ± {1:.1f} %".format(headers[0]["p_int"] * 100.0, np.ceil(headers[0]["p_int_err"] * 1000.0) / 10.0))
    print("PA_int = {0:.1f} ± {1:.1f} °".format(princ_angle(headers[0]["pa_int"]), princ_angle(np.ceil(headers[0]["pa_int_err"] * 10.0) / 10.0)))

    # Background values
    print(
        "F_bkg({0:.0f} Angs) = ({1} ± {2})e{3} ergs.cm^-2.s^-1.Angs^-1".format(
            headers[0]["photplam"], *sci_not(I_bkg[0, 0] * headers[0]["photflam"], np.sqrt(S_cov_bkg[0, 0][0, 0]) * headers[0]["photflam"], 2, out=int)
        )
    )
    print("P_bkg = {0:.1f} ± {1:.1f} %".format(debiased_P_bkg[0, 0] * 100.0, np.ceil(s_P_bkg[0, 0] * 1000.0) / 10.0))
    print("PA_bkg = {0:.1f} ± {1:.1f} °".format(princ_angle(PA_bkg[0, 0]), princ_angle(np.ceil(s_PA_bkg[0, 0] * 10.0) / 10.0)))

    # Plot polarization map (Background is either total Flux, Polarization degree or Polarization degree error).
    if px_scale.lower() not in ["full", "integrate"] and not interactive:
        proj_plots.polarization_map(
            deepcopy(Stokes_test),
            data_mask,
            SNRp_cut=SNRp_cut,
            SNRi_cut=SNRi_cut,
            flux_lim=flux_lim,
            step_vec=step_vec,
            vec_scale=vec_scale,
            savename="_".join([figname]),
            plots_folder=plots_folder,
        )
        proj_plots.polarization_map(
            deepcopy(Stokes_test),
            data_mask,
            SNRp_cut=SNRp_cut,
            SNRi_cut=SNRi_cut,
            flux_lim=flux_lim,
            step_vec=step_vec,
            vec_scale=vec_scale,
            savename="_".join([figname, "I"]),
            plots_folder=plots_folder,
            display="Intensity",
        )
        proj_plots.polarization_map(
            deepcopy(Stokes_test),
            data_mask,
            SNRp_cut=SNRp_cut,
            SNRi_cut=SNRi_cut,
            flux_lim=flux_lim,
            step_vec=step_vec,
            vec_scale=vec_scale,
            savename="_".join([figname, "P_flux"]),
            plots_folder=plots_folder,
            display="Pol_Flux",
        )
        proj_plots.polarization_map(
            deepcopy(Stokes_test),
            data_mask,
            SNRp_cut=SNRp_cut,
            SNRi_cut=SNRi_cut,
            flux_lim=flux_lim,
            step_vec=step_vec,
            vec_scale=vec_scale,
            savename="_".join([figname, "P"]),
            plots_folder=plots_folder,
            display="Pol_deg",
        )
        proj_plots.polarization_map(
            deepcopy(Stokes_test),
            data_mask,
            SNRp_cut=SNRp_cut,
            SNRi_cut=SNRi_cut,
            flux_lim=flux_lim,
            step_vec=step_vec,
            vec_scale=vec_scale,
            savename="_".join([figname, "PA"]),
            plots_folder=plots_folder,
            display="Pol_ang",
        )
        proj_plots.polarization_map(
            deepcopy(Stokes_test),
            data_mask,
            SNRp_cut=SNRp_cut,
            SNRi_cut=SNRi_cut,
            flux_lim=flux_lim,
            step_vec=step_vec,
            vec_scale=vec_scale,
            savename="_".join([figname, "I_err"]),
            plots_folder=plots_folder,
            display="I_err",
        )
        proj_plots.polarization_map(
            deepcopy(Stokes_test),
            data_mask,
            SNRp_cut=SNRp_cut,
            SNRi_cut=SNRi_cut,
            flux_lim=flux_lim,
            step_vec=step_vec,
            vec_scale=vec_scale,
            savename="_".join([figname, "P_err"]),
            plots_folder=plots_folder,
            display="Pol_deg_err",
        )
        proj_plots.polarization_map(
            deepcopy(Stokes_test),
            data_mask,
            SNRp_cut=SNRp_cut,
            SNRi_cut=SNRi_cut,
            flux_lim=flux_lim,
            step_vec=step_vec,
            vec_scale=vec_scale,
            savename="_".join([figname, "SNRi"]),
            plots_folder=plots_folder,
            display="SNRi",
        )
        proj_plots.polarization_map(
            deepcopy(Stokes_test),
            data_mask,
            SNRp_cut=SNRp_cut,
            SNRi_cut=SNRi_cut,
            flux_lim=flux_lim,
            step_vec=step_vec,
            vec_scale=vec_scale,
            savename="_".join([figname, "SNRp"]),
            plots_folder=plots_folder,
            display="SNRp",
        )
    elif not interactive:
        proj_plots.polarization_map(
            deepcopy(Stokes_test), data_mask, SNRp_cut=SNRp_cut, SNRi_cut=SNRi_cut, savename=figname, plots_folder=plots_folder, display="integrate"
        )
    elif px_scale.lower() not in ["full", "integrate"]:
        proj_plots.pol_map(Stokes_test, SNRp_cut=SNRp_cut, SNRi_cut=SNRi_cut, flux_lim=flux_lim)

    return 0
@@ -230,15 +387,17 @@ def main(target=None, proposal_id=None, infiles=None, output_dir="./data", crop=
if __name__ == "__main__":
    import argparse

    parser = argparse.ArgumentParser(description="Query MAST for target products")
    parser.add_argument("-t", "--target", metavar="targetname", required=False, help="the name of the target", type=str, default=None)
    parser.add_argument("-p", "--proposal_id", metavar="proposal_id", required=False, help="the proposal id of the data products", type=int, default=None)
    parser.add_argument("-f", "--files", metavar="path", required=False, nargs="*", help="the full or relative path to the data products", default=None)
    parser.add_argument(
        "-o", "--output_dir", metavar="directory_path", required=False, help="output directory path for the data products", type=str, default="./data"
    )
    parser.add_argument("-c", "--crop", action="store_true", required=False, help="whether to crop the analysis region")
    parser.add_argument("-i", "--interactive", action="store_true", required=False, help="whether to output to the interactive analysis tool")
    args = parser.parse_args()

    exitcode = main(
        target=args.target, proposal_id=args.proposal_id, infiles=args.files, output_dir=args.output_dir, crop=args.crop, interactive=args.interactive
    )
    print("Finished with ExitCode: ", exitcode)
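For illustration, the argparse block above boils down to a call like the following; the target name and proposal id are placeholders, and the call assumes `main` from this script is in scope:

```python
# Placeholder values only; roughly equivalent to
#   python <this_script>.py -t MY-TARGET -p 1234 -o ./data
exitcode = main(
    target="MY-TARGET",   # hypothetical target name
    proposal_id=1234,     # hypothetical proposal id
    infiles=None,         # None -> products are retrieved through lib.query.retrieve_products
    output_dir="./data",
    crop=False,
    interactive=False,
)
print("Finished with ExitCode: ", exitcode)
```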

View File

@@ -9,81 +9,87 @@ prototypes :
- bkg_mini(data, error, mask, headers, sub_shape, display, savename, plots_folder) -> n_data_array, n_error_array, headers, background)
    Compute the error (noise) of the input array by looking at the sub-region of minimal flux in every image and of shape sub_shape.
"""

from copy import deepcopy
from datetime import datetime, timedelta
from os.path import join as path_join

import matplotlib.dates as mdates
import matplotlib.pyplot as plt
import numpy as np
from astropy.time import Time
from lib.plots import plot_obs
from matplotlib.colors import LogNorm
from matplotlib.patches import Rectangle
from scipy.optimize import curve_fit


def gauss(x, *p):
    N, mu, sigma = p
    return N * np.exp(-((x - mu) ** 2) / (2.0 * sigma**2))


def gausspol(x, *p):
    N, mu, sigma, a, b, c, d = p
    return N * np.exp(-((x - mu) ** 2) / (2.0 * sigma**2)) + a * np.log(x) + b / x + c * x + d


def bin_centers(edges):
    return (edges[1:] + edges[:-1]) / 2.0


def display_bkg(data, background, std_bkg, headers, histograms=None, binning=None, coeff=None, rectangle=None, savename=None, plots_folder="./"):
    plt.rcParams.update({"font.size": 15})
    convert_flux = np.array([head["photflam"] for head in headers])
    date_time = np.array([Time((headers[i]["expstart"] + headers[i]["expend"]) / 2.0, format="mjd", precision=0).iso for i in range(len(headers))])
    date_time = np.array([datetime.strptime(d, "%Y-%m-%d %H:%M:%S") for d in date_time])
    date_err = np.array([timedelta(seconds=headers[i]["exptime"] / 2.0) for i in range(len(headers))])
    filt = np.array([headers[i]["filtnam1"] for i in range(len(headers))])
    dict_filt = {"POL0": "r", "POL60": "g", "POL120": "b"}
    c_filt = np.array([dict_filt[f] for f in filt])

    fig, ax = plt.subplots(figsize=(10, 6), constrained_layout=True)
    for f in np.unique(filt):
        mask = [fil == f for fil in filt]
        ax.scatter(date_time[mask], background[mask] * convert_flux[mask], color=dict_filt[f], label="{0:s}".format(f))
    ax.errorbar(date_time, background * convert_flux, xerr=date_err, yerr=std_bkg * convert_flux, fmt="+k", markersize=0, ecolor=c_filt)

    # Date handling
    locator = mdates.AutoDateLocator()
    formatter = mdates.ConciseDateFormatter(locator)
    ax.xaxis.set_major_locator(locator)
    ax.xaxis.set_major_formatter(formatter)
    # ax.set_ylim(bottom=0.)
    ax.set_yscale("log")
    ax.set_xlabel("Observation date and time")
    ax.set_ylabel(r"Flux [$ergs \cdot cm^{-2} \cdot s^{-1} \cdot \AA^{-1}$]")
    plt.legend()
    if not (savename is None):
        this_savename = deepcopy(savename)
        if not savename[-4:] in [".png", ".jpg", ".pdf"]:
            this_savename += "_background_flux.pdf"
        else:
            this_savename = savename[:-4] + "_background_flux" + savename[-4:]
        fig.savefig(path_join(plots_folder, this_savename), bbox_inches="tight")

    if not (histograms is None):
        filt_obs = {"POL0": 0, "POL60": 0, "POL120": 0}
        fig_h, ax_h = plt.subplots(figsize=(10, 6), constrained_layout=True)
        for i, (hist, bins) in enumerate(zip(histograms, binning)):
            filt_obs[headers[i]["filtnam1"]] += 1
            ax_h.plot(
                bins * convert_flux[i],
                hist,
                "+",
                color="C{0:d}".format(i),
                alpha=0.8,
                label=headers[i]["filtnam1"] + " (Obs " + str(filt_obs[headers[i]["filtnam1"]]) + ")",
            )
            ax_h.plot([background[i] * convert_flux[i], background[i] * convert_flux[i]], [hist.min(), hist.max()], "x--", color="C{0:d}".format(i), alpha=0.8)
            if not (coeff is None):
                # ax_h.plot(bins*convert_flux[i], gausspol(bins, *coeff[i]), '--', color="C{0:d}".format(i), alpha=0.8)
                ax_h.plot(bins * convert_flux[i], gauss(bins, *coeff[i]), "--", color="C{0:d}".format(i), alpha=0.8)
        ax_h.set_xscale("log")
        ax_h.set_ylim([0.0, np.max([hist.max() for hist in histograms])])
        ax_h.set_xlim([np.min(background * convert_flux) * 1e-2, np.max(background * convert_flux) * 1e2])
        ax_h.set_xlabel(r"Flux [$ergs \cdot cm^{-2} \cdot s^{-1} \cdot \AA^{-1}$]")
        ax_h.set_ylabel(r"Number of pixels in bin")
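A self-contained sketch of the kind of fit these helpers support: build a count-rate histogram, then fit the same Gaussian model with `scipy.optimize.curve_fit`, seeding it with the histogram peak and its full width at half maximum the way the pipeline builds `p0` (data here are synthetic):

```python
import numpy as np
from scipy.optimize import curve_fit


def gauss(x, *p):
    # Same functional form as gauss() above.
    N, mu, sigma = p
    return N * np.exp(-((x - mu) ** 2) / (2.0 * sigma**2))


rng = np.random.default_rng(0)
counts = rng.normal(loc=5e-3, scale=1e-3, size=10_000)  # synthetic "sky" count rates

hist, bin_edges = np.histogram(counts, bins=64)
binning = (bin_edges[1:] + bin_edges[:-1]) / 2.0  # same as bin_centers(bin_edges)

# Initial guess mirroring the pipeline: histogram peak and full width at half maximum.
peak = binning[np.argmax(hist)]
bins_stdev = binning[hist > hist.max() / 2.0]
stdev = bins_stdev[-1] - bins_stdev[0]
p0 = [hist.max(), peak, stdev]

coeff, cov = curve_fit(gauss, binning, hist, p0=p0)
print("fitted background level:", coeff[1])
```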
@@ -91,46 +97,56 @@ def display_bkg(data, background, std_bkg, headers, histograms=None, binning=Non
        plt.legend()
        if not (savename is None):
            this_savename = deepcopy(savename)
            if not savename[-4:] in [".png", ".jpg", ".pdf"]:
                this_savename += "_histograms.pdf"
            else:
                this_savename = savename[:-4] + "_histograms" + savename[-4:]
            fig_h.savefig(path_join(plots_folder, this_savename), bbox_inches="tight")

    fig2, ax2 = plt.subplots(figsize=(10, 10))
    data0 = data[0] * convert_flux[0]
    bkg_data0 = data0 <= background[0] * convert_flux[0]
    instr = headers[0]["instrume"]
    rootname = headers[0]["rootname"]
    exptime = headers[0]["exptime"]
    filt = headers[0]["filtnam1"]
    # plots
    im2 = ax2.imshow(data0, norm=LogNorm(data0[data0 > 0.0].mean() / 10.0, data0.max()), origin="lower", cmap="gray")
    ax2.imshow(bkg_data0, origin="lower", cmap="Reds", alpha=0.5)
    if not (rectangle is None):
        x, y, width, height, angle, color = rectangle[0]
        ax2.add_patch(Rectangle((x, y), width, height, edgecolor=color, fill=False, lw=2))
    ax2.annotate(
        instr + ":" + rootname, color="white", fontsize=10, xy=(0.01, 1.00), xycoords="axes fraction", verticalalignment="top", horizontalalignment="left"
    )
    ax2.annotate(filt, color="white", fontsize=14, xy=(0.01, 0.01), xycoords="axes fraction", verticalalignment="bottom", horizontalalignment="left")
    ax2.annotate(
        str(exptime) + " s", color="white", fontsize=10, xy=(1.00, 0.01), xycoords="axes fraction", verticalalignment="bottom", horizontalalignment="right"
    )
    ax2.set(xlabel="pixel offset", ylabel="pixel offset", aspect="equal")
    fig2.subplots_adjust(hspace=0, wspace=0, right=1.0)
    fig2.colorbar(im2, ax=ax2, location="right", aspect=50, pad=0.025, label=r"Flux [$ergs \cdot cm^{-2} \cdot s^{-1} \cdot \AA^{-1}$]")
    if not (savename is None):
        this_savename = deepcopy(savename)
        if not savename[-4:] in [".png", ".jpg", ".pdf"]:
            this_savename += "_" + filt + "_background_location.pdf"
        else:
            this_savename = savename[:-4] + "_" + filt + "_background_location" + savename[-4:]
        fig2.savefig(path_join(plots_folder, this_savename), bbox_inches="tight")
        if not (rectangle is None):
            plot_obs(
                data,
                headers,
                vmin=data[data > 0.0].min() * convert_flux.mean(),
                vmax=data[data > 0.0].max() * convert_flux.mean(),
                rectangle=rectangle,
                savename=savename + "_background_location",
                plots_folder=plots_folder,
            )
    elif not (rectangle is None):
        plot_obs(data, headers, vmin=data[data > 0.0].min(), vmax=data[data > 0.0].max(), rectangle=rectangle)

    plt.show()
@@ -141,7 +157,7 @@ def sky_part(img):
    # Intensity range
    sky_med = np.median(rand_pix)
    sig = np.min([img[img < sky_med].std(), img[img > sky_med].std()])
    sky_range = [sky_med - 2.0 * sig, np.max([sky_med + sig, 7e-4])]  # Detector background average FOC Data Handbook Sec. 7.6
    sky = img[np.logical_and(img >= sky_range[0], img <= sky_range[1])]

    return sky, sky_range
@@ -152,13 +168,13 @@ def bkg_estimate(img, bins=None, chi2=None, coeff=None):
        bins, chi2, coeff = [8], [], []
    else:
        try:
            bins.append(int(3.0 / 2.0 * bins[-1]))
        except IndexError:
            bins, chi2, coeff = [8], [], []
    hist, bin_edges = np.histogram(img[img > 0], bins=bins[-1])
    binning = bin_centers(bin_edges)
    peak = binning[np.argmax(hist)]
    bins_stdev = binning[hist > hist.max() / 2.0]
    stdev = bins_stdev[-1] - bins_stdev[0]
    # p0 = [hist.max(), peak, stdev, 1e-3, 1e-3, 1e-3, 1e-3]
    p0 = [hist.max(), peak, stdev]
@@ -223,7 +239,7 @@ def bkg_fit(data, error, mask, headers, subtract_error=True, display=False, save
    for i, image in enumerate(data):
        # Compute the Count-rate histogram for the image
        sky, sky_range = sky_part(image[image > 0.0])

        bins, chi2, coeff = bkg_estimate(sky)
        while bins[-1] < 256:
@@ -246,7 +262,7 @@ def bkg_fit(data, error, mask, headers, subtract_error=True, display=False, save
            n_data_array[i][mask] = n_data_array[i][mask] - bkg
            n_data_array[i][np.logical_and(mask, n_data_array[i] <= 1e-3 * bkg)] = 1e-3 * bkg

        std_bkg[i] = image[np.abs(image - bkg) / bkg < 1.0].std()
        background[i] = bkg

    if display:
@@ -308,31 +324,36 @@ def bkg_hist(data, error, mask, headers, sub_type=None, subtract_error=True, dis
    for i, image in enumerate(data):
        # Compute the Count-rate histogram for the image
        n_mask = np.logical_and(mask, image > 0.0)
        if not (sub_type is None):
            if isinstance(sub_type, int):
                n_bins = sub_type
            elif sub_type.lower() in ["sqrt"]:
                n_bins = np.fix(np.sqrt(image[n_mask].size)).astype(int)  # Square-root
            elif sub_type.lower() in ["sturges"]:
                n_bins = np.ceil(np.log2(image[n_mask].size)).astype(int) + 1  # Sturges
            elif sub_type.lower() in ["rice"]:
                n_bins = 2 * np.fix(np.power(image[n_mask].size, 1 / 3)).astype(int)  # Rice
            elif sub_type.lower() in ["scott"]:
                n_bins = np.fix((image[n_mask].max() - image[n_mask].min()) / (3.5 * image[n_mask].std() / np.power(image[n_mask].size, 1 / 3))).astype(
                    int
                )  # Scott
            else:
                n_bins = np.fix(
                    (image[n_mask].max() - image[n_mask].min())
                    / (2 * np.subtract(*np.percentile(image[n_mask], [75, 25])) / np.power(image[n_mask].size, 1 / 3))
                ).astype(int)  # Freedman-Diaconis
        else:
            n_bins = np.fix(
                (image[n_mask].max() - image[n_mask].min()) / (2 * np.subtract(*np.percentile(image[n_mask], [75, 25])) / np.power(image[n_mask].size, 1 / 3))
            ).astype(int)  # Freedman-Diaconis
        hist, bin_edges = np.histogram(np.log(image[n_mask]), bins=n_bins)
        histograms.append(hist)
        binning.append(np.exp(bin_centers(bin_edges)))

        # Fit a gaussian to the log-intensity histogram
        bins_stdev = binning[-1][hist > hist.max() / 2.0]
        stdev = bins_stdev[-1] - bins_stdev[0]
        # p0 = [hist.max(), binning[-1][np.argmax(hist)], stdev, 1e-3, 1e-3, 1e-3, 1e-3]
        p0 = [hist.max(), binning[-1][np.argmax(hist)], stdev]
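The `sub_type` options above are the standard histogram bin-count estimators (square-root, Sturges, Rice, Scott, Freedman-Diaconis). NumPy exposes the same rules through `np.histogram_bin_edges`, which makes a handy cross-check of the hand-rolled formulas (illustrative sketch only):

```python
import numpy as np

rng = np.random.default_rng(1)
sample = rng.lognormal(mean=-6.0, sigma=0.5, size=5_000)  # positive, sky-like values

for rule in ("sqrt", "sturges", "rice", "scott", "fd"):
    # "fd" is NumPy's name for the Freedman-Diaconis rule used as the default above.
    n_bins = len(np.histogram_bin_edges(sample, bins=rule)) - 1
    print(rule, n_bins)
```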
@@ -350,7 +371,7 @@ def bkg_hist(data, error, mask, headers, sub_type=None, subtract_error=True, dis
            n_data_array[i][mask] = n_data_array[i][mask] - bkg
            n_data_array[i][np.logical_and(mask, n_data_array[i] <= 1e-3 * bkg)] = 1e-3 * bkg

        std_bkg[i] = image[np.abs(image - bkg) / bkg < 1.0].std()
        background[i] = bkg

    if display:
@@ -433,7 +454,7 @@ def bkg_mini(data, error, mask, headers, sub_shape=(15, 15), subtract_error=True
    minima = np.unravel_index(np.argmin(temp.sum(axis=0)), temp.shape[1:])

    for i, image in enumerate(data):
        rectangle.append([minima[1], minima[0], sub_shape[1], sub_shape[0], 0.0, "r"])
        # Compute error : root mean square of the background
        sub_image = image[minima[0] : minima[0] + sub_shape[0], minima[1] : minima[1] + sub_shape[1]]
        # bkg = np.std(sub_image)  # Previously computed using standard deviation over the background
@@ -443,11 +464,11 @@ def bkg_mini(data, error, mask, headers, sub_shape=(15, 15), subtract_error=True
        n_error_array[i] = np.sqrt(n_error_array[i] ** 2 + error_bkg[i] ** 2)

        # Subtract background
        if subtract_error > 0.0:
            n_data_array[i][mask] = n_data_array[i][mask] - bkg
            n_data_array[i][np.logical_and(mask, n_data_array[i] <= 1e-3 * bkg)] = 1e-3 * bkg

        std_bkg[i] = image[np.abs(image - bkg) / bkg < 1.0].std()
        background[i] = bkg

    if display:

View File

@@ -3,6 +3,7 @@ Library functions for graham algorithm implementation (find the convex hull of a
"""

from copy import deepcopy

import numpy as np
@@ -16,15 +17,15 @@ def clean_ROI(image):
    row, col = np.indices(shape)
    for i in range(0, shape[0]):
        r = row[i, :][image[i, :] > 0.0]
        c = col[i, :][image[i, :] > 0.0]
        if len(r) > 1 and len(c) > 1:
            H.append((r[0], c[0]))
            H.append((r[-1], c[-1]))
    H = np.array(H)
    for j in range(0, shape[1]):
        r = row[:, j][image[:, j] > 0.0]
        c = col[:, j][image[:, j] > 0.0]
        if len(r) > 1 and len(c) > 1:
            J.append((r[0], c[0]))
            J.append((r[-1], c[-1]))
@@ -206,16 +207,32 @@ def sort_angles_distances(Omega, s):
    Sort the list of points 's' for the composition order given reference point
    Omega.
    """

    def order(A, B):
        return comp(Omega, A, B)

    quicksort(s, order)


# Define functions for stacks (use here python lists with stack operations).
def empty_stack():
    return []


def stack(S, A):
    S.append(A)


def unstack(S):
    S.pop()


def stack_top(S):
    return S[-1]


def stack_sub_top(S):
    return S[-2]


# Alignment handling
@@ -299,7 +316,7 @@ def convex_hull(H):
    return S


def image_hull(image, step=5, null_val=0.0, inside=True):
    """
    Compute the convex hull of a 2D image and return the 4 relevant coordinates
    of the maximum included rectangle (i.e. crop image to maximum rectangle).

View File

@@ -1,6 +1,7 @@
"""
Library functions for phase cross-correlation computation.
"""

# Prefer FFTs via the new scipy.fft module when available (SciPy 1.4+)
# Otherwise fall back to numpy.fft.
# Like numpy 1.15+ scipy 1.3+ is also using pocketfft, but a newer
@@ -13,8 +14,7 @@ except ImportError:
import numpy as np


def _upsampled_dft(data, upsampled_region_size, upsample_factor=1, axis_offsets=None):
    """
    Upsampled DFT by matrix multiplication.
    This code is intended to provide the same result as if the following
@@ -48,26 +48,27 @@ def _upsampled_dft(data, upsampled_region_size, upsample_factor=1,
    """
    # if people pass in an integer, expand it to a list of equal-sized sections
    if not hasattr(upsampled_region_size, "__iter__"):
        upsampled_region_size = [
            upsampled_region_size,
        ] * data.ndim
    else:
        if len(upsampled_region_size) != data.ndim:
            raise ValueError("shape of upsampled region sizes must be equal " "to input data's number of dimensions.")

    if axis_offsets is None:
        axis_offsets = [
            0,
        ] * data.ndim
    else:
        if len(axis_offsets) != data.ndim:
            raise ValueError("number of axis offsets must be equal to input " "data's number of dimensions.")

    im2pi = 1j * 2 * np.pi
    dim_properties = list(zip(data.shape, upsampled_region_size, axis_offsets))

    for n_items, ups_size, ax_offset in dim_properties[::-1]:
        kernel = (np.arange(ups_size) - ax_offset)[:, None] * fft.fftfreq(n_items, upsample_factor)
        kernel = np.exp(-im2pi * kernel)
        # Equivalent to:
@@ -100,14 +101,11 @@ def _compute_error(cross_correlation_max, src_amp, target_amp):
    target_amp : float
        The normalized average image intensity of the target image
    """
    error = 1.0 - cross_correlation_max * cross_correlation_max.conj() / (src_amp * target_amp)

    return np.sqrt(np.abs(error))


def phase_cross_correlation(reference_image, moving_image, *, upsample_factor=1, space="real", return_error=True, overlap_ratio=0.3):
    """
    Efficient subpixel image translation registration by cross-correlation.
    This code gives the same precision as the FFT upsampled cross-correlation
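A hedged usage sketch for this routine: register an image against a copy of itself rolled by a known integer offset, so the recovered shift can be checked directly (the `lib.cross_correlation` import path is a guess; subpixel accuracy comes from `upsample_factor > 1`):

```python
import numpy as np

from lib.cross_correlation import phase_cross_correlation  # module path assumed

rng = np.random.default_rng(2)
reference = rng.random((64, 64))
moving = np.roll(reference, shift=(3, -5), axis=(0, 1))  # known integer offset

shifts, error, phasediff = phase_cross_correlation(reference, moving, upsample_factor=10)
# shifts recovers the (3, -5) offset applied above, up to the sign convention of
# the registration (i.e. the shift needed to map one image back onto the other).
print(shifts, error)
```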
@@ -174,11 +172,11 @@ def phase_cross_correlation(reference_image, moving_image, *,
        raise ValueError("images must be same shape")

    # assume complex data is already in Fourier space
    if space.lower() == "fourier":
        src_freq = reference_image
        target_freq = moving_image
    # real data needs to be fft'd.
    elif space.lower() == "real":
        src_freq = fft.fftn(reference_image)
        target_freq = fft.fftn(moving_image)
    else:
@@ -190,8 +188,7 @@ def phase_cross_correlation(reference_image, moving_image, *,
    cross_correlation = fft.ifftn(image_product)

    # Locate maximum
    maxima = np.unravel_index(np.argmax(np.abs(cross_correlation)), cross_correlation.shape)
    midpoints = np.array([np.fix(axis_size / 2) for axis_size in shape])

    shifts = np.stack(maxima).astype(np.float64)
@@ -214,13 +211,9 @@ def phase_cross_correlation(reference_image, moving_image, *,
        upsample_factor = np.array(upsample_factor, dtype=np.float64)
        # Matrix multiply DFT around the current shift estimate
        sample_region_offset = dftshift - shifts * upsample_factor
        cross_correlation = _upsampled_dft(image_product.conj(), upsampled_region_size, upsample_factor, sample_region_offset).conj()

        # Locate maximum and map back to original pixel grid
        maxima = np.unravel_index(np.argmax(np.abs(cross_correlation)), cross_correlation.shape)
        CCmax = cross_correlation[maxima]

        maxima = np.stack(maxima).astype(np.float64) - dftshift
@@ -240,10 +233,8 @@ def phase_cross_correlation(reference_image, moving_image, *,
    if return_error:
        # Redirect user to masked_phase_cross_correlation if NaNs are observed
        if np.isnan(CCmax) or np.isnan(src_amp) or np.isnan(target_amp):
            raise ValueError("NaN values found, please remove NaNs from your input data")

        return shifts, _compute_error(CCmax, src_amp, target_amp), _compute_phasediff(CCmax)
    else:
        return shifts

View File

@@ -28,8 +28,8 @@ prototypes :
"""

import numpy as np
from astropy.io import fits
from scipy.signal import convolve


def abs2(x):
@@ -118,7 +118,7 @@ def gaussian2d(x, y, sigma):
    return np.exp(-(x**2 + y**2) / (2 * sigma**2)) / (2 * np.pi * sigma**2)


def gaussian_psf(FWHM=1.0, shape=(5, 5)):
    """
    Define the gaussian Point-Spread-Function of chosen shape and FWHM.
    ----------
@@ -136,7 +136,7 @@ def gaussian_psf(FWHM=1., shape=(5, 5)):
        Kernel containing the weights of the desired gaussian PSF.
    """
    # Compute standard deviation from FWHM
    stdev = FWHM / (2.0 * np.sqrt(2.0 * np.log(2.0)))
    # Create kernel of desired shape
    x, y = np.meshgrid(np.arange(-shape[0] / 2, shape[0] / 2), np.arange(-shape[1] / 2, shape[1] / 2))
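As a worked example of this conversion, the psf_FWHM = 3.1 px used in the main script corresponds to:

```python
import numpy as np

FWHM = 3.1  # px, value set in the main script
stdev = FWHM / (2.0 * np.sqrt(2.0 * np.log(2.0)))  # same formula as above
print(round(stdev, 2))  # 1.32 px
```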
@@ -241,7 +241,7 @@ def van_cittert(image, psf, alpha=0.1, iterations=20, clip=True, filter_epsilon=
    im_deconv = image.copy()

    for _ in range(iterations):
        conv = convolve(im_deconv, psf, mode="same")
        if filter_epsilon:
            relative_blur = np.where(conv < filter_epsilon, 0, image - conv)
        else:
@@ -290,12 +290,12 @@ def richardson_lucy(image, psf, iterations=20, clip=True, filter_epsilon=None):
psf_mirror = np.flip(psf)
for _ in range(iterations):
conv = convolve(im_deconv, psf, mode="same")
if filter_epsilon:
relative_blur = np.where(conv < filter_epsilon, 0, image / conv)
else:
relative_blur = image / conv
im_deconv *= convolve(relative_blur, psf_mirror, mode="same")
if clip:
im_deconv[im_deconv > 1] = 1
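
A toy Richardson-Lucy run built only on scipy.signal.convolve, to show what the loop above iterates; the test image and PSF are invented:

import numpy as np
from scipy.signal import convolve

truth = np.full((64, 64), 0.1)  # faint background plus a point-like source
truth[30:34, 30:34] = 1.0
g = np.exp(-np.linspace(-2.0, 2.0, 9) ** 2)
psf = np.outer(g, g)
psf /= psf.sum()
blurred = convolve(truth, psf, mode="same")

# Bare Richardson-Lucy iterations (no clipping, no filter_epsilon)
estimate = np.full_like(blurred, 0.5)
for _ in range(20):
    ratio = blurred / convolve(estimate, psf, mode="same")
    estimate *= convolve(ratio, np.flip(psf), mode="same")
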
@@ -335,12 +335,12 @@ def one_step_gradient(image, psf, iterations=20, clip=True, filter_epsilon=None)
psf_mirror = np.flip(psf)
for _ in range(iterations):
conv = convolve(im_deconv, psf, mode="same")
if filter_epsilon:
relative_blur = np.where(conv < filter_epsilon, 0, image - conv)
else:
relative_blur = image - conv
im_deconv += convolve(relative_blur, psf_mirror, mode="same")
if clip:
im_deconv[im_deconv > 1] = 1
@@ -458,7 +458,7 @@ def conjgrad(image, psf, alpha=0.1, error=None, iterations=20):
r = np.copy(b)
x = np.zeros(b.shape, dtype=b.dtype)
rho = inner(r, r)
epsilon = np.max([0.0, 1e-5 * np.sqrt(rho)])
# Conjugate gradient iterations.
beta = 0.0
@@ -494,8 +494,7 @@ def conjgrad(image, psf, alpha=0.1, error=None, iterations=20):
return im_deconv

def deconvolve_im(image, psf, alpha=0.1, error=None, iterations=20, clip=True, filter_epsilon=None, algo="richardson"):
"""
Prepare an image for deconvolution using a chosen algorithm and return
results.
@@ -537,25 +536,21 @@ def deconvolve_im(image, psf, alpha=0.1, error=None, iterations=20, clip=True,
"""
# Normalize image to highest pixel value
pxmax = image[np.isfinite(image)].max()
if pxmax == 0.0:
raise ValueError("Invalid image")
norm_image = image / pxmax
# Deconvolve normalized image
if algo.lower() in ["wiener", "wiener simple"]:
norm_deconv = wiener(image=norm_image, psf=psf, alpha=alpha, clip=clip)
elif algo.lower() in ["van-cittert", "vancittert", "cittert"]:
norm_deconv = van_cittert(image=norm_image, psf=psf, alpha=alpha, iterations=iterations, clip=clip, filter_epsilon=filter_epsilon)
elif algo.lower() in ["1grad", "one_step_grad", "one step grad"]:
norm_deconv = one_step_gradient(image=norm_image, psf=psf, iterations=iterations, clip=clip, filter_epsilon=filter_epsilon)
elif algo.lower() in ["conjgrad", "conj_grad", "conjugate gradient"]:
norm_deconv = conjgrad(image=norm_image, psf=psf, alpha=alpha, error=error, iterations=iterations)
else:  # Defaults to Richardson-Lucy
norm_deconv = richardson_lucy(image=norm_image, psf=psf, iterations=iterations, clip=clip, filter_epsilon=filter_epsilon)
# Output deconvolved image with original pxmax value
im_deconv = pxmax * norm_deconv
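
A hypothetical call of the helpers shown in this file; the module path and the input array are assumptions, and the keyword values are purely illustrative:

import numpy as np
# Module path is an assumption based on this diff; adjust to the actual package layout.
from lib.deconvolve import deconvolve_im, gaussian_psf

image = np.random.poisson(50.0, size=(128, 128)).astype(float)  # invented test frame
psf = gaussian_psf(FWHM=3.0, shape=(15, 15))
deconv = deconvolve_im(image, psf, alpha=0.1, iterations=20, clip=True, algo="conjgrad")
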

View File

@@ -9,10 +9,12 @@ prototypes :
Save computed polarimetry parameters to a single fits file (and return HDUList)
"""
from os.path import join as path_join

import numpy as np
from astropy.io import fits
from astropy.wcs import WCS

from .convex_hull import clean_ROI
@@ -38,7 +40,7 @@ def get_obs_data(infiles, data_folder="", compute_flux=False):
""" """
data_array, headers, wcs_array = [], [], [] data_array, headers, wcs_array = [], [], []
for i in range(len(infiles)): for i in range(len(infiles)):
with fits.open(path_join(data_folder, infiles[i]), mode='update') as f: with fits.open(path_join(data_folder, infiles[i]), mode="update") as f:
headers.append(f[0].header) headers.append(f[0].header)
data_array.append(f[0].data) data_array.append(f[0].data)
wcs_array.append(WCS(header=f[0].header, fobj=f).celestial) wcs_array.append(WCS(header=f[0].header, fobj=f).celestial)
@@ -47,53 +49,52 @@ def get_obs_data(infiles, data_folder="", compute_flux=False):
# Prevent negative count value in imported data
for i in range(len(data_array)):
data_array[i][data_array[i] < 0.0] = 0.0
# force WCS to convention PCi_ja unitary, cdelt in deg
for wcs, header in zip(wcs_array, headers):
new_wcs = wcs.deepcopy()
if new_wcs.wcs.has_cd() or (new_wcs.wcs.cdelt[:2] == np.array([1.0, 1.0])).all():
# Update WCS with relevant information
if new_wcs.wcs.has_cd():
old_cd = new_wcs.wcs.cd
del new_wcs.wcs.cd
keys = list(new_wcs.to_header().keys()) + ["CD1_1", "CD1_2", "CD1_3", "CD2_1", "CD2_2", "CD2_3", "CD3_1", "CD3_2", "CD3_3"]
for key in keys:
header.remove(key, ignore_missing=True)
new_cdelt = np.linalg.eig(old_cd)[0]
elif (new_wcs.wcs.cdelt == np.array([1.0, 1.0])).all() and (new_wcs.array_shape in [(512, 512), (1024, 512), (512, 1024), (1024, 1024)]):
old_cd = new_wcs.wcs.pc
new_wcs.wcs.pc = np.dot(old_cd, np.diag(1.0 / new_cdelt))
new_wcs.wcs.cdelt = new_cdelt
for key, val in new_wcs.to_header().items():
header[key] = val
try:
_ = header["ORIENTAT"]
except KeyError:
header["ORIENTAT"] = -np.arccos(new_wcs.wcs.pc[0, 0]) * 180.0 / np.pi
# force WCS for POL60 to have same pixel size as POL0 and POL120
is_pol60 = np.array([head["filtnam1"].lower() == "pol60" for head in headers], dtype=bool)
cdelt = np.round(np.array([WCS(head).wcs.cdelt[:2] for head in headers]), 14)
if np.unique(cdelt[np.logical_not(is_pol60)], axis=0).size != 2:
print(np.unique(cdelt[np.logical_not(is_pol60)], axis=0))
raise ValueError("Not all images have same pixel size")
else:
for i in np.arange(len(headers))[is_pol60]:
headers[i]["cdelt1"], headers[i]["cdelt2"] = np.unique(cdelt[np.logical_not(is_pol60)], axis=0)[0]
if compute_flux:
for i in range(len(infiles)):
# Compute the flux in counts/sec
data_array[i] /= headers[i]["EXPTIME"]
return data_array, headers
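
A hedged sketch of the CD-to-PC/CDELT conversion performed above, using astropy.wcs on an invented diagonal CD matrix:

import numpy as np
from astropy.wcs import WCS

cd = np.array([[-5.6e-6, 0.0], [0.0, 5.6e-6]])  # invented CD matrix in deg/pixel
cdelt = np.linalg.eig(cd)[0]           # pixel scales recovered as eigenvalues of CD
pc = np.dot(cd, np.diag(1.0 / cdelt))  # leaves a unitary PC matrix
w = WCS(naxis=2)
w.wcs.pc = pc
w.wcs.cdelt = cdelt
print(w.wcs.cdelt, w.wcs.pc)
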
def save_Stokes(
I_stokes, Q_stokes, U_stokes, Stokes_cov, P, debiased_P, s_P, s_P_P, PA, s_PA, s_PA_P, headers, data_mask, filename, data_folder="", return_hdul=False
):
"""
Save computed polarimetry parameters to a single fits file,
updating header accordingly.
@@ -130,7 +131,7 @@ def save_Stokes(I_stokes, Q_stokes, U_stokes, Stokes_cov, P, debiased_P, s_P,
""" """
# Create new WCS object given the modified images # Create new WCS object given the modified images
ref_header = headers[0] ref_header = headers[0]
exp_tot = np.array([header['exptime'] for header in headers]).sum() exp_tot = np.array([header["exptime"] for header in headers]).sum()
new_wcs = WCS(ref_header).deepcopy() new_wcs = WCS(ref_header).deepcopy()
if data_mask.shape != (1, 1): if data_mask.shape != (1, 1):
@@ -140,19 +141,19 @@ def save_Stokes(I_stokes, Q_stokes, U_stokes, Stokes_cov, P, debiased_P, s_P,
new_wcs.wcs.crpix = np.array(new_wcs.wcs.crpix) - vertex[0::-2]
header = new_wcs.to_header()
header["telescop"] = (ref_header["telescop"] if "TELESCOP" in list(ref_header.keys()) else "HST", "telescope used to acquire data")
header["instrume"] = (ref_header["instrume"] if "INSTRUME" in list(ref_header.keys()) else "FOC", "identifier for instrument used to acquire data")
header["photplam"] = (ref_header["photplam"], "Pivot Wavelength")
header["photflam"] = (ref_header["photflam"], "Inverse Sensitivity in DN/sec/cm**2/Angst")
header["exptot"] = (exp_tot, "Total exposure time in sec")
header["proposid"] = (ref_header["proposid"], "PEP proposal identifier for observation")
header["targname"] = (ref_header["targname"], "Target name")
header["orientat"] = (ref_header["orientat"], "Angle between North and the y-axis of the image")
header["filename"] = (filename, "Original filename")
header["P_int"] = (ref_header["P_int"], "Integrated polarization degree")
header["P_int_err"] = (ref_header["P_int_err"], "Integrated polarization degree error")
header["PA_int"] = (ref_header["PA_int"], "Integrated polarization angle")
header["PA_int_err"] = (ref_header["PA_int_err"], "Integrated polarization angle error")
# Crop Data to mask
if data_mask.shape != (1, 1):
@@ -170,7 +171,7 @@ def save_Stokes(I_stokes, Q_stokes, U_stokes, Stokes_cov, P, debiased_P, s_P,
new_Stokes_cov = np.zeros((*Stokes_cov.shape[:-2], *shape[::-1]))
for i in range(3):
for j in range(3):
Stokes_cov[i, j][(1 - data_mask).astype(bool)] = 0.0
new_Stokes_cov[i, j] = Stokes_cov[i, j][vertex[2] : vertex[3], vertex[0] : vertex[1]]
Stokes_cov = new_Stokes_cov
@@ -181,23 +182,30 @@ def save_Stokes(I_stokes, Q_stokes, U_stokes, Stokes_cov, P, debiased_P, s_P,
hdul = fits.HDUList([])
# Add I_stokes as PrimaryHDU
header["datatype"] = ("I_stokes", "type of data stored in the HDU")
I_stokes[(1 - data_mask).astype(bool)] = 0.0
primary_hdu = fits.PrimaryHDU(data=I_stokes, header=header)
primary_hdu.name = "I_stokes"
hdul.append(primary_hdu)
# Add Q, U, Stokes_cov, P, s_P, PA, s_PA to the HDUList
for data, name in [
[Q_stokes, "Q_stokes"],
[U_stokes, "U_stokes"],
[Stokes_cov, "IQU_cov_matrix"],
[P, "Pol_deg"],
[debiased_P, "Pol_deg_debiased"],
[s_P, "Pol_deg_err"],
[s_P_P, "Pol_deg_err_Poisson_noise"],
[PA, "Pol_ang"],
[s_PA, "Pol_ang_err"],
[s_PA_P, "Pol_ang_err_Poisson_noise"],
[data_mask, "Data_mask"],
]:
hdu_header = header.copy()
hdu_header["datatype"] = name
if not name == "IQU_cov_matrix":
data[(1 - data_mask).astype(bool)] = 0.0
hdu = fits.ImageHDU(data=data, header=hdu_header)
hdu.name = name
hdul.append(hdu)
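
A minimal sketch of the multi-extension FITS layout save_Stokes builds, with invented arrays and only one secondary HDU:

import numpy as np
from astropy.io import fits

I_map = np.zeros((32, 32))  # invented Stokes I map
P_map = np.zeros((32, 32))  # invented polarization degree map

header = fits.Header()
header["datatype"] = ("I_stokes", "type of data stored in the HDU")
hdul = fits.HDUList([fits.PrimaryHDU(data=I_map, header=header)])
for data, name in [[P_map, "Pol_deg"]]:
    hdu_header = header.copy()
    hdu_header["datatype"] = name
    hdu = fits.ImageHDU(data=data, header=hdu_header)
    hdu.name = name
    hdul.append(hdu)
hdul.writeto("stokes_example.fits", overwrite=True)
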

View File

@@ -3,15 +3,19 @@
""" """
Library function to query and download datatsets from MAST api. Library function to query and download datatsets from MAST api.
""" """
from os import system from os import system
from os.path import join as path_join, exists as path_exists from os.path import exists as path_exists
from astroquery.mast import MastMissions, Observations from os.path import join as path_join
from astropy.table import unique, Column from warnings import filterwarnings
from astropy.time import Time, TimeDelta
import astropy.units as u import astropy.units as u
import numpy as np import numpy as np
from astropy.table import Column, unique
from astropy.time import Time, TimeDelta
from astroquery.exceptions import NoResultsWarning from astroquery.exceptions import NoResultsWarning
from warnings import filterwarnings from astroquery.mast import MastMissions, Observations
filterwarnings("error", category=NoResultsWarning) filterwarnings("error", category=NoResultsWarning)
@@ -19,21 +23,24 @@ def divide_proposal(products):
""" """
Divide observation in proposals by time or filter Divide observation in proposals by time or filter
""" """
for pid in np.unique(products['Proposal ID']): for pid in np.unique(products["Proposal ID"]):
obs = products[products['Proposal ID'] == pid].copy() obs = products[products["Proposal ID"] == pid].copy()
same_filt = np.unique(np.array(np.sum([obs['Filters'][:, 1:] == filt[1:] for filt in obs['Filters']], axis=2) < 3, dtype=bool), axis=0) same_filt = np.unique(np.array(np.sum([obs["Filters"][:, 1:] == filt[1:] for filt in obs["Filters"]], axis=2) < 3, dtype=bool), axis=0)
if len(same_filt) > 1: if len(same_filt) > 1:
for filt in same_filt: for filt in same_filt:
products['Proposal ID'][np.any([products['Dataset'] == dataset for dataset in obs['Dataset'][filt]], axis=0)] = "_".join( products["Proposal ID"][np.any([products["Dataset"] == dataset for dataset in obs["Dataset"][filt]], axis=0)] = "_".join(
[obs['Proposal ID'][filt][0], "_".join([fi for fi in obs['Filters'][filt][0][1:] if fi[:-1] != "CLEAR"])]) [obs["Proposal ID"][filt][0], "_".join([fi for fi in obs["Filters"][filt][0][1:] if fi[:-1] != "CLEAR"])]
for pid in np.unique(products['Proposal ID']): )
obs = products[products['Proposal ID'] == pid].copy() for pid in np.unique(products["Proposal ID"]):
close_date = np.unique([[np.abs(TimeDelta(obs['Start'][i].unix-date.unix, format='sec')) obs = products[products["Proposal ID"] == pid].copy()
< 7.*u.d for i in range(len(obs))] for date in obs['Start']], axis=0) close_date = np.unique(
[[np.abs(TimeDelta(obs["Start"][i].unix - date.unix, format="sec")) < 7.0 * u.d for i in range(len(obs))] for date in obs["Start"]], axis=0
)
if len(close_date) > 1: if len(close_date) > 1:
for date in close_date: for date in close_date:
products['Proposal ID'][np.any([products['Dataset'] == dataset for dataset in obs['Dataset'][date]], axis=0) products["Proposal ID"][np.any([products["Dataset"] == dataset for dataset in obs["Dataset"][date]], axis=0)] = "_".join(
] = "_".join([obs['Proposal ID'][date][0], str(obs['Start'][date][0])[:10]]) [obs["Proposal ID"][date][0], str(obs["Start"][date][0])[:10]]
)
return products return products
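
A small illustration of the 7-day proximity test used above, with invented observation dates:

import numpy as np
import astropy.units as u
from astropy.time import Time, TimeDelta

starts = Time(["1995-03-01", "1995-03-04", "1995-06-20"])  # invented exposure start dates
within_week = [[np.abs(TimeDelta(a.unix - b.unix, format="sec")) < 7.0 * u.d for a in starts] for b in starts]
print(np.unique(within_week, axis=0))  # two distinct rows: the March pair and the June visit
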
@@ -41,53 +48,36 @@ def get_product_list(target=None, proposal_id=None):
""" """
Retrieve products list for a given target from the MAST archive Retrieve products list for a given target from the MAST archive
""" """
mission = MastMissions(mission='hst') mission = MastMissions(mission="hst")
radius = '3' radius = "3"
select_cols = [ select_cols = [
'sci_data_set_name', "sci_data_set_name",
'sci_spec_1234', "sci_spec_1234",
'sci_actual_duration', "sci_actual_duration",
'sci_start_time', "sci_start_time",
'sci_stop_time', "sci_stop_time",
'sci_central_wavelength', "sci_central_wavelength",
'sci_instrume', "sci_instrume",
'sci_aper_1234', "sci_aper_1234",
'sci_targname', "sci_targname",
'sci_pep_id', "sci_pep_id",
'sci_pi_last_name'] "sci_pi_last_name",
]
cols = [ cols = ["Dataset", "Filters", "Exptime", "Start", "Stop", "Central wavelength", "Instrument", "Size", "Target name", "Proposal ID", "PI last name"]
'Dataset',
'Filters',
'Exptime',
'Start',
'Stop',
'Central wavelength',
'Instrument',
'Size',
'Target name',
'Proposal ID',
'PI last name']
if target is None: if target is None:
target = input("Target name:\n>") target = input("Target name:\n>")
# Use query_object method to resolve the object name into coordinates # Use query_object method to resolve the object name into coordinates
results = mission.query_object( results = mission.query_object(target, radius=radius, select_cols=select_cols, sci_spec_1234="POL*", sci_obs_type="image", sci_aec="S", sci_instrume="foc")
target,
radius=radius,
select_cols=select_cols,
sci_spec_1234='POL*',
sci_obs_type='image',
sci_aec='S',
sci_instrume='foc')
for c, n_c in zip(select_cols, cols): for c, n_c in zip(select_cols, cols):
results.rename_column(c, n_c) results.rename_column(c, n_c)
results['Proposal ID'] = Column(results['Proposal ID'], dtype='U35') results["Proposal ID"] = Column(results["Proposal ID"], dtype="U35")
results['Filters'] = Column(np.array([filt.split(";") for filt in results['Filters']], dtype=str)) results["Filters"] = Column(np.array([filt.split(";") for filt in results["Filters"]], dtype=str))
results['Start'] = Column(Time(results['Start'])) results["Start"] = Column(Time(results["Start"]))
results['Stop'] = Column(Time(results['Stop'])) results["Stop"] = Column(Time(results["Stop"]))
results = divide_proposal(results) results = divide_proposal(results)
obs = results.copy() obs = results.copy()
@@ -95,67 +85,70 @@ def get_product_list(target=None, proposal_id=None):
# Remove single observations for which a FIND filter is used
to_remove = []
for i in range(len(obs)):
if "F1ND" in obs[i]["Filters"]:
to_remove.append(i)
obs.remove_rows(to_remove)
# Remove observations for which a polarization filter is missing
polfilt = {"POL0": 0, "POL60": 1, "POL120": 2}
for pid in np.unique(obs["Proposal ID"]):
used_pol = np.zeros(3)
for dataset in obs[obs["Proposal ID"] == pid]:
used_pol[polfilt[dataset["Filters"][0]]] += 1
if np.any(used_pol < 1):
obs.remove_rows(np.arange(len(obs))[obs["Proposal ID"] == pid])
tab = unique(obs, ["Target name", "Proposal ID"])
obs["Obs"] = [np.argmax(np.logical_and(tab["Proposal ID"] == data["Proposal ID"], tab["Target name"] == data["Target name"])) + 1 for data in obs]
try:
n_obs = unique(obs[["Obs", "Filters", "Start", "Central wavelength", "Instrument", "Size", "Target name", "Proposal ID", "PI last name"]], "Obs")
except IndexError:
raise ValueError("There is no observation with POL0, POL60 and POL120 for {0:s} in HST/FOC Legacy Archive".format(target))
b = np.zeros(len(results), dtype=bool)
if proposal_id is not None and str(proposal_id) in obs["Proposal ID"]:
b[results["Proposal ID"] == str(proposal_id)] = True
else:
n_obs.pprint(len(n_obs) + 2)
a = [
np.array(i.split(":"), dtype=str)
for i in input("select observations to be downloaded ('1,3,4,5' or '1,3:5' or 'all','*' default to 1)\n>").split(",")
]
if a[0][0] == "":
a = [[1]]
if a[0][0] in ["a", "all", "*"]:
b = np.ones(len(results), dtype=bool)
else:
a = [np.array(i, dtype=int) for i in a]
for i in a:
if len(i) > 1:
for j in range(i[0], i[1] + 1):
b[np.array([dataset in obs["Dataset"][obs["Obs"] == j] for dataset in results["Dataset"]])] = True
else:
b[np.array([dataset in obs["Dataset"][obs["Obs"] == i[0]] for dataset in results["Dataset"]])] = True
observations = Observations.query_criteria(obs_id=list(results["Dataset"][b]))
products = Observations.filter_products(
Observations.get_product_list(observations),
productType=["SCIENCE"],
dataproduct_type=["image"],
calib_level=[2],
description="DADS C0F file - Calibrated exposure WFPC/WFPC2/FOC/FOS/GHRS/HSP",
)
products["proposal_id"] = Column(products["proposal_id"], dtype="U35")
products["target_name"] = Column(observations["target_name"])
for prod in products:
prod["proposal_id"] = results["Proposal ID"][results["Dataset"] == prod["productFilename"][: len(results["Dataset"][0])].upper()][0]
for prod in products:
prod["target_name"] = observations["target_name"][observations["obsid"] == prod["obsID"]][0]
tab = unique(products, ["target_name", "proposal_id"])
products["Obs"] = [np.argmax(np.logical_and(tab["proposal_id"] == data["proposal_id"], tab["target_name"] == data["target_name"])) + 1 for data in products]
return target, products
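
A hedged, minimal version of the archive query performed above (the target name is invented; the column names and criteria are taken from the code shown):

from astroquery.mast import MastMissions

mission = MastMissions(mission="hst")
results = mission.query_object(
    "NGC 1068",  # invented target, resolved to coordinates by MAST
    radius="3",
    select_cols=["sci_data_set_name", "sci_spec_1234", "sci_pep_id"],
    sci_spec_1234="POL*",   # keep only exposures taken through a polarizer
    sci_instrume="foc",
)
print(results[:5])
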
def retrieve_products(target=None, proposal_id=None, output_dir="./data"):
"""
Given a target name and a proposal_id, create the local directories and retrieve the fits files from the MAST Archive
"""
@@ -163,18 +156,19 @@ def retrieve_products(target=None, proposal_id=None, output_dir='./data'):
prodpaths = []
# data_dir = path_join(output_dir, target)
out = ""
for obs in unique(products, "Obs"):
filepaths = []
# obs_dir = path_join(data_dir, obs['prodposal_id'])
# if obs['target_name']!=target:
obs_dir = path_join(path_join(output_dir, target), obs["proposal_id"])
if not path_exists(obs_dir):
system("mkdir -p {0:s} {1:s}".format(obs_dir, obs_dir.replace("data", "plots")))
for file in products["productFilename"][products["Obs"] == obs["Obs"]]:
fpath = path_join(obs_dir, file)
if not path_exists(fpath):
out += "{0:s} : {1:s}\n".format(
file, Observations.download_file(products["dataURI"][products["productFilename"] == file][0], local_path=fpath)[0]
)
else:
out += "{0:s} : Exists\n".format(file)
filepaths.append([obs_dir, file])
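
A hedged sketch of the product download flow, mirroring the astroquery calls above; the dataset name is invented:

from astroquery.mast import Observations

observations = Observations.query_criteria(obs_id=["x3mb010at"])  # invented FOC dataset id
products = Observations.filter_products(Observations.get_product_list(observations), productType=["SCIENCE"], calib_level=[2])
for uri, name in zip(products["dataURI"], products["productFilename"]):
    status = Observations.download_file(uri, local_path=name)
    print(name, ":", status[0])
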
@@ -186,13 +180,12 @@ def retrieve_products(target=None, proposal_id=None, output_dir='./data'):
if __name__ == "__main__":
import argparse
parser = argparse.ArgumentParser(description="Query MAST for target products")
parser.add_argument("-t", "--target", metavar="targetname", required=False, help="the name of the target", type=str, default=None)
parser.add_argument("-p", "--proposal_id", metavar="proposal_id", required=False, help="the proposal id of the data products", type=int, default=None)
parser.add_argument(
"-o", "--output_dir", metavar="directory_path", required=False, help="output directory path for the data products", type=str, default="./data"
)
args = parser.parse_args()
print(args.target)
prodpaths = retrieve_products(target=args.target, proposal_id=args.proposal_id)

File diff suppressed because it is too large

View File

@@ -1,5 +1,6 @@
import numpy as np

def rot2D(ang):
"""
Return the 2D rotation matrix of given angle in degrees
@@ -17,10 +18,10 @@ def princ_angle(ang):
A = np.array([ang])
else:
A = np.array(ang)
while np.any(A < 0.0):
A[A < 0.0] = A[A < 0.0] + 360.0
while np.any(A >= 180.0):
A[A >= 180.0] = A[A >= 180.0] - 180.0
if type(ang) is type(A):
return A
else:
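
For reference, the two while loops above reduce angles to the [0, 180) range; a plain modulo gives the same principal angles:

import numpy as np

angles = np.array([-30.0, 270.0, 15.0])  # invented test values
print(angles % 180.0)  # [150.  90.  15.], the same results the loops converge to
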
@@ -31,7 +32,7 @@ def sci_not(v, err, rnd=1, out=str):
""" """
Return the scientifque error notation as a string. Return the scientifque error notation as a string.
""" """
power = - int(('%E' % v)[-3:])+1 power = -int(("%E" % v)[-3:]) + 1
output = [r"({0}".format(round(v * 10**power, rnd)), round(v * 10**power, rnd)] output = [r"({0}".format(round(v * 10**power, rnd)), round(v * 10**power, rnd)]
if isinstance(err, list): if isinstance(err, list):
for error in err: for error in err:
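
A quick illustration of how the exponent is read from the "%E" representation in sci_not (the value is invented):

v = 0.00347  # invented value
print("%E" % v)                    # 3.470000E-03
power = -int(("%E" % v)[-3:]) + 1  # -(-3) + 1 = 4
print(round(v * 10**power, 1))     # 34.7, i.e. v rescaled by 10**power before rounding
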

View File

@@ -1,7 +1,7 @@
#!/usr/bin/python3
import numpy as np
from astropy.io import fits
from lib.plots import overplot_pol, overplot_radio
from matplotlib.colors import LogNorm

Stokes_UV = fits.open("./data/IC5063/5918/IC5063_FOC_b0.10arcsec_c0.20arcsec.fits")
@@ -14,31 +14,37 @@ Stokes_357GHz = fits.open("./data/IC5063/radio/IC5063_357GHz.fits")
Stokes_IR = fits.open("./data/IC5063/IR/u2e65g01t_c0f_rot.fits")
# levelsMorganti = np.array([1.,2.,3.,8.,16.,32.,64.,128.])
levelsMorganti = np.logspace(-0.1249, 1.97, 7) / 100.0
levels18GHz = levelsMorganti * Stokes_18GHz[0].data.max()
A = overplot_radio(Stokes_UV, Stokes_18GHz)
A.plot(levels=levels18GHz, SNRp_cut=2.0, SNRi_cut=10.0, savename="./plots/IC5063/18GHz_overplot.pdf", vec_scale=None)
levels24GHz = levelsMorganti * Stokes_24GHz[0].data.max()
B = overplot_radio(Stokes_UV, Stokes_24GHz)
B.plot(levels=levels24GHz, SNRp_cut=2.0, SNRi_cut=10.0, savename="./plots/IC5063/24GHz_overplot.pdf", vec_scale=None)
levels103GHz = levelsMorganti * Stokes_103GHz[0].data.max()
C = overplot_radio(Stokes_UV, Stokes_103GHz)
C.plot(levels=levels103GHz, SNRp_cut=2.0, SNRi_cut=10.0, savename="./plots/IC5063/103GHz_overplot.pdf", vec_scale=None)
levels229GHz = levelsMorganti * Stokes_229GHz[0].data.max()
D = overplot_radio(Stokes_UV, Stokes_229GHz)
D.plot(levels=levels229GHz, SNRp_cut=2.0, SNRi_cut=10.0, savename="./plots/IC5063/229GHz_overplot.pdf", vec_scale=None)
levels357GHz = levelsMorganti * Stokes_357GHz[0].data.max()
E = overplot_radio(Stokes_UV, Stokes_357GHz)
E.plot(levels=levels357GHz, SNRp_cut=2.0, SNRi_cut=10.0, savename="./plots/IC5063/357GHz_overplot.pdf", vec_scale=None)
# F = overplot_pol(Stokes_UV, Stokes_S2)
# F.plot(SNRp_cut=3.0, SNRi_cut=80.0, savename='./plots/IC5063/S2_overplot.pdf', norm=LogNorm(vmin=5e-20,vmax=5e-18))
G = overplot_pol(Stokes_UV, Stokes_IR, cmap="inferno")
G.plot(
SNRp_cut=2.0,
SNRi_cut=10.0,
savename="./plots/IC5063/IR_overplot.pdf",
vec_scale=None,
norm=LogNorm(Stokes_IR[0].data.max() * Stokes_IR[0].header["photflam"] / 1e3, Stokes_IR[0].data.max() * Stokes_IR[0].header["photflam"]),
cmap="inferno_r",
)

View File

@@ -1,6 +1,6 @@
#!/usr/bin/python3
import numpy as np
from astropy.io import fits
from lib.plots import overplot_chandra, overplot_pol
from matplotlib.colors import LogNorm
@@ -8,13 +8,13 @@ Stokes_UV = fits.open("./data/MRK463E/5960/MRK463E_FOC_b0.05arcsec_c0.10arcsec.f
Stokes_IR = fits.open("./data/MRK463E/WFPC2/IR_rot_crop.fits")
Stokes_Xr = fits.open("./data/MRK463E/Chandra/X_ray_crop.fits")
levels = np.geomspace(1.0, 99.0, 7)
A = overplot_chandra(Stokes_UV, Stokes_Xr, norm=LogNorm())
A.plot(levels=levels, SNRp_cut=3.0, SNRi_cut=3.0, vec_scale=5, zoom=1, savename="./plots/MRK463E/Chandra_overplot.pdf")
A.write_to(path1="./data/MRK463E/FOC_data_Chandra.fits", path2="./data/MRK463E/Chandra_data.fits", suffix="aligned")
levels = np.array([0.8, 2, 5, 10, 20, 50]) / 100.0 * Stokes_UV[0].header["photflam"]
B = overplot_pol(Stokes_UV, Stokes_IR, norm=LogNorm())
B.plot(levels=levels, SNRp_cut=3.0, SNRi_cut=3.0, vec_scale=5, norm=LogNorm(8.5e-18, 2.5e-15), savename="./plots/MRK463E/IR_overplot.pdf")
B.write_to(path1="./data/MRK463E/FOC_data_WFPC.fits", path2="./data/MRK463E/WFPC_data.fits", suffix="aligned")

View File

@@ -1,5 +1,6 @@
#!/usr/bin/python
from getopt import error as get_error
from getopt import getopt
from sys import argv

arglist = argv[1:]
@@ -24,7 +25,7 @@ try:
elif curr_arg in ("-i", "--snri"):
SNRi_cut = int(curr_val)
elif curr_arg in ("-l", "--lim"):
flux_lim = list("".join(curr_val).split(","))
except get_error as err:
print(str(err))

View File

@@ -1,19 +1,21 @@
#!/usr/bin/python
def main(infiles=None):
"""
Retrieve native spatial resolution from given observation.
"""
from os.path import join as path_join
from warnings import catch_warnings, filterwarnings
from astropy.io.fits import getheader
from astropy.wcs import WCS, FITSFixedWarning
from numpy.linalg import eig
if infiles is None:
print('Usage: "python get_cdelt.py -f infiles"')
return 1
prod = [["/".join(filepath.split("/")[:-1]), filepath.split("/")[-1]] for filepath in infiles]
data_folder = prod[0][0]
infiles = [p[1] for p in prod]
@@ -21,14 +23,14 @@ def main(infiles=None):
size = {}
for currfile in infiles:
with catch_warnings():
filterwarnings("ignore", message="'datfix' made the change", category=FITSFixedWarning)
wcs = WCS(getheader(path_join(data_folder, currfile))).celestial
key = currfile[:-5]
size[key] = wcs.array_shape
if wcs.wcs.has_cd():
cdelt[key] = eig(wcs.wcs.cd)[0] * 3600.0
else:
cdelt[key] = wcs.wcs.cdelt * 3600.0
print("Image name, native resolution in arcsec and shape")
for currfile in infiles:
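
A hedged sketch of reading the native pixel scale from a single FITS header, following the same has_cd branch as above (the filename is invented):

from astropy.io.fits import getheader
from astropy.wcs import WCS
from numpy.linalg import eig

wcs = WCS(getheader("x3mb010at_c0f.fits")).celestial  # invented FOC filename
if wcs.wcs.has_cd():
    scale_arcsec = eig(wcs.wcs.cd)[0] * 3600.0
else:
    scale_arcsec = wcs.wcs.cdelt * 3600.0
print(scale_arcsec, wcs.array_shape)
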
@@ -41,7 +43,7 @@ def main(infiles=None):
if __name__ == "__main__":
import argparse
parser = argparse.ArgumentParser(description="Query MAST for target products")
parser.add_argument("-f", "--files", metavar="path", required=False, nargs="*", help="the full or relative path to the data products", default=None)
args = parser.parse_args()
exitcode = main(infiles=args.files)