Merge branch 'test' into main

Authored by sugar_jo on 2024-07-16 21:39:25 +08:00; committed by GitHub
16 changed files with 3029 additions and 1595 deletions

237
package/Combine.py Executable file
View File

@@ -0,0 +1,237 @@
#!/usr/bin/python
# -*- coding:utf-8 -*-
# Project libraries
import numpy as np
def same_reduction(infiles):
"""
Test whether infiles are pipeline products reduced with the same parameters.
"""
from astropy.io.fits import open as fits_open
from astropy.wcs import WCS
params = {"IQU": [], "ROT": [], "SIZE": [], "TARGNAME": [], "BKG_SUB": [], "SAMPLING": [], "SMOOTH": []}
for file in infiles:
with fits_open(file) as f:
# test for presence of I, Q, U images
datatype = []
for hdu in f:
try:
datatype.append(hdu.header["datatype"])
except KeyError:
pass
test_IQU = True
for look in ["I_stokes", "Q_stokes", "U_stokes", "IQU_cov_matrix"]:
test_IQU *= look in datatype
params["IQU"].append(test_IQU)
# test for orientation and pixel size
wcs = WCS(f[0].header).celestial
if wcs.wcs.has_cd() or (wcs.wcs.cdelt[:2] == np.array([1.0, 1.0])).all():
cdelt = np.linalg.eig(wcs.wcs.cd)[0]
pc = np.dot(wcs.wcs.cd, np.diag(1.0 / cdelt))
else:
cdelt = wcs.wcs.cdelt
pc = wcs.wcs.pc
params["ROT"].append(np.round(np.arccos(pc[0, 0]), 2) if np.abs(pc[0, 0]) < 1.0 else 0.0)
params["SIZE"].append(np.round(np.max(np.abs(cdelt * 3600.0)), 2))
# look for information on reduction procedure
for key in [k for k in params.keys() if k not in ["IQU", "ROT", "SIZE"]]:
try:
params[key].append(f[0].header[key])
except KeyError:
params[key].append("null")
result = np.all(params["IQU"])
for key in [k for k in params.keys() if k != "IQU"]:
result *= np.unique(params[key]).size == 1
if np.all(params["IQU"]) and not result:
print(np.unique(params["SIZE"]))
raise ValueError("Not all observations were reduced with the same parameters, please provide the raw files.")
return result
def same_obs(infiles, data_folder):
"""
Group infiles that belong to the same observation (same proposal ID and close observation dates).
"""
import astropy.units as u
from astropy.io.fits import getheader
from astropy.table import Table
from astropy.time import Time, TimeDelta
headers = [getheader("/".join([data_folder, file])) for file in infiles]
files = {}
files["PROPOSID"] = np.array([str(head["PROPOSID"]) for head in headers], dtype=str)
files["ROOTNAME"] = np.array([head["ROOTNAME"].lower() + "_c0f.fits" for head in headers], dtype=str)
files["EXPSTART"] = np.array([Time(head["EXPSTART"], format="mjd") for head in headers])
products = Table(files)
new_infiles = []
for pid in np.unique(products["PROPOSID"]):
obs = products[products["PROPOSID"] == pid].copy()
close_date = np.unique(
[[np.abs(TimeDelta(obs["EXPSTART"][i].unix - date.unix, format="sec")) < 7.0 * u.d for i in range(len(obs))] for date in obs["EXPSTART"]], axis=0
)
if len(close_date) > 1:
for date in close_date:
new_infiles.append(list(products["ROOTNAME"][np.any([products["ROOTNAME"] == dataset for dataset in obs["ROOTNAME"][date]], axis=0)]))
else:
new_infiles.append(list(products["ROOTNAME"][products["PROPOSID"] == pid]))
return new_infiles
def combine_Stokes(infiles):
"""
Combine I, Q, U from different observations of the same object.
"""
from astropy.io.fits import open as fits_open
from lib.reduction import align_data, zeropad
from scipy.ndimage import shift as sc_shift
I_array, Q_array, U_array, IQU_cov_array, data_mask, headers = [], [], [], [], [], []
shape = np.array([0, 0])
for file in infiles:
with fits_open(file) as f:
headers.append(f[0].header)
I_array.append(f["I_stokes"].data)
Q_array.append(f["Q_stokes"].data)
U_array.append(f["U_stokes"].data)
IQU_cov_array.append(f["IQU_cov_matrix"].data)
data_mask.append(f["data_mask"].data.astype(bool))
shape[0] = np.max([shape[0], f["I_stokes"].data.shape[0]])
shape[1] = np.max([shape[1], f["I_stokes"].data.shape[1]])
exposure_array = np.array([float(head["EXPTIME"]) for head in headers])
shape += np.array([5, 5])
data_mask = np.sum([zeropad(mask, shape) for mask in data_mask], axis=0).astype(bool)
I_array = np.array([zeropad(I, shape) for I in I_array])
Q_array = np.array([zeropad(Q, shape) for Q in Q_array])
U_array = np.array([zeropad(U, shape) for U in U_array])
IQU_cov_array = np.array([[[zeropad(cov[i, j], shape) for j in range(3)] for i in range(3)] for cov in IQU_cov_array])
sI_array = np.sqrt(IQU_cov_array[:, 0, 0])
sQ_array = np.sqrt(IQU_cov_array[:, 1, 1])
sU_array = np.sqrt(IQU_cov_array[:, 2, 2])
_, _, _, _, shifts, errors = align_data(I_array, headers, error_array=sI_array, data_mask=data_mask, ref_center="center", return_shifts=True)
data_mask_aligned = np.sum([sc_shift(data_mask, s, order=1, cval=0.0) for s in shifts], axis=0).astype(bool)
I_aligned, sI_aligned = (
np.array([sc_shift(I, s, order=1, cval=0.0) for I, s in zip(I_array, shifts)]),
np.array([sc_shift(sI, s, order=1, cval=0.0) for sI, s in zip(sI_array, shifts)]),
)
Q_aligned, sQ_aligned = (
np.array([sc_shift(Q, s, order=1, cval=0.0) for Q, s in zip(Q_array, shifts)]),
np.array([sc_shift(sQ, s, order=1, cval=0.0) for sQ, s in zip(sQ_array, shifts)]),
)
U_aligned, sU_aligned = (
np.array([sc_shift(U, s, order=1, cval=0.0) for U, s in zip(U_array, shifts)]),
np.array([sc_shift(sU, s, order=1, cval=0.0) for sU, s in zip(sU_array, shifts)]),
)
IQU_cov_aligned = np.array([[[sc_shift(cov[i, j], s, order=1, cval=0.0) for j in range(3)] for i in range(3)] for cov, s in zip(IQU_cov_array, shifts)])
I_combined = np.sum([exp * I for exp, I in zip(exposure_array, I_aligned)], axis=0) / exposure_array.sum()
Q_combined = np.sum([exp * Q for exp, Q in zip(exposure_array, Q_aligned)], axis=0) / exposure_array.sum()
U_combined = np.sum([exp * U for exp, U in zip(exposure_array, U_aligned)], axis=0) / exposure_array.sum()
IQU_cov_combined = np.zeros((3, 3, shape[0], shape[1]))
for i in range(3):
IQU_cov_combined[i, i] = np.sum([exp**2 * cov for exp, cov in zip(exposure_array, IQU_cov_aligned[:, i, i])], axis=0) / exposure_array.sum() ** 2
for j in [x for x in range(3) if x != i]:
IQU_cov_combined[i, j] = np.sqrt(
np.sum([exp**2 * cov**2 for exp, cov in zip(exposure_array, IQU_cov_aligned[:, i, j])], axis=0) / exposure_array.sum() ** 2
)
IQU_cov_combined[j, i] = np.sqrt(
np.sum([exp**2 * cov**2 for exp, cov in zip(exposure_array, IQU_cov_aligned[:, j, i])], axis=0) / exposure_array.sum() ** 2
)
header_combined = headers[0]
header_combined["EXPTIME"] = exposure_array.sum()
return I_combined, Q_combined, U_combined, IQU_cov_combined, data_mask_aligned, header_combined
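# Editor's note (illustrative, not part of the commit): for a single pixel the
# exposure-time weighting above reduces to a weighted mean and its propagated
# variance, e.g. with hypothetical exposures and Stokes I values:
#   t = np.array([1200.0, 896.0])          # EXPTIME per observation [s]
#   I = np.array([3.2e-3, 3.5e-3])         # Stokes I per observation [counts/s]
#   var = np.array([1.0e-8, 2.0e-8])       # variance of I per observation
#   I_comb = np.sum(t * I) / t.sum()                # = sum(t_i I_i) / sum(t_i)
#   var_comb = np.sum(t**2 * var) / t.sum() ** 2    # diagonal covariance terms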
def main(infiles, target=None, output_dir="./data/"):
""" """
from lib.fits import save_Stokes
from lib.plots import pol_map
from lib.reduction import compute_pol, rotate_Stokes
if target is None:
target = input("Target name:\n>")
prod = np.array([["/".join(filepath.split("/")[:-1]), filepath.split("/")[-1]] for filepath in infiles], dtype=str)
data_folder = prod[0][0]
files = [p[1] for p in prod]
# Reduction parameters
kwargs = {}
# Polarization map output
kwargs["SNRp_cut"] = 3.0
kwargs["SNRi_cut"] = 1.0
kwargs["flux_lim"] = 1e-19, 3e-17
kwargs["scale_vec"] = 5
kwargs["step_vec"] = 1
if not same_reduction(infiles):
from FOC_reduction import main as FOC_reduction
grouped_infiles = same_obs(files, data_folder)
new_infiles = []
for i, group in enumerate(grouped_infiles):
new_infiles.append(
FOC_reduction(target=target + "-" + str(i + 1), infiles=["/".join([data_folder, file]) for file in group], interactive=True)[0]
)
infiles = new_infiles
I_combined, Q_combined, U_combined, IQU_cov_combined, data_mask_combined, header_combined = combine_Stokes(infiles=infiles)
I_combined, Q_combined, U_combined, IQU_cov_combined, data_mask_combined, header_combined = rotate_Stokes(
I_stokes=I_combined, Q_stokes=Q_combined, U_stokes=U_combined, Stokes_cov=IQU_cov_combined, data_mask=data_mask_combined, header_stokes=header_combined
)
P, debiased_P, s_P, s_P_P, PA, s_PA, s_PA_P = compute_pol(
I_stokes=I_combined, Q_stokes=Q_combined, U_stokes=U_combined, Stokes_cov=IQU_cov_combined, header_stokes=header_combined
)
filename = header_combined["FILENAME"]
figname = "_".join([target, filename[filename.find("FOC_") :], "combined"])
Stokes_combined = save_Stokes(
I_stokes=I_combined,
Q_stokes=Q_combined,
U_stokes=U_combined,
Stokes_cov=IQU_cov_combined,
P=P,
debiased_P=debiased_P,
s_P=s_P,
s_P_P=s_P_P,
PA=PA,
s_PA=s_PA,
s_PA_P=s_PA_P,
header_stokes=header_combined,
data_mask=data_mask_combined,
filename=figname,
data_folder=data_folder,
return_hdul=True,
)
pol_map(Stokes_combined, **kwargs)
return "/".join([data_folder, figname + ".fits"])
if __name__ == "__main__":
import argparse
parser = argparse.ArgumentParser(description="Combine different observations of a single object")
parser.add_argument("-t", "--target", metavar="targetname", required=False, help="the name of the target", type=str, default=None)
parser.add_argument("-f", "--files", metavar="path", required=False, nargs="*", help="the full or relative path to the data products", default=None)
parser.add_argument(
"-o", "--output_dir", metavar="directory_path", required=False, help="output directory path for the data products", type=str, default="./data"
)
args = parser.parse_args()
exitcode = main(target=args.target, infiles=args.files, output_dir=args.output_dir)
print("Written to: ", exitcode)

View File

@@ -22,16 +22,17 @@ from lib.utils import sci_not, princ_angle
def main(target=None, proposal_id=None, data_dir=None, infiles=None, output_dir="./data", crop=False, interactive=False):
# Reduction parameters
# Deconvolution
deconvolve = False
if deconvolve:
# from lib.deconvolve import from_file_psf
psf = 'gaussian' # Can be user-defined as well
psf = "gaussian" # Can be user-defined as well
# psf = from_file_psf(data_folder+psf_file)
psf_FWHM = 3.1
psf_scale = 'px'
psf_scale = "px"
psf_shape = None # (151, 151)
iterations = 1
algo = "conjgrad"
@@ -40,18 +41,20 @@ def main(target=None, proposal_id=None, data_dir=None, infiles=None, output_dir=
display_crop = False
# Background estimation
error_sub_type = 'freedman-diaconis' # sqrt, sturges, rice, scott, freedman-diaconis (default) or shape (example (51, 51))
subtract_error = 0.01
error_sub_type = "freedman-diaconis" # sqrt, sturges, rice, scott, freedman-diaconis (default) or shape (example (51, 51))
subtract_error = 1.0
display_bkg = False
# Data binning
rebin = True
pxsize = 2
px_scale = 'px' # pixel, arcsec or full
rebin_operation = 'sum' # sum or average
pxscale = "px" # pixel, arcsec or full
rebin_operation = "sum" # sum or average
# Alignment
align_center = 'center' # If None will not align the images
align_center = "center" # If None will not align the images
display_align = False
display_data = False
@@ -59,20 +62,19 @@ def main(target=None, proposal_id=None, data_dir=None, infiles=None, output_dir=
transmitcorr = True
# Smoothing
smoothing_function = 'combine' # gaussian_after, weighted_gaussian_after, gaussian, weighted_gaussian or combine
smoothing_FWHM = None # If None, no smoothing is done
smoothing_scale = 'px' # pixel or arcsec
smoothing_function = "combine" # gaussian_after, weighted_gaussian_after, gaussian, weighted_gaussian or combine
smoothing_FWHM = 2.0 # If None, no smoothing is done
smoothing_scale = "px" # pixel or arcsec
# Rotation
rotate_data = False # rotation to North convention can give erroneous results
rotate_stokes = True
rotate_North = True
# Polarization map output
SNRp_cut = 3. # P measurements with SNR>3
SNRi_cut = 3. # I measurements with SNR>30, which implies an uncertainty in P of 4.7%.
flux_lim = None # lowest and highest flux displayed on plot, defaults to bkg and maximum in cut if None
vec_scale = 5
step_vec = 1 # plot all vectors in the array; if step_vec = 2, every other vector is plotted; if step_vec = 0, all vectors are displayed at full length
SNRp_cut = 3.0 # P measurements with SNR>3
SNRi_cut = 1.0 # I measurements with SNR>30, which implies an uncertainty in P of 4.7%.
flux_lim = None # lowest and highest flux displayed on plot, defaults to bkg and maximum in cut if None
scale_vec = 5
step_vec = 1 # plot all vectors in the array; if step_vec = 2, every other vector is plotted; if step_vec = 0, all vectors are displayed at full length
# Adaptive binning
# in order to perform optimal binning, there are several steps to follow:
@@ -85,9 +87,10 @@ def main(target=None, proposal_id=None, data_dir=None, infiles=None, output_dir=
optimize = False
# Pipeline start
# Step 1:
# Get data from fits files and translate to flux in erg/cm²/s/Angstrom.
if data_dir is None:
if infiles is not None:
prod = np.array([["/".join(filepath.split('/')[:-1]), filepath.split('/')[-1]] for filepath in infiles], dtype=str)
@@ -114,6 +117,7 @@ def main(target=None, proposal_id=None, data_dir=None, infiles=None, output_dir=
target = input("Target name:\n>")
data_array, headers = proj_fits.get_obs_data(infiles, data_folder=data_folder, compute_flux=True)
try:
plots_folder = data_folder.replace("data", "plots")
except ValueError:
@@ -123,18 +127,20 @@ def main(target=None, proposal_id=None, data_dir=None, infiles=None, output_dir=
figname = "_".join([target, "FOC"])
figtype = ""
if rebin:
if px_scale not in ['full']:
figtype = "".join(["b", "{0:.2f}".format(pxsize), px_scale]) # additionnal informations
if (pxsize is not None) and not (pxsize == 1 and pxscale.lower() in ["px", "pixel", "pixels"]):
if pxscale not in ["full"]:
figtype = "".join(["b", "{0:.2f}".format(pxsize), pxscale]) # additionnal informations
else:
figtype = "full"
if smoothing_FWHM is not None:
figtype += "_"+"".join(["".join([s[0] for s in smoothing_function.split("_")]),
"{0:.2f}".format(smoothing_FWHM), smoothing_scale]) # additionnal informations
if smoothing_FWHM is not None and smoothing_scale is not None:
smoothstr = "".join([*[s[0] for s in smoothing_function.split("_")], "{0:.2f}".format(smoothing_FWHM), smoothing_scale])
figtype = "_".join([figtype, smoothstr] if figtype != "" else [smoothstr])
if deconvolve:
figtype += "_deconv"
figtype = "_".join([figtype, "deconv"] if figtype != "" else ["deconv"])
if align_center is None:
figtype += "_not_aligned"
figtype = "_".join([figtype, "not_aligned"] if figtype != "" else ["not_aligned"])
if optimal_binning:
options = {'optimize': optimize, 'optimal_binning': True}
@@ -337,12 +343,14 @@ def main(target=None, proposal_id=None, data_dir=None, infiles=None, output_dir=
elif px_scale.lower() not in ['full', 'integrate']:
proj_plots.pol_map(Stokes_test, SNRp_cut=SNRp_cut, SNRi_cut=SNRi_cut, flux_lim=flux_lim)
return 0
return outfiles
if __name__ == "__main__":
import argparse
parser = argparse.ArgumentParser(description='Query MAST for target products')
parser.add_argument('-t', '--target', metavar='targetname', required=False, help='the name of the target', type=str, default=None)
parser.add_argument('-p', '--proposal_id', metavar='proposal_id', required=False, help='the proposal id of the data products', type=int, default=None)
@@ -355,4 +363,4 @@ if __name__ == "__main__":
args = parser.parse_args()
exitcode = main(target=args.target, proposal_id=args.proposal_id, data_dir=args.data_dir, infiles=args.files,
output_dir=args.output_dir, crop=args.crop, interactive=args.interactive)
print("Finished with ExitCode: ", exitcode)
print("Finished with ExitCode: ", exitcode)

View File

@@ -1,2 +1,3 @@
from . import lib
from . import src
from . import FOC_reduction

View File

@@ -9,139 +9,155 @@ prototypes :
- bkg_mini(data, error, mask, headers, sub_shape, display, savename, plots_folder) -> (n_data_array, n_error_array, headers, background)
Compute the error (noise) of the input array by looking at the sub-region of minimal flux in every image and of shape sub_shape.
"""
from os.path import join as path_join
from copy import deepcopy
import numpy as np
import matplotlib.pyplot as plt
import matplotlib.dates as mdates
from matplotlib.colors import LogNorm
from matplotlib.patches import Rectangle
from datetime import datetime, timedelta
from os.path import join as path_join
import matplotlib.dates as mdates
import matplotlib.pyplot as plt
import numpy as np
from astropy.time import Time
from lib.plots import plot_obs
from matplotlib.colors import LogNorm
from matplotlib.patches import Rectangle
from scipy.optimize import curve_fit
def gauss(x, *p):
N, mu, sigma = p
return N*np.exp(-(x-mu)**2/(2.*sigma**2))
return N * np.exp(-((x - mu) ** 2) / (2.0 * sigma**2))
def gausspol(x, *p):
N, mu, sigma, a, b, c, d = p
return N*np.exp(-(x-mu)**2/(2.*sigma**2)) + a*np.log(x) + b/x + c*x + d
return N * np.exp(-((x - mu) ** 2) / (2.0 * sigma**2)) + a * np.log(x) + b / x + c * x + d
def bin_centers(edges):
return (edges[1:]+edges[:-1])/2.
return (edges[1:] + edges[:-1]) / 2.0
def display_bkg(data, background, std_bkg, headers, histograms=None, binning=None, coeff=None, rectangle=None, savename=None, plots_folder="./"):
plt.rcParams.update({'font.size': 15})
convert_flux = np.array([head['photflam'] for head in headers])
date_time = np.array([Time((headers[i]['expstart']+headers[i]['expend'])/2., format='mjd', precision=0).iso for i in range(len(headers))])
date_time = np.array([datetime.strptime(d, '%Y-%m-%d %H:%M:%S') for d in date_time])
date_err = np.array([timedelta(seconds=headers[i]['exptime']/2.) for i in range(len(headers))])
filt = np.array([headers[i]['filtnam1'] for i in range(len(headers))])
dict_filt = {"POL0": 'r', "POL60": 'g', "POL120": 'b'}
plt.rcParams.update({"font.size": 15})
convert_flux = np.array([head["photflam"] for head in headers])
date_time = np.array([Time((headers[i]["expstart"] + headers[i]["expend"]) / 2.0, format="mjd", precision=0).iso for i in range(len(headers))])
date_time = np.array([datetime.strptime(d, "%Y-%m-%d %H:%M:%S") for d in date_time])
date_err = np.array([timedelta(seconds=headers[i]["exptime"] / 2.0) for i in range(len(headers))])
filt = np.array([headers[i]["filtnam1"] for i in range(len(headers))])
dict_filt = {"POL0": "r", "POL60": "g", "POL120": "b"}
c_filt = np.array([dict_filt[f] for f in filt])
fig, ax = plt.subplots(figsize=(10, 6), constrained_layout=True)
for f in np.unique(filt):
mask = [fil == f for fil in filt]
ax.scatter(date_time[mask], background[mask]*convert_flux[mask], color=dict_filt[f],
label="{0:s}".format(f))
ax.errorbar(date_time, background*convert_flux, xerr=date_err, yerr=std_bkg*convert_flux, fmt='+k',
markersize=0, ecolor=c_filt)
ax.scatter(date_time[mask], background[mask] * convert_flux[mask], color=dict_filt[f], label="{0:s}".format(f))
ax.errorbar(date_time, background * convert_flux, xerr=date_err, yerr=std_bkg * convert_flux, fmt="+k", markersize=0, ecolor=c_filt)
# Date handling
locator = mdates.AutoDateLocator()
formatter = mdates.ConciseDateFormatter(locator)
ax.xaxis.set_major_locator(locator)
ax.xaxis.set_major_formatter(formatter)
# ax.set_ylim(bottom=0.)
ax.set_yscale('log')
ax.set_yscale("log")
ax.set_xlabel("Observation date and time")
ax.set_ylabel(r"Flux [$ergs \cdot cm^{-2} \cdot s^{-1} \cdot \AA^{-1}$]")
plt.legend()
if not (savename is None):
if savename is not None:
this_savename = deepcopy(savename)
if not savename[-4:] in ['.png', '.jpg', '.pdf']:
this_savename += '_background_flux.pdf'
if savename[-4:] not in [".png", ".jpg", ".pdf"]:
this_savename += "_background_flux.pdf"
else:
this_savename = savename[:-4]+"_background_flux"+savename[-4:]
fig.savefig(path_join(plots_folder, this_savename), bbox_inches='tight')
this_savename = savename[:-4] + "_background_flux" + savename[-4:]
fig.savefig(path_join(plots_folder, this_savename), bbox_inches="tight")
if not (histograms is None):
if histograms is not None:
filt_obs = {"POL0": 0, "POL60": 0, "POL120": 0}
fig_h, ax_h = plt.subplots(figsize=(10, 6), constrained_layout=True)
fig_h, ax_h = plt.subplots(figsize=(10, 8), constrained_layout=True)
for i, (hist, bins) in enumerate(zip(histograms, binning)):
filt_obs[headers[i]['filtnam1']] += 1
ax_h.plot(bins*convert_flux[i], hist, '+', color="C{0:d}".format(i), alpha=0.8,
label=headers[i]['filtnam1']+' (Obs '+str(filt_obs[headers[i]['filtnam1']])+')')
ax_h.plot([background[i]*convert_flux[i], background[i]*convert_flux[i]], [hist.min(), hist.max()], 'x--', color="C{0:d}".format(i), alpha=0.8)
if not (coeff is None):
filt_obs[headers[i]["filtnam1"]] += 1
ax_h.plot(
bins * convert_flux[i],
hist,
"+",
color="C{0:d}".format(i),
alpha=0.8,
label=headers[i]["filtnam1"] + " (Obs " + str(filt_obs[headers[i]["filtnam1"]]) + ")",
)
ax_h.plot([background[i] * convert_flux[i], background[i] * convert_flux[i]], [hist.min(), hist.max()], "x--", color="C{0:d}".format(i), alpha=0.8)
if coeff is not None:
# ax_h.plot(bins*convert_flux[i], gausspol(bins, *coeff[i]), '--', color="C{0:d}".format(i), alpha=0.8)
ax_h.plot(bins*convert_flux[i], gauss(bins, *coeff[i]), '--', color="C{0:d}".format(i), alpha=0.8)
ax_h.set_xscale('log')
ax_h.set_ylim([0., np.max([hist.max() for hist in histograms])])
ax_h.set_xlim([np.min(background*convert_flux)*1e-2, np.max(background*convert_flux)*1e2])
ax_h.plot(bins * convert_flux[i], gauss(bins, *coeff[i]), "--", color="C{0:d}".format(i), alpha=0.8)
ax_h.set_xscale("log")
ax_h.set_ylim([0.0, np.max([hist.max() for hist in histograms])])
ax_h.set_xlim([np.min(background * convert_flux) * 1e-2, np.max(background * convert_flux) * 1e2])
ax_h.set_xlabel(r"Flux [$ergs \cdot cm^{-2} \cdot s^{-1} \cdot \AA^{-1}$]")
ax_h.set_ylabel(r"Number of pixels in bin")
ax_h.set_title("Histogram for each observation")
plt.legend()
if not (savename is None):
if savename is not None:
this_savename = deepcopy(savename)
if not savename[-4:] in ['.png', '.jpg', '.pdf']:
this_savename += '_histograms.pdf'
if savename[-4:] not in [".png", ".jpg", ".pdf"]:
this_savename += "_histograms.pdf"
else:
this_savename = savename[:-4]+"_histograms"+savename[-4:]
fig_h.savefig(path_join(plots_folder, this_savename), bbox_inches='tight')
this_savename = savename[:-4] + "_histograms" + savename[-4:]
fig_h.savefig(path_join(plots_folder, this_savename), bbox_inches="tight")
fig2, ax2 = plt.subplots(figsize=(10, 10))
data0 = data[0]*convert_flux[0]
bkg_data0 = data0 <= background[0]*convert_flux[0]
instr = headers[0]['instrume']
rootname = headers[0]['rootname']
exptime = headers[0]['exptime']
filt = headers[0]['filtnam1']
data0 = data[0] * convert_flux[0]
bkg_data0 = data0 <= background[0] * convert_flux[0]
instr = headers[0]["instrume"]
rootname = headers[0]["rootname"]
exptime = headers[0]["exptime"]
filt = headers[0]["filtnam1"]
# plots
im2 = ax2.imshow(data0, norm=LogNorm(data0[data0 > 0.].mean()/10., data0.max()), origin='lower', cmap='gray')
ax2.imshow(bkg_data0, origin='lower', cmap='Reds', alpha=0.5)
if not (rectangle is None):
im2 = ax2.imshow(data0, norm=LogNorm(data0[data0 > 0.0].mean() / 10.0, data0.max()), origin="lower", cmap="gray")
ax2.imshow(bkg_data0, origin="lower", cmap="Reds", alpha=0.5)
if rectangle is not None:
x, y, width, height, angle, color = rectangle[0]
ax2.add_patch(Rectangle((x, y), width, height, edgecolor=color, fill=False, lw=2))
ax2.annotate(instr+":"+rootname, color='white', fontsize=10, xy=(0.01, 1.00), xycoords='axes fraction', verticalalignment='top', horizontalalignment='left')
ax2.annotate(filt, color='white', fontsize=14, xy=(0.01, 0.01), xycoords='axes fraction', verticalalignment='bottom', horizontalalignment='left')
ax2.annotate(str(exptime)+" s", color='white', fontsize=10, xy=(1.00, 0.01),
xycoords='axes fraction', verticalalignment='bottom', horizontalalignment='right')
ax2.set(xlabel='pixel offset', ylabel='pixel offset', aspect='equal')
ax2.annotate(
instr + ":" + rootname, color="white", fontsize=10, xy=(0.01, 1.00), xycoords="axes fraction", verticalalignment="top", horizontalalignment="left"
)
ax2.annotate(filt, color="white", fontsize=14, xy=(0.01, 0.01), xycoords="axes fraction", verticalalignment="bottom", horizontalalignment="left")
ax2.annotate(
str(exptime) + " s", color="white", fontsize=10, xy=(1.00, 0.01), xycoords="axes fraction", verticalalignment="bottom", horizontalalignment="right"
)
ax2.set(xlabel="pixel offset", ylabel="pixel offset", aspect="equal")
fig2.subplots_adjust(hspace=0, wspace=0, right=1.0)
fig2.colorbar(im2, ax=ax2, location='right', aspect=50, pad=0.025, label=r"Flux [$ergs \cdot cm^{-2} \cdot s^{-1} \cdot \AA^{-1}$]")
fig2.colorbar(im2, ax=ax2, location="right", aspect=50, pad=0.025, label=r"Flux [$ergs \cdot cm^{-2} \cdot s^{-1} \cdot \AA^{-1}$]")
if not (savename is None):
if savename is not None:
this_savename = deepcopy(savename)
if not savename[-4:] in ['.png', '.jpg', '.pdf']:
this_savename += '_'+filt+'_background_location.pdf'
if savename[-4:] not in [".png", ".jpg", ".pdf"]:
this_savename += "_" + filt + "_background_location.pdf"
else:
this_savename = savename[:-4]+'_'+filt+'_background_location'+savename[-4:]
fig2.savefig(path_join(plots_folder, this_savename), bbox_inches='tight')
if not (rectangle is None):
plot_obs(data, headers, vmin=data[data > 0.].min()*convert_flux.mean(), vmax=data[data > 0.].max()*convert_flux.mean(), rectangle=rectangle,
savename=savename+"_background_location", plots_folder=plots_folder)
elif not (rectangle is None):
plot_obs(data, headers, vmin=data[data > 0.].min(), vmax=data[data > 0.].max(), rectangle=rectangle)
this_savename = savename[:-4] + "_" + filt + "_background_location" + savename[-4:]
fig2.savefig(path_join(plots_folder, this_savename), bbox_inches="tight")
if rectangle is not None:
plot_obs(
data,
headers,
vmin=data[data > 0.0].min() * convert_flux.mean(),
vmax=data[data > 0.0].max() * convert_flux.mean(),
rectangle=rectangle,
savename=savename + "_background_location",
plots_folder=plots_folder,
)
elif rectangle is not None:
plot_obs(data, headers, vmin=data[data > 0.0].min(), vmax=data[data > 0.0].max(), rectangle=rectangle)
plt.show()
def sky_part(img):
rand_ind = np.unique((np.random.rand(np.floor(img.size/4).astype(int))*2*img.size).astype(int) % img.size)
rand_ind = np.unique((np.random.rand(np.floor(img.size / 4).astype(int)) * 2 * img.size).astype(int) % img.size)
rand_pix = img.flatten()[rand_ind]
# Intensity range
sky_med = np.median(rand_pix)
sig = np.min([img[img < sky_med].std(), img[img > sky_med].std()])
sky_range = [sky_med-2.*sig, np.max([sky_med+sig, 7e-4])] # Detector background average FOC Data Handbook Sec. 7.6
sky_range = [sky_med - 2.0 * sig, np.max([sky_med + sig, 7e-4])] # Detector background average FOC Data Handbook Sec. 7.6
sky = img[np.logical_and(img >= sky_range[0], img <= sky_range[1])]
return sky, sky_range
@@ -152,14 +168,14 @@ def bkg_estimate(img, bins=None, chi2=None, coeff=None):
bins, chi2, coeff = [8], [], []
else:
try:
bins.append(int(3./2.*bins[-1]))
bins.append(int(3.0 / 2.0 * bins[-1]))
except IndexError:
bins, chi2, coeff = [8], [], []
hist, bin_edges = np.histogram(img[img > 0], bins=bins[-1])
binning = bin_centers(bin_edges)
peak = binning[np.argmax(hist)]
bins_stdev = binning[hist > hist.max()/2.]
stdev = bins_stdev[-1]-bins_stdev[0]
bins_stdev = binning[hist > hist.max() / 2.0]
stdev = bins_stdev[-1] - bins_stdev[0]
# p0 = [hist.max(), peak, stdev, 1e-3, 1e-3, 1e-3, 1e-3]
p0 = [hist.max(), peak, stdev]
try:
@@ -168,7 +184,7 @@ def bkg_estimate(img, bins=None, chi2=None, coeff=None):
except RuntimeError:
popt = p0
# chi2.append(np.sum((hist - gausspol(binning, *popt))**2)/hist.size)
chi2.append(np.sum((hist - gauss(binning, *popt))**2)/hist.size)
chi2.append(np.sum((hist - gauss(binning, *popt)) ** 2) / hist.size)
coeff.append(popt)
return bins, chi2, coeff
@@ -223,7 +239,7 @@ def bkg_fit(data, error, mask, headers, subtract_error=True, display=False, save
for i, image in enumerate(data):
# Compute the Count-rate histogram for the image
sky, sky_range = sky_part(image[image > 0.])
sky, sky_range = sky_part(image[image > 0.0])
bins, chi2, coeff = bkg_estimate(sky)
while bins[-1] < 256:
@@ -232,9 +248,10 @@ def bkg_fit(data, error, mask, headers, subtract_error=True, display=False, save
histograms.append(hist)
binning.append(bin_centers(bin_edges))
chi2, coeff = np.array(chi2), np.array(coeff)
weights = 1/chi2**2
weights = 1 / chi2**2
weights /= weights.sum()
bkg = np.sum(weights*(coeff[:, 1]+np.abs(coeff[:, 2]) * 0.01)) # why not just use 0.01
error_bkg[i] *= bkg
@@ -246,7 +263,8 @@ def bkg_fit(data, error, mask, headers, subtract_error=True, display=False, save
# n_data_array[i][mask] = n_data_array[i][mask] - bkg
# n_data_array[i][np.logical_and(mask, n_data_array[i] <= 1e-3*bkg)] = 1e-3*bkg
std_bkg[i] = image[np.abs(image-bkg)/bkg < 1.].std()
std_bkg[i] = image[np.abs(image - bkg) / bkg < 1.0].std()
background[i] = bkg
if subtract_error > 0:
@@ -311,37 +329,43 @@ def bkg_hist(data, error, mask, headers, sub_type=None, subtract_error=True, dis
for i, image in enumerate(data):
# Compute the Count-rate histogram for the image
n_mask = np.logical_and(mask, image > 0.)
if not (sub_type is None):
n_mask = np.logical_and(mask, image > 0.0)
if sub_type is not None:
if isinstance(sub_type, int):
n_bins = sub_type
elif sub_type.lower() in ['sqrt']:
elif sub_type.lower() in ["sqrt"]:
n_bins = np.fix(np.sqrt(image[n_mask].size)).astype(int) # Square-root
elif sub_type.lower() in ['sturges']:
n_bins = np.ceil(np.log2(image[n_mask].size)).astype(int)+1 # Sturges
elif sub_type.lower() in ['rice']:
n_bins = 2*np.fix(np.power(image[n_mask].size, 1/3)).astype(int) # Rice
elif sub_type.lower() in ['scott']:
n_bins = np.fix((image[n_mask].max()-image[n_mask].min())/(3.5*image[n_mask].std()/np.power(image[n_mask].size, 1/3))).astype(int) # Scott
elif sub_type.lower() in ["sturges"]:
n_bins = np.ceil(np.log2(image[n_mask].size)).astype(int) + 1 # Sturges
elif sub_type.lower() in ["rice"]:
n_bins = 2 * np.fix(np.power(image[n_mask].size, 1 / 3)).astype(int) # Rice
elif sub_type.lower() in ["scott"]:
n_bins = np.fix((image[n_mask].max() - image[n_mask].min()) / (3.5 * image[n_mask].std() / np.power(image[n_mask].size, 1 / 3))).astype(
int
) # Scott
else:
n_bins = np.fix((image[n_mask].max()-image[n_mask].min())/(2*np.subtract(*np.percentile(image[n_mask], [75, 25])) /
np.power(image[n_mask].size, 1/3))).astype(int) # Freedman-Diaconis
n_bins = np.fix(
(image[n_mask].max() - image[n_mask].min())
/ (2 * np.subtract(*np.percentile(image[n_mask], [75, 25])) / np.power(image[n_mask].size, 1 / 3))
).astype(int) # Freedman-Diaconis
else:
n_bins = np.fix((image[n_mask].max()-image[n_mask].min())/(2*np.subtract(*np.percentile(image[n_mask], [75, 25])) /
np.power(image[n_mask].size, 1/3))).astype(int) # Freedman-Diaconis
n_bins = np.fix(
(image[n_mask].max() - image[n_mask].min()) / (2 * np.subtract(*np.percentile(image[n_mask], [75, 25])) / np.power(image[n_mask].size, 1 / 3))
).astype(int) # Freedman-Diaconis
hist, bin_edges = np.histogram(np.log(image[n_mask]), bins=n_bins)
histograms.append(hist)
binning.append(np.exp(bin_centers(bin_edges)))
# Fit a gaussian to the log-intensity histogram
bins_stdev = binning[-1][hist > hist.max()/2.]
stdev = bins_stdev[-1]-bins_stdev[0]
bins_stdev = binning[-1][hist > hist.max() / 2.0]
stdev = bins_stdev[-1] - bins_stdev[0]
# p0 = [hist.max(), binning[-1][np.argmax(hist)], stdev, 1e-3, 1e-3, 1e-3, 1e-3]
p0 = [hist.max(), binning[-1][np.argmax(hist)], stdev]
# popt, pcov = curve_fit(gausspol, binning[-1], hist, p0=p0)
popt, pcov = curve_fit(gauss, binning[-1], hist, p0=p0)
coeff.append(popt)
bkg = popt[1]+np.abs(popt[2]) * 0.01 # why not just use 0.01
error_bkg[i] *= bkg
@@ -353,7 +377,8 @@ def bkg_hist(data, error, mask, headers, sub_type=None, subtract_error=True, dis
# n_data_array[i][mask] = n_data_array[i][mask] - bkg
# n_data_array[i][np.logical_and(mask, n_data_array[i] <= 1e-3*bkg)] = 1e-3*bkg
std_bkg[i] = image[np.abs(image-bkg)/bkg < 1.].std()
std_bkg[i] = image[np.abs(image - bkg) / bkg < 1.0].std()
background[i] = bkg
if subtract_error > 0:
@@ -415,10 +440,10 @@ def bkg_mini(data, error, mask, headers, sub_shape=(15, 15), subtract_error=True
sub_shape = np.array(sub_shape)
# Make sub_shape of odd values
if not (np.all(sub_shape % 2)):
sub_shape += 1-sub_shape % 2
sub_shape += 1 - sub_shape % 2
shape = np.array(data.shape)
diff = (sub_shape-1).astype(int)
temp = np.zeros((shape[0], shape[1]-diff[0], shape[2]-diff[1]))
diff = (sub_shape - 1).astype(int)
temp = np.zeros((shape[0], shape[1] - diff[0], shape[2] - diff[1]))
n_data_array, n_error_array = deepcopy(data), deepcopy(error)
error_bkg = np.ones(n_data_array.shape)
@@ -431,18 +456,19 @@ def bkg_mini(data, error, mask, headers, sub_shape=(15, 15), subtract_error=True
# sub-image dominated by background
fmax = np.finfo(np.double).max
img = deepcopy(image)
img[1-mask] = fmax/(diff[0]*diff[1])
img[1 - mask] = fmax / (diff[0] * diff[1])
for r in range(temp.shape[1]):
for c in range(temp.shape[2]):
temp[i][r, c] = np.where(mask[r, c], img[r:r+diff[0], c:c+diff[1]].sum(), fmax/(diff[0]*diff[1]))
temp[i][r, c] = np.where(mask[r, c], img[r : r + diff[0], c : c + diff[1]].sum(), fmax / (diff[0] * diff[1]))
minima = np.unravel_index(np.argmin(temp.sum(axis=0)), temp.shape[1:])
for i, image in enumerate(data):
rectangle.append([minima[1], minima[0], sub_shape[1], sub_shape[0], 0., 'r'])
rectangle.append([minima[1], minima[0], sub_shape[1], sub_shape[0], 0.0, "r"])
# Compute error : root mean square of the background
sub_image = image[minima[0]:minima[0]+sub_shape[0], minima[1]:minima[1]+sub_shape[1]]
sub_image = image[minima[0] : minima[0] + sub_shape[0], minima[1] : minima[1] + sub_shape[1]]
# bkg = np.std(sub_image) # Previously computed using standard deviation over the background
bkg = np.sqrt(np.sum(sub_image**2)/sub_image.size)*0.01 if subtract_error > 0 else np.sqrt(np.sum(sub_image**2)/sub_image.size)
error_bkg[i] *= bkg
@@ -453,7 +479,8 @@ def bkg_mini(data, error, mask, headers, sub_shape=(15, 15), subtract_error=True
# n_data_array[i][mask] = n_data_array[i][mask] - bkg
# n_data_array[i][np.logical_and(mask, n_data_array[i] <= 1e-3*bkg)] = 1e-3*bkg
std_bkg[i] = image[np.abs(image-bkg)/bkg < 1.].std()
std_bkg[i] = image[np.abs(image - bkg) / bkg < 1.0].std()
background[i] = bkg
if subtract_error > 0:
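The bin-count rules referenced in bkg_hist above (sqrt, Sturges, Rice, Scott, Freedman-Diaconis) reduce to one-liners; a standalone sketch on synthetic data (editor's illustration, not part of the commit):

import numpy as np

x = np.random.default_rng(0).lognormal(mean=-7.0, sigma=0.5, size=10_000)  # synthetic sky counts
n = x.size
iqr = np.subtract(*np.percentile(x, [75, 25]))
n_bins = {
    "sqrt": int(np.sqrt(n)),                   # 100
    "sturges": int(np.ceil(np.log2(n))) + 1,   # 15
    "rice": 2 * int(np.power(n, 1 / 3)),       # 42
    "scott": int((x.max() - x.min()) / (3.5 * x.std() / np.power(n, 1 / 3))),
    "freedman-diaconis": int((x.max() - x.min()) / (2 * iqr / np.power(n, 1 / 3))),
}
print(n_bins)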

View File

@@ -3,6 +3,7 @@ Library functions for graham algorithm implementation (find the convex hull of a
"""
from copy import deepcopy
import numpy as np
@@ -16,23 +17,23 @@ def clean_ROI(image):
row, col = np.indices(shape)
for i in range(0, shape[0]):
r = row[i, :][image[i, :] > 0.]
c = col[i, :][image[i, :] > 0.]
r = row[i, :][image[i, :] > 0.0]
c = col[i, :][image[i, :] > 0.0]
if len(r) > 1 and len(c) > 1:
H.append((r[0], c[0]))
H.append((r[-1], c[-1]))
H = np.array(H)
for j in range(0, shape[1]):
r = row[:, j][image[:, j] > 0.]
c = col[:, j][image[:, j] > 0.]
r = row[:, j][image[:, j] > 0.0]
c = col[:, j][image[:, j] > 0.0]
if len(r) > 1 and len(c) > 1:
J.append((r[0], c[0]))
J.append((r[-1], c[-1]))
J = np.array(J)
xmin = np.min([H[:, 1].min(), J[:, 1].min()])
xmax = np.max([H[:, 1].max(), J[:, 1].max()])+1
xmax = np.max([H[:, 1].max(), J[:, 1].max()]) + 1
ymin = np.min([H[:, 0].min(), J[:, 0].min()])
ymax = np.max([H[:, 0].max(), J[:, 0].max()])+1
ymax = np.max([H[:, 0].max(), J[:, 0].max()]) + 1
return np.array([xmin, xmax, ymin, ymax])
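# Editor's sketch (illustrative, not part of the commit): cropping an image to
# its non-null region with clean_ROI, mirroring the usage in save_Stokes:
#   xmin, xmax, ymin, ymax = clean_ROI(image)
#   cropped = image[ymin:ymax, xmin:xmax]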
@@ -81,7 +82,7 @@ def distance(A, B):
Euclidian distance between A, B.
"""
x, y = vector(A, B)
return np.sqrt(x ** 2 + y ** 2)
return np.sqrt(x**2 + y**2)
# Define lexicographic and composition order
@@ -174,8 +175,8 @@ def partition(s, left, right, order):
temp = deepcopy(s[i])
s[i] = deepcopy(s[j])
s[j] = deepcopy(temp)
temp = deepcopy(s[i+1])
s[i+1] = deepcopy(s[right])
temp = deepcopy(s[i + 1])
s[i + 1] = deepcopy(s[right])
s[right] = deepcopy(temp)
return i + 1
@@ -206,16 +207,32 @@ def sort_angles_distances(Omega, s):
Sort the list of points 's' for the composition order given reference point
Omega.
"""
def order(A, B): return comp(Omega, A, B)
def order(A, B):
return comp(Omega, A, B)
quicksort(s, order)
# Define functions for stacks (using Python lists with stack operations).
def empty_stack(): return []
def stack(S, A): S.append(A)
def unstack(S): S.pop()
def stack_top(S): return S[-1]
def stack_sub_top(S): return S[-2]
def empty_stack():
return []
def stack(S, A):
S.append(A)
def unstack(S):
S.pop()
def stack_top(S):
return S[-1]
def stack_sub_top(S):
return S[-2]
# Alignment handling
@@ -299,7 +316,7 @@ def convex_hull(H):
return S
def image_hull(image, step=5, null_val=0., inside=True):
def image_hull(image, step=5, null_val=0.0, inside=True):
"""
Compute the convex hull of a 2D image and return the 4 relevant coordinates
of the maximum included rectangle (ie. crop image to maximum rectangle).
@@ -331,7 +348,7 @@ def image_hull(image, step=5, null_val=0., inside=True):
H = []
shape = np.array(image.shape)
row, col = np.indices(shape)
for i in range(0, int(min(shape)/2), step):
for i in range(0, int(min(shape) / 2), step):
r1, r2 = row[i, :][image[i, :] > null_val], row[-i, :][image[-i, :] > null_val]
c1, c2 = col[i, :][image[i, :] > null_val], col[-i, :][image[-i, :] > null_val]
if r1.shape[0] > 1:
@@ -349,10 +366,10 @@ def image_hull(image, step=5, null_val=0., inside=True):
# S1 = S[x_min*y_max][np.argmax(S[x_min*y_max][:, 1])]
# S2 = S[x_max*y_min][np.argmin(S[x_max*y_min][:, 1])]
# S3 = S[x_max*y_max][np.argmax(S[x_max*y_max][:, 0])]
S0 = S[x_min*y_min][np.abs(0-S[x_min*y_min].sum(axis=1)).min() == np.abs(0-S[x_min*y_min].sum(axis=1))][0]
S1 = S[x_min*y_max][np.abs(shape[1]-S[x_min*y_max].sum(axis=1)).min() == np.abs(shape[1]-S[x_min*y_max].sum(axis=1))][0]
S2 = S[x_max*y_min][np.abs(shape[0]-S[x_max*y_min].sum(axis=1)).min() == np.abs(shape[0]-S[x_max*y_min].sum(axis=1))][0]
S3 = S[x_max*y_max][np.abs(shape.sum()-S[x_max*y_max].sum(axis=1)).min() == np.abs(shape.sum()-S[x_max*y_max].sum(axis=1))][0]
S0 = S[x_min * y_min][np.abs(0 - S[x_min * y_min].sum(axis=1)).min() == np.abs(0 - S[x_min * y_min].sum(axis=1))][0]
S1 = S[x_min * y_max][np.abs(shape[1] - S[x_min * y_max].sum(axis=1)).min() == np.abs(shape[1] - S[x_min * y_max].sum(axis=1))][0]
S2 = S[x_max * y_min][np.abs(shape[0] - S[x_max * y_min].sum(axis=1)).min() == np.abs(shape[0] - S[x_max * y_min].sum(axis=1))][0]
S3 = S[x_max * y_max][np.abs(shape.sum() - S[x_max * y_max].sum(axis=1)).min() == np.abs(shape.sum() - S[x_max * y_max].sum(axis=1))][0]
# Get the vertex of the biggest included rectangle
if inside:
f0 = np.max([S0[0], S1[0]])

View File

@@ -1,6 +1,7 @@
"""
Library functions for phase cross-correlation computation.
"""
# Prefer FFTs via the new scipy.fft module when available (SciPy 1.4+)
# Otherwise fall back to numpy.fft.
# Like numpy 1.15+ scipy 1.3+ is also using pocketfft, but a newer
@@ -13,8 +14,7 @@ except ImportError:
import numpy as np
def _upsampled_dft(data, upsampled_region_size, upsample_factor=1,
axis_offsets=None):
def _upsampled_dft(data, upsampled_region_size, upsample_factor=1, axis_offsets=None):
"""
Upsampled DFT by matrix multiplication.
This code is intended to provide the same result as if the following
@@ -48,26 +48,27 @@ def _upsampled_dft(data, upsampled_region_size, upsample_factor=1,
"""
# if people pass in an integer, expand it to a list of equal-sized sections
if not hasattr(upsampled_region_size, "__iter__"):
upsampled_region_size = [upsampled_region_size, ] * data.ndim
upsampled_region_size = [
upsampled_region_size,
] * data.ndim
else:
if len(upsampled_region_size) != data.ndim:
raise ValueError("shape of upsampled region sizes must be equal "
"to input data's number of dimensions.")
raise ValueError("shape of upsampled region sizes must be equal " "to input data's number of dimensions.")
if axis_offsets is None:
axis_offsets = [0, ] * data.ndim
axis_offsets = [
0,
] * data.ndim
else:
if len(axis_offsets) != data.ndim:
raise ValueError("number of axis offsets must be equal to input "
"data's number of dimensions.")
raise ValueError("number of axis offsets must be equal to input " "data's number of dimensions.")
im2pi = 1j * 2 * np.pi
dim_properties = list(zip(data.shape, upsampled_region_size, axis_offsets))
for (n_items, ups_size, ax_offset) in dim_properties[::-1]:
kernel = ((np.arange(ups_size) - ax_offset)[:, None]
* fft.fftfreq(n_items, upsample_factor))
for n_items, ups_size, ax_offset in dim_properties[::-1]:
kernel = (np.arange(ups_size) - ax_offset)[:, None] * fft.fftfreq(n_items, upsample_factor)
kernel = np.exp(-im2pi * kernel)
# Equivalent to:
@@ -100,14 +101,11 @@ def _compute_error(cross_correlation_max, src_amp, target_amp):
target_amp : float
The normalized average image intensity of the target image
"""
error = 1.0 - cross_correlation_max * cross_correlation_max.conj() /\
(src_amp * target_amp)
error = 1.0 - cross_correlation_max * cross_correlation_max.conj() / (src_amp * target_amp)
return np.sqrt(np.abs(error))
def phase_cross_correlation(reference_image, moving_image, *,
upsample_factor=1, space="real",
return_error=True, overlap_ratio=0.3):
def phase_cross_correlation(reference_image, moving_image, *, upsample_factor=1, space="real", return_error=True, overlap_ratio=0.3):
"""
Efficient subpixel image translation registration by cross-correlation.
This code gives the same precision as the FFT upsampled cross-correlation
@@ -174,11 +172,11 @@ def phase_cross_correlation(reference_image, moving_image, *,
raise ValueError("images must be same shape")
# assume complex data is already in Fourier space
if space.lower() == 'fourier':
if space.lower() == "fourier":
src_freq = reference_image
target_freq = moving_image
# real data needs to be fft'd.
elif space.lower() == 'real':
elif space.lower() == "real":
src_freq = fft.fftn(reference_image)
target_freq = fft.fftn(moving_image)
else:
@@ -190,8 +188,7 @@ def phase_cross_correlation(reference_image, moving_image, *,
cross_correlation = fft.ifftn(image_product)
# Locate maximum
maxima = np.unravel_index(np.argmax(np.abs(cross_correlation)),
cross_correlation.shape)
maxima = np.unravel_index(np.argmax(np.abs(cross_correlation)), cross_correlation.shape)
midpoints = np.array([np.fix(axis_size / 2) for axis_size in shape])
shifts = np.stack(maxima).astype(np.float64)
@@ -213,14 +210,10 @@ def phase_cross_correlation(reference_image, moving_image, *,
dftshift = np.fix(upsampled_region_size / 2.0)
upsample_factor = np.array(upsample_factor, dtype=np.float64)
# Matrix multiply DFT around the current shift estimate
sample_region_offset = dftshift - shifts*upsample_factor
cross_correlation = _upsampled_dft(image_product.conj(),
upsampled_region_size,
upsample_factor,
sample_region_offset).conj()
sample_region_offset = dftshift - shifts * upsample_factor
cross_correlation = _upsampled_dft(image_product.conj(), upsampled_region_size, upsample_factor, sample_region_offset).conj()
# Locate maximum and map back to original pixel grid
maxima = np.unravel_index(np.argmax(np.abs(cross_correlation)),
cross_correlation.shape)
maxima = np.unravel_index(np.argmax(np.abs(cross_correlation)), cross_correlation.shape)
CCmax = cross_correlation[maxima]
maxima = np.stack(maxima).astype(np.float64) - dftshift
@@ -240,10 +233,8 @@ def phase_cross_correlation(reference_image, moving_image, *,
if return_error:
# Redirect user to masked_phase_cross_correlation if NaNs are observed
if np.isnan(CCmax) or np.isnan(src_amp) or np.isnan(target_amp):
raise ValueError(
"NaN values found, please remove NaNs from your input data")
raise ValueError("NaN values found, please remove NaNs from your input data")
return shifts, _compute_error(CCmax, src_amp, target_amp), \
_compute_phasediff(CCmax)
return shifts, _compute_error(CCmax, src_amp, target_amp), _compute_phasediff(CCmax)
else:
return shifts
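A registration sketch (editor's illustration, not part of the commit): recover a subpixel offset between two frames; the synthetic shift and the random test images are placeholders, and phase_cross_correlation is assumed to be in scope from this module:

import numpy as np
from scipy.ndimage import shift as sc_shift

rng = np.random.default_rng(1)
reference = rng.random((128, 128))
moving = sc_shift(reference, (2.3, -1.7), order=1, cval=0.0)  # known synthetic offset

shifts, error, phasediff = phase_cross_correlation(reference, moving, upsample_factor=100)
# `shifts` is the detected (row, column) offset; depending on the sign convention
# documented in the docstring, `shifts` or `-shifts` realigns `moving` onto `reference`.
realigned = sc_shift(moving, shifts, order=1, cval=0.0)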

View File

@@ -28,8 +28,8 @@ prototypes :
"""
import numpy as np
from scipy.signal import convolve
from astropy.io import fits
from scipy.signal import convolve
def abs2(x):
@@ -37,9 +37,9 @@ def abs2(x):
if np.iscomplexobj(x):
x_re = x.real
x_im = x.imag
return x_re*x_re + x_im*x_im
return x_re * x_re + x_im * x_im
else:
return x*x
return x * x
def zeropad(arr, shape):
@@ -53,7 +53,7 @@ def zeropad(arr, shape):
diff = np.asarray(shape) - np.asarray(arr.shape)
if diff.min() < 0:
raise ValueError("output dimensions must be larger or equal input dimensions")
offset = diff//2
offset = diff // 2
z = np.zeros(shape, dtype=arr.dtype)
if rank == 1:
i0 = offset[0]
@@ -115,10 +115,10 @@ def zeropad(arr, shape):
def gaussian2d(x, y, sigma):
return np.exp(-(x**2+y**2)/(2*sigma**2))/(2*np.pi*sigma**2)
return np.exp(-(x**2 + y**2) / (2 * sigma**2)) / (2 * np.pi * sigma**2)
def gaussian_psf(FWHM=1., shape=(5, 5)):
def gaussian_psf(FWHM=1.0, shape=(5, 5)):
"""
Define the gaussian Point-Spread-Function of chosen shape and FWHM.
----------
@@ -136,13 +136,13 @@ def gaussian_psf(FWHM=1., shape=(5, 5)):
Kernel containing the weights of the desired gaussian PSF.
"""
# Compute standard deviation from FWHM
stdev = FWHM/(2.*np.sqrt(2.*np.log(2.)))
stdev = FWHM / (2.0 * np.sqrt(2.0 * np.log(2.0)))
# Create kernel of desired shape
x, y = np.meshgrid(np.arange(-shape[0]/2, shape[0]/2), np.arange(-shape[1]/2, shape[1]/2))
x, y = np.meshgrid(np.arange(-shape[0] / 2, shape[0] / 2), np.arange(-shape[1] / 2, shape[1] / 2))
kernel = gaussian2d(x, y, stdev)
return kernel/kernel.sum()
return kernel / kernel.sum()
def from_file_psf(filename):
@@ -164,7 +164,7 @@ def from_file_psf(filename):
if isinstance(psf, np.ndarray) or len(psf) != 2:
raise ValueError("Invalid PSF image in PrimaryHDU at {0:s}".format(filename))
# Return the normalized Point Spread Function
kernel = psf/psf.max()
kernel = psf / psf.max()
return kernel
@@ -199,14 +199,14 @@ def wiener(image, psf, alpha=0.1, clip=True):
ft_y = np.fft.fftn(im_deconv)
ft_h = np.fft.fftn(np.fft.ifftshift(psf))
ft_x = ft_h.conj()*ft_y / (abs2(ft_h) + alpha)
ft_x = ft_h.conj() * ft_y / (abs2(ft_h) + alpha)
im_deconv = np.fft.ifftn(ft_x).real
if clip:
im_deconv[im_deconv > 1] = 1
im_deconv[im_deconv < -1] = -1
return im_deconv/im_deconv.max()
return im_deconv / im_deconv.max()
def van_cittert(image, psf, alpha=0.1, iterations=20, clip=True, filter_epsilon=None):
@@ -241,12 +241,12 @@ def van_cittert(image, psf, alpha=0.1, iterations=20, clip=True, filter_epsilon=
im_deconv = image.copy()
for _ in range(iterations):
conv = convolve(im_deconv, psf, mode='same')
conv = convolve(im_deconv, psf, mode="same")
if filter_epsilon:
relative_blur = np.where(conv < filter_epsilon, 0, image - conv)
else:
relative_blur = image - conv
im_deconv += alpha*relative_blur
im_deconv += alpha * relative_blur
if clip:
im_deconv[im_deconv > 1] = 1
@@ -290,12 +290,12 @@ def richardson_lucy(image, psf, iterations=20, clip=True, filter_epsilon=None):
psf_mirror = np.flip(psf)
for _ in range(iterations):
conv = convolve(im_deconv, psf, mode='same')
conv = convolve(im_deconv, psf, mode="same")
if filter_epsilon:
relative_blur = np.where(conv < filter_epsilon, 0, image / conv)
else:
relative_blur = image / conv
im_deconv *= convolve(relative_blur, psf_mirror, mode='same')
im_deconv *= convolve(relative_blur, psf_mirror, mode="same")
if clip:
im_deconv[im_deconv > 1] = 1
@@ -335,12 +335,12 @@ def one_step_gradient(image, psf, iterations=20, clip=True, filter_epsilon=None)
psf_mirror = np.flip(psf)
for _ in range(iterations):
conv = convolve(im_deconv, psf, mode='same')
conv = convolve(im_deconv, psf, mode="same")
if filter_epsilon:
relative_blur = np.where(conv < filter_epsilon, 0, image - conv)
else:
relative_blur = image - conv
im_deconv += convolve(relative_blur, psf_mirror, mode='same')
im_deconv += convolve(relative_blur, psf_mirror, mode="same")
if clip:
im_deconv[im_deconv > 1] = 1
@@ -387,20 +387,20 @@ def conjgrad(image, psf, alpha=0.1, error=None, iterations=20):
if error is None:
wgt = np.ones(image.shape)
else:
wgt = image/error
wgt = image / error
wgt /= wgt.max()
def W(x):
"""Define W operator : apply weights"""
return wgt*x
return wgt * x
def H(x):
"""Define H operator : convolution with PSF"""
return np.fft.ifftn(ft_h*np.fft.fftn(x)).real
return np.fft.ifftn(ft_h * np.fft.fftn(x)).real
def Ht(x):
"""Define Ht operator : transpose of H"""
return np.fft.ifftn(ft_h.conj()*np.fft.fftn(x)).real
return np.fft.ifftn(ft_h.conj() * np.fft.fftn(x)).real
def DtD(x):
"""Returns the result of D'.D.x where D is a (multi-dimensional)
@@ -444,7 +444,7 @@ def conjgrad(image, psf, alpha=0.1, error=None, iterations=20):
def A(x):
"""Define symetric positive semi definite operator A"""
return Ht(W(H(x)))+alpha*DtD(x)
return Ht(W(H(x))) + alpha * DtD(x)
# Define obtained vector A.x = b
b = Ht(W(image))
@@ -458,7 +458,7 @@ def conjgrad(image, psf, alpha=0.1, error=None, iterations=20):
r = np.copy(b)
x = np.zeros(b.shape, dtype=b.dtype)
rho = inner(r, r)
epsilon = np.max([0., 1e-5*np.sqrt(rho)])
epsilon = np.max([0.0, 1e-5 * np.sqrt(rho)])
# Conjugate gradient iterations.
beta = 0.0
@@ -476,26 +476,25 @@ def conjgrad(image, psf, alpha=0.1, error=None, iterations=20):
if beta == 0.0:
p = r
else:
p = r + beta*p
p = r + beta * p
# Make optimal step along search direction.
q = A(p)
gamma = inner(p, q)
if gamma <= 0.0:
raise ValueError("Operator A is not positive definite")
alpha = rho/gamma
x += alpha*p
r -= alpha*q
alpha = rho / gamma
x += alpha * p
r -= alpha * q
rho_prev, rho = rho, inner(r, r)
beta = rho/rho_prev
beta = rho / rho_prev
# Return normalized solution
im_deconv = x/x.max()
im_deconv = x / x.max()
return im_deconv
def deconvolve_im(image, psf, alpha=0.1, error=None, iterations=20, clip=True,
filter_epsilon=None, algo='richardson'):
def deconvolve_im(image, psf, alpha=0.1, error=None, iterations=20, clip=True, filter_epsilon=None, algo="richardson"):
"""
Prepare an image for deconvolution using a chosen algorithm and return
results.
@@ -537,27 +536,23 @@ def deconvolve_im(image, psf, alpha=0.1, error=None, iterations=20, clip=True,
"""
# Normalize image to highest pixel value
pxmax = image[np.isfinite(image)].max()
if pxmax == 0.:
if pxmax == 0.0:
raise ValueError("Invalid image")
norm_image = image/pxmax
norm_image = image / pxmax
# Deconvolve normalized image
if algo.lower() in ['wiener', 'wiener simple']:
if algo.lower() in ["wiener", "wiener simple"]:
norm_deconv = wiener(image=norm_image, psf=psf, alpha=alpha, clip=clip)
elif algo.lower() in ['van-cittert', 'vancittert', 'cittert']:
norm_deconv = van_cittert(image=norm_image, psf=psf, alpha=alpha,
iterations=iterations, clip=clip, filter_epsilon=filter_epsilon)
elif algo.lower() in ['1grad', 'one_step_grad', 'one step grad']:
norm_deconv = one_step_gradient(image=norm_image, psf=psf,
iterations=iterations, clip=clip, filter_epsilon=filter_epsilon)
elif algo.lower() in ['conjgrad', 'conj_grad', 'conjugate gradient']:
norm_deconv = conjgrad(image=norm_image, psf=psf, alpha=alpha,
error=error, iterations=iterations)
elif algo.lower() in ["van-cittert", "vancittert", "cittert"]:
norm_deconv = van_cittert(image=norm_image, psf=psf, alpha=alpha, iterations=iterations, clip=clip, filter_epsilon=filter_epsilon)
elif algo.lower() in ["1grad", "one_step_grad", "one step grad"]:
norm_deconv = one_step_gradient(image=norm_image, psf=psf, iterations=iterations, clip=clip, filter_epsilon=filter_epsilon)
elif algo.lower() in ["conjgrad", "conj_grad", "conjugate gradient"]:
norm_deconv = conjgrad(image=norm_image, psf=psf, alpha=alpha, error=error, iterations=iterations)
else: # Defaults to Richardson-Lucy
norm_deconv = richardson_lucy(image=norm_image, psf=psf,
iterations=iterations, clip=clip, filter_epsilon=filter_epsilon)
norm_deconv = richardson_lucy(image=norm_image, psf=psf, iterations=iterations, clip=clip, filter_epsilon=filter_epsilon)
# Output deconvolved image with original pxmax value
im_deconv = pxmax*norm_deconv
im_deconv = pxmax * norm_deconv
return im_deconv
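A deconvolution sketch (editor's illustration, not part of the commit) tying gaussian_psf to deconvolve_im; the input image is a placeholder, and FWHM = 3.1 px matches the default psf_FWHM in FOC_reduction (sigma = FWHM / (2 * sqrt(2 * ln 2)) ≈ 1.32 px):

import numpy as np
from lib.deconvolve import deconvolve_im, gaussian_psf

blurred = np.random.default_rng(2).random((256, 256))  # placeholder image
psf = gaussian_psf(FWHM=3.1, shape=(151, 151))          # normalized Gaussian kernel
restored = deconvolve_im(blurred, psf, alpha=0.1, iterations=20, algo="richardson")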

View File

@@ -9,11 +9,14 @@ prototypes :
Save computed polarimetry parameters to a single fits file (and return HDUList)
"""
import numpy as np
from os.path import join as path_join
import numpy as np
from astropy.io import fits
from astropy.wcs import WCS
from .convex_hull import clean_ROI
from .utils import wcs_PA
def get_obs_data(infiles, data_folder="", compute_flux=False):
@@ -36,59 +39,61 @@ def get_obs_data(infiles, data_folder="", compute_flux=False):
headers : header list
List of headers objects corresponding to each image in data_array.
"""
data_array, headers = [], []
data_array, headers, wcs_array = [], [], []
for i in range(len(infiles)):
with fits.open(path_join(data_folder, infiles[i])) as f:
with fits.open(path_join(data_folder, infiles[i]), mode="update") as f:
headers.append(f[0].header)
data_array.append(f[0].data)
wcs_array.append(WCS(header=f[0].header, fobj=f).celestial)
f.flush()
data_array = np.array(data_array, dtype=np.double)
# Prevent negative count value in imported data
for i in range(len(data_array)):
data_array[i][data_array[i] < 0.] = 0.
data_array[i][data_array[i] < 0.0] = 0.0
# force WCS to convention PCi_ja unitary, cdelt in deg
for header in headers:
new_wcs = WCS(header).celestial.deepcopy()
if new_wcs.wcs.has_cd() or (new_wcs.wcs.cdelt[:2] == np.array([1., 1.])).all():
for wcs, header in zip(wcs_array, headers):
new_wcs = wcs.deepcopy()
if new_wcs.wcs.has_cd() or (new_wcs.wcs.cdelt[:2] == np.array([1.0, 1.0])).all():
# Update WCS with relevant information
if new_wcs.wcs.has_cd():
old_cd = new_wcs.wcs.cd
del new_wcs.wcs.cd
keys = list(new_wcs.to_header().keys())+['CD1_1', 'CD1_2', 'CD1_3', 'CD2_1', 'CD2_2', 'CD2_3', 'CD3_1', 'CD3_2', 'CD3_3']
keys = list(new_wcs.to_header().keys()) + ["CD1_1", "CD1_2", "CD1_3", "CD2_1", "CD2_2", "CD2_3", "CD3_1", "CD3_2", "CD3_3"]
for key in keys:
header.remove(key, ignore_missing=True)
new_cdelt = np.linalg.eig(old_cd)[0]
elif (new_wcs.wcs.cdelt == np.array([1., 1.])).all() and \
(new_wcs.array_shape in [(512, 512), (1024, 512), (512, 1024), (1024, 1024)]):
old_cd = new_wcs.wcs.pc
new_wcs.wcs.pc = np.dot(old_cd, np.diag(1./new_cdelt))
new_cdelt = np.linalg.eigvals(wcs.wcs.cd)
new_cdelt.sort()
new_wcs.wcs.pc = wcs.wcs.cd.dot(np.diag(1.0 / new_cdelt))
new_wcs.wcs.cdelt = new_cdelt
for key, val in new_wcs.to_header().items():
header[key] = val
# header['orientat'] = princ_angle(float(header['orientat']))
try:
_ = header["ORIENTAT"]
except KeyError:
header["ORIENTAT"] = wcs_PA(new_wcs.wcs.pc[1, 0], np.diag(new_wcs.wcs.pc).mean())
# force WCS for POL60 to have same pixel size as POL0 and POL120
is_pol60 = np.array([head['filtnam1'].lower() == 'pol60' for head in headers], dtype=bool)
cdelt = np.round(np.array([WCS(head).wcs.cdelt[:2] for head in headers]), 14)
is_pol60 = np.array([head["filtnam1"].lower() == "pol60" for head in headers], dtype=bool)
cdelt = np.round(np.array([WCS(head).wcs.cdelt[:2] for head in headers]), 10)
if np.unique(cdelt[np.logical_not(is_pol60)], axis=0).size != 2:
print(np.unique(cdelt[np.logical_not(is_pol60)], axis=0))
raise ValueError("Not all images have same pixel size")
else:
for i in np.arange(len(headers))[is_pol60]:
headers[i]['cdelt1'], headers[i]['cdelt2'] = np.unique(cdelt[np.logical_not(is_pol60)], axis=0)[0]
headers[i]["cdelt1"], headers[i]["cdelt2"] = np.unique(cdelt[np.logical_not(is_pol60)], axis=0)[0]
if compute_flux:
for i in range(len(infiles)):
# Compute the flux in counts/sec
data_array[i] /= headers[i]['EXPTIME']
data_array[i] /= headers[i]["EXPTIME"]
return data_array, headers
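# Editor's sketch (illustrative, not part of the commit): typical call with
# placeholder HST/FOC rootnames; compute_flux=True divides each image by its
# EXPTIME so data_array holds count rates.
#   data_array, headers = get_obs_data(
#       ["x3nl0101r_c0f.fits", "x3nl0102r_c0f.fits"], data_folder="./data/NGC1068", compute_flux=True
#   )
#   print(data_array.shape, headers[0]["filtnam1"])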
def save_Stokes(I_stokes, Q_stokes, U_stokes, Stokes_cov, P, debiased_P, s_P,
s_P_P, PA, s_PA, s_PA_P, headers, data_mask, filename, data_folder="",
return_hdul=False):
def save_Stokes(
I_stokes, Q_stokes, U_stokes, Stokes_cov, P, debiased_P, s_P, s_P_P, PA, s_PA, s_PA_P, header_stokes, data_mask, filename, data_folder="", return_hdul=False
):
"""
Save computed polarimetry parameters to a single fits file,
updating header accordingly.
@@ -124,81 +129,90 @@ def save_Stokes(I_stokes, Q_stokes, U_stokes, Stokes_cov, P, debiased_P, s_P,
Only returned if return_hdul is True.
"""
# Create new WCS object given the modified images
ref_header = headers[0]
exp_tot = np.array([header['exptime'] for header in headers]).sum()
new_wcs = WCS(ref_header).deepcopy()
new_wcs = WCS(header_stokes).deepcopy()
if data_mask.shape != (1, 1):
vertex = clean_ROI(data_mask)
shape = vertex[1::2]-vertex[0::2]
shape = vertex[1::2] - vertex[0::2]
new_wcs.array_shape = shape
new_wcs.wcs.crpix = np.array(new_wcs.wcs.crpix) - vertex[0::-2]
header = new_wcs.to_header()
header['telescop'] = (ref_header['telescop'] if 'TELESCOP' in list(ref_header.keys()) else 'HST', 'telescope used to acquire data')
header['instrume'] = (ref_header['instrume'] if 'INSTRUME' in list(ref_header.keys()) else 'FOC', 'identifier for instrument used to acquire data')
header['photplam'] = (ref_header['photplam'], 'Pivot Wavelength')
header['photflam'] = (ref_header['photflam'], 'Inverse Sensitivity in DN/sec/cm**2/Angst')
header['exptot'] = (exp_tot, 'Total exposure time in sec')
header['proposid'] = (ref_header['proposid'], 'PEP proposal identifier for observation')
header['targname'] = (ref_header['targname'], 'Target name')
header['orientat'] = (ref_header['orientat'], 'Angle between North and the y-axis of the image')
header['filename'] = (filename, 'Original filename')
header['P_int'] = (ref_header['P_int'], 'Integrated polarization degree')
header['P_int_err'] = (ref_header['P_int_err'], 'Integrated polarization degree error')
header['PA_int'] = (ref_header['PA_int'], 'Integrated polarization angle')
header['PA_int_err'] = (ref_header['PA_int_err'], 'Integrated polarization angle error')
header["TELESCOP"] = (header_stokes["TELESCOP"] if "TELESCOP" in list(header_stokes.keys()) else "HST", "telescope used to acquire data")
header["INSTRUME"] = (header_stokes["INSTRUME"] if "INSTRUME" in list(header_stokes.keys()) else "FOC", "identifier for instrument used to acuire data")
header["PHOTPLAM"] = (header_stokes["PHOTPLAM"], "Pivot Wavelength")
header["PHOTFLAM"] = (header_stokes["PHOTFLAM"], "Inverse Sensitivity in DN/sec/cm**2/Angst")
header["EXPTIME"] = (header_stokes["EXPTIME"], "Total exposure time in sec")
header["PROPOSID"] = (header_stokes["PROPOSID"], "PEP proposal identifier for observation")
header["TARGNAME"] = (header_stokes["TARGNAME"], "Target name")
header["ORIENTAT"] = (header_stokes["ORIENTAT"], "Angle between North and the y-axis of the image")
header["FILENAME"] = (filename, "ORIGINAL FILENAME")
header["BKG_TYPE"] = (header_stokes["BKG_TYPE"], "Bkg estimation method used during reduction")
header["BKG_SUB"] = (header_stokes["BKG_SUB"], "Amount of bkg subtracted from images")
header["SMOOTH"] = (header_stokes["SMOOTH"] if "SMOOTH" in list(header_stokes.keys()) else "None", "Smoothing method used during reduction")
header["SAMPLING"] = (header_stokes["SAMPLING"] if "SAMPLING" in list(header_stokes.keys()) else "None", "Resampling performed during reduction")
header["P_INT"] = (header_stokes["P_INT"], "Integrated polarization degree")
header["sP_INT"] = (header_stokes["sP_INT"], "Integrated polarization degree error")
header["PA_INT"] = (header_stokes["PA_INT"], "Integrated polarization angle")
header["sPA_INT"] = (header_stokes["sPA_INT"], "Integrated polarization angle error")
# Crop Data to mask
if data_mask.shape != (1, 1):
I_stokes = I_stokes[vertex[2] : vertex[3], vertex[0] : vertex[1]]
Q_stokes = Q_stokes[vertex[2] : vertex[3], vertex[0] : vertex[1]]
U_stokes = U_stokes[vertex[2] : vertex[3], vertex[0] : vertex[1]]
P = P[vertex[2] : vertex[3], vertex[0] : vertex[1]]
debiased_P = debiased_P[vertex[2] : vertex[3], vertex[0] : vertex[1]]
s_P = s_P[vertex[2] : vertex[3], vertex[0] : vertex[1]]
s_P_P = s_P_P[vertex[2] : vertex[3], vertex[0] : vertex[1]]
PA = PA[vertex[2] : vertex[3], vertex[0] : vertex[1]]
s_PA = s_PA[vertex[2] : vertex[3], vertex[0] : vertex[1]]
s_PA_P = s_PA_P[vertex[2] : vertex[3], vertex[0] : vertex[1]]
new_Stokes_cov = np.zeros((*Stokes_cov.shape[:-2], *shape[::-1]))
for i in range(3):
for j in range(3):
Stokes_cov[i, j][(1 - data_mask).astype(bool)] = 0.0
new_Stokes_cov[i, j] = Stokes_cov[i, j][vertex[2] : vertex[3], vertex[0] : vertex[1]]
Stokes_cov = new_Stokes_cov
data_mask = data_mask[vertex[2] : vertex[3], vertex[0] : vertex[1]]
data_mask = data_mask.astype(float, copy=False)
# Create HDUList object
hdul = fits.HDUList([])
# Add I_stokes as PrimaryHDU
header["datatype"] = ("I_stokes", "type of data stored in the HDU")
I_stokes[(1 - data_mask).astype(bool)] = 0.0
primary_hdu = fits.PrimaryHDU(data=I_stokes, header=header)
primary_hdu.name = "I_stokes"
hdul.append(primary_hdu)
# Add Q, U, Stokes_cov, P, s_P, PA, s_PA to the HDUList
for data, name in [
[Q_stokes, "Q_stokes"],
[U_stokes, "U_stokes"],
[Stokes_cov, "IQU_cov_matrix"],
[P, "Pol_deg"],
[debiased_P, "Pol_deg_debiased"],
[s_P, "Pol_deg_err"],
[s_P_P, "Pol_deg_err_Poisson_noise"],
[PA, "Pol_ang"],
[s_PA, "Pol_ang_err"],
[s_PA_P, "Pol_ang_err_Poisson_noise"],
[data_mask, "Data_mask"],
]:
hdu_header = header.copy()
hdu_header["datatype"] = name
if name != "IQU_cov_matrix":
data[(1 - data_mask).astype(bool)] = 0.0
hdu = fits.ImageHDU(data=data, header=hdu_header)
hdu.name = name
hdul.append(hdu)
# Save fits file to designated filepath
hdul.writeto(path_join(data_folder, filename + ".fits"), overwrite=True)
if return_hdul:
return hdul
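# Illustrative read-back sketch (not part of the pipeline): the extensions are
# addressable by the names set above; the file path below is hypothetical.
#   from astropy.io import fits
#   with fits.open("./data/TARGET/OBS/TARGET_FOC.fits") as hdul:
#       I_map = hdul["I_stokes"].data
#       P_map = hdul["Pol_deg_debiased"].data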

File diff suppressed because it is too large

View File

@@ -3,34 +3,44 @@
"""
Library functions to query and download datasets from the MAST API.
"""
from os import system
from os.path import exists as path_exists
from os.path import join as path_join
from warnings import filterwarnings
import astropy.units as u
import numpy as np
from astropy.table import Column, unique
from astropy.time import Time, TimeDelta
from astroquery.exceptions import NoResultsWarning
from astroquery.mast import MastMissions, Observations
filterwarnings("error", category=NoResultsWarning)
def divide_proposal(products):
"""
Divide observations into proposals by time or filter.
"""
for pid in np.unique(products["Proposal ID"]):
obs = products[products["Proposal ID"] == pid].copy()
same_filt = np.unique(np.array(np.sum([obs["Filters"][:, 1:] == filt[1:] for filt in obs["Filters"]], axis=2) < 3, dtype=bool), axis=0)
if len(same_filt) > 1:
for filt in same_filt:
products["Proposal ID"][np.any([products["Dataset"] == dataset for dataset in obs["Dataset"][filt]], axis=0)] = "_".join(
[obs["Proposal ID"][filt][0], "_".join([fi for fi in obs["Filters"][filt][0][1:] if fi[:-1] != "CLEAR"])]
)
for pid in np.unique(products["Proposal ID"]):
obs = products[products["Proposal ID"] == pid].copy()
close_date = np.unique(
[[np.abs(TimeDelta(obs["Start"][i].unix - date.unix, format="sec")) < 7.0 * u.d for i in range(len(obs))] for date in obs["Start"]], axis=0
)
if len(close_date) > 1:
for date in close_date:
products["Proposal ID"][np.any([products["Dataset"] == dataset for dataset in obs["Dataset"][date]], axis=0)] = "_".join(
[obs["Proposal ID"][date][0], str(obs["Start"][date][0])[:10]]
)
return products
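# Hypothetical illustration of the splitting above: a proposal "1234" observed at
# two epochs more than a week apart becomes "1234_1995-07-21" and "1234_1996-01-03",
# while sub-groups using different (non-CLEAR) secondary filters become e.g. "1234_F342W".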
@@ -38,53 +48,36 @@ def get_product_list(target=None, proposal_id=None):
"""
Retrieve the product list for a given target from the MAST archive.
"""
mission = MastMissions(mission="hst")
radius = "3"
select_cols = [
"sci_data_set_name",
"sci_spec_1234",
"sci_actual_duration",
"sci_start_time",
"sci_stop_time",
"sci_central_wavelength",
"sci_instrume",
"sci_aper_1234",
"sci_targname",
"sci_pep_id",
"sci_pi_last_name",
]
cols = ["Dataset", "Filters", "Exptime", "Start", "Stop", "Central wavelength", "Instrument", "Size", "Target name", "Proposal ID", "PI last name"]
if target is None:
target = input("Target name:\n>")
# Use query_object method to resolve the object name into coordinates
results = mission.query_object(target, radius=radius, select_cols=select_cols, sci_spec_1234="POL*", sci_obs_type="image", sci_aec="S", sci_instrume="foc")
for c, n_c in zip(select_cols, cols):
results.rename_column(c, n_c)
results["Proposal ID"] = Column(results["Proposal ID"], dtype="U35")
results["Filters"] = Column(np.array([filt.split(";") for filt in results["Filters"]], dtype=str))
results["Start"] = Column(Time(results["Start"]))
results["Stop"] = Column(Time(results["Stop"]))
results = divide_proposal(results)
obs = results.copy()
@@ -92,67 +85,70 @@ def get_product_list(target=None, proposal_id=None):
# Remove single observations for which an F1ND filter is used
to_remove = []
for i in range(len(obs)):
if "F1ND" in obs[i]['Filters']:
if "F1ND" in obs[i]["Filters"]:
to_remove.append(i)
obs.remove_rows(to_remove)
# Remove observations for which a polarization filter is missing
polfilt = {"POL0": 0, "POL60": 1, "POL120": 2}
for pid in np.unique(obs["Proposal ID"]):
used_pol = np.zeros(3)
for dataset in obs[obs["Proposal ID"] == pid]:
used_pol[polfilt[dataset["Filters"][0]]] += 1
if np.any(used_pol < 1):
obs.remove_rows(np.arange(len(obs))[obs["Proposal ID"] == pid])
tab = unique(obs, ["Target name", "Proposal ID"])
obs["Obs"] = [np.argmax(np.logical_and(tab["Proposal ID"] == data["Proposal ID"], tab["Target name"] == data["Target name"])) + 1 for data in obs]
try:
n_obs = unique(obs[["Obs", "Filters", "Start", "Central wavelength", "Instrument", "Size", "Target name", "Proposal ID", "PI last name"]], 'Obs')
n_obs = unique(obs[["Obs", "Filters", "Start", "Central wavelength", "Instrument", "Size", "Target name", "Proposal ID", "PI last name"]], "Obs")
except IndexError:
raise ValueError("There is no observation with POL0, POL60 and POL120 for {0:s} in HST/FOC Legacy Archive".format(target))
b = np.zeros(len(results), dtype=bool)
if proposal_id is not None and str(proposal_id) in obs["Proposal ID"]:
b[results["Proposal ID"] == str(proposal_id)] = True
else:
n_obs.pprint(len(n_obs) + 2)
a = [
np.array(i.split(":"), dtype=str)
for i in input("select observations to be downloaded ('1,3,4,5' or '1,3:5' or 'all','*' default to 1)\n>").split(",")
]
if a[0][0] == "":
a = [[1]]
if a[0][0] in ["a", "all", "*"]:
b = np.ones(len(results), dtype=bool)
else:
a = [np.array(i, dtype=int) for i in a]
for i in a:
if len(i) > 1:
for j in range(i[0], i[1] + 1):
b[np.array([dataset in obs["Dataset"][obs["Obs"] == j] for dataset in results["Dataset"]])] = True
else:
b[np.array([dataset in obs["Dataset"][obs["Obs"] == i[0]] for dataset in results["Dataset"]])] = True
observations = Observations.query_criteria(obs_id=list(results["Dataset"][b]))
products = Observations.filter_products(
Observations.get_product_list(observations),
productType=["SCIENCE"],
dataproduct_type=["image"],
calib_level=[2],
description="DADS C0F file - Calibrated exposure WFPC/WFPC2/FOC/FOS/GHRS/HSP",
)
products["proposal_id"] = Column(products["proposal_id"], dtype="U35")
products["target_name"] = Column(observations["target_name"])
for prod in products:
prod["proposal_id"] = results["Proposal ID"][results["Dataset"] == prod["productFilename"][: len(results["Dataset"][0])].upper()][0]
for prod in products:
prod["target_name"] = observations["target_name"][observations["obsid"] == prod["obsID"]][0]
tab = unique(products, ["target_name", "proposal_id"])
products["Obs"] = [np.argmax(np.logical_and(tab['proposal_id'] == data['proposal_id'], tab['target_name'] == data['target_name']))+1 for data in products]
products["Obs"] = [np.argmax(np.logical_and(tab["proposal_id"] == data["proposal_id"], tab["target_name"] == data["target_name"])) + 1 for data in products]
return target, products
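# Note: when target is None this function prompts for a target name, and unless a
# matching proposal_id is supplied it also asks interactively which observations to
# download. A hypothetical non-interactive call: get_product_list(target="IC5063", proposal_id=5918).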
def retrieve_products(target=None, proposal_id=None, output_dir="./data"):
"""
Given a target name and a proposal_id, create the local directories and retrieve the FITS files from the MAST archive.
"""
@@ -160,18 +156,19 @@ def retrieve_products(target=None, proposal_id=None, output_dir='./data'):
prodpaths = []
# data_dir = path_join(output_dir, target)
out = ""
for obs in unique(products, "Obs"):
filepaths = []
# obs_dir = path_join(data_dir, obs['prodposal_id'])
# if obs['target_name']!=target:
obs_dir = path_join(path_join(output_dir, target), obs["proposal_id"])
if not path_exists(obs_dir):
system("mkdir -p {0:s} {1:s}".format(obs_dir, obs_dir.replace("data", "plots")))
for file in products["productFilename"][products["Obs"] == obs["Obs"]]:
fpath = path_join(obs_dir, file)
if not path_exists(fpath):
out += "{0:s} : {1:s}\n".format(file, Observations.download_file(
products['dataURI'][products['productFilename'] == file][0], local_path=fpath)[0])
out += "{0:s} : {1:s}\n".format(
file, Observations.download_file(products["dataURI"][products["productFilename"] == file][0], local_path=fpath)[0]
)
else:
out += "{0:s} : Exists\n".format(file)
filepaths.append([obs_dir, file])
@@ -183,13 +180,13 @@ def retrieve_products(target=None, proposal_id=None, output_dir='./data'):
if __name__ == "__main__":
import argparse
parser = argparse.ArgumentParser(description="Query MAST for target products")
parser.add_argument("-t", "--target", metavar="targetname", required=False, help="the name of the target", type=str, default=None)
parser.add_argument("-p", "--proposal_id", metavar="proposal_id", required=False, help="the proposal id of the data products", type=int, default=None)
parser.add_argument(
"-o", "--output_dir", metavar="directory_path", required=False, help="output directory path for the data products", type=str, default="./data"
)
args = parser.parse_args()
print(args.target)
prodpaths = retrieve_products(target=args.target, proposal_id=args.proposal_id, output_dir=args.output_dir)
print(prodpaths)
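# Hypothetical invocation (script filename and target/proposal values are illustrative):
#   python query_mast.py -t "IC5063" -p 5918 -o ./data
# which would create ./data/IC5063/5918/ and download the calibrated C0F files there.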

File diff suppressed because it is too large

View File

@@ -1,10 +1,11 @@
import numpy as np
def rot2D(ang):
"""
Return the 2D rotation matrix for a given angle in degrees.
"""
alpha = np.pi * ang / 180
return np.array([[np.cos(alpha), np.sin(alpha)], [-np.sin(alpha), np.cos(alpha)]])
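# Quick numerical check (illustrative): rot2D(90.0) is approximately
# [[0.0, 1.0], [-1.0, 0.0]], which maps the vector (1, 0) onto (0, -1).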
@@ -17,10 +18,10 @@ def princ_angle(ang):
A = np.array([ang])
else:
A = np.array(ang)
while np.any(A < 0.0):
A[A < 0.0] = A[A < 0.0] + 360.0
while np.any(A >= 180.0):
A[A >= 180.0] = A[A >= 180.0] - 180.0
if type(ang) is type(A):
return A
else:
@@ -31,16 +32,31 @@ def sci_not(v, err, rnd=1, out=str):
"""
Return the value and its error in scientific notation as a string.
"""
power = -int(("%E" % v)[-3:]) + 1
output = [r"({0}".format(round(v * 10**power, rnd)), round(v * 10**power, rnd)]
if isinstance(err, list):
for error in err:
output[0] += r" $\pm$ {0}".format(round(error*10**power, rnd))
output.append(round(error*10**power, rnd))
output[0] += r" $\pm$ {0}".format(round(error * 10**power, rnd))
output.append(round(error * 10**power, rnd))
else:
output[0] += r" $\pm$ {0}".format(round(err*10**power, rnd))
output.append(round(err*10**power, rnd))
if out == str:
return output[0]+r")e{0}".format(-power)
output[0] += r" $\pm$ {0}".format(round(err * 10**power, rnd))
output.append(round(err * 10**power, rnd))
if out is str:
return output[0] + r")e{0}".format(-power)
else:
return *output[1:], -power
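# Illustrative call: sci_not(0.0123, 0.0004) returns the LaTeX-ready string
# "(12.3 $\pm$ 0.4)e-3", while sci_not(0.0123, 0.0004, out=float) returns (12.3, 0.4, -3).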
def wcs_PA(PC21, PC22):
"""
Return the position angle, in degrees relative to North, of a WCS
from the coefficients of its transformation matrix.
"""
if (abs(PC21) > abs(PC22)) and (PC21 >= 0):
orient = -np.arccos(PC22) * 180.0 / np.pi
elif (abs(PC21) > abs(PC22)) and (PC21 < 0):
orient = np.arccos(PC22) * 180.0 / np.pi
elif (abs(PC21) <= abs(PC22)) and (PC22 >= 0):
orient = np.arccos(PC22) * 180.0 / np.pi
else:
# remaining case: abs(PC21) <= abs(PC22) with PC22 < 0, so orient is always defined
orient = -np.arccos(PC22) * 180.0 / np.pi
return orient
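# Illustrative values: wcs_PA(0.0, 1.0) -> 0.0 (North up); for a frame rotated by
# about 30 degrees, wcs_PA(0.5, 0.866) -> ~30.0.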

View File

@@ -1,7 +1,7 @@
#!/usr/bin/python3
import numpy as np
from astropy.io import fits
from lib.plots import overplot_pol, overplot_radio
from matplotlib.colors import LogNorm
Stokes_UV = fits.open("./data/IC5063/5918/IC5063_FOC_b0.10arcsec_c0.20arcsec.fits")
@@ -14,31 +14,37 @@ Stokes_357GHz = fits.open("./data/IC5063/radio/IC5063_357GHz.fits")
Stokes_IR = fits.open("./data/IC5063/IR/u2e65g01t_c0f_rot.fits")
# levelsMorganti = np.array([1.,2.,3.,8.,16.,32.,64.,128.])
levelsMorganti = np.logspace(-0.1249, 1.97, 7) / 100.0
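# Descriptive note: these are 7 logarithmically spaced contour levels from roughly
# 0.75% to 93% of each radio map's peak value (scaled by the map maximum below).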
levels18GHz = levelsMorganti * Stokes_18GHz[0].data.max()
A = overplot_radio(Stokes_UV, Stokes_18GHz)
A.plot(levels=levels18GHz, SNRp_cut=2.0, SNRi_cut=10.0, savename="./plots/IC5063/18GHz_overplot.pdf", vec_scale=None)
levels24GHz = levelsMorganti * Stokes_24GHz[0].data.max()
B = overplot_radio(Stokes_UV, Stokes_24GHz)
B.plot(levels=levels24GHz, SNRp_cut=2.0, SNRi_cut=10.0, savename="./plots/IC5063/24GHz_overplot.pdf", vec_scale=None)
levels103GHz = levelsMorganti * Stokes_103GHz[0].data.max()
C = overplot_radio(Stokes_UV, Stokes_103GHz)
C.plot(levels=levels103GHz, SNRp_cut=2.0, SNRi_cut=10.0, savename="./plots/IC5063/103GHz_overplot.pdf", vec_scale=None)
levels229GHz = levelsMorganti * Stokes_229GHz[0].data.max()
D = overplot_radio(Stokes_UV, Stokes_229GHz)
D.plot(levels=levels229GHz, SNRp_cut=2.0, SNRi_cut=10.0, savename="./plots/IC5063/229GHz_overplot.pdf", vec_scale=None)
levels357GHz = levelsMorganti * Stokes_357GHz[0].data.max()
E = overplot_radio(Stokes_UV, Stokes_357GHz)
E.plot(levels=levels357GHz, SNRp_cut=2.0, SNRi_cut=10.0, savename="./plots/IC5063/357GHz_overplot.pdf", vec_scale=None)
# F = overplot_pol(Stokes_UV, Stokes_S2)
# F.plot(SNRp_cut=3.0, SNRi_cut=80.0, savename='./plots/IC5063/S2_overplot.pdf', norm=LogNorm(vmin=5e-20,vmax=5e-18))
G = overplot_pol(Stokes_UV, Stokes_IR, cmap="inferno")
G.plot(
SNRp_cut=2.0,
SNRi_cut=10.0,
savename="./plots/IC5063/IR_overplot.pdf",
vec_scale=None,
norm=LogNorm(Stokes_IR[0].data.max() * Stokes_IR[0].header["photflam"] / 1e3, Stokes_IR[0].data.max() * Stokes_IR[0].header["photflam"]),
cmap="inferno_r",
)

View File

@@ -1,6 +1,6 @@
#!/usr/bin/python3
import numpy as np
from astropy.io import fits
from lib.plots import overplot_chandra, overplot_pol
from matplotlib.colors import LogNorm
@@ -8,13 +8,13 @@ Stokes_UV = fits.open("./data/MRK463E/5960/MRK463E_FOC_b0.05arcsec_c0.10arcsec.f
Stokes_IR = fits.open("./data/MRK463E/WFPC2/IR_rot_crop.fits")
Stokes_Xr = fits.open("./data/MRK463E/Chandra/X_ray_crop.fits")
levels = np.geomspace(1.0, 99.0, 7)
A = overplot_chandra(Stokes_UV, Stokes_Xr, norm=LogNorm())
A.plot(levels=levels, SNRp_cut=3.0, SNRi_cut=3.0, vec_scale=5, zoom=1, savename="./plots/MRK463E/Chandra_overplot.pdf")
A.write_to(path1="./data/MRK463E/FOC_data_Chandra.fits", path2="./data/MRK463E/Chandra_data.fits", suffix="aligned")
levels = np.array([0.8, 2, 5, 10, 20, 50]) / 100.0 * Stokes_UV[0].header["photflam"]
B = overplot_pol(Stokes_UV, Stokes_IR, norm=LogNorm())
B.plot(levels=levels, SNRp_cut=3.0, SNRi_cut=3.0, vec_scale=5, norm=LogNorm(8.5e-18, 2.5e-15), savename="./plots/MRK463E/IR_overplot.pdf")
B.write_to(path1="./data/MRK463E/FOC_data_WFPC.fits", path2="./data/MRK463E/WFPC_data.fits", suffix="aligned")

View File

@@ -1,5 +1,6 @@
#!/usr/bin/python
from getopt import error as get_error
from getopt import getopt
from sys import argv
arglist = argv[1:]
@@ -24,7 +25,7 @@ try:
elif curr_arg in ("-i", "--snri"):
SNRi_cut = int(curr_val)
elif curr_arg in ("-l", "--lim"):
flux_lim = list("".join(curr_val).split(','))
flux_lim = list("".join(curr_val).split(","))
except get_error as err:
print(str(err))

View File

@@ -1,19 +1,21 @@
#!/usr/bin/python
def main(infiles=None):
"""
Retrieve the native spatial resolution of the given observations.
"""
from os.path import join as path_join
from warnings import catch_warnings, filterwarnings
from astropy.io.fits import getheader
from astropy.wcs import WCS, FITSFixedWarning
from numpy.linalg import eig
if infiles is None:
print("Usage: \"python get_cdelt.py -f infiles\"")
print('Usage: "python get_cdelt.py -f infiles"')
return 1
prod = [["/".join(filepath.split('/')[:-1]), filepath.split('/')[-1]] for filepath in infiles]
prod = [["/".join(filepath.split("/")[:-1]), filepath.split("/")[-1]] for filepath in infiles]
data_folder = prod[0][0]
infiles = [p[1] for p in prod]
@@ -21,14 +23,14 @@ def main(infiles=None):
size = {}
for currfile in infiles:
with catch_warnings():
filterwarnings("ignore", message="'datfix' made the change", category=FITSFixedWarning)
wcs = WCS(getheader(path_join(data_folder, currfile))).celestial
key = currfile[:-5]
size[key] = wcs.array_shape
if wcs.wcs.has_cd():
cdelt[key] = eig(wcs.wcs.cd)[0] * 3600.0
else:
cdelt[key] = wcs.wcs.cdelt * 3600.0
print("Image name, native resolution in arcsec and shape")
for currfile in infiles:
@@ -41,7 +43,7 @@ def main(infiles=None):
if __name__ == "__main__":
import argparse
parser = argparse.ArgumentParser(description="Retrieve the native spatial resolution of data products")
parser.add_argument("-f", "--files", metavar="path", required=False, nargs="*", help="the full or relative path to the data products", default=None)
args = parser.parse_args()
exitcode = main(infiles=args.files)
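# Example invocation (file paths hypothetical): python get_cdelt.py -f ./data/IC5063/5918/*_c0f.fits
# prints each image name with its native pixel scale in arcsec and its array shape.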