Merge branch 'testing'

fix wrong use of PHOTFLAM and other display fixes
This commit is contained in:
2025-02-13 15:45:21 +01:00
6 changed files with 658 additions and 158 deletions

View File

@@ -85,12 +85,21 @@ def display_bkg(data, background, std_bkg, headers, histograms=None, binning=Non
label=headers[i]["filtnam1"] + " (Obs " + str(filt_obs[headers[i]["filtnam1"]]) + ")",
)
ax_h.plot([background[i] * convert_flux[i], background[i] * convert_flux[i]], [hist.min(), hist.max()], "x--", color="C{0:d}".format(i), alpha=0.8)
if i == 0:
xmin, xmax = np.min(np.array(bins)[np.array(hist) > 1e2]) * convert_flux[0], np.max(np.array(bins)[np.array(hist) > 1e2]) * convert_flux[0]
else:
xmin, xmax = (
min(xmin, np.min(np.array(bins)[np.array(hist) > 1e2]) * convert_flux[0]),
max(xmax, np.max(np.array(bins)[np.array(hist) > 1e2]) * convert_flux[0]),
)
if coeff is not None:
# ax_h.plot(bins*convert_flux[i], gausspol(bins, *coeff[i]), '--', color="C{0:d}".format(i), alpha=0.8)
ax_h.plot(bins * convert_flux[i], gauss(bins, *coeff[i]), "--", color="C{0:d}".format(i), alpha=0.8)
ax_h.set_xscale("log")
ax_h.set_ylim([0.0, np.max([hist.max() for hist in histograms])])
ax_h.set_xlim([np.min(background * convert_flux) * 1e-2, np.max(background * convert_flux) * 1e2])
ax_h.set_yscale("log")
ax_h.set_ylim([100.0, np.max([hist.max() for hist in histograms])])
# ax_h.set_xlim([np.min(background * convert_flux) * 1e-2, np.max(background * convert_flux) * 1e2])
ax_h.set_xlim([xmin, xmax])
ax_h.set_xlabel(r"Flux [$ergs \cdot cm^{-2} \cdot s^{-1} \cdot \AA^{-1}$]")
ax_h.set_ylabel(r"Number of pixels in bin")
ax_h.set_title("Histogram for each observation")

View File

@@ -46,6 +46,15 @@ def get_obs_data(infiles, data_folder="", compute_flux=False):
data_array.append(f[0].data)
wcs_array.append(WCS(header=f[0].header, fobj=f).celestial)
f.flush()
# Save pixel area for flux density computation
if headers[i]["PXFORMT"] == "NORMAL":
headers[i]["PXAREA"] = 1.96e-4 # 14x14 milliarcsec squared pixel area in arcsec^2
elif headers[i]["PXFORMT"] == "ZOOM":
headers[i]["PXAREA"] = 4.06e-4 # 29x14 milliarcsec squared pixel area in arcsec^2
else:
headers[i]["PXAREA"] = 1.0 # unknown default to 1 arcsec^2
# Convert PHOTFLAM value from 1arcsec aperture to the pixel area
# headers[i]["PHOTFLAM"] *= np.pi / headers[i]["PXAREA"]
data_array = np.array(data_array, dtype=np.double)
# Prevent negative count value in imported data
@@ -143,6 +152,7 @@ def save_Stokes(
header["PHOTPLAM"] = (header_stokes["PHOTPLAM"], "Pivot Wavelength")
header["PHOTBW"] = (header_stokes["PHOTBW"], "RMS Bandwidth of the Filter and Detector")
header["PHOTFLAM"] = (header_stokes["PHOTFLAM"], "Inverse Sensitivity in DN/sec/cm**2/Angst")
header["PXAREA"] = (header_stokes["PXAREA"], "Pixel area in arcsec**2")
header["EXPTIME"] = (header_stokes["EXPTIME"], "Total exposure time in sec")
header["PROPOSID"] = (header_stokes["PROPOSID"], "PEP proposal identifier for observation")
header["TARGNAME"] = (header_stokes["TARGNAME"], "Target name")

View File

@@ -114,7 +114,7 @@ def plot_obs(data_array, headers, rectangle=None, shifts=None, savename=None, pl
used[r_ax][c_ax] = True
else:
ax_curr = ax[r_ax]
ax_curr[r_ax] = True
used[r_ax] = True
# plots
if "vmin" in kwargs.keys() or "vmax" in kwargs.keys():
vmin, vmax = kwargs["vmin"], kwargs["vmax"]
@@ -338,9 +338,12 @@ def polarization_map(
if P_cut >= 1.0:
# MaskP on the signal-to-noise value
maskP[pol_err > 0.0] = SNRp[pol_err > 0.0] > P_cut
SNRp_cut = deepcopy(P_cut)
maskP[pol_err > 0.0] = SNRp[pol_err > 0.0] > SNRp_cut
P_cut = 0.99
else:
# MaskP on the confidence value
SNRp_cut = 5.0
maskP = confP > P_cut
mask = np.logical_and(maskI, maskP)
@@ -363,7 +366,7 @@ def polarization_map(
fig = plt.figure(figsize=(7 * ratiox, 7 * ratioy), layout="constrained")
if ax is None:
ax = fig.add_subplot(111, projection=wcs)
ax.set(aspect="equal", fc="k", ylim=[-stkI.shape[0] * 0.01, stkI.shape[0] * 1.01])
ax.set(aspect="equal", fc="k")
# fig.subplots_adjust(hspace=0, wspace=0, left=0.102, right=1.02)
# ax.coords.grid(True, color='white', ls='dotted', alpha=0.5)
@@ -375,8 +378,8 @@ def polarization_map(
vmin, vmax = 0.0, stkI.max() * convert_flux
for key, value in [
["cmap", [["cmap", "inferno"]]],
["width", [["width", 0.5]]],
["linewidth", [["linewidth", 0.3]]],
["width", [["width", 0.4]]],
["linewidth", [["linewidth", 0.6]]],
]:
try:
_ = kwargs[key]
@@ -424,7 +427,7 @@ def polarization_map(
else:
vmin, vmax = flux_lim
im = ax.imshow(stkI * convert_flux, norm=LogNorm(vmin, vmax), aspect="equal", cmap=kwargs["cmap"], alpha=1.0)
fig.colorbar(im, ax=ax, aspect=30, shrink=0.75, pad=0.025, label=r"$F_{\lambda}$ [$ergs \cdot cm^{-2} \cdot s^{-1} \cdot \AA^{-1}$]")
fig.colorbar(im, ax=ax, aspect=30, shrink=0.60, pad=0.025, label=r"$F_{\lambda}$ [$ergs \cdot cm^{-2} \cdot s^{-1} \cdot \AA^{-1}$]")
levelsI = np.array([0.8, 2.0, 5.0, 10.0, 20.0, 50.0]) / 100.0 * vmax
print("Flux density contour levels : ", levelsI)
ax.contour(stkI * convert_flux, levels=levelsI, colors="grey", linewidths=0.5)
@@ -440,8 +443,9 @@ def polarization_map(
pfmax = (stkI[stkI > 0.0] * pol[stkI > 0.0] * convert_flux).max()
else:
vmin, vmax = flux_lim
pfmax = (stkI[stkI > 0.0] * pol[stkI > 0.0] * convert_flux).max()
im = ax.imshow(stkI * convert_flux * pol, norm=LogNorm(vmin, vmax), aspect="equal", cmap=kwargs["cmap"], alpha=1.0)
fig.colorbar(im, ax=ax, aspect=50, shrink=0.75, pad=0.025, label=r"$F_{\lambda} \cdot P$ [$ergs \cdot cm^{-2} \cdot s^{-1} \cdot \AA^{-1}$]")
fig.colorbar(im, ax=ax, aspect=30, shrink=0.60, pad=0.025, label=r"$F_{\lambda} \cdot P$ [$ergs \cdot cm^{-2} \cdot s^{-1} \cdot \AA^{-1}$]")
# levelsPf = np.linspace(0.0175, 0.50, 5) * pfmax
levelsPf = np.array([1.73, 13.0, 33.0, 66.0]) / 100.0 * pfmax
print("Polarized flux density contour levels : ", levelsPf)
@@ -451,13 +455,13 @@ def polarization_map(
display = "p"
vmin, vmax = 0.0, 100.0
im = ax.imshow(pol * 100.0, vmin=vmin, vmax=vmax, aspect="equal", cmap=kwargs["cmap"], alpha=1.0)
fig.colorbar(im, ax=ax, aspect=50, shrink=0.75, pad=0.025, label=r"$P$ [%]")
fig.colorbar(im, ax=ax, aspect=30, shrink=0.60, pad=0.025, label=r"$P$ [%]")
elif display.lower() in ["pa", "pang", "pol_ang"]:
# Display polarization degree map
display = "pa"
vmin, vmax = 0.0, 180.0
im = ax.imshow(princ_angle(pang), vmin=vmin, vmax=vmax, aspect="equal", cmap=kwargs["cmap"], alpha=1.0)
fig.colorbar(im, ax=ax, aspect=50, shrink=0.75, pad=0.025, label=r"$\theta_P$ [°]")
fig.colorbar(im, ax=ax, aspect=30, shrink=0.60, pad=0.025, label=r"$\theta_P$ [°]")
elif display.lower() in ["s_p", "pol_err", "pol_deg_err"]:
# Display polarization degree error map
display = "s_p"
@@ -467,7 +471,7 @@ def polarization_map(
else:
vmin, vmax = 0.0, 100.0
im = ax.imshow(pol_err * 100.0, vmin=vmin, vmax=vmax, aspect="equal", cmap="inferno_r", alpha=1.0)
fig.colorbar(im, ax=ax, aspect=50, shrink=0.75, pad=0.025, label=r"$\sigma_P$ [%]")
fig.colorbar(im, ax=ax, aspect=30, shrink=0.60, pad=0.025, label=r"$\sigma_P$ [%]")
elif display.lower() in ["s_i", "i_err"]:
# Display intensity error map
display = "s_i"
@@ -479,7 +483,7 @@ def polarization_map(
im = ax.imshow(np.sqrt(stk_cov[0, 0]) * convert_flux, norm=LogNorm(vmin, vmax), aspect="equal", cmap="inferno_r", alpha=1.0)
else:
im = ax.imshow(np.sqrt(stk_cov[0, 0]) * convert_flux, aspect="equal", cmap=kwargs["cmap"], alpha=1.0)
fig.colorbar(im, ax=ax, aspect=50, shrink=0.75, pad=0.025, label=r"$\sigma_I$ [$ergs \cdot cm^{-2} \cdot s^{-1} \cdot \AA^{-1}$]")
fig.colorbar(im, ax=ax, aspect=30, shrink=0.60, pad=0.025, label=r"$\sigma_I$ [$ergs \cdot cm^{-2} \cdot s^{-1} \cdot \AA^{-1}$]")
elif display.lower() in ["snri"]:
# Display I_stokes signal-to-noise map
display = "snri"
@@ -491,19 +495,19 @@ def polarization_map(
ax.contour(SNRi, levels=levelsSNRi, colors="grey", linewidths=0.5)
else:
im = ax.imshow(SNRi, aspect="equal", cmap=kwargs["cmap"], alpha=1.0)
fig.colorbar(im, ax=ax, aspect=50, shrink=0.75, pad=0.025, label=r"$I_{Stokes}/\sigma_{I}$")
fig.colorbar(im, ax=ax, aspect=30, shrink=0.60, pad=0.025, label=r"$I_{Stokes}/\sigma_{I}$")
elif display.lower() in ["snr", "snrp"]:
# Display polarization degree signal-to-noise map
display = "snrp"
vmin, vmax = 0.0, np.max(SNRp[np.isfinite(SNRp)])
if vmax * 0.99 > P_cut:
if vmax * 0.99 > SNRp_cut:
im = ax.imshow(SNRp, vmin=vmin, vmax=vmax, aspect="equal", cmap=kwargs["cmap"], alpha=1.0)
levelsSNRp = np.linspace(P_cut, vmax * 0.99, 5).astype(int)
print("SNRp contour levels : ", levelsSNRp)
ax.contour(SNRp, levels=levelsSNRp, colors="grey", linewidths=0.5)
else:
im = ax.imshow(SNRp, aspect="equal", cmap=kwargs["cmap"], alpha=1.0)
fig.colorbar(im, ax=ax, aspect=50, shrink=0.75, pad=0.025, label=r"$P/\sigma_{P}$")
fig.colorbar(im, ax=ax, aspect=30, shrink=0.60, pad=0.025, label=r"$P/\sigma_{P}$")
elif display.lower() in ["conf", "confp"]:
# Display polarization degree signal-to-noise map
display = "confp"
@@ -512,7 +516,7 @@ def polarization_map(
levelsconfp = np.array([0.500, 0.900, 0.990, 0.999])
print("confp contour levels : ", levelsconfp)
ax.contour(confP, levels=levelsconfp, colors="grey", linewidths=0.5)
fig.colorbar(im, ax=ax, aspect=50, shrink=0.75, pad=0.025, label=r"$Conf_{P}$")
fig.colorbar(im, ax=ax, aspect=30, shrink=0.60, pad=0.025, label=r"$Conf_{P}$")
else:
# Defaults to intensity map
if mask.sum() > 0.0:
@@ -520,18 +524,19 @@ def polarization_map(
else:
vmin, vmax = 1.0 * np.mean(np.sqrt(stk_cov[0, 0][stkI > 0.0]) * convert_flux), np.max(stkI[stkI > 0.0] * convert_flux)
im = ax.imshow(stkI * convert_flux, norm=LogNorm(vmin, vmax), aspect="equal", cmap=kwargs["cmap"], alpha=1.0)
fig.colorbar(im, ax=ax, aspect=50, shrink=0.75, pad=0.025, label=r"$F_{\lambda}$ [$ergs \cdot cm^{-2} \cdot s^{-1} \cdot \AA$]")
fig.colorbar(im, ax=ax, aspect=30, shrink=0.60, pad=0.025, label=r"$F_{\lambda}$ [$ergs \cdot cm^{-2} \cdot s^{-1} \cdot \AA$]")
# Get integrated values from header
# Get integrated flux values from sum
I_diluted = stkI[data_mask].sum()
I_diluted_err = np.sqrt(np.sum(stk_cov[0, 0][data_mask]))
# Get integrated polarization values from header
P_diluted = Stokes[0].header["P_int"]
P_diluted_err = Stokes[0].header["sP_int"]
PA_diluted = Stokes[0].header["PA_int"]
PA_diluted_err = Stokes[0].header["sPA_int"]
plt.rcParams.update({"font.size": 10})
plt.rcParams.update({"font.size": 11})
px_size = wcs.wcs.get_cdelt()[0] * 3600.0
px_sc = AnchoredSizeBar(ax.transData, 1.0 / px_size, "1 arcsec", 3, pad=0.25, sep=5, borderpad=0.25, frameon=False, size_vertical=0.005, color=font_color)
north_dir = AnchoredDirectionArrows(
@@ -545,10 +550,10 @@ def polarization_map(
sep_y=0.01,
sep_x=0.01,
back_length=0.0,
head_length=10.0,
head_width=10.0,
head_length=7.0,
head_width=7.0,
angle=-Stokes[0].header["orientat"],
text_props={"ec": "k", "fc": font_color, "alpha": 1, "lw": 0.4},
text_props={"ec": "k", "fc": font_color, "alpha": 1, "lw": 0.5},
arrow_props={"ec": "k", "fc": font_color, "alpha": 1, "lw": 1},
)
@@ -596,7 +601,7 @@ def polarization_map(
color="white",
xy=(0.01, 1.00),
xycoords="axes fraction",
path_effects=[pe.withStroke(linewidth=0.5, foreground="k")],
path_effects=[pe.withStroke(linewidth=2.0, foreground="k")],
verticalalignment="top",
horizontalalignment="left",
)
@@ -611,7 +616,7 @@ def polarization_map(
color="white",
xy=(0.01, 1.00),
xycoords="axes fraction",
path_effects=[pe.withStroke(linewidth=0.5, foreground="k")],
path_effects=[pe.withStroke(linewidth=2.0, foreground="k")],
verticalalignment="top",
horizontalalignment="left",
)
@@ -1930,7 +1935,7 @@ class crop_map(object):
self.im.remove()
self.im = self.ax.imshow(data * convert_flux, **kwargs)
if hasattr(self, "cr"):
self.cr[0].set_data(*wcs.wcs.crpix)
self.cr[0].set_data([wcs.wcs.crpix[0]], [wcs.wcs.crpix[1]])
else:
self.cr = self.ax.plot(*wcs.wcs.crpix, "r+")
self.fig.canvas.draw_idle()
@@ -3275,7 +3280,6 @@ class pol_map(object):
str_conf = ""
if self.region is None:
s_I = np.sqrt(self.IQU_cov[0, 0])
N_pix = self.I.size
I_reg = self.I.sum()
I_reg_err = np.sqrt(np.sum(s_I**2))
P_reg = self.Stokes[0].header["P_int"]
@@ -3290,7 +3294,6 @@ class pol_map(object):
s_IU = self.IQU_cov[0, 2]
s_QU = self.IQU_cov[1, 2]
N_cut = self.cut.sum()
I_cut = self.I[self.cut].sum()
Q_cut = self.Q[self.cut].sum()
U_cut = self.U[self.cut].sum()
@@ -3326,7 +3329,6 @@ class pol_map(object):
s_IU = self.IQU_cov[0, 2]
s_QU = self.IQU_cov[1, 2]
N_pix = self.region.sum()
I_reg = self.I[self.region].sum()
Q_reg = self.Q[self.region].sum()
U_reg = self.U[self.region].sum()
@@ -3359,7 +3361,6 @@ class pol_map(object):
)
new_cut = np.logical_and(self.region, self.cut)
N_cut = new_cut.sum()
I_cut = self.I[new_cut].sum()
Q_cut = self.Q[new_cut].sum()
U_cut = self.U[new_cut].sum()

View File

@@ -25,11 +25,11 @@ def divide_proposal(products):
"""
for pid in np.unique(products["Proposal ID"]):
obs = products[products["Proposal ID"] == pid].copy()
same_filt = np.unique(np.array(np.sum([obs["Filters"][:, 1:] == filt[1:] for filt in obs["Filters"]], axis=2) < 3, dtype=bool), axis=0)
same_filt = np.unique(np.array(np.sum([obs["Filters"] == filt for filt in obs["Filters"]], axis=2) >= len(obs["Filters"][0]), dtype=bool), axis=0)
if len(same_filt) > 1:
for filt in same_filt:
products["Proposal ID"][np.any([products["Dataset"] == dataset for dataset in obs["Dataset"][filt]], axis=0)] = "_".join(
[obs["Proposal ID"][filt][0], "_".join([fi for fi in obs["Filters"][filt][0][1:] if fi[:-1] != "CLEAR"])]
[obs["Proposal ID"][filt][0], "_".join([fi for fi in obs["Filters"][filt][0] if fi[:-1] != "CLEAR"])]
)
for pid in np.unique(products["Proposal ID"]):
obs = products[products["Proposal ID"] == pid].copy()
@@ -44,38 +44,69 @@ def divide_proposal(products):
return products
def get_product_list(target=None, proposal_id=None):
def get_product_list(target=None, proposal_id=None, instrument="foc"):
"""
Retrieve products list for a given target from the MAST archive
"""
mission = MastMissions(mission="hst")
radius = "3"
select_cols = [
"sci_data_set_name",
"sci_spec_1234",
"sci_actual_duration",
"sci_start_time",
"sci_stop_time",
"sci_central_wavelength",
"sci_instrume",
"sci_aper_1234",
"sci_targname",
"sci_pep_id",
"sci_pi_last_name",
"sci_targname",
"sci_aper_1234",
"sci_spec_1234",
"sci_central_wavelength",
"sci_actual_duration",
"sci_instrume",
"sci_operating_mode",
"sci_data_set_name",
"sci_start_time",
"sci_stop_time",
"sci_refnum",
]
cols = ["Dataset", "Filters", "Exptime", "Start", "Stop", "Central wavelength", "Instrument", "Size", "Target name", "Proposal ID", "PI last name"]
cols = [
"Proposal ID",
"PI last name",
"Target name",
"Aperture",
"Filters",
"Central wavelength",
"Exptime",
"Instrument",
"Operating Mode",
"Dataset",
"Start",
"Stop",
"References",
]
if target is None:
target = input("Target name:\n>")
# Use query_object method to resolve the object name into coordinates
results = mission.query_object(target, radius=radius, select_cols=select_cols, sci_spec_1234="POL*", sci_obs_type="image", sci_aec="S", sci_instrume="foc")
if instrument == "foc":
results = mission.query_object(
target, radius=radius, select_cols=select_cols, sci_spec_1234="POL*", sci_obs_type="image", sci_aec="S", sci_instrume="foc"
)
dataproduct_type = "image"
description = "DADS C0F file - Calibrated exposure WFPC/WFPC2/FOC/FOS/GHRS/HSP"
elif instrument == "fos":
results = mission.query_object(
target, radius=radius, select_cols=select_cols, sci_operating_mode="SPECTROPOLARIMETRY", sci_obs_type="spectrum", sci_aec="S", sci_instrume="fos"
)
dataproduct_type = "spectrum"
description = ["DADS C0F file - Calibrated exposure WFPC/WFPC2/FOC/FOS/GHRS/HSP", "DADS C3F file - Calibrated exposure GHRS/FOS/HSP"]
for c, n_c in zip(select_cols, cols):
results.rename_column(c, n_c)
results["Proposal ID"] = Column(results["Proposal ID"], dtype="U35")
results["Filters"] = Column(np.array([filt.split(";") for filt in results["Filters"]], dtype=str))
if instrument == "foc":
results["POLFilters"] = Column(np.array([filt.split(";")[0] for filt in results["Filters"]], dtype=str))
results["Filters"] = Column(np.array([filt.split(";")[1:] for filt in results["Filters"]], dtype=str))
else:
results["Filters"] = Column(np.array([filt.split(";") for filt in results["Filters"]], dtype=str))
results["Start"] = Column(Time(results["Start"]))
results["Stop"] = Column(Time(results["Stop"]))
@@ -89,20 +120,34 @@ def get_product_list(target=None, proposal_id=None):
to_remove.append(i)
obs.remove_rows(to_remove)
# Remove observations for which a polarization filter is missing
polfilt = {"POL0": 0, "POL60": 1, "POL120": 2}
for pid in np.unique(obs["Proposal ID"]):
used_pol = np.zeros(3)
for dataset in obs[obs["Proposal ID"] == pid]:
used_pol[polfilt[dataset["Filters"][0]]] += 1
if np.any(used_pol < 1):
obs.remove_rows(np.arange(len(obs))[obs["Proposal ID"] == pid])
if instrument == "foc":
polfilt = {"POL0": 0, "POL60": 1, "POL120": 2}
for pid in np.unique(obs["Proposal ID"]):
used_pol = np.zeros(3)
for dataset in obs[obs["Proposal ID"] == pid]:
used_pol[polfilt[dataset["POLFilters"]]] += 1
if np.any(used_pol < 1):
obs.remove_rows(np.arange(len(obs))[obs["Proposal ID"] == pid])
# Remove observations for which a spectropolarization has not been reduced
if instrument == "fos":
for pid in np.unique(obs["Proposal ID"]):
observations = Observations.query_criteria(proposal_id=pid.split("_")[0])
c3prod = Observations.filter_products(
Observations.get_product_list(observations),
productType=["SCIENCE"],
dataproduct_type=dataproduct_type,
calib_level=[2],
description=description[1],
)
if len(c3prod) < 1:
obs.remove_rows(np.arange(len(obs))[obs["Proposal ID"] == pid])
tab = unique(obs, ["Target name", "Proposal ID"])
obs["Obs"] = [np.argmax(np.logical_and(tab["Proposal ID"] == data["Proposal ID"], tab["Target name"] == data["Target name"])) + 1 for data in obs]
try:
n_obs = unique(obs[["Obs", "Filters", "Start", "Central wavelength", "Instrument", "Size", "Target name", "Proposal ID", "PI last name"]], "Obs")
n_obs = unique(obs[["Obs", "Filters", "Start", "Central wavelength", "Instrument", "Aperture", "Target name", "Proposal ID", "PI last name"]], "Obs")
except IndexError:
raise ValueError("There is no observation with POL0, POL60 and POL120 for {0:s} in HST/FOC Legacy Archive".format(target))
raise ValueError("There is no observation with polarimetry for {0:s} in HST/{1:s} Legacy Archive".format(target, instrument.upper()))
b = np.zeros(len(results), dtype=bool)
if proposal_id is not None and str(proposal_id) in obs["Proposal ID"]:
@@ -130,29 +175,27 @@ def get_product_list(target=None, proposal_id=None):
products = Observations.filter_products(
Observations.get_product_list(observations),
productType=["SCIENCE"],
dataproduct_type=["image"],
dataproduct_type=dataproduct_type,
calib_level=[2],
description="DADS C0F file - Calibrated exposure WFPC/WFPC2/FOC/FOS/GHRS/HSP",
description=description,
)
products["proposal_id"] = Column(products["proposal_id"], dtype="U35")
products["target_name"] = Column(observations["target_name"])
for prod in products:
prod["proposal_id"] = results["Proposal ID"][results["Dataset"] == prod["productFilename"][: len(results["Dataset"][0])].upper()][0]
for prod in products:
prod["target_name"] = observations["target_name"][observations["obsid"] == prod["obsID"]][0]
tab = unique(products, ["target_name", "proposal_id"])
tab = unique(products, "proposal_id")
products["Obs"] = [np.argmax(np.logical_and(tab["proposal_id"] == data["proposal_id"], tab["target_name"] == data["target_name"])) + 1 for data in products]
products["Obs"] = [np.argmax(tab["proposal_id"] == data["proposal_id"]) + 1 for data in products]
return target, products
def retrieve_products(target=None, proposal_id=None, output_dir="./data"):
def retrieve_products(target=None, proposal_id=None, instrument="foc", output_dir="./data"):
"""
Given a target name and a proposal_id, create the local directories and retrieve the fits files from the MAST Archive
"""
target, products = get_product_list(target=target, proposal_id=proposal_id)
target, products = get_product_list(target=target, proposal_id=proposal_id, instrument=instrument)
prodpaths = []
# data_dir = path_join(output_dir, target)
out = ""
@@ -183,10 +226,11 @@ if __name__ == "__main__":
parser = argparse.ArgumentParser(description="Query MAST for target products")
parser.add_argument("-t", "--target", metavar="targetname", required=False, help="the name of the target", type=str, default=None)
parser.add_argument("-p", "--proposal_id", metavar="proposal_id", required=False, help="the proposal id of the data products", type=int, default=None)
parser.add_argument("-i", "--instrum", metavar="instrum", required=False, help="the instrument used for observation", type=str, default="foc")
parser.add_argument(
"-o", "--output_dir", metavar="directory_path", required=False, help="output directory path for the data products", type=str, default="./data"
)
args = parser.parse_args()
print(args.target)
prodpaths = retrieve_products(target=args.target, proposal_id=args.proposal_id)
prodpaths = retrieve_products(target=args.target, proposal_id=args.proposal_id, instrument=args.instrum.lower(), output_dir=args.output_dir)
print(prodpaths)

View File

@@ -676,7 +676,6 @@ def rebin_array(data_array, error_array, headers, pxsize=2, scale="px", operatio
nw.wcs.crpix /= Dxy
nw.array_shape = new_shape
new_header["NAXIS1"], new_header["NAXIS2"] = nw.array_shape
new_header["PHOTFLAM"] = header["PHOTFLAM"] * (Dxy[0] * Dxy[1])
for key, val in nw.to_header().items():
new_header.set(key, val)
new_header["SAMPLING"] = (str(pxsize) + scale, "Resampling performed during reduction")

View File

@@ -51,15 +51,24 @@ class specpol(object):
"""
def __init__(self, other=None):
if isinstance(other, specpol):
if isinstance(other, __class__):
# Copy constructor
self.hd = deepcopy(other.hd)
self.bin_edges = deepcopy(other.bin_edges)
self.wav = deepcopy(other.wav)
self.wav_err = deepcopy(other.wav_err)
self.I = deepcopy(other.I)
self.Q = deepcopy(other.Q)
self.U = deepcopy(other.U)
self.V = deepcopy(other.V)
self.IQU_cov = deepcopy(other.IQU_cov)
self.IQUV_cov = deepcopy(other.IQUV_cov)
if hasattr(other, "I_r"):
self.I_r = deepcopy(other.I_r)
self.I_r_err = deepcopy(other.I_r_err)
self.wav_r = deepcopy(other.wav_r)
self.wav_r_err = deepcopy(other.wav_r_err)
elif isinstance(other, str):
self.from_txt(other)
else:
# Initialise to zero
if isinstance(other, int):
@@ -72,29 +81,62 @@ class specpol(object):
"""
Set all values to zero.
"""
self.hd = dict([])
self.hd["TARGNAME"], self.hd["PROPOSID"], self.hd["ROOTNAME"], self.hd["APER_ID"] = "", 0, "", ""
self.hd["DENSITY"] = False
self.hd["XUNIT"], self.hd["YUNIT"] = r"Wavelength [m]", r"{0:s}F [$10^{{{1:d}}}$ count s$^{{-1}}$]"
self.bin_edges = np.zeros(n + 1)
self.wav = np.zeros(n)
self.wav_err = np.zeros((n, 2))
self.I = np.zeros(n)
self.Q = np.zeros(n)
self.U = np.zeros(n)
self.V = np.zeros(n)
self.IQU_cov = np.zeros((4, 4, n))
self.IQUV_cov = np.zeros((4, 4, n))
def rest(self, wav=None, z=None):
if z is None and self.hd["TARGNAME"] == "":
z = 0
elif z is None and "REDSHIFT" not in self.hd.keys():
from astroquery.ipac.ned import Ned
z = Ned.query_object(self.hd["TARGNAME"])["Redshift"][0]
self.hd["REDSHIFT"] = z
elif z is None:
z = self.hd["REDSHIFT"]
if wav is None:
wav = self.wav
return wav / (z + 1)
def unrest(self, wav=None, z=None):
if z is None and self.hd["TARGNAME"] == "":
z = 0
elif z is None and "REDSHIFT" not in self.hd.keys():
from astroquery.ipac.ned import Ned
z = Ned.query_object(self.hd["TARGNAME"])["Redshift"][0]
self.hd["REDSHIFT"] = z
elif z is None:
z = self.hd["REDSHIFT"]
if wav is None:
wav = self.wav
return wav * (z + 1)
@property
def I_err(self):
return np.sqrt(self.IQU_cov[0][0])
return np.sqrt(self.IQUV_cov[0][0])
@property
def Q_err(self):
return np.sqrt(self.IQU_cov[1][1])
return np.sqrt(self.IQUV_cov[1][1])
@property
def U_err(self):
return np.sqrt(self.IQU_cov[2][2])
return np.sqrt(self.IQUV_cov[2][2])
@property
def V_err(self):
return np.sqrt(self.IQU_cov[3][3])
return np.sqrt(self.IQUV_cov[3][3])
@property
def QN(self):
@@ -165,35 +207,118 @@ class specpol(object):
]
)
self.I, self.Q, self.U, self.V = np.dot(mrot, np.array([self.I, self.Q, self.U, self.V]))
self.IQU_cov = np.dot(mrot, np.dot(self.IQU_cov.T, mrot.T).T)
self.IQUV_cov = np.dot(mrot, np.dot(self.IQUV_cov.T, mrot.T).T)
def bin(self, bin_edges):
"""
Rebin spectra to given list of bin edges.
"""
# Get new binning distribution and define new empty spectra
in_bin = np.digitize(self.wav, bin_edges) - 1
out = specpol(bin_edges.shape[0] - 1)
if hasattr(self, "I_r"):
# Propagate "raw" flux spectra to new bin
out.I_r = deepcopy(self.I_r)
out.I_r_err = deepcopy(self.I_r_err)
out.wav_r = deepcopy(self.wav_r)
out.wav_r_err = deepcopy(self.wav_r_err)
else:
# Create "raw" flux spectra from previously unbinned spectra
out.I_r = deepcopy(self.I[self.I > 0.0])
out.I_r_err = deepcopy(self.I_err[self.I > 0.0])
out.wav_r = deepcopy(self.wav[self.I > 0.0])
out.wav_r_err = deepcopy(self.wav_err[self.I > 0.0])
for i in range(bin_edges.shape[0] - 1):
# Set the wavelength as the mean wavelength of acquisitions in bin, default to the bin center
out.wav[i] = np.mean(self.wav[in_bin == i]) if np.any(in_bin == i) else 0.5 * (bin_edges[i] + bin_edges[i + 1])
out.wav_err[i] = (out.wav[i] - bin_edges[i], bin_edges[i + 1] - out.wav[i])
if self.hd["DENSITY"] and np.any(in_bin == i):
# If flux density, convert to flux before converting back to the new density
wav1 = np.abs(self.wav_err[in_bin == i]).sum(axis=1)
wav2 = np.abs(out.wav_err[i]).sum()
else:
wav1, wav2 = 1.0, 1.0
out.I[i] = np.sum(self.I[in_bin == i] * wav1) / wav2 if np.any(in_bin == i) else 0.0
out.Q[i] = np.sum(self.Q[in_bin == i] * wav1) / wav2 if np.any(in_bin == i) else 0.0
out.U[i] = np.sum(self.U[in_bin == i] * wav1) / wav2 if np.any(in_bin == i) else 0.0
out.V[i] = np.sum(self.V[in_bin == i] * wav1) / wav2 if np.any(in_bin == i) else 0.0
for m in range(4):
# Quadratically sum the uncertainties
out.IQUV_cov[m][m][i] = np.sum(self.IQUV_cov[m][m][in_bin == i] * wav1**2) / wav2**2 if np.any(in_bin == i) else 0.0
for n in [k for k in range(4) if k != m]:
out.IQUV_cov[m][n][i] = np.sqrt(np.sum((self.IQUV_cov[m][n][in_bin == i] * wav1) ** 2)) / wav2 if np.any(in_bin == i) else 0.0
# Update bin edges and header
out.bin_edges = bin_edges
out.hd = deepcopy(self.hd)
out.hd["NAXIS1"] = bin_edges.shape[0] - 1
out.hd["DATAMIN"], out.hd["DATAMAX"] = out.I.min(), out.I.max()
out.hd["MINWAV"], out.hd["MAXWAV"] = out.wav.min(), out.wav.max()
out.hd["STEPWAV"] = np.max(bin_edges[1:] - bin_edges[:-1])
return out
def bin_size(self, size):
"""
Rebin spectra to selected bin size in Angstrom.
"""
bin_edges = np.arange(np.floor(self.wav.min()), np.ceil(self.wav.max()), size)
in_bin = np.digitize(self.wav, bin_edges) - 1
spec_b = specpol(bin_edges.shape[0] - 1)
for i in range(bin_edges.shape[0] - 1):
spec_b.wav[i] = np.mean(self.wav[in_bin == i])
spec_b.wav_err[i] = (spec_b.wav[i] - bin_edges[i], bin_edges[i + 1] - spec_b.wav[i])
bin_edges = np.arange(self.bin_edges.min(), self.bin_edges.max() + size, size, dtype=np.float32)
return self.bin(bin_edges)
spec_b.I[i] = np.sum(self.I[in_bin == i])
spec_b.Q[i] = np.sum(self.Q[in_bin == i])
spec_b.U[i] = np.sum(self.U[in_bin == i])
spec_b.V[i] = np.sum(self.V[in_bin == i])
for m in range(4):
spec_b.IQU_cov[m][m][i] = np.sum(self.IQU_cov[m][m][in_bin == i])
for n in [k for k in range(4) if k != m]:
spec_b.IQU_cov[m][n][i] = np.sqrt(np.sum(self.IQU_cov[m][n][in_bin == i] ** 2))
return spec_b
def from_txt(self, filename, data_dir=""):
"""
Fill current spectra from a text file.
"""
data_dump = np.loadtxt(join_path(data_dir, filename), skiprows=1).T
self.zero(data_dump.shape[1])
(
self.wav,
self.wav_err[:, 0],
self.I,
self.IQUV_cov[0, 0],
self.Q,
self.IQUV_cov[1, 1],
self.U,
self.IQUV_cov[2, 2],
self.V,
self.IQUV_cov[3, 3],
) = data_dump[:10]
self.wav_err[:, 1] = deepcopy(self.wav_err[:, 0])
self.bin_edges[:-1], self.bin_edges[-1] = deepcopy(self.wav - self.wav_err[:, 0]), deepcopy(self.wav[-1] + self.wav_err[-1, 1])
for i in range(4):
self.IQUV_cov[i][i] = deepcopy(self.IQUV_cov[i][i]) ** 2
with open(join_path(data_dir, filename)) as f:
self.hd["TARGNAME"], self.hd["PROPOSID"], self.hd["ROOTNAME"], self.hd["APER_ID"], self.hd["XUNIT"], self.hd["YUNIT"] = f.readline()[2:].split(";")
def dump_txt(self, filename, output_dir=""):
"""
Dump current spectra to a text file.
"""
data_dump = np.array([self.wav, self.I, self.Q, self.U, self.V, self.P, self.PA]).T
np.savetxt(join_path(output_dir, filename + ".txt"), data_dump)
header = ";".join([self.hd["TARGNAME"], str(self.hd["PROPOSID"]), self.hd["ROOTNAME"], self.hd["APER_ID"], self.hd["XUNIT"], self.hd["YUNIT"]])
header += "\nwav\t wav_err\t I\t I_err\t Q\t Q_err\t U\t U_err\t V\t V_err\t P\t P_err\t PA\t PA_err"
data_dump = np.array(
[
self.wav,
self.wav_err.mean(axis=1),
self.I,
self.I_err,
self.Q,
self.Q_err,
self.U,
self.U_err,
self.V,
self.V_err,
self.P,
self.P_err,
self.PA,
self.PA_err,
]
).T
np.savetxt(
join_path(output_dir, filename + ".txt"),
data_dump,
header=header,
)
return join_path(output_dir, filename)
def plot(self, fig=None, ax=None, savename=None, plots_folder=""):
@@ -201,8 +326,11 @@ class specpol(object):
Display current spectra.
"""
if fig is None:
plt.rcParams.update({"font.size": 15})
if ax is None:
self.fig, self.ax = plt.subplots(1, 2, sharex=True, figsize=(20, 5), layout="constrained")
self.fig, self.ax = plt.subplots(3, 1, sharex=True, figsize=(20, 15))
self.fig.subplots_adjust(hspace=0)
self.fig.suptitle("_".join([self.hd["TARGNAME"], str(self.hd["PROPOSID"]), self.hd["ROOTNAME"], self.hd["APER_ID"]]))
else:
self.ax = ax
else:
@@ -212,33 +340,82 @@ class specpol(object):
else:
self.fig = fig
self.ax = ax
if isinstance(ax, np.ndarray):
ax1, ax2 = self.ax[:2]
if isinstance(self.ax, np.ndarray):
if self.ax.shape[0] == 2:
ax1, ax2 = self.ax[:2]
ax22 = ax2.twinx()
ax2.set_xlabel(self.hd["XUNIT"])
secax1 = ax1.secondary_xaxis("top", functions=(self.rest, self.unrest))
secax1.set_xlabel(r"Rest " + self.hd["XUNIT"])
else:
ax1, ax2, ax22 = self.ax[::-1]
else:
ax1 = self.ax
# Display flux and polarized flux on first ax
ax1.set_xlabel(r"Wavelength [$\AA$]")
ax1.errorbar(self.wav, self.I, xerr=self.wav_err.T, yerr=self.I_err, color="k", fmt=".", label="I")
ax1.errorbar(self.wav, self.PF, xerr=self.wav_err.T, yerr=self.PF_err, color="b", fmt=".", label="PF")
ax1.set_ylabel(r"F$_\lambda$ [erg s$^{-1}$ cm$^{-2} \AA^{-1}$]")
ax1.legend(ncols=2, loc=1)
if hasattr(self, "I_r"):
# If available, display "raw" total flux
yoffset = np.floor(np.log10(self.I_r[self.I_r > 0.0].min())).astype(int)
yoff = 10.0**yoffset
ymin, ymax = (
np.min((self.I_r - 1.5 * self.I_r_err)[self.I_r > 1.5 * self.I_r_err]) / yoff,
np.max((self.I_r + self.I_r_err * 1.5)[self.I_r > 1.5 * self.I_r_err]) / yoff,
)
xmin, xmax = np.min(self.wav_r - self.wav_r_err[:, 0]), np.max(self.wav_r + self.wav_r_err[:, 1])
ax1.errorbar(self.wav_r, self.I_r / yoff, xerr=self.wav_r_err.T, yerr=self.I_r_err / yoff, color="k", fmt=".", label="I")
else:
yoffset = np.floor(np.log10(self.I[self.I > 0.0].min())).astype(int)
yoff = 10.0**yoffset
ymin, ymax = (
np.min((self.I - 1.5 * self.I_err)[self.I > 1.5 * self.I_err]) / yoff,
np.max((self.I + self.I_err * 1.5)[self.I > 1.5 * self.I_err]) / yoff,
)
xmin, xmax = np.min(self.wav - self.wav_err[:, 0]), np.max(self.wav + self.wav_err[:, 1])
ax1.errorbar(self.wav, self.I / yoff, xerr=self.wav_err.T, yerr=self.I_err / yoff, color="k", fmt=".", label="I")
if isinstance(ax, np.ndarray):
ax1.set_xlim([np.min([xmin, self.bin_edges.min()]), np.max([xmax, self.bin_edges.max()])])
ax1.set_xlabel(self.hd["XUNIT"])
ax1.set_ylim([ymin, ymax])
ax1.set_ylabel(self.hd["YUNIT"].format("", yoffset))
# ax11 = ax1.twinx()
# pfoffset = np.floor(np.log10(self.PF[self.PF > 0.0].min())).astype(int)
# pfoff = 10.0**pfoffset
# ax11.errorbar(self.wav, self.PF / pfoff, xerr=self.wav_err.T, yerr=self.PF_err / pfoff, color="b", fmt=".", label="PF")
# ax11.set_ylim(
# [
# ymin * yoff * self.P[np.logical_and(self.P > 0.0, np.isfinite(self.P))].min() / pfoff,
# ymax * yoff * self.P[np.logical_and(self.P > 0.0, np.isfinite(self.P))].max() / pfoff,
# ]
# )
# ax11.set_ylabel(self.hd["YUNIT"].format("Px", pfoffset), color="b")
# ax11.tick_params(axis="y", color="b", labelcolor="b")
# ax1.legend(ncols=2, loc=1)
if isinstance(self.ax, np.ndarray):
# When given 2 axes, display P and PA on second
ax2.set_xlabel(r"Wavelength [$\AA$]")
ax2.errorbar(self.wav, self.P, xerr=self.wav_err.T, yerr=self.P_err, color="b", fmt=".", label="P")
ax2.set_ylim([0.0, 1.0])
ax2.set_ylabel(r"P", color="b")
ax2.errorbar(self.wav, self.P * 100.0, xerr=self.wav_err.T, yerr=self.P_err * 100.0, color="b", fmt=".", label="P")
pmin, pmax = (
np.min(self.P[self.I > 0.0] - 1.5 * self.P_err[self.I > 0.0]) * 100.0,
np.max(self.P[self.I > 0.0] + 1.5 * self.P_err[self.I > 0.0]) * 100.0,
)
ax2.set_ylim([pmin if pmin > 0.0 else 0.0, pmax if pmax < 100.0 else 100.0])
ax2.set_ylabel(r"P [%]", color="b")
ax2.tick_params(axis="y", color="b", labelcolor="b")
ax22 = ax2.twinx()
ax22.errorbar(self.wav, self.PA, xerr=self.wav_err.T, yerr=self.PA_err, color="r", fmt=".", label="PA [°]")
ax22.set_ylim([0.0, 180.0])
ax22.set_ylabel(r"PA", color="r")
pamin, pamax = np.min(self.PA[self.I > 0.0] - 1.5 * self.PA_err[self.I > 0.0]), np.max(self.PA[self.I > 0.0] + 1.5 * self.PA_err[self.I > 0.0])
ax22.set_ylim([pamin if pamin > 0.0 else 0.0, pamax if pamax < 180.0 else 180.0])
ax22.set_ylabel(r"PA [°]", color="r")
ax22.tick_params(axis="y", color="r", labelcolor="r")
secax22 = ax22.secondary_xaxis("top", functions=(self.rest, self.unrest))
secax22.set_xlabel(r"Rest " + self.hd["XUNIT"])
h2, l2 = ax2.get_legend_handles_labels()
h22, l22 = ax22.get_legend_handles_labels()
ax2.legend(h2 + h22, l2 + l22, ncols=2, loc=1)
if self.ax.shape[0] == 2:
ax2.legend(h2 + h22, l2 + l22, ncols=2, loc=1)
if hasattr(self, "fig") and savename is not None:
self.fig.savefig(join_path(plots_folder, savename + ".pdf"), dpi=300, bbox_inches="tight")
@@ -248,6 +425,101 @@ class specpol(object):
else:
return self.ax
def __add__(self, other):
    """
    Spectra addition, if not same binning concatenate both spectra binning.

    Returns a new specpol whose Stokes fluxes are the element-wise sum of
    both (rebinned) spectra; uncertainties are propagated through IQUV_cov.
    """
    if (self.bin_edges.shape == other.bin_edges.shape) and np.all(self.bin_edges == other.bin_edges):
        # Identical grids: reuse the common bin edges directly.
        bin_edges = deepcopy(self.bin_edges)
    else:
        # If different binning, concatenate binnings
        # Start from whichever grid begins at the bluest wavelength...
        if self.bin_edges[0] <= other.bin_edges[0]:
            bin_edges = deepcopy(self.bin_edges)
        else:
            bin_edges = deepcopy(other.bin_edges)
        # ...then extend it with the other grid's edges lying redward of it.
        if other.bin_edges[-1] > bin_edges[-1]:
            bin_edges = np.concat((bin_edges, deepcopy(other.bin_edges[other.bin_edges > bin_edges[-1]])), axis=0)
        elif self.bin_edges[-1] > bin_edges[-1]:
            bin_edges = np.concat((bin_edges, deepcopy(self.bin_edges[self.bin_edges > bin_edges[-1]])), axis=0)
    # Rebin spectra to be added to ensure same binning
    spec_a = specpol(specpol(self).bin(bin_edges=bin_edges))
    spec_b = specpol(specpol(other).bin(bin_edges=bin_edges))
    # Create sum spectra
    spec = specpol(bin_edges.shape[0] - 1)
    spec.hd = deepcopy(self.hd)
    spec.bin_edges = bin_edges
    # Bin centres and asymmetric (lower, upper) half-widths of the merged grid.
    spec.wav = np.mean([spec_a.wav, spec_b.wav], axis=0)
    spec.wav_err = np.array([spec.wav - spec.bin_edges[:-1], spec.bin_edges[1:] - spec.wav]).T
    # Propagate "raw" flux spectra to sum
    if hasattr(self, "I_r") and hasattr(other, "I_r"):
        # Deal with the concatenation of the "raw" total flux spectra
        if self.wav_r[0] <= other.wav_r[0]:
            # inter = wavelength interval where both raw spectra overlap.
            inter = other.wav_r[0], self.wav_r[-1]
            spec.wav_r = deepcopy(np.concat((self.wav_r, other.wav_r[other.wav_r > self.wav_r[-1]])))
            spec.wav_r_err = deepcopy(np.concat((self.wav_r_err, other.wav_r_err[other.wav_r > self.wav_r[-1]]), axis=0))
            spec.I_r = deepcopy(np.concat((self.I_r, other.I_r[other.wav_r > self.wav_r[-1]])))
            spec.I_r_err = deepcopy(np.concat((self.I_r_err, other.I_r_err[other.wav_r > self.wav_r[-1]]), axis=0))
        else:
            inter = self.wav_r[0], other.wav_r[-1]
            spec.wav_r = deepcopy(np.concat((other.wav_r, self.wav_r[self.wav_r > other.wav_r[-1]])))
            spec.wav_r_err = deepcopy(np.concat((other.wav_r_err, self.wav_r_err[self.wav_r > other.wav_r[-1]]), axis=0))
            spec.I_r = deepcopy(np.concat((other.I_r, self.I_r[self.wav_r > other.wav_r[-1]])))
            spec.I_r_err = deepcopy(np.concat((other.I_r_err, self.I_r_err[self.wav_r > other.wav_r[-1]]), axis=0))
        # When both spectra intersect, compute intersection as the mean
        edges = np.concat((spec.wav_r - spec.wav_r_err[:, 0], [spec.wav_r[-1] + spec.wav_r_err[-1, 1]]))
        edges.sort()
        # Map each input raw sample onto a bin of the merged raw grid.
        bin, bino = np.digitize(self.wav_r, edges) - 1, np.digitize(other.wav_r, edges) - 1
        for w in np.arange(spec.wav_r.shape[0])[np.logical_and(spec.wav_r >= inter[0], spec.wav_r <= inter[1])]:
            if self.hd["DENSITY"] and np.any(bin == w):
                # If flux density, convert to flux before converting back to the new density
                # (weights are the bin full widths, zeroed for samples with flux <= error).
                wav, wavo = (
                    np.abs(self.wav_r_err[bin == w]).sum(axis=1) * (self.I_r[bin == w] > self.I_r_err[bin == w]),
                    np.abs(other.wav_r_err[bino == w]).sum(axis=1) * (other.I_r[bino == w] > other.I_r_err[bino == w]),
                )
                wavs = np.abs(spec.wav_r_err[w]).sum()
            else:
                wav, wavo, wavs = 1.0, 1.0, 1.0
            # n = number of contributing samples whose flux exceeds its error.
            # NOTE(review): n can be 0 when no sample passes that cut, which
            # propagates NaN/inf into I_r with a runtime warning — confirm.
            n = np.sum(self.I_r[bin == w] > self.I_r_err[bin == w]) + np.sum(other.I_r[bino == w] > other.I_r_err[bino == w])
            spec.I_r[w] = np.sum(np.concat([self.I_r[bin == w] * wav, other.I_r[bino == w] * wavo])) / wavs / n
            spec.I_r_err[w] = np.sqrt(np.sum(np.concat([self.I_r_err[bin == w] ** 2 * wav**2, other.I_r_err[bino == w] ** 2 * wavo**2]))) / wavs / n
    # Sum stokes fluxes
    spec.I = deepcopy(spec_a.I + spec_b.I)
    spec.Q = deepcopy(spec_a.Q + spec_b.Q)
    spec.U = deepcopy(spec_a.U + spec_b.U)
    spec.V = deepcopy(spec_a.V + spec_b.V)
    # Quadratically sum uncertainties
    for i in range(4):
        # Diagonal variances add linearly, off-diagonal terms quadratically.
        spec.IQUV_cov[i][i] = deepcopy(spec_a.IQUV_cov[i][i] + spec_b.IQUV_cov[i][i])
        for j in [k for k in range(4) if k != i]:
            spec.IQUV_cov[i][j] = deepcopy(np.sqrt(spec_a.IQUV_cov[i][j] ** 2 + spec_b.IQUV_cov[i][j] ** 2))
    # Update header to reflect sum
    spec.hd["DATAMIN"], spec.hd["DATAMAX"] = spec.I.min(), spec.I.max()
    spec.hd["MINWAV"], spec.hd["MAXWAV"] = spec.wav.min(), spec.wav.max()
    spec.hd["EXPTIME"] = spec_a.hd["EXPTIME"] + spec_b.hd["EXPTIME"]
    # Keep the character-wise common part of both rootnames and tag as a sum.
    rootnames = [spec_a.hd["ROOTNAME"], spec_b.hd["ROOTNAME"]]
    spec.hd["ROOTNAME"] = "".join(p for p, *r in zip(*rootnames) if all(p == c for c in r)) + "_SUM"
    return spec
def __deepcopy__(self, memo=None):
    """
    Return an independent deep copy of this spectrum.

    Parameters
    ----------
    memo : dict, optional
        Memoization dictionary supplied by copy.deepcopy to preserve shared
        references within one copy operation. A fresh dict is created when
        the method is called directly.
    """
    # BUG FIX: the default was the mutable `memo={}`, shared between every
    # direct call — objects memoized in one copy leaked into the next (and
    # were kept alive forever). copy.deepcopy always passes its own memo, so
    # the default is only reached on direct invocation.
    if memo is None:
        memo = {}
    spec = specpol(self)
    # Start from a shallow copy of the instance dict, then replace every
    # mutable attribute with an independent deep copy.
    spec.__dict__.update(self.__dict__)
    spec.hd = deepcopy(self.hd, memo)
    # Read from self (not the freshly-updated spec) for clarity; both refer
    # to the same object at this point.
    spec.bin_edges = deepcopy(self.bin_edges, memo)
    spec.wav = deepcopy(self.wav, memo)
    spec.wav_err = deepcopy(self.wav_err, memo)
    spec.I = deepcopy(self.I, memo)
    spec.Q = deepcopy(self.Q, memo)
    spec.U = deepcopy(self.U, memo)
    spec.V = deepcopy(self.V, memo)
    spec.IQUV_cov = deepcopy(self.IQUV_cov, memo)
    return spec
class FOSspecpol(specpol):
"""
@@ -256,43 +528,131 @@ class FOSspecpol(specpol):
def __init__(self, stokes, data_folder=""):
    """
    Initialise object from fits filename or hdulist.
    Initialise object from fits filename, fits hdulist or copy.
    """
    # NOTE(review): this body interleaves two revisions of the constructor
    # (direct fits loading vs. a copy/zero/from_file dispatch); several
    # branches below are redundant or unreachable — confirm against the
    # intended revision before relying on this code.
    if isinstance(stokes, str):
        # Older revision: load wavelengths (c0f) and Stokes data (c3f) here.
        self.file_root = stokes.split("_")[0]
        self.hd = getheader(join_path(data_folder, self.file_root + "_c0f.fits"))
        wav = getdata(join_path(data_folder, self.file_root + "_c0f.fits"))
        stokes = getdata(join_path(data_folder, self.file_root + "_c3f.fits"))
    elif isinstance(stokes, hdu.hdulist.HDUList):
        self.hd = stokes.header
        self.file_root = self.hd["FILENAME"].split("_")[0]
        wav = getdata(join_path(data_folder, self.file_root + "_c0f"))
        stokes = stokes.data
    if isinstance(stokes, __class__):
        # Copy constructor
        self.rootname = deepcopy(stokes.rootname)
        self.hd = deepcopy(stokes.hd)
        self.bin_edges = deepcopy(stokes.bin_edges)
        self.wav = deepcopy(stokes.wav)
        self.wav_err = deepcopy(stokes.wav_err)
        self.I = deepcopy(stokes.I)
        self.Q = deepcopy(stokes.Q)
        self.U = deepcopy(stokes.U)
        self.V = deepcopy(stokes.V)
        self.IQUV_cov = deepcopy(stokes.IQUV_cov)
        self.P_fos = deepcopy(stokes.P_fos)
        self.P_fos_err = deepcopy(stokes.P_fos_err)
        self.PC_fos = deepcopy(stokes.PC_fos)
        self.PC_fos_err = deepcopy(stokes.PC_fos_err)
        self.PA_fos = deepcopy(stokes.PA_fos)
        self.PA_fos_err = deepcopy(stokes.PA_fos_err)
        self.subspec = {}
        for name in ["PASS1", "PASS2", "PASS12", "PASS12corr"]:
            spec = deepcopy(stokes.subspec[name])
            self.subspec[name] = spec
    elif stokes is None or isinstance(stokes, int):
        # Build an empty spectrum of n bins.
        self.zero(n=stokes)
    else:
        raise ValueError("Input must be a path to a fits file or an HDUlist")
        # NOTE(review): unreachable after the raise in this rendering;
        # presumably the newer revision calls from_file here instead.
        self.from_file(stokes, data_folder=data_folder)
    # Remaining statements belong to the older in-place loading revision
    # (they mirror the body of from_file, using IQU_cov instead of IQUV_cov).
    self.wav = np.concat((wav[0:2, :], wav[0].reshape(1, wav.shape[1]), wav[0].reshape(1, wav.shape[1])), axis=0)
    self.wav_err = np.zeros((self.wav.shape[0], self.wav.shape[1], 2))
    self.IQU_cov = np.zeros((4, 4, self.wav.shape[0], self.wav.shape[1]))
    self.I = stokes[0::14]
    self.IQU_cov[0, 0] = stokes[4::14] ** 2
    self.Q = stokes[1::14]
    self.IQU_cov[1, 1] = stokes[5::14] ** 2
    self.U = stokes[2::14]
    self.IQU_cov[2, 2] = stokes[6::14] ** 2
    self.V = stokes[3::14]
    self.IQU_cov[3, 3] = stokes[7::14] ** 2
@classmethod
def zero(self, n=1):
    """
    Set all values to zero.
    """
    # NOTE(review): declared @classmethod yet the first parameter is named
    # "self" and instance-style attributes are assigned to it; under
    # classmethod semantics these become *class* attributes shared by every
    # instance — confirm whether the decorator is intended.
    self.rootname = ""
    self.hd = dict([])
    # FOS spectra are flux densities per angstrom by convention (see from_file).
    self.hd["DENSITY"] = True
    self.hd["TARGNAME"] = "Undefined"
    self.hd["XUNIT"], self.hd["YUNIT"] = r"Wavelength [$\AA$]", r"{0:s}F$_\lambda$ [$10^{{{1:d}}}$ erg s$^{{-1}}$ cm$^{{-2}} \AA^{{-1}}$]"
    # 4 rows: one per calfos pass spectrum (PASS1, PASS2, PASS12, PASS12corr).
    self.bin_edges = np.zeros((4, n + 1))
    self.wav = np.zeros((4, n))
    self.wav_err = np.zeros((4, n, 2))
    self.I = np.zeros((4, n))
    self.Q = np.zeros((4, n))
    self.U = np.zeros((4, n))
    self.V = np.zeros((4, n))
    self.IQUV_cov = np.zeros((4, 4, 4, n))
    self.subspec = {}
    for i, name in enumerate(["PASS1", "PASS2", "PASS12", "PASS12corr"]):
        # NOTE(review): the next four statements look like a stale revision
        # (they reference self.IQU_cov and self.hd["PA_APER"], neither of
        # which is set here) and are immediately overwritten below — confirm.
        spec = specpol(self.wav[i].shape[0])
        spec.wav, spec.wav_err, spec.I, spec.Q, spec.U, spec.V = self.wav[i], self.wav_err[i], self.I[i], self.Q[i], self.U[i], self.V[i]
        spec.IQU_cov = self.IQU_cov[:, :, i, :]
        spec.rotate((i != 3) * self.hd["PA_APER"])
        spec = specpol(n)
        spec.hd, spec.wav, spec.wav_err, spec.I, spec.Q, spec.U, spec.V = self.hd, self.wav[i], self.wav_err[i], self.I[i], self.Q[i], self.U[i], self.V[i]
        spec.IQUV_cov = self.IQUV_cov[:, :, i, :]
        self.subspec[name] = spec
    # calfos-computed polarization products, one row per pass spectrum.
    self.P_fos = np.zeros(self.I.shape)
    self.P_fos_err = np.zeros(self.I.shape)
    self.PC_fos = np.zeros(self.I.shape)
    self.PC_fos_err = np.zeros(self.I.shape)
    self.PA_fos = np.zeros(self.I.shape)
    self.PA_fos_err = np.zeros(self.I.shape)
def from_file(self, stokes, data_folder=""):
    """
    Initialise object from fits file or hdulist.

    Parameters
    ----------
    stokes : str or astropy HDUList
        Rootname-prefixed path to the calibrated FOS products, or an opened
        HDUList of the c3f product.
    data_folder : str, optional
        Folder in which the c0f/c3f fits products live.

    Raises
    ------
    ValueError
        If stokes is neither a path nor an HDUList.
    """
    if isinstance(stokes, str):
        # The rootname prefixes both the wavelength (c0f) and Stokes (c3f) products.
        self.rootname = stokes.split("_")[0]
        self.hd = dict(getheader(join_path(data_folder, self.rootname + "_c3f.fits")))
        wav = getdata(join_path(data_folder, self.rootname + "_c0f.fits"))
        stokes = getdata(join_path(data_folder, self.rootname + "_c3f.fits"))
    elif isinstance(stokes, hdu.hdulist.HDUList):
        self.hd = dict(stokes.header)
        self.rootname = self.hd["FILENAME"].split("_")[0]
        wav = getdata(join_path(data_folder, self.rootname + "_c0f"))
        stokes = stokes.data
    else:
        raise ValueError("Input must be a path to a fits file or an HDUlist")
    # FOS spectra are given in flux density with respect to angstrom wavelength
    self.hd["DENSITY"] = True
    self.hd["XUNIT"], self.hd["YUNIT"] = r"Wavelength [$\AA$]", r"{0:s}F$_\lambda$ [$10^{{{1:d}}}$ erg s$^{{-1}}$ cm$^{{-2}} \AA^{{-1}}$]"
    # We set the error to be half the distance to the next mesure
    # 4 wavelength rows: the c0f file provides 2, the last 2 passes reuse row 0.
    self.wav = np.concat((wav[0:2, :], wav[0].reshape(1, wav.shape[1]), wav[0].reshape(1, wav.shape[1])), axis=0)
    self.wav_err = np.zeros((self.wav.shape[0], self.wav.shape[1], 2))
    for i in range(1, self.wav.shape[1] - 1):
        self.wav_err[:, i] = np.abs(
            np.array([((self.wav[j][i] - self.wav[j][i - 1]) / 2.0, (self.wav[j][i + 1] - self.wav[j][i - 1]) / 2.0) for j in range(self.wav.shape[0])])
        )
    # Edge samples reuse the half-width of their nearest interior neighbour.
    self.wav_err[:, 0] = np.array([self.wav_err[:, 1, 0], self.wav_err[:, 1, 0]]).T
    self.wav_err[:, -1] = np.array([self.wav_err[:, -2, 1], self.wav_err[:, -2, 1]]).T
    self.hd["MINWAV"], self.hd["MAXWAV"] = self.wav.min(), self.wav.max()
    self.hd["STEPWAV"] = np.mean(self.wav_err) * 2.0
    # Bin edges: all lower edges plus the final upper edge, per pass spectrum.
    self.bin_edges = np.array(
        [np.concat((self.wav[i] - self.wav_err[i, :, 0], [self.wav[i, -1] + self.wav_err[i, -1, -1]]), axis=0) for i in range(self.wav.shape[0])]
    )
    self.IQUV_cov = np.zeros((4, 4, self.wav.shape[0], self.wav.shape[1]))
    # Special way of reading FOS spectropolarimetry fits files
    # (rows interleave 14 quantities: I,Q,U,V, their errors, then the
    # calfos polarization products read further below).
    self.I = stokes[0::14]
    self.IQUV_cov[0, 0] = stokes[4::14] ** 2
    self.Q = stokes[1::14]
    self.IQUV_cov[1, 1] = stokes[5::14] ** 2
    self.U = stokes[2::14]
    self.IQUV_cov[2, 2] = stokes[6::14] ** 2
    self.V = stokes[3::14]
    self.IQUV_cov[3, 3] = stokes[7::14] ** 2
    self.hd["DATAMIN"], self.hd["DATAMAX"] = self.I.min(), self.I.max()
    # Each file contain 4 spectra: Pass 1, Pass 2, combination of the 2, Combination corrected for orientation and background
    self.subspec = {}
    for i, name in enumerate(["PASS1", "PASS2", "PASS12", "PASS12corr"]):
        spec = specpol(self.wav[i].shape[0])
        # NOTE: spec.hd is the *same* dict object as self.hd, so the MINWAV/
        # MAXWAV/DATAMIN/DATAMAX updates below overwrite the parent header on
        # every iteration (last pass wins).
        spec.hd, spec.wav, spec.wav_err, spec.I, spec.Q, spec.U, spec.V = self.hd, self.wav[i], self.wav_err[i], self.I[i], self.Q[i], self.U[i], self.V[i]
        spec.bin_edges = np.concat((spec.wav - spec.wav_err[:, 0], [spec.wav[-1] + spec.wav_err[-1, 1]]), axis=0)
        spec.hd["MINWAV"], spec.hd["MAXWAV"] = spec.wav.min(), spec.wav.max()
        spec.hd["DATAMIN"], spec.hd["DATAMAX"] = spec.I.min(), spec.I.max()
        spec.IQUV_cov = self.IQUV_cov[:, :, i, :]
        # Only PASS12corr is corrected for telescope orientation
        spec.rotate(-(name[-4:] != "corr") * spec.hd["PA_APER"])
        self.subspec[name] = spec
    # Following lines contain the polarization components computed by calfos
    self.P_fos = stokes[8::14]
    self.P_fos_err = stokes[11::14]
    self.PC_fos = stokes[9::14]
@@ -302,12 +662,6 @@ class FOSspecpol(specpol):
)
self.PA_fos_err = princ_angle(np.degrees(stokes[13::14]))
def __del__(self):
    """Drop references to any matplotlib artefacts held by this spectrum."""
    # Remove the axes/figure attributes if plot() ever created them.
    for attr in ("ax", "fig"):
        if hasattr(self, attr):
            delattr(self, attr)
def dump_txt(self, filename, spec_list=None, output_dir=""):
"""
Dump current spectra to a text file.
@@ -338,6 +692,10 @@ class FOSspecpol(specpol):
if fos:
self.ax[i][0] = spec_list[name].plot(ax=self.ax[i][0])
self.ax[i][1].set_xlabel(r"Wavelength [$\AA$]")
secax1 = self.ax[i][0].secondary_xaxis("top", functions=(self.rest, self.unrest))
secax1.set_xlabel(r"Rest wavelength [$\AA$]")
secax2 = self.ax[i][1].secondary_xaxis("top", functions=(self.rest, self.unrest))
secax2.set_xlabel(r"Rest wavelength [$\AA$]")
self.ax[i][1].errorbar(self.wav[i], self.P_fos[i], xerr=self.wav_err[i].T, yerr=self.P_fos_err[i], color="b", fmt=".", label="P_fos")
self.ax[i][1].set_ylim([0.0, 1.0])
self.ax[i][1].set_ylabel(r"P", color="b")
@@ -352,9 +710,9 @@ class FOSspecpol(specpol):
self.ax[i][1].legend(h2 + h22, l2 + l22, ncols=2, loc=1)
else:
self.ax[i] = spec_list[name].plot(ax=self.ax[i])
self.ax[0][0].set_ylim(ymin=0.0)
# self.ax[0][0].set_ylim(ymin=0.0)
self.fig.suptitle("_".join([self.hd["TARGNAME"], str(self.hd["PROPOSID"]), self.hd["FILENAME"], self.hd["APER_ID"]]))
self.fig.suptitle("_".join([self.hd["TARGNAME"], str(self.hd["PROPOSID"]), self.hd["ROOTNAME"], self.hd["APER_ID"]]))
if savename is not None:
self.fig.savefig(join_path(plots_folder, savename + ".pdf"), dpi=300, bbox_inches="tight")
outfiles.append(join_path(plots_folder, savename + ".pdf"))
@@ -371,42 +729,121 @@ class FOSspecpol(specpol):
self.subspec[key][name] = self.subspec[name].bin_size(size)
return self.subspec[key]
def __add__(self, other):
    """
    Spectra addition, if not same binning default to self bins.

    Sum the Stokes fluxes (and covariances) of two FOS spectra together with
    every "PASS*" sub-spectrum; "other" is rebinned onto self's wavelength
    grid when the grids differ. Returns a new FOSspecpol.
    """
    spec_a = FOSspecpol(self)
    if np.all(self.wav == other.wav):
        spec_b = other
    else:
        # Rebuild self's bin edges (all lower edges plus the last upper edge)
        # and rebin the other spectrum onto them.
        bin_edges = np.zeros(spec_a.wav.shape[0] + 1)
        # BUG FIX: "spec_a.wav_err[-1:1]" was an (empty) slice; the upper
        # half-width of the last bin is "spec_a.wav_err[-1, 1]", matching the
        # edge construction used in from_file and specpol.__add__.
        # NOTE(review): self.wav is 2-D (4 passes x n) in this class, so this
        # edge construction presumes 1-D arrays — confirm against callers.
        bin_edges[:-1], bin_edges[-1] = spec_a.wav - spec_a.wav_err[:, 0], spec_a.wav[-1] + spec_a.wav_err[-1, 1]
        spec_b = other.bin(bin_edges=bin_edges)
    # Sum the Stokes fluxes; covariances add element-wise here.
    spec_a.I += deepcopy(spec_b.I)
    spec_a.Q += deepcopy(spec_b.Q)
    spec_a.U += deepcopy(spec_b.U)
    spec_a.V += deepcopy(spec_b.V)
    spec_a.IQUV_cov += deepcopy(spec_b.IQUV_cov)
    # Propagate the sum to each calfos pass sub-spectrum.
    # NOTE(review): specpol.__add__ already sums EXPTIME and merges ROOTNAME
    # in the returned sub-spectrum header, so the explicit updates below may
    # double-count — confirm the intended bookkeeping.
    for name in ["PASS1", "PASS2", "PASS12", "PASS12corr"]:
        spec_a.subspec[name] += deepcopy(spec_b.subspec[name])
        spec_a.subspec[name].hd["DATAMIN"], spec_a.subspec[name].hd["DATAMAX"] = spec_a.subspec[name].I.min(), spec_a.subspec[name].I.max()
        spec_a.subspec[name].hd["EXPTIME"] += spec_b.subspec[name].hd["EXPTIME"]
        spec_a.subspec[name].hd["ROOTNAME"] += "+" + spec_b.subspec[name].hd["ROOTNAME"]
    # Update the summary header of the summed spectrum.
    spec_a.hd["DATAMIN"], spec_a.hd["DATAMAX"] = spec_a.I.min(), spec_a.I.max()
    spec_a.hd["EXPTIME"] += spec_b.hd["EXPTIME"]
    spec_a.hd["ROOTNAME"] += "+" + spec_b.hd["ROOTNAME"]
    return spec_a
def __deepcopy__(self, memo):
    # Build a zeroed spectrum with the same number of pass rows, then take
    # over this instance's attribute dict.
    spec = FOSspecpol(self.wav.shape[0])
    # NOTE(review): __dict__.update copies references only, so hd and the
    # data arrays stay *shared* with the original; only the subspec entries
    # below are genuinely deep-copied (and without passing memo) — confirm
    # this shallowness is intended for a __deepcopy__ implementation.
    spec.__dict__.update(self.__dict__)
    for key in self.subspec.keys():
        spec.subspec[key] = deepcopy(self.subspec[key])
    return spec
def __del__(self):
    # Finalizer: drop the matplotlib axes/figure references (if any) so the
    # associated plot objects can be released when the spectrum is deleted.
    if hasattr(self, "ax"):
        del self.ax
    if hasattr(self, "fig"):
        del self.fig
def main(infiles, bin_size=None, output_dir=None):
    """
    Produce (binned and summed) spectra for a list of given fits files.

    Parameters
    ----------
    infiles : list of str
        Paths to the FOS fits products to process.
    bin_size : float, optional
        When given, spectra are rebinned to this size before output.
    output_dir : str, optional
        Destination folder for text dumps; defaults to the data folder.

    Returns
    -------
    list or int
        Paths of every text/pdf product written, or 1 when no input given.
    """
    # NOTE(review): several consecutive duplicate assignments below look like
    # interleaved revisions from a merge rendering (the first of each pair is
    # immediately overwritten) — confirm against the intended revision.
    outfiles = []
    if infiles is not None:
        # Divide path in folder + filename
        prod = np.array([["/".join(filepath.split("/")[:-1]), filepath.split("/")[-1]] for filepath in infiles], dtype=str)
        obs_dir = "/".join(infiles[0].split("/")[:-1])
        if not path_exists(obs_dir):
            system("mkdir -p {0:s} {1:s}".format(obs_dir, obs_dir.replace("data", "plots")))
        obs_dir = np.unique(["/".join(file.split("/")[:-1]) for file in infiles])
        for dir in obs_dir:
            # Create missing data/plot folder for tydiness
            if not path_exists(dir):
                system("mkdir -p {0:s} {1:s}".format(dir, dir.replace("data", "plots")))
    else:
        print("Must input files to process.")
        return 1
    data_folder = prod[0][0]
    data_folder = np.unique(prod[:, 0])
    if output_dir is None:
        output_dir = data_folder
        output_dir = data_folder[0]
    try:
        plots_folder = data_folder.replace("data", "plots")
        plots_folder = output_dir.replace("data", "plots")
    except ValueError:
        plots_folder = "."
        plots_folder = output_dir
    if not path_exists(plots_folder):
        system("mkdir -p {0:s} ".format(plots_folder))
    infiles = [p[1] for p in prod]
    roots = np.unique([file.split("_")[0] for file in infiles])
    # NOTE(review): this first loop appears to be the older revision,
    # superseded by the aperture-aware loop below; its results are discarded.
    for file_root in roots:
        print(file_root)
        spec = FOSspecpol(file_root, data_folder)
        filename = "_".join([spec.hd["TARGNAME"], "FOS", str(spec.hd["PROPOSID"]), spec.file_root, spec.hd["APER_ID"]])
    # Collect the corrected/combined pass per aperture for later summation.
    aper = dict([])
    roots = np.unique([p[1].split("_")[0] for p in prod])
    # Iteration on each observation in infiles
    for rootname in roots:
        print(rootname)
        if data_folder.shape[0] > 1:
            # For multiple folders (multiple filters) match data_folder on file rootname
            spec = FOSspecpol(rootname, prod[np.array([p[1].split("_")[0] == rootname for p in prod])][0, 0])
        else:
            spec = FOSspecpol(rootname, data_folder[0])
        filename = "_".join([spec.hd["TARGNAME"], "FOS", str(spec.hd["PROPOSID"]), spec.rootname, spec.hd["APER_ID"]])
        if bin_size is not None:
            key = "{0:.2f}bin".format(bin_size)
            spec.bin_size(bin_size)
            # Only output binned spectra
            outfiles += spec.dump_txt("_".join([filename, key]), spec_list=spec.subspec[key], output_dir=output_dir)
            outfiles += spec.plot(savename="_".join([filename, key]), spec_list=spec.subspec[key], plots_folder=plots_folder)
            # Save corrected and combined pass for later summation, only sum on same aperture
            if spec.hd["APER_ID"] in aper.keys():
                aper[str(spec.hd["APER_ID"])].append(specpol(spec.subspec[key]["PASS12corr"]))
            else:
                aper[str(spec.hd["APER_ID"])] = [specpol(spec.subspec[key]["PASS12corr"])]
        else:
            outfiles += spec.dump_txt(filename, output_dir=output_dir)
            outfiles += spec.plot(savename=filename, plots_folder=plots_folder)
            # NOTE(review): "del spec" before the following accesses would
            # raise NameError at runtime — looks like a leftover of the older
            # revision; confirm.
            del spec
            if spec.hd["APER_ID"] in aper.keys():
                aper[str(spec.hd["APER_ID"])].append(specpol(spec.subspec["PASS12corr"]))
            else:
                aper[str(spec.hd["APER_ID"])] = [specpol(spec.subspec["PASS12corr"])]
    plt.close("all")
    # Sum spectra acquired through same aperture
    for key in aper.keys():
        rootnames = [s.hd["ROOTNAME"] for s in aper[key]]
        print(*rootnames)
        spec = np.sum(aper[key])
        # Keep the character-wise common part of the rootnames, tag as a sum.
        spec.hd["ROOTNAME"] = "".join(p for p, *r in zip(*rootnames) if all(p == c for c in r)) + "_SUM"
        filename = "_".join([spec.hd["TARGNAME"], "FOS", str(spec.hd["PROPOSID"]), spec.hd["ROOTNAME"]])
        if bin_size is not None:
            filename += "_{0:.2f}bin".format(bin_size)
        # Output summed spectra
        outfiles.append(spec.dump_txt("_".join([filename, key]), output_dir=output_dir))
        outfiles.append(spec.plot(savename="_".join([filename, key]), plots_folder=plots_folder)[2])
    plt.show()
    return outfiles