save raw flux in fits file and display
@@ -20,6 +20,7 @@ import lib.reduction as proj_red # Functions used in reduction pipeline
 import numpy as np
 from lib.utils import princ_angle, sci_not
 from matplotlib.colors import LogNorm
+from astropy.wcs import WCS
 
 
 def main(target=None, proposal_id=None, infiles=None, output_dir="./data", crop=False, interactive=False):
@@ -40,12 +41,12 @@ def main(target=None, proposal_id=None, infiles=None, output_dir="./data", crop=
 display_crop = False
 
 # Background estimation
-error_sub_type = "scott" # sqrt, sturges, rice, freedman-diaconis, scott (default) or shape (example (51, 51))
+error_sub_type = "scott" # auto, sqrt, sturges, rice, freedman-diaconis, scott (default) or shape (example (51, 51))
 subtract_error = 3.0
-display_bkg = True
+display_bkg = False
 
 # Data binning
-pxsize = 40
+pxsize = 4
 pxscale = "px" # pixel, arcsec or full
 rebin_operation = "sum" # sum or average
 
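The pxsize / rebin_operation options above control how the aligned images are resampled. A minimal sketch of integer-factor rebinning by block summation or averaging is shown below (not part of the diff); the helper name and the trim-to-multiple step are illustrative assumptions, not the pipeline's proj_red.rebin_array.

import numpy as np

def rebin_sketch(image, factor=4, operation="sum"):
    # Rebin a 2D image by an integer factor, summing (conserves total counts)
    # or averaging (conserves surface brightness) each factor x factor block.
    ny, nx = (s - s % factor for s in image.shape)  # trim to a multiple of factor
    blocks = image[:ny, :nx].reshape(ny // factor, factor, nx // factor, factor)
    return blocks.sum(axis=(1, 3)) if operation == "sum" else blocks.mean(axis=(1, 3))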
@@ -63,10 +64,11 @@ def main(target=None, proposal_id=None, infiles=None, output_dir="./data", crop=
 smoothing_scale = "px" # pixel or arcsec
 
 # Rotation
+rotate_data = False
 rotate_North = True
 
 # Polarization map output
-P_cut = 5 # if >=1.0 cut on the signal-to-noise else cut on the confidence level in Q, U
+P_cut = 3 # if >=1.0 cut on the signal-to-noise else cut on the confidence level in Q, U
 SNRi_cut = 1.0 # I measurements with SNR>30, which implies an uncertainty in P of 4.7%.
 flux_lim = None # lowest and highest flux displayed on plot, defaults to bkg and maximum in cut if None
 scale_vec = 3
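P_cut and SNRi_cut act on the signal-to-noise of the polarization degree and of Stokes I. A compact sketch of the standard debiased quantities they are compared against is given below (not part of the diff); the function name and the simplified error propagation (Stokes I error neglected in s_P) are assumptions for illustration, not the pipeline's compute_pol.

import numpy as np

def pol_sketch(I, Q, U, s_I, s_Q, s_U):
    # Classical polarization degree/angle with asymptotic errors and debiasing.
    P = np.sqrt(Q**2 + U**2) / I
    s_P = np.sqrt(Q**2 * s_Q**2 + U**2 * s_U**2) / (I**2 * P)
    P_db = np.sqrt(np.clip(P**2 - s_P**2, 0.0, None))   # debiased degree
    PA = 0.5 * np.degrees(np.arctan2(U, Q))              # polarization angle [deg]
    s_PA = np.degrees(0.5 * s_P / P)
    keep = (P_db / s_P >= 3) & (I / s_I >= 1.0)          # e.g. P_cut=3, SNRi_cut=1.0
    return P_db, s_P, PA, s_PA, keep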
@@ -142,7 +144,7 @@ def main(target=None, proposal_id=None, infiles=None, output_dir="./data", crop=
 )
 
 # Rotate data to have same orientation
-rotate_data = np.unique([np.round(float(head["ORIENTAT"]), 3) for head in headers]).size != 1
+rotate_data = rotate_data or np.unique([np.round(float(head["ORIENTAT"]), 3) for head in headers]).size != 1
 if rotate_data:
 ang = np.mean([head["ORIENTAT"] for head in headers])
 for head in headers:
@@ -158,7 +160,6 @@ def main(target=None, proposal_id=None, infiles=None, output_dir="./data", crop=
 vmin=data_array[data_array > 0.0].min() * headers[0]["photflam"], vmax=data_array[data_array > 0.0].max() * headers[0]["photflam"]
 ),
 )
 
 # Align and rescale images with oversampling.
 data_array, error_array, headers, data_mask, shifts, error_shifts = proj_red.align_data(
 data_array,
@@ -243,6 +244,8 @@ def main(target=None, proposal_id=None, infiles=None, output_dir="./data", crop=
 )
+flux_data, flux_error, flux_mask, flux_head = proj_red.rotate_data(np.array([flux_data]), np.array([flux_error]), flux_mask, [flux_head])
+flux_data, flux_error, flux_head = flux_data[0], flux_error[0], flux_head[0]
 elif not rotate_data:
 figname += "_noROT"
 
 # Compute polarimetric parameters (polarization degree and angle).
 P, debiased_P, s_P, s_P_P, PA, s_PA, s_PA_P = proj_red.compute_pol(I_stokes, Q_stokes, U_stokes, Stokes_cov, header_stokes)
@@ -250,7 +253,7 @@ def main(target=None, proposal_id=None, infiles=None, output_dir="./data", crop=
 
 # Step 4:
 # Save image to FITS.
-figname = "_".join([figname, figtype]) if figtype != "" else figname
+savename = "_".join([figname, figtype]) if figtype != "" else figname
 Stokes_hdul = proj_fits.save_Stokes(
 I_stokes,
 Q_stokes,
@@ -265,18 +268,18 @@ def main(target=None, proposal_id=None, infiles=None, output_dir="./data", crop=
 s_PA_P,
 header_stokes,
 data_mask,
-figname,
+savename,
 data_folder=data_folder,
 return_hdul=True,
-flux_data=flux_data,
+flux_data=np.array([flux_data, flux_error, flux_mask]),
 flux_head=flux_head,
 )
-outfiles.append("/".join([data_folder, Stokes_hdul["I_STOKES"].header["FILENAME"] + ".fits"]))
+outfiles.append("/".join([data_folder, Stokes_hdul[0].header["FILENAME"] + ".fits"]))
 
 # Step 5:
 # crop to desired region of interest (roi)
 if crop:
-figname += "_crop"
+savename += "_crop"
 stokescrop = proj_plots.crop_Stokes(deepcopy(Stokes_hdul), norm=LogNorm())
 stokescrop.crop()
 stokescrop.write_to("/".join([data_folder, figname + ".fits"]))
@@ -286,10 +289,10 @@ def main(target=None, proposal_id=None, infiles=None, output_dir="./data", crop=
 data_mask = Stokes_hdul["data_mask"].data.astype(bool)
 print(
 "F_int({0:.0f} Angs) = ({1} ± {2})e{3} ergs.cm^-2.s^-1.Angs^-1".format(
-header_stokes["PHOTPLAM"],
+flux_head["PHOTPLAM"],
 *sci_not(
-Stokes_hdul["I_STOKES"].data[data_mask].sum() * header_stokes["PHOTFLAM"],
-np.sqrt(Stokes_hdul["IQU_COV_MATRIX"].data[0, 0][data_mask].sum()) * header_stokes["PHOTFLAM"],
+flux_data[flux_mask].sum() * flux_head["PHOTFLAM"],
+np.sqrt(np.sum(flux_error[flux_mask] ** 2)) * flux_head["PHOTFLAM"],
 2,
 out=int,
 ),
@@ -315,12 +318,12 @@ def main(target=None, proposal_id=None, infiles=None, output_dir="./data", crop=
 flux_lim=flux_lim,
 step_vec=step_vec,
 scale_vec=scale_vec,
-savename="_".join([figname]),
+savename=figname,
 plots_folder=plots_folder,
 )
 for figtype, figsuffix in zip(
-["Intensity", "Pol_flux", "Pol_deg", "Pol_ang", "I_err", "P_err", "SNRi", "SNRp", "confp"],
-["I", "P_flux", "P", "PA", "I_err", "P_err", "SNRi", "SNRp", "confP"],
+["FluxDensity", "Intensity", "Pol_flux", "Pol_deg", "Pol_ang", "I_err", "P_err", "SNRi", "SNRp", "confp"],
+["F", "I", "P_flux", "P", "PA", "I_err", "P_err", "SNRi", "SNRp", "confP"],
 ):
 try:
 proj_plots.polarization_map(
@@ -331,7 +334,7 @@ def main(target=None, proposal_id=None, infiles=None, output_dir="./data", crop=
 flux_lim=flux_lim,
 step_vec=step_vec,
 scale_vec=scale_vec,
-savename="_".join([figname, figsuffix]),
+savename="_".join([savename, figsuffix]),
 plots_folder=plots_folder,
 display=figtype,
 )
@@ -339,7 +342,7 @@ def main(target=None, proposal_id=None, infiles=None, output_dir="./data", crop=
 pass
 elif not interactive:
 proj_plots.polarization_map(
-deepcopy(Stokes_hdul), data_mask, P_cut=P_cut, SNRi_cut=SNRi_cut, savename=figname, plots_folder=plots_folder, display="integrate"
+deepcopy(Stokes_hdul), data_mask, P_cut=P_cut, SNRi_cut=SNRi_cut, savename=savename, plots_folder=plots_folder, display="integrate"
 )
 elif pxscale.lower() not in ["full", "integrate"]:
 proj_plots.pol_map(Stokes_hdul, P_cut=P_cut, SNRi_cut=SNRi_cut, step_vec=step_vec, scale_vec=scale_vec, flux_lim=flux_lim)
@@ -47,9 +47,9 @@ def get_obs_data(infiles, data_folder="", compute_flux=False):
 wcs_array.append(WCS(header=f[0].header, fobj=f).celestial)
 f.flush()
 # Save pixel area for flux density computation
-if headers[i]["PXFORMT"] == "NORMAL":
+if "PXFORMT" in headers[i].keys() and headers[i]["PXFORMT"] == "NORMAL":
 headers[i]["PXAREA"] = 1.96e-4 # 14x14 milliarcsec squared pixel area in arcsec^2
-elif headers[i]["PXFORMT"] == "ZOOM":
+elif "PXFORMT" in headers[i].keys() and headers[i]["PXFORMT"] == "ZOOM":
 headers[i]["PXAREA"] = 4.06e-4 # 29x14 milliarcsec squared pixel area in arcsec^2
 else:
 headers[i]["PXAREA"] = 1.0 # unknown default to 1 arcsec^2
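The new guard above only protects against headers that lack PXFORMT. The same logic can be written as a lookup table; this is an illustrative rewrite (not part of the diff), assuming headers is the list of astropy FITS headers built in get_obs_data.

def set_pixel_area(headers):
    # Same guard as above, written as a lookup table with a safe default.
    PIXEL_AREA = {"NORMAL": 1.96e-4, "ZOOM": 4.06e-4}  # arcsec^2 (14x14 and 29x14 mas pixels)
    for head in headers:
        # Header.get returns the fallback when the keyword is missing, like dict.get.
        head["PXAREA"] = PIXEL_AREA.get(head.get("PXFORMT", ""), 1.0)  # unknown: 1 arcsec^2
    return headers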
@@ -85,10 +85,10 @@ def get_obs_data(infiles, data_folder="", compute_flux=False):
 # force WCS for POL60 to have same pixel size as POL0 and POL120
 is_pol60 = np.array([head["filtnam1"].lower() == "pol60" for head in headers], dtype=bool)
 cdelt = np.round(np.array([WCS(head).wcs.cdelt[:2] for head in headers]), 10)
-if np.unique(cdelt[np.logical_not(is_pol60)], axis=0).size != 2:
+if np.any(is_pol60) and np.unique(cdelt[np.logical_not(is_pol60)], axis=0).size != 2:
 print(np.unique(cdelt[np.logical_not(is_pol60)], axis=0))
 raise ValueError("Not all images have same pixel size")
-else:
+elif np.any(is_pol60):
 for i in np.arange(len(headers))[is_pol60]:
 headers[i]["cdelt1"], headers[i]["cdelt2"] = np.unique(cdelt[np.logical_not(is_pol60)], axis=0)[0]
 
@@ -174,7 +174,7 @@ def save_Stokes(
 header["PROPOSID"] = (header_stokes["PROPOSID"], "PEP proposal identifier for observation")
 header["TARGNAME"] = (header_stokes["TARGNAME"], "Target name")
 header["ORIENTAT"] = (header_stokes["ORIENTAT"], "Angle between North and the y-axis of the image")
-header["FILENAME"] = (filename, "ORIGINAL FILENAME")
+header["FILENAME"] = (filename, "Original filename")
 header["BKG_TYPE"] = (header_stokes["BKG_TYPE"], "Bkg estimation method used during reduction")
 header["BKG_SUB"] = (header_stokes["BKG_SUB"], "Amount of bkg subtracted from images")
 header["SMOOTH"] = (header_stokes["SMOOTH"] if "SMOOTH" in list(header_stokes.keys()) else "None", "Smoothing method used during reduction")
@@ -210,7 +210,7 @@ def save_Stokes(
 # Create HDUList object
 hdul = fits.HDUList([])
 
-# Add I_stokes as PrimaryHDU
+# Add Flux density as PrimaryHDU
 if flux_data is None:
 header["datatype"] = ("I_stokes", "type of data stored in the HDU")
 I_stokes[(1 - data_mask).astype(bool)] = 0.0
@@ -218,10 +218,16 @@ def save_Stokes(
 primary_hdu.name = "I_stokes"
 hdul.append(primary_hdu)
 else:
-flux_head["TELESCOP"], flux_head["INSTRUME"] = header["TELESCOP"], header["INSTRUME"]
-header["datatype"] = ("Flux map", "type of data stored in the HDU")
-primary_hdu = fits.PrimaryHDU(data=flux_data, header=flux_head)
-primary_hdu.name = "Flux map"
+flux_head["FILENAME"] = header["FILENAME"]
+head = WCS(flux_head).deepcopy().to_header()
+for key in [key for key in header.keys() if key not in ["SMOOTH", "SAMPLING"]]:
+try:
+head[key] = flux_head[key]
+except KeyError:
+head[key] = header[key]
+header["datatype"] = ("Flux_density", "type of data stored in the HDU")
+primary_hdu = fits.PrimaryHDU(data=flux_data, header=head)
+primary_hdu.name = "Flux_density"
 hdul.append(primary_hdu)
 header["datatype"] = ("I_stokes", "type of data stored in the HDU")
 I_stokes[(1 - data_mask).astype(bool)] = 0.0
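This is the core of the commit: the raw flux density, with its error and mask stacked as a 3-plane cube (flux_data=np.array([flux_data, flux_error, flux_mask]) in main), becomes the PrimaryHDU, and the Stokes maps follow as extensions. A self-contained sketch of that layout is below (not part of the diff); the function and file names are placeholders, not the pipeline's save_Stokes.

import numpy as np
from astropy.io import fits

def save_flux_stokes_sketch(flux, flux_err, flux_mask, flux_head, I_stokes, stokes_head, path):
    # Primary HDU: 3-plane cube (flux density, error, mask) with the raw-flux WCS header.
    primary = fits.PrimaryHDU(data=np.array([flux, flux_err, flux_mask.astype(float)]), header=flux_head)
    primary.name = "Flux_density"
    # Stokes I (and, in the real pipeline, Q, U, covariance, mask...) as extensions.
    i_hdu = fits.ImageHDU(data=I_stokes, header=stokes_head, name="I_stokes")
    fits.HDUList([primary, i_hdu]).writeto(path, overwrite=True)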
@@ -58,7 +58,6 @@ from mpl_toolkits.axes_grid1.anchored_artists import (
 AnchoredDirectionArrows,
 AnchoredSizeBar,
 )
-from scipy.ndimage import zoom as sc_zoom
 
 try:
 from .utils import PCconf, princ_angle, rot2D, sci_not
@@ -166,26 +165,29 @@ def plot_obs(
 )
 ax_curr.plot([x, x], [0, data.shape[0] - 1], "--", lw=2, color="g", alpha=0.85)
 ax_curr.plot([0, data.shape[1] - 1], [y, y], "--", lw=2, color="g", alpha=0.85)
-# position of centroid
-ax_curr.plot(
-[data.shape[1] / 2, data.shape[1] / 2],
-[0, data.shape[0] - 1],
-"--",
-lw=2,
-color="b",
-alpha=0.85,
-)
-ax_curr.plot(
-[0, data.shape[1] - 1],
-[data.shape[0] / 2, data.shape[0] / 2],
-"--",
-lw=2,
-color="b",
-alpha=0.85,
-)
+cr_x, cr_y = head["CRPIX1"], head["CRPIX2"]
+# Plot WCS reference point
+ax_curr.plot([cr_x], [cr_y], "+", lw=2, color="r", alpha=0.85)
 if rectangle is not None:
 x, y, width, height, angle, color = rectangle[i]
 ax_curr.add_patch(Rectangle((x, y), width, height, angle=angle, edgecolor=color, fill=False))
+# position of centroid
+ax_curr.plot(
+[data.shape[1] / 2, data.shape[1] / 2],
+[0, data.shape[0] - 1],
+"--",
+lw=2,
+color="b",
+alpha=0.85,
+)
+ax_curr.plot(
+[0, data.shape[1] - 1],
+[data.shape[0] / 2, data.shape[0] / 2],
+"--",
+lw=2,
+color="b",
+alpha=0.85,
+)
 ax_curr.annotate(
 instr + ":" + rootname,
 color="white",
@@ -269,7 +271,7 @@ def plot_Stokes(Stokes, savename=None, plots_folder=""):
 for dataset in [stkI, stkQ, stkU]:
 dataset[np.logical_not(data_mask)] = np.nan
 
-wcs = WCS(Stokes[0]).deepcopy()
+wcs = WCS(Stokes["I_STOKES"]).deepcopy()
 
 # Plot figure
 plt.rcParams.update({"font.size": 14})
@@ -373,6 +375,9 @@ def polarization_map(
 The figure and ax created for interactive contour maps.
 """
 # Get data
+flux = Stokes[0].data[0].copy() * Stokes[0].header["PHOTFLAM"]
+flux_error = Stokes[0].data[1].copy() * Stokes[0].header["PHOTFLAM"]
+flux_mask = Stokes[0].data[2].astype(bool).copy()
 stkI = Stokes["I_stokes"].data.copy()
 stkQ = Stokes["Q_stokes"].data.copy()
 stkU = Stokes["U_stokes"].data.copy()
@@ -387,6 +392,20 @@ def polarization_map(
 data_mask = np.zeros(stkI.shape).astype(bool)
 data_mask[stkI > 0.0] = True
 
+wcs = WCS(Stokes["I_STOKES"]).deepcopy()
+pivot_wav = Stokes["I_STOKES"].header["photplam"]
+convert_flux = Stokes["I_STOKES"].header["photflam"]
+
+# Get integrated flux values from sum
+I_diluted = stkI[data_mask].sum() * convert_flux
+I_diluted_err = np.sqrt(np.sum(stk_cov[0, 0][data_mask])) * convert_flux
+
+# Get integrated polarization values from header
+P_diluted = Stokes["I_STOKES"].header["P_int"]
+P_diluted_err = Stokes["I_STOKES"].header["sP_int"]
+PA_diluted = Stokes["I_STOKES"].header["PA_int"]
+PA_diluted_err = Stokes["I_STOKES"].header["sPA_int"]
+
 # Compute confidence level map
 QN, UN, QN_ERR, UN_ERR = np.full((4, stkI.shape[0], stkI.shape[1]), np.nan)
 for nflux, sflux in zip(
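The integrated values added above sum the Stokes I map and propagate the error by summing the diagonal of the covariance cube. A stripped-down sketch of that step (not part of the diff), with hypothetical argument names:

import numpy as np

def integrated_flux_sketch(stkI, var_I, mask, photflam):
    # Sum of the masked Stokes I map, converted with PHOTFLAM; the error adds
    # the per-pixel variances in quadrature (off-diagonal covariances ignored).
    flux = stkI[mask].sum() * photflam
    flux_err = np.sqrt(var_I[mask].sum()) * photflam
    return flux, flux_err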
@@ -402,12 +421,8 @@ def polarization_map(
 for j in range(3):
 stk_cov[i][j][np.logical_not(data_mask)] = np.nan
 
-wcs = WCS(Stokes[0]).deepcopy()
-pivot_wav = Stokes[0].header["photplam"]
-convert_flux = Stokes[0].header["photflam"]
-
 # Plot Stokes parameters map
-if display is None or display.lower() in ["default"]:
+if display is None or display.lower() in ["pol", "polarization", "polarisation", "pol_deg", "p"]:
 plot_Stokes(Stokes, savename=savename, plots_folder=plots_folder)
 
 # Compute SNR and apply cuts
@@ -451,7 +466,7 @@ def polarization_map(
 fig = plt.figure(figsize=(7 * ratiox, 7 * ratioy), layout="constrained")
 if ax is None:
 ax = fig.add_subplot(111, projection=wcs)
-ax.set(aspect="equal", fc="k")
+ax.set(aspect="equal", fc="k", xlim=(0, stkI.shape[1]), ylim=(0, stkI.shape[0]))
 # fig.subplots_adjust(hspace=0, wspace=0, left=0.102, right=1.02)
 
 # ax.coords.grid(True, color='white', ls='dotted', alpha=0.5)
@@ -501,7 +516,30 @@ def polarization_map(
 ax.set_facecolor("white")
 font_color = "black"
 
-if display.lower() in ["i", "intensity"]:
+if display.lower() in ["f", "flux", "fluxdensity"]:
+# If no display selected, show intensity map
+display = "f"
+if flux_lim is not None:
+vmin, vmax = flux_lim
+else:
+vmin, vmax = np.max(flux[flux > 0.0]) / 2e3, np.max(flux[flux > 0.0])
+imflux, cr = flux.copy(), WCS(Stokes[0].header).wcs.crpix.astype(int)
+imflux[cr[1] - 1 : cr[1] + 2, cr[0] - 1 : cr[0] + 2] = np.nan
+im = ax.imshow(
+imflux,
+transform=ax.get_transform(WCS(Stokes[0].header).celestial),
+norm=LogNorm(vmin, vmax),
+aspect="equal",
+cmap=kwargs["cmap"],
+alpha=1.0,
+)
+fig.colorbar(im, ax=ax, aspect=50, shrink=0.60, pad=0.025, label=r"$F_{\lambda}$ [$ergs \cdot cm^{-2} \cdot s^{-1} \cdot \AA$]")
+levelsF = np.array([0.8, 2.0, 5.0, 10.0, 20.0, 50.0]) / 100.0 * vmax
+print("Flux density contour levels : ", levelsF)
+ax.contour(flux, levels=levelsF, transform=ax.get_transform(WCS(Stokes[0].header).celestial), colors="grey", linewidths=0.5)
+ax.plot(*WCS(Stokes[1]).wcs.crpix, "g+")
+I_diluted, I_diluted_err = np.sum(flux[flux_mask]), np.sqrt(np.sum(flux_error[flux_mask] ** 2))
+elif display.lower() in ["i", "intensity"]:
 # If no display selected, show intensity map
 display = "i"
 if flux_lim is not None:
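The new "f" branch draws the raw flux density, kept in its own WCS, on top of axes built from the Stokes WCS, using WCSAxes' get_transform for on-the-fly reprojection. A self-contained sketch of that pattern follows (not part of the diff); the file name and extension layout are assumptions that mirror the format written by save_Stokes in this commit.

import matplotlib.pyplot as plt
from astropy.io import fits
from astropy.wcs import WCS
from matplotlib.colors import LogNorm

with fits.open("target_Stokes.fits") as hdul:  # hypothetical file name
    flux, flux_wcs = hdul[0].data[0], WCS(hdul[0].header).celestial
    stkI, stokes_wcs = hdul["I_STOKES"].data, WCS(hdul["I_STOKES"].header).celestial

ax = plt.subplot(projection=stokes_wcs)            # axes live in the Stokes frame
ax.imshow(stkI, cmap="inferno")
# Raw flux is reprojected on the fly through its own WCS.
ax.imshow(flux, transform=ax.get_transform(flux_wcs), norm=LogNorm(), alpha=0.5)
plt.show()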
@@ -512,9 +550,12 @@ def polarization_map(
 vmin, vmax = 1.0 / 2.0 * np.median(np.sqrt(stk_cov[0, 0][stkI > 0.0]) * convert_flux), np.max(stkI[stkI > 0.0] * convert_flux)
 im = ax.imshow(stkI * convert_flux, norm=LogNorm(vmin, vmax), aspect="equal", cmap=kwargs["cmap"], alpha=1.0)
 fig.colorbar(im, ax=ax, aspect=50, shrink=0.60, pad=0.025, label=r"$F_{\lambda}$ [$ergs \cdot cm^{-2} \cdot s^{-1} \cdot \AA^{-1}$]")
-levelsI = np.array([0.8, 2.0, 5.0, 10.0, 20.0, 50.0]) / 100.0 * vmax
-print("Flux density contour levels : ", levelsI)
-ax.contour(stkI * convert_flux, levels=levelsI, colors="grey", linewidths=0.5)
+# levelsI = np.array([0.8, 2.0, 5.0, 10.0, 20.0, 50.0]) / 100.0 * vmax
+# print("Stokes I contour levels : ", levelsI)
+# ax.contour(stkI * convert_flux, levels=levelsI, colors="grey", linewidths=0.5)
+levelsF = np.array([0.8, 2.0, 5.0, 10.0, 20.0, 50.0]) / 100.0 * np.max(flux[flux > 0.0])
+print("Flux density contour levels : ", levelsF)
+ax.contour(flux, levels=levelsF, transform=ax.get_transform(WCS(Stokes[0].header).celestial), colors="grey", linewidths=0.5)
 elif display.lower() in ["pf", "pol_flux"]:
 # Display polarization flux
 display = "pf"
@@ -604,22 +645,18 @@ def polarization_map(
 # Defaults to intensity map
 if flux_lim is not None:
 vmin, vmax = flux_lim
-elif mask.sum() > 0.0:
-vmin, vmax = 1.0 * np.mean(np.sqrt(stk_cov[0, 0][mask]) * convert_flux), np.max(stkI[stkI > 0.0] * convert_flux)
 else:
-vmin, vmax = 1.0 * np.mean(np.sqrt(stk_cov[0, 0][stkI > 0.0]) * convert_flux), np.max(stkI[stkI > 0.0] * convert_flux)
-im = ax.imshow(stkI * convert_flux, norm=LogNorm(vmin, vmax), aspect="equal", cmap=kwargs["cmap"], alpha=1.0)
+vmin, vmax = np.max(flux[flux > 0.0] * convert_flux) / 2e3, np.max(flux[flux > 0.0] * convert_flux)
+im = ax.imshow(
+flux * Stokes[0].header["PHOTFLAM"],
+transform=ax.get_transform(WCS(Stokes[0].header).celestial),
+norm=LogNorm(vmin, vmax),
+aspect="equal",
+cmap=kwargs["cmap"],
+alpha=1.0,
+)
 fig.colorbar(im, ax=ax, aspect=50, shrink=0.60, pad=0.025, label=r"$F_{\lambda}$ [$ergs \cdot cm^{-2} \cdot s^{-1} \cdot \AA$]")
-
-# Get integrated flux values from sum
-I_diluted = stkI[data_mask].sum()
-I_diluted_err = np.sqrt(np.sum(stk_cov[0, 0][data_mask]))
-
-# Get integrated polarization values from header
-P_diluted = Stokes[0].header["P_int"]
-P_diluted_err = Stokes[0].header["sP_int"]
-PA_diluted = Stokes[0].header["PA_int"]
-PA_diluted_err = Stokes[0].header["sPA_int"]
+I_diluted, I_diluted_err = np.sum(flux[flux_mask]), np.sqrt(np.sum(flux_error[flux_mask] ** 2))
 
 plt.rcParams.update({"font.size": 11})
 px_size = wcs.wcs.get_cdelt()[0] * 3600.0
@@ -648,12 +685,12 @@ def polarization_map(
 back_length=0.0,
 head_length=7.0,
 head_width=7.0,
-angle=-Stokes[0].header["orientat"],
+angle=-Stokes["I_STOKES"].header["orientat"],
 text_props={"ec": "k", "fc": font_color, "alpha": 1, "lw": 0.5},
 arrow_props={"ec": "k", "fc": font_color, "alpha": 1, "lw": 1},
 )
 
-if display.lower() in ["i", "s_i", "snri", "pf", "p", "pa", "s_p", "snrp", "confp"] and step_vec != 0:
+if display.lower() in ["f", "i", "s_i", "snri", "pf", "p", "pa", "s_p", "snrp", "confp"] and step_vec != 0:
 if scale_vec == -1:
 poldata[np.isfinite(poldata)] = 1.0 / 2.0
 step_vec = 1
@@ -701,7 +738,7 @@ def polarization_map(
 ax.annotate(
 r"$F_{{\lambda}}^{{int}}$({0:.0f} $\AA$) = {1} $ergs \cdot cm^{{-2}} \cdot s^{{-1}} \cdot \AA^{{-1}}$".format(
 pivot_wav,
-sci_not(I_diluted * convert_flux, I_diluted_err * convert_flux, 2),
+sci_not(I_diluted, I_diluted_err, 2),
 )
 + "\n"
 + r"$P^{{int}}$ = {0:.1f} $\pm$ {1:.1f} %".format(P_diluted * 100.0, P_diluted_err * 100.0)
@@ -721,7 +758,7 @@ def polarization_map(
 ax.annotate(
 r"$F_{{\lambda}}^{{int}}$({0:.0f} $\AA$) = {1} $ergs \cdot cm^{{-2}} \cdot s^{{-1}} \cdot \AA^{{-1}}$".format(
 pivot_wav,
-sci_not(I_diluted * convert_flux, I_diluted_err * convert_flux, 2),
+sci_not(I_diluted, I_diluted_err, 2),
 ),
 color="white",
 xy=(0.01, 1.00),
@@ -1373,6 +1410,8 @@ class overplot_chandra(align_maps):
 other_data = deepcopy(self.other_data)
 other_wcs = self.other_wcs.deepcopy()
 if zoom != 1:
+from scipy.ndimage import zoom as sc_zoom
+
 other_data = sc_zoom(other_data, zoom)
 other_wcs.wcs.crpix *= zoom
 other_wcs.wcs.cdelt /= zoom
@@ -2947,9 +2986,9 @@ class pol_map(object):
 self.conf = PCconf(self.QN, self.UN, self.QN_ERR, self.UN_ERR)
 
 # Get data
-self.targ = self.Stokes[0].header["targname"]
-self.pivot_wav = self.Stokes[0].header["photplam"]
-self.map_convert = self.Stokes[0].header["photflam"]
+self.targ = self.Stokes["I_STOKES"].header["targname"]
+self.pivot_wav = self.Stokes["I_STOKES"].header["photplam"]
+self.map_convert = self.Stokes["I_STOKES"].header["photflam"]
 
 # Create figure
 plt.rcParams.update({"font.size": 10})
@@ -3062,7 +3101,7 @@ class pol_map(object):
 
 def select_roi(event):
 if self.data is None:
-self.data = self.Stokes[0].data
+self.data = self.Stokes["I_STOKES"].data
 if self.selected:
 self.selected = False
 self.region = deepcopy(self.select_instance.mask.astype(bool))
@@ -3111,7 +3150,7 @@ class pol_map(object):
 
 def select_aperture(event):
 if self.data is None:
-self.data = self.Stokes[0].data
+self.data = self.Stokes["I_STOKES"].data
 if self.selected:
 self.selected = False
 self.select_instance.update_mask()
@@ -3200,7 +3239,7 @@ class pol_map(object):
 
 def select_slit(event):
 if self.data is None:
-self.data = self.Stokes[0].data
+self.data = self.Stokes["I_STOKES"].data
 if self.selected:
 self.selected = False
 self.select_instance.update_mask()
@@ -3534,7 +3573,19 @@ class pol_map(object):
 
 @property
 def wcs(self):
-return WCS(self.Stokes[0].header).celestial.deepcopy()
+return WCS(self.Stokes["I_STOKES"].header).celestial.deepcopy()
+
+@property
+def Flux(self):
+return self.Stokes[0].data[0] * self.Stokes[0].header["PHOTFLAM"]
+
+@property
+def Flux_err(self):
+return self.Stokes[0].data[1] * self.Stokes[0].header["PHOTFLAM"]
+
+@property
+def Flux_mask(self):
+return self.Stokes[0].data[2].astype(bool)
 
 @property
 def I(self):
@@ -3598,7 +3649,7 @@ class pol_map(object):
 
 @property
 def data_mask(self):
-return self.Stokes["DATA_MASK"].data
+return self.Stokes["DATA_MASK"].data.astype(bool)
 
 def set_data_mask(self, mask):
 self.Stokes[np.argmax([self.Stokes[i].header["datatype"] == "Data_mask" for i in range(len(self.Stokes))])].data = mask.astype(float)
@@ -3682,7 +3733,7 @@ class pol_map(object):
 back_length=0.0,
 head_length=10.0,
 head_width=10.0,
-angle=-self.Stokes[0].header["orientat"],
+angle=-self.Stokes["I_STOKES"].header["orientat"],
 color="white",
 text_props={"ec": None, "fc": "w", "alpha": 1, "lw": 0.4},
 arrow_props={"ec": None, "fc": "w", "alpha": 1, "lw": 1},
@@ -3690,21 +3741,23 @@ class pol_map(object):
 ax.add_artist(self.north_dir)
 
 def display(self, fig=None, ax=None, flux_lim=None):
-norm = None
-if self.display_selection is None:
-self.display_selection = "total_flux"
+kwargs = dict([])
 if flux_lim is None:
 flux_lim = self.flux_lim
-if self.display_selection.lower() in ["total_flux"]:
-self.data = self.I * self.map_convert
+if self.display_selection is None or self.display_selection.lower() in ["total_flux"]:
+self.data = self.Flux # self.I * self.map_convert
 if flux_lim is None:
 vmin, vmax = (
-1.0 / 2.0 * np.median(self.data[self.data > 0.0]),
-np.max(self.data[self.data > 0.0]),
+1.0 / 2.0 * np.median(self.I[self.I > 0.0] * self.map_convert),
+np.max(self.I[self.I > 0.0] * self.map_convert),
 )
 else:
 vmin, vmax = flux_lim
-norm = LogNorm(vmin, vmax)
+kwargs["norm"] = LogNorm(vmin, vmax)
+if ax is None:
+kwargs["transform"] = self.ax.get_transform(WCS(self.Stokes[0].header).celestial)
+else:
+kwargs["transform"] = ax.get_transform(WCS(self.Stokes[0].header).celestial)
 label = r"$F_{\lambda}$ [$ergs \cdot cm^{-2} \cdot s^{-1} \cdot \AA^{-1}$]"
 elif self.display_selection.lower() in ["pol_flux"]:
 self.data = self.I * self.map_convert * self.P
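The refactor above collects the imshow options (log norm, per-display vmin/vmax, and the WCS transform for the raw flux) into one kwargs dict that is splatted at a single call site. A minimal, self-contained illustration of the pattern (not part of the diff); it assumes ax is a WCSAxes instance and other_header is a FITS header.

from astropy.wcs import WCS
from matplotlib.colors import LogNorm

def imshow_kwargs_sketch(ax, data, vmin, vmax, other_header=None, log=True):
    # Build the imshow options once, then splat them: the same call serves
    # linear maps (vmin/vmax), log maps (norm) and reprojected maps (transform).
    kwargs = {"norm": LogNorm(vmin, vmax)} if log else {"vmin": vmin, "vmax": vmax}
    if other_header is not None:
        kwargs["transform"] = ax.get_transform(WCS(other_header).celestial)
    return ax.imshow(data, aspect="equal", cmap="inferno", **kwargs)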
@@ -3715,28 +3768,28 @@ class pol_map(object):
 )
 else:
 vmin, vmax = flux_lim
-norm = LogNorm(vmin, vmax)
+kwargs["norm"] = LogNorm(vmin, vmax)
 label = r"$P \cdot F_{\lambda}$ [$ergs \cdot cm^{-2} \cdot s^{-1} \cdot \AA^{-1}$]"
 elif self.display_selection.lower() in ["pol_deg"]:
 self.data = self.P * 100.0
-vmin, vmax = 0.0, np.max(self.data[self.P > self.s_P])
+kwargs["vmin"], kwargs["vmax"] = 0.0, np.max(self.data[self.P > self.s_P])
 label = r"$P$ [%]"
 elif self.display_selection.lower() in ["pol_ang"]:
 self.data = princ_angle(self.PA)
-vmin, vmax = 0, 180.0
+kwargs["vmin"], kwargs["vmax"] = 0, 180.0
 label = r"$\theta_{P}$ [°]"
 elif self.display_selection.lower() in ["snri"]:
 s_I = np.sqrt(self.IQU_cov[0, 0])
 SNRi = np.zeros(self.I.shape)
 SNRi[s_I > 0.0] = self.I[s_I > 0.0] / s_I[s_I > 0.0]
 self.data = SNRi
-vmin, vmax = 0.0, np.max(self.data[self.data > 0.0])
+kwargs["vmin"], kwargs["vmax"] = 0.0, np.max(self.data[self.data > 0.0])
 label = r"$I_{Stokes}/\sigma_{I}$"
 elif self.display_selection.lower() in ["snrp"]:
 SNRp = np.zeros(self.P.shape)
 SNRp[self.s_P > 0.0] = self.P[self.s_P > 0.0] / self.s_P[self.s_P > 0.0]
 self.data = SNRp
-vmin, vmax = 0.0, np.max(self.data[self.data > 0.0])
+kwargs["vmin"], kwargs["vmax"] = 0.0, np.max(self.data[self.data > 0.0])
 label = r"$P/\sigma_{P}$"
 
 if fig is None:
@@ -3747,22 +3800,17 @@ class pol_map(object):
 self.cbar.remove()
 if hasattr(self, "im"):
 self.im.remove()
-if norm is not None:
-self.im = ax.imshow(self.data, norm=norm, aspect="equal", cmap="inferno")
-else:
-self.im = ax.imshow(self.data, vmin=vmin, vmax=vmax, aspect="equal", cmap="inferno")
+self.im = ax.imshow(self.data, aspect="equal", cmap="inferno", **kwargs)
+ax.set(xlim=(0, self.I.shape[1]), ylim=(0, self.I.shape[0]))
 plt.rcParams.update({"font.size": 14})
 self.cbar = fig.colorbar(self.im, ax=ax, aspect=50, shrink=0.75, pad=0.025, label=label)
 plt.rcParams.update({"font.size": 10})
 fig.canvas.draw_idle()
 return self.im
 else:
-if norm is not None:
-im = ax.imshow(self.data, norm=norm, aspect="equal", cmap="inferno")
-else:
-im = ax.imshow(self.data, vmin=vmin, vmax=vmax, aspect="equal", cmap="inferno")
-ax.set_xlim(0, self.data.shape[1])
-ax.set_ylim(0, self.data.shape[0])
+im = ax.imshow(self.data, aspect="equal", cmap="inferno", **kwargs)
+ax.set_xlim(0, self.I.shape[1])
+ax.set_ylim(0, self.I.shape[0])
 plt.rcParams.update({"font.size": 14})
 fig.colorbar(im, ax=ax, aspect=50, shrink=0.75, pad=0.025, label=label)
 plt.rcParams.update({"font.size": 10})
@@ -3789,24 +3837,24 @@ class pol_map(object):
 ax = self.ax
 if hasattr(self, "quiver"):
 self.quiver.remove()
-self.quiver = ax.quiver(
-X[:: self.step_vec, :: self.step_vec],
-Y[:: self.step_vec, :: self.step_vec],
-XY_U[:: self.step_vec, :: self.step_vec],
-XY_V[:: self.step_vec, :: self.step_vec],
-units="xy",
-scale=1.0 / scale_vec,
-scale_units="xy",
-pivot="mid",
-headwidth=0.0,
-headlength=0.0,
-headaxislength=0.0,
-width=0.3,
-linewidth=0.6,
-color="white",
-edgecolor="black",
-)
 if self.pa_err:
+self.quiver = ax.quiver(
+X[:: self.step_vec, :: self.step_vec],
+Y[:: self.step_vec, :: self.step_vec],
+XY_U[:: self.step_vec, :: self.step_vec],
+XY_V[:: self.step_vec, :: self.step_vec],
+units="xy",
+scale=1.0 / scale_vec,
+scale_units="xy",
+pivot="mid",
+headwidth=0.0,
+headlength=0.0,
+headaxislength=0.0,
+width=0.1,
+# linewidth=0.6,
+color="black",
+edgecolor="black",
+)
 XY_U_err1, XY_V_err1 = (
 P_cut * np.cos(np.pi / 2.0 + (self.PA + 3.0 * self.s_PA) * np.pi / 180.0),
 P_cut * np.sin(np.pi / 2.0 + (self.PA + 3.0 * self.s_PA) * np.pi / 180.0),
@@ -3855,6 +3903,25 @@ class pol_map(object):
 edgecolor="black",
 ls="dashed",
 )
+else:
+self.quiver = ax.quiver(
+X[:: self.step_vec, :: self.step_vec],
+Y[:: self.step_vec, :: self.step_vec],
+XY_U[:: self.step_vec, :: self.step_vec],
+XY_V[:: self.step_vec, :: self.step_vec],
+units="xy",
+scale=1.0 / scale_vec,
+scale_units="xy",
+pivot="mid",
+headwidth=0.0,
+headlength=0.0,
+headaxislength=0.0,
+width=0.3,
+linewidth=0.6,
+color="white",
+edgecolor="black",
+)
+
 fig.canvas.draw_idle()
 return self.quiver
 else:
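The quiver calls above draw headless segments whose orientation encodes the polarization angle. A standalone sketch of how such vectors can be built from P and PA and drawn with matplotlib (placeholder random data, not the pol_map class):

import numpy as np
import matplotlib.pyplot as plt

P = np.random.uniform(0.0, 0.2, (40, 40))        # placeholder polarization degree
PA = np.random.uniform(0.0, 180.0, (40, 40))     # placeholder angle, deg East of North
X, Y = np.meshgrid(np.arange(40), np.arange(40))
U = P * np.cos(np.pi / 2.0 + np.radians(PA))     # same orientation convention as above
V = P * np.sin(np.pi / 2.0 + np.radians(PA))
step = 2                                         # plot one vector every `step` pixels
plt.quiver(X[::step, ::step], Y[::step, ::step], U[::step, ::step], V[::step, ::step],
           units="xy", scale=0.05, scale_units="xy", pivot="mid",
           headwidth=0.0, headlength=0.0, headaxislength=0.0,
           width=0.3, linewidth=0.6, color="white", edgecolor="black")
plt.gca().set_facecolor("black")
plt.show()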
@@ -3926,12 +3993,20 @@ class pol_map(object):
 str_conf = ""
 if self.region is None:
 s_I = np.sqrt(self.IQU_cov[0, 0])
-I_reg = self.I.sum()
-I_reg_err = np.sqrt(np.sum(s_I**2))
-P_reg = self.Stokes[0].header["P_int"]
-P_reg_err = self.Stokes[0].header["sP_int"]
-PA_reg = self.Stokes[0].header["PA_int"]
-PA_reg_err = self.Stokes[0].header["sPA_int"]
+I_reg = (
+np.sum(self.Flux[self.Flux_mask]) / self.map_convert
+if self.display_selection is None or self.display_selection.lower() in ["total_flux"]
+else np.sum(self.I[self.data_mask])
+)
+I_reg_err = (
+np.sqrt(np.sum(self.Flux_err[self.Flux_mask] ** 2)) / self.map_convert
+if self.display_selection is None or self.display_selection.lower() in ["total_flux"]
+else np.sqrt(np.sum(s_I[self.data_mask] ** 2))
+)
+P_reg = self.Stokes["I_STOKES"].header["P_int"]
+P_reg_err = self.Stokes["I_STOKES"].header["sP_int"]
+PA_reg = self.Stokes["I_STOKES"].header["PA_int"]
+PA_reg_err = self.Stokes["I_STOKES"].header["sPA_int"]
 
 s_I = np.sqrt(self.IQU_cov[0, 0])
 s_Q = np.sqrt(self.IQU_cov[1, 1])
@@ -52,7 +52,6 @@ from scipy.ndimage import rotate as sc_rotate
 from scipy.ndimage import shift as sc_shift
 from scipy.signal import fftconvolve
 
-from .background import bkg_fit, bkg_hist, bkg_mini
 from .convex_hull import image_hull
 from .cross_correlation import phase_cross_correlation
 from .deconvolve import deconvolve_im, gaussian2d, gaussian_psf, zeropad
@@ -310,6 +309,8 @@ def crop_array(data_array, headers, error_array=None, data_mask=None, step=5, nu
 # Update CRPIX value in the associated header
 curr_wcs = WCS(crop_headers[i]).celestial.deepcopy()
 curr_wcs.wcs.crpix[:2] = curr_wcs.wcs.crpix[:2] - np.array([v_array[2], v_array[0]])
+curr_wcs.array_shape = crop_array[i].shape
+curr_wcs.wcs.set()
 crop_headers[i].update(curr_wcs.to_header())
 crop_headers[i]["naxis1"], crop_headers[i]["naxis2"] = crop_array[i].shape
 
@@ -528,20 +529,22 @@ def get_error(
 # estimated to less than 3%
 err_flat = data * 0.03
 
+from .background import bkg_fit, bkg_hist, bkg_mini
+
 if sub_type is None:
 n_data_array, c_error_bkg, headers, background = bkg_hist(
 data, error, mask, headers, subtract_error=subtract_error, display=display, savename=savename, plots_folder=plots_folder
 )
 sub_type, subtract_error = "histogram ", str(int(subtract_error > 0.0))
 elif isinstance(sub_type, str):
-if sub_type.lower() in ["auto"]:
+if sub_type.lower() in ["fit"]:
 n_data_array, c_error_bkg, headers, background = bkg_fit(
 data, error, mask, headers, subtract_error=subtract_error, display=display, savename=savename, plots_folder=plots_folder
 )
 sub_type, subtract_error = "histogram fit ", "mean+%.1fsigma" % subtract_error if subtract_error != 0.0 else 0.0
 else:
 n_data_array, c_error_bkg, headers, background = bkg_hist(
-data, error, mask, headers, sub_type=sub_type, subtract_error=subtract_error, display=display, savename=savename, plots_folder=plots_folder
+data, error, mask, headers, n_bins=sub_type, subtract_error=subtract_error, display=display, savename=savename, plots_folder=plots_folder
 )
 sub_type, subtract_error = "histogram ", "mean+%.1fsigma" % subtract_error if subtract_error != 0.0 else 0.0
 elif isinstance(sub_type, tuple):
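The error_sub_type option (scott, auto, sqrt, ...) selects the binning rule used when the sky background is read off an intensity histogram. A minimal, generic sketch of that idea follows (not part of the diff); it relies on numpy's histogram_bin_edges rules and is not the pipeline's bkg_hist or bkg_fit.

import numpy as np

def background_sketch(image, bins="scott", n_sigma=3.0):
    # Estimate the background as the mode of the intensity histogram, then
    # return the level to subtract: mode + n_sigma * (crude) faint-side scatter.
    values = image[np.isfinite(image)]
    edges = np.histogram_bin_edges(values, bins=bins)   # "scott", "auto", "sqrt", ...
    counts, edges = np.histogram(values, bins=edges)
    mode = 0.5 * (edges[np.argmax(counts)] + edges[np.argmax(counts) + 1])
    scatter = np.std(values[values <= mode])
    return mode + n_sigma * scatter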
@@ -675,7 +678,9 @@ def rebin_array(data_array, error_array, headers, pxsize=2, scale="px", operatio
 nw.wcs.cdelt *= Dxy
 nw.wcs.crpix /= Dxy
+nw.array_shape = new_shape
+nw.wcs.set()
 new_header["NAXIS1"], new_header["NAXIS2"] = nw.array_shape
 new_header["PXAREA"] *= Dxy[0] * Dxy[1]
 for key, val in nw.to_header().items():
 new_header.set(key, val)
 new_header["SAMPLING"] = (str(pxsize) + scale, "Resampling performed during reduction")
@@ -853,7 +858,10 @@ def align_data(
 new_crpix = np.array([wcs.wcs.crpix for wcs in headers_wcs]) + shifts[:, ::-1] + res_shift[::-1]
 for i in range(len(headers_wcs)):
 headers_wcs[i].wcs.crpix = new_crpix[0]
+headers_wcs[i].array_shape = (res_shape, res_shape)
+headers_wcs[i].wcs.set()
 headers[i].update(headers_wcs[i].to_header())
+headers[i]["NAXIS1"], headers[i]["NAXIS2"] = res_shape, res_shape
 
 data_mask = rescaled_mask.all(axis=0)
 data_array, error_array, data_mask, headers = crop_array(rescaled_image, headers, rescaled_error, data_mask, null_val=0.01 * background)
@@ -1722,13 +1730,11 @@ def rotate_Stokes(I_stokes, Q_stokes, U_stokes, Stokes_cov, data_mask, header_st
 
 new_wcs.wcs.pc = np.dot(mrot, new_wcs.wcs.pc)
 new_wcs.wcs.crpix = np.dot(mrot, new_wcs.wcs.crpix - old_center[::-1]) + new_center[::-1]
+new_wcs.array_shape = shape
+new_wcs.wcs.set()
 for key, val in new_wcs.to_header().items():
 new_header_stokes.set(key, val)
-if new_wcs.wcs.pc[0, 0] == 1.0:
-new_header_stokes.set("PC1_1", 1.0)
-if new_wcs.wcs.pc[1, 1] == 1.0:
-new_header_stokes.set("PC2_2", 1.0)
 new_header_stokes["NAXIS1"], new_header_stokes["NAXIS2"] = new_wcs.array_shape
 new_header_stokes["ORIENTAT"] += ang
 
 # Nan handling :
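Both rotate_Stokes and rotate_data update the WCS by multiplying the PC matrix and the re-centred CRPIX by the same 2D rotation matrix, then re-validating with wcs.set(). A small self-contained sketch of that update (not part of the diff), assuming the header carries a PC matrix, pixel centres given as (y, x) arrays, and a simple rot2D-style helper:

import numpy as np
from astropy.wcs import WCS

def rot2d(ang_deg):
    # Plane rotation matrix, angle in degrees (counter-clockwise).
    c, s = np.cos(np.radians(ang_deg)), np.sin(np.radians(ang_deg))
    return np.array([[c, -s], [s, c]])

def rotate_wcs_sketch(header, ang_deg, old_center, new_center, shape):
    new_wcs = WCS(header).celestial.deepcopy()
    mrot = rot2d(ang_deg)
    new_wcs.wcs.pc = mrot @ new_wcs.wcs.pc                                   # rotate orientation
    new_wcs.wcs.crpix = mrot @ (new_wcs.wcs.crpix - old_center[::-1]) + new_center[::-1]
    new_wcs.array_shape = shape
    new_wcs.wcs.set()                                                        # re-validate the WCS
    return new_wcs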
@@ -1829,9 +1835,11 @@ def rotate_data(data_array, error_array, data_mask, headers):
 new_wcs = WCS(header).celestial.deepcopy()
 new_wcs.wcs.pc[:2, :2] = np.dot(mrot, new_wcs.wcs.pc[:2, :2])
 new_wcs.wcs.crpix[:2] = np.dot(mrot, new_wcs.wcs.crpix[:2] - old_center[::-1]) + new_center[::-1]
+new_wcs.array_shape = shape
+new_wcs.wcs.set()
 for key, val in new_wcs.to_header().items():
 new_header[key] = val
 new_header["NAXIS1"], new_header["NAXIS2"] = new_wcs.array_shape
 new_header["ORIENTAT"] = np.arccos(new_wcs.celestial.wcs.pc[0, 0]) * 180.0 / np.pi
 new_header["ROTATE"] = ang
 new_headers.append(new_header)