better plots and filenames

2024-07-08 17:05:42 +02:00
parent d8365e984d
commit 155717a585
3 changed files with 125 additions and 153 deletions

View File

@@ -17,7 +17,7 @@ from lib.utils import princ_angle, sci_not
 from matplotlib.colors import LogNorm
-def main(target=None, proposal_id=None, infiles=None, output_dir="./data", crop=False, interactive=False, **kwargs):
+def main(target=None, proposal_id=None, infiles=None, output_dir="./data", crop=False, interactive=False):
     # Reduction parameters
     # Deconvolution
     deconvolve = False
@@ -36,12 +36,12 @@ def main(target=None, proposal_id=None, infiles=None, output_dir="./data", crop=
     # Background estimation
     error_sub_type = "freedman-diaconis"  # sqrt, sturges, rice, scott, freedman-diaconis (default) or shape (example (51, 51))
-    subtract_error = 0.7
+    subtract_error = 1.0
     display_bkg = False
     # Data binning
-    pxsize = 0.1
-    pxscale = "arcsec"  # pixel, arcsec or full
+    pxsize = 2
+    pxscale = "px"  # pixel, arcsec or full
     rebin_operation = "sum"  # sum or average
     # Alignement
@@ -54,8 +54,8 @@ def main(target=None, proposal_id=None, infiles=None, output_dir="./data", crop=
     # Smoothing
     smoothing_function = "combine"  # gaussian_after, weighted_gaussian_after, gaussian, weighted_gaussian or combine
-    smoothing_FWHM = 0.2  # If None, no smoothing is done
-    smoothing_scale = "arcsec"  # pixel or arcsec
+    smoothing_FWHM = 2.0  # If None, no smoothing is done
+    smoothing_scale = "px"  # pixel or arcsec
     # Rotation
     rotate_North = True
@@ -64,31 +64,10 @@ def main(target=None, proposal_id=None, infiles=None, output_dir="./data", crop=
     SNRp_cut = 3.0  # P measurments with SNR>3
     SNRi_cut = 1.0  # I measurments with SNR>30, which implies an uncertainty in P of 4.7%.
     flux_lim = None  # lowest and highest flux displayed on plot, defaults to bkg and maximum in cut if None
-    scale_vec = 3
+    scale_vec = 5
     step_vec = 1  # plot all vectors in the array. if step_vec = 2, then every other vector will be plotted if step_vec = 0 then all vectors are displayed at full length
     # Pipeline start
-    # Step 0:
-    # Get parameters from kwargs
-    for key, value in [
-        ["error_sub_type", error_sub_type],
-        ["subtract_error", subtract_error],
-        ["pxsize", pxsize],
-        ["pxscale", pxscale],
-        ["smoothing_function", smoothing_function],
-        ["smoothing_FWHM", smoothing_FWHM],
-        ["smoothing_scale", smoothing_scale],
-        ["SNRp_cut", SNRp_cut],
-        ["SNRi_cut", SNRi_cut],
-        ["flux_lim", flux_lim],
-        ["scale_vec", scale_vec],
-        ["step_vec", step_vec],
-    ]:
-        try:
-            value = kwargs[key]
-        except KeyError:
-            pass
-    rebin = True if pxsize is not None else False
     # Step 1:
     # Get data from fits files and translate to flux in erg/cm²/s/Angstrom.
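The deleted "Step 0" block never actually propagated kwargs into the reduction parameters: the loop only rebound its local variable value, leaving names like subtract_error untouched, so dropping it together with the **kwargs parameter loses no behavior. A minimal standalone sketch (illustration only, not part of the commit) of why the block was a no-op:

subtract_error = 0.7
kwargs = {"subtract_error": 1.0}
for key, value in [["subtract_error", subtract_error]]:
    try:
        value = kwargs[key]  # rebinds the loop variable only
    except KeyError:
        pass
print(subtract_error)  # still 0.7: the override never reached the outer name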
@@ -119,19 +98,18 @@ def main(target=None, proposal_id=None, infiles=None, output_dir="./data", crop=
     figname = "_".join([target, "FOC"])
     figtype = ""
-    if rebin:
+    if (pxsize is not None) and not (pxsize == 1 and pxscale.lower() in ["px", "pixel", "pixels"]):
         if pxscale not in ["full"]:
             figtype = "".join(["b", "{0:.2f}".format(pxsize), pxscale])  # additionnal informations
         else:
             figtype = "full"
-    if smoothing_FWHM is not None:
-        figtype += "_" + "".join(
-            ["".join([s[0] for s in smoothing_function.split("_")]), "{0:.2f}".format(smoothing_FWHM), smoothing_scale]
-        )  # additionnal informations
+    if smoothing_FWHM is not None and smoothing_scale is not None:
+        smoothstr = "".join([*[s[0] for s in smoothing_function.split("_")], "{0:.2f}".format(smoothing_FWHM), smoothing_scale])
+        figtype = "_".join([figtype, smoothstr] if figtype != "" else [smoothstr])
     if deconvolve:
-        figtype += "_deconv"
+        figtype = "_".join([figtype, "deconv"] if figtype != "" else ["deconv"])
     if align_center is None:
-        figtype += "_not_aligned"
+        figtype = "_".join([figtype, "not_aligned"] if figtype != "" else ["not_aligned"])
     # Crop data to remove outside blank margins.
     data_array, error_array, headers = proj_red.crop_array(
@@ -159,7 +137,7 @@ def main(target=None, proposal_id=None, infiles=None, output_dir="./data", crop=
     )
     # Rotate data to have same orientation
-    rotate_data = np.unique([float(head["ORIENTAT"]) for head in headers]).size != 1
+    rotate_data = np.unique([np.round(float(head["ORIENTAT"]), 3) for head in headers]).size != 1
     if rotate_data:
         ang = np.mean([head["ORIENTAT"] for head in headers])
         for head in headers:
@@ -199,7 +177,7 @@ def main(target=None, proposal_id=None, infiles=None, output_dir="./data", crop=
     )
     # Rebin data to desired pixel size.
-    if rebin:
+    if (pxsize is not None) and not (pxsize == 1 and pxscale.lower() in ["px", "pixel", "pixels"]):
         data_array, error_array, headers, Dxy, data_mask = proj_red.rebin_array(
             data_array, error_array, headers, pxsize=pxsize, scale=pxscale, operation=rebin_operation, data_mask=data_mask
         )
@@ -246,7 +224,9 @@ def main(target=None, proposal_id=None, infiles=None, output_dir="./data", crop=
     I_stokes, Q_stokes, U_stokes, Stokes_cov, data_mask, header_stokes = proj_red.rotate_Stokes(
         I_stokes, Q_stokes, U_stokes, Stokes_cov, data_mask, header_stokes, SNRi_cut=None
     )
-    I_bkg, Q_bkg, U_bkg, S_cov_bkg, _, _ = proj_red.rotate_Stokes(I_bkg, Q_bkg, U_bkg, S_cov_bkg, np.array(True).reshape(1, 1), header_bkg, SNRi_cut=None)
+    I_bkg, Q_bkg, U_bkg, S_cov_bkg, data_mask_bkg, header_bkg = proj_red.rotate_Stokes(
+        I_bkg, Q_bkg, U_bkg, S_cov_bkg, np.array(True).reshape(1, 1), header_bkg, SNRi_cut=None
+    )
     # Compute polarimetric parameters (polarization degree and angle).
     P, debiased_P, s_P, s_P_P, PA, s_PA, s_PA_P = proj_red.compute_pol(I_stokes, Q_stokes, U_stokes, Stokes_cov, header_stokes)
@@ -273,6 +253,7 @@ def main(target=None, proposal_id=None, infiles=None, output_dir="./data", crop=
         data_folder=data_folder,
         return_hdul=True,
     )
+    outfiles.append("/".join([data_folder, Stokes_hdul[0].header["FILENAME"] + ".fits"]))
     # Step 5:
     # crop to desired region of interest (roi)
@@ -281,15 +262,16 @@ def main(target=None, proposal_id=None, infiles=None, output_dir="./data", crop=
         stokescrop = proj_plots.crop_Stokes(deepcopy(Stokes_hdul), norm=LogNorm())
         stokescrop.crop()
         stokescrop.write_to("/".join([data_folder, figname + ".fits"]))
-        Stokes_hdul, headers = stokescrop.hdul_crop, [dataset.header for dataset in stokescrop.hdul_crop]
+        Stokes_hdul, header_stokes = stokescrop.hdul_crop, stokescrop.hdul_crop[0].header
+        outfiles.append("/".join([data_folder, Stokes_hdul[0].header["FILENAME"] + ".fits"]))
     data_mask = Stokes_hdul["data_mask"].data.astype(bool)
     print(
         "F_int({0:.0f} Angs) = ({1} ± {2})e{3} ergs.cm^-2.s^-1.Angs^-1".format(
-            header_stokes["photplam"],
+            header_stokes["PHOTPLAM"],
             *sci_not(
-                Stokes_hdul[0].data[data_mask].sum() * header_stokes["photflam"],
-                np.sqrt(Stokes_hdul[3].data[0, 0][data_mask].sum()) * header_stokes["photflam"],
+                Stokes_hdul[0].data[data_mask].sum() * header_stokes["PHOTFLAM"],
+                np.sqrt(Stokes_hdul[3].data[0, 0][data_mask].sum()) * header_stokes["PHOTFLAM"],
                 2,
                 out=int,
             ),
@@ -421,8 +403,6 @@ def main(target=None, proposal_id=None, infiles=None, output_dir="./data", crop=
     elif pxscale.lower() not in ["full", "integrate"]:
         proj_plots.pol_map(Stokes_hdul, SNRp_cut=SNRp_cut, SNRi_cut=SNRi_cut, flux_lim=flux_lim)
-    outfiles.append("/".join([data_folder, Stokes_hdul[0].header["FILENAME"]+".fits"]))
     return outfiles
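Both former "if rebin:" sites now repeat the explicit binning condition, and every filename suffix is joined from non-empty parts, so figtype no longer picks up a leading underscore when no binning suffix is set. A minimal sketch of how the suffix composes under the new defaults (illustration only: the helper name build_figtype is made up, and the aligned flag stands in for align_center is None):

def build_figtype(pxsize=2, pxscale="px", smoothing_function="combine", smoothing_FWHM=2.0, smoothing_scale="px", deconvolve=False, aligned=True):
    figtype = ""
    if (pxsize is not None) and not (pxsize == 1 and pxscale.lower() in ["px", "pixel", "pixels"]):
        figtype = "full" if pxscale == "full" else "".join(["b", "{0:.2f}".format(pxsize), pxscale])
    if smoothing_FWHM is not None and smoothing_scale is not None:
        smoothstr = "".join([*[s[0] for s in smoothing_function.split("_")], "{0:.2f}".format(smoothing_FWHM), smoothing_scale])
        figtype = "_".join([figtype, smoothstr] if figtype != "" else [smoothstr])
    if deconvolve:
        figtype = "_".join([figtype, "deconv"] if figtype != "" else ["deconv"])
    if not aligned:
        figtype = "_".join([figtype, "not_aligned"] if figtype != "" else ["not_aligned"])
    return figtype

print(build_figtype())          # "b2.00px_c2.00px"
print(build_figtype(pxsize=1))  # "c2.00px": no leading "_" when there is no binning suffix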

View File

@@ -182,23 +182,23 @@ def plot_Stokes(Stokes, savename=None, plots_folder=""):
     wcs = WCS(Stokes[0]).deepcopy()
     # Plot figure
-    plt.rcParams.update({"font.size": 12})
+    plt.rcParams.update({"font.size": 14})
     ratiox = max(int(stkI.shape[1]/stkI.shape[0]),1)
     ratioy = max(int(stkI.shape[0]/stkI.shape[1]),1)
-    fig, (axI, axQ, axU) = plt.subplots(ncols=3, figsize=(20*ratiox, 8*ratioy), subplot_kw=dict(projection=wcs))
+    fig, (axI, axQ, axU) = plt.subplots(ncols=3, figsize=(15*ratiox, 6*ratioy), subplot_kw=dict(projection=wcs))
     fig.subplots_adjust(hspace=0, wspace=0.50, bottom=0.01, top=0.99, left=0.07, right=0.97)
     fig.suptitle("I, Q, U Stokes parameters")
     imI = axI.imshow(stkI, origin="lower", cmap="inferno")
-    fig.colorbar(imI, ax=axI, aspect=50, shrink=0.50, pad=0.025, label="counts/sec")
+    fig.colorbar(imI, ax=axI, aspect=30, shrink=0.50, pad=0.025, label="counts/sec")
     axI.set(xlabel="RA", ylabel="DEC", title=r"$I_{stokes}$")
     imQ = axQ.imshow(stkQ, origin="lower", cmap="inferno")
-    fig.colorbar(imQ, ax=axQ, aspect=50, shrink=0.50, pad=0.025, label="counts/sec")
+    fig.colorbar(imQ, ax=axQ, aspect=30, shrink=0.50, pad=0.025, label="counts/sec")
     axQ.set(xlabel="RA", ylabel="DEC", title=r"$Q_{stokes}$")
     imU = axU.imshow(stkU, origin="lower", cmap="inferno")
-    fig.colorbar(imU, ax=axU, aspect=50, shrink=0.50, pad=0.025, label="counts/sec")
+    fig.colorbar(imU, ax=axU, aspect=30, shrink=0.50, pad=0.025, label="counts/sec")
     axU.set(xlabel="RA", ylabel="DEC", title=r"$U_{stokes}$")
     if savename is not None:
@@ -322,14 +322,20 @@ def polarization_map(
         print("No pixel with polarization information above requested SNR.")
     # Plot the map
-    plt.rcParams.update({"font.size": 12})
+    plt.rcParams.update({"font.size": 14})
     plt.rcdefaults()
-    ratiox = max(int(stkI.shape[1]/stkI.shape[0]),1)
-    ratioy = max(int(stkI.shape[0]/stkI.shape[1]),1)
-    fig, ax = plt.subplots(figsize=(10*ratiox, 10*ratioy), layout="constrained", subplot_kw=dict(projection=wcs))
-    ax.set(aspect="equal", fc="k")
+    ratiox = max(int(stkI.shape[1]/(stkI.shape[0])),1)
+    ratioy = max(int((stkI.shape[0])/stkI.shape[1]),1)
+    fig, ax = plt.subplots(figsize=(6*ratiox, 6*ratioy), layout="compressed", subplot_kw=dict(projection=wcs))
+    ax.set(aspect="equal", fc="k", ylim=[-stkI.shape[0]*0.10,stkI.shape[0]*1.15])
     # fig.subplots_adjust(hspace=0, wspace=0, left=0.102, right=1.02)
+    # ax.coords.grid(True, color='white', ls='dotted', alpha=0.5)
+    ax.coords[0].set_axislabel("Right Ascension (J2000)")
+    ax.coords[0].set_axislabel_position("t")
+    ax.coords[0].set_ticklabel_position("t")
+    ax.set_ylabel("Declination (J2000)", labelpad=-1)
     if display.lower() in ["intensity"]:
         # If no display selected, show intensity map
         display = "i"
@@ -341,7 +347,7 @@ def polarization_map(
         else:
             vmin, vmax = flux_lim
         im = ax.imshow(stkI * convert_flux, norm=LogNorm(vmin, vmax), aspect="equal", cmap="inferno", alpha=1.0)
-        fig.colorbar(im, ax=ax, aspect=50, shrink=0.75, pad=0.025, label=r"$F_{\lambda}$ [$ergs \cdot cm^{-2} \cdot s^{-1} \cdot \AA^{-1}$]")
+        fig.colorbar(im, ax=ax, aspect=30, shrink=0.75, pad=0.025, label=r"$F_{\lambda}$ [$ergs \cdot cm^{-2} \cdot s^{-1} \cdot \AA^{-1}$]")
         levelsI = np.array([0.8, 2.0, 5.0, 10.0, 20.0, 50.0]) / 100.0 * vmax
         print("Total flux contour levels : ", levelsI)
         ax.contour(stkI * convert_flux, levels=levelsI, colors="grey", linewidths=0.5)
@@ -436,9 +442,9 @@ def polarization_map(
         PA_diluted = Stokes[0].header["PA_int"]
         PA_diluted_err = Stokes[0].header["sPA_int"]
-    plt.rcParams.update({"font.size": 12})
+    plt.rcParams.update({"font.size": 10})
     px_size = wcs.wcs.get_cdelt()[0] * 3600.0
-    px_sc = AnchoredSizeBar(ax.transData, 1.0 / px_size, "1 arcsec", 3, pad=0.5, sep=5, borderpad=0.5, frameon=False, size_vertical=0.005, color="w")
+    px_sc = AnchoredSizeBar(ax.transData, 1.0 / px_size, "1 arcsec", 3, pad=0.25, sep=5, borderpad=0.25, frameon=False, size_vertical=0.005, color="w")
     north_dir = AnchoredDirectionArrows(
         ax.transAxes,
         "E",
@@ -446,7 +452,7 @@ def polarization_map(
         length=-0.05,
         fontsize=0.02,
         loc=1,
-        aspect_ratio=-(stkI.shape[1]/stkI.shape[0]),
+        aspect_ratio=-(stkI.shape[1]/(stkI.shape[0]*1.25)),
         sep_y=0.01,
         sep_x=0.01,
         back_length=0.0,
@@ -482,7 +488,7 @@ def polarization_map(
         color="w",
         edgecolor="k",
     )
-    pol_sc = AnchoredSizeBar(ax.transData, scale_vec, r"$P$= 100 %", 4, pad=0.5, sep=5, borderpad=0.5, frameon=False, size_vertical=0.005, color="w")
+    pol_sc = AnchoredSizeBar(ax.transData, scale_vec, r"$P$= 100 %", 4, pad=0.25, sep=5, borderpad=0.25, frameon=False, size_vertical=0.005, color="w")
     ax.add_artist(pol_sc)
     ax.add_artist(px_sc)
@@ -525,12 +531,6 @@ def polarization_map(
             x, y = np.array([x, y]) - np.array(stkI.shape) / 2.0
             ax.add_patch(Rectangle((x, y), width, height, angle=angle, edgecolor=color, fill=False))
-    # ax.coords.grid(True, color='white', ls='dotted', alpha=0.5)
-    ax.coords[0].set_axislabel("Right Ascension (J2000)")
-    ax.coords[0].set_axislabel_position("t")
-    ax.coords[0].set_ticklabel_position("t")
-    ax.set_ylabel("Declination (J2000)", labelpad=-1)
     if savename is not None:
         if savename[-4:] not in [".png", ".jpg", ".pdf"]:
             savename += ".pdf"

View File

@@ -433,7 +433,18 @@ def deconvolve_array(data_array, headers, psf="gaussian", FWHM=1.0, scale="px",
     return deconv_array
-def get_error(data_array, headers, error_array=None, data_mask=None, sub_type=None, subtract_error=0.5, display=False, savename=None, plots_folder="", return_background=False):
+def get_error(
+    data_array,
+    headers,
+    error_array=None,
+    data_mask=None,
+    sub_type=None,
+    subtract_error=0.5,
+    display=False,
+    savename=None,
+    plots_folder="",
+    return_background=False,
+):
     """
     Look for sub-image of shape sub_shape that have the smallest integrated
     flux (no source assumption) and define the background on the image by the
@@ -521,23 +532,23 @@ def get_error(data_array, headers, error_array=None, data_mask=No
         n_data_array, c_error_bkg, headers, background = bkg_hist(
             data, error, mask, headers, subtract_error=subtract_error, display=display, savename=savename, plots_folder=plots_folder
         )
-        sub_type, subtract_error = "histogram ", str(int(subtract_error>0.))
+        sub_type, subtract_error = "histogram ", str(int(subtract_error > 0.0))
     elif isinstance(sub_type, str):
         if sub_type.lower() in ["auto"]:
             n_data_array, c_error_bkg, headers, background = bkg_fit(
                 data, error, mask, headers, subtract_error=subtract_error, display=display, savename=savename, plots_folder=plots_folder
             )
-            sub_type, subtract_error = "histogram fit ", "mean+%.1fsigma"%subtract_error if subtract_error != 0. else 0.
+            sub_type, subtract_error = "histogram fit ", "mean+%.1fsigma" % subtract_error if subtract_error != 0.0 else 0.0
         else:
             n_data_array, c_error_bkg, headers, background = bkg_hist(
                 data, error, mask, headers, sub_type=sub_type, subtract_error=subtract_error, display=display, savename=savename, plots_folder=plots_folder
             )
-            sub_type, subtract_error = "histogram ", "mean+%.1fsigma"%subtract_error if subtract_error != 0. else 0.
+            sub_type, subtract_error = "histogram ", "mean+%.1fsigma" % subtract_error if subtract_error != 0.0 else 0.0
     elif isinstance(sub_type, tuple):
         n_data_array, c_error_bkg, headers, background = bkg_mini(
             data, error, mask, headers, sub_shape=sub_type, subtract_error=subtract_error, display=display, savename=savename, plots_folder=plots_folder
         )
-        sub_type, subtract_error = "minimal flux ", "mean+%.1fsigma"%subtract_error if subtract_error != 0. else 0.
+        sub_type, subtract_error = "minimal flux ", "mean+%.1fsigma" % subtract_error if subtract_error != 0.0 else 0.0
     else:
         print("Warning: Invalid subtype.")
@@ -618,7 +629,12 @@ def rebin_array(data_array, error_array, headers, pxsize=2, scale="px", operatio
         # Compute binning ratio
         if scale.lower() in ["px", "pixel"]:
-            Dxy_arr[i] = np.array( [ pxsize, ] * 2)
+            Dxy_arr[i] = np.array(
+                [
+                    pxsize,
+                ]
+                * 2
+            )
             scale = "px"
         elif scale.lower() in ["arcsec", "arcseconds"]:
             Dxy_arr[i] = np.array(pxsize / np.abs(w.wcs.cdelt) / 3600.0)
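A sketch of the binning ratio this branch computes (illustration only; the cdelt value below is hypothetical): in "arcsec" scale the requested output pixel size is converted to native pixels per output pixel via the WCS plate scale, while in "px" scale it is used directly on both axes.

import numpy as np

cdelt = np.array([-4.0e-6, 4.0e-6])  # deg/px (made-up value, ~0.0144 arcsec/px)
pxsize = 0.1  # requested output pixel size in arcsec
Dxy = np.array(pxsize / np.abs(cdelt) / 3600.0)
print(Dxy)  # ~[6.94, 6.94] native pixels binned into each output pixel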
@@ -676,7 +692,9 @@ def rebin_array(data_array, error_array, headers, pxsize=2, scale="px", operatio
     return rebinned_data, rebinned_error, rebinned_headers, Dxy, data_mask
-def align_data(data_array, headers, error_array=None, data_mask=None, background=None, upsample_factor=1.0, ref_data=None, ref_center=None, return_shifts=False):
+def align_data(
+    data_array, headers, error_array=None, data_mask=None, background=None, upsample_factor=1.0, ref_data=None, ref_center=None, return_shifts=False
+):
     """
     Align images in data_array using cross correlation, and rescale them to
     wider images able to contain any rotation of the reference image.
@@ -757,7 +775,9 @@ def align_data(data_array, headers, error_array=None, data_mask=None, background
     if data_mask is None:
         full_array, err_array, full_headers = crop_array(full_array, full_headers, err_array, step=5, inside=False, null_val=0.0)
     else:
-        full_array, err_array, data_mask, full_headers = crop_array(full_array, full_headers, err_array, data_mask=data_mask, step=5, inside=False, null_val=0.0)
+        full_array, err_array, data_mask, full_headers = crop_array(
+            full_array, full_headers, err_array, data_mask=data_mask, step=5, inside=False, null_val=0.0
+        )
     data_array, ref_data, headers = full_array[:-1], full_array[-1], full_headers[:-1]
     error_array = err_array[:-1]
@@ -806,8 +826,8 @@ def align_data(data_array, headers, error_array=None, data_mask=None, background
         rescaled_image[i] = sc_shift(rescaled_image[i], shift, order=1, cval=0.0)
         rescaled_error[i] = sc_shift(rescaled_error[i], shift, order=1, cval=background[i])
-        curr_mask = sc_shift(res_mask*10., shift, order=1, cval=0.0)
-        curr_mask[curr_mask < curr_mask.max()*2./3.] = 0.0
+        curr_mask = sc_shift(res_mask * 10.0, shift, order=1, cval=0.0)
+        curr_mask[curr_mask < curr_mask.max() * 2.0 / 3.0] = 0.0
         rescaled_mask[i] = curr_mask.astype(bool)
         # mask_vertex = clean_ROI(curr_mask)
         # rescaled_mask[i, mask_vertex[2] : mask_vertex[3], mask_vertex[0] : mask_vertex[1]] = True
@@ -1195,9 +1215,9 @@ def compute_Stokes(data_array, error_array, data_mask, headers, FWHM=None, scale
     rotate = np.zeros(len(headers))
     for i, head in enumerate(headers):
         try:
-            rotate[i] = head['ROTATE']
+            rotate[i] = head["ROTATE"]
         except KeyError:
-            rotate[i] = 0.
+            rotate[i] = 0.0
     if (np.unique(rotate) == rotate[0]).all():
         theta = globals()["theta"] - rotate[0] * np.pi / 180.0
@@ -1231,8 +1251,8 @@ def compute_Stokes(data_array, error_array, data_mask, headers, FWHM=None, scale
         # Calculating correction factor: allows all pol_filt to share same exptime and inverse sensitivity (taken to be the one from POL0)
         corr = np.array([1.0 * h["photflam"] / h["exptime"] for h in pol_headers]) * pol_headers[0]["exptime"] / pol_headers[0]["photflam"]
-        pol_headers[1]['photflam'], pol_headers[1]['exptime'] = pol_headers[0]['photflam'], pol_headers[1]['exptime']
-        pol_headers[2]['photflam'], pol_headers[2]['exptime'] = pol_headers[0]['photflam'], pol_headers[2]['exptime']
+        pol_headers[1]["photflam"], pol_headers[1]["exptime"] = pol_headers[0]["photflam"], pol_headers[1]["exptime"]
+        pol_headers[2]["photflam"], pol_headers[2]["exptime"] = pol_headers[0]["photflam"], pol_headers[2]["exptime"]
     # Orientation and error for each polarizer
     # fmax = np.finfo(np.float64).max
@@ -1241,22 +1261,12 @@ def compute_Stokes(data_array, error_array, data_mask, headers, FWHM=None, scale
     coeff_stokes = np.zeros((3, 3))
     # Coefficients linking each polarizer flux to each Stokes parameter
     for i in range(3):
-        coeff_stokes[0, i] = (
-            pol_eff[(i + 1) % 3]
-            * pol_eff[(i + 2) % 3]
-            * np.sin(-2.0 * theta[(i + 1) % 3] + 2.0 * theta[(i + 2) % 3])
-            * 2.0
-            / transmit[i]
-        )
+        coeff_stokes[0, i] = pol_eff[(i + 1) % 3] * pol_eff[(i + 2) % 3] * np.sin(-2.0 * theta[(i + 1) % 3] + 2.0 * theta[(i + 2) % 3]) * 2.0 / transmit[i]
         coeff_stokes[1, i] = (
-            (-pol_eff[(i + 1) % 3] * np.sin(2.0 * theta[(i + 1) % 3]) + pol_eff[(i + 2) % 3] * np.sin(2.0 * theta[(i + 2) % 3]))
-            * 2.0
-            / transmit[i]
+            (-pol_eff[(i + 1) % 3] * np.sin(2.0 * theta[(i + 1) % 3]) + pol_eff[(i + 2) % 3] * np.sin(2.0 * theta[(i + 2) % 3])) * 2.0 / transmit[i]
         )
         coeff_stokes[2, i] = (
-            (pol_eff[(i + 1) % 3] * np.cos(2.0 * theta[(i + 1) % 3]) - pol_eff[(i + 2) % 3] * np.cos(2.0 * theta[(i + 2) % 3]))
-            * 2.0
-            / transmit[i]
+            (pol_eff[(i + 1) % 3] * np.cos(2.0 * theta[(i + 1) % 3]) - pol_eff[(i + 2) % 3] * np.cos(2.0 * theta[(i + 2) % 3])) * 2.0 / transmit[i]
         )
     # Normalization parameter for Stokes parameters computation
@@ -1348,11 +1358,7 @@ def compute_Stokes(data_array, error_array, data_mask, headers, FWHM=None, scale
             / N
             * (
                 np.cos(2.0 * theta[0]) * (pol_flux_corr[1] - pol_flux_corr[2])
-                - (
-                    pol_eff[2] * np.cos(-2.0 * theta[2] + 2.0 * theta[0])
-                    - pol_eff[1] * np.cos(-2.0 * theta[0] + 2.0 * theta[1])
-                )
-                * Q_stokes
+                - (pol_eff[2] * np.cos(-2.0 * theta[2] + 2.0 * theta[0]) - pol_eff[1] * np.cos(-2.0 * theta[0] + 2.0 * theta[1])) * Q_stokes
                 + coeff_stokes_corr[1, 0] * (np.sin(2.0 * theta[0]) * Q_stokes - np.cos(2 * theta[0]) * U_stokes)
             )
         )
@@ -1362,11 +1368,7 @@ def compute_Stokes(data_array, error_array, data_mask, headers, FWHM=None, scale
             / N
             * (
                 np.cos(2.0 * theta[1]) * (pol_flux_corr[2] - pol_flux_corr[0])
-                - (
-                    pol_eff[0] * np.cos(-2.0 * theta[0] + 2.0 * theta[1])
-                    - pol_eff[2] * np.cos(-2.0 * theta[1] + 2.0 * theta[2])
-                )
-                * Q_stokes
+                - (pol_eff[0] * np.cos(-2.0 * theta[0] + 2.0 * theta[1]) - pol_eff[2] * np.cos(-2.0 * theta[1] + 2.0 * theta[2])) * Q_stokes
                 + coeff_stokes_corr[1, 1] * (np.sin(2.0 * theta[1]) * Q_stokes - np.cos(2 * theta[1]) * U_stokes)
             )
         )
@@ -1376,11 +1378,7 @@ def compute_Stokes(data_array, error_array, data_mask, headers, FWHM=None, scale
             / N
             * (
                 np.cos(2.0 * theta[2]) * (pol_flux_corr[0] - pol_flux_corr[1])
-                - (
-                    pol_eff[1] * np.cos(-2.0 * theta[1] + 2.0 * theta[2])
-                    - pol_eff[0] * np.cos(-2.0 * theta[2] + 2.0 * theta[0])
-                )
-                * Q_stokes
+                - (pol_eff[1] * np.cos(-2.0 * theta[1] + 2.0 * theta[2]) - pol_eff[0] * np.cos(-2.0 * theta[2] + 2.0 * theta[0])) * Q_stokes
                 + coeff_stokes_corr[1, 2] * (np.sin(2.0 * theta[2]) * Q_stokes - np.cos(2 * theta[2]) * U_stokes)
             )
         )
@@ -1392,11 +1390,7 @@ def compute_Stokes(data_array, error_array, data_mask, headers, FWHM=None, scale
            / N
            * (
                np.sin(2.0 * theta[0]) * (pol_flux_corr[1] - pol_flux_corr[2])
-                - (
-                    pol_eff[2] * np.cos(-2.0 * theta[2] + 2.0 * theta[0])
-                    - pol_eff[1] * np.cos(-2.0 * theta[0] + 2.0 * theta[1])
-                )
-                * U_stokes
+                - (pol_eff[2] * np.cos(-2.0 * theta[2] + 2.0 * theta[0]) - pol_eff[1] * np.cos(-2.0 * theta[0] + 2.0 * theta[1])) * U_stokes
                + coeff_stokes_corr[2, 0] * (np.sin(2.0 * theta[0]) * Q_stokes - np.cos(2 * theta[0]) * U_stokes)
            )
        )
@@ -1406,11 +1400,7 @@ def compute_Stokes(data_array, error_array, data_mask, headers, FWHM=None, scale
            / N
            * (
                np.sin(2.0 * theta[1]) * (pol_flux_corr[2] - pol_flux_corr[0])
-                - (
-                    pol_eff[0] * np.cos(-2.0 * theta[0] + 2.0 * theta[1])
-                    - pol_eff[2] * np.cos(-2.0 * theta[1] + 2.0 * theta[2])
-                )
-                * U_stokes
+                - (pol_eff[0] * np.cos(-2.0 * theta[0] + 2.0 * theta[1]) - pol_eff[2] * np.cos(-2.0 * theta[1] + 2.0 * theta[2])) * U_stokes
                + coeff_stokes_corr[2, 1] * (np.sin(2.0 * theta[1]) * Q_stokes - np.cos(2 * theta[1]) * U_stokes)
            )
        )
@@ -1420,11 +1410,7 @@ def compute_Stokes(data_array, error_array, data_mask, headers, FWHM=None, scale
            / N
            * (
                np.sin(2.0 * theta[2]) * (pol_flux_corr[0] - pol_flux_corr[1])
-                - (
-                    pol_eff[1] * np.cos(-2.0 * theta[1] + 2.0 * theta[2])
-                    - pol_eff[0] * np.cos(-2.0 * theta[2] + 2.0 * theta[0])
-                )
-                * U_stokes
+                - (pol_eff[1] * np.cos(-2.0 * theta[1] + 2.0 * theta[2]) - pol_eff[0] * np.cos(-2.0 * theta[2] + 2.0 * theta[0])) * U_stokes
                + coeff_stokes_corr[2, 2] * (np.sin(2.0 * theta[2]) * Q_stokes - np.cos(2 * theta[2]) * U_stokes)
            )
        )
@@ -1451,12 +1437,24 @@ def compute_Stokes(data_array, error_array, data_mask, headers, FWHM=None, scale
         all_Q_stokes = np.zeros((np.unique(rotate).size, data_array.shape[1], data_array.shape[2]))
         all_U_stokes = np.zeros((np.unique(rotate).size, data_array.shape[1], data_array.shape[2]))
         all_Stokes_cov = np.zeros((np.unique(rotate).size, 3, 3, data_array.shape[1], data_array.shape[2]))
-        all_header_stokes = [{},]*np.unique(rotate).size
+        all_header_stokes = [
+            {},
+        ] * np.unique(rotate).size
         for i, rot in enumerate(np.unique(rotate)):
-            rot_mask = (rotate == rot)
-            all_I_stokes[i], all_Q_stokes[i], all_U_stokes[i], all_Stokes_cov[i], all_header_stokes[i] = compute_Stokes(data_array[rot_mask], error_array[rot_mask], data_mask, [headers[i] for i in np.arange(len(headers))[rot_mask]], FWHM=FWHM, scale=scale, smoothing=smoothing, transmitcorr=transmitcorr, integrate=False)
-        all_exp = np.array([float(h['exptime']) for h in all_header_stokes])
+            rot_mask = rotate == rot
+            all_I_stokes[i], all_Q_stokes[i], all_U_stokes[i], all_Stokes_cov[i], all_header_stokes[i] = compute_Stokes(
+                data_array[rot_mask],
+                error_array[rot_mask],
+                data_mask,
+                [headers[i] for i in np.arange(len(headers))[rot_mask]],
+                FWHM=FWHM,
+                scale=scale,
+                smoothing=smoothing,
+                transmitcorr=transmitcorr,
+                integrate=False,
+            )
+        all_exp = np.array([float(h["exptime"]) for h in all_header_stokes])
         I_stokes = np.sum([exp * I for exp, I in zip(all_exp, all_I_stokes)], axis=0) / all_exp.sum()
         Q_stokes = np.sum([exp * Q for exp, Q in zip(all_exp, all_Q_stokes)], axis=0) / all_exp.sum()
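When the observation mixes several ROTATE groups, each group is reduced separately and the resulting Stokes maps are combined with exposure-time weights. A 1-pixel sketch of the weighting (illustration only, not from the commit):

import numpy as np

all_exp = np.array([900.0, 300.0])  # per-group exposure times
all_I_stokes = np.array([[[1.0]], [[2.0]]])  # one 1x1 Stokes I "map" per group
I_stokes = np.sum([exp * I for exp, I in zip(all_exp, all_I_stokes)], axis=0) / all_exp.sum()
print(I_stokes)  # [[1.25]] = (900*1.0 + 300*2.0) / 1200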
@@ -1470,7 +1468,7 @@ def compute_Stokes(data_array, error_array, data_mask, headers, FWHM=None, scale
         # Save values to single header
         header_stokes = all_header_stokes[0]
-        header_stokes['exptime'] = all_exp.sum()
+        header_stokes["exptime"] = all_exp.sum()
     # Nan handling :
     fmax = np.finfo(np.float64).max
@@ -1681,12 +1679,7 @@ def rotate_Stokes(I_stokes, Q_stokes, U_stokes, Stokes_cov, data_mask, header_st
                 U_stokes[i, j] = eps * np.sqrt(Stokes_cov[2, 2][i, j])
     # Rotate I_stokes, Q_stokes, U_stokes using rotation matrix
-    # ang = np.zeros((len(headers),))
-    # for i, head in enumerate(headers):
-    #     pc = WCS(head).celestial.wcs.pc[0,0]
-    #     ang[i] = -np.arccos(WCS(head).celestial.wcs.pc[0,0]) * 180.0 / np.pi if np.abs(pc) < 1. else 0.
-    pc = WCS(header_stokes).celestial.wcs.pc[0,0]
-    ang = -np.arccos(WCS(header_stokes).celestial.wcs.pc[0,0]) * 180.0 / np.pi if np.abs(pc) < 1. else 0.
+    ang = -float(header_stokes["ORIENTAT"])
     alpha = np.pi / 180.0 * ang
     mrot = np.array([[1.0, 0.0, 0.0], [0.0, np.cos(2.0 * alpha), np.sin(2.0 * alpha)], [0, -np.sin(2.0 * alpha), np.cos(2.0 * alpha)]])
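With this change the rotation angle is read straight from the ORIENTAT keyword instead of being re-derived from the WCS PC matrix (whose arccos drops the sign of the angle). The mrot just above is the standard Stokes rotation: turning the frame by alpha leaves I unchanged and rotates the (Q, U) pair by 2*alpha. A standalone sketch (editor's illustration, helper name made up):

import numpy as np

def rotate_QU(I, Q, U, ang_deg):
    alpha = np.pi / 180.0 * ang_deg
    mrot = np.array([[1.0, 0.0, 0.0], [0.0, np.cos(2.0 * alpha), np.sin(2.0 * alpha)], [0.0, -np.sin(2.0 * alpha), np.cos(2.0 * alpha)]])
    return mrot @ np.array([I, Q, U])

print(rotate_QU(1.0, 0.3, 0.1, 90.0))  # ~[ 1.  -0.3 -0.1]: a 90 deg frame rotation flips Q and U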
@@ -1709,7 +1702,7 @@ def rotate_Stokes(I_stokes, Q_stokes, U_stokes, Stokes_cov, data_mask, header_st
     new_Q_stokes = sc_rotate(Q_stokes, ang, order=1, reshape=False, cval=0.0)
     new_U_stokes = sc_rotate(U_stokes, ang, order=1, reshape=False, cval=0.0)
     new_data_mask = sc_rotate(data_mask.astype(float) * 10.0, ang, order=1, reshape=False, cval=0.0)
-    new_data_mask[new_data_mask < 2] = 0.0
+    new_data_mask[new_data_mask < 1.0] = 0.0
     new_data_mask = new_data_mask.astype(bool)
     for i in range(3):
         for j in range(3):
@@ -1725,7 +1718,6 @@ def rotate_Stokes(I_stokes, Q_stokes, U_stokes, Stokes_cov, data_mask, header_st
     mrot = np.array([[np.cos(-alpha), -np.sin(-alpha)], [np.sin(-alpha), np.cos(-alpha)]])
     new_header_stokes = deepcopy(header_stokes)
-    new_header_stokes["orientat"] = header_stokes["orientat"] + ang
     new_wcs = WCS(header_stokes).celestial.deepcopy()
     new_wcs.wcs.pc = np.dot(mrot, new_wcs.wcs.pc)
@@ -1737,7 +1729,7 @@ def rotate_Stokes(I_stokes, Q_stokes, U_stokes, Stokes_cov, data_mask, header_st
         new_header_stokes.set("PC1_1", 1.0)
     if new_wcs.wcs.pc[1, 1] == 1.0:
         new_header_stokes.set("PC2_2", 1.0)
-    new_header_stokes["orientat"] = header_stokes["orientat"] + ang
+    new_header_stokes["ORIENTAT"] += ang
     # Nan handling :
     fmax = np.finfo(np.float64).max
@@ -1849,7 +1841,7 @@ def rotate_data(data_array, error_array, data_mask, headers):
     new_data_array = np.array(new_data_array)
     new_error_array = np.array(new_error_array)
     new_data_mask = np.array(new_data_mask).sum(axis=0)
-    new_data_mask[new_data_mask < new_data_mask.max()*2./3.] = 0.0
+    new_data_mask[new_data_mask < 1.0] = 0.0
     new_data_mask = new_data_mask.astype(bool)
     for i in range(new_data_array.shape[0]):
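Both here and in rotate_Stokes the mask cut becomes absolute (< 1.0) instead of relative to the maximum, so, roughly, a pixel now survives if at least one rotated image covers it rather than two thirds of the stack. A small sketch (not from the commit) showing where the two rules differ:

import numpy as np

masks = np.array([[1, 1, 0], [1, 0, 0], [1, 0, 0]], dtype=float)  # per-image coverage
combined = masks.sum(axis=0)  # [3. 1. 0.]
old = combined.copy()
old[old < old.max() * 2.0 / 3.0] = 0.0  # old rule: needs >= 2/3 of the maximum coverage
new = combined.copy()
new[new < 1.0] = 0.0  # new rule: any coverage counts
print(old.astype(bool), new.astype(bool))  # [ True False False] [ True  True False]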