better plots and filenames

2024-07-08 17:05:42 +02:00
parent d8365e984d
commit 155717a585
3 changed files with 125 additions and 153 deletions


@@ -182,23 +182,23 @@ def plot_Stokes(Stokes, savename=None, plots_folder=""):
     wcs = WCS(Stokes[0]).deepcopy()
     # Plot figure
-    plt.rcParams.update({"font.size": 12})
+    plt.rcParams.update({"font.size": 14})
     ratiox = max(int(stkI.shape[1]/stkI.shape[0]),1)
     ratioy = max(int(stkI.shape[0]/stkI.shape[1]),1)
-    fig, (axI, axQ, axU) = plt.subplots(ncols=3, figsize=(20*ratiox, 8*ratioy), subplot_kw=dict(projection=wcs))
+    fig, (axI, axQ, axU) = plt.subplots(ncols=3, figsize=(15*ratiox, 6*ratioy), subplot_kw=dict(projection=wcs))
     fig.subplots_adjust(hspace=0, wspace=0.50, bottom=0.01, top=0.99, left=0.07, right=0.97)
     fig.suptitle("I, Q, U Stokes parameters")
     imI = axI.imshow(stkI, origin="lower", cmap="inferno")
-    fig.colorbar(imI, ax=axI, aspect=50, shrink=0.50, pad=0.025, label="counts/sec")
+    fig.colorbar(imI, ax=axI, aspect=30, shrink=0.50, pad=0.025, label="counts/sec")
     axI.set(xlabel="RA", ylabel="DEC", title=r"$I_{stokes}$")
     imQ = axQ.imshow(stkQ, origin="lower", cmap="inferno")
-    fig.colorbar(imQ, ax=axQ, aspect=50, shrink=0.50, pad=0.025, label="counts/sec")
+    fig.colorbar(imQ, ax=axQ, aspect=30, shrink=0.50, pad=0.025, label="counts/sec")
     axQ.set(xlabel="RA", ylabel="DEC", title=r"$Q_{stokes}$")
     imU = axU.imshow(stkU, origin="lower", cmap="inferno")
-    fig.colorbar(imU, ax=axU, aspect=50, shrink=0.50, pad=0.025, label="counts/sec")
+    fig.colorbar(imU, ax=axU, aspect=30, shrink=0.50, pad=0.025, label="counts/sec")
     axU.set(xlabel="RA", ylabel="DEC", title=r"$U_{stokes}$")
     if savename is not None:
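
Both sizing hunks derive the figure size from integer aspect ratios of the map. A minimal sketch of that rule, assuming a (ny, nx) array shape; panel_figsize is a hypothetical helper, not part of the commit:

```python
# Hypothetical helper reproducing the ratiox/ratioy sizing rule above.
def panel_figsize(shape, width=15, height=6):
    ratiox = max(int(shape[1] / shape[0]), 1)  # stretch width for landscape maps
    ratioy = max(int(shape[0] / shape[1]), 1)  # stretch height for portrait maps
    return (width * ratiox, height * ratioy)

print(panel_figsize((200, 800)))  # wide map -> (60, 6)
print(panel_figsize((800, 200)))  # tall map -> (15, 24)
```

Near-square maps fall back to the base size, since both ratios truncate to 1.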
@@ -322,14 +322,20 @@ def polarization_map(
         print("No pixel with polarization information above requested SNR.")
     # Plot the map
-    plt.rcParams.update({"font.size": 12})
+    plt.rcParams.update({"font.size": 14})
+    plt.rcdefaults()
-    ratiox = max(int(stkI.shape[1]/stkI.shape[0]),1)
-    ratioy = max(int(stkI.shape[0]/stkI.shape[1]),1)
-    fig, ax = plt.subplots(figsize=(10*ratiox, 10*ratioy), layout="constrained", subplot_kw=dict(projection=wcs))
-    ax.set(aspect="equal", fc="k")
+    ratiox = max(int(stkI.shape[1]/(stkI.shape[0])),1)
+    ratioy = max(int((stkI.shape[0])/stkI.shape[1]),1)
+    fig, ax = plt.subplots(figsize=(6*ratiox, 6*ratioy), layout="compressed", subplot_kw=dict(projection=wcs))
+    ax.set(aspect="equal", fc="k", ylim=[-stkI.shape[0]*0.10,stkI.shape[0]*1.15])
+    # fig.subplots_adjust(hspace=0, wspace=0, left=0.102, right=1.02)
+    # ax.coords.grid(True, color='white', ls='dotted', alpha=0.5)
+    ax.coords[0].set_axislabel("Right Ascension (J2000)")
+    ax.coords[0].set_axislabel_position("t")
+    ax.coords[0].set_ticklabel_position("t")
+    ax.set_ylabel("Declination (J2000)", labelpad=-1)
     if display.lower() in ["intensity"]:
         # If no display selected, show intensity map
         display = "i"
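
The added lines configure the WCSAxes coordinate helpers before any plotting, moving the RA label and tick labels to the top edge. A self-contained sketch on synthetic data (the bare TAN-projection WCS is assumed for illustration only):

```python
import numpy as np
import matplotlib.pyplot as plt
from astropy.wcs import WCS

# Bare TAN-projection WCS, assumed for illustration
wcs = WCS(naxis=2)
wcs.wcs.ctype = ["RA---TAN", "DEC--TAN"]

fig, ax = plt.subplots(subplot_kw=dict(projection=wcs))
ax.imshow(np.random.rand(50, 50), origin="lower", cmap="inferno")
# RA label and ticks move to the top edge ("t"); Dec stays on the left
ax.coords[0].set_axislabel("Right Ascension (J2000)")
ax.coords[0].set_axislabel_position("t")
ax.coords[0].set_ticklabel_position("t")
ax.set_ylabel("Declination (J2000)", labelpad=-1)
plt.show()
```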
@@ -341,7 +347,7 @@ def polarization_map(
         else:
             vmin, vmax = flux_lim
         im = ax.imshow(stkI * convert_flux, norm=LogNorm(vmin, vmax), aspect="equal", cmap="inferno", alpha=1.0)
-        fig.colorbar(im, ax=ax, aspect=50, shrink=0.75, pad=0.025, label=r"$F_{\lambda}$ [$ergs \cdot cm^{-2} \cdot s^{-1} \cdot \AA^{-1}$]")
+        fig.colorbar(im, ax=ax, aspect=30, shrink=0.75, pad=0.025, label=r"$F_{\lambda}$ [$ergs \cdot cm^{-2} \cdot s^{-1} \cdot \AA^{-1}$]")
         levelsI = np.array([0.8, 2.0, 5.0, 10.0, 20.0, 50.0]) / 100.0 * vmax
         print("Total flux contour levels : ", levelsI)
         ax.contour(stkI * convert_flux, levels=levelsI, colors="grey", linewidths=0.5)
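
The contour levels are fixed percentages of the display ceiling vmax, not of the data maximum, so contours follow whatever flux_lim the caller chose. A short check with an illustrative vmax:

```python
import numpy as np

vmax = 3.2e-15  # hypothetical upper flux limit, erg / cm2 / s / Angstrom
levelsI = np.array([0.8, 2.0, 5.0, 10.0, 20.0, 50.0]) / 100.0 * vmax
print(levelsI)  # six levels between 0.8% and 50% of vmax
```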
@@ -436,9 +442,9 @@ def polarization_map(
         PA_diluted = Stokes[0].header["PA_int"]
         PA_diluted_err = Stokes[0].header["sPA_int"]
-    plt.rcParams.update({"font.size": 12})
+    plt.rcParams.update({"font.size": 10})
     px_size = wcs.wcs.get_cdelt()[0] * 3600.0
-    px_sc = AnchoredSizeBar(ax.transData, 1.0 / px_size, "1 arcsec", 3, pad=0.5, sep=5, borderpad=0.5, frameon=False, size_vertical=0.005, color="w")
+    px_sc = AnchoredSizeBar(ax.transData, 1.0 / px_size, "1 arcsec", 3, pad=0.25, sep=5, borderpad=0.25, frameon=False, size_vertical=0.005, color="w")
     north_dir = AnchoredDirectionArrows(
         ax.transAxes,
         "E",
@@ -446,7 +452,7 @@ def polarization_map(
         length=-0.05,
         fontsize=0.02,
         loc=1,
-        aspect_ratio=-(stkI.shape[1]/stkI.shape[0]),
+        aspect_ratio=-(stkI.shape[1]/(stkI.shape[0]*1.25)),
         sep_y=0.01,
         sep_x=0.01,
         back_length=0.0,
@@ -482,7 +488,7 @@ def polarization_map(
         color="w",
         edgecolor="k",
     )
-    pol_sc = AnchoredSizeBar(ax.transData, scale_vec, r"$P$= 100 %", 4, pad=0.5, sep=5, borderpad=0.5, frameon=False, size_vertical=0.005, color="w")
+    pol_sc = AnchoredSizeBar(ax.transData, scale_vec, r"$P$= 100 %", 4, pad=0.25, sep=5, borderpad=0.25, frameon=False, size_vertical=0.005, color="w")
     ax.add_artist(pol_sc)
     ax.add_artist(px_sc)
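
Both scale bars are mpl_toolkits AnchoredSizeBar artists; the commit only tightens pad and borderpad from 0.5 to 0.25. A minimal standalone sketch, with an assumed plate scale:

```python
import matplotlib.pyplot as plt
from mpl_toolkits.axes_grid1.anchored_artists import AnchoredSizeBar

fig, ax = plt.subplots()
ax.set(xlim=(0, 100), ylim=(0, 100), facecolor="k")
px_size = 0.05  # assumed plate scale, arcsec per pixel
# Bar spanning 1 arcsec = 1/px_size = 20 data units, in the lower-left corner (loc=3)
px_sc = AnchoredSizeBar(ax.transData, 1.0 / px_size, "1 arcsec", 3,
                        pad=0.25, sep=5, borderpad=0.25, frameon=False,
                        size_vertical=0.5, color="w")
ax.add_artist(px_sc)
plt.show()
```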
@@ -525,12 +531,6 @@ def polarization_map(
             x, y = np.array([x, y]) - np.array(stkI.shape) / 2.0
             ax.add_patch(Rectangle((x, y), width, height, angle=angle, edgecolor=color, fill=False))
-    # ax.coords.grid(True, color='white', ls='dotted', alpha=0.5)
-    ax.coords[0].set_axislabel("Right Ascension (J2000)")
-    ax.coords[0].set_axislabel_position("t")
-    ax.coords[0].set_ticklabel_position("t")
-    ax.set_ylabel("Declination (J2000)", labelpad=-1)
     if savename is not None:
         if savename[-4:] not in [".png", ".jpg", ".pdf"]:
             savename += ".pdf"
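
The suffix check compares the last four characters of savename, so ".jpeg" or uppercase suffixes would get a second extension appended. A slightly more robust variant (a suggestion only, not what the commit does; ensure_plot_ext is hypothetical):

```python
import os

def ensure_plot_ext(savename, allowed=(".png", ".jpg", ".pdf"), default=".pdf"):
    # Compare the parsed extension case-insensitively instead of raw characters
    ext = os.path.splitext(savename)[1].lower()
    return savename if ext in allowed else savename + default

print(ensure_plot_ext("stokes_map"))      # -> stokes_map.pdf
print(ensure_plot_ext("stokes_map.PNG"))  # left as-is
```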


@@ -433,7 +433,18 @@ def deconvolve_array(data_array, headers, psf="gaussian", FWHM=1.0, scale="px",
     return deconv_array

-def get_error(data_array, headers, error_array=None, data_mask=None, sub_type=None, subtract_error=0.5, display=False, savename=None, plots_folder="", return_background=False):
+def get_error(
+    data_array,
+    headers,
+    error_array=None,
+    data_mask=None,
+    sub_type=None,
+    subtract_error=0.5,
+    display=False,
+    savename=None,
+    plots_folder="",
+    return_background=False,
+):
     """
     Look for sub-image of shape sub_shape that have the smallest integrated
     flux (no source assumption) and define the background on the image by the
@@ -521,29 +532,29 @@ def get_error(data_array, headers, error_array=None, data_mask=None, sub_type=No
         n_data_array, c_error_bkg, headers, background = bkg_hist(
             data, error, mask, headers, subtract_error=subtract_error, display=display, savename=savename, plots_folder=plots_folder
         )
-        sub_type, subtract_error = "histogram ", str(int(subtract_error>0.))
+        sub_type, subtract_error = "histogram ", str(int(subtract_error > 0.0))
     elif isinstance(sub_type, str):
         if sub_type.lower() in ["auto"]:
             n_data_array, c_error_bkg, headers, background = bkg_fit(
                 data, error, mask, headers, subtract_error=subtract_error, display=display, savename=savename, plots_folder=plots_folder
             )
-            sub_type, subtract_error = "histogram fit ", "mean+%.1fsigma"%subtract_error if subtract_error != 0. else 0.
+            sub_type, subtract_error = "histogram fit ", "mean+%.1fsigma" % subtract_error if subtract_error != 0.0 else 0.0
         else:
             n_data_array, c_error_bkg, headers, background = bkg_hist(
                 data, error, mask, headers, sub_type=sub_type, subtract_error=subtract_error, display=display, savename=savename, plots_folder=plots_folder
             )
-            sub_type, subtract_error = "histogram ", "mean+%.1fsigma"%subtract_error if subtract_error != 0. else 0.
+            sub_type, subtract_error = "histogram ", "mean+%.1fsigma" % subtract_error if subtract_error != 0.0 else 0.0
     elif isinstance(sub_type, tuple):
         n_data_array, c_error_bkg, headers, background = bkg_mini(
             data, error, mask, headers, sub_shape=sub_type, subtract_error=subtract_error, display=display, savename=savename, plots_folder=plots_folder
         )
-        sub_type, subtract_error = "minimal flux ", "mean+%.1fsigma"%subtract_error if subtract_error != 0. else 0.
+        sub_type, subtract_error = "minimal flux ", "mean+%.1fsigma" % subtract_error if subtract_error != 0.0 else 0.0
     else:
         print("Warning: Invalid subtype.")

     for header in headers:
-        header["BKG_TYPE"] = (sub_type,"Bkg estimation method used during reduction")
-        header["BKG_SUB"] = (subtract_error,"Amount of bkg subtracted from images")
+        header["BKG_TYPE"] = (sub_type, "Bkg estimation method used during reduction")
+        header["BKG_SUB"] = (subtract_error, "Amount of bkg subtracted from images")

     # Quadratically add uncertainties in the "correction factors" (see Kishimoto 1999)
     n_error_array = np.sqrt(err_wav**2 + err_psf**2 + err_flat**2 + c_error_bkg**2)
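
The final error budget adds four independent terms in quadrature (wavelength, PSF, flat-field, and the background correction), per the Kishimoto 1999 reference in the comment. Numerically the largest term dominates:

```python
import numpy as np

# Illustrative per-pixel error terms, same units as the data
err_wav, err_psf, err_flat, c_error_bkg = 0.01, 0.02, 0.005, 0.03
n_error = np.sqrt(err_wav**2 + err_psf**2 + err_flat**2 + c_error_bkg**2)
print(n_error)  # ~0.0377, close to the largest single term (0.03)
```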
@@ -618,7 +629,12 @@ def rebin_array(data_array, error_array, headers, pxsize=2, scale="px", operatio
         # Compute binning ratio
         if scale.lower() in ["px", "pixel"]:
-            Dxy_arr[i] = np.array( [ pxsize, ] * 2)
+            Dxy_arr[i] = np.array(
+                [
+                    pxsize,
+                ]
+                * 2
+            )
             scale = "px"
         elif scale.lower() in ["arcsec", "arcseconds"]:
             Dxy_arr[i] = np.array(pxsize / np.abs(w.wcs.cdelt) / 3600.0)
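
In the arcsec branch just above, the requested bin size is converted to pixels through the WCS plate scale (cdelt is in degrees per pixel, and the factor 3600 converts arcseconds to degrees). A standalone check with an assumed plate scale:

```python
import numpy as np

cdelt = np.array([-1.1e-5, 1.1e-5])  # assumed plate scale, deg/px (~0.04 arcsec/px)
pxsize = 0.1                         # requested bin size in arcsec
Dxy = np.array(pxsize / np.abs(cdelt) / 3600.0)
print(Dxy)  # ~2.5 px along each axis
```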
@@ -662,7 +678,7 @@ def rebin_array(data_array, error_array, headers, pxsize=2, scale="px", operatio
         new_header["NAXIS1"], new_header["NAXIS2"] = nw.array_shape
         for key, val in nw.to_header().items():
             new_header.set(key, val)
-        new_header["SAMPLING"] = (str(pxsize)+scale, "Resampling performed during reduction")
+        new_header["SAMPLING"] = (str(pxsize) + scale, "Resampling performed during reduction")
         rebinned_headers.append(new_header)
     if data_mask is not None:
         data_mask = bin_ndarray(data_mask, new_shape=new_shape, operation="average") > 0.80
@@ -676,7 +692,9 @@ def rebin_array(data_array, error_array, headers, pxsize=2, scale="px", operatio
     return rebinned_data, rebinned_error, rebinned_headers, Dxy, data_mask

-def align_data(data_array, headers, error_array=None, data_mask=None, background=None, upsample_factor=1.0, ref_data=None, ref_center=None, return_shifts=False):
+def align_data(
+    data_array, headers, error_array=None, data_mask=None, background=None, upsample_factor=1.0, ref_data=None, ref_center=None, return_shifts=False
+):
     """
     Align images in data_array using cross correlation, and rescale them to
     wider images able to contain any rotation of the reference image.
@@ -757,7 +775,9 @@ def align_data(data_array, headers, error_array=None, data_mask=None, background
         if data_mask is None:
             full_array, err_array, full_headers = crop_array(full_array, full_headers, err_array, step=5, inside=False, null_val=0.0)
         else:
-            full_array, err_array, data_mask, full_headers = crop_array(full_array, full_headers, err_array, data_mask=data_mask, step=5, inside=False, null_val=0.0)
+            full_array, err_array, data_mask, full_headers = crop_array(
+                full_array, full_headers, err_array, data_mask=data_mask, step=5, inside=False, null_val=0.0
+            )
         data_array, ref_data, headers = full_array[:-1], full_array[-1], full_headers[:-1]
         error_array = err_array[:-1]
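
align_data registers frames by cross correlation before shifting them with sc_shift. A minimal sketch of the same idea using scikit-image's phase_cross_correlation (an assumed stand-in; the project's own correlation routine may differ):

```python
import numpy as np
from scipy.ndimage import shift as sc_shift
from skimage.registration import phase_cross_correlation

ref = np.zeros((64, 64))
ref[30:34, 30:34] = 1.0                    # synthetic compact source
img = sc_shift(ref, (2.5, -1.5), order=1)  # misaligned copy
shift, error, _ = phase_cross_correlation(ref, img, upsample_factor=10)
aligned = sc_shift(img, shift, order=1, cval=0.0)
print(shift)  # ~[-2.5, 1.5]: applying it re-registers img onto ref
```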
@@ -787,7 +807,7 @@ def align_data(data_array, headers, error_array=None, data_mask=None, background
     res_mask = np.zeros((res_shape, res_shape), dtype=bool)
     res_mask[res_shift[0] : res_shift[0] + shape[1], res_shift[1] : res_shift[1] + shape[2]] = True
     if data_mask is not None:
-        res_mask = np.logical_and(res_mask,zeropad(data_mask, (res_shape, res_shape)).astype(bool))
+        res_mask = np.logical_and(res_mask, zeropad(data_mask, (res_shape, res_shape)).astype(bool))

     shifts, errors = [], []
     for i, image in enumerate(data_array):
@@ -806,8 +826,8 @@ def align_data(data_array, headers, error_array=None, data_mask=None, background
         rescaled_image[i] = sc_shift(rescaled_image[i], shift, order=1, cval=0.0)
         rescaled_error[i] = sc_shift(rescaled_error[i], shift, order=1, cval=background[i])
-        curr_mask = sc_shift(res_mask*10., shift, order=1, cval=0.0)
-        curr_mask[curr_mask < curr_mask.max()*2./3.] = 0.0
+        curr_mask = sc_shift(res_mask * 10.0, shift, order=1, cval=0.0)
+        curr_mask[curr_mask < curr_mask.max() * 2.0 / 3.0] = 0.0
         rescaled_mask[i] = curr_mask.astype(bool)
         # mask_vertex = clean_ROI(curr_mask)
         # rescaled_mask[i, mask_vertex[2] : mask_vertex[3], mask_vertex[0] : mask_vertex[1]] = True
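
The mask handling above is a resampling trick: the boolean mask is scaled to 10, shifted with linear interpolation, then thresholded at 2/3 of its maximum so partially interpolated edge pixels fall out of the mask. A small demonstration:

```python
import numpy as np
from scipy.ndimage import shift as sc_shift

res_mask = np.zeros((8, 8), dtype=bool)
res_mask[2:6, 2:6] = True                                 # 16 valid pixels
curr_mask = sc_shift(res_mask * 10.0, (0.5, 0.5), order=1, cval=0.0)
curr_mask[curr_mask < curr_mask.max() * 2.0 / 3.0] = 0.0  # drop soft edges
print(curr_mask.astype(bool).sum())                       # 9: the mask shrinks conservatively
```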
@@ -964,7 +984,7 @@ def smooth_data(data_array, error_array, data_mask, headers, FWHM=1.5, scale="pi
         raise ValueError("{} is not a valid smoothing option".format(smoothing))
     for header in headers:
-        header["SMOOTH"] = (" ".join([smoothing,FWHM_size,FWHM_scale]),"Smoothing method used during reduction")
+        header["SMOOTH"] = (" ".join([smoothing, FWHM_size, FWHM_scale]), "Smoothing method used during reduction")

     return smoothed, error
@@ -1193,11 +1213,11 @@ def compute_Stokes(data_array, error_array, data_mask, headers, FWHM=None, scale
             (yet)".format(instr)
         )
     rotate = np.zeros(len(headers))
-    for i,head in enumerate(headers):
+    for i, head in enumerate(headers):
         try:
-            rotate[i] = head['ROTATE']
+            rotate[i] = head["ROTATE"]
         except KeyError:
-            rotate[i] = 0.
+            rotate[i] = 0.0
     if (np.unique(rotate) == rotate[0]).all():
         theta = globals()["theta"] - rotate[0] * np.pi / 180.0
@@ -1231,8 +1251,8 @@ def compute_Stokes(data_array, error_array, data_mask, headers, FWHM=None, scale
     # Calculating correction factor: allows all pol_filt to share same exptime and inverse sensitivity (taken to be the one from POL0)
     corr = np.array([1.0 * h["photflam"] / h["exptime"] for h in pol_headers]) * pol_headers[0]["exptime"] / pol_headers[0]["photflam"]
-    pol_headers[1]['photflam'], pol_headers[1]['exptime'] = pol_headers[0]['photflam'], pol_headers[1]['exptime']
-    pol_headers[2]['photflam'], pol_headers[2]['exptime'] = pol_headers[0]['photflam'], pol_headers[2]['exptime']
+    pol_headers[1]["photflam"], pol_headers[1]["exptime"] = pol_headers[0]["photflam"], pol_headers[1]["exptime"]
+    pol_headers[2]["photflam"], pol_headers[2]["exptime"] = pol_headers[0]["photflam"], pol_headers[2]["exptime"]

     # Orientation and error for each polarizer
     # fmax = np.finfo(np.float64).max
@@ -1241,22 +1261,12 @@ def compute_Stokes(data_array, error_array, data_mask, headers, FWHM=None, scale
     coeff_stokes = np.zeros((3, 3))
     # Coefficients linking each polarizer flux to each Stokes parameter
     for i in range(3):
-        coeff_stokes[0, i] = (
-            pol_eff[(i + 1) % 3]
-            * pol_eff[(i + 2) % 3]
-            * np.sin(-2.0 * theta[(i + 1) % 3] + 2.0 * theta[(i + 2) % 3])
-            * 2.0
-            / transmit[i]
-        )
+        coeff_stokes[0, i] = pol_eff[(i + 1) % 3] * pol_eff[(i + 2) % 3] * np.sin(-2.0 * theta[(i + 1) % 3] + 2.0 * theta[(i + 2) % 3]) * 2.0 / transmit[i]
         coeff_stokes[1, i] = (
-            (-pol_eff[(i + 1) % 3] * np.sin(2.0 * theta[(i + 1) % 3]) + pol_eff[(i + 2) % 3] * np.sin(2.0 * theta[(i + 2) % 3]))
-            * 2.0
-            / transmit[i]
+            (-pol_eff[(i + 1) % 3] * np.sin(2.0 * theta[(i + 1) % 3]) + pol_eff[(i + 2) % 3] * np.sin(2.0 * theta[(i + 2) % 3])) * 2.0 / transmit[i]
         )
         coeff_stokes[2, i] = (
-            (pol_eff[(i + 1) % 3] * np.cos(2.0 * theta[(i + 1) % 3]) - pol_eff[(i + 2) % 3] * np.cos(2.0 * theta[(i + 2) % 3]))
-            * 2.0
-            / transmit[i]
+            (pol_eff[(i + 1) % 3] * np.cos(2.0 * theta[(i + 1) % 3]) - pol_eff[(i + 2) % 3] * np.cos(2.0 * theta[(i + 2) % 3])) * 2.0 / transmit[i]
         )

     # Normalization parameter for Stokes parameters computation
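
The coeff_stokes block inverts three polarizer fluxes into I, Q, U, weighting by polarizer efficiency and filter transmission. In the idealized case (unit efficiency and transmission, assumed here for illustration), each flux is f_k = 0.5 (I + Q cos 2θ_k + U sin 2θ_k), and the inversion is a plain 3x3 linear solve:

```python
import numpy as np

theta = np.deg2rad([0.0, 60.0, 120.0])  # assumed polarizer orientations
# Rows: one polarizer each; columns: coefficients of I, Q, U
A = 0.5 * np.stack([np.ones(3), np.cos(2 * theta), np.sin(2 * theta)], axis=1)
I_true, Q_true, U_true = 10.0, 1.5, -0.8
flux = A @ np.array([I_true, Q_true, U_true])  # simulated ideal fluxes
print(np.linalg.solve(A, flux))                # -> [10.0, 1.5, -0.8]
```

The sin/cos differences in coeff_stokes are the closed-form entries of this inverse, generalized to unequal efficiencies and transmissions.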
@@ -1348,11 +1358,7 @@ def compute_Stokes(data_array, error_array, data_mask, headers, FWHM=None, scale
         / N
         * (
             np.cos(2.0 * theta[0]) * (pol_flux_corr[1] - pol_flux_corr[2])
-            - (
-                pol_eff[2] * np.cos(-2.0 * theta[2] + 2.0 * theta[0])
-                - pol_eff[1] * np.cos(-2.0 * theta[0] + 2.0 * theta[1])
-            )
-            * Q_stokes
+            - (pol_eff[2] * np.cos(-2.0 * theta[2] + 2.0 * theta[0]) - pol_eff[1] * np.cos(-2.0 * theta[0] + 2.0 * theta[1])) * Q_stokes
             + coeff_stokes_corr[1, 0] * (np.sin(2.0 * theta[0]) * Q_stokes - np.cos(2 * theta[0]) * U_stokes)
         )
     )
@@ -1362,11 +1368,7 @@ def compute_Stokes(data_array, error_array, data_mask, headers, FWHM=None, scale
         / N
         * (
             np.cos(2.0 * theta[1]) * (pol_flux_corr[2] - pol_flux_corr[0])
-            - (
-                pol_eff[0] * np.cos(-2.0 * theta[0] + 2.0 * theta[1])
-                - pol_eff[2] * np.cos(-2.0 * theta[1] + 2.0 * theta[2])
-            )
-            * Q_stokes
+            - (pol_eff[0] * np.cos(-2.0 * theta[0] + 2.0 * theta[1]) - pol_eff[2] * np.cos(-2.0 * theta[1] + 2.0 * theta[2])) * Q_stokes
             + coeff_stokes_corr[1, 1] * (np.sin(2.0 * theta[1]) * Q_stokes - np.cos(2 * theta[1]) * U_stokes)
         )
     )
@@ -1376,11 +1378,7 @@ def compute_Stokes(data_array, error_array, data_mask, headers, FWHM=None, scale
         / N
         * (
             np.cos(2.0 * theta[2]) * (pol_flux_corr[0] - pol_flux_corr[1])
-            - (
-                pol_eff[1] * np.cos(-2.0 * theta[1] + 2.0 * theta[2])
-                - pol_eff[0] * np.cos(-2.0 * theta[2] + 2.0 * theta[0])
-            )
-            * Q_stokes
+            - (pol_eff[1] * np.cos(-2.0 * theta[1] + 2.0 * theta[2]) - pol_eff[0] * np.cos(-2.0 * theta[2] + 2.0 * theta[0])) * Q_stokes
             + coeff_stokes_corr[1, 2] * (np.sin(2.0 * theta[2]) * Q_stokes - np.cos(2 * theta[2]) * U_stokes)
         )
     )
@@ -1392,11 +1390,7 @@ def compute_Stokes(data_array, error_array, data_mask, headers, FWHM=None, scale
         / N
         * (
             np.sin(2.0 * theta[0]) * (pol_flux_corr[1] - pol_flux_corr[2])
-            - (
-                pol_eff[2] * np.cos(-2.0 * theta[2] + 2.0 * theta[0])
-                - pol_eff[1] * np.cos(-2.0 * theta[0] + 2.0 * theta[1])
-            )
-            * U_stokes
+            - (pol_eff[2] * np.cos(-2.0 * theta[2] + 2.0 * theta[0]) - pol_eff[1] * np.cos(-2.0 * theta[0] + 2.0 * theta[1])) * U_stokes
             + coeff_stokes_corr[2, 0] * (np.sin(2.0 * theta[0]) * Q_stokes - np.cos(2 * theta[0]) * U_stokes)
         )
     )
@@ -1406,11 +1400,7 @@ def compute_Stokes(data_array, error_array, data_mask, headers, FWHM=None, scale
         / N
         * (
             np.sin(2.0 * theta[1]) * (pol_flux_corr[2] - pol_flux_corr[0])
-            - (
-                pol_eff[0] * np.cos(-2.0 * theta[0] + 2.0 * theta[1])
-                - pol_eff[2] * np.cos(-2.0 * theta[1] + 2.0 * theta[2])
-            )
-            * U_stokes
+            - (pol_eff[0] * np.cos(-2.0 * theta[0] + 2.0 * theta[1]) - pol_eff[2] * np.cos(-2.0 * theta[1] + 2.0 * theta[2])) * U_stokes
             + coeff_stokes_corr[2, 1] * (np.sin(2.0 * theta[1]) * Q_stokes - np.cos(2 * theta[1]) * U_stokes)
         )
     )
@@ -1420,11 +1410,7 @@ def compute_Stokes(data_array, error_array, data_mask, headers, FWHM=None, scale
         / N
         * (
             np.sin(2.0 * theta[2]) * (pol_flux_corr[0] - pol_flux_corr[1])
-            - (
-                pol_eff[1] * np.cos(-2.0 * theta[1] + 2.0 * theta[2])
-                - pol_eff[0] * np.cos(-2.0 * theta[2] + 2.0 * theta[0])
-            )
-            * U_stokes
+            - (pol_eff[1] * np.cos(-2.0 * theta[1] + 2.0 * theta[2]) - pol_eff[0] * np.cos(-2.0 * theta[2] + 2.0 * theta[0])) * U_stokes
             + coeff_stokes_corr[2, 2] * (np.sin(2.0 * theta[2]) * Q_stokes - np.cos(2 * theta[2]) * U_stokes)
         )
     )
@@ -1451,26 +1437,38 @@ def compute_Stokes(data_array, error_array, data_mask, headers, FWHM=None, scale
         all_Q_stokes = np.zeros((np.unique(rotate).size, data_array.shape[1], data_array.shape[2]))
         all_U_stokes = np.zeros((np.unique(rotate).size, data_array.shape[1], data_array.shape[2]))
         all_Stokes_cov = np.zeros((np.unique(rotate).size, 3, 3, data_array.shape[1], data_array.shape[2]))
-        all_header_stokes = [{},]*np.unique(rotate).size
+        all_header_stokes = [
+            {},
+        ] * np.unique(rotate).size
-        for i,rot in enumerate(np.unique(rotate)):
-            rot_mask = (rotate == rot)
-            all_I_stokes[i], all_Q_stokes[i], all_U_stokes[i], all_Stokes_cov[i], all_header_stokes[i] = compute_Stokes(data_array[rot_mask], error_array[rot_mask], data_mask, [headers[i] for i in np.arange(len(headers))[rot_mask]], FWHM=FWHM, scale=scale, smoothing=smoothing, transmitcorr=transmitcorr, integrate=False)
-        all_exp = np.array([float(h['exptime']) for h in all_header_stokes])
+        for i, rot in enumerate(np.unique(rotate)):
+            rot_mask = rotate == rot
+            all_I_stokes[i], all_Q_stokes[i], all_U_stokes[i], all_Stokes_cov[i], all_header_stokes[i] = compute_Stokes(
+                data_array[rot_mask],
+                error_array[rot_mask],
+                data_mask,
+                [headers[i] for i in np.arange(len(headers))[rot_mask]],
+                FWHM=FWHM,
+                scale=scale,
+                smoothing=smoothing,
+                transmitcorr=transmitcorr,
+                integrate=False,
+            )
+        all_exp = np.array([float(h["exptime"]) for h in all_header_stokes])

-        I_stokes = np.sum([exp*I for exp, I in zip(all_exp, all_I_stokes)],axis=0) / all_exp.sum()
-        Q_stokes = np.sum([exp*Q for exp, Q in zip(all_exp, all_Q_stokes)],axis=0) / all_exp.sum()
-        U_stokes = np.sum([exp*U for exp, U in zip(all_exp, all_U_stokes)],axis=0) / all_exp.sum()
+        I_stokes = np.sum([exp * I for exp, I in zip(all_exp, all_I_stokes)], axis=0) / all_exp.sum()
+        Q_stokes = np.sum([exp * Q for exp, Q in zip(all_exp, all_Q_stokes)], axis=0) / all_exp.sum()
+        U_stokes = np.sum([exp * U for exp, U in zip(all_exp, all_U_stokes)], axis=0) / all_exp.sum()
         Stokes_cov = np.zeros((3, 3, I_stokes.shape[0], I_stokes.shape[1]))
         for i in range(3):
-            Stokes_cov[i,i] = np.sum([exp**2*cov for exp, cov in zip(all_exp, all_Stokes_cov[:,i,i])], axis=0) / all_exp.sum()**2
-            for j in [x for x in range(3) if x!=i]:
-                Stokes_cov[i,j] = np.sqrt(np.sum([exp**2*cov**2 for exp, cov in zip(all_exp, all_Stokes_cov[:,i,j])], axis=0) / all_exp.sum()**2)
-                Stokes_cov[j,i] = np.sqrt(np.sum([exp**2*cov**2 for exp, cov in zip(all_exp, all_Stokes_cov[:,j,i])], axis=0) / all_exp.sum()**2)
+            Stokes_cov[i, i] = np.sum([exp**2 * cov for exp, cov in zip(all_exp, all_Stokes_cov[:, i, i])], axis=0) / all_exp.sum() ** 2
+            for j in [x for x in range(3) if x != i]:
+                Stokes_cov[i, j] = np.sqrt(np.sum([exp**2 * cov**2 for exp, cov in zip(all_exp, all_Stokes_cov[:, i, j])], axis=0) / all_exp.sum() ** 2)
+                Stokes_cov[j, i] = np.sqrt(np.sum([exp**2 * cov**2 for exp, cov in zip(all_exp, all_Stokes_cov[:, j, i])], axis=0) / all_exp.sum() ** 2)

         # Save values to single header
         header_stokes = all_header_stokes[0]
-        header_stokes['exptime'] = all_exp.sum()
+        header_stokes["exptime"] = all_exp.sum()

     # Nan handling :
     fmax = np.finfo(np.float64).max
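
When several rotation groups are reduced separately, their Stokes maps are combined as an exposure-time-weighted mean, and the covariance terms carry exp**2 weights accordingly. The weighting in miniature:

```python
import numpy as np

all_exp = np.array([300.0, 600.0])  # illustrative exposure times (s)
all_I = np.array([np.full((2, 2), 1.0), np.full((2, 2), 2.0)])
I_tot = np.sum([exp * I for exp, I in zip(all_exp, all_I)], axis=0) / all_exp.sum()
print(I_tot)  # every pixel -> (300*1 + 600*2) / 900 = 1.667
```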
@@ -1681,12 +1679,7 @@ def rotate_Stokes(I_stokes, Q_stokes, U_stokes, Stokes_cov, data_mask, header_st
                 U_stokes[i, j] = eps * np.sqrt(Stokes_cov[2, 2][i, j])

     # Rotate I_stokes, Q_stokes, U_stokes using rotation matrix
-    # ang = np.zeros((len(headers),))
-    # for i, head in enumerate(headers):
-    #     pc = WCS(head).celestial.wcs.pc[0,0]
-    #     ang[i] = -np.arccos(WCS(head).celestial.wcs.pc[0,0]) * 180.0 / np.pi if np.abs(pc) < 1. else 0.
-    pc = WCS(header_stokes).celestial.wcs.pc[0,0]
-    ang = -np.arccos(WCS(header_stokes).celestial.wcs.pc[0,0]) * 180.0 / np.pi if np.abs(pc) < 1. else 0.
+    ang = -float(header_stokes["ORIENTAT"])
     alpha = np.pi / 180.0 * ang
     mrot = np.array([[1.0, 0.0, 0.0], [0.0, np.cos(2.0 * alpha), np.sin(2.0 * alpha)], [0, -np.sin(2.0 * alpha), np.cos(2.0 * alpha)]])
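
Reading the angle straight from ORIENTAT replaces the arccos-of-PC-matrix reconstruction, which needed a guard for |pc| >= 1. The matrix itself is the standard Mueller rotation: rotating the frame by α mixes Q and U through 2α while leaving I and the polarized fraction unchanged:

```python
import numpy as np

alpha = np.deg2rad(30.0)
mrot = np.array([[1.0, 0.0, 0.0],
                 [0.0, np.cos(2 * alpha), np.sin(2 * alpha)],
                 [0.0, -np.sin(2 * alpha), np.cos(2 * alpha)]])
S = np.array([1.0, 0.5, 0.0])      # I, Q, U: fully Q-polarized at p = 0.5
print(mrot @ S)                    # -> [1.0, 0.25, -0.433]
print(np.hypot(*(mrot @ S)[1:]))   # polarized fraction p = 0.5 preserved
```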
@@ -1709,7 +1702,7 @@ def rotate_Stokes(I_stokes, Q_stokes, U_stokes, Stokes_cov, data_mask, header_st
     new_Q_stokes = sc_rotate(Q_stokes, ang, order=1, reshape=False, cval=0.0)
     new_U_stokes = sc_rotate(U_stokes, ang, order=1, reshape=False, cval=0.0)
     new_data_mask = sc_rotate(data_mask.astype(float) * 10.0, ang, order=1, reshape=False, cval=0.0)
-    new_data_mask[new_data_mask < 2] = 0.0
+    new_data_mask[new_data_mask < 1.0] = 0.0
     new_data_mask = new_data_mask.astype(bool)
     for i in range(3):
         for j in range(3):
@@ -1725,7 +1718,6 @@ def rotate_Stokes(I_stokes, Q_stokes, U_stokes, Stokes_cov, data_mask, header_st
     mrot = np.array([[np.cos(-alpha), -np.sin(-alpha)], [np.sin(-alpha), np.cos(-alpha)]])
     new_header_stokes = deepcopy(header_stokes)
-    new_header_stokes["orientat"] = header_stokes["orientat"] + ang
     new_wcs = WCS(header_stokes).celestial.deepcopy()
     new_wcs.wcs.pc = np.dot(mrot, new_wcs.wcs.pc)
@@ -1737,7 +1729,7 @@ def rotate_Stokes(I_stokes, Q_stokes, U_stokes, Stokes_cov, data_mask, header_st
         new_header_stokes.set("PC1_1", 1.0)
     if new_wcs.wcs.pc[1, 1] == 1.0:
         new_header_stokes.set("PC2_2", 1.0)
-    new_header_stokes["orientat"] = header_stokes["orientat"] + ang
+    new_header_stokes["ORIENTAT"] += ang

     # Nan handling :
     fmax = np.finfo(np.float64).max
@@ -1824,7 +1816,7 @@ def rotate_data(data_array, error_array, data_mask, headers):
     new_data_array = []
     new_error_array = []
     new_data_mask = []
-    for i,header in zip(range(data_array.shape[0]),headers):
+    for i, header in zip(range(data_array.shape[0]), headers):
         ang = -float(header["ORIENTAT"])
         alpha = ang * np.pi / 180.0
@@ -1842,14 +1834,14 @@ def rotate_data(data_array, error_array, data_mask, headers):
         new_wcs.wcs.set()
         for key, val in new_wcs.to_header().items():
             new_header[key] = val
-        new_header["ORIENTAT"] = np.arccos(new_wcs.celestial.wcs.pc[0,0]) * 180.0 / np.pi
+        new_header["ORIENTAT"] = np.arccos(new_wcs.celestial.wcs.pc[0, 0]) * 180.0 / np.pi
         new_header["ROTATE"] = ang
         new_headers.append(new_header)

     new_data_array = np.array(new_data_array)
     new_error_array = np.array(new_error_array)
     new_data_mask = np.array(new_data_mask).sum(axis=0)
-    new_data_mask[new_data_mask < new_data_mask.max()*2./3.] = 0.0
+    new_data_mask[new_data_mask < 1.0] = 0.0
     new_data_mask = new_data_mask.astype(bool)
     for i in range(new_data_array.shape[0]):