Change display, margin handling and redo all reductions

Thibault Barnouin
2021-06-17 22:00:52 +02:00
parent 8c0ab1fad1
commit 44a060e2ae
402 changed files with 176 additions and 2601 deletions

View File

@@ -12,10 +12,17 @@ prototypes :
import numpy as np
import matplotlib.pyplot as plt
from matplotlib.patches import Rectangle
from mpl_toolkits.axes_grid1.anchored_artists import AnchoredSizeBar
import matplotlib.font_manager as fm
from mpl_toolkits.axes_grid1.anchored_artists import AnchoredSizeBar, AnchoredDirectionArrows
from astropy.wcs import WCS
def sci_not(v,err,rnd=1):
power = - int(('%E' % v)[-3:])+1
return r"({0} $\pm$ {1})e{2}".format(
round(v*10**power,rnd),round(err*10**power,rnd),-power)
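A quick check of the new sci_not helper (illustrative numbers, assuming the definition just above): the exponent is chosen so that the rescaled value carries two digits before the decimal point.

print(sci_not(1.23e-15, 4.5e-17, rnd=2))   # -> (12.3 $\pm$ 0.45)e-16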
def plot_obs(data_array, headers, shape=None, vmin=0., vmax=6., rectangle=None,
savename=None, plots_folder=""):
"""
@@ -66,8 +73,8 @@ def plot_obs(data_array, headers, shape=None, vmin=0., vmax=6., rectangle=None,
#plots
im = ax.imshow(data, vmin=vmin, vmax=vmax, origin='lower')
if not(rectangle is None):
x, y, width, height = rectangle[i]
ax.add_patch(Rectangle((x, y), width, height, edgecolor='r', fill=False))
x, y, width, height, color = rectangle[i]
ax.add_patch(Rectangle((x, y), width, height, edgecolor=color, fill=False))
#position of centroid
ax.plot([data.shape[1]/2, data.shape[1]/2], [0,data.shape[0]-1], lw=1,
color='black')
@@ -137,7 +144,7 @@ def plot_Stokes(Stokes, savename=None, plots_folder=""):
return 0
def polarization_map(Stokes, SNRp_cut=3., SNRi_cut=30., step_vec=1,
def polarization_map(Stokes, data_mask, SNRp_cut=3., SNRi_cut=30., step_vec=1,
savename=None, plots_folder="", display=None):
"""
Plots polarization map from Stokes HDUList.
@@ -177,7 +184,8 @@ def polarization_map(Stokes, SNRp_cut=3., SNRi_cut=30., step_vec=1,
stkQ = Stokes[np.argmax([Stokes[i].header['datatype']=='Q_stokes' for i in range(len(Stokes))])]
stkU = Stokes[np.argmax([Stokes[i].header['datatype']=='U_stokes' for i in range(len(Stokes))])]
stk_cov = Stokes[np.argmax([Stokes[i].header['datatype']=='IQU_cov_matrix' for i in range(len(Stokes))])]
pol = Stokes[np.argmax([Stokes[i].header['datatype']=='Pol_deg' for i in range(len(Stokes))])]
#pol = Stokes[np.argmax([Stokes[i].header['datatype']=='Pol_deg' for i in range(len(Stokes))])]
pol = Stokes[np.argmax([Stokes[i].header['datatype']=='Pol_deg_debiased' for i in range(len(Stokes))])]
pol_err = Stokes[np.argmax([Stokes[i].header['datatype']=='Pol_deg_err' for i in range(len(Stokes))])]
#pol_err_Poisson = Stokes[np.argmax([Stokes[i].header['datatype']=='Pol_deg_err_Poisson_noise' for i in range(len(Stokes))])]
pang = Stokes[np.argmax([Stokes[i].header['datatype']=='Pol_ang' for i in range(len(Stokes))])]
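The repeated np.argmax lookups above return the first HDU whose 'datatype' header card matches a given label (the polarization map now reads 'Pol_deg_debiased' instead of 'Pol_deg'). A small helper capturing that pattern could look like the sketch below; the name find_hdu is hypothetical and not part of this commit.

import numpy as np

def find_hdu(hdul, datatype):
    # First HDU whose 'datatype' header card equals the requested label
    return hdul[np.argmax([hdu.header['datatype'] == datatype for hdu in hdul])]

# e.g. pol = find_hdu(Stokes, 'Pol_deg_debiased')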
@@ -210,11 +218,12 @@ def polarization_map(Stokes, SNRp_cut=3., SNRi_cut=30., step_vec=1,
print("No pixel with polarization information above requested SNR.")
#Plot the map
plt.rcParams.update({'font.size': 16})
fig = plt.figure(figsize=(15,15))
ax = fig.add_subplot(111, projection=wcs)
ax.set_facecolor('k')
fig.subplots_adjust(hspace=0, wspace=0, right=0.95)
cbar_ax = fig.add_axes([0.98, 0.12, 0.01, 0.75])
fig.subplots_adjust(hspace=0, wspace=0, right=0.9)
cbar_ax = fig.add_axes([0.95, 0.12, 0.01, 0.75])
if display is None:
# If no display selected, show intensity map
@@ -239,6 +248,7 @@ def polarization_map(Stokes, SNRp_cut=3., SNRi_cut=30., step_vec=1,
im = ax.imshow(SNRi, extent=[-SNRi.shape[1]/2.,SNRi.shape[1]/2.,-SNRi.shape[0]/2.,SNRi.shape[0]/2.], vmin=vmin, vmax=vmax, aspect='auto', cmap='inferno', alpha=1.)
cbar = plt.colorbar(im, cax=cbar_ax, label=r"$I_{Stokes}/\sigma_{I}$")
levelsI = np.linspace(SNRi_cut, np.max(SNRi[SNRi > 0.]), 10)
#print(levelsI)
cont = ax.contour(SNRi, extent=[-SNRi.shape[1]/2.,SNRi.shape[1]/2.,-SNRi.shape[0]/2.,SNRi.shape[0]/2.], levels=levelsI, colors='grey', linewidths=0.5)
elif display.lower() in ['snrp']:
# Display polarization degree signal-to-noise map
@@ -255,26 +265,31 @@ def polarization_map(Stokes, SNRp_cut=3., SNRi_cut=30., step_vec=1,
levelsI = np.linspace(SNRi_cut, SNRi.max(), 10)
cont = ax.contour(SNRi, extent=[-SNRi.shape[1]/2.,SNRi.shape[1]/2.,-SNRi.shape[0]/2.,SNRi.shape[0]/2.], levels=levelsI, colors='grey', linewidths=0.5)
fontprops = fm.FontProperties(size=16)
px_size = wcs.wcs.get_cdelt()[0]
px_sc = AnchoredSizeBar(ax.transData, 1./px_size, '1 arcsec', 3, pad=0.5, sep=5, borderpad=0.5, frameon=False, size_vertical=0.005, color='w')
px_sc = AnchoredSizeBar(ax.transData, 1./px_size, '1 arcsec', 3, pad=0.5, sep=5, borderpad=0.5, frameon=False, size_vertical=0.005, color='w', fontproperties=fontprops)
ax.add_artist(px_sc)
X, Y = np.meshgrid(np.linspace(-stkI.data.shape[0]/2.,stkI.data.shape[0]/2.,stkI.data.shape[0]), np.linspace(-stkI.data.shape[1]/2.,stkI.data.shape[1]/2.,stkI.data.shape[1]))
U, V = pol.data*np.cos(np.pi/2.+pang.data*np.pi/180.), pol.data*np.sin(np.pi/2.+pang.data*np.pi/180.)
Q = ax.quiver(X[::step_vec,::step_vec],Y[::step_vec,::step_vec],U[::step_vec,::step_vec],V[::step_vec,::step_vec],units='xy',angles='uv',scale=50.,scale_units='xy',pivot='mid',headwidth=0.,headlength=0.,headaxislength=0.,width=0.1,color='w')
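For orientation, the quiver call above draws headless segments with components

(U, V) = P\,\bigl(\cos(90^\circ + \theta),\ \sin(90^\circ + \theta)\bigr) = P\,(-\sin\theta,\ \cos\theta)

so each segment's length scales with the polarization degree P and its tilt from the image +y axis is the polarization angle \theta. With scale=50 and scale_units='xy', a fully polarized pixel (P = 100 %) spans 2 pixels, matching the 2-data-unit "$P$= 100 %" size bar defined just below.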
pol_sc = AnchoredSizeBar(ax.transData, 2., r"$P$= 100 %", 4, pad=0.5, sep=5, borderpad=0.5, frameon=False, size_vertical=0.005, color='w')
pol_sc = AnchoredSizeBar(ax.transData, 2., r"$P$= 100 %", 4, pad=0.5, sep=5, borderpad=0.5, frameon=False, size_vertical=0.005, color='w', fontproperties=fontprops)
ax.add_artist(pol_sc)
north_dir = AnchoredDirectionArrows(ax.transAxes, "E", "N", length=-0.08, fontsize=0.03, loc=1, aspect_ratio=-1, sep_y=0.01, sep_x=0.01, angle=-Stokes[0].header['orientat'], color='w', arrow_props={'ec': 'w', 'fc': 'w', 'alpha': 1,'lw': 2})
ax.add_artist(north_dir)
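The scale bars now take explicit FontProperties and an E/N compass (AnchoredDirectionArrows) rotated by -ORIENTAT is added. Below is a minimal standalone sketch of the two artists on a bare axes; the bar length and the 30-degree angle are placeholders, not values from the data.

import matplotlib.pyplot as plt
import matplotlib.font_manager as fm
from mpl_toolkits.axes_grid1.anchored_artists import AnchoredSizeBar, AnchoredDirectionArrows

fig, ax = plt.subplots()
ax.set_facecolor('k')
fontprops = fm.FontProperties(size=16)
# Scale bar: 0.2 data units labelled '1 arcsec' (placeholder length)
px_sc = AnchoredSizeBar(ax.transData, 0.2, '1 arcsec', 3, pad=0.5, sep=5,
                        borderpad=0.5, frameon=False, size_vertical=0.005,
                        color='w', fontproperties=fontprops)
ax.add_artist(px_sc)
# E/N compass rotated by an illustrative -30 degrees (the map uses -ORIENTAT)
north_dir = AnchoredDirectionArrows(ax.transAxes, "E", "N", length=-0.08,
                                    fontsize=0.03, loc=1, aspect_ratio=-1,
                                    sep_x=0.01, sep_y=0.01, angle=-30.,
                                    color='w', arrow_props={'ec': 'w', 'fc': 'w',
                                                            'alpha': 1, 'lw': 2})
ax.add_artist(north_dir)
plt.show()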
# Compute integrated parameters and associated errors for pixels in the cut
n_pix = mask.size
I_int = stkI.data[mask].sum()
Q_int = stkQ.data[mask].sum()
U_int = stkU.data[mask].sum()
I_int_err = np.sqrt(np.sum(stk_cov.data[0,0][mask]))
Q_int_err = np.sqrt(np.sum(stk_cov.data[1,1][mask]))
U_int_err = np.sqrt(np.sum(stk_cov.data[2,2][mask]))
IQ_int_err = np.sqrt(np.sum(stk_cov.data[0,1][mask]**2))
IU_int_err = np.sqrt(np.sum(stk_cov.data[0,2][mask]**2))
QU_int_err = np.sqrt(np.sum(stk_cov.data[1,2][mask]**2))
I_int_err = np.sqrt(n_pix)*np.sqrt(np.sum(stk_cov.data[0,0][mask]))
Q_int_err = np.sqrt(n_pix)*np.sqrt(np.sum(stk_cov.data[1,1][mask]))
U_int_err = np.sqrt(n_pix)*np.sqrt(np.sum(stk_cov.data[2,2][mask]))
IQ_int_err = np.sqrt(n_pix)*np.sqrt(np.sum(stk_cov.data[0,1][mask]**2))
IU_int_err = np.sqrt(n_pix)*np.sqrt(np.sum(stk_cov.data[0,2][mask]**2))
QU_int_err = np.sqrt(n_pix)*np.sqrt(np.sum(stk_cov.data[1,2][mask]**2))
P_int = np.sqrt(Q_int**2+U_int**2)/I_int*100.
P_int_err = (100./I_int)*np.sqrt((Q_int**2*Q_int_err**2 + U_int**2*U_int_err**2 + 2.*Q_int*U_int*QU_int_err)/(Q_int**2 + U_int**2) + ((Q_int/I_int)**2 + (U_int/I_int)**2)*I_int_err**2 - 2.*(Q_int/I_int)*IQ_int_err - 2.*(U_int/I_int)*IU_int_err)
@@ -283,23 +298,26 @@ def polarization_map(Stokes, SNRp_cut=3., SNRi_cut=30., step_vec=1,
PA_int_err = (90./(np.pi*(Q_int**2 + U_int**2)))*np.sqrt(U_int**2*Q_int_err**2 + Q_int**2*U_int_err**2 - 2.*Q_int*U_int*QU_int_err)
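For reference, the propagation implemented above for the integrated quantities transcribes to (using the \sqrt{n_{\rm pix}}-scaled sums \sigma_I, \sigma_Q, \sigma_U, \sigma_{IQ}, \sigma_{IU}, \sigma_{QU} computed just before):

P = \frac{\sqrt{Q^2 + U^2}}{I} \times 100\,\%

\sigma_P = \frac{100}{I}\sqrt{\frac{Q^2\sigma_Q^2 + U^2\sigma_U^2 + 2QU\sigma_{QU}}{Q^2 + U^2} + \left[\left(\frac{Q}{I}\right)^2 + \left(\frac{U}{I}\right)^2\right]\sigma_I^2 - \frac{2Q}{I}\sigma_{IQ} - \frac{2U}{I}\sigma_{IU}}

\theta_P = \frac{90}{\pi}\,\mathrm{arctan2}(U, Q) + 90^\circ, \qquad
\sigma_{\theta_P} = \frac{90}{\pi\,(Q^2 + U^2)}\sqrt{U^2\sigma_Q^2 + Q^2\sigma_U^2 - 2QU\sigma_{QU}}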
# Compute integrated parameters and associated errors for all pixels
n_pix = stkI.data.size
I_diluted = stkI.data.sum()
Q_diluted = stkQ.data.sum()
U_diluted = stkU.data.sum()
I_diluted_err = np.sqrt(np.sum(stk_cov.data[0,0]))
Q_diluted_err = np.sqrt(np.sum(stk_cov.data[1,1]))
U_diluted_err = np.sqrt(np.sum(stk_cov.data[2,2]))
IQ_diluted_err = np.sqrt(np.sum(stk_cov.data[0,1]**2))
IU_diluted_err = np.sqrt(np.sum(stk_cov.data[0,2]**2))
QU_diluted_err = np.sqrt(np.sum(stk_cov.data[1,2]**2))
I_diluted_err = np.sqrt(n_pix)*np.sqrt(np.sum(stk_cov.data[0,0]))
Q_diluted_err = np.sqrt(n_pix)*np.sqrt(np.sum(stk_cov.data[1,1]))
U_diluted_err = np.sqrt(n_pix)*np.sqrt(np.sum(stk_cov.data[2,2]))
IQ_diluted_err = np.sqrt(n_pix)*np.sqrt(np.sum(stk_cov.data[0,1]**2))
IU_diluted_err = np.sqrt(n_pix)*np.sqrt(np.sum(stk_cov.data[0,2]**2))
QU_diluted_err = np.sqrt(n_pix)*np.sqrt(np.sum(stk_cov.data[1,2]**2))
P_diluted = np.sqrt(Q_diluted**2+U_diluted**2)/I_diluted*100.
P_diluted_err = (100./I_diluted)*np.sqrt((Q_diluted**2*Q_diluted_err**2 + U_diluted**2*U_diluted_err**2 + 2.*Q_diluted*U_diluted*QU_diluted_err)/(Q_diluted**2 + U_diluted**2) + ((Q_diluted/I_diluted)**2 + (U_diluted/I_diluted)**2)*I_diluted_err**2 - 2.*(Q_diluted/I_diluted)*IQ_diluted_err - 2.*(U_diluted/I_diluted)*IU_diluted_err)
P_diluted_err = np.sqrt(2/n_pix)*100.
PA_diluted = (90./np.pi)*np.arctan2(U_diluted,Q_diluted)+90.
PA_diluted_err = (90./(np.pi*(Q_diluted**2 + U_diluted**2)))*np.sqrt(U_diluted**2*Q_diluted_err**2 + Q_diluted**2*U_diluted_err**2 - 2.*Q_diluted*U_diluted*QU_diluted_err)
PA_diluted_err = P_diluted_err/(2.*P_diluted)*180./np.pi
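For the image-wide ('diluted') values, the full propagation is now replaced by a simple counting estimate; the two new lines transcribe to

\sigma_P^{\rm diluted} = 100\sqrt{\frac{2}{N_{\rm pix}}}\ \%, \qquad
\sigma_{\theta_P}^{\rm diluted} = \frac{\sigma_P^{\rm diluted}}{2\,P^{\rm diluted}} \times \frac{180}{\pi}

with N_{\rm pix} = stkI.data.size.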
ax.annotate(r"$F_{{\lambda}}^{{int}}$({0:.0f} $\AA$) = {1:.1e} $\pm$ {2:.1e} $ergs \cdot cm^{{-2}} \cdot s^{{-1}} \cdot \AA^{{-1}}$".format(pivot_wav,I_int*convert_flux,I_int_err*convert_flux)+"\n"+r"$P^{{int}}$ = {0:.2f} $\pm$ {1:.2f} %".format(P_int,P_int_err)+"\n"+r"$\theta_{{P}}^{{int}}$ = {0:.2f} $\pm$ {1:.2f} °".format(PA_int,PA_int_err)+"\n"+r"$P^{{diluted}}$ = {0:.2f} $\pm$ {1:.2f} %".format(P_diluted,P_diluted_err)+"\n"+r"$\theta_{{P}}^{{diluted}}$ = {0:.2f} $\pm$ {1:.2f} °".format(PA_diluted,PA_diluted_err), color='white', fontsize=11, xy=(0.01, 0.90), xycoords='axes fraction')
ax.annotate(r"$F_{{\lambda}}^{{int}}$({0:.0f} $\AA$) = {1} $ergs \cdot cm^{{-2}} \cdot s^{{-1}} \cdot \AA^{{-1}}$".format(pivot_wav,sci_not(I_diluted*convert_flux,I_diluted_err*convert_flux,2))+"\n"+r"$P^{{int}}$ = {0:.1f} $\pm$ {1:.1f} %".format(P_diluted,P_diluted_err)+"\n"+r"$\theta_{{P}}^{{int}}$ = {0:.1f} $\pm$ {1:.1f} °".format(PA_diluted,PA_diluted_err), color='white', fontsize=16, xy=(0.01, 0.92), xycoords='axes fraction')
ax.coords.grid(True, color='white', ls='dotted', alpha=0.5)
ax.coords[0].set_axislabel('Right Ascension (J2000)')
@@ -311,7 +329,7 @@ def polarization_map(Stokes, SNRp_cut=3., SNRi_cut=30., step_vec=1,
ax.axis('equal')
if not savename is None:
fig.suptitle(savename)
#fig.suptitle(savename)
fig.savefig(plots_folder+savename+".png",bbox_inches='tight',dpi=200)
plt.show()

View File

@@ -1,247 +0,0 @@
"""
Library functions for displaying information using matplotlib
prototypes :
- plot_obs(data_array, headers, shape, vmin, vmax, savename, plots_folder)
Plots whole observation raw data in given display shape
- polarization_map(Stokes_hdul, SNRp_cut, SNRi_cut, step_vec, savename, plots_folder, display)
Plots polarization map of polarimetric parameters saved in an HDUList
"""
import numpy as np
import matplotlib.pyplot as plt
from matplotlib.patches import Rectangle
from mpl_toolkits.axes_grid1.anchored_artists import AnchoredSizeBar
from astropy.wcs import WCS
def plot_obs(data_array, headers, shape=None, vmin=0., vmax=6., rectangle=None,
savename=None, plots_folder=""):
"""
Plots raw observation imagery with some information on the instrument and
filters.
----------
Inputs:
data_array : numpy.ndarray
Array of images (2D floats, aligned and of the same shape) of a
single observation with multiple polarizers of an instrument
headers : header list
List of headers corresponding to the images in data_array
shape : array-like of length 2, optional
Shape of the display, with shape = [#row, #columns]. If None, defaults
to the optimal square.
Defaults to None.
vmin : float, optional
Min pixel value that should be displayed.
Defaults to 0.
vmax : float, optional
Max pixel value that should be displayed.
Defaults to 6.
rectangle : numpy.ndarray, optional
Array of parameters for matplotlib.patches.Rectangle objects that will
be displayed on each output image. If None, no rectangle displayed.
Defaults to None.
savename : str, optional
Name of the figure the map should be saved to. If None, the map won't
be saved (only displayed).
Defaults to None.
plots_folder : str, optional
Relative (or absolute) filepath to the folder in which the map will
be saved. Not used if savename is None.
Defaults to current folder.
"""
if shape is None:
shape = np.array([np.ceil(np.sqrt(data_array.shape[0])).astype(int),]*2)
fig, ax = plt.subplots(shape[0], shape[1], figsize=(10,10), dpi=200,
sharex=True, sharey=True)
for i, enum in enumerate(list(zip(ax.flatten(),data_array))):
ax = enum[0]
data = enum[1]
instr = headers[i]['instrume']
rootname = headers[i]['rootname']
exptime = headers[i]['exptime']
filt = headers[i]['filtnam1']
#plots
im = ax.imshow(data, vmin=vmin, vmax=vmax, origin='lower')
if not(rectangle is None):
x, y, width, height = rectangle[i]
ax.add_patch(Rectangle((x, y), width, height, edgecolor='r', fill=False))
#position of centroid
ax.plot([data.shape[1]/2, data.shape[1]/2], [0,data.shape[0]-1], lw=1,
color='black')
ax.plot([0,data.shape[1]-1], [data.shape[1]/2, data.shape[1]/2], lw=1,
color='black')
ax.annotate(instr+":"+rootname, color='white', fontsize=5, xy=(0.02, 0.95), xycoords='axes fraction')
ax.annotate(filt, color='white', fontsize=10, xy=(0.02, 0.02), xycoords='axes fraction')
ax.annotate(exptime, color='white', fontsize=5, xy=(0.80, 0.02), xycoords='axes fraction')
fig.subplots_adjust(hspace=0, wspace=0, right=0.85)
cbar_ax = fig.add_axes([0.9, 0.12, 0.02, 0.75])
fig.colorbar(im, cax=cbar_ax)
if not (savename is None):
fig.suptitle(savename)
fig.savefig(plots_folder+savename+".png",bbox_inches='tight')
plt.show()
return 0
def polarization_map(Stokes, SNRp_cut=3., SNRi_cut=30., step_vec=1,
savename=None, plots_folder="", display=None):
"""
Plots polarization map from Stokes HDUList.
----------
Inputs:
Stokes : astropy.io.fits.hdu.hdulist.HDUList
HDUList containing I, Q, U, P, s_P, PA, s_PA (in this particular order)
for one observation.
SNRp_cut : float, optional
Cut that should be applied to the signal-to-noise ratio on P.
Any SNR < SNRp_cut won't be displayed.
Defaults to 3.
SNRi_cut : float, optional
Cut that should be applied to the signal-to-noise ratio on I.
Any SNR < SNRi_cut won't be displayed.
Defaults to 30. This value implies an uncertainty in P of 4.7%
step_vec : int, optional
Number of steps between each displayed polarization vector.
If step_vec = 2, every other vector will be displayed.
Defaults to 1
savename : str, optional
Name of the figure the map should be saved to. If None, the map won't
be saved (only displayed).
Defaults to None.
plots_folder : str, optional
Relative (or absolute) filepath to the folder in which the map will
be saved. Not used if savename is None.
Defaults to current folder.
display : str, optional
Choose the map to display between intensity (default), polarization
degree ('p','pol','pol_deg') or polarization degree error ('s_p',
'pol_err','pol_deg_err').
Defaults to None (intensity).
"""
#Get data
stkI = Stokes[np.argmax([Stokes[i].header['datatype']=='I_stokes' for i in range(len(Stokes))])]
stkQ = Stokes[np.argmax([Stokes[i].header['datatype']=='Q_stokes' for i in range(len(Stokes))])]
stkU = Stokes[np.argmax([Stokes[i].header['datatype']=='U_stokes' for i in range(len(Stokes))])]
stk_cov = Stokes[np.argmax([Stokes[i].header['datatype']=='IQU_cov_matrix' for i in range(len(Stokes))])]
pol = Stokes[np.argmax([Stokes[i].header['datatype']=='Pol_deg' for i in range(len(Stokes))])]
pol_err = Stokes[np.argmax([Stokes[i].header['datatype']=='Pol_deg_err' for i in range(len(Stokes))])]
pol_err_Poisson = Stokes[np.argmax([Stokes[i].header['datatype']=='Pol_deg_err_Poisson_noise' for i in range(len(Stokes))])]
pang = Stokes[np.argmax([Stokes[i].header['datatype']=='Pol_ang' for i in range(len(Stokes))])]
pang_err = Stokes[np.argmax([Stokes[i].header['datatype']=='Pol_ang_err' for i in range(len(Stokes))])]
pivot_wav = Stokes[0].header['photplam']
convert_flux = Stokes[0].header['photflam']/Stokes[0].header['exptot']
wcs = WCS(Stokes[0]).deepcopy()
#Compute SNR and apply cuts
pol.data[pol.data == 0.] = np.nan
#SNRp = pol.data/pol_err.data
SNRp = pol.data/pol_err_Poisson.data
pol.data[SNRp < SNRp_cut] = np.nan
#SNRi = stkI.data/np.sqrt(stk_cov.data[0,0])
SNRi = stkI.data/np.std(stkI.data[0:10,0:10])
pol.data[SNRi < SNRi_cut] = np.nan
mask = (SNRp < SNRp_cut) * (SNRi < SNRi_cut)
# Look for pixel of max polarization
if np.isfinite(pol.data).any():
p_max = np.max(pol.data[np.isfinite(pol.data)])
x_max, y_max = np.unravel_index(np.argmax(pol.data==p_max),pol.data.shape)
else:
print("No pixel with polarization information above requested SNR.")
#Plot the map
fig = plt.figure(figsize=(15,15))
ax = fig.add_subplot(111, projection=wcs)
ax.set_facecolor('k')
fig.subplots_adjust(hspace=0, wspace=0, right=0.95)
cbar_ax = fig.add_axes([0.98, 0.12, 0.01, 0.75])
if display is None:
# If no display selected, show intensity map
vmin, vmax = 0., np.max(stkI.data[stkI.data > 0.]*convert_flux)
im = ax.imshow(stkI.data*convert_flux,extent=[-stkI.data.shape[1]/2.,stkI.data.shape[1]/2.,-stkI.data.shape[0]/2.,stkI.data.shape[0]/2.], vmin=vmin, vmax=vmax, aspect='auto', cmap='inferno', alpha=1.)
cbar = plt.colorbar(im, cax=cbar_ax, label=r"$F_{\lambda}$ [$ergs \cdot cm^{-2} \cdot s^{-1} \cdot \AA^{-1}$]")
levelsI = np.linspace(SNRi_cut, SNRi.max(), 10)
cont = ax.contour(SNRi, extent=[-SNRi.shape[1]/2.,SNRi.shape[1]/2.,-SNRi.shape[0]/2.,SNRi.shape[0]/2.], levels=levelsI, colors='grey', linewidths=0.5)
elif display.lower() in ['p','pol','pol_deg']:
# Display polarization degree map
vmin, vmax = 0., 100.
im = ax.imshow(pol.data,extent=[-pol.data.shape[1]/2.,pol.data.shape[1]/2.,-pol.data.shape[0]/2.,pol.data.shape[0]/2.], vmin=vmin, vmax=vmax, aspect='auto', cmap='inferno', alpha=1.)
cbar = plt.colorbar(im, cax=cbar_ax, label=r"$P$ [%]")
elif display.lower() in ['s_p','pol_err','pol_deg_err']:
# Display polarization degree error map
vmin, vmax = 0., 5.
im = ax.imshow(pol_err.data,extent=[-pol_err.data.shape[1]/2.,pol_err.data.shape[1]/2.,-pol_err.data.shape[0]/2.,pol_err.data.shape[0]/2.], vmin=vmin, vmax=vmax, aspect='auto', cmap='inferno', alpha=1.)
cbar = plt.colorbar(im, cax=cbar_ax, label=r"$\sigma_P$ [%]")
elif display.lower() in ['snr','snri']:
# Display I_stokes signal-to-noise map
vmin, vmax = 0., SNRi.max()
im = ax.imshow(SNRi, extent=[-SNRi.shape[1]/2.,SNRi.shape[1]/2.,-SNRi.shape[0]/2.,SNRi.shape[0]/2.], vmin=vmin, vmax=vmax, aspect='auto', cmap='inferno', alpha=1.)
cbar = plt.colorbar(im, cax=cbar_ax, label=r"$I_{Stokes}/\sigma_{I}$")
levelsI = np.linspace(SNRi_cut, SNRi.max(), 20)
cont = ax.contour(SNRi, extent=[-SNRi.shape[1]/2.,SNRi.shape[1]/2.,-SNRi.shape[0]/2.,SNRi.shape[0]/2.], levels=levelsI, colors='grey', linewidths=0.5)
elif display.lower() in ['snrp']:
# Display polarization degree signal-to-noise map
vmin, vmax = SNRp_cut, np.max(SNRp[SNRp > 0.])
im = ax.imshow(SNRp, extent=[-SNRp.shape[1]/2.,SNRp.shape[1]/2.,-SNRp.shape[0]/2.,SNRp.shape[0]/2.], vmin=vmin, vmax=vmax, aspect='auto', cmap='inferno', alpha=1.)
cbar = plt.colorbar(im, cax=cbar_ax, label=r"$P/\sigma_{P}$")
levelsP = np.linspace(SNRp_cut, np.max(SNRp[SNRp > 0.]), 10)
cont = ax.contour(SNRp, extent=[-SNRp.shape[1]/2.,SNRp.shape[1]/2.,-SNRp.shape[0]/2.,SNRp.shape[0]/2.], levels=levelsP, colors='grey', linewidths=0.5)
else:
# Defaults to intensity map
vmin, vmax = 0., np.max(stkI.data[stkI.data > 0.]*convert_flux)
im = ax.imshow(stkI.data*convert_flux,extent=[-stkI.data.shape[1]/2.,stkI.data.shape[1]/2.,-stkI.data.shape[0]/2.,stkI.data.shape[0]/2.], vmin=vmin, vmax=vmax, aspect='auto', cmap='inferno', alpha=1.)
cbar = plt.colorbar(im, cax=cbar_ax, label=r"$F_{\lambda}$ [$ergs \cdot cm^{-2} \cdot s^{-1} \cdot \AA$]")
levelsI = np.linspace(SNRi_cut, SNRi.max(), 10)
cont = ax.contour(SNRi, extent=[-SNRi.shape[1]/2.,SNRi.shape[1]/2.,-SNRi.shape[0]/2.,SNRi.shape[0]/2.], levels=levelsI, colors='grey', linewidths=0.5)
px_size = wcs.wcs.get_cdelt()[0]
px_sc = AnchoredSizeBar(ax.transData, 1./px_size, '1 arcsec', 3, pad=0.5, sep=5, borderpad=0.5, frameon=False, size_vertical=0.005, color='w')
ax.add_artist(px_sc)
X, Y = np.meshgrid(np.linspace(-stkI.data.shape[0]/2.,stkI.data.shape[0]/2.,stkI.data.shape[0]), np.linspace(-stkI.data.shape[1]/2.,stkI.data.shape[1]/2.,stkI.data.shape[1]))
U, V = pol.data*np.cos(np.pi/2.+pang.data*np.pi/180.), pol.data*np.sin(np.pi/2.+pang.data*np.pi/180.)
Q = ax.quiver(X[::step_vec,::step_vec],Y[::step_vec,::step_vec],U[::step_vec,::step_vec],V[::step_vec,::step_vec],units='xy',angles='uv',scale=50.,scale_units='xy',pivot='mid',headwidth=0.,headlength=0.,headaxislength=0.,width=0.1,color='w')
pol_sc = AnchoredSizeBar(ax.transData, 2., r"$P$= 100 %", 4, pad=0.5, sep=5, borderpad=0.5, frameon=False, size_vertical=0.005, color='w')
ax.add_artist(pol_sc)
# Compute integrated parameters and associated errors
I_int = stkI.data[mask].sum()
Q_int = stkQ.data[mask].sum()
U_int = stkU.data[mask].sum()
I_int_err = np.sqrt(np.sum(stk_cov.data[0,0][mask]))
Q_int_err = np.sqrt(np.sum(stk_cov.data[1,1][mask]))
U_int_err = np.sqrt(np.sum(stk_cov.data[2,2][mask]))
IQ_int_err = np.sqrt(np.sum(stk_cov.data[0,1][mask]**2))
IU_int_err = np.sqrt(np.sum(stk_cov.data[0,2][mask]**2))
QU_int_err = np.sqrt(np.sum(stk_cov.data[1,2][mask]**2))
P_int = np.sqrt(Q_int**2+U_int**2)/I_int*100.
P_int_err = np.sqrt(2.)*(I_int)**(-0.5)*100.#(100./I_int)*np.sqrt((Q_int**2*Q_int_err**2 + U_int**2*U_int_err**2 + 2.*Q_int*U_int*QU_int_err)/(Q_int**2 + U_int**2) + ((Q_int/I_int)**2 + (U_int/I_int)**2)*I_int_err**2 - 2.*(Q_int/I_int)*IQ_int_err - 2.*(U_int/I_int)*IU_int_err)
PA_int = (90./np.pi)*np.arctan2(U_int,Q_int)+90.
PA_int_err = P_int_err/(P_int)*180./np.pi#(90./(np.pi*(Q_int**2 + U_int**2)))*np.sqrt(U_int**2*Q_int_err**2 + Q_int**2*U_int_err**2 - 2.*Q_int*U_int*QU_int_err)
ax.annotate(r"$F_{{\lambda}}^{{int}}$({0:.0f} $\AA$) = {1:.1e} $\pm$ {2:.1e} $ergs \cdot cm^{{-2}} \cdot s^{{-1}} \cdot \AA^{{-1}}$".format(pivot_wav,I_int*convert_flux,I_int_err*convert_flux)+"\n"+r"$P^{{int}}$ = {0:.2f} $\pm$ {1:.2f} %".format(P_int,P_int_err)+"\n"+r"$\theta_{{P}}^{{int}}$ = {0:.2f} $\pm$ {1:.2f} °".format(PA_int,PA_int_err), color='white', fontsize=11, xy=(0.01, 0.94), xycoords='axes fraction')
ax.coords.grid(True, color='white', ls='dotted', alpha=0.5)
ax.coords[0].set_axislabel('Right Ascension (J2000)')
ax.coords[0].set_axislabel_position('t')
ax.coords[0].set_ticklabel_position('t')
ax.coords[1].set_axislabel('Declination (J2000)')
ax.coords[1].set_axislabel_position('l')
ax.coords[1].set_ticklabel_position('l')
ax.axis('equal')
if not savename is None:
fig.suptitle(savename)
fig.savefig(plots_folder+savename+".png",bbox_inches='tight',dpi=200)
plt.show()
return 0

View File

@@ -45,6 +45,7 @@ import copy
import numpy as np
import matplotlib.pyplot as plt
import matplotlib.dates as mdates
from matplotlib.patches import Rectangle
from datetime import datetime
from scipy.ndimage import rotate as sc_rotate
from scipy.ndimage import shift as sc_shift
@@ -166,7 +167,8 @@ def bin_ndarray(ndarray, new_shape, operation='sum'):
return ndarray
def crop_array(data_array, error_array=None, step=5, null_val=None, inside=False):
def crop_array(data_array, headers, error_array=None, step=5, null_val=None,
inside=False, display=False, savename=None, plots_folder=""):
"""
Homogeneously crop an array: all contained images will have the same shape.
'inside' parameter will decide how much should be cropped.
@@ -175,6 +177,8 @@ def crop_array(data_array, error_array=None, step=5, null_val=None, inside=False
data_array : numpy.ndarray
Array containing the observation data (2D float arrays) to
homogeneously crop.
headers : header list
Headers associated with the images in data_array.
error_array : numpy.ndarray, optional
Array of images (2D floats, aligned and of the same shape) containing
the error in each pixel of the observation images in data_array.
@@ -197,6 +201,10 @@ def crop_array(data_array, error_array=None, step=5, null_val=None, inside=False
inside the image. If False, the cropped image will be the minimum
rectangle in which the whole image is included.
Defaults to False.
display : boolean, optional
If True, data_array will be displayed with a rectangle around the
sub-image selected for region of interest.
Defaults to False.
----------
Returns:
cropped_array : numpy.ndarray
@@ -225,6 +233,46 @@ def crop_array(data_array, error_array=None, step=5, null_val=None, inside=False
v_array[3] = np.max(vertex[:,3]).astype(int)
new_shape = np.array([v_array[1]-v_array[0],v_array[3]-v_array[2]])
rectangle = [v_array[2], v_array[0], new_shape[1], new_shape[0], 'b']
if display:
fig, ax = plt.subplots()
data = data_array[0]
instr = headers[0]['instrume']
rootname = headers[0]['rootname']
exptime = headers[0]['exptime']
filt = headers[0]['filtnam1']
#plots
im = ax.imshow(data, vmin=data.min(), vmax=data.max(), origin='lower')
x, y, width, height, color = rectangle
ax.add_patch(Rectangle((x, y),width,height,edgecolor=color,fill=False))
#position of centroid
ax.plot([data.shape[1]/2, data.shape[1]/2], [0,data.shape[0]-1], lw=1,
color='black')
ax.plot([0,data.shape[1]-1], [data.shape[1]/2, data.shape[1]/2], lw=1,
color='black')
ax.annotate(instr+":"+rootname, color='white', fontsize=5,
xy=(0.02, 0.95), xycoords='axes fraction')
ax.annotate(filt, color='white', fontsize=10, xy=(0.02, 0.02),
xycoords='axes fraction')
ax.annotate(exptime, color='white', fontsize=5, xy=(0.80, 0.02),
xycoords='axes fraction')
ax.set(title="Location of cropped image.",
xlabel='pixel offset',
ylabel='pixel offset')
fig.subplots_adjust(hspace=0, wspace=0, right=0.85)
cbar_ax = fig.add_axes([0.9, 0.12, 0.02, 0.75])
fig.colorbar(im, cax=cbar_ax)
if not(savename is None):
fig.suptitle(savename+'_'+filt+'_crop_region')
fig.savefig(plots_folder+savename+'_'+filt+'_crop_region.png',
bbox_inches='tight')
plot_obs(data_array, headers, vmin=data_array.min(),
vmax=data_array.max(), rectangle=[rectangle,]*len(headers),
savename=savename+'_crop_region',plots_folder=plots_folder)
plt.show()
crop_array = np.zeros((data_array.shape[0],new_shape[0],new_shape[1]))
crop_error_array = np.zeros((data_array.shape[0],new_shape[0],new_shape[1]))
for i,image in enumerate(data_array):
@@ -360,7 +408,7 @@ def get_error(data_array, sub_shape=(15,15), display=False, headers=None,
Only returned if return_background is True.
"""
# Crop out any null edges
data, error = crop_array(data_array, step=5, null_val=0., inside=False)
data, error = crop_array(data_array, headers, step=5, null_val=0., inside=False)
sub_shape = np.array(sub_shape)
# Make sub_shape of odd values
@@ -371,7 +419,7 @@ def get_error(data_array, sub_shape=(15,15), display=False, headers=None,
diff = (sub_shape-1).astype(int)
temp = np.zeros((shape[0],shape[1]-diff[0],shape[2]-diff[1]))
error_array = np.ones(data_array.shape)
rectangle = np.zeros((shape[0],4))
rectangle = []
background = np.zeros((shape[0]))
for i,image in enumerate(data):
@@ -384,7 +432,7 @@ def get_error(data_array, sub_shape=(15,15), display=False, headers=None,
minima = np.unravel_index(np.argmin(temp.sum(axis=0)),temp.shape[1:])
for i, image in enumerate(data):
rectangle[i] = minima[1], minima[0], sub_shape[1], sub_shape[0]
rectangle.append([minima[1], minima[0], sub_shape[1], sub_shape[0], 'r'])
# Compute error : root mean square of the background
sub_image = image[minima[0]:minima[0]+sub_shape[0],minima[1]:minima[1]+sub_shape[1]]
#error = np.std(sub_image) # Previously computed using standard deviation over the background
@@ -398,6 +446,7 @@ def get_error(data_array, sub_shape=(15,15), display=False, headers=None,
if display:
convert_flux = headers[0]['photflam']
date_time = np.array([headers[i]['date-obs']+';'+headers[i]['time-obs']
for i in range(len(headers))])
date_time = np.array([datetime.strptime(d,'%Y-%m-%d;%H:%M:%S')
@@ -406,12 +455,13 @@ def get_error(data_array, sub_shape=(15,15), display=False, headers=None,
dict_filt = {"POL0":'r', "POL60":'g', "POL120":'b'}
c_filt = np.array([dict_filt[f] for f in filt])
fig,ax = plt.subplots(figsize=(10,6),constrained_layout=True)
fig,ax = plt.subplots(figsize=(10,6), constrained_layout=True)
for f in np.unique(filt):
mask = [fil==f for fil in filt]
ax.scatter(date_time[mask], background[mask], color=dict_filt[f],
label="Filter : {0:s}".format(f))
ax.errorbar(date_time, background, yerr=error_array[:,0,0], fmt='+k',
ax.scatter(date_time[mask], background[mask]*convert_flux,
color=dict_filt[f],label="Filter : {0:s}".format(f))
ax.errorbar(date_time, background*convert_flux,
yerr=error_array[:,0,0]*convert_flux, fmt='+k',
markersize=0, ecolor=c_filt)
# Date handling
locator = mdates.AutoDateLocator()
@@ -424,12 +474,12 @@ def get_error(data_array, sub_shape=(15,15), display=False, headers=None,
plt.legend()
if not(savename is None):
fig.suptitle(savename+"_background_flux.png")
fig.suptitle(savename+"_background_flux")
fig.savefig(plots_folder+savename+"_background_flux.png",
bbox_inches='tight')
vmin = np.min(np.log10(data[data > 0.]))
vmax = np.max(np.log10(data[data > 0.]))
plot_obs(np.log10(data), headers, vmin=vmin, vmax=vmax,
plot_obs(data, headers, vmin=data.min(), vmax=data.max(),
rectangle=rectangle,
savename=savename+"_background_location",
plots_folder=plots_folder)
@@ -555,8 +605,8 @@ def rebin_array(data_array, error_array, headers, pxsize, scale,
return rebinned_data, rebinned_error, rebinned_headers, Dxy
def align_data(data_array, error_array=None, upsample_factor=1., ref_data=None,
ref_center=None, return_shifts=True):
def align_data(data_array, headers, error_array=None, upsample_factor=1.,
ref_data=None, ref_center=None, return_shifts=True):
"""
Align images in data_array using cross correlation, and rescale them to
wider images able to contain any rotation of the reference image.
@@ -565,6 +615,8 @@ def align_data(data_array, error_array=None, upsample_factor=1., ref_data=None,
Inputs:
data_array : numpy.ndarray
Array containing the data to align (2D float arrays).
headers : header list
List of headers corresponding to the images in data_array.
error_array : numpy.ndarray, optional
Array of images (2D floats, aligned and of the same shape) containing
the error in each pixel of the observation images in data_array.
@@ -626,7 +678,7 @@ def align_data(data_array, error_array=None, upsample_factor=1., ref_data=None,
full_array = np.concatenate((data_array,[ref_data]),axis=0)
err_array = np.concatenate((error_array,[np.zeros(ref_data.shape)]),axis=0)
full_array, err_array = crop_array(full_array, err_array, step=5,
full_array, err_array = crop_array(full_array, headers, err_array, step=5,
inside=False)
data_array, ref_data = full_array[:-1], full_array[-1]
@@ -649,6 +701,7 @@ def align_data(data_array, error_array=None, upsample_factor=1., ref_data=None,
res_shape = int(np.ceil(np.sqrt(2.5)*np.max(shape[1:])))
rescaled_image = np.zeros((shape[0],res_shape,res_shape))
rescaled_error = np.ones((shape[0],res_shape,res_shape))
rescaled_mask = np.ones((shape[0],res_shape,res_shape),dtype=bool)
res_center = (np.array(rescaled_image.shape[1:])/2).astype(int)
shifts, errors = [], []
@@ -665,9 +718,12 @@ def align_data(data_array, error_array=None, upsample_factor=1., ref_data=None,
res_shift[1]:res_shift[1]+shape[2]] = copy.deepcopy(image)
rescaled_error[i,res_shift[0]:res_shift[0]+shape[1],
res_shift[1]:res_shift[1]+shape[2]] = copy.deepcopy(error_array[i])
rescaled_mask[i,res_shift[0]:res_shift[0]+shape[1],
res_shift[1]:res_shift[1]+shape[2]] = False
# Shift images to align
rescaled_image[i] = sc_shift(rescaled_image[i], shift, cval=0.)
rescaled_error[i] = sc_shift(rescaled_error[i], shift, cval=background[i])
rescaled_mask[i] = sc_shift(rescaled_mask[i], shift, cval=True)
rescaled_image[i][rescaled_image[i] < 0.] = 0.
@@ -678,13 +734,13 @@ def align_data(data_array, error_array=None, upsample_factor=1., ref_data=None,
errors = np.array(errors)
if return_shifts:
return rescaled_image, rescaled_error, shifts, errors
return rescaled_image, rescaled_error, rescaled_mask.any(axis=0), shifts, errors
else:
return rescaled_image, rescaled_error
return rescaled_image, rescaled_error, rescaled_mask.any(axis=0)
def smooth_data(data_array, error_array, headers, FWHM=1., scale='pixel',
smoothing='gaussian'):
def smooth_data(data_array, error_array, data_mask, headers, FWHM=1.,
scale='pixel', smoothing='gaussian'):
"""
Smooth a data_array using selected function.
----------
@@ -746,6 +802,7 @@ def smooth_data(data_array, error_array, headers, FWHM=1., scale='pixel',
#Define gaussian stdev
stdev = FWHM/(2.*np.sqrt(2.*np.log(2)))
fmax = np.finfo(np.float64).max
if smoothing.lower() in ['combine','combining']:
# Smooth using N images combination algorithm
@@ -761,11 +818,15 @@ def smooth_data(data_array, error_array, headers, FWHM=1., scale='pixel',
for r in range(smoothed.shape[0]):
for c in range(smoothed.shape[1]):
# Compute distance from current pixel
dist_rc = np.sqrt((r-xx)**2+(c-yy)**2)
dist_rc = np.where(data_mask, fmax, np.sqrt((r-xx)**2+(c-yy)**2))
g_rc = np.array([np.exp(-0.5*(dist_rc/stdev)**2),]*len(data_array))
# Apply weighted combination
smoothed[r,c] = np.sum(data_array*weight*g_rc)/np.sum(weight*g_rc)
error[r,c] = np.sqrt(np.sum(weight*g_rc**2))/np.sum(weight*g_rc)
if data_mask[r,c]:
smoothed[r,c] = 0.
error[r,c] = 1.
else:
smoothed[r,c] = np.sum(data_array*weight*g_rc)/np.sum(weight*g_rc)
error[r,c] = np.sqrt(np.sum(weight*g_rc**2))/np.sum(weight*g_rc)
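With the masked distance map above, the 'combine' branch performs a distance-weighted average over all images k and pixels (x, y); the exact shape of weight is set outside this hunk, but the two unmasked assignments read

g(r,c;x,y) = \exp\!\left(-\frac{(r-x)^2 + (c-y)^2}{2\sigma^2}\right), \qquad \sigma = \frac{\mathrm{FWHM}}{2\sqrt{2\ln 2}}

\mathrm{smoothed}(r,c) = \frac{\sum_{k,x,y} w_k\, g(r,c;x,y)\, I_k(x,y)}{\sum_{k,x,y} w_k\, g(r,c;x,y)}, \qquad
\mathrm{error}(r,c) = \frac{\sqrt{\sum_{k,x,y} w_k\, g(r,c;x,y)^2}}{\sum_{k,x,y} w_k\, g(r,c;x,y)}

Pixels flagged in data_mask have their distance set to the float maximum, so g vanishes there and they neither contribute flux nor receive a smoothed value.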
elif smoothing.lower() in ['gauss','gaussian']:
#Convolution with gaussian function
@@ -775,10 +836,14 @@ def smooth_data(data_array, error_array, headers, FWHM=1., scale='pixel',
xx, yy = np.indices(image.shape)
for r in range(image.shape[0]):
for c in range(image.shape[1]):
dist_rc = np.sqrt((r-xx)**2+(c-yy)**2)
dist_rc = np.where(data_mask, fmax, np.sqrt((r-xx)**2+(c-yy)**2))
g_rc = np.exp(-0.5*(dist_rc/stdev)**2)/(2.*np.pi*stdev**2)
smoothed[i][r,c] = np.sum(image*g_rc)
error[i][r,c] = np.sum(error_array*g_rc**2)
if data_mask[r,c]:
smoothed[i][r,c] = 0.
error[i][r,c] = 1.
else:
smoothed[i][r,c] = np.sum(image*g_rc)
error[i][r,c] = np.sum(error_array*g_rc**2)
else:
raise ValueError("{} is not a valid smoothing option".format(smoothing))
@@ -786,8 +851,8 @@ def smooth_data(data_array, error_array, headers, FWHM=1., scale='pixel',
return smoothed, error
def polarizer_avg(data_array, error_array, headers, FWHM=None, scale='pixel',
smoothing='gaussian'):
def polarizer_avg(data_array, error_array, data_mask, headers, FWHM=None,
scale='pixel', smoothing='gaussian'):
"""
Make the average image from a single polarizer for a given instrument.
-----------
@@ -862,11 +927,11 @@ def polarizer_avg(data_array, error_array, headers, FWHM=None, scale='pixel',
if not(FWHM is None) and (smoothing.lower() in ['combine','combining']):
# Smooth by combining each polarizer images
pol0, err0 = smooth_data(pol0_array, err0_array, headers0,
pol0, err0 = smooth_data(pol0_array, err0_array, data_mask, headers0,
FWHM=FWHM, scale=scale, smoothing=smoothing)
pol60, err60 = smooth_data(pol60_array, err60_array, headers60,
pol60, err60 = smooth_data(pol60_array, err60_array, data_mask, headers60,
FWHM=FWHM, scale=scale, smoothing=smoothing)
pol120, err120 = smooth_data(pol120_array, err120_array, headers120,
pol120, err120 = smooth_data(pol120_array, err120_array, data_mask, headers120,
FWHM=FWHM, scale=scale, smoothing=smoothing)
else:
@@ -908,7 +973,7 @@ def polarizer_avg(data_array, error_array, headers, FWHM=None, scale='pixel',
if not(FWHM is None) and (smoothing.lower() in ['gaussian','gauss']):
# Smooth each polX image by convolving it with a gaussian.
pol_array, polerr_array = smooth_data(pol_array, polerr_array,
headers_array, FWHM=FWHM, scale=scale)
data_mask, headers_array, FWHM=FWHM, scale=scale)
pol0, pol60, pol120 = pol_array
err0, err60, err120 = polerr_array
@@ -931,8 +996,8 @@ def polarizer_avg(data_array, error_array, headers, FWHM=None, scale='pixel',
return polarizer_array, polarizer_cov
def compute_Stokes(data_array, error_array, headers, FWHM=None,
scale='pixel', smoothing='gaussian_after'):
def compute_Stokes(data_array, error_array, data_mask, headers,
FWHM=None, scale='pixel', smoothing='gaussian_after'):
"""
Compute the Stokes parameters I, Q and U for a given data_set
----------
@@ -989,8 +1054,8 @@ def compute_Stokes(data_array, error_array, headers, FWHM=None,
# Routine for the FOC instrument
if instr == 'FOC':
# Get image from each polarizer and covariance matrix
pol_array, pol_cov = polarizer_avg(data_array, error_array, headers,
FWHM=FWHM, scale=scale, smoothing=smoothing)
pol_array, pol_cov = polarizer_avg(data_array, error_array, data_mask,
headers, FWHM=FWHM, scale=scale, smoothing=smoothing)
pol0, pol60, pol120 = pol_array
if (pol0 < 0.).any() or (pol60 < 0.).any() or (pol120 < 0.).any():
@@ -1012,10 +1077,10 @@ def compute_Stokes(data_array, error_array, headers, FWHM=None,
if mask.any():
print("WARNING : I_pol > I_stokes : ", len(I_stokes[mask]))
plt.imshow(np.sqrt(Q_stokes**2+U_stokes**2)/I_stokes*mask, origin='lower')
plt.colorbar()
plt.title(r"$I_{pol}/I_{tot}$")
plt.show()
#plt.imshow(np.sqrt(Q_stokes**2+U_stokes**2)/I_stokes*mask, origin='lower')
#plt.colorbar()
#plt.title(r"$I_{pol}/I_{tot}$")
#plt.show()
#I_stokes[mask]=0.
#Q_stokes[mask]=0.
@@ -1107,11 +1172,12 @@ def compute_pol(I_stokes, Q_stokes, U_stokes, Stokes_cov, headers):
#Compute the total exposure time so that
#I_stokes*exp_tot = N_tot the total number of events
exp_tot = np.array([header['exptime'] for header in headers]).sum()
print("Total exposure time : {} sec".format(exp_tot))
N_obs = I_stokes*exp_tot
#Errors on P, PA supposing Poisson noise
s_P_P = np.sqrt(2.)/np.sqrt(N_obs)*100.
s_PA_P = s_P_P/(2.*P/100.)*180./np.pi
s_PA_P = s_P_P/(2.*P)*180./np.pi
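The Poisson-noise errors above follow from N_{\rm obs} = I_{\rm Stokes} \times \mathrm{exptot}; as a worked check against the default intensity cut:

\sigma_P^{\rm Poisson} = \frac{\sqrt{2}}{\sqrt{N_{\rm obs}}} \times 100\,\% = \frac{\sqrt{2}}{\mathrm{SNR}_I} \times 100\,\% \quad (\text{pure photon noise, } \mathrm{SNR}_I = \sqrt{N_{\rm obs}}), \qquad
\sigma_{\theta_P}^{\rm Poisson} = \frac{\sigma_P^{\rm Poisson}}{2P} \times \frac{180}{\pi}

so the default SNRi_cut = 30 corresponds to \sigma_P \approx \sqrt{2}/30 \times 100\,\% \approx 4.7\,\%, the value quoted in the polarization_map docstring.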
# Nan handling :
fmax = np.finfo(np.float64).max
@@ -1126,7 +1192,7 @@ def compute_pol(I_stokes, Q_stokes, U_stokes, Stokes_cov, headers):
return P, debiased_P, s_P, s_P_P, PA, s_PA, s_PA_P
def rotate_data(data_array, error_array, headers, ang):
def rotate_data(data_array, error_array, data_mask, headers, ang):
"""
Use scipy.ndimage.rotate to rotate I_stokes to an angle, and a rotation
matrix to rotate Q, U of a given angle in degrees and update header
@@ -1164,6 +1230,7 @@ def rotate_data(data_array, error_array, headers, ang):
new_error_array.append(sc_rotate(error_array[i], ang, reshape=False,
cval=error_array.mean()))
new_data_array = np.array(new_data_array)
new_data_mask = sc_rotate(data_mask, ang, reshape=False, cval=True)
new_error_array = np.array(new_error_array)
for i in range(len(new_data_array)):
@@ -1192,10 +1259,10 @@ def rotate_data(data_array, error_array, headers, ang):
new_headers.append(new_header)
return new_data_array, new_error_array, new_headers
return new_data_array, new_error_array, new_data_mask, new_headers
def rotate_Stokes(I_stokes, Q_stokes, U_stokes, Stokes_cov, headers, ang, SNRi_cut=None):
def rotate_Stokes(I_stokes, Q_stokes, U_stokes, Stokes_cov, data_mask, headers, ang, SNRi_cut=None):
"""
Use scipy.ndimage.rotate to rotate I_stokes to an angle, and a rotation
matrix to rotate Q, U of a given angle in degrees and update header
@@ -1266,16 +1333,14 @@ def rotate_Stokes(I_stokes, Q_stokes, U_stokes, Stokes_cov, headers, ang, SNRi_c
new_Stokes_cov[1,2] = new_Stokes_cov[2,1] = np.cos(2.*alpha)*np.sin(2.*alpha)*(Stokes_cov[2,2] - Stokes_cov[1,1]) + (np.cos(2.*alpha)**2 - np.sin(2.*alpha)**2)*Stokes_cov[1,2]
#Rotate original images using scipy.ndimage.rotate
new_I_stokes = sc_rotate(new_I_stokes, ang, reshape=False,
cval=0.0*np.sqrt(new_Stokes_cov[0,0][0,0]))
new_Q_stokes = sc_rotate(new_Q_stokes, ang, reshape=False,
cval=0.0*np.sqrt(new_Stokes_cov[1,1][0,0]))
new_U_stokes = sc_rotate(new_U_stokes, ang, reshape=False,
cval=0.0*np.sqrt(new_Stokes_cov[2,2][0,0]))
new_I_stokes = sc_rotate(new_I_stokes, ang, reshape=False, cval=0.)
new_Q_stokes = sc_rotate(new_Q_stokes, ang, reshape=False, cval=0.)
new_U_stokes = sc_rotate(new_U_stokes, ang, reshape=False, cval=0.)
new_data_mask = sc_rotate(data_mask, ang, reshape=False, cval=True)
for i in range(3):
for j in range(3):
new_Stokes_cov[i,j] = sc_rotate(new_Stokes_cov[i,j], ang, reshape=False,
cval=0.0*new_Stokes_cov[i,j].mean())
new_Stokes_cov[i,j] = sc_rotate(new_Stokes_cov[i,j], ang,
reshape=False, cval=0.)
#Update headers to new angle
new_headers = []
@@ -1310,10 +1375,10 @@ def rotate_Stokes(I_stokes, Q_stokes, U_stokes, Stokes_cov, headers, ang, SNRi_c
new_U_stokes[np.isnan(new_U_stokes)] = 0.
new_Stokes_cov[np.isnan(new_Stokes_cov)] = fmax
return new_I_stokes, new_Q_stokes, new_U_stokes, new_Stokes_cov, new_headers
return new_I_stokes, new_Q_stokes, new_U_stokes, new_Stokes_cov, new_data_mask, new_headers
def rotate2_Stokes(I_stokes, Q_stokes, U_stokes, Stokes_cov, headers, ang):
def rotate2_Stokes(I_stokes, Q_stokes, U_stokes, Stokes_cov, data_mask, headers, ang):
"""
Use scipy.ndimage.rotate to rotate I_stokes to an angle, and a rotation
matrix to rotate Q, U of a given angle in degrees and update header
@@ -1443,4 +1508,4 @@ def rotate2_Stokes(I_stokes, Q_stokes, U_stokes, Stokes_cov, headers, ang):
s_P_P[np.isnan(s_P_P)] = fmax
s_PA_P[np.isnan(s_PA_P)] = fmax
return new_I_stokes, new_Q_stokes, new_U_stokes, new_Stokes_cov, P, debiased_P, s_P, s_P_P, PA, s_PA, s_PA_P, new_headers
return new_I_stokes, new_Q_stokes, new_U_stokes, new_Stokes_cov, data_mask, P, debiased_P, s_P, s_P_P, PA, s_PA, s_PA_P, new_headers

File diff suppressed because it is too large