force convention on WCS (unitary PCi_ja for given initial cdelt)
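What the title means in practice: headers that carry a raw CDi_ja matrix (or a placeholder CDELT of 1) are rewritten so that CDELT holds the known FOC pixel scale in degrees and PCi_ja keeps only the remaining rotation, PC = CD · diag(1/CDELT). A minimal sketch of that convention with astropy, mirroring the calls used in the patch (the helper name and the example scale are illustrative, not part of the code base):

```python
import numpy as np
from astropy.io import fits
from astropy.wcs import WCS

def force_pc_convention(header, cdelt_deg):
    """Rewrite a CD-matrix WCS as unitary PCi_ja + CDELT (deg/px). Illustrative helper."""
    w = WCS(header).deepcopy()
    if w.wcs.has_cd():
        old_cd = w.wcs.cd                       # full linear term (rotation x scale)
        del w.wcs.cd                            # drop the CD representation
        # PC = CD . diag(1/CDELT): close to a pure rotation when cdelt_deg is the true scale
        w.wcs.pc = np.dot(old_cd, np.diag(1. / np.asarray(cdelt_deg)))
        w.wcs.cdelt = cdelt_deg
        for key in ['CD1_1', 'CD1_2', 'CD2_1', 'CD2_2']:
            header.remove(key, ignore_missing=True)   # purge the old CD keywords
        header.update(w.to_header())
    return header

# header = fits.getheader("x3nl0301r_c0f.fits")                     # one of the input files
# header = force_pc_convention(header, cdelt_deg=[6.2e-6, 6.2e-6])  # illustrative scale
```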
[Binary image changes: 7 plot images deleted, 8 plot images regenerated (file names not captured by the image-diff rows).]
New binary files under plots/IC5063_x3nl030/:
  IC5063_FOC_combine_FWHM020_pol.png        (461 KiB)
  IC5063_FOC_combine_FWHM020_pol_IQU.png    (93 KiB)
  IC5063_FOC_combine_FWHM020_pol_I_err.png  (311 KiB)
  IC5063_FOC_combine_FWHM020_pol_P.png      (284 KiB)
  IC5063_FOC_combine_FWHM020_pol_P_err.png  (302 KiB)
  IC5063_FOC_combine_FWHM020_pol_P_flux.png (293 KiB)
  IC5063_FOC_combine_FWHM020_pol_SNRi.png   (468 KiB)
  IC5063_FOC_combine_FWHM020_pol_SNRp.png   (400 KiB)
@@ -17,11 +17,11 @@ from lib.convex_hull import image_hull
 def main():
     ##### User inputs
     ## Input and output locations
-    globals()['data_folder'] = "../data/NGC1068_x274020/"
-    infiles = ['x274020at.c0f.fits','x274020bt.c0f.fits','x274020ct.c0f.fits',
-               'x274020dt.c0f.fits','x274020et.c0f.fits','x274020ft.c0f.fits',
-               'x274020gt.c0f.fits','x274020ht.c0f.fits','x274020it.c0f.fits']
-    globals()['plots_folder'] = "../plots/NGC1068_x274020/"
+    # globals()['data_folder'] = "../data/NGC1068_x274020/"
+    # infiles = ['x274020at.c0f.fits','x274020bt.c0f.fits','x274020ct.c0f.fits',
+    #            'x274020dt.c0f.fits','x274020et.c0f.fits','x274020ft.c0f.fits',
+    #            'x274020gt.c0f.fits','x274020ht.c0f.fits','x274020it.c0f.fits']
+    # globals()['plots_folder'] = "../plots/NGC1068_x274020/"
 
     # globals()['data_folder'] = "../data/NGC1068_x14w010/"
     # infiles = ['x14w0101t_c0f.fits','x14w0102t_c0f.fits','x14w0103t_c0f.fits',
@@ -60,9 +60,9 @@ def main():
     #            'x3995202r_c0f.fits','x3995206r_c0f.fits']
     # globals()['plots_folder'] = "../plots/PG1630+377_x39510/"
 
-    # globals()['data_folder'] = "../data/IC5063_x3nl030/"
-    # infiles = ['x3nl0301r_c0f.fits','x3nl0302r_c0f.fits','x3nl0303r_c0f.fits']
-    # globals()['plots_folder'] = "../plots/IC5063_x3nl030/"
+    globals()['data_folder'] = "../data/IC5063_x3nl030/"
+    infiles = ['x3nl0301r_c0f.fits','x3nl0302r_c0f.fits','x3nl0303r_c0f.fits']
+    globals()['plots_folder'] = "../plots/IC5063_x3nl030/"
 
     # globals()['data_folder'] = "../data/MKN3_x3nl010/"
     # infiles = ['x3nl0101r_c0f.fits','x3nl0102r_c0f.fits','x3nl0103r_c0f.fits']
@@ -97,12 +97,12 @@ def main():
     # Cropping
     display_crop = False
     # Error estimation
-    error_sub_shape = (150,150)
+    error_sub_shape = (75,75)
     display_error = False
     # Data binning
     rebin = True
     if rebin:
-        pxsize = 0.05
+        pxsize = 0.10
         px_scale = 'arcsec' #pixel or arcsec
         rebin_operation = 'sum' #sum or average
     # Alignement
@@ -110,16 +110,16 @@ def main():
     display_data = False
     # Smoothing
     smoothing_function = 'combine' #gaussian_after, gaussian or combine
-    smoothing_FWHM = 0.10 #If None, no smoothing is done
+    smoothing_FWHM = 0.20 #If None, no smoothing is done
     smoothing_scale = 'arcsec' #pixel or arcsec
     # Rotation
     rotate_stokes = True #rotation to North convention can give erroneous results
     rotate_data = False #rotation to North convention can give erroneous results
     # Polarization map output
-    figname = 'NGC1068_FOC' #target/intrument name
+    figname = 'IC5063_FOC' #target/intrument name
     figtype = '_combine_FWHM020' #additionnal informations
-    SNRp_cut = 50. #P measurments with SNR>3
-    SNRi_cut = 350. #I measurments with SNR>30, which implies an uncertainty in P of 4.7%.
+    SNRp_cut = 7. #P measurments with SNR>3
+    SNRi_cut = 180. #I measurments with SNR>30, which implies an uncertainty in P of 4.7%.
     step_vec = 1 #plot all vectors in the array. if step_vec = 2, then every other vector will be plotted
 
     ##### Pipeline start
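A note on the two cuts changed above: the 4.7 % quoted in the SNRi comment is numerically √2/30, which points to the usual small-error rule of thumb σ_P ≈ √2/SNR_I; if that is the intended rule, the new SNRi_cut = 180 corresponds to σ_P ≈ 0.8 %. Back-of-the-envelope check (not part of the patch):

```python
import numpy as np

for snri in (30., 180.):
    print(snri, np.sqrt(2.) / snri * 100.)   # ~4.7 % and ~0.8 % uncertainty on P
```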
@@ -143,6 +143,7 @@ def main():
         if (data < 0.).any():
             print("ETAPE 3 : ", data)
     # Rebin data to desired pixel size.
+    Dxy = np.ones(2)
     if rebin:
         data_array, error_array, headers, Dxy = proj_red.rebin_array(data_array, error_array, headers, pxsize=pxsize, scale=px_scale, operation=rebin_operation)
         for data in data_array:
@@ -44,6 +44,30 @@ def get_obs_data(infiles, data_folder="", compute_flux=False):
     # Prevent negative count value in imported data
     for i in range(len(data_array)):
         data_array[i][data_array[i] < 0.] = 0.
 
+    # force WCS to convention PCi_ja unitary, cdelt in deg
+    for header in headers:
+        new_wcs = wcs.WCS(header).deepcopy()
+        if new_wcs.wcs.has_cd() or (new_wcs.wcs.cdelt == np.array([1., 1.])).all():
+            # Update WCS with relevant information
+            HST_aper = 2400. # HST aperture in mm
+            f_ratio = header['f_ratio']
+            px_dim = np.array([25., 25.]) # Pixel dimension in µm
+            if header['pxformt'].lower() == 'zoom':
+                px_dim[0] = 50.
+            new_cdelt = 206.3/3600.*px_dim/(f_ratio*HST_aper)
+            if new_wcs.wcs.has_cd():
+                old_cd = new_wcs.wcs.cd
+                del new_wcs.wcs.cd
+                keys = list(new_wcs.to_header().keys())+['CD1_1','CD1_2','CD2_1','CD2_2']
+                for key in keys:
+                    header.remove(key, ignore_missing=True)
+            elif (new_wcs.wcs.cdelt == np.array([1., 1.])).all() and \
+                    (new_wcs.array_shape in [(512, 512),(1024,512),(512,1024),(1024,1024)]):
+                old_cd = new_wcs.wcs.pc
+            new_wcs.wcs.pc = np.dot(old_cd, np.diag(1./new_cdelt))
+            new_wcs.wcs.cdelt = new_cdelt
+            header.update(new_wcs.to_header())
+
     if compute_flux:
         for i in range(len(infiles)):
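The new_cdelt line above is the standard plate-scale relation, scale[arcsec/px] ≈ 206265 · p / f with pixel pitch p and focal length f in the same length unit; here p is in µm and f = f_ratio · HST_aper is in mm, so the 206265 collapses to the 206.3 prefactor and the final /3600. converts arcsec to the degrees expected in CDELT. A quick numerical check (the f_ratio value is illustrative; the pipeline reads it from header['f_ratio']):

```python
px_dim = 25.        # pixel pitch, µm
HST_aper = 2400.    # HST aperture, mm
f_ratio = 96.       # illustrative focal ratio; the real value comes from the header
scale_arcsec = 206.3 * px_dim / (f_ratio * HST_aper)   # ~0.022 arcsec per pixel
cdelt_deg = scale_arcsec / 3600.                       # value written to CDELT
```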
@@ -92,22 +92,6 @@ def save_Stokes(I_stokes, Q_stokes, U_stokes, Stokes_cov, P, debiased_P, s_P,
     ref_header = headers[0]
     exp_tot = np.array([header['exptime'] for header in headers]).sum()
     new_wcs = wcs.WCS(ref_header).deepcopy()
-    if new_wcs.wcs.has_cd():
-        del new_wcs.wcs.cd
-        keys = list(new_wcs.to_header().keys())+['CD1_1','CD1_2','CD2_1','CD2_2']
-        for key in keys:
-            ref_header.remove(key, ignore_missing=True)
-        new_wcs.wcs.cdelt = 3600.*np.sqrt(np.sum(new_wcs.wcs.get_pc()**2,axis=1))
-    if (new_wcs.wcs.cdelt == np.array([1., 1.])).all() and \
-            (new_wcs.array_shape in [(512, 512),(1024,512),(512,1024),(1024,1024)]):
-        # Update WCS with relevant information
-        HST_aper = 2400. # HST aperture in mm
-        f_ratio = ref_header['f_ratio']
-        px_dim = np.array([25., 25.]) # Pixel dimension in µm
-        if ref_header['pxformt'].lower() == 'zoom':
-            px_dim[0] = 50.
-        new_wcs.wcs.cdelt = 206.3*px_dim/(f_ratio*HST_aper)
-        new_wcs.wcs.crpix = [I_stokes.shape[0]/2, I_stokes.shape[1]/2]
 
     header = new_wcs.to_header()
     header['instrume'] = (ref_header['instrume'], 'Instrument from which data is reduced')
src/lib/plots.py (108 lines changed)
@@ -263,7 +263,7 @@ class crop_map(object):
         return Stokes_crop, data_mask
 
 
-def polarization_map(Stokes, data_mask, rectangle=None, SNRp_cut=3., SNRi_cut=30.,
+def polarization_map(Stokes, data_mask=None, rectangle=None, SNRp_cut=3., SNRi_cut=30.,
                      step_vec=1, savename=None, plots_folder="", display=None):
     """
     Plots polarization map from Stokes HDUList.
@@ -301,6 +301,10 @@ def polarization_map(Stokes, data_mask, rectangle=None, SNRp_cut=3., SNRi_cut=30
         degree ('p','pol','pol_deg') or polarization degree error ('s_p',
         'pol_err','pol_deg_err').
         Defaults to None (intensity).
+    ----------
+    Returns:
+    fig, ax : matplotlib.pyplot object
+        The figure and ax created for interactive contour maps.
     """
     #Get data
     stkI = Stokes[np.argmax([Stokes[i].header['datatype']=='I_stokes' for i in range(len(Stokes))])]
@@ -318,6 +318,10 @@ def polarization_map(Stokes, data_mask, rectangle=None, SNRp_cut=3., SNRi_cut=30
     convert_flux = Stokes[0].header['photflam']
     wcs = WCS(Stokes[0]).deepcopy()
 
+    #Get image mask
+    if data_mask is None:
+        data_mask = np.ones(stkI.shape).astype(bool)
+
     #Plot Stokes parameters map
     if display is None:
         plot_Stokes(Stokes, savename=savename, plots_folder=plots_folder)
@@ -354,64 +362,62 @@ def polarization_map(Stokes, data_mask, rectangle=None, SNRp_cut=3., SNRi_cut=30
     if display is None:
         # If no display selected, show intensity map
         vmin, vmax = 0., np.max(stkI.data[stkI.data > 0.]*convert_flux)
-        im = ax.imshow(stkI.data*convert_flux,extent=[-stkI.data.shape[1]/2.,stkI.data.shape[1]/2.,-stkI.data.shape[0]/2.,stkI.data.shape[0]/2.], vmin=vmin, vmax=vmax, aspect='auto', cmap='inferno', alpha=1.)
+        im = ax.imshow(stkI.data*convert_flux, vmin=vmin, vmax=vmax, aspect='auto', cmap='inferno', alpha=1.)
         cbar = plt.colorbar(im, cax=cbar_ax, label=r"$F_{\lambda}$ [$ergs \cdot cm^{-2} \cdot s^{-1} \cdot \AA^{-1}$]")
         levelsI = np.linspace(SNRi_cut, np.max(SNRi[SNRi > 0.]), 10)
-        cont = ax.contour(SNRi, extent=[-SNRi.shape[1]/2.,SNRi.shape[1]/2.,-SNRi.shape[0]/2.,SNRi.shape[0]/2.], levels=levelsI, colors='grey', linewidths=0.5)
+        cont = ax.contour(SNRi, levels=levelsI, colors='grey', linewidths=0.5)
     elif display.lower() in ['pol_flux']:
         # Display polarisation flux
         pf_mask = (stkI.data > 0.) * (pol.data > 0.)
         vmin, vmax = 0., np.max(stkI.data[pf_mask]*convert_flux*pol.data[pf_mask])
-        im = ax.imshow(stkI.data*convert_flux*pol.data,extent=[-stkI.data.shape[1]/2.,stkI.data.shape[1]/2.,-stkI.data.shape[0]/2.,stkI.data.shape[0]/2.], vmin=vmin, vmax=vmax, aspect='auto', cmap='inferno', alpha=1.)
+        im = ax.imshow(stkI.data*convert_flux*pol.data, vmin=vmin, vmax=vmax, aspect='auto', cmap='inferno', alpha=1.)
         cbar = plt.colorbar(im, cax=cbar_ax, label=r"$F_{\lambda} \cdot P$ [$ergs \cdot cm^{-2} \cdot s^{-1} \cdot \AA^{-1}$]")
-        levelsI = np.linspace(SNRi_cut, np.max(SNRi[SNRi > 0.]), 10)
-        cont = ax.contour(SNRi, extent=[-SNRi.shape[1]/2.,SNRi.shape[1]/2.,-SNRi.shape[0]/2.,SNRi.shape[0]/2.], levels=levelsI, colors='grey', linewidths=0.5)
     elif display.lower() in ['p','pol','pol_deg']:
         # Display polarization degree map
         vmin, vmax = 0., 100.
-        im = ax.imshow(pol.data*100.,extent=[-pol.data.shape[1]/2.,pol.data.shape[1]/2.,-pol.data.shape[0]/2.,pol.data.shape[0]/2.], vmin=vmin, vmax=vmax, aspect='auto', cmap='inferno', alpha=1.)
+        im = ax.imshow(pol.data*100., vmin=vmin, vmax=vmax, aspect='auto', cmap='inferno', alpha=1.)
         cbar = plt.colorbar(im, cax=cbar_ax, label=r"$P$ [%]")
     elif display.lower() in ['s_p','pol_err','pol_deg_err']:
         # Display polarization degree error map
         vmin, vmax = 0., 10.
         p_err = pol_err.data.copy()
         p_err[p_err > vmax/100.] = np.nan
-        im = ax.imshow(p_err*100.,extent=[-pol_err.data.shape[1]/2.,pol_err.data.shape[1]/2.,-pol_err.data.shape[0]/2.,pol_err.data.shape[0]/2.], vmin=vmin, vmax=vmax, aspect='auto', cmap='inferno', alpha=1.)
+        im = ax.imshow(p_err*100., vmin=vmin, vmax=vmax, aspect='auto', cmap='inferno', alpha=1.)
         cbar = plt.colorbar(im, cax=cbar_ax, label=r"$\sigma_P$ [%]")
     elif display.lower() in ['s_i','i_err']:
         # Display intensity error map
         vmin, vmax = 0., np.max(np.sqrt(stk_cov.data[0,0][stk_cov.data[0,0] > 0.])*convert_flux)
-        im = ax.imshow(np.sqrt(stk_cov.data[0,0])*convert_flux,extent=[-stkI.data.shape[1]/2.,stkI.data.shape[1]/2.,-stkI.data.shape[0]/2.,stkI.data.shape[0]/2.], vmin=vmin, vmax=vmax, aspect='auto', cmap='inferno', alpha=1.)
+        im = ax.imshow(np.sqrt(stk_cov.data[0,0])*convert_flux, vmin=vmin, vmax=vmax, aspect='auto', cmap='inferno', alpha=1.)
         cbar = plt.colorbar(im, cax=cbar_ax, label=r"$\sigma_I$ [$ergs \cdot cm^{-2} \cdot s^{-1} \cdot \AA^{-1}$]")
     elif display.lower() in ['snr','snri']:
         # Display I_stokes signal-to-noise map
         vmin, vmax = 0., np.max(SNRi[SNRi > 0.])
-        im = ax.imshow(SNRi, extent=[-SNRi.shape[1]/2.,SNRi.shape[1]/2.,-SNRi.shape[0]/2.,SNRi.shape[0]/2.], vmin=vmin, vmax=vmax, aspect='auto', cmap='inferno', alpha=1.)
+        im = ax.imshow(SNRi, vmin=vmin, vmax=vmax, aspect='auto', cmap='inferno', alpha=1.)
         cbar = plt.colorbar(im, cax=cbar_ax, label=r"$I_{Stokes}/\sigma_{I}$")
         levelsI = np.linspace(SNRi_cut, np.max(SNRi[SNRi > 0.]), 10)
-        #print(levelsI)
-        cont = ax.contour(SNRi, extent=[-SNRi.shape[1]/2.,SNRi.shape[1]/2.,-SNRi.shape[0]/2.,SNRi.shape[0]/2.], levels=levelsI, colors='grey', linewidths=0.5)
+        cont = ax.contour(SNRi, levels=levelsI, colors='grey', linewidths=0.5)
     elif display.lower() in ['snrp']:
         # Display polarization degree signal-to-noise map
         vmin, vmax = SNRp_cut, np.max(SNRp[SNRp > 0.])
-        im = ax.imshow(SNRp, extent=[-SNRp.shape[1]/2.,SNRp.shape[1]/2.,-SNRp.shape[0]/2.,SNRp.shape[0]/2.], vmin=vmin, vmax=vmax, aspect='auto', cmap='inferno', alpha=1.)
+        im = ax.imshow(SNRp, vmin=vmin, vmax=vmax, aspect='auto', cmap='inferno', alpha=1.)
         cbar = plt.colorbar(im, cax=cbar_ax, label=r"$P/\sigma_{P}$")
         levelsP = np.linspace(SNRp_cut, np.max(SNRp[SNRp > 0.]), 10)
-        cont = ax.contour(SNRp, extent=[-SNRp.shape[1]/2.,SNRp.shape[1]/2.,-SNRp.shape[0]/2.,SNRp.shape[0]/2.], levels=levelsP, colors='grey', linewidths=0.5)
+        cont = ax.contour(SNRp, levels=levelsP, colors='grey', linewidths=0.5)
     else:
         # Defaults to intensity map
         vmin, vmax = 0., np.max(stkI.data[stkI.data > 0.]*convert_flux)
-        im = ax.imshow(stkI.data*convert_flux,extent=[-stkI.data.shape[1]/2.,stkI.data.shape[1]/2.,-stkI.data.shape[0]/2.,stkI.data.shape[0]/2.], vmin=vmin, vmax=vmax, aspect='auto', cmap='inferno', alpha=1.)
+        im = ax.imshow(stkI.data*convert_flux, vmin=vmin, vmax=vmax, aspect='auto', cmap='inferno', alpha=1.)
         cbar = plt.colorbar(im, cax=cbar_ax, label=r"$F_{\lambda}$ [$ergs \cdot cm^{-2} \cdot s^{-1} \cdot \AA$]")
         levelsI = np.linspace(SNRi_cut, SNRi.max(), 10)
-        cont = ax.contour(SNRi, extent=[-SNRi.shape[1]/2.,SNRi.shape[1]/2.,-SNRi.shape[0]/2.,SNRi.shape[0]/2.], levels=levelsI, colors='grey', linewidths=0.5)
+        cont = ax.contour(SNRi, levels=levelsI, colors='grey', linewidths=0.5)
 
     fontprops = fm.FontProperties(size=16)
-    px_size = wcs.wcs.get_cdelt()[0]
+    px_size = wcs.wcs.get_cdelt()[0]*3600.
     px_sc = AnchoredSizeBar(ax.transData, 1./px_size, '1 arcsec', 3, pad=0.5, sep=5, borderpad=0.5, frameon=False, size_vertical=0.005, color='w', fontproperties=fontprops)
     ax.add_artist(px_sc)
 
-    X, Y = np.meshgrid(np.linspace(-stkI.data.shape[0]/2.,stkI.data.shape[0]/2.,stkI.data.shape[0]), np.linspace(-stkI.data.shape[1]/2.,stkI.data.shape[1]/2.,stkI.data.shape[1]))
+    #pol.data[np.isfinite(pol.data)] = 1./2.
+    X, Y = np.meshgrid(np.linspace(0,stkI.data.shape[0],stkI.data.shape[0]), np.linspace(0,stkI.data.shape[1],stkI.data.shape[1]))
     U, V = pol.data*np.cos(np.pi/2.+pang.data*np.pi/180.), pol.data*np.sin(np.pi/2.+pang.data*np.pi/180.)
     Q = ax.quiver(X[::step_vec,::step_vec],Y[::step_vec,::step_vec],U[::step_vec,::step_vec],V[::step_vec,::step_vec],units='xy',angles='uv',scale=0.5,scale_units='xy',pivot='mid',headwidth=0.,headlength=0.,headaxislength=0.,width=0.1,color='w')
     pol_sc = AnchoredSizeBar(ax.transData, 2., r"$P$= 100 %", 4, pad=0.5, sep=5, borderpad=0.5, frameon=False, size_vertical=0.005, color='w', fontproperties=fontprops)
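Since the headers now store CDELT in degrees, wcs.wcs.get_cdelt()[0]*3600. is the pixel scale in arcsec, and 1./px_size is the length of a 1-arcsec bar in pixel units, which matches the images now drawn in plain pixel coordinates (the reason the extent=... arguments are dropped). For instance, with values meant only as an illustration:

```python
cdelt_deg = 2.8e-5                   # illustrative CDELT after rebinning to ~0.10"
px_size = cdelt_deg * 3600.          # ~0.10 arcsec per pixel
bar_length_px = 1. / px_size         # ~10 pixels span 1 arcsec in the plot
```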
@@ -481,4 +487,68 @@ def polarization_map(Stokes, data_mask, rectangle=None, SNRp_cut=3., SNRi_cut=30
         fig.savefig(plots_folder+savename+".png",bbox_inches='tight',dpi=200)
 
     plt.show()
-    return 0
+    return fig, ax
+
+
+class align_maps(object):
+    """
+    Class to interactively align maps with different WCS.
+    """
+    def __init__(self, Stokes, other_map, SNRp_cut=3., SNRi_cut=30.):
+        #Get data
+        stkI = Stokes[np.argmax([Stokes[i].header['datatype']=='I_stokes' for i in range(len(Stokes))])]
+        stk_cov = Stokes[np.argmax([Stokes[i].header['datatype']=='IQU_cov_matrix' for i in range(len(Stokes))])]
+        pol = Stokes[np.argmax([Stokes[i].header['datatype']=='Pol_deg_debiased' for i in range(len(Stokes))])]
+        pol_err = Stokes[np.argmax([Stokes[i].header['datatype']=='Pol_deg_err' for i in range(len(Stokes))])]
+        pang = Stokes[np.argmax([Stokes[i].header['datatype']=='Pol_ang' for i in range(len(Stokes))])]
+
+        wcs1 = WCS(Stokes[0]).deepcopy()
+        convert_flux = Stokes[0].header['photflam']
+        wcs2 = WCS(other_map).deepcopy()
+
+        #Compute SNR and apply cuts
+        pol.data[pol.data == 0.] = np.nan
+        SNRp = pol.data/pol_err.data
+        SNRp[np.isnan(SNRp)] = 0.
+        pol.data[SNRp < SNRp_cut] = np.nan
+        SNRi = stkI.data/np.sqrt(stk_cov.data[0,0])
+        SNRi[np.isnan(SNRi)] = 0.
+        pol.data[SNRi < SNRi_cut] = np.nan
+
+        plt.rcParams.update({'font.size': 16})
+        self.fig = plt.figure(figsize=(25,15))
+        #Plot the UV map
+        self.ax1 = self.fig.add_subplot(121, projection=wcs1)
+        self.ax1.set_facecolor('k')
+
+        vmin, vmax = 0., np.max(stkI.data[stkI.data > 0.]*convert_flux)
+        im1 = self.ax1.imshow(stkI.data*convert_flux, vmin=vmin, vmax=vmax, aspect='auto', cmap='inferno', alpha=1.)
+
+        fontprops = fm.FontProperties(size=16)
+        px_size = wcs1.wcs.get_cdelt()[0]*3600.
+        px_sc = AnchoredSizeBar(self.ax1.transData, 1./px_size, '1 arcsec', 3, pad=0.5, sep=5, borderpad=0.5, frameon=False, size_vertical=0.005, color='w', fontproperties=fontprops)
+        self.ax1.add_artist(px_sc)
+
+        north_dir1 = AnchoredDirectionArrows(self.ax1.transAxes, "E", "N", length=-0.08, fontsize=0.03, loc=1, aspect_ratio=-1, sep_y=0.01, sep_x=0.01, angle=-Stokes[0].header['orientat'], color='w', arrow_props={'ec': 'w', 'fc': 'w', 'alpha': 1,'lw': 2})
+        self.ax1.add_artist(north_dir1)
+
+        pol.data[np.isfinite(pol.data)] = 1./2.
+        step_vec = 1
+        X, Y = np.meshgrid(np.linspace(0,stkI.data.shape[0],stkI.data.shape[0]), np.linspace(0,stkI.data.shape[1],stkI.data.shape[1]))
+        U, V = pol.data*np.cos(np.pi/2.+pang.data*np.pi/180.), pol.data*np.sin(np.pi/2.+pang.data*np.pi/180.)
+        Q = self.ax1.quiver(X[::step_vec,::step_vec],Y[::step_vec,::step_vec],U[::step_vec,::step_vec],V[::step_vec,::step_vec],units='xy',angles='uv',scale=0.5,scale_units='xy',pivot='mid',headwidth=0.,headlength=0.,headaxislength=0.,width=0.1,color='w')
+
+        self.ax1.set_title("Click on selected point of reference.")
+
+        #Plot the other map
+        self.ax2 = self.fig.add_subplot(122, projection=wcs2)
+        self.ax2.set_facecolor('k')
+
+        vmin, vmax = 0., np.max(other_map.data[other_map.data > 0.])
+        im2 = self.ax2.imshow(other_map.data, vmin=vmin, vmax=vmax, aspect='auto', cmap='inferno', alpha=1.)
+
+        fontprops = fm.FontProperties(size=16)
+        px_size = wcs2.wcs.get_cdelt()[0]*3600.
+        px_sc = AnchoredSizeBar(self.ax2.transData, 1./px_size, '1 arcsec', 3, pad=0.5, sep=5, borderpad=0.5, frameon=False, size_vertical=0.005, color='w', fontproperties=fontprops)
+        self.ax2.add_artist(px_sc)
+
+        self.ax2.set_title("Click on selected point of reference.")
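Taken together, the plots.py changes make data_mask optional and have polarization_map hand back its figure and axes. A minimal usage sketch under stated assumptions: the module path is assumed to mirror the existing lib.convex_hull import, and the product file name is illustrative only:

```python
from astropy.io import fits
from lib.plots import polarization_map   # assumed module path

# illustrative name for a Stokes product written by the pipeline
Stokes = fits.open("IC5063_FOC_combine_FWHM020_pol.fits")

# data_mask may now be omitted (a full mask is built internally) and the
# figure/axes come back for further interactive work
fig, ax = polarization_map(Stokes, SNRp_cut=7., SNRi_cut=180., display='snri')
```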
@@ -331,23 +331,7 @@ def deconvolve_array(data_array, headers, psf='gaussian', FWHM=1., scale='px',
     for i,header in enumerate(headers):
         # Get current pixel size
         w = WCS(header).deepcopy()
-        if w.wcs.has_cd():
-            del w.wcs.cd
-            keys = list(w.to_header().keys())+['CD1_1','CD1_2','CD2_1','CD2_2']
-            for key in keys:
-                header.remove(key, ignore_missing=True)
-            w.wcs.cdelt = 3600.*np.sqrt(np.sum(w.wcs.get_pc()**2,axis=1))
-        if (w.wcs.cdelt == np.array([1., 1.])).all() and \
-                (w.array_shape in [(512, 512),(1024,512),(512,1024),(1024,1024)]):
-            # Update WCS with relevant information
-            HST_aper = 2400. # HST aperture in mm
-            f_ratio = header['f_ratio']
-            px_dim = np.array([25., 25.]) # Pixel dimension in µm
-            if header['pxformt'].lower() == 'zoom':
-                px_dim[0] = 50.
-            w.wcs.cdelt = 206.3*px_dim/(f_ratio*HST_aper)
-            header.update(w.to_header())
-        pxsize[i] = np.round(w.wcs.cdelt,5)
+        pxsize[i] = np.round(w.wcs.cdelt/3600.,5)
     if (pxsize != pxsize[0]).any():
         raise ValueError("Not all images in array have same pixel size")
     FWHM /= pxsize[0].min()
@@ -570,31 +554,17 @@ def rebin_array(data_array, error_array, headers, pxsize, scale,
         image, error, header = enum
         # Get current pixel size
         w = WCS(header).deepcopy()
-        if w.wcs.has_cd():
-            del w.wcs.cd
-            keys = list(w.to_header().keys())+['CD1_1','CD1_2','CD2_1','CD2_2']
-            for key in keys:
-                header.remove(key, ignore_missing=True)
-            w.wcs.cdelt = 3600.*np.sqrt(np.sum(w.wcs.get_pc()**2,axis=1))
-        if (w.wcs.cdelt == np.array([1., 1.])).all() and \
-                (w.array_shape in [(512, 512),(1024,512),(512,1024),(1024,1024)]):
-            # Update WCS with relevant information
-            f_ratio = header['f_ratio']
-            px_dim = np.array([25., 25.]) # Pixel dimension in µm
-            if header['pxformt'].lower() == 'zoom':
-                px_dim[0] = 50.
-            w.wcs.cdelt = 206.3*px_dim/(f_ratio*HST_aper)
-            header.update(w.to_header())
 
         # Compute binning ratio
         if scale.lower() in ['px', 'pixel']:
             Dxy = np.array([pxsize,]*2)
         elif scale.lower() in ['arcsec','arcseconds']:
-            Dxy = np.floor(pxsize/w.wcs.cdelt).astype(int)
+            Dxy = np.floor(pxsize/w.wcs.cdelt/3600.).astype(int)
         else:
             raise ValueError("'{0:s}' invalid scale for binning.".format(scale))
 
         if (Dxy <= 1.).any():
+            print(Dxy, pxsize, w.wcs.cdelt*3600.)
             raise ValueError("Requested pixel size is below resolution.")
         new_shape = (image.shape//Dxy).astype(int)
 
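With CDELT now stored in degrees, the requested pixel size in arcsec must be divided by cdelt·3600 to get the binning factor in native pixels, which is what the pxsize/w.wcs.cdelt/3600. expression does. A rough check (the native scale value is illustrative):

```python
import numpy as np

pxsize = 0.10                          # requested pixel size from the main script, arcsec
cdelt = np.array([6.2e-6, 6.2e-6])     # illustrative native scale, deg/px (~0.022")
Dxy = np.floor(pxsize / cdelt / 3600.).astype(int)
print(Dxy)                             # -> [4 4]: bin 4x4 native pixels per output pixel
```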
@@ -605,9 +575,6 @@ def rebin_array(data_array, error_array, headers, pxsize, scale,
         # Propagate error
         rms_image = np.sqrt(bin_ndarray(image**2, new_shape=new_shape,
                             operation='average'))
-        #std_image = np.sqrt(bin_ndarray(image**2, new_shape=new_shape,
-        #    operation='average') - bin_ndarray(image, new_shape=new_shape,
-        #    operation='average')**2)
         new_error = np.sqrt(Dxy[0]*Dxy[1])*bin_ndarray(error,
                             new_shape=new_shape, operation='average')
         rebinned_error.append(np.sqrt(rms_image**2 + new_error**2))
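The error propagation kept in this hunk treats the Dxy[0]·Dxy[1] native pixels entering each output pixel as independent: their quadrature sum is approximated by √N times the average per-pixel error, which is the √(Dxy[0]*Dxy[1]) prefactor on the averaged error map, and that term is then combined in quadrature with the rms of the binned signal. A quick numerical sanity check (values arbitrary):

```python
import numpy as np

rng = np.random.default_rng(0)
err = rng.uniform(1., 2., size=(4, 4))     # per-pixel errors inside one 4x4 bin
quad_sum = np.sqrt(np.sum(err**2))         # exact quadrature sum for the bin
approx = np.sqrt(err.size) * err.mean()    # sqrt(N) * mean error, as in the code
print(quad_sum, approx)                    # agree closely when errors are comparable
```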
@@ -806,23 +773,7 @@ def smooth_data(data_array, error_array, data_mask, headers, FWHM=1.,
     for i,header in enumerate(headers):
         # Get current pixel size
         w = WCS(header).deepcopy()
-        if w.wcs.has_cd():
-            del w.wcs.cd
-            keys = list(w.to_header().keys())+['CD1_1','CD1_2','CD2_1','CD2_2']
-            for key in keys:
-                header.remove(key, ignore_missing=True)
-            w.wcs.cdelt = 3600.*np.sqrt(np.sum(w.wcs.get_pc()**2,axis=1))
-        if (w.wcs.cdelt == np.array([1., 1.])).all() and \
-                (w.array_shape in [(512, 512),(1024,512),(512,1024),(1024,1024)]):
-            # Update WCS with relevant information
-            HST_aper = 2400. # HST aperture in mm
-            f_ratio = header['f_ratio']
-            px_dim = np.array([25., 25.]) # Pixel dimension in µm
-            if header['pxformt'].lower() == 'zoom':
-                px_dim[0] = 50.
-            w.wcs.cdelt = 206.3*px_dim/(f_ratio*HST_aper)
-            header.update(w.to_header())
-        pxsize[i] = np.round(w.wcs.cdelt,4)
+        pxsize[i] = np.round(w.wcs.cdelt*3600.,4)
     if (pxsize != pxsize[0]).any():
         raise ValueError("Not all images in array have same pixel size")
     FWHM /= pxsize[0].min()
@@ -1176,46 +1127,6 @@ def compute_Stokes(data_array, error_array, data_mask, headers,
     #Stokes_cov[1,1] += s_Q2_axis
     #Stokes_cov[2,2] += s_U2_axis
 
-    # s_I_I = np.sqrt(Stokes_cov[0,0])/I_stokes*100.
-    # s_I_axis_I = np.sqrt(s_I2_axis)/I_stokes*100.
-    # s_Q_Q = np.sqrt(Stokes_cov[1,1])/Q_stokes*100.
-    # s_Q_axis_Q = np.sqrt(s_Q2_axis)/Q_stokes*100.
-    # s_U_U = np.sqrt(Stokes_cov[2,2])/U_stokes*100.
-    # s_U_axis_U = np.sqrt(s_U2_axis)/U_stokes*100.
-    #
-    # fig, ax = plt.subplots(3,3,figsize=(15,15))
-    # im = ax[0,0].imshow(s_I_I, origin='lower')
-    # ax[0,0].set_title(r"$\frac{\sigma_{I}}{I}$")
-    # fig.colorbar(im, ax=ax[0,0])
-    # im = ax[0,1].imshow(s_I_axis_I, origin='lower')
-    # ax[0,1].set_title(r"$\frac{\sigma_{I}^{axis}}{I}$")
-    # fig.colorbar(im, ax=ax[0,1])
-    # im = ax[0,2].imshow(s_I_axis_I/s_I_I, origin='lower')
-    # ax[0,2].set_title(r"$\frac{\sigma_{I}^{axis}}{\sigma_{I}}$")
-    # fig.colorbar(im, ax=ax[0,2])
-    # im = ax[1,0].imshow(s_Q_Q, origin='lower')
-    # ax[1,0].set_title(r"$\frac{\sigma_{Q}}{Q}$")
-    # fig.colorbar(im, ax=ax[1,0])
-    # im = ax[1,1].imshow(s_Q_axis_Q, origin='lower')
-    # ax[1,1].set_title(r"$\frac{\sigma_{Q}^{axis}}{Q}$")
-    # fig.colorbar(im, ax=ax[1,1])
-    # im = ax[1,2].imshow(s_Q_axis_Q/s_Q_Q, origin='lower')
-    # ax[1,2].set_title(r"$\frac{\sigma_{Q}^{axis}}{\sigma_{Q}}$")
-    # fig.colorbar(im, ax=ax[1,2])
-    # im = ax[2,0].imshow(s_U_U, origin='lower')
-    # ax[2,0].set_title(r"$\frac{\sigma_{U}}{U}$")
-    # fig.colorbar(im, ax=ax[2,0])
-    # im = ax[2,1].imshow(s_U_axis_U, origin='lower')
-    # ax[2,1].set_title(r"$\frac{\sigma_{U}^{axis}}{U}$")
-    # fig.colorbar(im, ax=ax[2,1])
-    # im = ax[2,2].imshow(s_U_axis_U/s_U_U, origin='lower')
-    # ax[2,2].set_title(r"$\frac{\sigma_{U}^{axis}}{\sigma_{U}}$")
-    # fig.colorbar(im, ax=ax[2,2])
-    # plt.show()
-    # print("s_I/I = {}% ; s_I_axis/I = {}%".format(np.mean(s_I_I[I_stokes > 0.]), np.mean(s_I_axis_I[I_stokes > 0.])))
-    # print("s_Q/Q = {}% ; s_Q_axis/Q = {}%".format(np.mean(s_Q_Q[Q_stokes > 0.]), np.mean(s_Q_axis_Q[Q_stokes > 0.])))
-    # print("s_U/U = {}% ; s_U_axis/U = {}%".format(np.mean(s_U_U[U_stokes > 0.]), np.mean(s_U_axis_U[U_stokes > 0.])))
-
     if not(FWHM is None) and (smoothing.lower() in ['gaussian_after','gauss_after']):
         Stokes_array = np.array([I_stokes, Q_stokes, U_stokes])
         Stokes_error = np.array([np.sqrt(Stokes_cov[i,i]) for i in range(3)])
@@ -1416,11 +1327,20 @@ def rotate_Stokes(I_stokes, Q_stokes, U_stokes, Stokes_cov, data_mask, headers,
 
         new_wcs = WCS(header).deepcopy()
         if new_wcs.wcs.has_cd(): # CD matrix
+            # Update WCS with relevant information
+            HST_aper = 2400. # HST aperture in mm
+            f_ratio = header['f_ratio']
+            px_dim = np.array([25., 25.]) # Pixel dimension in µm
+            if ref_header['pxformt'].lower() == 'zoom':
+                px_dim[0] = 50.
+            new_cdelt = 206.3/3600.*px_dim/(f_ratio*HST_aper)
+            old_cd = new_wcs.wcs.cd
             del new_wcs.wcs.cd
             keys = ['CD1_1','CD1_2','CD2_1','CD2_2']
             for key in keys:
                 new_header.remove(key, ignore_missing=True)
-            new_wcs.wcs.cdelt = 3600.*np.sqrt(np.sum(w.wcs.get_pc()**2,axis=1))
+            new_wcs.wcs.pc = np.dot(mrot, np.dot(old_cd, np.diag(1./new_cdelt)))
+            new_wcs.wcs.cdelt = new_cdelt
         elif new_wcs.wcs.has_pc(): # PC matrix + CDELT
             newpc = np.dot(mrot, new_wcs.wcs.get_pc())
             new_wcs.wcs.pc = newpc
@@ -1495,11 +1415,20 @@ def rotate_data(data_array, error_array, data_mask, headers, ang):
 
         new_wcs = WCS(header).deepcopy()
         if new_wcs.wcs.has_cd(): # CD matrix
+            # Update WCS with relevant information
+            HST_aper = 2400. # HST aperture in mm
+            f_ratio = ref_header['f_ratio']
+            px_dim = np.array([25., 25.]) # Pixel dimension in µm
+            if ref_header['pxformt'].lower() == 'zoom':
+                px_dim[0] = 50.
+            new_cdelt = 206.3/3600.*px_dim/(f_ratio*HST_aper)
+            old_cd = new_wcs.wcs.cd
             del new_wcs.wcs.cd
             keys = ['CD1_1','CD1_2','CD2_1','CD2_2']
             for key in keys:
                 new_header.remove(key, ignore_missing=True)
-            new_wcs.wcs.cdelt = 3600.*np.sqrt(np.sum(new_wcs.wcs.get_pc()**2,axis=1))
+            new_wcs.wcs.pc = np.dot(mrot, np.dot(old_cd, np.diag(1./new_cdelt)))
+            new_wcs.wcs.cdelt = new_cdelt
         elif new_wcs.wcs.has_pc(): # PC matrix + CDELT
             newpc = np.dot(mrot, new_wcs.wcs.get_pc())
             new_wcs.wcs.pc = newpc
|||||||