move alignment before rebinning and before background computation

This commit is contained in:
Thibault Barnouin
2022-04-12 17:17:34 +02:00
parent 7bbd2bc2e8
commit 3770a78940
52 changed files with 269 additions and 187 deletions
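The commit reorders the reduction pipeline from background estimation -> rebinning -> alignment to alignment -> (optional rotation) -> rebinning -> background estimation, so the background is measured on the grid that actually feeds the Stokes computation. A minimal, self-contained sketch of that ordering on toy numpy data (synthetic frames and known shifts; the real steps live in the project's proj_red module, see the diffs below):

import numpy as np
from scipy.ndimage import shift as sc_shift

rng = np.random.default_rng(0)
images = np.stack([rng.normal(10., 1., (64, 64)) for _ in range(3)])
shifts = [(0., 0.), (1.5, -0.8), (-2.2, 0.4)]

# 1. Align first: undo the (here, known) shifts; the pipeline measures them
#    by phase cross-correlation.
aligned = np.stack([sc_shift(im, s, order=1, cval=0.) for im, s in zip(images, shifts)])

# 2. Then rebin: 2x2 block sums.
binned = aligned.reshape(3, 32, 2, 32, 2).sum(axis=(2, 4))

# 3. Background last: RMS of a presumed source-free interior patch of the
#    binned frames, i.e. on the final grid.
sub = binned[:, 2:10, 2:10]
bkg_rms = np.sqrt(((sub - sub.mean(axis=(1, 2), keepdims=True))**2).mean(axis=(1, 2)))
print(bkg_rms)   # per-frame background RMS on the final grid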

(Binary image diffs for the updated plot files are not shown.)
View File

@@ -14,6 +14,7 @@ import lib.plots as proj_plots #Functions for plotting data
 from lib.convex_hull import image_hull
 from lib.deconvolve import from_file_psf
 import matplotlib.pyplot as plt
+from astropy.wcs import WCS

 def main():

@@ -26,6 +27,11 @@ def main():
     psf_file = 'NGC1068_f253m00.fits'
     globals()['plots_folder'] = "../plots/NGC1068_x274020/"
+    # globals()['data_folder'] = "../data/IC5063_x3nl030/"
+    # infiles = ['x3nl0301r_c0f.fits','x3nl0302r_c0f.fits','x3nl0303r_c0f.fits']
+    # psf_file = 'IC5063_f502m00.fits'
+    # globals()['plots_folder'] = "../plots/IC5063_x3nl030/"
     # globals()['data_folder'] = "../data/NGC1068_x14w010/"
     # infiles = ['x14w0101t_c0f.fits','x14w0102t_c0f.fits','x14w0103t_c0f.fits',
     #            'x14w0104t_c0f.fits','x14w0105p_c0f.fits','x14w0106t_c0f.fits']

@@ -63,11 +69,6 @@ def main():
     #            'x3995202r_c0f.fits','x3995206r_c0f.fits']
     # globals()['plots_folder'] = "../plots/PG1630+377_x39510/"
-    # globals()['data_folder'] = "../data/IC5063_x3nl030/"
-    # infiles = ['x3nl0301r_c0f.fits','x3nl0302r_c0f.fits','x3nl0303r_c0f.fits']
-    # psf_file = 'IC5063_f502m00.fits'
-    # globals()['plots_folder'] = "../plots/IC5063_x3nl030/"
     # globals()['data_folder'] = "../data/MKN3_x3nl010/"
     # infiles = ['x3nl0101r_c0f.fits','x3nl0102r_c0f.fits','x3nl0103r_c0f.fits']
     # globals()['plots_folder'] = "../plots/MKN3_x3nl010/"

@@ -93,13 +94,14 @@ def main():
     #psf = from_file_psf(data_folder+psf_file)
     psf_FWHM = 0.15
     psf_scale = 'arcsec'
-    psf_shape=(9,9)
+    psf_shape=(25,25)
-    iterations = 10
+    iterations = 5
+    algo="richardson"
     # Initial crop
-    display_crop = False
+    display_crop = True
     # Error estimation
-    error_sub_shape = (75,75)
+    error_sub_shape = (10,10)
-    display_error = False
+    display_error = True
     # Data binning
     rebin = True
     if rebin:

@@ -108,9 +110,9 @@ def main():
     rebin_operation = 'sum' #sum or average
     # Alignement
     align_center = 'image' #If None will align image to image center
-    display_data = False
+    display_data = True
     # Smoothing
-    smoothing_function = 'combine' #gaussian_after, gaussian or combine
+    smoothing_function = 'combine' #gaussian_after, weighted_gaussian_after, gaussian, weighted_gaussian or combine
     smoothing_FWHM = 0.20 #If None, no smoothing is done
     smoothing_scale = 'arcsec' #pixel or arcsec
     # Rotation

@@ -118,11 +120,12 @@ def main():
     rotate_data = False #rotation to North convention can give erroneous results
     # Final crop
     crop = False #Crop to desired ROI
+    final_display = True
     # Polarization map output
     figname = 'NGC1068_FOC' #target/intrument name
     figtype = '_combine_FWHM020' #additionnal informations
-    SNRp_cut = 10. #P measurments with SNR>3
+    SNRp_cut = 5. #P measurments with SNR>3
-    SNRi_cut = 100. #I measurments with SNR>30, which implies an uncertainty in P of 4.7%.
+    SNRi_cut = 50. #I measurments with SNR>30, which implies an uncertainty in P of 4.7%.
     step_vec = 1 #plot all vectors in the array. if step_vec = 2, then every other vector will be plotted
     # if step_vec = 0 then all vectors are displayed at full length

@@ -134,17 +137,41 @@ def main():
     data_array, error_array, headers = proj_red.crop_array(data_array, headers, step=5, null_val=0., inside=True, display=display_crop, savename=figname, plots_folder=plots_folder)
     # Deconvolve data using Richardson-Lucy iterative algorithm with a gaussian PSF of given FWHM.
     if deconvolve:
-        data_array = proj_red.deconvolve_array(data_array, headers, psf=psf, FWHM=psf_FWHM, scale=psf_scale, shape=psf_shape, iterations=iterations)
+        data_array = proj_red.deconvolve_array(data_array, headers, psf=psf, FWHM=psf_FWHM, scale=psf_scale, shape=psf_shape, iterations=iterations, algo=algo)
-    # Estimate error from data background, estimated from sub-image of desired sub_shape.
-    data_array, error_array, headers = proj_red.get_error(data_array, headers, sub_shape=error_sub_shape, display=display_error, savename=figname+"_errors", plots_folder=plots_folder)
-    # Rebin data to desired pixel size.
-    Dxy = np.ones(2)
-    if rebin:
-        data_array, error_array, headers, Dxy = proj_red.rebin_array(data_array, error_array, headers, pxsize=pxsize, scale=px_scale, operation=rebin_operation)
-    # Align and rescale images with oversampling.
+    Dxy = np.ones(2)*10
     data_mask = np.ones(data_array.shape[1:]).astype(bool)
+    # Align and rescale images with oversampling.
     if px_scale.lower() not in ['full','integrate']:
-        data_array, error_array, headers, data_mask = proj_red.align_data(data_array, headers, error_array, upsample_factor=int(Dxy.min()), ref_center=align_center, return_shifts=False)
+        data_array, error_array, headers, data_mask = proj_red.align_data(data_array, headers, error_array=error_array, upsample_factor=int(Dxy.min()), ref_center=align_center, return_shifts=False)
+        im = plt.imshow(error_array[0]/data_array[0]*100, origin='lower', vmin=0, vmax=100)
+        plt.colorbar(im)
+        wcs = WCS(headers[0])
+        plt.plot(*wcs.wcs.crpix,'r+')
+        plt.title("Align error")
+        plt.show()
+    # Rotate data to have North up
+    ref_header = deepcopy(headers[0])
+    if rotate_data:
+        alpha = ref_header['orientat']
+        mrot = np.array([[np.cos(-alpha), -np.sin(-alpha)], [np.sin(-alpha), np.cos(-alpha)]])
+        data_array, error_array, headers, data_mask = proj_red.rotate_data(data_array, error_array, data_mask, headers, -ref_header['orientat'])
+        im = plt.imshow(error_array[0]/data_array[0]*100, origin='lower', vmin=0, vmax=100)
+        plt.colorbar(im)
+        wcs = WCS(headers[0])
+        plt.plot(*wcs.wcs.crpix,'r+')
+        plt.title("Rotate error")
+        plt.show()
+    # Rebin data to desired pixel size.
+    if rebin:
+        data_array, error_array, headers, Dxy, data_mask = proj_red.rebin_array(data_array, error_array, headers, pxsize=pxsize, scale=px_scale, operation=rebin_operation, data_mask=data_mask)
+    # Estimate error from data background, estimated from sub-image of desired sub_shape.
+    data_array, error_array, headers = proj_red.get_error(data_array, headers, error_array, data_mask, sub_shape=error_sub_shape, display=display_error, savename=figname+"_errors", plots_folder=plots_folder)
+    im = plt.imshow(error_array[0]/data_array[0]*100, origin='lower', vmin=0, vmax=100)
+    plt.colorbar(im)
+    wcs = WCS(headers[0])
+    plt.plot(*wcs.wcs.crpix,'r+')
+    plt.title("Background error")
+    plt.show()

     if px_scale.lower() not in ['full','integrate']:
         vertex = image_hull(data_mask,step=5,null_val=0.,inside=True)

@@ -153,14 +180,6 @@ def main():
         shape = np.array([vertex[1]-vertex[0],vertex[3]-vertex[2]])
         rectangle = [vertex[2], vertex[0], shape[1], shape[0], 0., 'g']

-    # Rotate data to have North up
-    ref_header = deepcopy(headers[0])
-    if rotate_data:
-        alpha = ref_header['orientat']
-        mrot = np.array([[np.cos(-alpha), -np.sin(-alpha)], [np.sin(-alpha), np.cos(-alpha)]])
-        rectangle[0:2] = np.dot(mrot, np.asarray(rectangle[0:2]))+np.array(data_array.shape[1:])/2
-        rectangle[4] = alpha
-        data_array, error_array, headers, data_mask = proj_red.rotate_data(data_array, error_array, data_mask, headers, -ref_header['orientat'])
     #Plot array for checking output
     if display_data:
         proj_plots.plot_obs(data_array, headers, vmin=data_array.min(), vmax=data_array.max(), rectangle =[rectangle,]*data_array.shape[0], savename=figname+"_center_"+align_center, plots_folder=plots_folder)

@@ -173,6 +192,13 @@ def main():
     # Bibcode : 1995chst.conf...10J
     I_stokes, Q_stokes, U_stokes, Stokes_cov = proj_red.compute_Stokes(data_array, error_array, data_mask, headers, FWHM=smoothing_FWHM, scale=smoothing_scale, smoothing=smoothing_function)
+    im = plt.imshow(np.sqrt(Stokes_cov[0,0])/I_stokes*100, origin='lower', vmin=0, vmax=100)
+    plt.colorbar(im)
+    wcs = WCS(headers[0])
+    plt.plot(*wcs.wcs.crpix,'r+')
+    plt.title("Stokes error")
+    plt.show()

     ## Step 3:
     # Rotate images to have North up
     ref_header = deepcopy(headers[0])

@@ -183,6 +209,13 @@ def main():
         rectangle[0:2] = np.dot(mrot, np.asarray(rectangle[0:2]))+np.array(data_array.shape[1:])/2
         rectangle[4] = alpha
         I_stokes, Q_stokes, U_stokes, Stokes_cov, headers, data_mask = proj_red.rotate_Stokes(I_stokes, Q_stokes, U_stokes, Stokes_cov, data_mask, headers, -ref_header['orientat'], SNRi_cut=None)
+    im = plt.imshow(np.sqrt(Stokes_cov[0,0])/I_stokes*100, origin='lower', vmin=0, vmax=100)
+    plt.colorbar(im)
+    wcs = WCS(headers[0])
+    plt.plot(*wcs.wcs.crpix,'r+')
+    plt.title("Rotate Stokes error")
+    plt.show()

     # Compute polarimetric parameters (polarization degree and angle).
     P, debiased_P, s_P, s_P_P, PA, s_PA, s_PA_P = proj_red.compute_pol(I_stokes, Q_stokes, U_stokes, Stokes_cov, headers)

@@ -201,7 +234,7 @@ def main():
     Stokes_test, data_mask = stokescrop.hdul_crop, stokescrop.data_mask

     # Plot polarization map (Background is either total Flux, Polarization degree or Polarization degree error).
-    if px_scale.lower() not in ['full','integrate']:
+    if px_scale.lower() not in ['full','integrate'] and final_display:
         proj_plots.polarization_map(deepcopy(Stokes_test), data_mask, rectangle=None, SNRp_cut=SNRp_cut, SNRi_cut=SNRi_cut, step_vec=step_vec, savename=figname+figtype, plots_folder=plots_folder, display=None)
         proj_plots.polarization_map(deepcopy(Stokes_test), data_mask, rectangle=None, SNRp_cut=SNRp_cut, SNRi_cut=SNRi_cut, step_vec=step_vec, savename=figname+figtype+"_P_flux", plots_folder=plots_folder, display='Pol_Flux')
         proj_plots.polarization_map(deepcopy(Stokes_test), data_mask, rectangle=None, SNRp_cut=SNRp_cut, SNRi_cut=SNRi_cut, step_vec=step_vec, savename=figname+figtype+"_P", plots_folder=plots_folder, display='Pol_deg')

@@ -209,7 +242,7 @@ def main():
         proj_plots.polarization_map(deepcopy(Stokes_test), data_mask, rectangle=None, SNRp_cut=SNRp_cut, SNRi_cut=SNRi_cut, step_vec=step_vec, savename=figname+figtype+"_P_err", plots_folder=plots_folder, display='Pol_deg_err')
         proj_plots.polarization_map(deepcopy(Stokes_test), data_mask, rectangle=None, SNRp_cut=SNRp_cut, SNRi_cut=SNRi_cut, step_vec=step_vec, savename=figname+figtype+"_SNRi", plots_folder=plots_folder, display='SNRi')
         proj_plots.polarization_map(deepcopy(Stokes_test), data_mask, rectangle=None, SNRp_cut=SNRp_cut, SNRi_cut=SNRi_cut, step_vec=step_vec, savename=figname+figtype+"_SNRp", plots_folder=plots_folder, display='SNRp')
-    else:
+    elif final_display:
         proj_plots.polarization_map(deepcopy(Stokes_test), data_mask, rectangle=None, SNRp_cut=SNRp_cut, SNRi_cut=SNRi_cut, step_vec=step_vec, savename=figname+figtype, plots_folder=plots_folder, display='default')
     return 0
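The same six-line relative-error diagnostic is pasted after each pipeline step above. A hypothetical helper, not part of the commit, that would factor it out:

import matplotlib.pyplot as plt
from astropy.wcs import WCS

def show_rel_error(data, error, header, title):
    # Relative error map in percent for one frame, with the WCS reference
    # pixel marked, mirroring the repeated diagnostic blocks in the diff.
    im = plt.imshow(error/data*100, origin='lower', vmin=0, vmax=100)
    plt.colorbar(im, label='relative error [%]')
    wcs = WCS(header)
    plt.plot(*wcs.wcs.crpix, 'r+')   # CRPIX position
    plt.title(title)
    plt.show()

# e.g. show_rel_error(data_array[0], error_array[0], headers[0], "Align error")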

View File

@@ -467,6 +467,8 @@ def deconvolve_im(image, psf, alpha=0.1, error=None, iterations=20, clip=True,
     return im_deconv

+def gaussian2d(x,y,sigma):
+    return np.exp(-(x**2+y**2)/(2*sigma**2))/(2*np.pi*sigma**2)

 def gaussian_psf(FWHM=1., shape=(5,5)):
     """

@@ -489,11 +491,10 @@ def gaussian_psf(FWHM=1., shape=(5,5)):
     stdev = FWHM/(2.*np.sqrt(2.*np.log(2.)))
     # Create kernel of desired shape
-    xx, yy = np.indices(shape)
-    x0, y0 = (np.array(shape)-1.)/2.
-    kernel = np.exp(-0.5*((xx-x0)**2+(yy-y0)**2)/stdev**2)
+    x, y = np.meshgrid(np.arange(-shape[0]/2,shape[0]/2),np.arange(-shape[1]/2,shape[1]/2))
+    kernel = gaussian2d(x, y, stdev)

-    return kernel
+    return kernel/kernel.sum()

 def from_file_psf(filename):
     """

@@ -511,7 +512,7 @@ def from_file_psf(filename):
     """
     with fits.open(filename) as f:
         psf = f[0].data
-    if (type(psf) != numpy.ndarray) or len(psf) != 2:
+    if (type(psf) != np.ndarray) or len(psf) != 2:
         raise ValueError("Invalid PSF image in PrimaryHDU at {0:s}".format(filename))
     #Return the normalized Point Spread Function
     kernel = psf/psf.max()
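The rewritten gaussian_psf builds its kernel from the new gaussian2d helper on a grid centred on zero and normalizes it to unit sum, so convolving with it preserves total flux. A standalone check, with both functions copied as they appear in the diff:

import numpy as np

def gaussian2d(x, y, sigma):
    return np.exp(-(x**2 + y**2)/(2*sigma**2))/(2*np.pi*sigma**2)

def gaussian_psf(FWHM=1., shape=(5, 5)):
    # FWHM -> standard deviation of the Gaussian
    stdev = FWHM/(2.*np.sqrt(2.*np.log(2.)))
    x, y = np.meshgrid(np.arange(-shape[0]/2, shape[0]/2),
                       np.arange(-shape[1]/2, shape[1]/2))
    kernel = gaussian2d(x, y, stdev)
    return kernel/kernel.sum()   # unit sum: convolution preserves total flux

k = gaussian_psf(FWHM=2., shape=(25, 25))
print(k.sum())                                 # ~1.0
print(np.unravel_index(k.argmax(), k.shape))   # peak near the grid centre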

View File

@@ -13,6 +13,7 @@ import numpy as np
 from astropy.io import fits
 from astropy import wcs
 from lib.convex_hull import image_hull, clean_ROI
+import matplotlib.pyplot as plt

 def get_obs_data(infiles, data_folder="", compute_flux=False):

@@ -140,25 +141,25 @@ def save_Stokes(I_stokes, Q_stokes, U_stokes, Stokes_cov, P, debiased_P, s_P,
     #Crop Data to mask
     if data_mask.shape != (1,1):
-        I_stokes = I_stokes[vertex[0]:vertex[1],vertex[2]:vertex[3]]
+        I_stokes = I_stokes[vertex[2]:vertex[3],vertex[0]:vertex[1]]
-        Q_stokes = Q_stokes[vertex[0]:vertex[1],vertex[2]:vertex[3]]
+        Q_stokes = Q_stokes[vertex[2]:vertex[3],vertex[0]:vertex[1]]
-        U_stokes = U_stokes[vertex[0]:vertex[1],vertex[2]:vertex[3]]
+        U_stokes = U_stokes[vertex[2]:vertex[3],vertex[0]:vertex[1]]
-        P = P[vertex[0]:vertex[1],vertex[2]:vertex[3]]
+        P = P[vertex[2]:vertex[3],vertex[0]:vertex[1]]
-        debiased_P = debiased_P[vertex[0]:vertex[1],vertex[2]:vertex[3]]
+        debiased_P = debiased_P[vertex[2]:vertex[3],vertex[0]:vertex[1]]
-        s_P = s_P[vertex[0]:vertex[1],vertex[2]:vertex[3]]
+        s_P = s_P[vertex[2]:vertex[3],vertex[0]:vertex[1]]
-        s_P_P = s_P_P[vertex[0]:vertex[1],vertex[2]:vertex[3]]
+        s_P_P = s_P_P[vertex[2]:vertex[3],vertex[0]:vertex[1]]
-        PA = PA[vertex[0]:vertex[1],vertex[2]:vertex[3]]
+        PA = PA[vertex[2]:vertex[3],vertex[0]:vertex[1]]
-        s_PA = s_PA[vertex[0]:vertex[1],vertex[2]:vertex[3]]
+        s_PA = s_PA[vertex[2]:vertex[3],vertex[0]:vertex[1]]
-        s_PA_P = s_PA_P[vertex[0]:vertex[1],vertex[2]:vertex[3]]
+        s_PA_P = s_PA_P[vertex[2]:vertex[3],vertex[0]:vertex[1]]
-        new_Stokes_cov = np.zeros((3,3,shape[0],shape[1]))
+        new_Stokes_cov = np.zeros((*Stokes_cov.shape[:-2],*shape[::-1]))
         for i in range(3):
             for j in range(3):
                 Stokes_cov[i,j][(1-data_mask).astype(bool)] = 0.
-                new_Stokes_cov[i,j] = Stokes_cov[i,j][vertex[0]:vertex[1],vertex[2]:vertex[3]]
+                new_Stokes_cov[i,j] = Stokes_cov[i,j][vertex[2]:vertex[3],vertex[0]:vertex[1]]
         Stokes_cov = new_Stokes_cov
-        data_mask = data_mask[vertex[0]:vertex[1],vertex[2]:vertex[3]]
+        data_mask = data_mask[vertex[2]:vertex[3],vertex[0]:vertex[1]]
         data_mask = data_mask.astype(float, copy=False)

     #Create HDUList object

@@ -166,6 +167,7 @@ def save_Stokes(I_stokes, Q_stokes, U_stokes, Stokes_cov, P, debiased_P, s_P,
     #Add I_stokes as PrimaryHDU
     header['datatype'] = ('I_stokes', 'type of data stored in the HDU')
+    I_stokes[(1-data_mask).astype(bool)] = 0.
     primary_hdu = fits.PrimaryHDU(data=I_stokes, header=header)
     hdul.append(primary_hdu)

@@ -178,6 +180,8 @@ def save_Stokes(I_stokes, Q_stokes, U_stokes, Stokes_cov, P, debiased_P, s_P,
                       [data_mask, 'Data_mask']]:
         hdu_header = header.copy()
         hdu_header['datatype'] = name
+        if not name == 'IQU_cov_matrix':
+            data[(1-data_mask).astype(bool)] = 0.
         hdu = fits.ImageHDU(data=data,header=hdu_header)
         hdul.append(hdu)
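The crop fix swaps the slice axes: judging from the diff, vertex stores the x bounds first, while numpy arrays are indexed [row, column], i.e. [y, x], so the y bounds must come first in the slice. A toy check of that convention (the vertex layout is inferred, not documented here):

import numpy as np

# vertex = [xmin, xmax, ymin, ymax] (layout inferred from the diff);
# numpy indexes [row, col] = [y, x], so the y bounds go first.
img = np.arange(30).reshape(5, 6)   # 5 rows (y) by 6 columns (x)
vertex = [1, 4, 2, 5]               # x in [1, 4), y in [2, 5)
crop = img[vertex[2]:vertex[3], vertex[0]:vertex[1]]
print(crop.shape)                   # (3, 3): 3 rows by 3 columns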

View File

@@ -1109,6 +1109,8 @@ class pol_map(object):
             self.ax.reset_wcs(self.wcs)
             self.ax_cosmetics()
             self.display()
+            self.ax.set_xlim(0,self.I.shape[1])
+            self.ax.set_ylim(0,self.I.shape[0])
             self.pol_vector()
         else:
             self.cropped = True

@@ -1229,26 +1231,31 @@ class pol_map(object):
         def d_tf(event):
             self.display_selection = 'total_flux'
             self.display()
+            self.pol_int()
         b_tf.on_clicked(d_tf)
         def d_pf(event):
             self.display_selection = 'pol_flux'
             self.display()
+            self.pol_int()
         b_pf.on_clicked(d_pf)
         def d_p(event):
             self.display_selection = 'pol_deg'
             self.display()
+            self.pol_int()
         b_p.on_clicked(d_p)
         def d_snri(event):
             self.display_selection = 'snri'
             self.display()
+            self.pol_int()
         b_snri.on_clicked(d_snri)
         def d_snrp(event):
             self.display_selection = 'snrp'
             self.display()
+            self.pol_int()
         b_snrp.on_clicked(d_snrp)
         plt.show()

@@ -1359,8 +1366,6 @@ class pol_map(object):
         if hasattr(self, 'im'):
             self.im.remove()
         self.im = ax.imshow(self.data, vmin=vmin, vmax=vmax, aspect='auto', cmap='inferno')
-        ax.set_xlim(0,self.data.shape[1])
-        ax.set_ylim(0,self.data.shape[0])
         self.cbar = plt.colorbar(self.im, cax=self.cbar_ax, label=label)
         fig.canvas.draw_idle()
         return self.im

@@ -1412,12 +1417,12 @@ class pol_map(object):
         I_reg = self.I[self.region].sum()
         Q_reg = self.Q[self.region].sum()
         U_reg = self.U[self.region].sum()
-        I_reg_err = np.sqrt(n_pix)*np.sqrt(np.sum(s_I[self.region]**2))
+        I_reg_err = np.sqrt(np.sum(s_I[self.region]**2))
-        Q_reg_err = np.sqrt(n_pix)*np.sqrt(np.sum(s_Q[self.region]**2))
+        Q_reg_err = np.sqrt(np.sum(s_Q[self.region]**2))
-        U_reg_err = np.sqrt(n_pix)*np.sqrt(np.sum(s_U[self.region]**2))
+        U_reg_err = np.sqrt(np.sum(s_U[self.region]**2))
-        IQ_reg_err = np.sqrt(n_pix)*np.sqrt(np.sum(s_IQ[self.region]**2))
+        IQ_reg_err = np.sqrt(np.sum(s_IQ[self.region]**2))
-        IU_reg_err = np.sqrt(n_pix)*np.sqrt(np.sum(s_IU[self.region]**2))
+        IU_reg_err = np.sqrt(np.sum(s_IU[self.region]**2))
-        QU_reg_err = np.sqrt(n_pix)*np.sqrt(np.sum(s_QU[self.region]**2))
+        QU_reg_err = np.sqrt(np.sum(s_QU[self.region]**2))
         P_reg = np.sqrt(Q_reg**2+U_reg**2)/I_reg
         P_reg_err = np.sqrt((Q_reg**2*Q_reg_err**2 + U_reg**2*U_reg_err**2 + 2.*Q_reg*U_reg*QU_reg_err)/(Q_reg**2 + U_reg**2) + ((Q_reg/I_reg)**2 + (U_reg/I_reg)**2)*I_reg_err**2 - 2.*(Q_reg/I_reg)*IQ_reg_err - 2.*(U_reg/I_reg)*IU_reg_err)/I_reg
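The last hunk drops the extra sqrt(n_pix) factor: for a sum over independent pixels the variance of the sum is the sum of the variances, so the uncertainty is already sqrt(sum(s_i**2)). A quick Monte-Carlo check:

import numpy as np

rng = np.random.default_rng(1)
s = np.full(100, 2.0)                               # 100 pixels, sigma = 2 each
samples = rng.normal(0., s, size=(50000, 100)).sum(axis=1)
print(samples.std())                                # ~20
print(np.sqrt(np.sum(s**2)))                        # 20.0 = sqrt(100)*2, no extra sqrt(n_pix)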

View File

@@ -43,13 +43,13 @@ import matplotlib.pyplot as plt
 import matplotlib.dates as mdates
 from matplotlib.patches import Rectangle
 from datetime import datetime
-from scipy.ndimage import rotate as sc_rotate
-from scipy.ndimage import shift as sc_shift
+from scipy.ndimage import rotate as sc_rotate, shift as sc_shift
+from scipy.signal import convolve2d
 from astropy.wcs import WCS
 from astropy import log
 log.setLevel('ERROR')
 import warnings
-from lib.deconvolve import deconvolve_im, gaussian_psf
+from lib.deconvolve import deconvolve_im, gaussian_psf, gaussian2d, zeropad
 from lib.convex_hull import image_hull, clean_ROI
 from lib.plots import plot_obs
 from lib.cross_correlation import phase_cross_correlation

@@ -189,7 +189,7 @@ def bin_ndarray(ndarray, new_shape, operation='sum'):
     return ndarray

-def crop_array(data_array, headers, error_array=None, step=5, null_val=None,
+def crop_array(data_array, headers, error_array=None, data_mask=None, step=5, null_val=None,
                inside=False, display=False, savename=None, plots_folder=""):
     """
     Homogeneously crop an array: all contained images will have the same shape.

@@ -299,6 +299,7 @@ def crop_array(data_array, headers, error_array=None, step=5, null_val=None,
                  savename=savename+'_crop_region',plots_folder=plots_folder)
         plt.show()

+    crop_headers = deepcopy(headers)
     crop_array = np.zeros((data_array.shape[0],new_shape[0],new_shape[1]))
     crop_error_array = np.zeros((data_array.shape[0],new_shape[0],new_shape[1]))
     for i,image in enumerate(data_array):

@@ -306,11 +307,14 @@ def crop_array(data_array, headers, error_array=None, step=5, null_val=None,
         crop_array[i] = image[v_array[0]:v_array[1],v_array[2]:v_array[3]]
         crop_error_array[i] = error_array[i][v_array[0]:v_array[1],v_array[2]:v_array[3]]
         #Update CRPIX value in the associated header
-        curr_wcs = deepcopy(WCS(headers[i]))
+        curr_wcs = deepcopy(WCS(crop_headers[i]))
-        curr_wcs.wcs.crpix = curr_wcs.wcs.crpix - np.array([v_array[0], v_array[2]])
+        curr_wcs.wcs.crpix = curr_wcs.wcs.crpix - np.array([v_array[2], v_array[0]])
-        headers[i].update(curr_wcs.to_header())
+        crop_headers[i].update(curr_wcs.to_header())
-    return crop_array, crop_error_array, headers
+    if not data_mask is None:
+        crop_mask = data_mask[v_array[0]:v_array[1],v_array[2]:v_array[3]]
+        return crop_array, crop_error_array, crop_mask, crop_headers
+    else:
+        return crop_array, crop_error_array, crop_headers

 def deconvolve_array(data_array, headers, psf='gaussian', FWHM=1., scale='px',

@@ -355,7 +359,7 @@ def deconvolve_array(data_array, headers, psf='gaussian', FWHM=1., scale='px',
     for i,header in enumerate(headers):
         # Get current pixel size
         w = WCS(header).deepcopy()
-        pxsize[i] = np.round(w.wcs.cdelt/3600.,5)
+        pxsize[i] = np.round(w.wcs.cdelt/3600.,15)
     if (pxsize != pxsize[0]).any():
         raise ValueError("Not all images in array have same pixel size")
     FWHM /= pxsize[0].min()

@@ -377,7 +381,7 @@ def deconvolve_array(data_array, headers, psf='gaussian', FWHM=1., scale='px',
     return deconv_array

-def get_error(data_array, headers, sub_shape=(15,15), display=False,
+def get_error(data_array, headers, error_array=None, data_mask=None, sub_shape=None, display=False,
               savename=None, plots_folder="", return_background=False):
     """
     Look for sub-image of shape sub_shape that have the smallest integrated

@@ -391,7 +395,7 @@ def get_error(data_array, headers, sub_shape=(15,15), display=False,
         Headers associated with the images in data_array.
     sub_shape : tuple, optional
         Shape of the sub-image to look for. Must be odd.
-        Defaults to (15,15).
+        Defaults to 10% of input array.
     display : boolean, optional
         If True, data_array will be displayed with a rectangle around the
         sub-image selected for background computation.

@@ -423,8 +427,15 @@ def get_error(data_array, headers, sub_shape=(15,15), display=False,
         Only returned if return_background is True.
     """
     # Crop out any null edges
-    data, error, headers = crop_array(data_array, headers, step=5, null_val=0., inside=False)
+    if error_array is None:
+        error_array = np.zeros(data_array.shape)
+    if not data_mask is None:
+        data, error, mask = data_array, error_array, data_mask#_ = crop_array(data_array, headers, error_array, data_mask, step=5, null_val=0., inside=False)
+    else:
+        data, error, _ = crop_array(data_array, headers, error_array, step=5, null_val=0., inside=False)
+        mask = np.ones(data[0].shape, dtype=bool)
+    if sub_shape is None:
+        sub_shape = (np.array(data_array.shape[1:])/10).astype(int)
     sub_shape = np.array(sub_shape)
     # Make sub_shape of odd values
     if not(np.all(sub_shape%2)):

@@ -433,16 +444,19 @@ def get_error(data_array, headers, sub_shape=(15,15), display=False,
     shape = np.array(data.shape)
     diff = (sub_shape-1).astype(int)
     temp = np.zeros((shape[0],shape[1]-diff[0],shape[2]-diff[1]))
-    error_array = np.ones(data_array.shape)
+    error_bkg = np.ones(data_array.shape)
     rectangle = []
     background = np.zeros((shape[0]))
     for i,image in enumerate(data):
         # Find the sub-image of smallest integrated flux (suppose no source)
         #sub-image dominated by background
+        fmax = np.finfo(np.double).max
+        img = deepcopy(image)
+        img[1-mask] = fmax/(diff[0]*diff[1])
         for r in range(temp.shape[1]):
             for c in range(temp.shape[2]):
-                temp[i][r,c] = image[r:r+diff[0],c:c+diff[1]].sum()
+                temp[i][r,c] = np.where(mask[r,c], img[r:r+diff[0],c:c+diff[1]].sum(), fmax/(diff[0]*diff[1]))

     minima = np.unravel_index(np.argmin(temp.sum(axis=0)),temp.shape[1:])

@@ -451,8 +465,8 @@ def get_error(data_array, headers, sub_shape=(15,15), display=False,
         # Compute error : root mean square of the background
         sub_image = image[minima[0]:minima[0]+sub_shape[0],minima[1]:minima[1]+sub_shape[1]]
         #error = np.std(sub_image) # Previously computed using standard deviation over the background
-        error = np.sqrt(np.sum((sub_image-sub_image.mean())**2)/sub_image.size)
+        bkg = np.sqrt(np.sum((sub_image-sub_image.mean())**2)/sub_image.size)
-        error_array[i] *= error
+        error_bkg[i] *= bkg
         data_array[i] = np.abs(data_array[i] - sub_image.mean())

     # Quadratically add uncertainties in the "correction factors" (see Kishimoto 1999)

@@ -466,7 +480,7 @@ def get_error(data_array, headers, sub_shape=(15,15), display=False,
         #estimated to less than 3%
         err_flat = data_array[i]*0.03
-        error_array[i] = np.sqrt(error_array[i]**2 + err_wav**2 + err_psf**2 + err_flat**2)
+        error_array[i] = np.sqrt(error_array[i]**2 + error_bkg[i]**2 + err_wav**2 + err_psf**2 + err_flat**2)
         background[i] = sub_image.sum()

     if (data_array[i] < 0.).any():

@@ -550,7 +564,7 @@ def get_error(data_array, headers, sub_shape=(15,15), display=False,
 def rebin_array(data_array, error_array, headers, pxsize, scale,
-                operation='sum'):
+                operation='sum', data_mask=None):
     """
     Homogeneously rebin a data array to get a new pixel size equal to pxsize
     where pxsize is given in arcsec.

@@ -631,9 +645,13 @@ def rebin_array(data_array, error_array, headers, pxsize, scale,
         if operation.lower() in ["mean", "average", "avg"]:
             new_error[mask] = np.sqrt(bin_ndarray(error**2*image,
                 new_shape=new_shape, operation='average')[mask]/sum_image[mask])
+            #new_error[mask] = np.sqrt(bin_ndarray(error**2,
+            #    new_shape=new_shape, operation='average')[mask])
         else:
             new_error[mask] = np.sqrt(bin_ndarray(error**2*image,
                 new_shape=new_shape, operation='sum')[mask]/sum_image[mask])
+            #new_error[mask] = np.sqrt(bin_ndarray(error**2,
+            #    new_shape=new_shape, operation='sum')[mask])
         rebinned_error.append(np.sqrt(rms_image**2 + new_error**2))

         # Update header

@@ -641,12 +659,16 @@ def rebin_array(data_array, error_array, headers, pxsize, scale,
         header['NAXIS1'],header['NAXIS2'] = w.array_shape
         header.update(w.to_header())
         rebinned_headers.append(header)
+        if not data_mask is None:
+            data_mask = bin_ndarray(data_mask,new_shape=new_shape,operation='average') > 0.80
     rebinned_data = np.array(rebinned_data)
     rebinned_error = np.array(rebinned_error)

+    if data_mask is None:
-    return rebinned_data, rebinned_error, rebinned_headers, Dxy
+        return rebinned_data, rebinned_error, rebinned_headers, Dxy
+    else:
+        return rebinned_data, rebinned_error, rebinned_headers, Dxy, data_mask
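rebin_array now also rebins the boolean mask: it block-averages the 0/1 values and keeps only bins that are at least 80% valid. The same logic with a toy reshape-based binner in place of bin_ndarray:

import numpy as np

mask = np.ones((8, 8), dtype=bool)
mask[:3, :2] = False                               # a masked corner
avg = mask.reshape(4, 2, 4, 2).mean(axis=(1, 3))   # 2x2 block average of 0/1 values
rebinned_mask = avg > 0.80                         # keep bins that are >= 80% valid
print(avg)
print(rebinned_mask)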
 def align_data(data_array, headers, error_array=None, upsample_factor=1.,

@@ -744,7 +766,7 @@ def align_data(data_array, headers, error_array=None, upsample_factor=1.,
     # Create a rescaled null array that can contain any rotation of the
     #original image (and shifted images)
     shape = data_array.shape
-    res_shape = int(np.ceil(np.sqrt(2.5)*np.max(shape[1:])))
+    res_shape = int(np.ceil(np.sqrt(2.)*np.max(shape[1:])))
     rescaled_image = np.zeros((shape[0],res_shape,res_shape))
     rescaled_error = np.ones((shape[0],res_shape,res_shape))
     rescaled_mask = np.zeros((shape[0],res_shape,res_shape),dtype=bool)

@@ -758,7 +780,7 @@ def align_data(data_array, headers, error_array=None, upsample_factor=1.,
         # Initialize rescaled images to background values
         rescaled_error[i] *= background[i]
         # Get shifts and error by cross-correlation to ref_data
-        shift, error, phase_diff = phase_cross_correlation(ref_data, image,
+        shift, error, phase_diff = phase_cross_correlation(ref_data/ref_data.max(), image/image.max(),
                 upsample_factor=upsample_factor)
         # Rescale image to requested output
         rescaled_image[i,res_shift[0]:res_shift[0]+shape[1],

@@ -766,9 +788,9 @@ def align_data(data_array, headers, error_array=None, upsample_factor=1.,
         rescaled_error[i,res_shift[0]:res_shift[0]+shape[1],
                 res_shift[1]:res_shift[1]+shape[2]] = deepcopy(error_array[i])
         # Shift images to align
-        rescaled_image[i] = sc_shift(rescaled_image[i], shift, cval=0.)
+        rescaled_image[i] = sc_shift(rescaled_image[i], shift, order=1, cval=0.)
-        rescaled_error[i] = sc_shift(rescaled_error[i], shift, cval=background[i])
+        rescaled_error[i] = sc_shift(rescaled_error[i], shift, order=1, cval=background[i])
-        curr_mask = sc_shift(res_mask, shift, cval=False)
+        curr_mask = sc_shift(res_mask, shift, order=1, cval=False)
         mask_vertex = clean_ROI(curr_mask)
         rescaled_mask[i,mask_vertex[2]:mask_vertex[3],mask_vertex[0]:mask_vertex[1]] = True

@@ -789,17 +811,19 @@ def align_data(data_array, headers, error_array=None, upsample_factor=1.,
     errors = np.array(errors)

     # Update headers CRPIX value
-    added_pix = (np.array(rescaled_image.shape[1:]) - np.array(data_array.shape[1:]))/2
     headers_wcs = [deepcopy(WCS(header)) for header in headers]
-    new_crpix = np.array([wcs.wcs.crpix for wcs in headers_wcs]) + shifts + added_pix
+    new_crpix = np.array([wcs.wcs.crpix for wcs in headers_wcs]) + shifts[:,::-1] + res_shift[::-1]
     for i in range(len(headers_wcs)):
-        headers_wcs[i].wcs.crpix = new_crpix.mean(axis=0)
+        headers_wcs[i].wcs.crpix = new_crpix[0]
         headers[i].update(headers_wcs[i].to_header())

+    data_mask = rescaled_mask.all(axis=0)
+    data_array, error_array, data_mask, headers = crop_array(rescaled_image, headers, rescaled_error, data_mask)
     if return_shifts:
-        return rescaled_image, rescaled_error, headers, rescaled_mask.any(axis=0), shifts, errors
+        return data_array, error_array, headers, data_mask, shifts, errors
     else:
-        return rescaled_image, rescaled_error, headers, rescaled_mask.any(axis=0)
+        return data_array, error_array, headers, data_mask
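The CRPIX update now reverses axis order: measured shifts and the padding offset res_shift are in numpy (row, col), i.e. (y, x), order, while FITS CRPIX is (x, y), hence the [::-1] reversals. A toy check of the bookkeeping (all values made up):

import numpy as np

shift_rc = np.array([3., -1.])      # measured shift in (row, col) = (y, x)
res_shift = np.array([10, 10])      # padding offset of the rescaled frame, (row, col)
crpix = np.array([50., 40.])        # FITS CRPIX is (x, y)
new_crpix = crpix + shift_rc[::-1] + res_shift[::-1]
print(new_crpix)                    # [59. 53.]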
 def smooth_data(data_array, error_array, data_mask, headers, FWHM=1.,

@@ -878,20 +902,20 @@ def smooth_data(data_array, error_array, data_mask, headers, FWHM=1.,
             smoothed[np.isnan(smoothed)] = 0.
             error[np.isnan(error)] = 0.

-    elif smoothing.lower() in ['gauss','gaussian']:
+    elif smoothing.lower() in ['weight_gauss','weighted_gaussian','gauss','gaussian']:
         # Convolution with gaussian function
         smoothed = np.zeros(data_array.shape)
         error = np.zeros(error_array.shape)
         for i,image in enumerate(data_array):
-            xx, yy = np.indices(image.shape)
-            for r in range(image.shape[0]):
-                for c in range(image.shape[1]):
-                    dist_rc = np.where(data_mask, np.sqrt((r-xx)**2+(c-yy)**2), fmax)
-                    # Catch expected "OverflowWarning" as we overflow values that are not in the image
-                    with warnings.catch_warnings(record=True) as w:
-                        g_rc = np.exp(-0.5*(dist_rc/stdev)**2)/(2.*np.pi*stdev**2)
-                    smoothed[i][r,c] = np.where(data_mask[r,c], np.sum(image*g_rc), 0.)
-                    error[i][r,c] = np.where(data_mask[r,c], np.sqrt(np.sum(error_array[i]**2*g_rc**2)), 0.)
+            x, y = np.meshgrid(np.arange(-image.shape[1]/2,image.shape[1]/2),np.arange(-image.shape[0]/2,image.shape[0]/2))
+            weights = np.ones(error_array[i].shape)
+            if smoothing.lower()[:6] in ['weight']:
+                weights = 1./error_array[i]**2
+                weights /= weights.sum()
+            kernel = gaussian2d(x, y, stdev)
+            kernel /= kernel.sum()
+            smoothed[i] = convolve2d(image*weights/image.sum(),kernel,'same')*image.sum()
+            error[i] = np.sqrt(convolve2d((error_array[i]/error_array[i].sum())**2*weights**2,kernel**2,'same'))*error_array[i].sum()

             # Nan handling
             error[i][np.isnan(smoothed[i])] = 0.
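The slow per-pixel smoothing loops are replaced by a single convolution with a unit-sum Gaussian kernel; the new 'weighted' variants additionally weight pixels by inverse variance. A minimal check of the unweighted branch, showing that flux is conserved up to edge losses:

import numpy as np
from scipy.signal import convolve2d

rng = np.random.default_rng(2)
image = rng.random((40, 40))
sigma = 1.5
x, y = np.meshgrid(np.arange(-7, 8), np.arange(-7, 8))   # 15x15 centred grid
kernel = np.exp(-(x**2 + y**2)/(2*sigma**2))/(2*np.pi*sigma**2)
kernel /= kernel.sum()                                   # unit sum
smoothed = convolve2d(image, kernel, 'same')
print(image.sum(), smoothed.sum())   # nearly equal; the small deficit is edge loss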
@@ -989,16 +1013,25 @@ def polarizer_avg(data_array, error_array, data_mask, headers, FWHM=None,
     else:
         # Sum on each polarization filter.
-        print("Exposure time for polarizer 0°/60°/120° : ",
-              np.sum([header['exptime'] for header in headers0]),
-              np.sum([header['exptime'] for header in headers60]),
-              np.sum([header['exptime'] for header in headers120]))
+        pol0_t = np.sum([header['exptime'] for header in headers0])
+        pol60_t = np.sum([header['exptime'] for header in headers60])
+        pol120_t = np.sum([header['exptime'] for header in headers120])
+        for i in range(pol0_array.shape[0]):
+            pol0_array[i] *= headers0[i]['exptime']/pol0_t
+            err0_array[i] *= headers0[i]['exptime']/pol0_t
+        for i in range(pol60_array.shape[0]):
+            pol60_array[i] *= headers60[i]['exptime']/pol60_t
+            err60_array[i] *= headers60[i]['exptime']/pol60_t
+        for i in range(pol120_array.shape[0]):
+            pol120_array[i] *= headers120[i]['exptime']/pol120_t
+            err120_array[i] *= headers120[i]['exptime']/pol120_t
         pol0 = pol0_array.sum(axis=0)
         pol60 = pol60_array.sum(axis=0)
         pol120 = pol120_array.sum(axis=0)
         pol_array = np.array([pol0, pol60, pol120])

         # Propagate uncertainties quadratically
         err0 = np.sqrt(np.sum(err0_array**2,axis=0))
         err60 = np.sqrt(np.sum(err60_array**2,axis=0))
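Frames are now co-added with exposure-time weights instead of a plain sum, so each frame contributes in proportion to its exposure time and a common count rate is recovered exactly. A toy version for a single polarizer (synthetic numbers):

import numpy as np

rates = np.array([5., 5., 5.])                 # common true count rate per frame
exptime = np.array([100., 300., 600.])
frames = rates[:, None, None]*np.ones((3, 4, 4))
t_tot = exptime.sum()
weighted = frames*(exptime/t_tot)[:, None, None]
coadd = weighted.sum(axis=0)
print(coadd[0, 0])                             # 5.0: the common rate is recovered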
@@ -1145,33 +1178,22 @@ def compute_Stokes(data_array, error_array, data_mask, headers,
         coeff_stokes[2,i] = (pol_eff[(i+1)%3]*np.cos(2.*theta[(i+1)%3]) - pol_eff[(i+2)%3]*np.cos(2.*theta[(i+2)%3]))*2./transmit[i]

     # Normalization parameter for Stokes parameters computation
-    A = coeff_stokes[0,:].sum()
+    A = (coeff_stokes[0,:]*transmit/2.).sum()
     coeff_stokes = coeff_stokes/A

-    I_stokes = np.sum([coeff_stokes[0,i]*pol_flux[i] for i in range(3)], axis=0)
-    Q_stokes = np.sum([coeff_stokes[1,i]*pol_flux[i] for i in range(3)], axis=0)
-    U_stokes = np.sum([coeff_stokes[2,i]*pol_flux[i] for i in range(3)], axis=0)
-    # Remove nan
-    I_stokes[np.isnan(I_stokes)]=0.
-    Q_stokes[I_stokes == 0.]=0.
-    U_stokes[I_stokes == 0.]=0.
-    Q_stokes[np.isnan(Q_stokes)]=0.
-    U_stokes[np.isnan(U_stokes)]=0.
+    I_stokes = np.zeros(pol_array[0].shape)
+    Q_stokes = np.zeros(pol_array[0].shape)
+    U_stokes = np.zeros(pol_array[0].shape)
+    Stokes_cov = np.zeros((3,3,I_stokes.shape[0],I_stokes.shape[1]))
+    for i in range(I_stokes.shape[0]):
+        for j in range(I_stokes.shape[1]):
+            I_stokes[i,j], Q_stokes[i,j], U_stokes[i,j] = np.dot(coeff_stokes, pol_flux[:,i,j]).T
+            Stokes_cov[:,:,i,j] = np.dot(coeff_stokes, np.dot(pol_cov[:,:,i,j], coeff_stokes.T))

     mask = (Q_stokes**2 + U_stokes**2) > I_stokes**2
     if mask.any():
         print("WARNING : found {0:d} pixels for which I_pol > I_stokes".format(I_stokes[mask].size))

-    #Stokes covariance matrix
-    Stokes_cov = np.zeros((3,3,I_stokes.shape[0],I_stokes.shape[1]))
-    Stokes_cov[0,0] = coeff_stokes[0,0]**2*pol_cov[0,0]+coeff_stokes[0,1]**2*pol_cov[1,1]+coeff_stokes[0,2]**2*pol_cov[2,2] + 2*(coeff_stokes[0,0]*coeff_stokes[0,1]*pol_cov[0,1]+coeff_stokes[0,0]*coeff_stokes[0,2]*pol_cov[0,2]+coeff_stokes[0,1]*coeff_stokes[0,2]*pol_cov[1,2])
-    Stokes_cov[1,1] = coeff_stokes[1,0]**2*pol_cov[0,0]+coeff_stokes[1,1]**2*pol_cov[1,1]+coeff_stokes[1,2]**2*pol_cov[2,2] + 2*(coeff_stokes[1,0]*coeff_stokes[1,1]*pol_cov[0,1]+coeff_stokes[1,0]*coeff_stokes[1,2]*pol_cov[0,2]+coeff_stokes[1,1]*coeff_stokes[1,2]*pol_cov[1,2])
-    Stokes_cov[2,2] = coeff_stokes[2,0]**2*pol_cov[0,0]+coeff_stokes[2,1]**2*pol_cov[1,1]+coeff_stokes[2,2]**2*pol_cov[2,2] + 2*(coeff_stokes[2,0]*coeff_stokes[2,1]*pol_cov[0,1]+coeff_stokes[2,0]*coeff_stokes[2,2]*pol_cov[0,2]+coeff_stokes[2,1]*coeff_stokes[2,2]*pol_cov[1,2])
-    Stokes_cov[0,1] = Stokes_cov[1,0] = coeff_stokes[0,0]*coeff_stokes[1,0]*pol_cov[0,0]+coeff_stokes[0,1]*coeff_stokes[1,1]*pol_cov[1,1]+coeff_stokes[0,2]*coeff_stokes[1,2]*pol_cov[2,2]+(coeff_stokes[0,0]*coeff_stokes[1,1]+coeff_stokes[1,0]*coeff_stokes[0,1])*pol_cov[0,1]+(coeff_stokes[0,0]*coeff_stokes[1,2]+coeff_stokes[1,0]*coeff_stokes[0,2])*pol_cov[0,2]+(coeff_stokes[0,1]*coeff_stokes[1,2]+coeff_stokes[1,1]*coeff_stokes[0,2])*pol_cov[1,2]
-    Stokes_cov[0,2] = Stokes_cov[2,0] = coeff_stokes[0,0]*coeff_stokes[2,0]*pol_cov[0,0]+coeff_stokes[0,1]*coeff_stokes[2,1]*pol_cov[1,1]+coeff_stokes[0,2]*coeff_stokes[2,2]*pol_cov[2,2]+(coeff_stokes[0,0]*coeff_stokes[2,1]+coeff_stokes[2,0]*coeff_stokes[0,1])*pol_cov[0,1]+(coeff_stokes[0,0]*coeff_stokes[2,2]+coeff_stokes[2,0]*coeff_stokes[0,2])*pol_cov[0,2]+(coeff_stokes[0,1]*coeff_stokes[2,2]+coeff_stokes[2,1]*coeff_stokes[0,2])*pol_cov[1,2]
-    Stokes_cov[1,2] = Stokes_cov[2,1] = coeff_stokes[1,0]*coeff_stokes[2,0]*pol_cov[0,0]+coeff_stokes[1,1]*coeff_stokes[2,1]*pol_cov[1,1]+coeff_stokes[1,2]*coeff_stokes[2,2]*pol_cov[2,2]+(coeff_stokes[1,0]*coeff_stokes[2,1]+coeff_stokes[2,0]*coeff_stokes[1,1])*pol_cov[0,1]+(coeff_stokes[1,0]*coeff_stokes[2,2]+coeff_stokes[2,0]*coeff_stokes[1,2])*pol_cov[0,2]+(coeff_stokes[1,1]*coeff_stokes[2,2]+coeff_stokes[2,1]*coeff_stokes[1,2])*pol_cov[1,2]

     # Compute the derivative of each Stokes parameter with respect to the polarizer orientation
     dI_dtheta1 = 2.*pol_eff[0]/A*(pol_eff[2]*np.cos(-2.*theta[2]+2.*theta[0])*(pol_flux[1]-I_stokes) - pol_eff[1]*np.cos(-2.*theta[0]+2.*theta[1])*(pol_flux[2]-I_stokes))
     dI_dtheta2 = 2.*pol_eff[1]/A*(pol_eff[0]*np.cos(-2.*theta[0]+2.*theta[1])*(pol_flux[2]-I_stokes) - pol_eff[2]*np.cos(-2.*theta[1]+2.*theta[2])*(pol_flux[0]-I_stokes))
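The five explicit covariance formulas removed above are exactly the congruence S = M C M^T with M = coeff_stokes, which the new per-pixel matrix product computes directly. A check on one random pixel that the two agree:

import numpy as np

rng = np.random.default_rng(3)
M = rng.random((3, 3))              # stand-in for coeff_stokes at one pixel
A = rng.random((3, 3))
C = A @ A.T                         # a valid symmetric covariance (pol_cov)
S = M @ C @ M.T                     # the new matrix-product propagation
# element-wise version of the removed Stokes_cov[0,0] formula:
s00 = (M[0, 0]**2*C[0, 0] + M[0, 1]**2*C[1, 1] + M[0, 2]**2*C[2, 2]
       + 2*(M[0, 0]*M[0, 1]*C[0, 1] + M[0, 0]*M[0, 2]*C[0, 2]
            + M[0, 1]*M[0, 2]*C[1, 2]))
print(np.isclose(S[0, 0], s00))     # True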
@@ -1198,13 +1220,14 @@ def compute_Stokes(data_array, error_array, data_mask, headers,
     Stokes_cov[1,1] += s_Q2_axis
     Stokes_cov[2,2] += s_U2_axis

-    if not(FWHM is None) and (smoothing.lower() in ['gaussian_after','gauss_after']):
+    if not(FWHM is None) and (smoothing.lower() in ['weighted_gaussian_after','weight_gauss_after','gaussian_after','gauss_after']):
+        smoothing = smoothing.lower()[:-6]
         Stokes_array = np.array([I_stokes, Q_stokes, U_stokes])
         Stokes_error = np.array([np.sqrt(Stokes_cov[i,i]) for i in range(3)])
         Stokes_headers = headers[0:3]
-        Stokes_array, Stokes_error = smooth_data(Stokes_array, Stokes_error,
-                headers=Stokes_headers, FWHM=FWHM, scale=scale)
+        Stokes_array, Stokes_error = smooth_data(Stokes_array, Stokes_error, data_mask,
+                headers=Stokes_headers, FWHM=FWHM, scale=scale, smoothing=smoothing)
         I_stokes, Q_stokes, U_stokes = Stokes_array
         Stokes_cov[0,0], Stokes_cov[1,1], Stokes_cov[2,2] = Stokes_error**2

@@ -1215,12 +1238,12 @@ def compute_Stokes(data_array, error_array, data_mask, headers,
     I_diluted = I_stokes[mask].sum()
     Q_diluted = Q_stokes[mask].sum()
     U_diluted = U_stokes[mask].sum()
-    I_diluted_err = np.sqrt(n_pix)*np.sqrt(np.sum(Stokes_cov[0,0][mask]))
+    I_diluted_err = np.sqrt(np.sum(Stokes_cov[0,0][mask]))
-    Q_diluted_err = np.sqrt(n_pix)*np.sqrt(np.sum(Stokes_cov[1,1][mask]))
+    Q_diluted_err = np.sqrt(np.sum(Stokes_cov[1,1][mask]))
-    U_diluted_err = np.sqrt(n_pix)*np.sqrt(np.sum(Stokes_cov[2,2][mask]))
+    U_diluted_err = np.sqrt(np.sum(Stokes_cov[2,2][mask]))
-    IQ_diluted_err = np.sqrt(n_pix)*np.sqrt(np.sum(Stokes_cov[0,1][mask]**2))
+    IQ_diluted_err = np.sqrt(np.sum(Stokes_cov[0,1][mask]**2))
-    IU_diluted_err = np.sqrt(n_pix)*np.sqrt(np.sum(Stokes_cov[0,2][mask]**2))
+    IU_diluted_err = np.sqrt(np.sum(Stokes_cov[0,2][mask]**2))
-    QU_diluted_err = np.sqrt(n_pix)*np.sqrt(np.sum(Stokes_cov[1,2][mask]**2))
+    QU_diluted_err = np.sqrt(np.sum(Stokes_cov[1,2][mask]**2))
     P_diluted = np.sqrt(Q_diluted**2+U_diluted**2)/I_diluted
     P_diluted_err = (1./I_diluted)*np.sqrt((Q_diluted**2*Q_diluted_err**2 + U_diluted**2*U_diluted_err**2 + 2.*Q_diluted*U_diluted*QU_diluted_err)/(Q_diluted**2 + U_diluted**2) + ((Q_diluted/I_diluted)**2 + (U_diluted/I_diluted)**2)*I_diluted_err**2 - 2.*(Q_diluted/I_diluted)*IQ_diluted_err - 2.*(U_diluted/I_diluted)*IU_diluted_err)
@@ -1391,33 +1414,41 @@ def rotate_Stokes(I_stokes, Q_stokes, U_stokes, Stokes_cov, data_mask, headers,
     #Rotate I_stokes, Q_stokes, U_stokes using rotation matrix
     alpha = ang*np.pi/180.
-    new_I_stokes = 1.*I_stokes
-    new_Q_stokes = np.cos(2*alpha)*Q_stokes + np.sin(2*alpha)*U_stokes
-    new_U_stokes = -np.sin(2*alpha)*Q_stokes + np.cos(2*alpha)*U_stokes
-    #Compute new covariance matrix on rotated parameters
-    new_Stokes_cov = deepcopy(Stokes_cov)
-    new_Stokes_cov[1,1] = np.cos(2.*alpha)**2*Stokes_cov[1,1] + np.sin(2.*alpha)**2*Stokes_cov[2,2] + 2.*np.cos(2.*alpha)*np.sin(2.*alpha)*Stokes_cov[1,2]
-    new_Stokes_cov[2,2] = np.sin(2.*alpha)**2*Stokes_cov[1,1] + np.cos(2.*alpha)**2*Stokes_cov[2,2] - 2.*np.cos(2.*alpha)*np.sin(2.*alpha)*Stokes_cov[1,2]
-    new_Stokes_cov[0,1] = new_Stokes_cov[1,0] = np.cos(2.*alpha)*Stokes_cov[0,1] + np.sin(2.*alpha)*Stokes_cov[0,2]
-    new_Stokes_cov[0,2] = new_Stokes_cov[2,0] = -np.sin(2.*alpha)*Stokes_cov[0,1] + np.cos(2.*alpha)*Stokes_cov[0,2]
-    new_Stokes_cov[1,2] = new_Stokes_cov[2,1] = np.cos(2.*alpha)*np.sin(2.*alpha)*(Stokes_cov[2,2] - Stokes_cov[1,1]) + (np.cos(2.*alpha)**2 - np.sin(2.*alpha)**2)*Stokes_cov[1,2]
+    mrot = np.array([[1., 0., 0.],
+                     [0., np.cos(2.*alpha), np.sin(2.*alpha)],
+                     [0, -np.sin(2.*alpha), np.cos(2.*alpha)]])
+    old_center = np.array(I_stokes.shape)/2
+    shape = np.fix(np.array(I_stokes.shape)*np.sqrt(2.5)).astype(int)
+    new_center = np.array(shape)/2
+    I_stokes = zeropad(I_stokes, shape)
+    Q_stokes = zeropad(Q_stokes, shape)
+    U_stokes = zeropad(U_stokes, shape)
+    data_mask = zeropad(data_mask, shape)
+    Stokes_cov = zeropad(Stokes_cov, [*Stokes_cov.shape[:-2],*shape])
+    new_I_stokes = np.zeros(shape)
+    new_Q_stokes = np.zeros(shape)
+    new_U_stokes = np.zeros(shape)
+    new_Stokes_cov = np.zeros((*Stokes_cov.shape[:-2],*shape))
+    for i in range(shape[0]):
+        for j in range(shape[1]):
+            new_I_stokes[i,j], new_Q_stokes[i,j], new_U_stokes[i,j] = np.dot(mrot, np.array([I_stokes[i,j], Q_stokes[i,j], U_stokes[i,j]])).T
+            new_Stokes_cov[:,:,i,j] = np.dot(mrot, np.dot(Stokes_cov[:,:,i,j], mrot.T))
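Two things happen in the new version: the frame is zero-padded by a factor sqrt(2.5) ≈ 1.58 > sqrt(2), so the rotated footprint of the original image always fits, and (Q, U) are rotated through the 3x3 matrix mrot (Stokes Q and U mix with angle 2*alpha, I is unchanged), with the covariance transformed as M.Sigma.M^T pixel by pixel. The double Python loop can also be written in vectorized form; a sketch, assuming Stokes_cov has shape (3, 3, ny, nx):

    S = np.stack([I_stokes, Q_stokes, U_stokes])          # (3, ny, nx)
    new_I_stokes, new_Q_stokes, new_U_stokes = np.tensordot(mrot, S, axes=1)
    # Per-pixel congruence transform of the covariance: M . Sigma . M^T
    new_Stokes_cov = np.einsum('ik,klyx,jl->ijyx', mrot, Stokes_cov, mrot)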
     #Rotate original images using scipy.ndimage.rotate
-    new_I_stokes = sc_rotate(new_I_stokes, ang, order=5, reshape=False, cval=0.)
-    new_Q_stokes = sc_rotate(new_Q_stokes, ang, order=5, reshape=False, cval=0.)
-    new_U_stokes = sc_rotate(new_U_stokes, ang, order=5, reshape=False, cval=0.)
-    new_data_mask = sc_rotate(data_mask.astype(float)*10., ang, order=5, reshape=False, cval=0.)
+    new_I_stokes = sc_rotate(new_I_stokes, ang, order=1, reshape=False, cval=0.)
+    new_Q_stokes = sc_rotate(new_Q_stokes, ang, order=1, reshape=False, cval=0.)
+    new_U_stokes = sc_rotate(new_U_stokes, ang, order=1, reshape=False, cval=0.)
+    new_data_mask = sc_rotate(data_mask.astype(float)*10., ang, order=1, reshape=False, cval=0.)
     new_data_mask[new_data_mask < 2] = 0.
     new_data_mask = new_data_mask.astype(bool)
     for i in range(3):
         for j in range(3):
-            new_Stokes_cov[i,j] = sc_rotate(new_Stokes_cov[i,j], ang,
+            new_Stokes_cov[i,j] = sc_rotate(new_Stokes_cov[i,j], ang, order=1,
                                             reshape=False, cval=0.)
             new_Stokes_cov[i,i] = np.abs(new_Stokes_cov[i,i])
-    old_center = np.array(I_stokes.shape)/2
-    new_center = np.array(new_I_stokes.shape)/2
     #Update headers to new angle
     new_headers = []
@@ -1445,7 +1476,7 @@ def rotate_Stokes(I_stokes, Q_stokes, U_stokes, Stokes_cov, data_mask, headers,
     elif new_wcs.wcs.has_pc():      # PC matrix + CDELT
         newpc = np.dot(mrot, new_wcs.wcs.get_pc())
         new_wcs.wcs.pc = newpc
-        new_wcs.wcs.crpix = np.dot(mrot, new_wcs.wcs.crpix - old_center) + new_center
+        new_wcs.wcs.crpix = np.dot(mrot, new_wcs.wcs.crpix - old_center[::-1]) + new_center[::-1]
     new_wcs.wcs.set()
     new_header.update(new_wcs.to_header())
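The [::-1] matters because the centers are computed from numpy shapes, which are (row, col) = (y, x), while FITS CRPIX is stored in (x, y) order. A self-contained sketch of the same bookkeeping with made-up numbers:

    import numpy as np

    a = np.deg2rad(28.)
    mrot = np.array([[np.cos(a), -np.sin(a)],
                     [np.sin(a),  np.cos(a)]])
    old_center = np.array((512, 512))/2      # numpy order: (y, x)
    new_center = np.array((809, 809))/2      # after zero-padding by sqrt(2.5)
    crpix = np.array([256., 256.])           # FITS order: (x, y)
    # Flip the centers to (x, y) before mixing them with CRPIX.
    crpix = np.dot(mrot, crpix - old_center[::-1]) + new_center[::-1]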
@@ -1467,12 +1498,12 @@ def rotate_Stokes(I_stokes, Q_stokes, U_stokes, Stokes_cov, data_mask, headers,
     I_diluted = new_I_stokes[mask].sum()
     Q_diluted = new_Q_stokes[mask].sum()
     U_diluted = new_U_stokes[mask].sum()
-    I_diluted_err = np.sqrt(n_pix)*np.sqrt(np.sum(new_Stokes_cov[0,0][mask]))
-    Q_diluted_err = np.sqrt(n_pix)*np.sqrt(np.sum(new_Stokes_cov[1,1][mask]))
-    U_diluted_err = np.sqrt(n_pix)*np.sqrt(np.sum(new_Stokes_cov[2,2][mask]))
-    IQ_diluted_err = np.sqrt(n_pix)*np.sqrt(np.sum(new_Stokes_cov[0,1][mask]**2))
-    IU_diluted_err = np.sqrt(n_pix)*np.sqrt(np.sum(new_Stokes_cov[0,2][mask]**2))
-    QU_diluted_err = np.sqrt(n_pix)*np.sqrt(np.sum(new_Stokes_cov[1,2][mask]**2))
+    I_diluted_err = np.sqrt(np.sum(new_Stokes_cov[0,0][mask]))
+    Q_diluted_err = np.sqrt(np.sum(new_Stokes_cov[1,1][mask]))
+    U_diluted_err = np.sqrt(np.sum(new_Stokes_cov[2,2][mask]))
+    IQ_diluted_err = np.sqrt(np.sum(new_Stokes_cov[0,1][mask]**2))
+    IU_diluted_err = np.sqrt(np.sum(new_Stokes_cov[0,2][mask]**2))
+    QU_diluted_err = np.sqrt(np.sum(new_Stokes_cov[1,2][mask]**2))
     P_diluted = np.sqrt(Q_diluted**2+U_diluted**2)/I_diluted
     P_diluted_err = (1./I_diluted)*np.sqrt((Q_diluted**2*Q_diluted_err**2 + U_diluted**2*U_diluted_err**2 + 2.*Q_diluted*U_diluted*QU_diluted_err)/(Q_diluted**2 + U_diluted**2) + ((Q_diluted/I_diluted)**2 + (U_diluted/I_diluted)**2)*I_diluted_err**2 - 2.*(Q_diluted/I_diluted)*IQ_diluted_err - 2.*(U_diluted/I_diluted)*IU_diluted_err)
@@ -1518,19 +1549,26 @@ def rotate_data(data_array, error_array, data_mask, headers, ang):
     #Rotate I_stokes, Q_stokes, U_stokes using rotation matrix
     alpha = ang*np.pi/180.
+    old_center = np.array(data_array[0].shape)/2
+    shape = np.fix(np.array(data_array[0].shape)*np.sqrt(2.5)).astype(int)
+    new_center = np.array(shape)/2
+    data_array = zeropad(data_array, [data_array.shape[0],*shape])
+    error_array = zeropad(error_array, [error_array.shape[0],*shape])
+    data_mask = zeropad(data_mask, shape)
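zeropad is the project's own helper and its source is not part of this diff; judging from the call sites (a target shape whose trailing axes are grown, leading axes left alone), a hypothetical implementation could look like:

    import numpy as np

    def zeropad(arr, shape):
        """Center arr inside a zero-filled array of the given target shape;
        axes whose size does not change are copied through untouched."""
        arr = np.asarray(arr)
        out = np.zeros(shape, dtype=arr.dtype)
        sl = tuple(slice((n - o)//2, (n - o)//2 + o)
                   for n, o in zip(shape, arr.shape))
        out[sl] = arr
        return out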
     #Rotate original images using scipy.ndimage.rotate
     new_data_array = []
     new_error_array = []
     for i in range(data_array.shape[0]):
-        new_data_array.append(sc_rotate(data_array[i], ang, order=5, reshape=False,
-                                        cval=0.))
-        new_error_array.append(sc_rotate(error_array[i], ang, order=5, reshape=False,
-                                        cval=error_array.mean()))
+        new_data_array.append(sc_rotate(data_array[i], ang, order=1, reshape=False,
+                                        cval=0.))
+        new_error_array.append(sc_rotate(error_array[i], ang, order=1, reshape=False,
+                                        cval=0.))
     new_data_array = np.array(new_data_array)
-    new_data_mask = sc_rotate(data_mask*10., ang, order=5, reshape=False, cval=0.)
+    new_error_array = np.array(new_error_array)
+    new_data_mask = sc_rotate(data_mask*10., ang, order=1, reshape=False, cval=0.)
     new_data_mask[new_data_mask < 2] = 0.
     new_data_mask = new_data_mask.astype(bool)
-    new_error_array = np.array(new_error_array)
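The error maps are now padded with cval=0. instead of error_array.mean(), so the empty border introduced by the rotation no longer masquerades as measured noise. The per-exposure loop boils down to (a toy sketch):

    import numpy as np
    from scipy.ndimage import rotate as sc_rotate

    rng = np.random.default_rng(0)
    data = rng.random((4, 32, 32))       # stack of exposures
    err = 0.05*np.ones_like(data)
    rot = lambda a: sc_rotate(a, 28., order=1, reshape=False, cval=0.)
    new_data = np.array([rot(d) for d in data])
    new_err = np.array([rot(e) for e in err])
    new_data[new_data < 0.] = 0.         # negative pixels are zeroed, as in the pipeline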
     for i in range(new_data_array.shape[0]):
         new_data_array[i][new_data_array[i] < 0.] = 0.
@@ -1562,6 +1600,7 @@ def rotate_data(data_array, error_array, data_mask, headers, ang):
     elif new_wcs.wcs.has_pc():      # PC matrix + CDELT
         newpc = np.dot(mrot, new_wcs.wcs.get_pc())
         new_wcs.wcs.pc = newpc
+        new_wcs.wcs.crpix = np.dot(mrot, new_wcs.wcs.crpix - old_center[::-1]) + new_center[::-1]
     new_wcs.wcs.set()
     new_header.update(new_wcs.to_header())


@@ -4,33 +4,33 @@ import numpy as np
 from copy import deepcopy
 from lib.plots import overplot_radio
-Stokes_UV = fits.open("../../data/IC5063_x3nl030/IC5063_FOC_combine_FWHM020.fits")
-Stokes_18GHz = fits.open("../../data/IC5063_x3nl030/radio/IC5063.18GHz.fits")
-Stokes_24GHz = fits.open("../../data/IC5063_x3nl030/radio/IC5063.24GHz.fits")
-Stokes_103GHz = fits.open("../../data/IC5063_x3nl030/radio/I5063_103GHz.fits")
-Stokes_229GHz = fits.open("../../data/IC5063_x3nl030/radio/I5063_229GHz.fits")
-Stokes_357GHz = fits.open("../../data/IC5063_x3nl030/radio/I5063_357GHz.fits")
+Stokes_UV = fits.open("../data/IC5063_x3nl030/IC5063_FOC_combine_FWHM020.fits")
+Stokes_18GHz = fits.open("../data/IC5063_x3nl030/radio/IC5063.18GHz.fits")
+Stokes_24GHz = fits.open("../data/IC5063_x3nl030/radio/IC5063.24GHz.fits")
+Stokes_103GHz = fits.open("../data/IC5063_x3nl030/radio/I5063_103GHz.fits")
+Stokes_229GHz = fits.open("../data/IC5063_x3nl030/radio/I5063_229GHz.fits")
+Stokes_357GHz = fits.open("../data/IC5063_x3nl030/radio/I5063_357GHz.fits")
 levelsMorganti = np.array([1.,2.,3.,8.,16.,32.,64.,128.])
 #levels18GHz = np.array([0.6, 1.5, 3, 6, 12, 24, 48, 96])/100.*Stokes_18GHz[0].data.max()
 levels18GHz = levelsMorganti*0.28*1e-3
 A = overplot_radio(Stokes_UV, Stokes_18GHz)
-A.plot(levels=levels18GHz, SNRp_cut=10.0, SNRi_cut=100.0, savename='../../plots/IC5063_x3nl030/18GHz_overplot_forced.png')
+A.plot(levels=levels18GHz, SNRp_cut=3.0, SNRi_cut=80.0, savename='../plots/IC5063_x3nl030/18GHz_overplot_forced.png')
 #levels24GHz = np.array([1.,1.5, 3, 6, 12, 24, 48, 96])/100.*Stokes_24GHz[0].data.max()
 levels24GHz = levelsMorganti*0.46*1e-3
 B = overplot_radio(Stokes_UV, Stokes_24GHz)
-B.plot(levels=levels24GHz, SNRp_cut=10.0, SNRi_cut=100.0, savename='../../plots/IC5063_x3nl030/24GHz_overplot_forced.png')
+B.plot(levels=levels24GHz, SNRp_cut=3.0, SNRi_cut=80.0, savename='../plots/IC5063_x3nl030/24GHz_overplot_forced.png')
 levels103GHz = np.linspace(1,99,11)/100.*np.max(deepcopy(Stokes_103GHz[0].data[Stokes_103GHz[0].data > 0.]))
 C = overplot_radio(Stokes_UV, Stokes_103GHz)
-C.plot(levels=levels103GHz, SNRp_cut=10.0, SNRi_cut=100.0, savename='../../plots/IC5063_x3nl030/103GHz_overplot_forced.png')
+C.plot(levels=levels103GHz, SNRp_cut=3.0, SNRi_cut=80.0, savename='../plots/IC5063_x3nl030/103GHz_overplot_forced.png')
 levels229GHz = np.linspace(1,99,11)/100.*np.max(deepcopy(Stokes_229GHz[0].data[Stokes_229GHz[0].data > 0.]))
 D = overplot_radio(Stokes_UV, Stokes_229GHz)
-D.plot(levels=levels229GHz, SNRp_cut=10.0, SNRi_cut=100.0, savename='../../plots/IC5063_x3nl030/229GHz_overplot_forced.png')
+D.plot(levels=levels229GHz, SNRp_cut=3.0, SNRi_cut=80.0, savename='../plots/IC5063_x3nl030/229GHz_overplot_forced.png')
 levels357GHz = np.linspace(1,99,11)/100.*np.max(deepcopy(Stokes_357GHz[0].data[Stokes_357GHz[0].data > 0.]))
 E = overplot_radio(Stokes_UV, Stokes_357GHz)
-E.plot(levels=levels357GHz, SNRp_cut=10.0, SNRi_cut=100.0, savename='../../plots/IC5063_x3nl030/357GHz_overplot_forced.png')
+E.plot(levels=levels357GHz, SNRp_cut=3.0, SNRi_cut=80.0, savename='../plots/IC5063_x3nl030/357GHz_overplot_forced.png')
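Besides the path fixes, the overlays loosen the vector-selection cuts from SNRp_cut=10, SNRi_cut=100 to 3 and 80, keeping many more polarization vectors in the plots. What such cuts presumably select (overplot_radio's internals are not in this diff, so this is only an illustrative reading):

    import numpy as np

    rng = np.random.default_rng(1)
    I = rng.random((64, 64)) + 0.5
    I_err = 0.01
    P = 0.1*rng.random((64, 64))
    P_err = 0.02
    # Keep a pixel's polarization vector only where both the polarization
    # degree and the total intensity are detected above the requested SNR.
    keep = (P/P_err > 3.0) & (I/I_err > 80.0)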