correct some error propagation

Thibault Barnouin
2022-03-25 17:53:26 +01:00
parent fa322a6653
commit 453afca32c
89 changed files with 132 additions and 41 deletions

[84 binary image files changed; previews not shown. Images were added, removed, or regenerated, with file sizes between ~57 KiB and ~843 KiB.]
View File

@@ -109,7 +109,7 @@ def main():
     align_center = 'image' #If None will align image to image center
     display_data = False
     # Smoothing
-    smoothing_function = 'gaussian' #gaussian_after, gaussian or combine
+    smoothing_function = 'combine' #gaussian_after, gaussian or combine
     smoothing_FWHM = 0.20 #If None, no smoothing is done
     smoothing_scale = 'arcsec' #pixel or arcsec
     # Rotation
@@ -118,8 +118,8 @@ def main():
     # Polarization map output
     figname = 'NGC1068_FOC' #target/intrument name
     figtype = '_combine_FWHM020_wae' #additionnal informations
-    SNRp_cut = 3. #P measurments with SNR>3
-    SNRi_cut = 30. #I measurments with SNR>30, which implies an uncertainty in P of 4.7%.
+    SNRp_cut = 10. #P measurments with SNR>3
+    SNRi_cut = 100. #I measurments with SNR>30, which implies an uncertainty in P of 4.7%.
     step_vec = 1 #plot all vectors in the array. if step_vec = 2, then every other vector will be plotted
     # if step_vec = 0 then all vectors are displayed at full length
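
Note: the "4.7%" in the SNRi_cut comment follows from the usual small-error approximation sigma_P ~ sqrt(2)/SNR_I (this assumes comparable uncertainties on the Stokes parameters; the sqrt(2) factor is inferred from the quoted number, not stated in the diff). A quick check, as a minimal Python sketch:

import numpy as np

# sigma_P ~ sqrt(2)/SNR_I under the small-error approximation (assumed here)
for snr_i in (30., 100.):
    print("SNRi_cut = {0:3.0f} -> sigma_P ~ {1:.1f}%".format(snr_i, np.sqrt(2.)/snr_i*100.))
# 30 -> ~4.7% (the value quoted in the comment); 100 -> ~1.4%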
@@ -131,14 +131,17 @@ def main():
         if (data < 0.).any():
             print("ETAPE 1 : ", data)
     # Crop data to remove outside blank margins.
+    print(">Crop")
     data_array, error_array, headers = proj_red.crop_array(data_array, headers, step=5, null_val=0., inside=True, display=display_crop, savename=figname, plots_folder=plots_folder)
     for data in data_array:
         if (data < 0.).any():
             print("ETAPE 2 : ", data)
     # Deconvolve data using Richardson-Lucy iterative algorithm with a gaussian PSF of given FWHM.
     if deconvolve:
+        print(">Deconvolve")
         data_array = proj_red.deconvolve_array(data_array, headers, psf=psf, FWHM=psf_FWHM, scale=psf_scale, shape=psf_shape, iterations=iterations)
     # Estimate error from data background, estimated from sub-image of desired sub_shape.
+    print(">Get error")
     data_array, error_array, headers = proj_red.get_error(data_array, headers, sub_shape=error_sub_shape, display=display_error, savename=figname+"_errors", plots_folder=plots_folder)
     for data in data_array:
         if (data < 0.).any():
@@ -146,6 +149,7 @@ def main():
     # Rebin data to desired pixel size.
     Dxy = np.ones(2)
     if rebin:
+        print(">Rebin")
         data_array, error_array, headers, Dxy = proj_red.rebin_array(data_array, error_array, headers, pxsize=pxsize, scale=px_scale, operation=rebin_operation)
     for data in data_array:
         if (data < 0.).any():
@@ -153,6 +157,7 @@ def main():
     # Align and rescale images with oversampling.
     data_mask = np.zeros(data_array.shape[1:]).astype(bool)
     if px_scale.lower() not in ['full','integrate']:
+        print(">Align")
         data_array, error_array, headers, data_mask = proj_red.align_data(data_array, headers, error_array, upsample_factor=int(Dxy.min()), ref_center=align_center, return_shifts=False)
     for data in data_array:
         if (data < 0.).any():
@@ -168,6 +173,7 @@ def main():
     # Rotate data to have North up
     ref_header = deepcopy(headers[0])
     if rotate_data:
+        print(">Rotate data")
         alpha = ref_header['orientat']
         mrot = np.array([[np.cos(-alpha), -np.sin(-alpha)], [np.sin(-alpha), np.cos(-alpha)]])
         rectangle[0:2] = np.dot(mrot, np.asarray(rectangle[0:2]))+np.array(data_array.shape[1:])/2
@@ -186,12 +192,14 @@ def main():
     # FWHM of FOC have been estimated at about 0.03" across 1500-5000 Angstrom band, which is about 2 detector pixels wide
     # see Jedrzejewski, R.; Nota, A.; Hack, W. J., A Comparison Between FOC and WFPC2
     # Bibcode : 1995chst.conf...10J
+    print(">Compute Stokes")
     I_stokes, Q_stokes, U_stokes, Stokes_cov = proj_red.compute_Stokes(data_array, error_array, data_mask, headers, FWHM=smoothing_FWHM, scale=smoothing_scale, smoothing=smoothing_function)
 
     ## Step 3:
     # Rotate images to have North up
     ref_header = deepcopy(headers[0])
     if rotate_stokes:
+        print(">Rotate stokes")
         alpha = ref_header['orientat']
         mrot = np.array([[np.cos(-alpha), -np.sin(-alpha)],
                          [np.sin(-alpha), np.cos(-alpha)]])
@@ -199,6 +207,7 @@ def main():
         rectangle[4] = alpha
     I_stokes, Q_stokes, U_stokes, Stokes_cov, headers, data_mask = proj_red.rotate_Stokes(I_stokes, Q_stokes, U_stokes, Stokes_cov, data_mask, headers, -ref_header['orientat'], SNRi_cut=None)
     # Compute polarimetric parameters (polarization degree and angle).
+    print(">Compute Pol")
     P, debiased_P, s_P, s_P_P, PA, s_PA, s_PA_P = proj_red.compute_pol(I_stokes, Q_stokes, U_stokes, Stokes_cov, headers)
 
     ## Step 4:
@@ -209,7 +218,7 @@ def main():
     ## Step 5:
     # Save image to FITS.
-    Stokes_test = proj_fits.save_Stokes(I_stokes, Q_stokes, U_stokes, Stokes_cov, P, debiased_P, s_P, s_P_P, PA, s_PA, s_PA_P, headers, figname+figtype, data_folder=data_folder, return_hdul=True)
+    Stokes_test = proj_fits.save_Stokes(I_stokes, Q_stokes, U_stokes, Stokes_cov, P, debiased_P, s_P, s_P_P, PA, s_PA, s_PA_P, headers, data_mask, figname+figtype, data_folder=data_folder, return_hdul=True)
     # Plot polarization map (Background is either total Flux, Polarization degree or Polarization degree error).
     if px_scale.lower() not in ['full','integrate']:

View File

@@ -78,7 +78,7 @@ def get_obs_data(infiles, data_folder="", compute_flux=False):
 def save_Stokes(I_stokes, Q_stokes, U_stokes, Stokes_cov, P, debiased_P, s_P,
-        s_P_P, PA, s_PA, s_PA_P, headers, filename, data_folder="",
+        s_P_P, PA, s_PA, s_PA_P, headers, data_mask, filename, data_folder="",
         return_hdul=False):
     """
     Save computed polarimetry parameters to a single fits file,
@@ -135,12 +135,15 @@ def save_Stokes(I_stokes, Q_stokes, U_stokes, Stokes_cov, P, debiased_P, s_P,
     primary_hdu = fits.PrimaryHDU(data=I_stokes, header=header)
     hdul.append(primary_hdu)
+    data_mask = data_mask.astype(float, copy=False)
 
     #Add Q, U, Stokes_cov, P, s_P, PA, s_PA to the HDUList
     for data, name in [[Q_stokes,'Q_stokes'],[U_stokes,'U_stokes'],
             [Stokes_cov,'IQU_cov_matrix'],[P, 'Pol_deg'],
             [debiased_P, 'Pol_deg_debiased'],[s_P, 'Pol_deg_err'],
             [s_P_P, 'Pol_deg_err_Poisson_noise'],[PA, 'Pol_ang'],
-            [s_PA, 'Pol_ang_err'],[s_PA_P, 'Pol_ang_err_Poisson_noise']]:
+            [s_PA, 'Pol_ang_err'],[s_PA_P, 'Pol_ang_err_Poisson_noise'],
+            [data_mask, 'Data_mask']]:
         hdu_header = header.copy()
         hdu_header['datatype'] = name
         hdu = fits.ImageHDU(data=data,header=hdu_header)
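
Note: the new Data_mask extension is tagged through the same 'datatype' keyword as the other products, so it can be recovered with the lookup pattern polarization_map already uses. A minimal sketch (the file name is hypothetical):

from astropy.io import fits
import numpy as np

with fits.open("NGC1068_FOC_combine_FWHM020_wae.fits") as hdul:  # hypothetical product
    # Scan the 'datatype' keyword each HDU was tagged with and take the first match.
    idx = np.argmax([hdu.header.get('datatype', '') == 'Data_mask' for hdu in hdul])
    data_mask = hdul[idx].data.astype(bool)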

View File

@@ -224,6 +224,8 @@ def polarization_map(Stokes, data_mask=None, rectangle=None, SNRp_cut=3., SNRi_c
     #pol_err_Poisson = Stokes[np.argmax([Stokes[i].header['datatype']=='Pol_deg_err_Poisson_noise' for i in range(len(Stokes))])]
     pang = Stokes[np.argmax([Stokes[i].header['datatype']=='Pol_ang' for i in range(len(Stokes))])]
     pang_err = Stokes[np.argmax([Stokes[i].header['datatype']=='Pol_ang_err' for i in range(len(Stokes))])]
+    if data_mask is None:
+        data_mask = Stokes[np.argmax([Stokes[i].header['datatype']=='Data_mask' for i in range(len(Stokes))])].data.astype(bool)
 
     pivot_wav = Stokes[0].header['photplam']
     convert_flux = Stokes[0].header['photflam']
@@ -271,9 +273,9 @@ def polarization_map(Stokes, data_mask=None, rectangle=None, SNRp_cut=3., SNRi_c
         vmin, vmax = 0., np.max(stkI.data[stkI.data > 0.]*convert_flux)
         im = ax.imshow(stkI.data*convert_flux, vmin=vmin, vmax=vmax, aspect='auto', cmap='inferno', alpha=1.)
         cbar = plt.colorbar(im, cax=cbar_ax, label=r"$F_{\lambda}$ [$ergs \cdot cm^{-2} \cdot s^{-1} \cdot \AA^{-1}$]")
-        levelsI = np.linspace(SNRi_cut, np.max(SNRi[SNRi > 0.]), 10)
-        print("SNRi contour levels : ", levelsI)
-        cont = ax.contour(SNRi, levels=levelsI, colors='grey', linewidths=0.5)
+        levelsI = np.linspace(vmax*0.01, vmax*0.99, 10)
+        print("Total flux contour levels : ", levelsI)
+        cont = ax.contour(stkI.data*convert_flux, levels=levelsI, colors='grey', linewidths=0.5)
         #ax.clabel(cont,inline=True,fontsize=6)
     elif display.lower() in ['pol_flux']:
         # Display polarisation flux
@@ -281,6 +283,10 @@ def polarization_map(Stokes, data_mask=None, rectangle=None, SNRp_cut=3., SNRi_c
         vmin, vmax = 0., np.max(stkI.data[pf_mask]*convert_flux*pol.data[pf_mask])
         im = ax.imshow(stkI.data*convert_flux*pol.data, vmin=vmin, vmax=vmax, aspect='auto', cmap='inferno', alpha=1.)
         cbar = plt.colorbar(im, cax=cbar_ax, label=r"$F_{\lambda} \cdot P$ [$ergs \cdot cm^{-2} \cdot s^{-1} \cdot \AA^{-1}$]")
+        levelsPf = np.linspace(vmax*0.01, vmax*0.99, 10)
+        print("Polarized flux contour levels : ", levelsPf)
+        cont = ax.contour(stkI.data*convert_flux*pol.data, levels=levelsPf, colors='grey', linewidths=0.5)
+        #ax.clabel(cont,inline=True,fontsize=6)
     elif display.lower() in ['p','pol','pol_deg']:
         # Display polarization degree map
         vmin, vmax = 0., 100.
@@ -303,15 +309,19 @@ def polarization_map(Stokes, data_mask=None, rectangle=None, SNRp_cut=3., SNRi_c
         vmin, vmax = 0., np.max(SNRi[SNRi > 0.])
         im = ax.imshow(SNRi, vmin=vmin, vmax=vmax, aspect='auto', cmap='inferno', alpha=1.)
         cbar = plt.colorbar(im, cax=cbar_ax, label=r"$I_{Stokes}/\sigma_{I}$")
-        levelsI = np.linspace(SNRi_cut, np.max(SNRi[SNRi > 0.]), 10)
-        cont = ax.contour(SNRi, levels=levelsI, colors='grey', linewidths=0.5)
+        levelsSNRi = np.linspace(SNRi_cut, vmax*0.99, 10)
+        print("SNRi contour levels : ", levelsSNRi)
+        cont = ax.contour(SNRi, levels=levelsSNRi, colors='grey', linewidths=0.5)
+        #ax.clabel(cont,inline=True,fontsize=6)
     elif display.lower() in ['snrp']:
         # Display polarization degree signal-to-noise map
         vmin, vmax = SNRp_cut, np.max(SNRp[SNRp > 0.])
         im = ax.imshow(SNRp, vmin=vmin, vmax=vmax, aspect='auto', cmap='inferno', alpha=1.)
         cbar = plt.colorbar(im, cax=cbar_ax, label=r"$P/\sigma_{P}$")
-        levelsP = np.linspace(SNRp_cut, np.max(SNRp[SNRp > 0.]), 10)
-        cont = ax.contour(SNRp, levels=levelsP, colors='grey', linewidths=0.5)
+        levelsSNRp = np.linspace(SNRp_cut, vmax*0.99, 10)
+        print("SNRp contour levels : ", levelsSNRp)
+        cont = ax.contour(SNRp, levels=levelsSNRp, colors='grey', linewidths=0.5)
+        #ax.clabel(cont,inline=True,fontsize=6)
     else:
         # Defaults to intensity map
         vmin, vmax = 0., np.max(stkI.data[stkI.data > 0.]*convert_flux*2.)

View File

@@ -303,7 +303,7 @@ def crop_array(data_array, headers, error_array=None, step=5, null_val=None,
 def deconvolve_array(data_array, headers, psf='gaussian', FWHM=1., scale='px',
-        shape=(9,9), iterations=20):
+        shape=(9,9), iterations=20, algo='richardson'):
     """
     Homogeneously deconvolve a data array using Richardson-Lucy iterative algorithm.
     ----------
@@ -361,7 +361,7 @@ def deconvolve_array(data_array, headers, psf='gaussian', FWHM=1., scale='px',
     deconv_array = np.zeros(data_array.shape)
     for i,image in enumerate(data_array):
         deconv_array[i] = deconvolve_im(image, kernel, iterations=iterations,
-                clip=True, filter_epsilon=None)
+                clip=True, filter_epsilon=None, algo='richardson')
     return deconv_array
@@ -440,9 +440,10 @@ def get_error(data_array, headers, sub_shape=(15,15), display=False,
         # Compute error : root mean square of the background
         sub_image = image[minima[0]:minima[0]+sub_shape[0],minima[1]:minima[1]+sub_shape[1]]
         #error = np.std(sub_image) # Previously computed using standard deviation over the background
-        error = np.sqrt(np.sum(sub_image**2)/sub_image.size)
+        error = np.sqrt(np.sum((sub_image-sub_image.mean())**2)/sub_image.size)
         error_array[i] *= error
+        data_array[i] = np.abs(data_array[i] - sub_image.mean())
         # Quadratically add uncertainties in the "correction factors" (see Kishimoto 1999)
         #wavelength dependence of the polariser filters
         #estimated to less than 1%
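
Note: the corrected background error is the RMS about the sub-image mean, which is exactly np.std; the old expression folded the mean background level itself into the "noise". A quick sanity check on synthetic data (purely illustrative):

import numpy as np

rng = np.random.default_rng(0)
sub_image = rng.normal(loc=5., scale=2., size=(15, 15))  # fake background patch

rms_raw = np.sqrt(np.sum(sub_image**2)/sub_image.size)                     # old: inflated by the ~5.0 mean level
rms_ctr = np.sqrt(np.sum((sub_image-sub_image.mean())**2)/sub_image.size)  # new: fluctuations only

assert np.isclose(rms_ctr, np.std(sub_image))  # centered RMS == standard deviation
print(rms_raw, rms_ctr)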
@@ -453,11 +454,20 @@ def get_error(data_array, headers, sub_shape=(15,15), display=False,
         #flatfielding uncertainties
         #estimated to less than 3%
         err_flat = data_array[i]*0.03
+        if i==0:
+            pr = data_array[i] > 0.
+            print("Background error = {0:2.2f}%".format(np.median(error_array[i][pr]/data_array[i][pr]*100.)))
+            print("Wavelength polarizer dependence error = {0:2.2f}%".format(np.median(err_wav[pr]/data_array[i][pr]*100.)))
+            print("PSF polarizer difference error = {0:2.2f}%".format(np.median(err_psf[pr]/data_array[i][pr]*100.)))
+            print("Flatfield polarizer difference error = {0:2.2f}%".format(np.median(err_flat[pr]/data_array[i][pr]*100.)))
         error_array[i] = np.sqrt(error_array[i]**2 + err_wav**2 + err_psf**2 + err_flat**2)
+        if i==0:
+            pr = data_array[i] > 0.
+            print("Total estimated error = {0:2.2f}%".format(np.median(error_array[i][pr]/data_array[i][pr]*100.)))
         background[i] = sub_image.sum()
-        data_array[i] = data_array[i] - sub_image.mean()
+        data_array[i][data_array[i] < 0.] = 0.
         if (data_array[i] < 0.).any():
             print(data_array[i])
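
Note: the new diagnostic prints report each component of the error budget before the terms are combined; independent uncertainties add in quadrature, so the total stays well below the linear sum. A minimal numeric sketch (the ~1% and ~3% figures come from the comments above; the background and PSF magnitudes here are placeholders):

import numpy as np

signal = 1000.
err_background = 0.02*signal  # placeholder
err_wav  = 0.01*signal        # polarizer wavelength dependence, <1% per the comments
err_psf  = 0.02*signal        # placeholder
err_flat = 0.03*signal        # flatfielding, <3% per the comments

total = np.sqrt(err_background**2 + err_wav**2 + err_psf**2 + err_flat**2)
print("Total = {0:2.2f}%".format(total/signal*100.))  # ~4.2%, versus 8% for a linear sum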
@@ -582,19 +592,29 @@ def rebin_array(data_array, error_array, headers, pxsize, scale,
         new_shape = (image.shape//Dxy).astype(int)
         # Rebin data
-        rebinned_data.append(bin_ndarray(image, new_shape=new_shape,
-            operation=operation))
+        rebin_data = bin_ndarray(image, new_shape=new_shape,
+            operation=operation)
+        rebinned_data.append(rebin_data)
         # Propagate error
         rms_image = np.sqrt(bin_ndarray(image**2, new_shape=new_shape,
             operation='average'))
+        sum_image = bin_ndarray(image, new_shape=new_shape,
+            operation='sum')
+        mask = sum_image > 0.
+        new_error = np.zeros(rms_image.shape)
         if operation.lower() in ["mean", "average", "avg"]:
-            new_error = 1./np.sqrt(Dxy[0]*Dxy[1])*bin_ndarray(error,
-                new_shape=new_shape, operation='average')
+            new_error[mask] = np.sqrt(bin_ndarray(error**2*image,
+                new_shape=new_shape, operation='average')[mask]/sum_image[mask])
         else:
-            new_error = np.sqrt(Dxy[0]*Dxy[1])*bin_ndarray(error,
-                new_shape=new_shape, operation='average')
+            new_error[mask] = np.sqrt(bin_ndarray(error**2*image,
+                new_shape=new_shape, operation='sum')[mask]/sum_image[mask])
         rebinned_error.append(np.sqrt(rms_image**2 + new_error**2))
+        if i==0:
+            pr = rebin_data > 0.
+            print("Rebin RMS error = {0:2.2f}%".format(np.median(rms_image[pr]/rebin_data[pr]*100.)))
+            print("Rebin weigthed sum squarred error = {0:2.2f}%".format(np.median(new_error[pr]/rebin_data[pr]*100.)))
+            print("Total rebin error = {0:2.2f}%".format(np.median(rebinned_error[0][pr]/rebin_data[pr]*100.)))
         # Update header
         w = w.slice((np.s_[::Dxy[0]], np.s_[::Dxy[1]]))
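
Note: the textbook propagation when binning N independent pixels is sigma_sum = sqrt(sum_i sigma_i**2) for a summed bin, and sigma_sum/N for an averaged bin; the committed code uses a flux-weighted variant (each variance weighted by the signal, normalized by the binned flux). A minimal sketch of the unweighted rule for comparison:

import numpy as np

error = np.full((4, 4), 2.0)        # per-pixel sigma
blocks = error.reshape(2, 2, 2, 2)  # 2x2 binning via the reshape trick

sigma_sum  = np.sqrt((blocks**2).sum(axis=(1, 3)))  # 4 pixels of sigma=2 -> 4.0
sigma_mean = sigma_sum / 4.                         # -> 1.0
print(sigma_sum[0, 0], sigma_mean[0, 0])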
@@ -732,14 +752,25 @@ def align_data(data_array, headers, error_array=None, upsample_factor=1.,
         rescaled_mask[i] = sc_shift(rescaled_mask[i], shift, cval=True)
         rescaled_image[i][rescaled_image[i] < 0.] = 0.
+        rescaled_image[i][1-rescaled_mask[i]] = 0.
         # Uncertainties from shifting
         prec_shift = np.array([1.,1.])/upsample_factor
         shifted_image = sc_shift(rescaled_image[i], prec_shift, cval=0.)
         error_shift = np.abs(rescaled_image[i] - shifted_image)/2.
         #sum quadratically the errors
+        if i==0:
+            pr = rescaled_image[0] > 0.
+            print("Rescaled (aligned) error = {0:2.2f}%".format(np.median(rescaled_error[0][pr]/rescaled_image[0][pr]*100.)))
+            print("Shift error = {0:2.2f}%".format(np.median(error_shift[pr]/rescaled_image[0][pr]*100.)))
         rescaled_error[i] = np.sqrt(rescaled_error[i]**2 + error_shift**2)
+        if i==0:
+            pr = rescaled_image[0] > 0.
+            print("Total align error = {0:2.2f}%".format(np.median(rescaled_error[0][pr]/rescaled_image[0][pr]*100.)))
+        #rescaled_error[i][1-rescaled_mask[i]] = 0.
         shifts.append(shift)
         errors.append(error)
@@ -849,7 +880,7 @@ def smooth_data(data_array, error_array, data_mask, headers, FWHM=1.,
                 with warnings.catch_warnings(record=True) as w:
                     g_rc = np.exp(-0.5*(dist_rc/stdev)**2)/(2.*np.pi*stdev**2)
                     smoothed[i][r,c] = (1.-data_mask[r,c])*np.sum(image*g_rc)
-                    error[i][r,c] = (1.-data_mask[r,c])*np.sqrt(np.sum(error_array[i]*g_rc**2))
+                    error[i][r,c] = (1.-data_mask[r,c])*np.sqrt(np.sum(error_array[i]**2*g_rc**2))
         # Nan handling
         error[i][np.isnan(smoothed[i])] = 0.
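
Note: the one-character fix (error_array[i]**2 instead of error_array[i]) implements the standard variance propagation for a weighted sum: if S = sum_i g_i*x_i with independent x_i, then Var(S) = sum_i g_i**2*sigma_i**2. A minimal check with arbitrary values:

import numpy as np

rng = np.random.default_rng(1)
g = rng.random(100)      # kernel weights
sigma = rng.random(100)  # per-pixel uncertainties

sigma_S = np.sqrt(np.sum(sigma**2 * g**2))  # corrected form
old     = np.sqrt(np.sum(sigma * g**2))     # old form: not a variance, units are wrong
print(sigma_S, old)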
@@ -858,6 +889,9 @@ def smooth_data(data_array, error_array, data_mask, headers, FWHM=1.,
     else:
         raise ValueError("{} is not a valid smoothing option".format(smoothing))
+    pr = smoothed > 0.
+    print("Smoothed error = {0:2.2f}%".format(np.median(error[pr]/smoothed[pr]*100.)))
     return smoothed, error
@@ -958,10 +992,13 @@ def polarizer_avg(data_array, error_array, data_mask, headers, FWHM=None,
     # Propagate uncertainties quadratically
-    err0 = np.sum(err0_array,axis=0)*np.sqrt(err0_array.shape[0])
-    err60 = np.sum(err60_array,axis=0)*np.sqrt(err60_array.shape[0])
-    err120 = np.sum(err120_array,axis=0)*np.sqrt(err120_array.shape[0])
+    err0 = np.sqrt(np.sum(err0_array**2,axis=0))
+    err60 = np.sqrt(np.sum(err60_array**2,axis=0))
+    err120 = np.sqrt(np.sum(err120_array**2,axis=0))
     polerr_array = np.array([err0, err60, err120])
+    pr = pol0 > 0.
+    print("Summed POL0 error = {0:2.2f}%".format(np.median(err0[pr]/pol0[pr]*100.)))
     # Update headers
     for header in headers:
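
Note: the old expression sum(err)*sqrt(N) grows roughly like N**1.5 and double counts; for a straight combination of N independent frames the error is the quadrature sum. A minimal numeric sketch:

import numpy as np

err_frames = np.array([3., 4., 5.])                  # per-frame sigma (illustrative)
old = np.sum(err_frames) * np.sqrt(len(err_frames))  # ~20.8
new = np.sqrt(np.sum(err_frames**2))                 # sqrt(9+16+25) ~ 7.07
print(old, new)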
@@ -997,6 +1034,9 @@ def polarizer_avg(data_array, error_array, data_mask, headers, FWHM=None,
     polarizer_cov[1,1] = err60**2
     polarizer_cov[2,2] = err120**2
+    pr = pol0 > 0.
+    print("Total POL0 error = {0:2.2f}%".format(np.median(err0[pr]/pol0[pr]*100.)))
     return polarizer_array, polarizer_cov
@@ -1151,10 +1191,24 @@ def compute_Stokes(data_array, error_array, data_mask, headers,
     s_Q2_axis = np.sum([dQ_dtheta[i]**2 * sigma_theta[i]**2 for i in range(len(sigma_theta))],axis=0)
     s_U2_axis = np.sum([dU_dtheta[i]**2 * sigma_theta[i]**2 for i in range(len(sigma_theta))],axis=0)
+    prI = I_stokes > 0.
+    print("Propagated I_stokes error = {0:2.2f}%".format(np.median(np.sqrt(Stokes_cov[0,0][prI])/I_stokes[prI]*100.)))
+    print("Axis I_stokes error = {0:2.2f}%".format(np.median(np.sqrt(s_I2_axis[prI])/I_stokes[prI]*100.)))
+    prQ = Q_stokes > 0.
+    print("Propagated Q_stokes error = {0:2.2f}%".format(np.median(np.sqrt(Stokes_cov[1,1][prQ])/Q_stokes[prQ]*100.)))
+    print("Axis Q_stokes error = {0:2.2f}%".format(np.median(np.sqrt(s_Q2_axis[prQ])/Q_stokes[prQ]*100.)))
+    prU = U_stokes > 0.
+    print("Propagated U_stokes error = {0:2.2f}%".format(np.median(np.sqrt(Stokes_cov[2,2][prU])/U_stokes[prU]*100.)))
+    print("Axis U_stokes error = {0:2.2f}%".format(np.median(np.sqrt(s_U2_axis[prU])/U_stokes[prU]*100.)))
     # Add quadratically the uncertainty to the Stokes covariance matrix
-    # Stokes_cov[0,0] += s_I2_axis
-    # Stokes_cov[1,1] += s_Q2_axis
-    # Stokes_cov[2,2] += s_U2_axis
+    Stokes_cov[0,0] += s_I2_axis
+    Stokes_cov[1,1] += s_Q2_axis
+    Stokes_cov[2,2] += s_U2_axis
+    print("Total I_stokes error = {0:2.2f}%".format(np.median(np.sqrt(Stokes_cov[0,0][prI])/I_stokes[prI]*100.)))
+    print("Total Q_stokes error = {0:2.2f}%".format(np.median(np.sqrt(Stokes_cov[1,1][prQ])/Q_stokes[prQ]*100.)))
+    print("Total U_stokes error = {0:2.2f}%".format(np.median(np.sqrt(Stokes_cov[2,2][prU])/U_stokes[prU]*100.)))
     if not(FWHM is None) and (smoothing.lower() in ['gaussian_after','gauss_after']):
         Stokes_array = np.array([I_stokes, Q_stokes, U_stokes])
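
Note: the s_*2_axis terms are the standard first-order propagation Var(f) ~ sum_i (df/dtheta_i)**2 * sigma_theta_i**2 for independent theta_i, now added onto the covariance diagonal instead of staying commented out. A toy sketch of the rule:

import numpy as np

# f = 3*a - 2*b with independent a, b
df = np.array([3., -2.])      # partial derivatives
sigma = np.array([0.1, 0.2])  # uncertainties on a, b

var_f = np.sum(df**2 * sigma**2)
print(np.sqrt(var_f))  # sqrt(0.09 + 0.16) = 0.5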
@@ -1263,6 +1317,11 @@ def compute_pol(I_stokes, Q_stokes, U_stokes, Stokes_cov, headers):
     s_P_P[np.isnan(s_P_P)] = fmax
     s_PA_P[np.isnan(s_PA_P)] = fmax
+    prP = P > 0.
+    prPA = PA > 0.
+    print("Propagated P error = {0:2.2f}%".format(np.median(s_P[prP]/P[prP]*100.)))
+    print("Propagated PA error = {0:2.2f}%".format(np.median(s_PA[prPA]/PA[prPA]*100.)))
     return P, debiased_P, s_P, s_P_P, PA, s_PA, s_PA_P
@@ -1338,10 +1397,10 @@ def rotate_Stokes(I_stokes, Q_stokes, U_stokes, Stokes_cov, data_mask, headers,
     new_Stokes_cov[1,2] = new_Stokes_cov[2,1] = np.cos(2.*alpha)*np.sin(2.*alpha)*(Stokes_cov[2,2] - Stokes_cov[1,1]) + (np.cos(2.*alpha)**2 - np.sin(2.*alpha)**2)*Stokes_cov[1,2]
     #Rotate original images using scipy.ndimage.rotate
-    new_I_stokes = sc_rotate(new_I_stokes, ang, reshape=False, cval=0.)
-    new_Q_stokes = sc_rotate(new_Q_stokes, ang, reshape=False, cval=0.)
-    new_U_stokes = sc_rotate(new_U_stokes, ang, reshape=False, cval=0.)
-    new_data_mask = (sc_rotate(data_mask.astype(float)*10., ang, reshape=False, cval=10.).astype(int)).astype(bool)
+    new_I_stokes = sc_rotate(new_I_stokes, ang, order=5, reshape=False, cval=0.)
+    new_Q_stokes = sc_rotate(new_Q_stokes, ang, order=5, reshape=False, cval=0.)
+    new_U_stokes = sc_rotate(new_U_stokes, ang, order=5, reshape=False, cval=0.)
+    new_data_mask = (sc_rotate(data_mask.astype(float)*10., ang, order=5, reshape=False, cval=10.).astype(int)).astype(bool)
     for i in range(3):
         for j in range(3):
             new_Stokes_cov[i,j] = sc_rotate(new_Stokes_cov[i,j], ang,
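
Note: scipy.ndimage.rotate interpolates with a spline of the given order (0 to 5, default 3); order=5 is smoother but more expensive and can over/undershoot near sharp edges, which is presumably why the masks are rotated as floats and re-thresholded. A minimal sketch:

from scipy import ndimage
import numpy as np

image = np.zeros((64, 64))
image[24:40, 24:40] = 1.

rotated = ndimage.rotate(image, 30., order=5, reshape=False, cval=0.)
print(rotated.min(), rotated.max())  # slight ringing around the square's edges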
@@ -1391,6 +1450,13 @@ def rotate_Stokes(I_stokes, Q_stokes, U_stokes, Stokes_cov, data_mask, headers,
     new_U_stokes[np.isnan(new_U_stokes)] = 0.
     new_Stokes_cov[np.isnan(new_Stokes_cov)] = fmax
+    prI = new_I_stokes > 0.
+    prQ = new_Q_stokes > 0.
+    prU = new_U_stokes > 0.
+    print("Propagated rotated I_stokes error = {0:2.2f}%".format(np.median(np.sqrt(new_Stokes_cov[0,0][prI])/new_I_stokes[prI]*100.)))
+    print("Propagated rotated Q_stokes error = {0:2.2f}%".format(np.median(np.sqrt(new_Stokes_cov[1,1][prQ])/new_Q_stokes[prQ]*100.)))
+    print("Propagated rotated U_stokes error = {0:2.2f}%".format(np.median(np.sqrt(new_Stokes_cov[2,2][prU])/new_U_stokes[prU]*100.)))
     return new_I_stokes, new_Q_stokes, new_U_stokes, new_Stokes_cov, new_headers, new_data_mask
 
 def rotate_data(data_array, error_array, data_mask, headers, ang):
@@ -1426,12 +1492,15 @@ def rotate_data(data_array, error_array, data_mask, headers, ang):
     new_data_array = []
     new_error_array = []
     for i in range(data_array.shape[0]):
-        new_data_array.append(sc_rotate(data_array[i], ang, reshape=False,
+        new_data_array.append(sc_rotate(data_array[i], ang, order=5, reshape=False,
             cval=0.))
-        new_error_array.append(sc_rotate(error_array[i], ang, reshape=False,
+        new_error_array.append(sc_rotate(error_array[i], ang, order=5, reshape=False,
             cval=error_array.mean()))
+        if i==0:
+            pr = new_data_array[0] > 0.
+            print("Rotated data error = {0:2.2f}%".format(np.median(new_error_array[0][pr]/new_data_array[0][pr]*100.)))
     new_data_array = np.array(new_data_array)
-    new_data_mask = sc_rotate(data_mask, ang, reshape=False, cval=True)
+    new_data_mask = sc_rotate(data_mask, ang, order=5, reshape=False, cval=True)
     new_error_array = np.array(new_error_array)
     for i in range(new_data_array.shape[0]):

View File

@@ -3,7 +3,7 @@ from astropy.io import fits
 import numpy as np
 from plots import overplot_maps
 
-Stokes_UV = fits.open("../../data/IC5063_x3nl030/IC5063_FOC_combine_FWHM020.fits")
+Stokes_UV = fits.open("../../data/IC5063_x3nl030/IC5063_FOC_combine_FWHM020_pol_wae.fits")
 Stokes_18GHz = fits.open("../../data/IC5063_x3nl030/radio/IC5063.18GHz.fits")
 Stokes_24GHz = fits.open("../../data/IC5063_x3nl030/radio/IC5063.24GHz.fits")
@@ -12,9 +12,9 @@ levelsMorganti = np.array([1.,2.,3.,8.,16.,32.,64.,128.])
 #levels18GHz = np.array([0.6, 1.5, 3, 6, 12, 24, 48, 96])/100.*Stokes_18GHz[0].data.max()
 levels18GHz = levelsMorganti*0.28*1e-3
 A = overplot_maps(Stokes_UV, Stokes_18GHz)
-A.plot(levels=levels18GHz, SNRp_cut=3.0, SNRi_cut=50.0, savename='../../plots/IC5063_x3nl030/18GHz_overplot.png')
+A.plot(levels=levels18GHz, SNRp_cut=10.0, SNRi_cut=100.0, savename='../../plots/IC5063_x3nl030/18GHz_overplot_forced_maxUV.png')
 #levels24GHz = np.array([1.,1.5, 3, 6, 12, 24, 48, 96])/100.*Stokes_24GHz[0].data.max()
 levels24GHz = levelsMorganti*0.46*1e-3
 B = overplot_maps(Stokes_UV, Stokes_24GHz)
-B.plot(levels=levels24GHz, SNRp_cut=3.0, SNRi_cut=50.0, savename='../../plots/IC5063_x3nl030/24GHz_overplot.png')
+B.plot(levels=levels24GHz, SNRp_cut=10.0, SNRi_cut=100.0, savename='../../plots/IC5063_x3nl030/24GHz_overplot_forced_maxUV.png')