diff --git a/src/FOC_reduction.py b/src/FOC_reduction.py
index 9ab432e..7c5e9df 100755
--- a/src/FOC_reduction.py
+++ b/src/FOC_reduction.py
@@ -22,10 +22,10 @@ def main(target=None, proposal_id=None, infiles=None, output_dir="./data", crop=
     # from lib.deconvolve import from_file_psf
     psf = 'gaussian'  # Can be user-defined as well
     # psf = from_file_psf(data_folder+psf_file)
-    psf_FWHM = 0.15
+    psf_FWHM = 0.015
     psf_scale = 'arcsec'
-    psf_shape = (25, 25)
-    iterations = 5
+    psf_shape = (11, 11)
+    iterations = 3
     algo = "richardson"
 
     # Initial crop
@@ -33,7 +33,8 @@ def main(target=None, proposal_id=None, infiles=None, output_dir="./data", crop=
 
     # Background estimation
     error_sub_type = 'freedman-diaconis'  # sqrt, sturges, rice, scott, freedman-diaconis (default) or shape (example (51, 51))
-    subtract_error = 1.00
+    subtract_error = 1.20
+    display_bkg = False
     display_error = False
 
     # Data binning
@@ -44,13 +45,12 @@ def main(target=None, proposal_id=None, infiles=None, output_dir="./data", crop=
 
     # Alignement
     align_center = 'center'  # If None will not align the images
-    display_bkg = False
     display_align = False
     display_data = False
 
     # Smoothing
     smoothing_function = 'combine'  # gaussian_after, weighted_gaussian_after, gaussian, weighted_gaussian or combine
-    smoothing_FWHM = 0.10  # If None, no smoothing is done
+    smoothing_FWHM = 0.2  # If None, no smoothing is done
     smoothing_scale = 'arcsec'  # pixel or arcsec
 
     # Rotation
@@ -58,8 +58,8 @@ def main(target=None, proposal_id=None, infiles=None, output_dir="./data", crop=
     rotate_stokes = True
 
     # Final crop
-    crop = False  # Crop to desired ROI
-    interactive = False  # Whether to output to intercative analysis tool
+    crop = True  # Crop to desired ROI
+    interactive = True  # Whether to output to interactive analysis tool
 
     # Polarization map output
     SNRp_cut = 3.  # P measurments with SNR>3
@@ -90,7 +90,7 @@ def main(target=None, proposal_id=None, infiles=None, output_dir="./data", crop=
         plots_folder = "."
         if not path_exists(plots_folder):
             system("mkdir -p {0:s} ".format(plots_folder))
-        infiles = [p[1] for p in prod]
+        infiles = [p[1] for p in prod]  # if p[1] not in ['x2rp0202t_c0f.fits', 'x2rp0302t_c0f.fits']]
     data_array, headers = proj_fits.get_obs_data(infiles, data_folder=data_folder, compute_flux=True)
 
     figname = "_".join([target, "FOC"])
@@ -102,6 +102,8 @@ def main(target=None, proposal_id=None, infiles=None, output_dir="./data", crop=
     if smoothing_FWHM is not None:
         figtype += "_"+"".join(["".join([s[0] for s in smoothing_function.split("_")]), "{0:.2f}".format(smoothing_FWHM), smoothing_scale])  # additionnal informations
+    if deconvolve:
+        figtype += "_deconv"
     if align_center is None:
         figtype += "_not_aligned"
@@ -115,20 +117,25 @@ def main(target=None, proposal_id=None, infiles=None, output_dir="./data", crop=
     # Estimate error from data background, estimated from sub-image of desired sub_shape.
     background = None
-    data_array, error_array, headers, background = proj_red.get_error(data_array, headers, error_array, sub_type=error_sub_type, subtract_error=subtract_error, display=display_error, savename="_".join([figname, "errors"]), plots_folder=plots_folder, return_background=True)
+    data_array, error_array, headers, background = proj_red.get_error(data_array, headers, error_array, sub_type=error_sub_type, subtract_error=subtract_error, display=display_error, savename="_".join([
+        figname, "errors"]), plots_folder=plots_folder, return_background=True)
     if display_bkg:
-        proj_plots.plot_obs(data_array, headers, vmin=data_array[data_array > 0.].min()*headers[0]['photflam'], vmax=data_array[data_array > 0.].max()*headers[0]['photflam'], savename="_".join([figname, "bkg"]), plots_folder=plots_folder)
+        proj_plots.plot_obs(data_array, headers, vmin=data_array[data_array > 0.].min(
+        )*headers[0]['photflam'], vmax=data_array[data_array > 0.].max()*headers[0]['photflam'], savename="_".join([figname, "bkg"]), plots_folder=plots_folder)
 
     # Align and rescale images with oversampling.
-    data_array, error_array, headers, data_mask = proj_red.align_data( data_array, headers, error_array=error_array, background=background, upsample_factor=10, ref_center=align_center, return_shifts=False)
+    data_array, error_array, headers, data_mask = proj_red.align_data(
+        data_array, headers, error_array=error_array, background=background, upsample_factor=10, ref_center=align_center, return_shifts=False)
     if display_align:
-        proj_plots.plot_obs(data_array, headers, vmin=data_array[data_array > 0.].min()*headers[0]['photflam'], vmax=data_array[data_array > 0.].max()*headers[0]['photflam'], savename="_".join([figname, str(align_center)]), plots_folder=plots_folder)
+        proj_plots.plot_obs(data_array, headers, vmin=data_array[data_array > 0.].min(
+        )*headers[0]['photflam'], vmax=data_array[data_array > 0.].max()*headers[0]['photflam'], savename="_".join([figname, str(align_center)]), plots_folder=plots_folder)
 
     # Rebin data to desired pixel size.
     if rebin:
-        data_array, error_array, headers, Dxy, data_mask = proj_red.rebin_array( data_array, error_array, headers, pxsize=pxsize, scale=px_scale, operation=rebin_operation, data_mask=data_mask)
+        data_array, error_array, headers, Dxy, data_mask = proj_red.rebin_array(
+            data_array, error_array, headers, pxsize=pxsize, scale=px_scale, operation=rebin_operation, data_mask=data_mask)
 
     # Rotate data to have North up
     if rotate_data:
@@ -138,10 +145,12 @@ def main(target=None, proposal_id=None, infiles=None, output_dir="./data", crop=
 
     # Plot array for checking output
     if display_data and px_scale.lower() not in ['full', 'integrate']:
-        proj_plots.plot_obs(data_array, headers, vmin=data_array[data_array > 0.].min()*headers[0]['photflam'], vmax=data_array[data_array > 0.].max()*headers[0]['photflam'], savename="_".join([figname, "rebin"]), plots_folder=plots_folder)
+        proj_plots.plot_obs(data_array, headers, vmin=data_array[data_array > 0.].min(
+        )*headers[0]['photflam'], vmax=data_array[data_array > 0.].max()*headers[0]['photflam'], savename="_".join([figname, "rebin"]), plots_folder=plots_folder)
 
     background = np.array([np.array(bkg).reshape(1, 1) for bkg in background])
-    background_error = np.array([np.array(np.sqrt((bkg-background[np.array([h['filtnam1'] == head['filtnam1'] for h in headers], dtype=bool)].mean()) ** 2/np.sum([h['filtnam1'] == head['filtnam1'] for h in headers]))).reshape(1, 1) for bkg, head in zip(background, headers)])
+    background_error = np.array([np.array(np.sqrt((bkg-background[np.array([h['filtnam1'] == head['filtnam1'] for h in headers], dtype=bool)].mean())
+                                                  ** 2/np.sum([h['filtnam1'] == head['filtnam1'] for h in headers]))).reshape(1, 1) for bkg, head in zip(background, headers)])
 
     # Step 2:
     # Compute Stokes I, Q, U with smoothed polarized images
@@ -149,13 +158,16 @@ def main(target=None, proposal_id=None, infiles=None, output_dir="./data", crop=
     # FWHM of FOC have been estimated at about 0.03" across 1500-5000 Angstrom band, which is about 2 detector pixels wide
     # see Jedrzejewski, R.; Nota, A.; Hack, W. J., A Comparison Between FOC and WFPC2
     # Bibcode : 1995chst.conf...10J
-    I_stokes, Q_stokes, U_stokes, Stokes_cov = proj_red.compute_Stokes(data_array, error_array, data_mask, headers, FWHM=smoothing_FWHM, scale=smoothing_scale, smoothing=smoothing_function, transmitcorr=False)
-    I_bkg, Q_bkg, U_bkg, S_cov_bkg = proj_red.compute_Stokes(background, background_error, np.array(True).reshape(1, 1), headers, FWHM=None, scale=smoothing_scale, smoothing=smoothing_function, transmitcorr=False)
+    I_stokes, Q_stokes, U_stokes, Stokes_cov = proj_red.compute_Stokes(
+        data_array, error_array, data_mask, headers, FWHM=smoothing_FWHM, scale=smoothing_scale, smoothing=smoothing_function, transmitcorr=False)
+    I_bkg, Q_bkg, U_bkg, S_cov_bkg = proj_red.compute_Stokes(background, background_error, np.array(True).reshape(
+        1, 1), headers, FWHM=None, scale=smoothing_scale, smoothing=smoothing_function, transmitcorr=False)
 
     # Step 3:
     # Rotate images to have North up
     if rotate_stokes:
-        I_stokes, Q_stokes, U_stokes, Stokes_cov, data_mask, headers = proj_red.rotate_Stokes(I_stokes, Q_stokes, U_stokes, Stokes_cov, data_mask, headers, SNRi_cut=None)
+        I_stokes, Q_stokes, U_stokes, Stokes_cov, data_mask, headers = proj_red.rotate_Stokes(
+            I_stokes, Q_stokes, U_stokes, Stokes_cov, data_mask, headers, SNRi_cut=None)
         I_bkg, Q_bkg, U_bkg, S_cov_bkg, _, _ = proj_red.rotate_Stokes(I_bkg, Q_bkg, U_bkg, S_cov_bkg, np.array(True).reshape(1, 1), headers, SNRi_cut=None)
 
     # Compute polarimetric parameters (polarisation degree and angle).
@@ -164,7 +176,8 @@ def main(target=None, proposal_id=None, infiles=None, output_dir="./data", crop=
 
     # Step 4:
     # Save image to FITS.
-    Stokes_test = proj_fits.save_Stokes(I_stokes, Q_stokes, U_stokes, Stokes_cov, P, debiased_P, s_P, s_P_P, PA, s_PA, s_PA_P, headers, data_mask, "_".join([figname, figtype]), data_folder=data_folder, return_hdul=True)
+    Stokes_test = proj_fits.save_Stokes(I_stokes, Q_stokes, U_stokes, Stokes_cov, P, debiased_P, s_P, s_P_P, PA, s_PA, s_PA_P,
+                                        headers, data_mask, "_".join([figname, figtype]), data_folder=data_folder, return_hdul=True)
     data_mask = Stokes_test[-1].data.astype(bool)
 
     # Step 5:
@@ -176,11 +189,13 @@ def main(target=None, proposal_id=None, infiles=None, output_dir="./data", crop=
         stokescrop.writeto("/".join([data_folder, "_".join([figname, figtype+".fits"])]))
         Stokes_test, data_mask, headers = stokescrop.hdul_crop, stokescrop.data_mask, [dataset.header for dataset in stokescrop.hdul_crop]
 
-    print("F_int({0:.0f} Angs) = ({1} ± {2})e{3} ergs.cm^-2.s^-1.Angs^-1".format(headers[0]['photplam'], *proj_plots.sci_not(Stokes_test[0].data[data_mask].sum()*headers[0]['photflam'], np.sqrt(Stokes_test[3].data[0, 0][data_mask].sum())*headers[0]['photflam'], 2, out=int)))
+    print("F_int({0:.0f} Angs) = ({1} ± {2})e{3} ergs.cm^-2.s^-1.Angs^-1".format(headers[0]['photplam'], *proj_plots.sci_not(
+        Stokes_test[0].data[data_mask].sum()*headers[0]['photflam'], np.sqrt(Stokes_test[3].data[0, 0][data_mask].sum())*headers[0]['photflam'], 2, out=int)))
     print("P_int = {0:.1f} ± {1:.1f} %".format(headers[0]['p_int']*100., np.ceil(headers[0]['p_int_err']*1000.)/10.))
     print("PA_int = {0:.1f} ±t {1:.1f} °".format(headers[0]['pa_int'], np.ceil(headers[0]['pa_int_err']*10.)/10.))
     # Background values
-    print("F_bkg({0:.0f} Angs) = ({1} ± {2})e{3} ergs.cm^-2.s^-1.Angs^-1".format(headers[0]['photplam'], *proj_plots.sci_not(I_bkg[0, 0]*headers[0]['photflam'], np.sqrt(S_cov_bkg[0, 0][0, 0])*headers[0]['photflam'], 2, out=int)))
+    print("F_bkg({0:.0f} Angs) = ({1} ± {2})e{3} ergs.cm^-2.s^-1.Angs^-1".format(headers[0]['photplam'], *proj_plots.sci_not(
+        I_bkg[0, 0]*headers[0]['photflam'], np.sqrt(S_cov_bkg[0, 0][0, 0])*headers[0]['photflam'], 2, out=int)))
     print("P_bkg = {0:.1f} ± {1:.1f} %".format(debiased_P_bkg[0, 0]*100., np.ceil(s_P_bkg[0, 0]*1000.)/10.))
     print("PA_bkg = {0:.1f} ± {1:.1f} °".format(PA_bkg[0, 0], np.ceil(s_PA_bkg[0, 0]*10.)/10.))
     # Plot polarisation map (Background is either total Flux, Polarization degree or Polarization degree error).
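Note on the quantities handled above and in comparison_Kishimoto.py below: the pipeline propagates errors on Stokes I, Q, U and debiases the polarization degree before the SNR cuts are applied. The following is a minimal, self-contained sketch of the per-pixel relations as they appear in the comparison script; stokes_to_pol is a hypothetical helper for illustration, not the lib.reduction implementation.

import numpy as np

def stokes_to_pol(I, Q, U, sI, sQ, sU):
    """Polarization degree/angle with first-order error propagation and
    quadratic debiasing, mirroring the formulas in comparison_Kishimoto.py."""
    # Valid pixels: finite, positive intensity and non-zero polarized flux.
    good = np.isfinite(I) & (I > 0.) & (Q**2 + U**2 > 0.)
    P = np.zeros_like(I)
    sP = np.zeros_like(I)
    P[good] = np.sqrt(Q[good]**2 + U[good]**2) / I[good]
    # First-order propagation of (sI, sQ, sU) into the error on P.
    sP[good] = np.sqrt((Q[good]**2*sQ[good]**2 + U[good]**2*sU[good]**2) / (Q[good]**2 + U[good]**2)
                       + ((Q[good]/I[good])**2 + (U[good]/I[good])**2) * sI[good]**2) / I[good]
    # P is positive-definite, so noise biases it upward; debias in quadrature.
    debiased_P = np.sqrt(np.clip(P**2 - sP**2, 0., None))
    PA = 0.5*np.arctan2(U, Q)  # radians, defined modulo pi
    return P, debiased_P, sP, PA

A pixel then enters the analysis when debiased_P/sP exceeds SNRp_cut and I/sI exceeds SNRi_cut, which is the cut applied in the comparison script below.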
diff --git a/src/comparison_Kishimoto.py b/src/comparison_Kishimoto.py
index 1b645bd..56b67ae 100755
--- a/src/comparison_Kishimoto.py
+++ b/src/comparison_Kishimoto.py
@@ -1,23 +1,22 @@
-#!/usr/bin/env python
-from lib.reduction import align_data, crop_array, princ_angle
+#!/usr/bin/env python
 from lib.background import gauss, bin_centers
 from lib.deconvolve import zeropad
+from lib.reduction import align_data
+from lib.plots import princ_angle
 from matplotlib.colors import LogNorm
 from os.path import join as path_join
-from os import walk as path_walk
 from astropy.io import fits
 from astropy.wcs import WCS
-from re import compile as regcompile, IGNORECASE
 from scipy.ndimage import shift
 from scipy.optimize import curve_fit
 import numpy as np
 import matplotlib.pyplot as plt
 
 root_dir = path_join('/home/t.barnouin/Documents/Thesis/HST')
-root_dir_K = path_join(root_dir,'Kishimoto','output')
-root_dir_S = path_join(root_dir,'FOC_Reduction','output')
-root_dir_data_S = path_join(root_dir,'FOC_Reduction','data','NGC1068','5144')
-root_dir_plot_S = path_join(root_dir,'FOC_Reduction','plots','NGC1068','5144','notaligned')
+root_dir_K = path_join(root_dir, 'Kishimoto', 'output')
+root_dir_S = path_join(root_dir, 'FOC_Reduction', 'output')
+root_dir_data_S = path_join(root_dir, 'FOC_Reduction', 'data', 'NGC1068', '5144')
+root_dir_plot_S = path_join(root_dir, 'FOC_Reduction', 'plots', 'NGC1068', '5144', 'notaligned')
 filename_S = "NGC1068_FOC_b10.00pixel_not_aligned.fits"
 
 plt.rcParams.update({'font.size': 15})
@@ -26,14 +25,14 @@ SNRp_cut = 3.
 data_K = {}
 data_S = {}
-for d,i in zip(['I','Q','U','P','PA','sI','sQ','sU','sP','sPA'],[0,1,2,5,8,(3,0,0),(3,1,1),(3,2,2),6,9]):
-    data_K[d] = np.loadtxt(path_join(root_dir_K,d+'.txt'))
-    with fits.open(path_join(root_dir_data_S,filename_S)) as f:
+for d, i in zip(['I', 'Q', 'U', 'P', 'PA', 'sI', 'sQ', 'sU', 'sP', 'sPA'], [0, 1, 2, 5, 8, (3, 0, 0), (3, 1, 1), (3, 2, 2), 6, 9]):
+    data_K[d] = np.loadtxt(path_join(root_dir_K, d+'.txt'))
+    with fits.open(path_join(root_dir_data_S, filename_S)) as f:
         if not type(i) is int:
-            data_S[d] = np.sqrt(f[i[0]].data[i[1],i[2]])
+            data_S[d] = np.sqrt(f[i[0]].data[i[1], i[2]])
         else:
             data_S[d] = f[i].data
-            if i==0:
+            if i == 0:
                 header = f[i].header
 
 wcs = WCS(header)
 convert_flux = header['photflam']
@@ -41,64 +40,67 @@ convert_flux = header['photflam']
 bkg_S = np.median(data_S['I'])/3
 bkg_K = np.median(data_K['I'])/3
 
-#zeropad data to get same size of array
+# zeropad data to get same size of array
 shape = data_S['I'].shape
 for d in data_K:
-    data_K[d] = zeropad(data_K[d],shape)
+    data_K[d] = zeropad(data_K[d], shape)
 
-#shift array to get same information in same pixel
-data_arr, error_ar, heads, data_msk, shifts, shifts_err = align_data(np.array([data_S['I'],data_K['I']]), [header, header], error_array=np.array([data_S['sI'],data_K['sI']]), background=np.array([bkg_S,bkg_K]), upsample_factor=10., ref_center='center', return_shifts=True)
+# shift array to get same information in same pixel
+data_arr, error_ar, heads, data_msk, shifts, shifts_err = align_data(np.array([data_S['I'], data_K['I']]), [header, header], error_array=np.array(
+    [data_S['sI'], data_K['sI']]), background=np.array([bkg_S, bkg_K]), upsample_factor=10., ref_center='center', return_shifts=True)
 for d in data_K:
-    data_K[d] = shift(data_K[d],shifts[1],order=1,cval=0.)
+    data_K[d] = shift(data_K[d], shifts[1], order=1, cval=0.)
 
-#compute pol components from shifted array
+# compute pol components from shifted array
 for d in [data_S, data_K]:
     for i in d:
         d[i][np.isnan(d[i])] = 0.
-    d['P'] = np.where(np.logical_and(np.isfinite(d['I']),d['I']>0.),np.sqrt(d['Q']**2+d['U']**2)/d['I'],0.)
-    d['sP'] = np.where(np.logical_and(np.isfinite(d['I']),d['I']>0.),np.sqrt((d['Q']**2*d['sQ']**2+d['U']**2*d['sU']**2)/(d['Q']**2+d['U']**2)+((d['Q']/d['I'])**2+(d['U']/d['I'])**2)*d['sI']**2)/d['I'],0.)
-    d['d_P'] = np.where(np.logical_and(np.isfinite(d['P']),np.isfinite(d['sP'])),np.sqrt(d['P']**2-d['sP']**2),0.)
-    d['PA'] = 0.5*np.arctan2(d['U'],d['Q'])+np.pi
+    d['P'] = np.where(np.logical_and(np.isfinite(d['I']), d['I'] > 0.), np.sqrt(d['Q']**2+d['U']**2)/d['I'], 0.)
+    d['sP'] = np.where(np.logical_and(np.isfinite(d['I']), d['I'] > 0.), np.sqrt((d['Q']**2*d['sQ']**2+d['U']**2*d['sU']**2) /
+                       (d['Q']**2+d['U']**2)+((d['Q']/d['I'])**2+(d['U']/d['I'])**2)*d['sI']**2)/d['I'], 0.)
+    d['d_P'] = np.where(np.logical_and(np.isfinite(d['P']), np.isfinite(d['sP'])), np.sqrt(d['P']**2-d['sP']**2), 0.)
+    d['PA'] = 0.5*np.arctan2(d['U'], d['Q'])+np.pi
     d['SNRp'] = np.zeros(d['d_P'].shape)
-    d['SNRp'][d['sP']>0.] = d['d_P'][d['sP']>0.]/d['sP'][d['sP']>0.]
+    d['SNRp'][d['sP'] > 0.] = d['d_P'][d['sP'] > 0.]/d['sP'][d['sP'] > 0.]
     d['SNRi'] = np.zeros(d['I'].shape)
-    d['SNRi'][d['sI']>0.] = d['I'][d['sI']>0.]/d['sI'][d['sI']>0.]
-    d['mask'] = np.logical_and(d['SNRi']>SNRi_cut,d['SNRp']>SNRp_cut)
-data_S['mask'], data_K['mask'] = np.logical_and(data_S['mask'],data_K['mask']), np.logical_and(data_S['mask'],data_K['mask'])
+    d['SNRi'][d['sI'] > 0.] = d['I'][d['sI'] > 0.]/d['sI'][d['sI'] > 0.]
+    d['mask'] = np.logical_and(d['SNRi'] > SNRi_cut, d['SNRp'] > SNRp_cut)
+data_S['mask'], data_K['mask'] = np.logical_and(data_S['mask'], data_K['mask']), np.logical_and(data_S['mask'], data_K['mask'])
 
-#####
-###Compute histogram of measured polarization in cut
-#####
-bins=int(data_S['mask'].sum()/5)
-bin_size=1./bins
-mod_p = np.linspace(0.,1.,300)
+#
+# Compute histogram of measured polarization in cut
+#
+bins = int(data_S['mask'].sum()/5)
+bin_size = 1./bins
+mod_p = np.linspace(0., 1., 300)
 for d in [data_S, data_K]:
-    d['hist'], d['bin_edges'] = np.histogram(d['d_P'][d['mask']],bins=bins,range=(0.,1.))
+    d['hist'], d['bin_edges'] = np.histogram(d['d_P'][d['mask']], bins=bins, range=(0., 1.))
     d['binning'] = bin_centers(d['bin_edges'])
-    peak, bins_fwhm = d['binning'][np.argmax(d['hist'])], d['binning'][d['hist']>d['hist'].max()/2.]
+    peak, bins_fwhm = d['binning'][np.argmax(d['hist'])], d['binning'][d['hist'] > d['hist'].max()/2.]
     fwhm = bins_fwhm[1]-bins_fwhm[0]
     p0 = [d['hist'].max(), peak, fwhm]
     try:
         popt, pcov = curve_fit(gauss, d['binning'], d['hist'], p0=p0)
     except RuntimeError:
         popt = p0
-    d['hist_chi2'] = np.sum((d['hist']-gauss(d['binning'],*popt))**2)/d['hist'].size
+    d['hist_chi2'] = np.sum((d['hist']-gauss(d['binning'], *popt))**2)/d['hist'].size
     d['hist_popt'] = popt
-
-fig_p, ax_p = plt.subplots(num="Polarization degree histogram",figsize=(10,6),constrained_layout=True)
-ax_p.errorbar(data_S['binning'],data_S['hist'],xerr=bin_size/2.,fmt='b.',ecolor='b',label='P through this pipeline')
-ax_p.plot(mod_p,gauss(mod_p,*data_S['hist_popt']),'b--',label='mean = {1:.2f}, stdev = {2:.2f}'.format(*data_S['hist_popt']))
-ax_p.errorbar(data_K['binning'],data_K['hist'],xerr=bin_size/2.,fmt='r.',ecolor='r',label="P through Kishimoto's pipeline")
-ax_p.plot(mod_p,gauss(mod_p,*data_K['hist_popt']),'r--',label='mean = {1:.2f}, stdev = {2:.2f}'.format(*data_K['hist_popt']))
-ax_p.set(xlabel="Polarization degree",ylabel="Counts",title="Histogram of polarization degree computed in the cut for both pipelines.")
-ax_p.legend()
-fig_p.savefig(path_join(root_dir_plot_S,"NGC1068_K_pol_deg.png"),bbox_inches="tight",dpi=300)
-#####
-###Compute angular difference between the maps in cut
-#####
-dtheta = np.where(data_S['mask'], 0.5*np.arctan((np.sin(2*data_S['PA'])*np.cos(2*data_K['PA'])-np.cos(2*data_S['PA'])*np.cos(2*data_K['PA']))/(np.cos(2*data_S['PA'])*np.cos(2*data_K['PA'])+np.cos(2*data_S['PA'])*np.sin(2*data_K['PA']))),np.nan)
+fig_p, ax_p = plt.subplots(num="Polarization degree histogram", figsize=(10, 6), constrained_layout=True)
+ax_p.errorbar(data_S['binning'], data_S['hist'], xerr=bin_size/2., fmt='b.', ecolor='b', label='P through this pipeline')
+ax_p.plot(mod_p, gauss(mod_p, *data_S['hist_popt']), 'b--', label='mean = {1:.2f}, stdev = {2:.2f}'.format(*data_S['hist_popt']))
+ax_p.errorbar(data_K['binning'], data_K['hist'], xerr=bin_size/2., fmt='r.', ecolor='r', label="P through Kishimoto's pipeline")
+ax_p.plot(mod_p, gauss(mod_p, *data_K['hist_popt']), 'r--', label='mean = {1:.2f}, stdev = {2:.2f}'.format(*data_K['hist_popt']))
+ax_p.set(xlabel="Polarization degree", ylabel="Counts", title="Histogram of polarization degree computed in the cut for both pipelines.")
+ax_p.legend()
+fig_p.savefig(path_join(root_dir_plot_S, "NGC1068_K_pol_deg.png"), bbox_inches="tight", dpi=300)
+
+#
+# Compute angular difference between the maps in cut
+#
+dtheta = np.where(data_S['mask'], 0.5*np.arctan((np.sin(2*data_S['PA'])*np.cos(2*data_K['PA'])-np.cos(2*data_S['PA']) *
+                  np.sin(2*data_K['PA']))/(np.cos(2*data_S['PA'])*np.cos(2*data_K['PA'])+np.sin(2*data_S['PA'])*np.sin(2*data_K['PA']))), np.nan)
 fig_pa = plt.figure(num="Polarization degree alignement")
 ax_pa = fig_pa.add_subplot(111, projection=wcs)
 cbar_ax_pa = fig_pa.add_axes([0.88, 0.12, 0.01, 0.75])
@@ -107,11 +109,11 @@ im_pa = ax_pa.imshow(np.cos(2*dtheta), vmin=-1., vmax=1., origin='lower', cmap='
 cbar_pa = plt.colorbar(im_pa, cax=cbar_ax_pa, label=r"$\zeta = \cos\left( 2 \cdot \delta\theta_P \right)$")
 ax_pa.coords[0].set_axislabel('Right Ascension (J2000)')
 ax_pa.coords[1].set_axislabel('Declination (J2000)')
-fig_pa.savefig(path_join(root_dir_plot_S,"NGC1068_K_pol_ang.png"),bbox_inches="tight",dpi=300)
+fig_pa.savefig(path_join(root_dir_plot_S, "NGC1068_K_pol_ang.png"), bbox_inches="tight", dpi=300)
 
-#####
-###Compute power uncertainty difference between the maps in cut
-#####
+#
+# Compute power uncertainty difference between the maps in cut
+#
 eta = np.where(data_S['mask'], np.abs(data_K['d_P']-data_S['d_P'])/np.sqrt(data_S['sP']**2+data_K['sP']**2)/2., np.nan)
 fig_dif_p = plt.figure(num="Polarization power difference ratio")
 ax_dif_p = fig_dif_p.add_subplot(111, projection=wcs)
@@ -121,54 +123,56 @@ im_dif_p = ax_dif_p.imshow(eta, vmin=0., vmax=2., origin='lower', cmap='bwr_r',
 cbar_dif_p = plt.colorbar(im_dif_p, cax=cbar_ax_dif_p, label=r"$\eta = \frac{2 \left|P^K-P^S\right|}{\sqrt{{\sigma^K_P}^2+{\sigma^S_P}^2}}$")
 ax_dif_p.coords[0].set_axislabel('Right Ascension (J2000)')
 ax_dif_p.coords[1].set_axislabel('Declination (J2000)')
-fig_dif_p.savefig(path_join(root_dir_plot_S,"NGC1068_K_pol_diff.png"),bbox_inches="tight",dpi=300)
+fig_dif_p.savefig(path_join(root_dir_plot_S, "NGC1068_K_pol_diff.png"), bbox_inches="tight", dpi=300)
 
-#####
-###Compute angle uncertainty difference between the maps in cut
-#####
+#
+# Compute angle uncertainty difference between the maps in cut
+#
 eta = np.where(data_S['mask'], np.abs(data_K['PA']-data_S['PA'])/np.sqrt(data_S['sPA']**2+data_K['sPA']**2)/2., np.nan)
 fig_dif_pa = plt.figure(num="Polarization angle difference ratio")
 ax_dif_pa = fig_dif_pa.add_subplot(111, projection=wcs)
 cbar_ax_dif_pa = fig_dif_pa.add_axes([0.88, 0.12, 0.01, 0.75])
 ax_dif_pa.set_title(r"Degree of difference $\eta$ of the polarization from the 2 pipelines in the cut")
 im_dif_pa = ax_dif_pa.imshow(eta, vmin=0., vmax=2., origin='lower', cmap='bwr_r', label=r"$\eta$ between this pipeline and Kishimoto's")
-cbar_dif_pa = plt.colorbar(im_dif_pa, cax=cbar_ax_dif_pa, label=r"$\eta = \frac{2 \left|\theta_P^K-\theta_P^S\right|}{\sqrt{{\sigma^K_{\theta_P}}^2+{\sigma^S_{\theta_P}}^2}}$")
+cbar_dif_pa = plt.colorbar(im_dif_pa, cax=cbar_ax_dif_pa,
+                           label=r"$\eta = \frac{2 \left|\theta_P^K-\theta_P^S\right|}{\sqrt{{\sigma^K_{\theta_P}}^2+{\sigma^S_{\theta_P}}^2}}$")
 ax_dif_pa.coords[0].set_axislabel('Right Ascension (J2000)')
 ax_dif_pa.coords[1].set_axislabel('Declination (J2000)')
-fig_dif_pa.savefig(path_join(root_dir_plot_S,"NGC1068_K_polang_diff.png"),bbox_inches="tight",dpi=300)
+fig_dif_pa.savefig(path_join(root_dir_plot_S, "NGC1068_K_polang_diff.png"), bbox_inches="tight", dpi=300)
 
-#####
-###display both polarization maps to check consistency
-#####
-#plt.rcParams.update({'font.size': 15})
-fig = plt.figure(num="Polarization maps comparison",figsize=(10,10))
+# display both polarization maps to check consistency
+# plt.rcParams.update({'font.size': 15})
+fig = plt.figure(num="Polarization maps comparison", figsize=(10, 10))
 ax = fig.add_subplot(111, projection=wcs)
 fig.subplots_adjust(right=0.85)
 cbar_ax = fig.add_axes([0.88, 0.12, 0.01, 0.75])
 
 for d in [data_S, data_K]:
     d['X'], d['Y'] = np.meshgrid(np.arange(d['I'].shape[1]), np.arange(d['I'].shape[0]))
-    d['xy_U'], d['xy_V'] = np.where(d['mask'],d['d_P']*np.cos(np.pi/2.+d['PA']), np.nan), np.where(d['mask'],d['d_P']*np.sin(np.pi/2.+d['PA']), np.nan)
+    d['xy_U'], d['xy_V'] = np.where(d['mask'], d['d_P']*np.cos(np.pi/2.+d['PA']), np.nan), np.where(d['mask'], d['d_P']*np.sin(np.pi/2.+d['PA']), np.nan)
 
-im0 = ax.imshow(data_S['I']*convert_flux,norm=LogNorm(data_S['I'][data_S['I']>0].min()*convert_flux,data_S['I'][data_S['I']>0].max()*convert_flux),origin='lower',cmap='gray',label=r"$I_{STOKES}$ through this pipeline")
-quiv0 = ax.quiver(data_S['X'],data_S['Y'],data_S['xy_U'],data_S['xy_V'],units='xy',angles='uv',scale=0.5,scale_units='xy',pivot='mid',headwidth=0.,headlength=0.,headaxislength=0.,width=0.2,color='b',alpha=0.75, label="PA through this pipeline")
-quiv1 = ax.quiver(data_K['X'],data_K['Y'],data_K['xy_U'],data_K['xy_V'],units='xy',angles='uv',scale=0.5,scale_units='xy',pivot='mid',headwidth=0.,headlength=0.,headaxislength=0.,width=0.1,color='r',alpha=0.75, label="PA through Kishimoto's pipeline")
+im0 = ax.imshow(data_S['I']*convert_flux, norm=LogNorm(data_S['I'][data_S['I'] > 0].min()*convert_flux, data_S['I']
+                [data_S['I'] > 0].max()*convert_flux), origin='lower', cmap='gray', label=r"$I_{STOKES}$ through this pipeline")
+quiv0 = ax.quiver(data_S['X'], data_S['Y'], data_S['xy_U'], data_S['xy_V'], units='xy', angles='uv', scale=0.5, scale_units='xy',
+                  pivot='mid', headwidth=0., headlength=0., headaxislength=0., width=0.2, color='b', alpha=0.75, label="PA through this pipeline")
+quiv1 = ax.quiver(data_K['X'], data_K['Y'], data_K['xy_U'], data_K['xy_V'], units='xy', angles='uv', scale=0.5, scale_units='xy',
+                  pivot='mid', headwidth=0., headlength=0., headaxislength=0., width=0.1, color='r', alpha=0.75, label="PA through Kishimoto's pipeline")
 ax.set_title(r"$SNR_P \geq$ "+str(SNRi_cut)+r"$\; & \; SNR_I \geq $"+str(SNRp_cut))
-#ax.coords.grid(True, color='white', ls='dotted', alpha=0.5)
+# ax.coords.grid(True, color='white', ls='dotted', alpha=0.5)
 ax.coords[0].set_axislabel('Right Ascension (J2000)')
 ax.coords[0].set_axislabel_position('b')
 ax.coords[0].set_ticklabel_position('b')
 ax.coords[1].set_axislabel('Declination (J2000)')
 ax.coords[1].set_axislabel_position('l')
 ax.coords[1].set_ticklabel_position('l')
-#ax.axis('equal')
+# ax.axis('equal')
 cbar = plt.colorbar(im0, cax=cbar_ax, label=r"$F_{\lambda}$ [$ergs \cdot cm^{-2} \cdot s^{-1} \cdot \AA^{-1}$]")
 ax.legend(loc='upper right')
-fig.savefig(path_join(root_dir_plot_S,"NGC1068_K_comparison.png"),bbox_inches="tight",dpi=300)
+fig.savefig(path_join(root_dir_plot_S, "NGC1068_K_comparison.png"), bbox_inches="tight", dpi=300)
 
-#compute integrated polarization parameters on a specific cut
+# compute integrated polarization parameters on a specific cut
 for d in [data_S, data_K]:
     d['I_dil'] = np.sum(d['I'][d['mask']])
     d['sI_dil'] = np.sqrt(np.sum(d['sI'][d['mask']]**2))
@@ -178,28 +182,33 @@ for d in [data_S, data_K]:
     d['sU_dil'] = np.sqrt(np.sum(d['sU'][d['mask']]**2))
 
     d['P_dil'] = np.sqrt(d['Q_dil']**2+d['U_dil']**2)/d['I_dil']
-    d['sP_dil'] = np.sqrt((d['Q_dil']**2*d['sQ_dil']**2+d['U_dil']**2*d['sU_dil']**2)/(d['Q_dil']**2+d['U_dil']**2)+((d['Q_dil']/d['I_dil'])**2+(d['U_dil']/d['I_dil'])**2)*d['sI_dil']**2)/d['I_dil']
+    d['sP_dil'] = np.sqrt((d['Q_dil']**2*d['sQ_dil']**2+d['U_dil']**2*d['sU_dil']**2)/(d['Q_dil']**2+d['U_dil']**2) +
+                          ((d['Q_dil']/d['I_dil'])**2+(d['U_dil']/d['I_dil'])**2)*d['sI_dil']**2)/d['I_dil']
     d['d_P_dil'] = np.sqrt(d['P_dil']**2-d['sP_dil']**2)
-    d['PA_dil'] = princ_angle((90./np.pi)*np.arctan2(d['U_dil'],d['Q_dil']))
+    d['PA_dil'] = princ_angle((90./np.pi)*np.arctan2(d['U_dil'], d['Q_dil']))
     d['sPA_dil'] = princ_angle((90./(np.pi*(d['Q_dil']**2+d['U_dil']**2)))*np.sqrt(d['Q_dil']**2*d['sU_dil']**2+d['U_dil']**2*d['sU_dil']**2))
 
-print('From this pipeline :\n', "P = {0:.2f} ± {1:.2f} %\n".format(data_S['d_P_dil']*100.,data_S['sP_dil']*100.), "PA = {0:.2f} ± {1:.2f} °".format(data_S['PA_dil'],data_S['sPA_dil']))
-print("From Kishimoto's pipeline :\n", "P = {0:.2f} ± {1:.2f} %\n".format(data_K['d_P_dil']*100.,data_K['sP_dil']*100.), "PA = {0:.2f} ± {1:.2f} °".format(data_K['PA_dil'],data_K['sPA_dil']))
+print('From this pipeline :\n', "P = {0:.2f} ± {1:.2f} %\n".format(
+    data_S['d_P_dil']*100., data_S['sP_dil']*100.), "PA = {0:.2f} ± {1:.2f} °".format(data_S['PA_dil'], data_S['sPA_dil']))
+print("From Kishimoto's pipeline :\n", "P = {0:.2f} ± {1:.2f} %\n".format(
+    data_K['d_P_dil']*100., data_K['sP_dil']*100.), "PA = {0:.2f} ± {1:.2f} °".format(data_K['PA_dil'], data_K['sPA_dil']))
 
-#compare different types of error
-print("This pipeline : average sI/I={0:.2f} ; sQ/Q={1:.2f} ; sU/U={2:.2f} ; sP/P={3:.2f}".format(np.mean(data_S['sI'][data_S['mask']]/data_S['I'][data_S['mask']]),np.mean(data_S['sQ'][data_S['mask']]/data_S['Q'][data_S['mask']]),np.mean(data_S['sU'][data_S['mask']]/data_S['U'][data_S['mask']]),np.mean(data_S['sP'][data_S['mask']]/data_S['P'][data_S['mask']])))
-print("Kishimoto's pipeline : average sI/I={0:.2f} ; sQ/Q={1:.2f} ; sU/U={2:.2f} ; sP/P={3:.2f}".format(np.mean(data_K['sI'][data_S['mask']]/data_K['I'][data_S['mask']]),np.mean(data_K['sQ'][data_S['mask']]/data_K['Q'][data_S['mask']]),np.mean(data_K['sU'][data_S['mask']]/data_K['U'][data_S['mask']]),np.mean(data_K['sP'][data_S['mask']]/data_K['P'][data_S['mask']])))
-for d,i in zip(['I','Q','U','P','PA','sI','sQ','sU','sP','sPA'],[0,1,2,5,8,(3,0,0),(3,1,1),(3,2,2),6,9]):
-    data_K[d] = np.loadtxt(path_join(root_dir_K,d+'.txt'))
-    with fits.open(path_join(root_dir_data_S,filename_S)) as f:
+# compare different types of error
+print("This pipeline : average sI/I={0:.2f} ; sQ/Q={1:.2f} ; sU/U={2:.2f} ; sP/P={3:.2f}".format(np.mean(data_S['sI'][data_S['mask']]/data_S['I'][data_S['mask']]), np.mean(
+    data_S['sQ'][data_S['mask']]/data_S['Q'][data_S['mask']]), np.mean(data_S['sU'][data_S['mask']]/data_S['U'][data_S['mask']]), np.mean(data_S['sP'][data_S['mask']]/data_S['P'][data_S['mask']])))
+print("Kishimoto's pipeline : average sI/I={0:.2f} ; sQ/Q={1:.2f} ; sU/U={2:.2f} ; sP/P={3:.2f}".format(np.mean(data_K['sI'][data_S['mask']]/data_K['I'][data_S['mask']]), np.mean(
+    data_K['sQ'][data_S['mask']]/data_K['Q'][data_S['mask']]), np.mean(data_K['sU'][data_S['mask']]/data_K['U'][data_S['mask']]), np.mean(data_K['sP'][data_S['mask']]/data_K['P'][data_S['mask']])))
+for d, i in zip(['I', 'Q', 'U', 'P', 'PA', 'sI', 'sQ', 'sU', 'sP', 'sPA'], [0, 1, 2, 5, 8, (3, 0, 0), (3, 1, 1), (3, 2, 2), 6, 9]):
+    data_K[d] = np.loadtxt(path_join(root_dir_K, d+'.txt'))
+    with fits.open(path_join(root_dir_data_S, filename_S)) as f:
         if not type(i) is int:
-            data_S[d] = np.sqrt(f[i[0]].data[i[1],i[2]])
+            data_S[d] = np.sqrt(f[i[0]].data[i[1], i[2]])
         else:
             data_S[d] = f[i].data
-            if i==0:
+            if i == 0:
                 header = f[i].header
-#from Kishimoto's pipeline : IQU_dir, IQU_shift, IQU_stat, IQU_trans
-#from my pipeline : raw_bg, raw_flat, raw_psf, raw_shift, raw_wav, IQU_dir
-# but errors from my pipeline are propagated all along, how to compare then ?
+# from Kishimoto's pipeline : IQU_dir, IQU_shift, IQU_stat, IQU_trans
+# from my pipeline : raw_bg, raw_flat, raw_psf, raw_shift, raw_wav, IQU_dir
+# but errors from my pipeline are propagated all along; how should they be compared?
 
-plt.show()
\ No newline at end of file
+plt.show()
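Written out, the two comparison metrics the script maps over the cut are ζ for the agreement of polarization angles and η for the significance of the difference in polarization degree. The forms below restate the code above (the η colorbar label writes the factor of 2 in the numerator instead):

\zeta = \cos\left(2\,\delta\theta_P\right), \qquad
\tan\left(2\,\delta\theta_P\right) =
\frac{\sin 2\theta_P^S \cos 2\theta_P^K - \cos 2\theta_P^S \sin 2\theta_P^K}
     {\cos 2\theta_P^S \cos 2\theta_P^K + \sin 2\theta_P^S \sin 2\theta_P^K}, \qquad
\eta = \frac{\left|P^K - P^S\right|}{2\sqrt{{\sigma_P^K}^2 + {\sigma_P^S}^2}}

ζ = 1 means both pipelines measure the same polarization angle in a pixel, while η < 1 means the two degrees agree within twice their combined uncertainty.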
diff --git a/src/lib/fits.py b/src/lib/fits.py
index fa37fbf..47beebe 100755
--- a/src/lib/fits.py
+++ b/src/lib/fits.py
@@ -1,5 +1,5 @@
 #!/usr/bin/python3
-#-*- coding:utf-8 -*-
+# -*- coding:utf-8 -*-
 
 """
 Library function for simplified fits handling.
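The pol_map hunks below bracket each fig.colorbar call with a pair of plt.rcParams updates to enlarge the colorbar text and then restore the default. An equivalent, self-restoring alternative is matplotlib's rc_context; a standalone sketch (not the patch's code):

import numpy as np
import matplotlib.pyplot as plt

fig, ax = plt.subplots()
im = ax.imshow(np.random.rand(32, 32), aspect='equal', cmap='inferno')
# rc_context scopes the rcParams change: the colorbar label and tick text
# pick up font.size=14 at creation, and the previous value is restored
# automatically on exit, with no manual reset to 10.
with plt.rc_context({'font.size': 14}):
    cbar = fig.colorbar(im, ax=ax, aspect=50, shrink=0.75, pad=0.025, label='example')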
diff --git a/src/lib/plots.py b/src/lib/plots.py
index 2c3b6f2..867d56a 100755
--- a/src/lib/plots.py
+++ b/src/lib/plots.py
@@ -146,7 +146,7 @@ def plot_obs(data_array, headers, shape=None, vmin=None, vmax=None, rectangle=No
         # im = axe.imshow(convert*data, vmin=vmin, vmax=vmax, origin='lower', cmap='gray')
         data[data*convert < vmin*10.] = vmin*10./convert
         im = axe.imshow(convert*data, norm=LogNorm(vmin, vmax), origin='lower', cmap='gray')
-        if not (rectangle is None):
+        if rectangle is not None:
             x, y, width, height, angle, color = rectangle[i]
             axe.add_patch(Rectangle((x, y), width, height, angle=angle, edgecolor=color, fill=False))
 
@@ -519,7 +519,7 @@ class align_maps(object):
         self.map_convert, self.map_unit = (float(self.map_header['photflam']), r"$ergs \cdot cm^{-2} \cdot s^{-1} \cdot \AA^{-1}$") if "PHOTFLAM" in list(
             self.map_header.keys()) else (1., self.map_header['bunit'] if 'BUNIT' in list(self.map_header.keys()) else "Arbitray Units")
-        self.other_convert, self.other_unit = (float(self.other_map[0].header['photflam']), r"$ergs \cdot cm^{-2} \cdot s^{-1} \cdot \AA^{-1}$") if "PHOTFLAM" in list(
+        self.other_convert, self.other_unit = (float(self.other_header['photflam']), r"$ergs \cdot cm^{-2} \cdot s^{-1} \cdot \AA^{-1}$") if "PHOTFLAM" in list(
             self.other_header.keys()) else (1., self.other_header['bunit'] if 'BUNIT' in list(self.other_header.keys()) else "Arbitray Units")
         self.map_observer = "/".join([self.map_header['telescop'], self.map_header['instrume']]
                                      ) if "INSTRUME" in list(self.map_header.keys()) else self.map_header['telescop']
@@ -1003,7 +1003,7 @@ class overplot_pol(align_maps):
 
         # Display Stokes I as contours
         if levels is None:
-            levels = np.logspace(np.log(3)/np.log(10), 2., 5)/100.*np.max(stkI[stkI > 0.])*self.map_convert
+            levels = np.array([2., 5., 10., 20., 90.])/100.*np.max(stkI[stkI > 0.])*self.map_convert
         cont_stkI = self.ax_overplot.contour(stkI*self.map_convert, levels=levels, colors='grey', alpha=0.75, transform=self.ax_overplot.get_transform(self.wcs_UV))
         # self.ax_overplot.clabel(cont_stkI, inline=True, fontsize=5)
 
@@ -1027,7 +1027,7 @@ class overplot_pol(align_maps):
         self.cr_other, = self.ax_overplot.plot(*(self.other_wcs.celestial.wcs.crpix-(1., 1.)), 'g+')
 
         if "PHOTPLAM" in list(self.other_header.keys()):
-            self.legend_title = r"{0:s} image at $\lambda$ = {1:.0f} $\AA$".format(self.other_map_observer, float(self.other_header['photplam']))
+            self.legend_title = r"{0:s} image at $\lambda$ = {1:.0f} $\AA$".format(self.other_observer, float(self.other_header['photplam']))
         elif "CRVAL3" in list(self.other_header.keys()):
             self.legend_title = "{0:s} image at {1:.2f} GHz".format(self.other_observer, float(self.other_header['crval3'])*1e-9)
         else:
@@ -1884,9 +1884,7 @@ class pol_map(object):
         def submit_save(expression):
             ax_text_save.set(visible=False)
             if expression != '':
-                plt.rcParams.update({'font.size': 15})
-                save_fig = plt.figure(figsize=(15, 15))
-                save_ax = save_fig.add_subplot(111, projection=self.wcs)
+                save_fig, save_ax = plt.subplots(figsize=(12, 10), layout='tight', subplot_kw=dict(projection=self.wcs))
                 self.ax_cosmetics(ax=save_ax)
                 self.display(fig=save_fig, ax=save_ax)
                 self.pol_vector(fig=save_fig, ax=save_ax)
@@ -1901,7 +1899,6 @@ class pol_map(object):
             ax_vec_sc.set(visible=True)
             ax_save.set(visible=True)
             ax_dump.set(visible=True)
-            plt.rcParams.update({'font.size': 10})
             self.fig.canvas.draw_idle()
         text_save.on_submit(submit_save)
 
@@ -2104,7 +2101,7 @@ class pol_map(object):
             else:
                 vmin, vmax = flux_lim
             norm = LogNorm(vmin, vmax)
-            label = r"$F_{\lambda} \cdot P$ [$ergs \cdot cm^{-2} \cdot s^{-1} \cdot \AA^{-1}$]"
+            label = r"$P \cdot F_{\lambda}$ [$ergs \cdot cm^{-2} \cdot s^{-1} \cdot \AA^{-1}$]"
         elif self.display_selection.lower() in ['pol_deg']:
             self.data = self.P*100.
             vmin, vmax = 0., np.max(self.data[self.P > self.s_P])
@@ -2139,7 +2136,9 @@ class pol_map(object):
                 self.im = ax.imshow(self.data, norm=norm, aspect='equal', cmap='inferno')
             else:
                 self.im = ax.imshow(self.data, vmin=vmin, vmax=vmax, aspect='equal', cmap='inferno')
+            plt.rcParams.update({'font.size': 14})
             self.cbar = fig.colorbar(self.im, ax=ax, aspect=50, shrink=0.75, pad=0.025, label=label)
+            plt.rcParams.update({'font.size': 10})
             fig.canvas.draw_idle()
             return self.im
         else:
@@ -2149,8 +2148,11 @@ class pol_map(object):
             im = ax.imshow(self.data, vmin=vmin, vmax=vmax, aspect='equal', cmap='inferno')
             ax.set_xlim(0, self.data.shape[1])
             ax.set_ylim(0, self.data.shape[0])
-            plt.colorbar(im, pad=0.025, aspect=80, label=label)
+            plt.rcParams.update({'font.size': 14})
+            fig.colorbar(im, ax=ax, aspect=50, shrink=0.75, pad=0.025, label=label)
+            plt.rcParams.update({'font.size': 10})
             fig.canvas.draw_idle()
+            return im
 
     def pol_vector(self, fig=None, ax=None):
         P_cut = np.ones(self.P.shape)*np.nan
diff --git a/src/overplot_MRK463E.py b/src/overplot_MRK463E.py
index 905881e..e2070df 100755
--- a/src/overplot_MRK463E.py
+++ b/src/overplot_MRK463E.py
@@ -8,16 +8,18 @@ Stokes_UV = fits.open("./data/MRK463E/5960/MRK463E_FOC_b0.05arcsec_c0.10arcsec.f
 Stokes_IR = fits.open("./data/MRK463E/WFPC2/IR_rot_crop.fits")
 Stokes_Xr = fits.open("./data/MRK463E/Chandra/4913/primary/acisf04913N004_cntr_img2.fits")
 
-levels = np.geomspace(1., 99., 10)
+levels = np.geomspace(1., 99., 7)
 
 # A = overplot_chandra(Stokes_UV, Stokes_Xr)
 # A.plot(levels=levels, SNRp_cut=3.0, SNRi_cut=20.0, zoom=1, savename='./plots/MRK463E/Chandra_overplot.pdf')
 B = overplot_chandra(Stokes_UV, Stokes_Xr, norm=LogNorm())
-B.plot(levels=levels, SNRp_cut=3.0, SNRi_cut=20.0, zoom=1, savename='./plots/MRK463E/Chandra_overplot_forced.pdf')
+B.plot(levels=levels, SNRp_cut=3.0, SNRi_cut=30.0, vec_scale=3, zoom=1, savename='./plots/MRK463E/Chandra_overplot_forced.pdf')
+B.write_to(path1="./data/MRK463E/FOC_data_Chandra.fits", path2="./data/MRK463E/Chandra_data.fits", suffix="aligned")
 
 # C = overplot_pol(Stokes_UV, Stokes_IR)
 # C.plot(SNRp_cut=3.0, SNRi_cut=20.0, savename='./plots/MRK463E/IR_overplot.pdf')
 D = overplot_pol(Stokes_UV, Stokes_IR, norm=LogNorm())
-D.plot(SNRp_cut=3.0, SNRi_cut=30.0, vec_scale=2, norm=LogNorm(1e-18, 1e-15), savename='./plots/MRK463E/IR_overplot_forced.pdf')
+D.plot(SNRp_cut=3.0, SNRi_cut=30.0, vec_scale=3, norm=LogNorm(1e-18, 1e-15), savename='./plots/MRK463E/IR_overplot_forced.pdf')
+D.write_to(path1="./data/MRK463E/FOC_data_WFPC.fits", path2="./data/MRK463E/WFPC_data.fits", suffix="aligned")
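For reference, the levels passed to overplot_chandra are interpreted as percentages of the peak Stokes I flux (cf. the default levels introduced in overplot_pol above), so the change from 10 to 7 geometric steps thins the contours to roughly:

import numpy as np

levels = np.geomspace(1., 99., 7)  # geometric progression from 1% to 99% of peak
# -> approximately [1.0, 2.15, 4.63, 9.95, 21.39, 46.01, 99.0] percent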