diff --git a/src/FOC_reduction.py b/src/FOC_reduction.py
index 8761d88..9ab432e 100755
--- a/src/FOC_reduction.py
+++ b/src/FOC_reduction.py
@@ -1,4 +1,4 @@
-# !/usr/bin/python3
+#!/usr/bin/python3
 # -*- coding:utf-8 -*-
 """
 Main script where are progressively added the steps for the FOC pipeline reduction.
@@ -15,8 +15,8 @@ from matplotlib.colors import LogNorm
 def main(target=None, proposal_id=None, infiles=None, output_dir="./data", crop=0, interactive=0):
-    ## Reduction parameters
-    # Deconvolution
+    # Reduction parameters
+    # Deconvolution
     deconvolve = False
     if deconvolve:
         # from lib.deconvolve import from_file_psf
@@ -28,38 +28,38 @@ def main(target=None, proposal_id=None, infiles=None, output_dir="./data", crop=
     iterations = 5
     algo = "richardson"
 
-    # Initial crop
+    # Initial crop
     display_crop = False
 
-    # Background estimation
+    # Background estimation
     error_sub_type = 'freedman-diaconis'  # sqrt, sturges, rice, scott, freedman-diaconis (default) or shape (example (51, 51))
     subtract_error = 1.00
     display_error = False
 
-    # Data binning
+    # Data binning
     rebin = True
     pxsize = 0.10
     px_scale = 'arcsec'  # pixel, arcsec or full
     rebin_operation = 'sum'  # sum or average
 
-    # Alignement
+    # Alignment
     align_center = 'center'  # If None will not align the images
     display_bkg = False
     display_align = False
     display_data = False
 
-    # Smoothing
+    # Smoothing
     smoothing_function = 'combine'  # gaussian_after, weighted_gaussian_after, gaussian, weighted_gaussian or combine
     smoothing_FWHM = 0.10  # If None, no smoothing is done
     smoothing_scale = 'arcsec'  # pixel or arcsec
 
-    # Rotation
+    # Rotation
     rotate_data = False  # rotation to North convention can give erroneous results
     rotate_stokes = True
 
-    # Final crop
-    # crop = False #Crop to desired ROI
-    # interactive = False #Whether to output to intercative analysis tool
+    # Final crop
+    # crop = False  # Crop to desired ROI (left commented out: 'crop' is an argument of main)
+    # interactive = False  # Whether to output to the interactive analysis tool (left commented out: 'interactive' is an argument of main)
 
     # Polarization map output
     SNRp_cut = 3.  # P measurments with SNR>3
@@ -68,10 +68,10 @@ def main(target=None, proposal_id=None, infiles=None, output_dir="./data", crop=
     vec_scale = 3
     step_vec = 1  # plot all vectors in the array. if step_vec = 2, then every other vector will be plotted if step_vec = 0 then all vectors are displayed at full length
 
-    ##### Pipeline start
-    ## Step 1:
+    # Pipeline start
+    # Step 1:
     # Get data from fits files and translate to flux in erg/cm²/s/Angstrom.
-    if not infiles is None:
+    if infiles is not None:
         prod = np.array([["/".join(filepath.split('/')[:-1]), filepath.split('/')[-1]] for filepath in infiles], dtype=str)
         obs_dir = "/".join(infiles[0].split("/")[:-1])
         if not path_exists(obs_dir):
@@ -100,12 +100,14 @@
     else:
         figtype = "full"
     if smoothing_FWHM is not None:
-        figtype += "_"+"".join(["".join([s[0] for s in smoothing_function.split("_")]), "{0:.2f}".format(smoothing_FWHM), smoothing_scale])  # additionnal informations
+        figtype += "_"+"".join(["".join([s[0] for s in smoothing_function.split("_")]),
+                                "{0:.2f}".format(smoothing_FWHM), smoothing_scale])  # additional information
     if align_center is None:
         figtype += "_not_aligned"
 
     # Crop data to remove outside blank margins.
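[Review note] The crop just below delegates the margin detection to image_hull in src/lib/convex_hull.py (patched further down in this diff): it samples the image every `step` rows and columns, records the first and last pixels above `null_val`, and keeps the largest rectangle inscribed in (or circumscribing) the convex hull of those points. A minimal sketch of the inside-crop idea — illustrative only, not the library code:

```python
import numpy as np

def naive_inside_crop(data, null_val=0.):
    """Crudest version of the margin removal: bounding box of non-blank pixels.

    image_hull refines this with a convex hull so tilted detector borders are
    handled; assumes the image contains at least one pixel above null_val.
    """
    rows = np.where((data > null_val).any(axis=1))[0]  # rows with at least one valid pixel
    cols = np.where((data > null_val).any(axis=0))[0]  # columns with at least one valid pixel
    return data[rows[0]:rows[-1] + 1, cols[0]:cols[-1] + 1]
```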
-    data_array, error_array, headers = proj_red.crop_array(data_array, headers, step=5, null_val=0., inside=True, display=display_crop, savename=figname, plots_folder=plots_folder)
+    data_array, error_array, headers = proj_red.crop_array(data_array, headers, step=5, null_val=0.,
+                                                           inside=True, display=display_crop, savename=figname, plots_folder=plots_folder)
 
     # Deconvolve data using Richardson-Lucy iterative algorithm with a gaussian PSF of given FWHM.
     if deconvolve:
@@ -119,16 +121,16 @@
         proj_plots.plot_obs(data_array, headers, vmin=data_array[data_array > 0.].min()*headers[0]['photflam'],
                             vmax=data_array[data_array > 0.].max()*headers[0]['photflam'], savename="_".join([figname, "bkg"]), plots_folder=plots_folder)
 
     # Align and rescale images with oversampling.
-    data_array, error_array, headers, data_mask = proj_red.align_data(data_array, headers, error_array=error_array, background=background, upsample_factor=10, ref_center=align_center, return_shifts=False)
+    data_array, error_array, headers, data_mask = proj_red.align_data(
+        data_array, headers, error_array=error_array, background=background, upsample_factor=10, ref_center=align_center, return_shifts=False)
 
     if display_align:
         proj_plots.plot_obs(data_array, headers, vmin=data_array[data_array > 0.].min()*headers[0]['photflam'],
                             vmax=data_array[data_array > 0.].max()*headers[0]['photflam'], savename="_".join([figname, str(align_center)]), plots_folder=plots_folder)
 
     # Rebin data to desired pixel size.
     if rebin:
-        data_array, error_array, headers, Dxy, data_mask = proj_red.rebin_array(data_array, error_array, headers, pxsize=pxsize, scale=px_scale, operation=rebin_operation, data_mask=data_mask)
+        data_array, error_array, headers, Dxy, data_mask = proj_red.rebin_array(
+            data_array, error_array, headers, pxsize=pxsize, scale=px_scale, operation=rebin_operation, data_mask=data_mask)
 
-    # Rotate data to have North up
+    # Rotate data to have North up
     if rotate_data:
         data_mask = np.ones(data_array.shape[1:]).astype(bool)
         alpha = headers[0]['orientat']
@@ -139,34 +141,34 @@
         proj_plots.plot_obs(data_array, headers, vmin=data_array[data_array > 0.].min()*headers[0]['photflam'],
                             vmax=data_array[data_array > 0.].max()*headers[0]['photflam'], savename="_".join([figname, "rebin"]), plots_folder=plots_folder)
 
     background = np.array([np.array(bkg).reshape(1, 1) for bkg in background])
-    background_error = np.array([np.array(np.sqrt((bkg-background[np.array([h['filtnam1']==head['filtnam1'] for h in headers], dtype=bool)].mean())**2/np.sum([h['filtnam1']==head['filtnam1'] for h in headers]))).reshape(1, 1) for bkg, head in zip(background, headers)])
+    background_error = np.array([np.array(np.sqrt((bkg-background[np.array([h['filtnam1'] == head['filtnam1'] for h in headers], dtype=bool)].mean()) ** 2/np.sum([h['filtnam1'] == head['filtnam1'] for h in headers]))).reshape(1, 1) for bkg, head in zip(background, headers)])
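[Review note] Step 2 just below combines the three POL0/POL60/POL120 exposures into Stokes maps via compute_Stokes (lib.reduction, not part of this diff). For reference, the classical inversion for ideal polarizers at 0°, 60° and 120°, each transmitting f_k = (I + Q·cos 2θ_k + U·sin 2θ_k)/2, would look like the sketch below; the actual library call presumably also folds in smoothing, filter transmission and covariance propagation:

```python
import numpy as np

def stokes_from_pol_triplet(f0, f60, f120):
    """Invert f_k = (I + Q*cos(2*theta_k) + U*sin(2*theta_k))/2 for theta_k = 0, 60, 120 deg."""
    I = 2. / 3. * (f0 + f60 + f120)
    Q = 2. / 3. * (2. * f0 - f60 - f120)
    U = 2. / np.sqrt(3.) * (f60 - f120)
    return I, Q, U
```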
-    ## Step 2:
-    # Compute Stokes I, Q, U with smoothed polarized images
-    # SMOOTHING DISCUSSION :
-    # FWHM of FOC have been estimated at about 0.03" across 1500-5000 Angstrom band, which is about 2 detector pixels wide
-    # see Jedrzejewski, R.; Nota, A.; Hack, W. J., A Comparison Between FOC and WFPC2
-    # Bibcode : 1995chst.conf...10J
+    # Step 2:
+    # Compute Stokes I, Q, U with smoothed polarized images
+    # SMOOTHING DISCUSSION :
+    # The FWHM of the FOC has been estimated at about 0.03" across the 1500-5000 Angstrom band, which is about 2 detector pixels wide
+    # see Jedrzejewski, R.; Nota, A.; Hack, W. J., A Comparison Between FOC and WFPC2
+    # Bibcode : 1995chst.conf...10J
     I_stokes, Q_stokes, U_stokes, Stokes_cov = proj_red.compute_Stokes(data_array, error_array, data_mask, headers, FWHM=smoothing_FWHM, scale=smoothing_scale, smoothing=smoothing_function, transmitcorr=False)
     I_bkg, Q_bkg, U_bkg, S_cov_bkg = proj_red.compute_Stokes(background, background_error, np.array(True).reshape(1, 1), headers, FWHM=None, scale=smoothing_scale, smoothing=smoothing_function, transmitcorr=False)
 
-    ## Step 3:
-    # Rotate images to have North up
+    # Step 3:
+    # Rotate images to have North up
     if rotate_stokes:
         I_stokes, Q_stokes, U_stokes, Stokes_cov, data_mask, headers = proj_red.rotate_Stokes(I_stokes, Q_stokes, U_stokes, Stokes_cov, data_mask, headers, SNRi_cut=None)
         I_bkg, Q_bkg, U_bkg, S_cov_bkg, _, _ = proj_red.rotate_Stokes(I_bkg, Q_bkg, U_bkg, S_cov_bkg, np.array(True).reshape(1, 1), headers, SNRi_cut=None)
 
-    # Compute polarimetric parameters (polarisation degree and angle).
+    # Compute polarimetric parameters (polarisation degree and angle).
     P, debiased_P, s_P, s_P_P, PA, s_PA, s_PA_P = proj_red.compute_pol(I_stokes, Q_stokes, U_stokes, Stokes_cov, headers)
     P_bkg, debiased_P_bkg, s_P_bkg, s_P_P_bkg, PA_bkg, s_PA_bkg, s_PA_P_bkg = proj_red.compute_pol(I_bkg, Q_bkg, U_bkg, S_cov_bkg, headers)
 
-    ## Step 4:
-    # Save image to FITS.
+    # Step 4:
+    # Save image to FITS.
     Stokes_test = proj_fits.save_Stokes(I_stokes, Q_stokes, U_stokes, Stokes_cov, P, debiased_P, s_P, s_P_P, PA, s_PA, s_PA_P, headers, data_mask, "_".join([figname, figtype]), data_folder=data_folder, return_hdul=True)
     data_mask = Stokes_test[-1].data.astype(bool)
 
-    ## Step 5:
-    # crop to desired region of interest (roi)
+    # Step 5:
+    # Crop to desired region of interest (ROI)
     if crop:
         figtype += "_crop"
         stokescrop = proj_plots.crop_Stokes(deepcopy(Stokes_test), norm=LogNorm())
@@ -183,19 +185,29 @@
         print("PA_bkg = {0:.1f} ± {1:.1f} °".format(PA_bkg[0, 0], np.ceil(s_PA_bkg[0, 0]*10.)/10.))
 
     # Plot polarisation map (Background is either total Flux, Polarization degree or Polarization degree error).
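[Review note] SNRp_cut and SNRi_cut from the top of main() drive the vector masking in the polarisation_map calls below. lib.plots is not part of this diff; presumably the cut reduces to something like this sketch, where Stokes_cov is assumed to hold the per-pixel I/Q/U covariance with sigma_I² in its first diagonal element:

```python
import numpy as np

def snr_mask(I_stokes, Stokes_cov, debiased_P, s_P, SNRi_cut=30., SNRp_cut=3.):
    """Keep pixels where both I/sigma_I and P/sigma_P pass their cuts (assumed layout)."""
    s_I = np.sqrt(Stokes_cov[0, 0])  # sigma_I map, assuming cov[0, 0] = Var(I)
    with np.errstate(divide='ignore', invalid='ignore'):
        return (I_stokes / s_I > SNRi_cut) & (debiased_P / s_P > SNRp_cut)
```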
if px_scale.lower() not in ['full', 'integrate'] and not interactive: - proj_plots.polarisation_map(deepcopy(Stokes_test), data_mask, SNRp_cut=SNRp_cut, SNRi_cut=SNRi_cut, flux_lim=flux_lim, step_vec=step_vec, vec_scale=vec_scale, savename="_".join([figname, figtype]), plots_folder=plots_folder) - proj_plots.polarisation_map(deepcopy(Stokes_test), data_mask, SNRp_cut=SNRp_cut, SNRi_cut=SNRi_cut, flux_lim=flux_lim, step_vec=step_vec, vec_scale=vec_scale, savename="_".join([figname, figtype, "I"]), plots_folder=plots_folder, display='Intensity') - proj_plots.polarisation_map(deepcopy(Stokes_test), data_mask, SNRp_cut=SNRp_cut, SNRi_cut=SNRi_cut, flux_lim=flux_lim, step_vec=step_vec, vec_scale=vec_scale, savename="_".join([figname, figtype, "P_flux"]), plots_folder=plots_folder, display='Pol_Flux') - proj_plots.polarisation_map(deepcopy(Stokes_test), data_mask, SNRp_cut=SNRp_cut, SNRi_cut=SNRi_cut, flux_lim=flux_lim, step_vec=step_vec, vec_scale=vec_scale, savename="_".join([figname, figtype, "P"]), plots_folder=plots_folder, display='Pol_deg') - proj_plots.polarisation_map(deepcopy(Stokes_test), data_mask, SNRp_cut=SNRp_cut, SNRi_cut=SNRi_cut, flux_lim=flux_lim, step_vec=step_vec, vec_scale=vec_scale, savename="_".join([figname, figtype, "PA"]), plots_folder=plots_folder, display='Pol_ang') - proj_plots.polarisation_map(deepcopy(Stokes_test), data_mask, SNRp_cut=SNRp_cut, SNRi_cut=SNRi_cut, flux_lim=flux_lim, step_vec=step_vec, vec_scale=vec_scale, savename="_".join([figname, figtype, "I_err"]), plots_folder=plots_folder, display='I_err') - proj_plots.polarisation_map(deepcopy(Stokes_test), data_mask, SNRp_cut=SNRp_cut, SNRi_cut=SNRi_cut, flux_lim=flux_lim, step_vec=step_vec, vec_scale=vec_scale, savename="_".join([figname, figtype, "P_err"]), plots_folder=plots_folder, display='Pol_deg_err') - proj_plots.polarisation_map(deepcopy(Stokes_test), data_mask, SNRp_cut=SNRp_cut, SNRi_cut=SNRi_cut, flux_lim=flux_lim, step_vec=step_vec, vec_scale=vec_scale, savename="_".join([figname, figtype, "SNRi"]), plots_folder=plots_folder, display='SNRi') - proj_plots.polarisation_map(deepcopy(Stokes_test), data_mask, SNRp_cut=SNRp_cut, SNRi_cut=SNRi_cut, flux_lim=flux_lim, step_vec=step_vec, vec_scale=vec_scale, savename="_".join([figname, figtype, "SNRp"]), plots_folder=plots_folder, display='SNRp') + proj_plots.polarisation_map(deepcopy(Stokes_test), data_mask, SNRp_cut=SNRp_cut, SNRi_cut=SNRi_cut, flux_lim=flux_lim, + step_vec=step_vec, vec_scale=vec_scale, savename="_".join([figname, figtype]), plots_folder=plots_folder) + proj_plots.polarisation_map(deepcopy(Stokes_test), data_mask, SNRp_cut=SNRp_cut, SNRi_cut=SNRi_cut, flux_lim=flux_lim, step_vec=step_vec, + vec_scale=vec_scale, savename="_".join([figname, figtype, "I"]), plots_folder=plots_folder, display='Intensity') + proj_plots.polarisation_map(deepcopy(Stokes_test), data_mask, SNRp_cut=SNRp_cut, SNRi_cut=SNRi_cut, flux_lim=flux_lim, step_vec=step_vec, + vec_scale=vec_scale, savename="_".join([figname, figtype, "P_flux"]), plots_folder=plots_folder, display='Pol_Flux') + proj_plots.polarisation_map(deepcopy(Stokes_test), data_mask, SNRp_cut=SNRp_cut, SNRi_cut=SNRi_cut, flux_lim=flux_lim, step_vec=step_vec, + vec_scale=vec_scale, savename="_".join([figname, figtype, "P"]), plots_folder=plots_folder, display='Pol_deg') + proj_plots.polarisation_map(deepcopy(Stokes_test), data_mask, SNRp_cut=SNRp_cut, SNRi_cut=SNRi_cut, flux_lim=flux_lim, step_vec=step_vec, + vec_scale=vec_scale, savename="_".join([figname, figtype, "PA"]), plots_folder=plots_folder, 
display='Pol_ang') + proj_plots.polarisation_map(deepcopy(Stokes_test), data_mask, SNRp_cut=SNRp_cut, SNRi_cut=SNRi_cut, flux_lim=flux_lim, step_vec=step_vec, + vec_scale=vec_scale, savename="_".join([figname, figtype, "I_err"]), plots_folder=plots_folder, display='I_err') + proj_plots.polarisation_map(deepcopy(Stokes_test), data_mask, SNRp_cut=SNRp_cut, SNRi_cut=SNRi_cut, flux_lim=flux_lim, step_vec=step_vec, + vec_scale=vec_scale, savename="_".join([figname, figtype, "P_err"]), plots_folder=plots_folder, display='Pol_deg_err') + proj_plots.polarisation_map(deepcopy(Stokes_test), data_mask, SNRp_cut=SNRp_cut, SNRi_cut=SNRi_cut, flux_lim=flux_lim, step_vec=step_vec, + vec_scale=vec_scale, savename="_".join([figname, figtype, "SNRi"]), plots_folder=plots_folder, display='SNRi') + proj_plots.polarisation_map(deepcopy(Stokes_test), data_mask, SNRp_cut=SNRp_cut, SNRi_cut=SNRi_cut, flux_lim=flux_lim, step_vec=step_vec, + vec_scale=vec_scale, savename="_".join([figname, figtype, "SNRp"]), plots_folder=plots_folder, display='SNRp') elif not interactive: - proj_plots.polarisation_map(deepcopy(Stokes_test), data_mask, SNRp_cut=SNRp_cut, SNRi_cut=SNRi_cut, savename="_".join([figname, figtype]), plots_folder=plots_folder, display='integrate') + proj_plots.polarisation_map(deepcopy(Stokes_test), data_mask, SNRp_cut=SNRp_cut, SNRi_cut=SNRi_cut, + savename="_".join([figname, figtype]), plots_folder=plots_folder, display='integrate') elif px_scale.lower() not in ['full', 'integrate']: - pol_map = proj_plots.pol_map(Stokes_test, SNRp_cut=SNRp_cut, SNRi_cut=SNRi_cut, flux_lim=flux_lim) + proj_plots.pol_map(Stokes_test, SNRp_cut=SNRp_cut, SNRi_cut=SNRi_cut, flux_lim=flux_lim) return 0 @@ -204,18 +216,15 @@ if __name__ == "__main__": import argparse parser = argparse.ArgumentParser(description='Query MAST for target products') - parser.add_argument('-t', '--target', metavar='targetname', required=False, - help='the name of the target', type=str, default=None) - parser.add_argument('-p', '--proposal_id', metavar='proposal_id', required=False, - help='the proposal id of the data products', type=int, default=None) - parser.add_argument('-f', '--files', metavar='path', required=False, nargs='*', - help='the full or relative path to the data products', default=None) + parser.add_argument('-t', '--target', metavar='targetname', required=False, help='the name of the target', type=str, default=None) + parser.add_argument('-p', '--proposal_id', metavar='proposal_id', required=False, help='the proposal id of the data products', type=int, default=None) + parser.add_argument('-f', '--files', metavar='path', required=False, nargs='*', help='the full or relative path to the data products', default=None) parser.add_argument('-o', '--output_dir', metavar='directory_path', required=False, help='output directory path for the data products', type=str, default="./data") - parser.add_argument('-c', '--crop', metavar='crop_boolean', required=False, - help='whether to crop the analysis region', type=int, default=0) + parser.add_argument('-c', '--crop', metavar='crop_boolean', required=False, help='whether to crop the analysis region', type=int, default=0) parser.add_argument('-i', '--interactive', metavar='interactive_boolean', required=False, help='whether to output to the interactive analysis tool', type=int, default=0) args = parser.parse_args() - exitcode = main(target=args.target, proposal_id=args.proposal_id, infiles=args.files, output_dir=args.output_dir, crop=args.crop, interactive=args.interactive) + exitcode = 
main(target=args.target, proposal_id=args.proposal_id, infiles=args.files,
+                    output_dir=args.output_dir, crop=args.crop, interactive=args.interactive)
     print("Finished with ExitCode: ", exitcode)
diff --git a/src/analysis.py b/src/analysis.py
index 24775b7..9d2aaff 100755
--- a/src/analysis.py
+++ b/src/analysis.py
@@ -4,7 +4,7 @@ from sys import argv
 arglist = argv[1:]
 
 options = "hf:p:i:l:"
-long_options = ["help","fits=","snrp=","snri=","lim="]
+long_options = ["help", "fits=", "snrp=", "snri=", "lim="]
 
 fits_path = None
 SNRp_cut, SNRi_cut = 3, 30
@@ -28,12 +28,12 @@ try:
 except get_error as err:
     print(str(err))
 
-if not fits_path is None:
+if fits_path is not None:
     from astropy.io import fits
     from lib.plots import pol_map
 
     Stokes_UV = fits.open(fits_path)
-    p = pol_map(Stokes_UV, SNRp_cut=SNRp_cut, SNRi_cut=SNRi_cut,flux_lim=flux_lim)
+    p = pol_map(Stokes_UV, SNRp_cut=SNRp_cut, SNRi_cut=SNRi_cut, flux_lim=flux_lim)
 else:
     print("python3 analysis.py -f <fits_path> -p <SNRp_cut> -i <SNRi_cut> -l <flux_lim>")
 
diff --git a/src/lib/background.py b/src/lib/background.py
index 19a1cee..202b6b8 100755
--- a/src/lib/background.py
+++ b/src/lib/background.py
@@ -9,7 +9,6 @@ prototypes :
   - bkg_mini(data, error, mask, headers, sub_shape, display, savename, plots_folder) -> n_data_array, n_error_array, headers, background)
     Compute the error (noise) of the input array by looking at the sub-region of minimal flux in every image and of shape sub_shape.
 """
-import sys
 from os.path import join as path_join
 from copy import deepcopy
 import numpy as np
@@ -21,36 +20,40 @@ from datetime import datetime
 from lib.plots import plot_obs
 from scipy.optimize import curve_fit
 
+
 def gauss(x, *p):
     N, mu, sigma = p
     return N*np.exp(-(x-mu)**2/(2.*sigma**2))
 
+
 def gausspol(x, *p):
     N, mu, sigma, a, b, c, d = p
     return N*np.exp(-(x-mu)**2/(2.*sigma**2)) + a*np.log(x) + b/x + c*x + d
 
+
 def bin_centers(edges):
     return (edges[1:]+edges[:-1])/2.
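[Review note] gauss is kept for reference, but the estimators below fit gausspol — a gaussian plus a smooth a·log(x) + b/x + c·x + d baseline — to the sky histogram. A usage example on synthetic data, seeding p0 from the histogram peak and FWHM exactly as bkg_estimate and bkg_hist do (assumes it runs in this module's namespace, where gausspol and bin_centers are defined):

```python
import numpy as np
from scipy.optimize import curve_fit

rng = np.random.default_rng(0)
sky = rng.normal(5e-3, 1e-3, 10000)              # synthetic sky count-rates
hist, edges = np.histogram(sky[sky > 0], bins=64)
centers = bin_centers(edges)                     # helper defined above
peak = centers[np.argmax(hist)]
fwhm_bins = centers[hist > hist.max() / 2.]      # bins above half maximum
p0 = [hist.max(), peak, fwhm_bins[-1] - fwhm_bins[0], 1e-3, 1e-3, 1e-3, 1e-3]
popt, pcov = curve_fit(gausspol, centers, hist, p0=p0)
print("background estimate (gaussian mu):", popt[1])
```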
+ def display_bkg(data, background, std_bkg, headers, histograms=None, binning=None, coeff=None, rectangle=None, savename=None, plots_folder="./"): plt.rcParams.update({'font.size': 15}) convert_flux = np.array([head['photflam'] for head in headers]) date_time = np.array([headers[i]['date-obs']+';'+headers[i]['time-obs'] - for i in range(len(headers))]) - date_time = np.array([datetime.strptime(d,'%Y-%m-%d;%H:%M:%S') - for d in date_time]) + for i in range(len(headers))]) + date_time = np.array([datetime.strptime(d, '%Y-%m-%d;%H:%M:%S') + for d in date_time]) filt = np.array([headers[i]['filtnam1'] for i in range(len(headers))]) - dict_filt = {"POL0":'r', "POL60":'g', "POL120":'b'} + dict_filt = {"POL0": 'r', "POL60": 'g', "POL120": 'b'} c_filt = np.array([dict_filt[f] for f in filt]) - fig,ax = plt.subplots(figsize=(10,6), constrained_layout=True) + fig, ax = plt.subplots(figsize=(10, 6), constrained_layout=True) for f in np.unique(filt): - mask = [fil==f for fil in filt] + mask = [fil == f for fil in filt] ax.scatter(date_time[mask], background[mask]*convert_flux[mask], - color=dict_filt[f],label="{0:s}".format(f)) + color=dict_filt[f], label="{0:s}".format(f)) ax.errorbar(date_time, background*convert_flux, - yerr=std_bkg*convert_flux, fmt='+k', - markersize=0, ecolor=c_filt) + yerr=std_bkg*convert_flux, fmt='+k', + markersize=0, ecolor=c_filt) # Date handling locator = mdates.AutoDateLocator() formatter = mdates.ConciseDateFormatter(locator) @@ -60,85 +63,89 @@ def display_bkg(data, background, std_bkg, headers, histograms=None, binning=Non ax.set_xlabel("Observation date and time") ax.set_ylabel(r"Flux [$ergs \cdot cm^{-2} \cdot s^{-1} \cdot \AA^{-1}$]") plt.legend() - if not(savename is None): + if not (savename is None): this_savename = deepcopy(savename) if not savename[-4:] in ['.png', '.jpg', '.pdf']: this_savename += '_background_flux.pdf' else: this_savename = savename[:-4]+"_background_flux"+savename[-4:] - fig.savefig(path_join(plots_folder,this_savename), bbox_inches='tight') + fig.savefig(path_join(plots_folder, this_savename), bbox_inches='tight') - if not(histograms is None): - filt_obs = {"POL0":0, "POL60":0, "POL120":0} - fig_h, ax_h = plt.subplots(figsize=(10,6), constrained_layout=True) + if not (histograms is None): + filt_obs = {"POL0": 0, "POL60": 0, "POL120": 0} + fig_h, ax_h = plt.subplots(figsize=(10, 6), constrained_layout=True) for i, (hist, bins) in enumerate(zip(histograms, binning)): filt_obs[headers[i]['filtnam1']] += 1 - ax_h.plot(bins*convert_flux[i],hist,'+',color="C{0:d}".format(i),alpha=0.8,label=headers[i]['filtnam1']+' (Obs '+str(filt_obs[headers[i]['filtnam1']])+')') - ax_h.plot([background[i]*convert_flux[i],background[i]*convert_flux[i]],[hist.min(), hist.max()],'x--',color="C{0:d}".format(i),alpha=0.8) - if not(coeff is None): - ax_h.plot(bins*convert_flux[i],gausspol(bins,*coeff[i]),'--',color="C{0:d}".format(i),alpha=0.8) + ax_h.plot(bins*convert_flux[i], hist, '+', color="C{0:d}".format(i), alpha=0.8, + label=headers[i]['filtnam1']+' (Obs '+str(filt_obs[headers[i]['filtnam1']])+')') + ax_h.plot([background[i]*convert_flux[i], background[i]*convert_flux[i]], [hist.min(), hist.max()], 'x--', color="C{0:d}".format(i), alpha=0.8) + if not (coeff is None): + ax_h.plot(bins*convert_flux[i], gausspol(bins, *coeff[i]), '--', color="C{0:d}".format(i), alpha=0.8) ax_h.set_xscale('log') - ax_h.set_ylim([0.,np.max([hist.max() for hist in histograms])]) - ax_h.set_xlim([np.min(background*convert_flux)*1e-2,np.max(background*convert_flux)*1e2]) + 
ax_h.set_ylim([0., np.max([hist.max() for hist in histograms])]) + ax_h.set_xlim([np.min(background*convert_flux)*1e-2, np.max(background*convert_flux)*1e2]) ax_h.set_xlabel(r"Flux [$ergs \cdot cm^{-2} \cdot s^{-1} \cdot \AA^{-1}$]") ax_h.set_ylabel(r"Number of pixels in bin") ax_h.set_title("Histogram for each observation") plt.legend() - if not(savename is None): + if not (savename is None): this_savename = deepcopy(savename) if not savename[-4:] in ['.png', '.jpg', '.pdf']: this_savename += '_histograms.pdf' else: this_savename = savename[:-4]+"_histograms"+savename[-4:] - fig_h.savefig(path_join(plots_folder,this_savename), bbox_inches='tight') + fig_h.savefig(path_join(plots_folder, this_savename), bbox_inches='tight') - fig2, ax2 = plt.subplots(figsize=(10,10)) + fig2, ax2 = plt.subplots(figsize=(10, 10)) data0 = data[0]*convert_flux[0] bkg_data0 = data0 <= background[0]*convert_flux[0] instr = headers[0]['instrume'] rootname = headers[0]['rootname'] exptime = headers[0]['exptime'] filt = headers[0]['filtnam1'] - #plots - im2 = ax2.imshow(data0, norm=LogNorm(data0[data0>0.].mean()/10.,data0.max()), origin='lower', cmap='gray') - bkg_im = ax2.imshow(bkg_data0, origin='lower', cmap='Reds', alpha=0.5) - if not(rectangle is None): + # plots + im2 = ax2.imshow(data0, norm=LogNorm(data0[data0 > 0.].mean()/10., data0.max()), origin='lower', cmap='gray') + ax2.imshow(bkg_data0, origin='lower', cmap='Reds', alpha=0.5) + if not (rectangle is None): x, y, width, height, angle, color = rectangle[0] - ax2.add_patch(Rectangle((x, y),width,height,edgecolor=color,fill=False,lw=2)) - ax2.annotate(instr+":"+rootname, color='white', fontsize=10, xy=(0.01, 1.00), xycoords='axes fraction',verticalalignment='top', horizontalalignment='left') + ax2.add_patch(Rectangle((x, y), width, height, edgecolor=color, fill=False, lw=2)) + ax2.annotate(instr+":"+rootname, color='white', fontsize=10, xy=(0.01, 1.00), xycoords='axes fraction', verticalalignment='top', horizontalalignment='left') ax2.annotate(filt, color='white', fontsize=14, xy=(0.01, 0.01), xycoords='axes fraction', verticalalignment='bottom', horizontalalignment='left') - ax2.annotate(str(exptime)+" s", color='white', fontsize=10, xy=(1.00, 0.01), xycoords='axes fraction', verticalalignment='bottom', horizontalalignment='right') - ax2.set(xlabel='pixel offset', ylabel='pixel offset',aspect='equal') + ax2.annotate(str(exptime)+" s", color='white', fontsize=10, xy=(1.00, 0.01), + xycoords='axes fraction', verticalalignment='bottom', horizontalalignment='right') + ax2.set(xlabel='pixel offset', ylabel='pixel offset', aspect='equal') fig2.subplots_adjust(hspace=0, wspace=0, right=1.0) fig2.colorbar(im2, ax=ax2, location='right', aspect=50, pad=0.025, label=r"Flux [$ergs \cdot cm^{-2} \cdot s^{-1} \cdot \AA^{-1}$]") - if not(savename is None): + if not (savename is None): this_savename = deepcopy(savename) if not savename[-4:] in ['.png', '.jpg', '.pdf']: this_savename += '_'+filt+'_background_location.pdf' else: this_savename = savename[:-4]+'_'+filt+'_background_location'+savename[-4:] - fig2.savefig(path_join(plots_folder,this_savename), bbox_inches='tight') - if not(rectangle is None): + fig2.savefig(path_join(plots_folder, this_savename), bbox_inches='tight') + if not (rectangle is None): plot_obs(data, headers, vmin=data[data > 0.].min()*convert_flux.mean(), vmax=data[data > 0.].max()*convert_flux.mean(), rectangle=rectangle, - savename=savename+"_background_location",plots_folder=plots_folder) - elif not(rectangle is None): + 
savename=savename+"_background_location", plots_folder=plots_folder) + elif not (rectangle is None): plot_obs(data, headers, vmin=data[data > 0.].min(), vmax=data[data > 0.].max(), rectangle=rectangle) plt.show() + def sky_part(img): - rand_ind = np.unique((np.random.rand(np.floor(img.size/4).astype(int))*2*img.size).astype(int)%img.size) + rand_ind = np.unique((np.random.rand(np.floor(img.size/4).astype(int))*2*img.size).astype(int) % img.size) rand_pix = img.flatten()[rand_ind] # Intensity range sky_med = np.median(rand_pix) - sig = np.min([img[imgsky_med].std()]) - sky_range = [sky_med-2.*sig, np.max([sky_med+sig,7e-4])] #Detector background average FOC Data Handbook Sec. 7.6 + sig = np.min([img[img < sky_med].std(), img[img > sky_med].std()]) + sky_range = [sky_med-2.*sig, np.max([sky_med+sig, 7e-4])] # Detector background average FOC Data Handbook Sec. 7.6 - sky = img[np.logical_and(img>=sky_range[0],img<=sky_range[1])] + sky = img[np.logical_and(img >= sky_range[0], img <= sky_range[1])] return sky, sky_range + def bkg_estimate(img, bins=None, chi2=None, coeff=None): if bins is None or chi2 is None or coeff is None: bins, chi2, coeff = [8], [], [] @@ -147,20 +154,21 @@ def bkg_estimate(img, bins=None, chi2=None, coeff=None): bins.append(int(3./2.*bins[-1])) except IndexError: bins, chi2, coeff = [8], [], [] - hist, bin_edges = np.histogram(img[img>0], bins=bins[-1]) + hist, bin_edges = np.histogram(img[img > 0], bins=bins[-1]) binning = bin_centers(bin_edges) peak = binning[np.argmax(hist)] - bins_fwhm = binning[hist>hist.max()/2.] + bins_fwhm = binning[hist > hist.max()/2.] fwhm = bins_fwhm[-1]-bins_fwhm[0] p0 = [hist.max(), peak, fwhm, 1e-3, 1e-3, 1e-3, 1e-3] try: popt, pcov = curve_fit(gausspol, binning, hist, p0=p0) except RuntimeError: popt = p0 - chi2.append(np.sum((hist - gausspol(binning,*popt))**2)/hist.size) + chi2.append(np.sum((hist - gausspol(binning, *popt))**2)/hist.size) coeff.append(popt) return bins, chi2, coeff + def bkg_fit(data, error, mask, headers, subtract_error=True, display=False, savename=None, plots_folder=""): """ ---------- @@ -208,13 +216,13 @@ def bkg_fit(data, error, mask, headers, subtract_error=True, display=False, save std_bkg = np.zeros((data.shape[0])) background = np.zeros((data.shape[0])) histograms, binning = [], [] - + for i, image in enumerate(data): - #Compute the Count-rate histogram for the image - sky, sky_range = sky_part(image[image>0.]) + # Compute the Count-rate histogram for the image + sky, sky_range = sky_part(image[image > 0.]) bins, chi2, coeff = bkg_estimate(sky) - while bins[-1]<256: + while bins[-1] < 256: bins, chi2, coeff = bkg_estimate(sky, bins, chi2, coeff) hist, bin_edges = np.histogram(sky, bins=bins[-1]) histograms.append(hist) @@ -223,18 +231,18 @@ def bkg_fit(data, error, mask, headers, subtract_error=True, display=False, save weights = 1/chi2**2 weights /= weights.sum() - bkg = np.sum(weights*coeff[:,1])*subtract_error if subtract_error>0 else np.sum(weights*coeff[:,1]) - + bkg = np.sum(weights*coeff[:, 1])*subtract_error if subtract_error > 0 else np.sum(weights*coeff[:, 1]) + error_bkg[i] *= bkg - + n_error_array[i] = np.sqrt(n_error_array[i]**2 + error_bkg[i]**2) - - #Substract background - if subtract_error>0: + + # Substract background + if subtract_error > 0: n_data_array[i][mask] = n_data_array[i][mask] - bkg - n_data_array[i][np.logical_and(mask,n_data_array[i] <= 0.01*bkg)] = 0.01*bkg - - std_bkg[i] = image[np.abs(image-bkg)/bkg<1.].std() + n_data_array[i][np.logical_and(mask, n_data_array[i] <= 0.01*bkg)] 
-            n_data_array[i][np.logical_and(mask,n_data_array[i] <= 0.01*bkg)] = 0.01*bkg
-
-        std_bkg[i] = image[np.abs(image-bkg)/bkg<1.].std()
+            n_data_array[i][np.logical_and(mask, n_data_array[i] <= 0.01*bkg)] = 0.01*bkg
+
+        std_bkg[i] = image[np.abs(image-bkg)/bkg < 1.].std()
         background[i] = bkg
 
     if display:
@@ -293,52 +301,54 @@
     std_bkg = np.zeros((data.shape[0]))
     background = np.zeros((data.shape[0]))
     histograms, binning, coeff = [], [], []
-
+
     for i, image in enumerate(data):
-        #Compute the Count-rate histogram for the image
-        n_mask = np.logical_and(mask,image>0.)
+        # Compute the Count-rate histogram for the image
+        n_mask = np.logical_and(mask, image > 0.)
         if not (sub_type is None):
-            if type(sub_type) == int:
+            if isinstance(sub_type, int):
                 n_bins = sub_type
             elif sub_type.lower() in ['sqrt']:
-                n_bins = np.fix(np.sqrt(image[n_mask].size)).astype(int) # Square-root
+                n_bins = np.fix(np.sqrt(image[n_mask].size)).astype(int)  # Square-root
             elif sub_type.lower() in ['sturges']:
-                n_bins = np.ceil(np.log2(image[n_mask].size)).astype(int)+1 # Sturges
+                n_bins = np.ceil(np.log2(image[n_mask].size)).astype(int)+1  # Sturges
             elif sub_type.lower() in ['rice']:
-                n_bins = 2*np.fix(np.power(image[n_mask].size,1/3)).astype(int) # Rice
+                n_bins = 2*np.fix(np.power(image[n_mask].size, 1/3)).astype(int)  # Rice
            elif sub_type.lower() in ['scott']:
-                n_bins = np.fix((image[n_mask].max()-image[n_mask].min())/(3.5*image[n_mask].std()/np.power(image[n_mask].size,1/3))).astype(int) # Scott
+                n_bins = np.fix((image[n_mask].max()-image[n_mask].min())/(3.5*image[n_mask].std()/np.power(image[n_mask].size, 1/3))).astype(int)  # Scott
             else:
-                n_bins = np.fix((image[n_mask].max()-image[n_mask].min())/(2*np.subtract(*np.percentile(image[n_mask], [75, 25]))/np.power(image[n_mask].size,1/3))).astype(int) # Freedman-Diaconis
+                n_bins = np.fix((image[n_mask].max()-image[n_mask].min())/(2*np.subtract(*np.percentile(image[n_mask], [75, 25])) /
+                                np.power(image[n_mask].size, 1/3))).astype(int)  # Freedman-Diaconis
         else:
-            n_bins = np.fix((image[n_mask].max()-image[n_mask].min())/(2*np.subtract(*np.percentile(image[n_mask], [75, 25]))/np.power(image[n_mask].size,1/3))).astype(int) # Freedman-Diaconis
-
-        hist, bin_edges = np.histogram(np.log(image[n_mask]),bins=n_bins)
+            n_bins = np.fix((image[n_mask].max()-image[n_mask].min())/(2*np.subtract(*np.percentile(image[n_mask], [75, 25])) /
+                            np.power(image[n_mask].size, 1/3))).astype(int)  # Freedman-Diaconis
+
+        hist, bin_edges = np.histogram(np.log(image[n_mask]), bins=n_bins)
         histograms.append(hist)
         binning.append(np.exp(bin_centers(bin_edges)))
-
-        #Take the background as the count-rate with the maximum number of pixels
-        #hist_max = binning[-1][np.argmax(hist)]
-        #bkg = np.sqrt(np.sum(image[np.abs(image-hist_max)/hist_max<0.5]**2)/image[np.abs(image-hist_max)/hist_max<0.5].size)
-
-        #Fit a gaussian to the log-intensity histogram
-        bins_fwhm = binning[-1][hist>hist.max()/2.]
+
+        # Take the background as the count-rate with the maximum number of pixels
+        # hist_max = binning[-1][np.argmax(hist)]
+        # bkg = np.sqrt(np.sum(image[np.abs(image-hist_max)/hist_max<0.5]**2)/image[np.abs(image-hist_max)/hist_max<0.5].size)
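[Review note] The hand-rolled bin counts above (sqrt, Sturges, Rice, Scott, Freedman-Diaconis) follow the standard estimators that numpy already ships, so they can be cross-checked against np.histogram_bin_edges:

```python
import numpy as np

x = np.random.default_rng(1).lognormal(size=4096)
for rule in ("sqrt", "sturges", "rice", "scott", "fd"):
    print(rule, len(np.histogram_bin_edges(x, bins=rule)) - 1)  # number of bins
```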
+
+        # Fit a gaussian to the log-intensity histogram
+        bins_fwhm = binning[-1][hist > hist.max()/2.]
         fwhm = bins_fwhm[-1]-bins_fwhm[0]
         p0 = [hist.max(), binning[-1][np.argmax(hist)], fwhm, 1e-3, 1e-3, 1e-3, 1e-3]
         popt, pcov = curve_fit(gausspol, binning[-1], hist, p0=p0)
         coeff.append(popt)
-        bkg = popt[1]*subtract_error if subtract_error>0 else popt[1]
-
+        bkg = popt[1]*subtract_error if subtract_error > 0 else popt[1]
+
         error_bkg[i] *= bkg
-
+
         n_error_array[i] = np.sqrt(n_error_array[i]**2 + error_bkg[i]**2)
-
-        #Substract background
+
+        # Subtract background
         if subtract_error > 0:
             n_data_array[i][mask] = n_data_array[i][mask] - bkg
-            n_data_array[i][np.logical_and(mask,n_data_array[i] < 0.)] = 0.
-
-        std_bkg[i] = image[np.abs(image-bkg)/bkg<1.].std()
+            n_data_array[i][np.logical_and(mask, n_data_array[i] < 0.)] = 0.
+
+        std_bkg[i] = image[np.abs(image-bkg)/bkg < 1.].std()
         background[i] = bkg
 
     if display:
@@ -346,7 +356,7 @@ def bkg_hist(data, error, mask, headers, sub_type=None, subtract_error=True, dis
     return n_data_array, n_error_array, headers, background
 
 
-def bkg_mini(data, error, mask, headers, sub_shape=(15,15), subtract_error=True, display=False, savename=None, plots_folder=""):
+def bkg_mini(data, error, mask, headers, sub_shape=(15, 15), subtract_error=True, display=False, savename=None, plots_folder=""):
     """
     Look for sub-image of shape sub_shape that have the smallest integrated
     flux (no source assumption) and define the background on the image by the
@@ -396,11 +406,11 @@
     """
     sub_shape = np.array(sub_shape)
     # Make sub_shape of odd values
-    if not(np.all(sub_shape%2)):
-        sub_shape += 1-sub_shape%2
+    if not (np.all(sub_shape % 2)):
+        sub_shape += 1-sub_shape % 2
 
     shape = np.array(data.shape)
     diff = (sub_shape-1).astype(int)
-    temp = np.zeros((shape[0],shape[1]-diff[0],shape[2]-diff[1]))
+    temp = np.zeros((shape[0], shape[1]-diff[0], shape[2]-diff[1]))
 
     n_data_array, n_error_array = deepcopy(data), deepcopy(error)
     error_bkg = np.ones(n_data_array.shape)
@@ -408,37 +418,36 @@
     background = np.zeros((data.shape[0]))
     rectangle = []
 
-    for i,image in enumerate(data):
+    for i, image in enumerate(data):
         # Find the sub-image of smallest integrated flux (suppose no source)
-        #sub-image dominated by background
+        # sub-image dominated by background
         fmax = np.finfo(np.double).max
         img = deepcopy(image)
         img[1-mask] = fmax/(diff[0]*diff[1])
         for r in range(temp.shape[1]):
             for c in range(temp.shape[2]):
-                temp[i][r,c] = np.where(mask[r,c], img[r:r+diff[0],c:c+diff[1]].sum(), fmax/(diff[0]*diff[1]))
+                temp[i][r, c] = np.where(mask[r, c], img[r:r+diff[0], c:c+diff[1]].sum(), fmax/(diff[0]*diff[1]))
 
-    minima = np.unravel_index(np.argmin(temp.sum(axis=0)),temp.shape[1:])
+    minima = np.unravel_index(np.argmin(temp.sum(axis=0)), temp.shape[1:])
 
     for i, image in enumerate(data):
         rectangle.append([minima[1], minima[0], sub_shape[1], sub_shape[0], 0., 'r'])
         # Compute error : root mean square of the background
-        sub_image = image[minima[0]:minima[0]+sub_shape[0],minima[1]:minima[1]+sub_shape[1]]
-        #bkg = np.std(sub_image) # Previously computed using standard deviation over the background
-        bkg = np.sqrt(np.sum(sub_image**2)/sub_image.size)*subtract_error if subtract_error>0 else np.sqrt(np.sum(sub_image**2)/sub_image.size)
+        sub_image = image[minima[0]:minima[0]+sub_shape[0], minima[1]:minima[1]+sub_shape[1]]
+        # bkg = np.std(sub_image)  # Previously computed using standard deviation over the background
+        bkg = np.sqrt(np.sum(sub_image**2)/sub_image.size)*subtract_error if subtract_error > 0 else np.sqrt(np.sum(sub_image**2)/sub_image.size)
 
         error_bkg[i] *= bkg
 
         n_error_array[i] = np.sqrt(n_error_array[i]**2 + error_bkg[i]**2)
 
-        #Substract background
-        if subtract_error>0.:
+        # Subtract background
+        if subtract_error > 0.:
             n_data_array[i][mask] = n_data_array[i][mask] - bkg
-            n_data_array[i][np.logical_and(mask,n_data_array[i] <= 0.01*bkg)] = 0.01*bkg
-
-        std_bkg[i] = image[np.abs(image-bkg)/bkg<1.].std()
+            n_data_array[i][np.logical_and(mask, n_data_array[i] <= 0.01*bkg)] = 0.01*bkg
+
+        std_bkg[i] = image[np.abs(image-bkg)/bkg < 1.].std()
         background[i] = bkg
 
     if display:
         display_bkg(data, background, std_bkg, headers, rectangle=rectangle, savename=savename, plots_folder=plots_folder)
     return n_data_array, n_error_array, headers, background
-
diff --git a/src/lib/convex_hull.py b/src/lib/convex_hull.py
index fd6e3fc..7cdfcd8 100755
--- a/src/lib/convex_hull.py
+++ b/src/lib/convex_hull.py
@@ -1,6 +1,5 @@
 """
-Library functions for graham algorithm implementation (find the convex hull
-of a given list of points).
+Library functions for Graham's scan algorithm implementation (find the convex hull of a given list of points).
 """
 from copy import deepcopy
@@ -8,30 +7,33 @@ import numpy as np
 
 
 def clean_ROI(image):
-    H,J = [],[]
+    """
+    Remove the instrument borders from an observation.
+    """
+    H, J = [], []
     shape = np.array(image.shape)
     row, col = np.indices(shape)
-    for i in range(0,shape[0]):
-        r = row[i,:][image[i,:]>0.]
-        c = col[i,:][image[i,:]>0.]
-        if len(r)>1 and len(c)>1:
-            H.append((r[0],c[0]))
-            H.append((r[-1],c[-1]))
+    for i in range(0, shape[0]):
+        r = row[i, :][image[i, :] > 0.]
+        c = col[i, :][image[i, :] > 0.]
+        if len(r) > 1 and len(c) > 1:
+            H.append((r[0], c[0]))
+            H.append((r[-1], c[-1]))
     H = np.array(H)
-    for j in range(0,shape[1]):
-        r = row[:,j][image[:,j]>0.]
-        c = col[:,j][image[:,j]>0.]
-        if len(r)>1 and len(c)>1:
-            J.append((r[0],c[0]))
-            J.append((r[-1],c[-1]))
+    for j in range(0, shape[1]):
+        r = row[:, j][image[:, j] > 0.]
+        c = col[:, j][image[:, j] > 0.]
+        if len(r) > 1 and len(c) > 1:
+            J.append((r[0], c[0]))
+            J.append((r[-1], c[-1]))
     J = np.array(J)
-    xmin = np.min([H[:,1].min(),J[:,1].min()])
-    xmax = np.max([H[:,1].max(),J[:,1].max()])+1
-    ymin = np.min([H[:,0].min(),J[:,0].min()])
-    ymax = np.max([H[:,0].max(),J[:,0].max()])+1
-    return np.array([xmin,xmax,ymin,ymax])
+    xmin = np.min([H[:, 1].min(), J[:, 1].min()])
+    xmax = np.max([H[:, 1].max(), J[:, 1].max()])+1
+    ymin = np.min([H[:, 0].min(), J[:, 0].min()])
+    ymax = np.max([H[:, 0].max(), J[:, 0].max()])+1
+    return np.array([xmin, xmax, ymin, ymax])
 
 
 # Define angle and vectors operations
@@ -116,7 +118,8 @@ def min_lexico(s):
     """
     m = s[0]
     for x in s:
-        if lexico(x, m): m = x
+        if lexico(x, m):
+            m = x
     return m
@@ -145,16 +148,16 @@ def comp(Omega, A, B):
 # Implement quicksort
-def partition(s, l, r, order):
+def partition(s, left, right, order):
     """
-    Take a random element of a list 's' between indexes 'l', 'r' and place it
+    Take a random element of a list 's' between indexes 'left', 'right' and place it
     at its right spot using relation order 'order'. Return the index at
     which it was placed.
     ----------
     Inputs:
     s : list
        List of elements to be ordered.
-    l, r : int
+    left, right : int
        Index of the first and last elements to be considered.
     order : func: A, B -> bool
        Relation order between 2 elements A, B that returns True if A<=B,
@@ -164,30 +167,29 @@ def partition(s, left, right, order):
     index : int
        Index at which have been placed the element chosen by the function.
     """
-    i = l - 1
-    for j in range(l, r):
-        if order(s[j], s[r]):
+    i = left - 1
+    for j in range(left, right):
+        if order(s[j], s[right]):
             i = i + 1
             temp = deepcopy(s[i])
             s[i] = deepcopy(s[j])
             s[j] = deepcopy(temp)
     temp = deepcopy(s[i+1])
-    s[i+1] = deepcopy(s[r])
-    s[r] = deepcopy(temp)
+    s[i+1] = deepcopy(s[right])
+    s[right] = deepcopy(temp)
     return i + 1
 
 
-def sort_aux(s, l, r, order):
+def sort_aux(s, left, right, order):
     """
-    Sort a list 's' between indexes 'l', 'r' using relation order 'order' by
+    Sort a list 's' between indexes 'left', 'right' using relation order 'order' by
     dividing it in 2 sub-lists and sorting these.
     """
-    if l <= r:
-        # Call partition function that gives an index on which the list will be
-        #divided
-        q = partition(s, l, r, order)
-        sort_aux(s, l, q - 1, order)
-        sort_aux(s, q + 1, r, order)
+    if left <= right:
+        # Call partition function that gives an index on which the list will be divided
+        q = partition(s, left, right, order)
+        sort_aux(s, left, q - 1, order)
+        sort_aux(s, q + 1, right, order)
 
 
 def quicksort(s, order):
@@ -204,7 +206,7 @@ def sort_angles_distances(Omega, s):
     Sort the list of points 's' for the composition order given reference
     point Omega.
     """
-    order = lambda A, B: comp(Omega, A, B)
+    def order(A, B): return comp(Omega, A, B)
     quicksort(s, order)
@@ -326,24 +328,24 @@
     H = []
     shape = np.array(image.shape)
     row, col = np.indices(shape)
-    for i in range(0,shape[0],step):
-        r = row[i,:][image[i,:]>null_val]
-        c = col[i,:][image[i,:]>null_val]
-        if len(r)>1 and len(c)>1:
-            H.append((r[0],c[0]))
-            H.append((r[-1],c[-1]))
-    for j in range(0,shape[1],step):
-        r = row[:,j][image[:,j]>null_val]
-        c = col[:,j][image[:,j]>null_val]
-        if len(r)>1 and len(c)>1:
-            if not((r[0],c[0]) in H):
-                H.append((r[0],c[0]))
-            if not((r[-1],c[-1]) in H):
-                H.append((r[-1],c[-1]))
+    for i in range(0, shape[0], step):
+        r = row[i, :][image[i, :] > null_val]
+        c = col[i, :][image[i, :] > null_val]
+        if len(r) > 1 and len(c) > 1:
+            H.append((r[0], c[0]))
+            H.append((r[-1], c[-1]))
+    for j in range(0, shape[1], step):
+        r = row[:, j][image[:, j] > null_val]
+        c = col[:, j][image[:, j] > null_val]
+        if len(r) > 1 and len(c) > 1:
+            if not ((r[0], c[0]) in H):
+                H.append((r[0], c[0]))
+            if not ((r[-1], c[-1]) in H):
+                H.append((r[-1], c[-1]))
 
     S = np.array(convex_hull(H))
-    x_min, y_min = S[:,0]<S[:,0].mean(), S[:,1]<S[:,1].mean()
-    x_max, y_max = S[:,0]>S[:,0].mean(), S[:,1]>S[:,1].mean()
+    x_min, y_min = S[:, 0] < S[:, 0].mean(), S[:, 1] < S[:, 1].mean()
+    x_max, y_max = S[:, 0] > S[:, 0].mean(), S[:, 1] > S[:, 1].mean()
     # Get the 4 extrema
     S0 = S[x_min*y_min][np.abs(0-S[x_min*y_min].sum(axis=1)).min() == np.abs(0-S[x_min*y_min].sum(axis=1))][0]
     S1 = S[x_min*y_max][np.abs(shape[1]-S[x_min*y_max].sum(axis=1)).min() == np.abs(shape[1]-S[x_min*y_max].sum(axis=1))][0]
@@ -351,14 +353,14 @@
     S2 = S[x_max*y_min][np.abs(shape[0]-S[x_max*y_min].sum(axis=1)).min() == np.abs(shape[0]-S[x_max*y_min].sum(axis=1))][0]
     S3 = S[x_max*y_max][np.abs(shape.sum()-S[x_max*y_max].sum(axis=1)).min() == np.abs(shape.sum()-S[x_max*y_max].sum(axis=1))][0]
     # Get the vertex of the biggest included rectangle
     if inside:
-        f0 = np.max([S0[0],S1[0]])
-        f1 = np.min([S2[0],S3[0]])
-        f2 = np.max([S0[1],S2[1]])
-        f3 = np.min([S1[1],S3[1]])
+        f0 = np.max([S0[0], S1[0]])
+        f1 = np.min([S2[0], S3[0]])
+        f2 = np.max([S0[1], S2[1]])
+        f3 = np.min([S1[1], S3[1]])
else: - f0 = np.min([S0[0],S1[0]]) - f1 = np.max([S2[0],S3[0]]) - f2 = np.min([S0[1],S2[1]]) - f3 = np.max([S1[1],S3[1]]) + f0 = np.min([S0[0], S1[0]]) + f1 = np.max([S2[0], S3[0]]) + f2 = np.min([S0[1], S2[1]]) + f3 = np.max([S1[1], S3[1]]) return np.array([f0, f1, f2, f3]).astype(int) diff --git a/src/lib/cross_correlation.py b/src/lib/cross_correlation.py index 0a5d3f1..d963123 100755 --- a/src/lib/cross_correlation.py +++ b/src/lib/cross_correlation.py @@ -1,10 +1,10 @@ """ Library functions for phase cross-correlation computation. """ -##Prefer FFTs via the new scipy.fft module when available (SciPy 1.4+) -#Otherwise fall back to numpy.fft. -#Like numpy 1.15+ scipy 1.3+ is also using pocketfft, but a newer -#C++/pybind11 version called pypocketfft +# Prefer FFTs via the new scipy.fft module when available (SciPy 1.4+) +# Otherwise fall back to numpy.fft. +# Like numpy 1.15+ scipy 1.3+ is also using pocketfft, but a newer +# C++/pybind11 version called pypocketfft try: import scipy.fft as fft except ImportError: @@ -14,7 +14,7 @@ import numpy as np def _upsampled_dft(data, upsampled_region_size, upsample_factor=1, - axis_offsets=None): + axis_offsets=None): """ Upsampled DFT by matrix multiplication. This code is intended to provide the same result as if the following @@ -243,7 +243,7 @@ def phase_cross_correlation(reference_image, moving_image, *, raise ValueError( "NaN values found, please remove NaNs from your input data") - return shifts, _compute_error(CCmax, src_amp, target_amp),\ + return shifts, _compute_error(CCmax, src_amp, target_amp), \ _compute_phasediff(CCmax) else: return shifts diff --git a/src/lib/deconvolve.py b/src/lib/deconvolve.py index dc12faa..7aefff6 100755 --- a/src/lib/deconvolve.py +++ b/src/lib/deconvolve.py @@ -4,13 +4,13 @@ Library functions for the implementation of various deconvolution algorithms. prototypes : - gaussian_psf(FWHM, shape) -> kernel Return the normalized gaussian point spread function over some kernel shape. - + - from_file_psf(filename) -> kernel Get the point spread function from an external FITS file. - + - wiener(image, psf, alpha, clip) -> im_deconv Implement the simplified Wiener filtering. - + - van_cittert(image, psf, alpha, iterations, clip, filter_epsilon) -> im_deconv Implement Van-Cittert iterative algorithm. @@ -43,494 +43,521 @@ def abs2(x): def zeropad(arr, shape): - """ - Zero-pad array ARR to given shape. - The contents of ARR is approximately centered in the result. 
- """ - rank = arr.ndim - if len(shape) != rank: - raise ValueError("bad number of dimensions") - diff = np.asarray(shape) - np.asarray(arr.shape) - if diff.min() < 0: - raise ValueError("output dimensions must be larger or equal input dimensions") - offset = diff//2 - z = np.zeros(shape, dtype=arr.dtype) - if rank == 1: - i0 = offset[0]; n0 = i0 + arr.shape[0] - z[i0:n0] = arr - elif rank == 2: - i0 = offset[0]; n0 = i0 + arr.shape[0] - i1 = offset[1]; n1 = i1 + arr.shape[1] - z[i0:n0,i1:n1] = arr - elif rank == 3: - i0 = offset[0]; n0 = i0 + arr.shape[0] - i1 = offset[1]; n1 = i1 + arr.shape[1] - i2 = offset[2]; n2 = i2 + arr.shape[2] - z[i0:n0,i1:n1,i2:n2] = arr - elif rank == 4: - i0 = offset[0]; n0 = i0 + arr.shape[0] - i1 = offset[1]; n1 = i1 + arr.shape[1] - i2 = offset[2]; n2 = i2 + arr.shape[2] - i3 = offset[3]; n3 = i3 + arr.shape[3] - z[i0:n0,i1:n1,i2:n2,i3:n3] = arr - elif rank == 5: - i0 = offset[0]; n0 = i0 + arr.shape[0] - i1 = offset[1]; n1 = i1 + arr.shape[1] - i2 = offset[2]; n2 = i2 + arr.shape[2] - i3 = offset[3]; n3 = i3 + arr.shape[3] - i4 = offset[4]; n4 = i4 + arr.shape[4] - z[i0:n0,i1:n1,i2:n2,i3:n3,i4:n4] = arr - elif rank == 6: - i0 = offset[0]; n0 = i0 + arr.shape[0] - i1 = offset[1]; n1 = i1 + arr.shape[1] - i2 = offset[2]; n2 = i2 + arr.shape[2] - i3 = offset[3]; n3 = i3 + arr.shape[3] - i4 = offset[4]; n4 = i4 + arr.shape[4] - i5 = offset[5]; n5 = i5 + arr.shape[5] - z[i0:n0,i1:n1,i2:n2,i3:n3,i4:n4,i5:n5] = arr - else: - raise ValueError("too many dimensions") - return z + """ + Zero-pad array ARR to given shape. + The contents of ARR is approximately centered in the result. + """ + rank = arr.ndim + if len(shape) != rank: + raise ValueError("bad number of dimensions") + diff = np.asarray(shape) - np.asarray(arr.shape) + if diff.min() < 0: + raise ValueError("output dimensions must be larger or equal input dimensions") + offset = diff//2 + z = np.zeros(shape, dtype=arr.dtype) + if rank == 1: + i0 = offset[0] + n0 = i0 + arr.shape[0] + z[i0:n0] = arr + elif rank == 2: + i0 = offset[0] + n0 = i0 + arr.shape[0] + i1 = offset[1] + n1 = i1 + arr.shape[1] + z[i0:n0, i1:n1] = arr + elif rank == 3: + i0 = offset[0] + n0 = i0 + arr.shape[0] + i1 = offset[1] + n1 = i1 + arr.shape[1] + i2 = offset[2] + n2 = i2 + arr.shape[2] + z[i0:n0, i1:n1, i2:n2] = arr + elif rank == 4: + i0 = offset[0] + n0 = i0 + arr.shape[0] + i1 = offset[1] + n1 = i1 + arr.shape[1] + i2 = offset[2] + n2 = i2 + arr.shape[2] + i3 = offset[3] + n3 = i3 + arr.shape[3] + z[i0:n0, i1:n1, i2:n2, i3:n3] = arr + elif rank == 5: + i0 = offset[0] + n0 = i0 + arr.shape[0] + i1 = offset[1] + n1 = i1 + arr.shape[1] + i2 = offset[2] + n2 = i2 + arr.shape[2] + i3 = offset[3] + n3 = i3 + arr.shape[3] + i4 = offset[4] + n4 = i4 + arr.shape[4] + z[i0:n0, i1:n1, i2:n2, i3:n3, i4:n4] = arr + elif rank == 6: + i0 = offset[0] + n0 = i0 + arr.shape[0] + i1 = offset[1] + n1 = i1 + arr.shape[1] + i2 = offset[2] + n2 = i2 + arr.shape[2] + i3 = offset[3] + n3 = i3 + arr.shape[3] + i4 = offset[4] + n4 = i4 + arr.shape[4] + i5 = offset[5] + n5 = i5 + arr.shape[5] + z[i0:n0, i1:n1, i2:n2, i3:n3, i4:n4, i5:n5] = arr + else: + raise ValueError("too many dimensions") + return z def gaussian2d(x, y, sigma): - return np.exp(-(x**2+y**2)/(2*sigma**2))/(2*np.pi*sigma**2) + return np.exp(-(x**2+y**2)/(2*sigma**2))/(2*np.pi*sigma**2) -def gaussian_psf(FWHM=1., shape=(5,5)): - """ - Define the gaussian Point-Spread-Function of chosen shape and FWHM. 
-    ----------
-    Inputs:
-    FWHM : float, optional
-        The Full Width at Half Maximum of the desired gaussian function for the
-        PSF in pixel increments.
-        Defaults to 1.
-    shape : tuple, optional
-        The shape of the PSF kernel. Must be of dimension 2.
-        Defaults to (5,5).
-    ----------
-    Returns:
-    kernel : numpy.ndarray
-        Kernel containing the weights of the desired gaussian PSF.
-    """
-    # Compute standard deviation from FWHM
-    stdev = FWHM/(2.*np.sqrt(2.*np.log(2.)))
+def gaussian_psf(FWHM=1., shape=(5, 5)):
+    """
+    Define the gaussian Point-Spread-Function of chosen shape and FWHM.
+    ----------
+    Inputs:
+    FWHM : float, optional
+        The Full Width at Half Maximum of the desired gaussian function for the
+        PSF in pixel increments.
+        Defaults to 1.
+    shape : tuple, optional
+        The shape of the PSF kernel. Must be of dimension 2.
+        Defaults to (5,5).
+    ----------
+    Returns:
+    kernel : numpy.ndarray
+        Kernel containing the weights of the desired gaussian PSF.
+    """
+    # Compute standard deviation from FWHM
+    stdev = FWHM/(2.*np.sqrt(2.*np.log(2.)))
 
-    # Create kernel of desired shape
-    x, y = np.meshgrid(np.arange(-shape[0]/2,shape[0]/2),np.arange(-shape[1]/2,shape[1]/2))
-    kernel = gaussian2d(x, y, stdev)
-
-    return kernel/kernel.sum()
+    # Create kernel of desired shape
+    x, y = np.meshgrid(np.arange(-shape[0]/2, shape[0]/2), np.arange(-shape[1]/2, shape[1]/2))
+    kernel = gaussian2d(x, y, stdev)
+
+    return kernel/kernel.sum()
 
 
 def from_file_psf(filename):
-    """
-    Get the Point-Spread-Function from an external FITS file.
-    Such PSF can be generated using the TinyTim standalone program by STSCI.
-    See:
-    [1] https://www.stsci.edu/hst/instrumentation/focus-and-pointing/focus/tiny-tim-hst-psf-modeling
-    [2] https://doi.org/10.1117/12.892762
-    ----------
-    Inputs:
-    filename : str
-    ----------
-    kernel : numpy.ndarray
-        Kernel containing the weights of the desired gaussian PSF.
-    """
-    with fits.open(filename) as f:
-        psf = f[0].data
-    if (type(psf) != np.ndarray) or len(psf) != 2:
-        raise ValueError("Invalid PSF image in PrimaryHDU at {0:s}".format(filename))
-    #Return the normalized Point Spread Function
-    kernel = psf/psf.max()
-    return kernel
+    """
+    Get the Point-Spread-Function from an external FITS file.
+    Such PSF can be generated using the TinyTim standalone program by STSCI.
+    See:
+    [1] https://www.stsci.edu/hst/instrumentation/focus-and-pointing/focus/tiny-tim-hst-psf-modeling
+    [2] https://doi.org/10.1117/12.892762
+    ----------
+    Inputs:
+    filename : str
+    ----------
+    Returns:
+    kernel : numpy.ndarray
+        Kernel containing the weights of the PSF read from the file.
+    """
+    with fits.open(filename) as f:
+        psf = f[0].data
+    if not isinstance(psf, np.ndarray) or psf.ndim != 2:
+        raise ValueError("Invalid PSF image in PrimaryHDU at {0:s}".format(filename))
+    # Return the normalized Point Spread Function
+    kernel = psf/psf.max()
+    return kernel
 
 
 def wiener(image, psf, alpha=0.1, clip=True):
-    """
-    Implement the simplified Wiener filtering.
-    ----------
-    Inputs:
-    image : numpy.ndarray
-        Input degraded image (can be N dimensional) of floats.
-    psf : numpy.ndarray
-        The kernel of the point spread function. Must have shape less or equal to
-        the image shape. If less, it will be zeropadded.
-    alpha : float, optional
-        A parameter value for numerous deconvolution algorithms.
-        Defaults to 0.1
-    clip : boolean, optional
-        If true, pixel value of the result above 1 or under -1 are thresholded
-        for skimage pipeline compatibility.
-        Defaults to True.
-    ----------
-    Returns:
-    im_deconv : ndarray
-        The deconvolved image.
- """ - float_type = np.promote_types(image.dtype, np.float32) - image = image.astype(float_type, copy=False) - psf = zeropad(psf.astype(float_type, copy=False), image.shape) - psf /= psf.sum() - im_deconv = image.copy() + """ + Implement the simplified Wiener filtering. + ---------- + Inputs: + image : numpy.ndarray + Input degraded image (can be N dimensional) of floats. + psf : numpy.ndarray + The kernel of the point spread function. Must have shape less or equal to + the image shape. If less, it will be zeropadded. + alpha : float, optional + A parameter value for numerous deconvolution algorithms. + Defaults to 0.1 + clip : boolean, optional + If true, pixel value of the result above 1 or under -1 are thresholded + for skimage pipeline compatibility. + Defaults to True. + ---------- + Returns: + im_deconv : ndarray + The deconvolved image. + """ + float_type = np.promote_types(image.dtype, np.float32) + image = image.astype(float_type, copy=False) + psf = zeropad(psf.astype(float_type, copy=False), image.shape) + psf /= psf.sum() + im_deconv = image.copy() - ft_y = np.fft.fftn(im_deconv) - ft_h = np.fft.fftn(np.fft.ifftshift(psf)) + ft_y = np.fft.fftn(im_deconv) + ft_h = np.fft.fftn(np.fft.ifftshift(psf)) - ft_x = ft_h.conj()*ft_y / (abs2(ft_h) + alpha) - im_deconv = np.fft.ifftn(ft_x).real + ft_x = ft_h.conj()*ft_y / (abs2(ft_h) + alpha) + im_deconv = np.fft.ifftn(ft_x).real - if clip: - im_deconv[im_deconv > 1] = 1 - im_deconv[im_deconv < -1] = -1 + if clip: + im_deconv[im_deconv > 1] = 1 + im_deconv[im_deconv < -1] = -1 - return im_deconv/im_deconv.max() + return im_deconv/im_deconv.max() def van_cittert(image, psf, alpha=0.1, iterations=20, clip=True, filter_epsilon=None): - """ - Van-Citter deconvolution algorithm. - ---------- - Inputs: - image : numpy.darray - Input degraded image (can be N dimensional) of floats between 0 and 1. - psf : numpy.darray - The point spread function. - alpha : float, optional - A weight parameter for the deconvolution step. - iterations : int, optional - Number of iterations. This parameter plays the role of - regularisation. - clip : boolean, optional - True by default. If true, pixel value of the result above 1 or - under -1 are thresholded for skimage pipeline compatibility. - filter_epsilon: float, optional - Value below which intermediate results become 0 to avoid division - by small numbers. - ---------- - Returns: - im_deconv : ndarray - The deconvolved image. - """ - float_type = np.promote_types(image.dtype, np.float32) - image = image.astype(float_type, copy=False) - psf = psf.astype(float_type, copy=False) - psf /= psf.sum() - im_deconv = image.copy() + """ + Van-Citter deconvolution algorithm. + ---------- + Inputs: + image : numpy.darray + Input degraded image (can be N dimensional) of floats between 0 and 1. + psf : numpy.darray + The point spread function. + alpha : float, optional + A weight parameter for the deconvolution step. + iterations : int, optional + Number of iterations. This parameter plays the role of + regularisation. + clip : boolean, optional + True by default. If true, pixel value of the result above 1 or + under -1 are thresholded for skimage pipeline compatibility. + filter_epsilon: float, optional + Value below which intermediate results become 0 to avoid division + by small numbers. + ---------- + Returns: + im_deconv : ndarray + The deconvolved image. 
+ """ + float_type = np.promote_types(image.dtype, np.float32) + image = image.astype(float_type, copy=False) + psf = psf.astype(float_type, copy=False) + psf /= psf.sum() + im_deconv = image.copy() - for _ in range(iterations): - conv = convolve(im_deconv, psf, mode='same') - if filter_epsilon: - relative_blur = np.where(conv < filter_epsilon, 0, image - conv) - else: - relative_blur = image - conv - im_deconv += alpha*relative_blur + for _ in range(iterations): + conv = convolve(im_deconv, psf, mode='same') + if filter_epsilon: + relative_blur = np.where(conv < filter_epsilon, 0, image - conv) + else: + relative_blur = image - conv + im_deconv += alpha*relative_blur - if clip: - im_deconv[im_deconv > 1] = 1 - im_deconv[im_deconv < -1] = -1 + if clip: + im_deconv[im_deconv > 1] = 1 + im_deconv[im_deconv < -1] = -1 - return im_deconv + return im_deconv def richardson_lucy(image, psf, iterations=20, clip=True, filter_epsilon=None): - """ - Richardson-Lucy deconvolution algorithm. - ---------- - Inputs: - image : numpy.darray - Input degraded image (can be N dimensional) of floats between 0 and 1. - psf : numpy.darray - The point spread function. - iterations : int, optional - Number of iterations. This parameter plays the role of - regularisation. - clip : boolean, optional - True by default. If true, pixel value of the result above 1 or - under -1 are thresholded for skimage pipeline compatibility. - filter_epsilon: float, optional - Value below which intermediate results become 0 to avoid division - by small numbers. - ---------- - Returns: - im_deconv : ndarray - The deconvolved image. - ---------- - References - [1] https://doi.org/10.1364/JOSA.62.000055 - [2] https://en.wikipedia.org/wiki/Richardson%E2%80%93Lucy_deconvolution - """ - float_type = np.promote_types(image.dtype, np.float32) - image = image.astype(float_type, copy=False) - psf = psf.astype(float_type, copy=False) - psf /= psf.sum() - im_deconv = image.copy() - psf_mirror = np.flip(psf) + """ + Richardson-Lucy deconvolution algorithm. + ---------- + Inputs: + image : numpy.darray + Input degraded image (can be N dimensional) of floats between 0 and 1. + psf : numpy.darray + The point spread function. + iterations : int, optional + Number of iterations. This parameter plays the role of + regularisation. + clip : boolean, optional + True by default. If true, pixel value of the result above 1 or + under -1 are thresholded for skimage pipeline compatibility. + filter_epsilon: float, optional + Value below which intermediate results become 0 to avoid division + by small numbers. + ---------- + Returns: + im_deconv : ndarray + The deconvolved image. 
+ ---------- + References + [1] https://doi.org/10.1364/JOSA.62.000055 + [2] https://en.wikipedia.org/wiki/Richardson%E2%80%93Lucy_deconvolution + """ + float_type = np.promote_types(image.dtype, np.float32) + image = image.astype(float_type, copy=False) + psf = psf.astype(float_type, copy=False) + psf /= psf.sum() + im_deconv = image.copy() + psf_mirror = np.flip(psf) - for _ in range(iterations): - conv = convolve(im_deconv, psf, mode='same') - if filter_epsilon: - relative_blur = np.where(conv < filter_epsilon, 0, image / conv) - else: - relative_blur = image / conv - im_deconv *= convolve(relative_blur, psf_mirror, mode='same') + for _ in range(iterations): + conv = convolve(im_deconv, psf, mode='same') + if filter_epsilon: + relative_blur = np.where(conv < filter_epsilon, 0, image / conv) + else: + relative_blur = image / conv + im_deconv *= convolve(relative_blur, psf_mirror, mode='same') - if clip: - im_deconv[im_deconv > 1] = 1 - im_deconv[im_deconv < -1] = -1 + if clip: + im_deconv[im_deconv > 1] = 1 + im_deconv[im_deconv < -1] = -1 - return im_deconv + return im_deconv def one_step_gradient(image, psf, iterations=20, clip=True, filter_epsilon=None): - """ - One-step gradient deconvolution algorithm. - ---------- - Inputs: - image : numpy.darray - Input degraded image (can be N dimensional) of floats between 0 and 1. - psf : numpy.darray - The point spread function. - iterations : int, optional - Number of iterations. This parameter plays the role of - regularisation. - clip : boolean, optional - True by default. If true, pixel value of the result above 1 or - under -1 are thresholded for skimage pipeline compatibility. - filter_epsilon: float, optional - Value below which intermediate results become 0 to avoid division - by small numbers. - ---------- - Returns: - im_deconv : ndarray - The deconvolved image. - """ - float_type = np.promote_types(image.dtype, np.float32) - image = image.astype(float_type, copy=False) - psf = psf.astype(float_type, copy=False) - psf /= psf.sum() - im_deconv = image.copy() - psf_mirror = np.flip(psf) + """ + One-step gradient deconvolution algorithm. + ---------- + Inputs: + image : numpy.ndarray + Input degraded image (can be N dimensional) of floats between 0 and 1. + psf : numpy.ndarray + The point spread function. + iterations : int, optional + Number of iterations. This parameter plays the role of + regularisation. + clip : boolean, optional + True by default. If true, pixel values of the result above 1 or + under -1 are thresholded for skimage pipeline compatibility. + filter_epsilon: float, optional + Value below which intermediate results become 0 to avoid division + by small numbers. + ---------- + Returns: + im_deconv : ndarray + The deconvolved image.
+ """ + float_type = np.promote_types(image.dtype, np.float32) + image = image.astype(float_type, copy=False) + psf = psf.astype(float_type, copy=False) + psf /= psf.sum() + im_deconv = image.copy() + psf_mirror = np.flip(psf) - for _ in range(iterations): - conv = convolve(im_deconv, psf, mode='same') - if filter_epsilon: - relative_blur = np.where(conv < filter_epsilon, 0, image - conv) - else: - relative_blur = image - conv - im_deconv += convolve(relative_blur, psf_mirror, mode='same') + for _ in range(iterations): + conv = convolve(im_deconv, psf, mode='same') + if filter_epsilon: + relative_blur = np.where(conv < filter_epsilon, 0, image - conv) + else: + relative_blur = image - conv + im_deconv += convolve(relative_blur, psf_mirror, mode='same') - if clip: - im_deconv[im_deconv > 1] = 1 - im_deconv[im_deconv < -1] = -1 + if clip: + im_deconv[im_deconv > 1] = 1 + im_deconv[im_deconv < -1] = -1 - return im_deconv + return im_deconv def conjgrad(image, psf, alpha=0.1, error=None, iterations=20): - """ - Implement the Conjugate Gradient algorithm. - ---------- - Inputs: - image : numpy.ndarray - Input degraded image (can be N dimensional) of floats. - psf : numpy.ndarray - The kernel of the point spread function. Must have shape less or equal to - the image shape. If less, it will be zeropadded. - alpha : float, optional - A weight parameter for the regularisation matrix. - Defaults to 0.1 - error : numpy.ndarray, optional - Known background noise on the inputed image. Will be used for weighted - deconvolution. If None, all weights will be set to 1. - Defaults to None. - iterations : int, optional - Number of iterations. This parameter plays the role of - regularisation. - Defaults to 20. - ---------- - Returns: - im_deconv : ndarray - The deconvolved image. - """ - float_type = np.promote_types(image.dtype, np.float32) - image = image.astype(float_type, copy=False) - psf = psf.astype(float_type, copy=False) - psf /= psf.sum() - - # A.x = b avec A = HtWH+aDtD et b = HtWy - #Define ft_h : the zeropadded and shifted Fourier transform of the PSF - ft_h = np.fft.fftn(np.fft.ifftshift(zeropad(psf,image.shape))) - #Define weights as normalized signal to noise ratio - if error is None: - wgt = np.ones(image.shape) - else: - wgt = image/error - wgt /= wgt.max() + """ + Implement the Conjugate Gradient algorithm. + ---------- + Inputs: + image : numpy.ndarray + Input degraded image (can be N dimensional) of floats. + psf : numpy.ndarray + The kernel of the point spread function. Must have shape less or equal to + the image shape. If less, it will be zeropadded. + alpha : float, optional + A weight parameter for the regularisation matrix. + Defaults to 0.1 + error : numpy.ndarray, optional + Known background noise on the inputed image. Will be used for weighted + deconvolution. If None, all weights will be set to 1. + Defaults to None. + iterations : int, optional + Number of iterations. This parameter plays the role of + regularisation. + Defaults to 20. + ---------- + Returns: + im_deconv : ndarray + The deconvolved image. 
+ """ + float_type = np.promote_types(image.dtype, np.float32) + image = image.astype(float_type, copy=False) + psf = psf.astype(float_type, copy=False) + psf /= psf.sum() - def W(x): - """Define W operator : apply weights""" - return wgt*x + # A.x = b avec A = HtWH+aDtD et b = HtWy + # Define ft_h : the zeropadded and shifted Fourier transform of the PSF + ft_h = np.fft.fftn(np.fft.ifftshift(zeropad(psf, image.shape))) + # Define weights as normalized signal to noise ratio + if error is None: + wgt = np.ones(image.shape) + else: + wgt = image/error + wgt /= wgt.max() - def H(x): - """Define H operator : convolution with PSF""" - return np.fft.ifftn(ft_h*np.fft.fftn(x)).real + def W(x): + """Define W operator : apply weights""" + return wgt*x - def Ht(x): - """Define Ht operator : transpose of H""" - return np.fft.ifftn(ft_h.conj()*np.fft.fftn(x)).real + def H(x): + """Define H operator : convolution with PSF""" + return np.fft.ifftn(ft_h*np.fft.fftn(x)).real - def DtD(x): - """Returns the result of D'.D.x where D is a (multi-dimensional) - finite difference operator and D' is its transpose.""" - dims = x.shape - r = np.zeros(dims, dtype=x.dtype) # to store the result - rank = x.ndim # number of dimensions - if rank == 0: return r - if dims[0] >= 2: - dx = x[1:-1,...] - x[0:-2,...] - r[1:-1,...] += dx - r[0:-2,...] -= dx - if rank == 1: return r - if dims[1] >= 2: - dx = x[:,1:-1,...] - x[:,0:-2,...] - r[:,1:-1,...] += dx - r[:,0:-2,...] -= dx - if rank == 2: return r - if dims[2] >= 2: - dx = x[:,:,1:-1,...] - x[:,:,0:-2,...] - r[:,:,1:-1,...] += dx - r[:,:,0:-2,...] -= dx - if rank == 3: return r - if dims[3] >= 2: - dx = x[:,:,:,1:-1,...] - x[:,:,:,0:-2,...] - r[:,:,:,1:-1,...] += dx - r[:,:,:,0:-2,...] -= dx - if rank == 4: return r - if dims[4] >= 2: - dx = x[:,:,:,:,1:-1,...] - x[:,:,:,:,0:-2,...] - r[:,:,:,:,1:-1,...] += dx - r[:,:,:,:,0:-2,...] -= dx - if rank == 5: return r - raise ValueError("too many dimensions") + def Ht(x): + """Define Ht operator : transpose of H""" + return np.fft.ifftn(ft_h.conj()*np.fft.fftn(x)).real - def A(x): - """Define symetric positive semi definite operator A""" - return Ht(W(H(x)))+alpha*DtD(x) + def DtD(x): + """Returns the result of D'.D.x where D is a (multi-dimensional) + finite difference operator and D' is its transpose.""" + dims = x.shape + r = np.zeros(dims, dtype=x.dtype) # to store the result + rank = x.ndim # number of dimensions + if rank == 0: + return r + if dims[0] >= 2: + dx = x[1:-1, ...] - x[0:-2, ...] + r[1:-1, ...] += dx + r[0:-2, ...] -= dx + if rank == 1: + return r + if dims[1] >= 2: + dx = x[:, 1:-1, ...] - x[:, 0:-2, ...] + r[:, 1:-1, ...] += dx + r[:, 0:-2, ...] -= dx + if rank == 2: + return r + if dims[2] >= 2: + dx = x[:, :, 1:-1, ...] - x[:, :, 0:-2, ...] + r[:, :, 1:-1, ...] += dx + r[:, :, 0:-2, ...] -= dx + if rank == 3: + return r + if dims[3] >= 2: + dx = x[:, :, :, 1:-1, ...] - x[:, :, :, 0:-2, ...] + r[:, :, :, 1:-1, ...] += dx + r[:, :, :, 0:-2, ...] -= dx + if rank == 4: + return r + if dims[4] >= 2: + dx = x[:, :, :, :, 1:-1, ...] - x[:, :, :, :, 0:-2, ...] + r[:, :, :, :, 1:-1, ...] += dx + r[:, :, :, :, 0:-2, ...] 
-= dx + if rank == 5: + return r + raise ValueError("too many dimensions") - #Define obtained vector A.x = b - b = Ht(W(image)) - - def inner(x,y): - """Compute inner product of X and Y regardless their shapes - (their number of elements must however match).""" - return np.inner(x.ravel(),y.ravel()) + def A(x): + """Define symmetric positive semi-definite operator A""" + return Ht(W(H(x)))+alpha*DtD(x) - # Compute initial residuals. - r = np.copy(b) - x = np.zeros(b.shape, dtype=b.dtype) - rho = inner(r,r) - epsilon = np.max([0., 1e-5*np.sqrt(rho)]) + # Define the right-hand side b of A.x = b + b = Ht(W(image)) - # Conjugate gradient iterations. - beta = 0.0 - k = 0 - while (k <= iterations) and (np.sqrt(rho) > epsilon): - if np.sqrt(rho) <= epsilon: - print("Converged before maximum iteration.") - break - k += 1 - if k > iterations: - print("Didn't converge before maximum iteration.") - break + def inner(x, y): + """Compute inner product of X and Y regardless of their shapes + (their number of elements must however match).""" + return np.inner(x.ravel(), y.ravel()) - # Next search direction. - if beta == 0.0: - p = r - else: - p = r + beta*p + # Compute initial residuals. + r = np.copy(b) + x = np.zeros(b.shape, dtype=b.dtype) + rho = inner(r, r) + epsilon = np.max([0., 1e-5*np.sqrt(rho)]) - # Make optimal step along search direction. - q = A(p) - gamma = inner(p, q) - if gamma <= 0.0: - raise ValueError("Operator A is not positive definite") - alpha = rho/gamma - x += alpha*p - r -= alpha*q - rho_prev, rho = rho, inner(r,r) - beta = rho/rho_prev + # Conjugate gradient iterations. + beta = 0.0 + k = 0 + while (k <= iterations) and (np.sqrt(rho) > epsilon): + if np.sqrt(rho) <= epsilon: + print("Converged before maximum iteration.") + break + k += 1 + if k > iterations: + print("Didn't converge before maximum iteration.") + break - #Return normalized solution - im_deconv = x/x.max() - return im_deconv + # Next search direction. + if beta == 0.0: + p = r + else: + p = r + beta*p + + # Make optimal step along search direction. + q = A(p) + gamma = inner(p, q) + if gamma <= 0.0: + raise ValueError("Operator A is not positive definite") + alpha = rho/gamma + x += alpha*p + r -= alpha*q + rho_prev, rho = rho, inner(r, r) + beta = rho/rho_prev + + # Return normalized solution + im_deconv = x/x.max() + return im_deconv def deconvolve_im(image, psf, alpha=0.1, error=None, iterations=20, clip=True, filter_epsilon=None, algo='richardson'): - """ - Prepare an image for deconvolution using a chosen algorithm and return - results. - ---------- - Inputs: - image : numpy.ndarray - Input degraded image (can be N dimensional) of floats. - psf : numpy.ndarray - The kernel of the point spread function. Must have shape less or equal to - the image shape. If less, it will be zeropadded. - alpha : float, optional - A parameter value for numerous deconvolution algorithms. - Defaults to 0.1 - error : numpy.ndarray, optional - Known background noise on the inputed image. Will be used for weighted - deconvolution. If None, all weights will be set to 1. - Defaults to None. - iterations : int, optional - Number of iterations. This parameter plays the role of - regularisation. - Defaults to 20. - clip : boolean, optional - If true, pixel value of the result above 1 or under -1 are thresholded - for skimage pipeline compatibility. - Defaults to True. - filter_epsilon: float, optional - Value below which intermediate results become 0 to avoid division - by small numbers. - Defaults to None.
- algo : str, optional - Name of the deconvolution algorithm that will be used. Implemented - algorithms are the following : 'Wiener', 'Van-Cittert', 'One Step Gradient', - 'Conjugate Gradient' and 'Richardson-Lucy'. - Defaults to 'Richardson-Lucy'. - ---------- - Returns: - im_deconv : ndarray - The deconvolved image. - """ - # Normalize image to highest pixel value - pxmax = image[np.isfinite(image)].max() - if pxmax == 0.: - raise ValueError("Invalid image") - norm_image = image/pxmax + """ + Prepare an image for deconvolution using a chosen algorithm and return + results. + ---------- + Inputs: + image : numpy.ndarray + Input degraded image (can be N dimensional) of floats. + psf : numpy.ndarray + The kernel of the point spread function. Must have shape less than or equal to + the image shape. If less, it will be zeropadded. + alpha : float, optional + A parameter value for numerous deconvolution algorithms. + Defaults to 0.1 + error : numpy.ndarray, optional + Known background noise on the input image. Will be used for weighted + deconvolution. If None, all weights will be set to 1. + Defaults to None. + iterations : int, optional + Number of iterations. This parameter plays the role of + regularisation. + Defaults to 20. + clip : boolean, optional + If true, pixel values of the result above 1 or under -1 are thresholded + for skimage pipeline compatibility. + Defaults to True. + filter_epsilon: float, optional + Value below which intermediate results become 0 to avoid division + by small numbers. + Defaults to None. + algo : str, optional + Name of the deconvolution algorithm that will be used. Implemented + algorithms are the following : 'Wiener', 'Van-Cittert', 'One Step Gradient', + 'Conjugate Gradient' and 'Richardson-Lucy'. + Defaults to 'Richardson-Lucy'. + ---------- + Returns: + im_deconv : ndarray + The deconvolved image.
+ """ + # Normalize image to highest pixel value + pxmax = image[np.isfinite(image)].max() + if pxmax == 0.: + raise ValueError("Invalid image") + norm_image = image/pxmax - # Deconvolve normalized image - if algo.lower() in ['wiener','wiener simple']: - norm_deconv = wiener(image=norm_image, psf=psf, alpha=alpha, clip=clip) - elif algo.lower() in ['van-cittert','vancittert','cittert']: - norm_deconv = van_cittert(image=norm_image, psf=psf, alpha=alpha, - iterations=iterations, clip=clip, filter_epsilon=filter_epsilon) - elif algo.lower() in ['1grad','one_step_grad','one step grad']: - norm_deconv = one_step_gradient(image=norm_image, psf=psf, - iterations=iterations, clip=clip, filter_epsilon=filter_epsilon) - elif algo.lower() in ['conjgrad','conj_grad','conjugate gradient']: - norm_deconv = conj_grad(image=norm_image, psf=psf, alpha=alpha, - error=error, iterations=iterations) - else: # Defaults to Richardson-Lucy - norm_deconv = richardson_lucy(image=norm_image, psf=psf, - iterations=iterations, clip=clip, filter_epsilon=filter_epsilon) + # Deconvolve normalized image + if algo.lower() in ['wiener', 'wiener simple']: + norm_deconv = wiener(image=norm_image, psf=psf, alpha=alpha, clip=clip) + elif algo.lower() in ['van-cittert', 'vancittert', 'cittert']: + norm_deconv = van_cittert(image=norm_image, psf=psf, alpha=alpha, + iterations=iterations, clip=clip, filter_epsilon=filter_epsilon) + elif algo.lower() in ['1grad', 'one_step_grad', 'one step grad']: + norm_deconv = one_step_gradient(image=norm_image, psf=psf, + iterations=iterations, clip=clip, filter_epsilon=filter_epsilon) + elif algo.lower() in ['conjgrad', 'conj_grad', 'conjugate gradient']: + norm_deconv = conj_grad(image=norm_image, psf=psf, alpha=alpha, + error=error, iterations=iterations) + else: # Defaults to Richardson-Lucy + norm_deconv = richardson_lucy(image=norm_image, psf=psf, + iterations=iterations, clip=clip, filter_epsilon=filter_epsilon) - # Output deconvolved image with original pxmax value - im_deconv = pxmax*norm_deconv + # Output deconvolved image with original pxmax value + im_deconv = pxmax*norm_deconv - return im_deconv + return im_deconv diff --git a/src/lib/fits.py b/src/lib/fits.py index acd8d5f..fa37fbf 100755 --- a/src/lib/fits.py +++ b/src/lib/fits.py @@ -15,9 +15,8 @@ import numpy as np from os.path import join as path_join from astropy.io import fits from astropy.wcs import WCS -from lib.convex_hull import image_hull, clean_ROI +from lib.convex_hull import clean_ROI from lib.plots import princ_angle -import matplotlib.pyplot as plt def get_obs_data(infiles, data_folder="", compute_flux=False): @@ -42,29 +41,29 @@ def get_obs_data(infiles, data_folder="", compute_flux=False): """ data_array, headers = [], [] for i in range(len(infiles)): - with fits.open(path_join(data_folder,infiles[i])) as f: + with fits.open(path_join(data_folder, infiles[i])) as f: headers.append(f[0].header) data_array.append(f[0].data) - data_array = np.array(data_array,dtype=np.double) + data_array = np.array(data_array, dtype=np.double) # Prevent negative count value in imported data for i in range(len(data_array)): data_array[i][data_array[i] < 0.] = 0. 
- + # force WCS to convention PCi_ja unitary, cdelt in deg for header in headers: new_wcs = WCS(header).deepcopy() if new_wcs.wcs.has_cd() or (new_wcs.wcs.cdelt[:2] == np.array([1., 1.])).all(): # Update WCS with relevant information if new_wcs.wcs.has_cd(): - old_cd = new_wcs.wcs.cd[:2,:2] + old_cd = new_wcs.wcs.cd[:2, :2] del new_wcs.wcs.cd - keys = list(new_wcs.to_header().keys())+['CD1_1','CD1_2','CD2_1','CD2_2'] + keys = list(new_wcs.to_header().keys())+['CD1_1', 'CD1_2', 'CD2_1', 'CD2_2'] for key in keys: header.remove(key, ignore_missing=True) new_cdelt = np.linalg.eig(old_cd)[0] elif (new_wcs.wcs.cdelt == np.array([1., 1.])).all() and \ - (new_wcs.array_shape in [(512, 512),(1024,512),(512,1024),(1024,1024)]): + (new_wcs.array_shape in [(512, 512), (1024, 512), (512, 1024), (1024, 1024)]): old_cd = new_wcs.wcs.pc new_wcs.wcs.pc = np.dot(old_cd, np.diag(1./new_cdelt)) new_wcs.wcs.cdelt = new_cdelt @@ -73,14 +72,14 @@ def get_obs_data(infiles, data_folder="", compute_flux=False): header['orientat'] = princ_angle(float(header['orientat'])) # force WCS for POL60 to have same pixel size as POL0 and POL120 - is_pol60 = np.array([head['filtnam1'].lower()=='pol60' for head in headers],dtype=bool) - cdelt = np.round(np.array([WCS(head).wcs.cdelt for head in headers]),14) - if np.unique(cdelt[np.logical_not(is_pol60)],axis=0).size!=2: - print(np.unique(cdelt[np.logical_not(is_pol60)],axis=0)) + is_pol60 = np.array([head['filtnam1'].lower() == 'pol60' for head in headers], dtype=bool) + cdelt = np.round(np.array([WCS(head).wcs.cdelt for head in headers]), 14) + if np.unique(cdelt[np.logical_not(is_pol60)], axis=0).size != 2: + print(np.unique(cdelt[np.logical_not(is_pol60)], axis=0)) raise ValueError("Not all images have same pixel size") else: for i in np.arange(len(headers))[is_pol60]: - headers[i]['cdelt1'],headers[i]['cdelt2'] = np.unique(cdelt[np.logical_not(is_pol60)],axis=0)[0] + headers[i]['cdelt1'], headers[i]['cdelt2'] = np.unique(cdelt[np.logical_not(is_pol60)], axis=0)[0] if compute_flux: for i in range(len(infiles)): @@ -91,8 +90,8 @@ def get_obs_data(infiles, data_folder="", compute_flux=False): def save_Stokes(I_stokes, Q_stokes, U_stokes, Stokes_cov, P, debiased_P, s_P, - s_P_P, PA, s_PA, s_PA_P, headers, data_mask, filename, data_folder="", - return_hdul=False): + s_P_P, PA, s_PA, s_PA_P, headers, data_mask, filename, data_folder="", + return_hdul=False): """ Save computed polarimetry parameters to a single fits file, updating header accordingly. @@ -127,12 +126,12 @@ def save_Stokes(I_stokes, Q_stokes, U_stokes, Stokes_cov, P, debiased_P, s_P, informations (WCS, orientation, data_type). Only returned if return_hdul is True. 
""" - #Create new WCS object given the modified images + # Create new WCS object given the modified images ref_header = headers[0] exp_tot = np.array([header['exptime'] for header in headers]).sum() new_wcs = WCS(ref_header).deepcopy() - - if data_mask.shape != (1,1): + + if data_mask.shape != (1, 1): vertex = clean_ROI(data_mask) shape = vertex[1::2]-vertex[0::2] new_wcs.array_shape = shape @@ -153,56 +152,56 @@ def save_Stokes(I_stokes, Q_stokes, U_stokes, Stokes_cov, P, debiased_P, s_P, header['PA_int'] = (ref_header['PA_int'], 'Integrated polarisation angle') header['PA_int_err'] = (ref_header['PA_int_err'], 'Integrated polarisation angle error') - #Crop Data to mask - if data_mask.shape != (1,1): - I_stokes = I_stokes[vertex[2]:vertex[3],vertex[0]:vertex[1]] - Q_stokes = Q_stokes[vertex[2]:vertex[3],vertex[0]:vertex[1]] - U_stokes = U_stokes[vertex[2]:vertex[3],vertex[0]:vertex[1]] - P = P[vertex[2]:vertex[3],vertex[0]:vertex[1]] - debiased_P = debiased_P[vertex[2]:vertex[3],vertex[0]:vertex[1]] - s_P = s_P[vertex[2]:vertex[3],vertex[0]:vertex[1]] - s_P_P = s_P_P[vertex[2]:vertex[3],vertex[0]:vertex[1]] - PA = PA[vertex[2]:vertex[3],vertex[0]:vertex[1]] - s_PA = s_PA[vertex[2]:vertex[3],vertex[0]:vertex[1]] - s_PA_P = s_PA_P[vertex[2]:vertex[3],vertex[0]:vertex[1]] - - new_Stokes_cov = np.zeros((*Stokes_cov.shape[:-2],*shape[::-1])) + # Crop Data to mask + if data_mask.shape != (1, 1): + I_stokes = I_stokes[vertex[2]:vertex[3], vertex[0]:vertex[1]] + Q_stokes = Q_stokes[vertex[2]:vertex[3], vertex[0]:vertex[1]] + U_stokes = U_stokes[vertex[2]:vertex[3], vertex[0]:vertex[1]] + P = P[vertex[2]:vertex[3], vertex[0]:vertex[1]] + debiased_P = debiased_P[vertex[2]:vertex[3], vertex[0]:vertex[1]] + s_P = s_P[vertex[2]:vertex[3], vertex[0]:vertex[1]] + s_P_P = s_P_P[vertex[2]:vertex[3], vertex[0]:vertex[1]] + PA = PA[vertex[2]:vertex[3], vertex[0]:vertex[1]] + s_PA = s_PA[vertex[2]:vertex[3], vertex[0]:vertex[1]] + s_PA_P = s_PA_P[vertex[2]:vertex[3], vertex[0]:vertex[1]] + + new_Stokes_cov = np.zeros((*Stokes_cov.shape[:-2], *shape[::-1])) for i in range(3): for j in range(3): - Stokes_cov[i,j][(1-data_mask).astype(bool)] = 0. - new_Stokes_cov[i,j] = Stokes_cov[i,j][vertex[2]:vertex[3],vertex[0]:vertex[1]] + Stokes_cov[i, j][(1-data_mask).astype(bool)] = 0. + new_Stokes_cov[i, j] = Stokes_cov[i, j][vertex[2]:vertex[3], vertex[0]:vertex[1]] Stokes_cov = new_Stokes_cov - - data_mask = data_mask[vertex[2]:vertex[3],vertex[0]:vertex[1]] + + data_mask = data_mask[vertex[2]:vertex[3], vertex[0]:vertex[1]] data_mask = data_mask.astype(float, copy=False) - #Create HDUList object + # Create HDUList object hdul = fits.HDUList([]) - #Add I_stokes as PrimaryHDU + # Add I_stokes as PrimaryHDU header['datatype'] = ('I_stokes', 'type of data stored in the HDU') I_stokes[(1-data_mask).astype(bool)] = 0. 
primary_hdu = fits.PrimaryHDU(data=I_stokes, header=header) primary_hdu.name = 'I_stokes' hdul.append(primary_hdu) - #Add Q, U, Stokes_cov, P, s_P, PA, s_PA to the HDUList - for data, name in [[Q_stokes,'Q_stokes'],[U_stokes,'U_stokes'], - [Stokes_cov,'IQU_cov_matrix'],[P, 'Pol_deg'], - [debiased_P, 'Pol_deg_debiased'],[s_P, 'Pol_deg_err'], - [s_P_P, 'Pol_deg_err_Poisson_noise'],[PA, 'Pol_ang'], - [s_PA, 'Pol_ang_err'],[s_PA_P, 'Pol_ang_err_Poisson_noise'], - [data_mask, 'Data_mask']]: + # Add Q, U, Stokes_cov, P, s_P, PA, s_PA to the HDUList + for data, name in [[Q_stokes, 'Q_stokes'], [U_stokes, 'U_stokes'], + [Stokes_cov, 'IQU_cov_matrix'], [P, 'Pol_deg'], + [debiased_P, 'Pol_deg_debiased'], [s_P, 'Pol_deg_err'], + [s_P_P, 'Pol_deg_err_Poisson_noise'], [PA, 'Pol_ang'], + [s_PA, 'Pol_ang_err'], [s_PA_P, 'Pol_ang_err_Poisson_noise'], + [data_mask, 'Data_mask']]: hdu_header = header.copy() hdu_header['datatype'] = name if not name == 'IQU_cov_matrix': data[(1-data_mask).astype(bool)] = 0. - hdu = fits.ImageHDU(data=data,header=hdu_header) + hdu = fits.ImageHDU(data=data, header=hdu_header) hdu.name = name hdul.append(hdu) - #Save fits file to designated filepath - hdul.writeto(path_join(data_folder,filename+".fits"), overwrite=True) + # Save fits file to designated filepath + hdul.writeto(path_join(data_folder, filename+".fits"), overwrite=True) if return_hdul: return hdul diff --git a/src/lib/plots.py b/src/lib/plots.py index 3a6a340..9c0a374 100755 --- a/src/lib/plots.py +++ b/src/lib/plots.py @@ -4,37 +4,37 @@ Library functions for displaying information using matplotlib prototypes : - plot_obs(data_array, headers, shape, vmin, vmax, rectangle, savename, plots_folder) Plots whole observation raw data in given display shape. - + - plot_Stokes(Stokes, savename, plots_folder) Plot the I/Q/U maps from the Stokes HDUList. - polarisation_map(Stokes, data_mask, rectangle, SNRp_cut, SNRi_cut, step_vec, savename, plots_folder, display) -> fig, ax Plots polarisation map of polarimetric parameters saved in an HDUList. - + class align_maps(map, other_map, **kwargs) Class to interactively align maps with different WCS. - + class overplot_radio(align_maps) Class inherited from align_maps to overplot radio data as contours. - + class overplot_chandra(align_maps) Class inherited from align_maps to overplot chandra data as contours. - + class overplot_pol(align_maps) Class inherited from align_maps to overplot UV polarisation vectors on other maps. - + class crop_map(hdul, fig, ax) Class to interactively crop a region of interest of a HDUList. - + class crop_Stokes(crop_map) Class inherited from crop_map to work on polarisation maps. - + class image_lasso_selector(img, fig, ax) Class to interactively select part of a map to work on. - + class aperture(img, cdelt, radius, fig, ax) Class to interactively simulate aperture integration. - + class pol_map(Stokes, SNRp_cut, SNRi_cut, selection) Class to interactively study polarisation maps making use of the cropping and selecting tools. """ @@ -60,41 +60,41 @@ def princ_angle(ang): """ Return the principal angle in the 0° to 180° range. """ - if type(ang) != np.ndarray: + if not isinstance(ang, np.ndarray): A = np.array([ang]) else: A = np.array(ang) while np.any(A < 0.): - A[A<0.] = A[A<0.]+360. + A[A < 0.] = A[A < 0.]+360. while np.any(A >= 180.): - A[A>=180.] = A[A>=180.]-180.
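# Behavioural sketch of princ_angle (values worked by hand, assuming scalar
# input): orientations are wrapped into [0, 180) because a polarisation angle
# is only defined modulo 180 degrees.
# princ_angle(-30.)  ->  150.0   (-30 + 360 = 330, then 330 - 180 = 150)
# princ_angle(210.)  ->   30.0   (210 - 180 = 30)
# princ_angle(90.)   ->   90.0   (already in range)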
+ A[A >= 180.] = A[A >= 180.]-180. + if type(ang) is type(A): return A else: return A[0] -def sci_not(v,err,rnd=1,out=str): +def sci_not(v, err, rnd=1, out=str): """ Return the scientific error notation as a string. """ power = - int(('%E' % v)[-3:])+1 - output = [r"({0}".format(round(v*10**power,rnd)),round(v*10**power,rnd)] - if type(err) == list: + output = [r"({0}".format(round(v*10**power, rnd)), round(v*10**power, rnd)] + if isinstance(err, list): for error in err: - output[0] += r" $\pm$ {0}".format(round(error*10**power,rnd)) - output.append(round(error*10**power,rnd)) + output[0] += r" $\pm$ {0}".format(round(error*10**power, rnd)) + output.append(round(error*10**power, rnd)) else: - output[0] += r" $\pm$ {0}".format(round(err*10**power,rnd)) - output.append(round(err*10**power,rnd)) - if out==str: + output[0] += r" $\pm$ {0}".format(round(err*10**power, rnd)) + output.append(round(err*10**power, rnd)) + if out == str: return output[0]+r")e{0}".format(-power) else: - return *output[1:],-power + return *output[1:], -power def plot_obs(data_array, headers, shape=None, vmin=None, vmax=None, rectangle=None, - savename=None, plots_folder=""): + savename=None, plots_folder=""): """ Plots raw observation imagery with some information on the instrument and filters. @@ -130,49 +130,49 @@ """ plt.rcParams.update({'font.size': 10}) if shape is None: - shape = np.array([np.ceil(np.sqrt(data_array.shape[0])).astype(int),]*2) - fig, ax = plt.subplots(shape[0], shape[1], figsize=(10,10), dpi=200, - sharex=True, sharey=True) + shape = np.array([np.ceil(np.sqrt(data_array.shape[0])).astype(int), ]*2) + fig, ax = plt.subplots(shape[0], shape[1], figsize=(10, 10), dpi=200, + sharex=True, sharey=True) - for i, (axe,data,head) in enumerate(zip(ax.flatten(),data_array,headers)): + for i, (axe, data, head) in enumerate(zip(ax.flatten(), data_array, headers)): instr = head['instrume'] rootname = head['rootname'] exptime = head['exptime'] filt = head['filtnam1'] convert = head['photflam'] - #plots + # plots if vmin is None or vmax is None: - vmin, vmax = convert*data[data>0.].min()/10., convert*data[data>0.].max() - #im = axe.imshow(convert*data, vmin=vmin, vmax=vmax, origin='lower', cmap='gray') - data[data*convert < vmin*10.] = vmin*10./convert + vmin, vmax = convert*data[data > 0.].min()/10., convert*data[data > 0.].max() + # im = axe.imshow(convert*data, vmin=vmin, vmax=vmax, origin='lower', cmap='gray') + data[data*convert < vmin*10.] = vmin*10./convert
+ im = axe.imshow(convert*data, norm=LogNorm(vmin, vmax), origin='lower', cmap='gray') + if not (rectangle is None): x, y, width, height, angle, color = rectangle[i] axe.add_patch(Rectangle((x, y), width, height, angle=angle, - edgecolor=color, fill=False)) - #position of centroid - axe.plot([data.shape[1]/2, data.shape[1]/2], [0,data.shape[0]-1], '--', lw=1, - color='grey', alpha=0.5) - axe.plot([0,data.shape[1]-1], [data.shape[1]/2, data.shape[1]/2], '--', lw=1, - color='grey', alpha=0.5) - axe.annotate(instr+":"+rootname,color='white',fontsize=5,xy=(0.01, 1.00), - xycoords='axes fraction', verticalalignment='top', - horizontalalignment='left') - axe.annotate(filt,color='white',fontsize=10,xy=(0.01, 0.01), - xycoords='axes fraction', verticalalignment='bottom', - horizontalalignment='left') - axe.annotate(exptime,color='white',fontsize=5,xy=(1.00, 0.01), - xycoords='axes fraction', verticalalignment='bottom', - horizontalalignment='right') + edgecolor=color, fill=False)) + # position of centroid + axe.plot([data.shape[1]/2, data.shape[1]/2], [0, data.shape[0]-1], '--', lw=1, + color='grey', alpha=0.5) + axe.plot([0, data.shape[1]-1], [data.shape[1]/2, data.shape[1]/2], '--', lw=1, + color='grey', alpha=0.5) + axe.annotate(instr+":"+rootname, color='white', fontsize=5, xy=(0.01, 1.00), + xycoords='axes fraction', verticalalignment='top', + horizontalalignment='left') + axe.annotate(filt, color='white', fontsize=10, xy=(0.01, 0.01), + xycoords='axes fraction', verticalalignment='bottom', + horizontalalignment='left') + axe.annotate(exptime, color='white', fontsize=5, xy=(1.00, 0.01), + xycoords='axes fraction', verticalalignment='bottom', + horizontalalignment='right') fig.subplots_adjust(hspace=0.01, wspace=0.01, right=1.02) - fig.colorbar(im, ax=ax[:,:], location='right', shrink=0.75, aspect=50, pad=0.025, label=r"Flux [$ergs \cdot cm^{-2} \cdot s^{-1} \cdot \AA^{-1}$]") + fig.colorbar(im, ax=ax[:, :], location='right', shrink=0.75, aspect=50, pad=0.025, label=r"Flux [$ergs \cdot cm^{-2} \cdot s^{-1} \cdot \AA^{-1}$]") if not (savename is None): - #fig.suptitle(savename) + # fig.suptitle(savename) if not savename[-4:] in ['.png', '.jpg', '.pdf']: savename += '.pdf' - fig.savefig(path_join(plots_folder,savename),bbox_inches='tight') + fig.savefig(path_join(plots_folder, savename), bbox_inches='tight') plt.show() return 0 @@ -203,35 +203,35 @@ def plot_Stokes(Stokes, savename=None, plots_folder=""): # Plot figure plt.rcParams.update({'font.size': 10}) - fig, (axI,axQ,axU) = plt.subplots(ncols=3,figsize=(20,6),subplot_kw=dict(projection=wcs)) - fig.subplots_adjust(hspace=0,wspace=0.75,bottom=0.01,top=0.99,left=0.08,right=0.95) + fig, (axI, axQ, axU) = plt.subplots(ncols=3, figsize=(20, 6), subplot_kw=dict(projection=wcs)) + fig.subplots_adjust(hspace=0, wspace=0.75, bottom=0.01, top=0.99, left=0.08, right=0.95) fig.suptitle("I, Q, U Stokes parameters") imI = axI.imshow(stkI, origin='lower', cmap='inferno') - fig.colorbar(imI,ax=axI, aspect=50, shrink=0.50, pad=0.025,label='counts/sec') + fig.colorbar(imI, ax=axI, aspect=50, shrink=0.50, pad=0.025, label='counts/sec') axI.set(xlabel="RA", ylabel='DEC', title=r"$I_{stokes}$") imQ = axQ.imshow(stkQ, origin='lower', cmap='inferno') - fig.colorbar(imQ,ax=axQ, aspect=50, shrink=0.50, pad=0.025,label='counts/sec') + fig.colorbar(imQ, ax=axQ, aspect=50, shrink=0.50, pad=0.025, label='counts/sec') axQ.set(xlabel="RA", ylabel='DEC', title=r"$Q_{stokes}$") imU = axU.imshow(stkU, origin='lower', cmap='inferno') -
fig.colorbar(imU,ax=axU, aspect=50, shrink=0.50, pad=0.025,label='counts/sec') + fig.colorbar(imU, ax=axU, aspect=50, shrink=0.50, pad=0.025, label='counts/sec') axU.set(xlabel="RA", ylabel='DEC', title=r"$U_{stokes}$") if not (savename is None): - #fig.suptitle(savename+"_IQU") + # fig.suptitle(savename+"_IQU") if not savename[-4:] in ['.png', '.jpg', '.pdf']: savename += '_IQU.pdf' else: savename = savename[:-4]+"_IQU"+savename[-4:] - fig.savefig(path_join(plots_folder,savename),bbox_inches='tight') + fig.savefig(path_join(plots_folder, savename), bbox_inches='tight') plt.show() return 0 def polarisation_map(Stokes, data_mask=None, rectangle=None, SNRp_cut=3., SNRi_cut=30., - flux_lim=None, step_vec=1, vec_scale=2., savename=None, plots_folder="", display="default"): + flux_lim=None, step_vec=1, vec_scale=2., savename=None, plots_folder="", display="default"): """ Plots polarisation map from Stokes HDUList. ---------- @@ -273,25 +273,23 @@ def polarisation_map(Stokes, data_mask=None, rectangle=None, SNRp_cut=3., SNRi_c Defaults to current folder. display : str, optional Choose the map to display between intensity (default), polarisation - degree ('p','pol','pol_deg') or polarisation degree error ('s_p', - 'pol_err','pol_deg_err'). + degree ('p', 'pol', 'pol_deg') or polarisation degree error ('s_p', + 'pol_err', 'pol_deg_err'). Defaults to None (intensity). ---------- Returns: fig, ax : matplotlib.pyplot object The figure and ax created for interactive contour maps. """ - #Get data - stkI = Stokes[np.argmax([Stokes[i].header['datatype']=='I_stokes' for i in range(len(Stokes))])] - stkQ = Stokes[np.argmax([Stokes[i].header['datatype']=='Q_stokes' for i in range(len(Stokes))])] - stkU = Stokes[np.argmax([Stokes[i].header['datatype']=='U_stokes' for i in range(len(Stokes))])] - stk_cov = Stokes[np.argmax([Stokes[i].header['datatype']=='IQU_cov_matrix' for i in range(len(Stokes))])] - pol = Stokes[np.argmax([Stokes[i].header['datatype']=='Pol_deg_debiased' for i in range(len(Stokes))])] - pol_err = Stokes[np.argmax([Stokes[i].header['datatype']=='Pol_deg_err' for i in range(len(Stokes))])] - pang = Stokes[np.argmax([Stokes[i].header['datatype']=='Pol_ang' for i in range(len(Stokes))])] + # Get data + stkI = Stokes[np.argmax([Stokes[i].header['datatype'] == 'I_stokes' for i in range(len(Stokes))])] + stk_cov = Stokes[np.argmax([Stokes[i].header['datatype'] == 'IQU_cov_matrix' for i in range(len(Stokes))])] + pol = Stokes[np.argmax([Stokes[i].header['datatype'] == 'Pol_deg_debiased' for i in range(len(Stokes))])] + pol_err = Stokes[np.argmax([Stokes[i].header['datatype'] == 'Pol_deg_err' for i in range(len(Stokes))])] + pang = Stokes[np.argmax([Stokes[i].header['datatype'] == 'Pol_ang' for i in range(len(Stokes))])] try: if data_mask is None: - data_mask = Stokes[np.argmax([Stokes[i].header['datatype']=='Data_mask' for i in range(len(Stokes))])].data.astype(bool) + data_mask = Stokes[np.argmax([Stokes[i].header['datatype'] == 'Data_mask' for i in range(len(Stokes))])].data.astype(bool) except KeyError: data_mask = np.ones(stkI.shape).astype(bool) @@ -299,19 +297,19 @@ def polarisation_map(Stokes, data_mask=None, rectangle=None, SNRp_cut=3., SNRi_c convert_flux = Stokes[0].header['photflam'] wcs = WCS(Stokes[0]).deepcopy() - #Plot Stokes parameters map + # Plot Stokes parameters map if display is None or display.lower() in ['default']: plot_Stokes(Stokes, savename=savename, plots_folder=plots_folder) - #Compute SNR and apply cuts + # Compute SNR and apply cuts poldata, pangdata = 
pol.data.copy(), pang.data.copy() maskP = pol_err.data > 0 SNRp = np.zeros(pol.data.shape) SNRp[maskP] = pol.data[maskP]/pol_err.data[maskP] - maskI = stk_cov.data[0,0] > 0 + maskI = stk_cov.data[0, 0] > 0 SNRi = np.zeros(stkI.data.shape) - SNRi[maskI] = stkI.data[maskI]/np.sqrt(stk_cov.data[0,0][maskI]) + SNRi[maskI] = stkI.data[maskI]/np.sqrt(stk_cov.data[0, 0][maskI]) mask = (SNRp > SNRp_cut) * (SNRi > SNRi_cut) poldata[np.logical_not(mask)] = np.nan @@ -320,121 +318,117 @@ def polarisation_map(Stokes, data_mask=None, rectangle=None, SNRp_cut=3., SNRi_c # Look for pixel of max polarisation if np.isfinite(pol.data).any(): p_max = np.max(pol.data[np.isfinite(pol.data)]) - x_max, y_max = np.unravel_index(np.argmax(pol.data==p_max),pol.data.shape) + x_max, y_max = np.unravel_index(np.argmax(pol.data == p_max), pol.data.shape) else: print("No pixel with polarisation information above requested SNR.") - #Plot the map + # Plot the map plt.rcParams.update({'font.size': 10}) plt.rcdefaults() - fig, ax = plt.subplots(figsize=(10,10),subplot_kw=dict(projection=wcs)) - ax.set(aspect='equal',fc='k') - fig.subplots_adjust(hspace=0,wspace=0,left=0.102,right=1.02) + fig, ax = plt.subplots(figsize=(10, 10), subplot_kw=dict(projection=wcs)) + ax.set(aspect='equal', fc='k') + fig.subplots_adjust(hspace=0, wspace=0, left=0.102, right=1.02) if display.lower() in ['intensity']: # If no display selected, show intensity map - display='i' + display = 'i' if flux_lim is None: if mask.sum() > 0.: - vmin, vmax = 1./2.*np.median(np.sqrt(stk_cov.data[0,0][mask])*convert_flux), np.max(stkI.data[stkI.data > 0.]*convert_flux) + vmin, vmax = 1./2.*np.median(np.sqrt(stk_cov.data[0, 0][mask])*convert_flux), np.max(stkI.data[stkI.data > 0.]*convert_flux) else: - vmin, vmax = 1./2.*np.median(np.sqrt(stk_cov.data[0,0][stkI.data > 0.])*convert_flux), np.max(stkI.data[stkI.data > 0.]*convert_flux) + vmin, vmax = 1./2.*np.median(np.sqrt(stk_cov.data[0, 0][stkI.data > 0.])*convert_flux), np.max(stkI.data[stkI.data > 0.]*convert_flux) else: vmin, vmax = flux_lim - im = ax.imshow(stkI.data*convert_flux, norm=LogNorm(vmin,vmax), aspect='equal', cmap='inferno', alpha=1.) - cbar = fig.colorbar(im, ax=ax, aspect=50, shrink=0.75, pad=0.025, label=r"$F_{\lambda}$ [$ergs \cdot cm^{-2} \cdot s^{-1} \cdot \AA^{-1}$]") - levelsI = np.logspace(0.31,1.955,6)/100.*vmax + im = ax.imshow(stkI.data*convert_flux, norm=LogNorm(vmin, vmax), aspect='equal', cmap='inferno', alpha=1.) + fig.colorbar(im, ax=ax, aspect=50, shrink=0.75, pad=0.025, label=r"$F_{\lambda}$ [$ergs \cdot cm^{-2} \cdot s^{-1} \cdot \AA^{-1}$]") + levelsI = np.logspace(0.31, 1.955, 6)/100.*vmax print("Total flux contour levels : ", levelsI) - cont = ax.contour(stkI.data*convert_flux, levels=levelsI, colors='grey', linewidths=0.5) - #ax.clabel(cont,inline=True,fontsize=6) + ax.contour(stkI.data*convert_flux, levels=levelsI, colors='grey', linewidths=0.5) elif display.lower() in ['pol_flux']: # Display polarisation flux - display='pf' - pf_mask = (stkI.data > 0.) * (pol.data > 0.) 
+ display = 'pf' if flux_lim is None: if mask.sum() > 0.: - vmin, vmax = 1./2.*np.median(np.sqrt(stk_cov.data[0,0][mask])*convert_flux), np.max(stkI.data[stkI.data > 0.]*convert_flux) + vmin, vmax = 1./2.*np.median(np.sqrt(stk_cov.data[0, 0][mask])*convert_flux), np.max(stkI.data[stkI.data > 0.]*convert_flux) else: - vmin, vmax = 1./2.*np.median(np.sqrt(stk_cov.data[0,0][stkI.data > 0.])*convert_flux), np.max(stkI.data[stkI.data > 0.]*convert_flux) + vmin, vmax = 1./2.*np.median(np.sqrt(stk_cov.data[0, 0][stkI.data > 0.])*convert_flux), np.max(stkI.data[stkI.data > 0.]*convert_flux) else: vmin, vmax = flux_lim - im = ax.imshow(stkI.data*convert_flux*pol.data, norm=LogNorm(vmin,vmax), aspect='equal', cmap='inferno', alpha=1.) - cbar = fig.colorbar(im, ax=ax, aspect=50, shrink=0.75, pad=0.025, label=r"$F_{\lambda} \cdot P$ [$ergs \cdot cm^{-2} \cdot s^{-1} \cdot \AA^{-1}$]") + im = ax.imshow(stkI.data*convert_flux*pol.data, norm=LogNorm(vmin, vmax), aspect='equal', cmap='inferno', alpha=1.) + fig.colorbar(im, ax=ax, aspect=50, shrink=0.75, pad=0.025, label=r"$F_{\lambda} \cdot P$ [$ergs \cdot cm^{-2} \cdot s^{-1} \cdot \AA^{-1}$]") levelsPf = np.linspace(vmax*0.01, vmax*0.99, 10) print("Polarized flux contour levels : ", levelsPf) - cont = ax.contour(stkI.data*convert_flux*pol.data, levels=levelsPf, colors='grey', linewidths=0.5) - #ax.clabel(cont,inline=True,fontsize=6) - elif display.lower() in ['p','pol','pol_deg']: + ax.contour(stkI.data*convert_flux*pol.data, levels=levelsPf, colors='grey', linewidths=0.5) + elif display.lower() in ['p', 'pol', 'pol_deg']: # Display polarisation degree map - display='p' + display = 'p' vmin, vmax = 0., 100. im = ax.imshow(pol.data*100., vmin=vmin, vmax=vmax, aspect='equal', cmap='inferno', alpha=1.) - cbar = fig.colorbar(im, ax=ax, aspect=50, shrink=0.75, pad=0.025, label=r"$P$ [%]") - elif display.lower() in ['pa','pang','pol_ang']: + fig.colorbar(im, ax=ax, aspect=50, shrink=0.75, pad=0.025, label=r"$P$ [%]") + elif display.lower() in ['pa', 'pang', 'pol_ang']: # Display polarisation degree map - display='pa' + display = 'pa' vmin, vmax = 0., 180. im = ax.imshow(princ_angle(pang.data), vmin=vmin, vmax=vmax, aspect='equal', cmap='inferno', alpha=1.) - cbar = fig.colorbar(im, ax=ax, aspect=50, shrink=0.75, pad=0.025, label=r"$\theta_P$ [°]") - elif display.lower() in ['s_p','pol_err','pol_deg_err']: + fig.colorbar(im, ax=ax, aspect=50, shrink=0.75, pad=0.025, label=r"$\theta_P$ [°]") + elif display.lower() in ['s_p', 'pol_err', 'pol_deg_err']: # Display polarisation degree error map - display='s_p' - if (SNRp>SNRp_cut).any(): + display = 's_p' + if (SNRp > SNRp_cut).any(): vmin, vmax = 0., np.max(pol_err.data[SNRp > SNRp_cut])*100. p_err = deepcopy(pol_err.data) p_err[p_err > vmax/100.] = np.nan im = ax.imshow(p_err*100., vmin=vmin, vmax=vmax, aspect='equal', cmap='inferno', alpha=1.) else: im = ax.imshow(pol_err.data*100., aspect='equal', cmap='inferno', alpha=1.) 
- cbar = fig.colorbar(im, ax=ax, aspect=50, shrink=0.75, pad=0.025, label=r"$\sigma_P$ [%]") - elif display.lower() in ['s_i','i_err']: + fig.colorbar(im, ax=ax, aspect=50, shrink=0.75, pad=0.025, label=r"$\sigma_P$ [%]") + elif display.lower() in ['s_i', 'i_err']: # Display intensity error map - display='s_i' - if (SNRi>SNRi_cut).any(): - vmin, vmax = np.min(np.sqrt(stk_cov.data[0,0][stk_cov.data[0,0] > 0.])*convert_flux), np.max(np.sqrt(stk_cov.data[0,0][stk_cov.data[0,0] > 0.])*convert_flux) - im = ax.imshow(np.sqrt(stk_cov.data[0,0])*convert_flux, vmin=vmin, vmax=vmax, aspect='equal', cmap='inferno', alpha=1.) + display = 's_i' + if (SNRi > SNRi_cut).any(): + vmin, vmax = 1./2.*np.median(np.sqrt(stk_cov.data[0, 0][stk_cov.data[0, 0] > 0.]) * + convert_flux), np.max(np.sqrt(stk_cov.data[0, 0][stk_cov.data[0, 0] > 0.])*convert_flux) + im = ax.imshow(np.sqrt(stk_cov.data[0, 0])*convert_flux, norm=LogNorm(vmin, vmax), aspect='equal', cmap='inferno', alpha=1.) else: - im = ax.imshow(np.sqrt(stk_cov.data[0,0])*convert_flux, aspect='equal', cmap='inferno', alpha=1.) - cbar = fig.colorbar(im, ax=ax, aspect=50, shrink=0.75, pad=0.025 , label=r"$\sigma_I$ [$ergs \cdot cm^{-2} \cdot s^{-1} \cdot \AA^{-1}$]") - elif display.lower() in ['snr','snri']: + im = ax.imshow(np.sqrt(stk_cov.data[0, 0])*convert_flux, aspect='equal', cmap='inferno', alpha=1.) + fig.colorbar(im, ax=ax, aspect=50, shrink=0.75, pad=0.025, label=r"$\sigma_I$ [$ergs \cdot cm^{-2} \cdot s^{-1} \cdot \AA^{-1}$]") + elif display.lower() in ['snr', 'snri']: # Display I_stokes signal-to-noise map - display='snri' + display = 'snri' vmin, vmax = 0., np.max(SNRi[np.isfinite(SNRi)]) if vmax*0.99 > SNRi_cut: im = ax.imshow(SNRi, vmin=vmin, vmax=vmax, aspect='equal', cmap='inferno', alpha=1.) levelsSNRi = np.linspace(SNRi_cut, vmax*0.99, 5) print("SNRi contour levels : ", levelsSNRi) - cont = ax.contour(SNRi, levels=levelsSNRi, colors='grey', linewidths=0.5) - #ax.clabel(cont,inline=True,fontsize=6) + ax.contour(SNRi, levels=levelsSNRi, colors='grey', linewidths=0.5) else: im = ax.imshow(SNRi, aspect='equal', cmap='inferno', alpha=1.) - cbar = fig.colorbar(im, ax=ax, aspect=50, shrink=0.75, pad=0.025 , label=r"$I_{Stokes}/\sigma_{I}$") + fig.colorbar(im, ax=ax, aspect=50, shrink=0.75, pad=0.025, label=r"$I_{Stokes}/\sigma_{I}$") elif display.lower() in ['snrp']: # Display polarisation degree signal-to-noise map - display='snrp' + display = 'snrp' vmin, vmax = 0., np.max(SNRp[np.isfinite(SNRp)]) if vmax*0.99 > SNRp_cut: im = ax.imshow(SNRp, vmin=vmin, vmax=vmax, aspect='equal', cmap='inferno', alpha=1.) levelsSNRp = np.linspace(SNRp_cut, vmax*0.99, 5) print("SNRp contour levels : ", levelsSNRp) - cont = ax.contour(SNRp, levels=levelsSNRp, colors='grey', linewidths=0.5) - #ax.clabel(cont,inline=True,fontsize=6) + ax.contour(SNRp, levels=levelsSNRp, colors='grey', linewidths=0.5) else: im = ax.imshow(SNRp, aspect='equal', cmap='inferno', alpha=1.) 
- cbar = fig.colorbar(im, ax=ax, aspect=50, shrink=0.75, pad=0.025 , label=r"$P/\sigma_{P}$") + fig.colorbar(im, ax=ax, aspect=50, shrink=0.75, pad=0.025, label=r"$P/\sigma_{P}$") else: # Defaults to intensity map if mask.sum() > 0.: - vmin, vmax = 1.*np.mean(np.sqrt(stk_cov.data[0,0][mask])*convert_flux), np.max(stkI.data[stkI.data > 0.]*convert_flux) + vmin, vmax = 1.*np.mean(np.sqrt(stk_cov.data[0, 0][mask])*convert_flux), np.max(stkI.data[stkI.data > 0.]*convert_flux) else: - vmin, vmax = 1.*np.mean(np.sqrt(stk_cov.data[0,0][stkI.data > 0.])*convert_flux), np.max(stkI.data[stkI.data > 0.]*convert_flux) - im = ax.imshow(stkI.data*convert_flux, norm=LogNorm(vmin,vmax), aspect='equal', cmap='inferno', alpha=1.) - cbar = fig.colorbar(im, ax=ax, aspect=50, shrink=0.75, pad=0.025 , label=r"$F_{\lambda}$ [$ergs \cdot cm^{-2} \cdot s^{-1} \cdot \AA$]") + vmin, vmax = 1.*np.mean(np.sqrt(stk_cov.data[0, 0][stkI.data > 0.])*convert_flux), np.max(stkI.data[stkI.data > 0.]*convert_flux) + im = ax.imshow(stkI.data*convert_flux, norm=LogNorm(vmin, vmax), aspect='equal', cmap='inferno', alpha=1.) + fig.colorbar(im, ax=ax, aspect=50, shrink=0.75, pad=0.025, label=r"$F_{\lambda}$ [$ergs \cdot cm^{-2} \cdot s^{-1} \cdot \AA$]") - #Get integrated values from header + # Get integrated values from header n_pix = stkI.data[data_mask].size I_diluted = stkI.data[data_mask].sum() - I_diluted_err = np.sqrt(n_pix)*np.sqrt(np.sum(stk_cov.data[0,0][data_mask])) + I_diluted_err = np.sqrt(n_pix)*np.sqrt(np.sum(stk_cov.data[0, 0][data_mask])) P_diluted = Stokes[0].header['P_int'] P_diluted_err = Stokes[0].header['P_int_err'] @@ -443,45 +437,50 @@ def polarisation_map(Stokes, data_mask=None, rectangle=None, SNRp_cut=3., SNRi_c px_size = wcs.wcs.get_cdelt()[0]*3600. px_sc = AnchoredSizeBar(ax.transData, 1./px_size, '1 arcsec', 3, pad=0.5, sep=5, borderpad=0.5, frameon=False, size_vertical=0.005, color='w') - north_dir = AnchoredDirectionArrows(ax.transAxes, "E", "N", length=-0.08, fontsize=0.025, loc=1, aspect_ratio=-1, sep_y=0.01, sep_x=0.01, back_length=0., head_length=10., head_width=10., angle=-Stokes[0].header['orientat'], color='white', text_props={'ec': 'k', 'fc': 'w', 'alpha': 1, 'lw': 0.4}, arrow_props={'ec': 'k','fc':'w','alpha': 1,'lw': 1}) + north_dir = AnchoredDirectionArrows(ax.transAxes, "E", "N", length=-0.08, fontsize=0.025, loc=1, aspect_ratio=-1, sep_y=0.01, sep_x=0.01, back_length=0., head_length=10., head_width=10., + angle=-Stokes[0].header['orientat'], color='white', text_props={'ec': 'k', 'fc': 'w', 'alpha': 1, 'lw': 0.4}, arrow_props={'ec': 'k', 'fc': 'w', 'alpha': 1, 'lw': 1}) - if display.lower() in ['i','s_i','snri','pf','p','pa','s_p','snrp']: + if display.lower() in ['i', 's_i', 'snri', 'pf', 'p', 'pa', 's_p', 'snrp']: if step_vec == 0: poldata[np.isfinite(poldata)] = 1./2. step_vec = 1 vec_scale = 2. X, Y = np.meshgrid(np.arange(stkI.data.shape[1]), np.arange(stkI.data.shape[0])) U, V = poldata*np.cos(np.pi/2.+pangdata*np.pi/180.), poldata*np.sin(np.pi/2.+pangdata*np.pi/180.) 
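# Hedged sketch of the vector components built just above (values assumed):
# for a North-up, East-left image, a polarisation angle theta measured from
# North towards East maps onto plot axes through the pi/2 offset, since
# cos(pi/2 + theta) = -sin(theta) and sin(pi/2 + theta) = cos(theta).
import numpy as np

theta = np.radians(30.)              # assumed polarisation angle, East of North
P = 0.12                             # assumed fractional polarisation degree
U = P * np.cos(np.pi / 2. + theta)   # = -P*sin(theta): component towards East
V = P * np.sin(np.pi / 2. + theta)   # =  P*cos(theta): component towards North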
- Q = ax.quiver(X[::step_vec,::step_vec],Y[::step_vec,::step_vec],U[::step_vec,::step_vec],V[::step_vec,::step_vec],units='xy',angles='uv',scale=1./vec_scale,scale_units='xy',pivot='mid',headwidth=0.,headlength=0.,headaxislength=0.,width=0.1,linewidth=0.5,color='w',edgecolor='k') + ax.quiver(X[::step_vec, ::step_vec], Y[::step_vec, ::step_vec], U[::step_vec, ::step_vec], V[::step_vec, ::step_vec], units='xy', angles='uv', + scale=1./vec_scale, scale_units='xy', pivot='mid', headwidth=0., headlength=0., headaxislength=0., width=0.1, linewidth=0.5, color='w', edgecolor='k') pol_sc = AnchoredSizeBar(ax.transData, vec_scale, r"$P$= 100 %", 4, pad=0.5, sep=5, borderpad=0.5, frameon=False, size_vertical=0.005, color='w') - + ax.add_artist(pol_sc) ax.add_artist(px_sc) ax.add_artist(north_dir) - - ax.annotate(r"$F_{{\lambda}}^{{int}}$({0:.0f} $\AA$) = {1} $ergs \cdot cm^{{-2}} \cdot s^{{-1}} \cdot \AA^{{-1}}$".format(pivot_wav,sci_not(I_diluted*convert_flux,I_diluted_err*convert_flux,2))+"\n"+r"$P^{{int}}$ = {0:.1f} $\pm$ {1:.1f} %".format(P_diluted*100.,P_diluted_err*100.)+"\n"+r"$\theta_{{P}}^{{int}}$ = {0:.1f} $\pm$ {1:.1f} °".format(PA_diluted,PA_diluted_err), color='white', xy=(0.01, 1.00), xycoords='axes fraction',path_effects=[pe.withStroke(linewidth=0.5,foreground='k')],verticalalignment='top', horizontalalignment='left') + + ax.annotate(r"$F_{{\lambda}}^{{int}}$({0:.0f} $\AA$) = {1} $ergs \cdot cm^{{-2}} \cdot s^{{-1}} \cdot \AA^{{-1}}$".format(pivot_wav, sci_not(I_diluted*convert_flux, I_diluted_err*convert_flux, 2))+"\n"+r"$P^{{int}}$ = {0:.1f} $\pm$ {1:.1f} %".format(P_diluted*100., P_diluted_err * + 100.)+"\n"+r"$\theta_{{P}}^{{int}}$ = {0:.1f} $\pm$ {1:.1f} °".format(PA_diluted, PA_diluted_err), color='white', xy=(0.01, 1.00), xycoords='axes fraction', path_effects=[pe.withStroke(linewidth=0.5, foreground='k')], verticalalignment='top', horizontalalignment='left') else: if display.lower() == 'default': ax.add_artist(px_sc) ax.add_artist(north_dir) - ax.annotate(r"$F_{{\lambda}}^{{int}}$({0:.0f} $\AA$) = {1} $ergs \cdot cm^{{-2}} \cdot s^{{-1}} \cdot \AA^{{-1}}$".format(pivot_wav,sci_not(I_diluted*convert_flux,I_diluted_err*convert_flux,2)), color='white', xy=(0.01, 1.00), xycoords='axes fraction',path_effects=[pe.withStroke(linewidth=0.5,foreground='k')],verticalalignment='top', horizontalalignment='left') + ax.annotate(r"$F_{{\lambda}}^{{int}}$({0:.0f} $\AA$) = {1} $ergs \cdot cm^{{-2}} \cdot s^{{-1}} \cdot \AA^{{-1}}$".format(pivot_wav, sci_not(I_diluted*convert_flux, I_diluted_err*convert_flux, 2)), + color='white', xy=(0.01, 1.00), xycoords='axes fraction', path_effects=[pe.withStroke(linewidth=0.5, foreground='k')], verticalalignment='top', horizontalalignment='left') # Display instrument FOV - if not(rectangle is None): + if not (rectangle is None): x, y, width, height, angle, color = rectangle - x, y = np.array([x, y])- np.array(stkI.data.shape)/2. + x, y = np.array([x, y]) - np.array(stkI.data.shape)/2. 
ax.add_patch(Rectangle((x, y), width, height, angle=angle, - edgecolor=color, fill=False)) + edgecolor=color, fill=False)) + # ax.coords.grid(True, color='white', ls='dotted', alpha=0.5) + ax.coords[0].set_axislabel('Right Ascension (J2000)') + ax.coords[0].set_axislabel_position('t') + ax.coords[0].set_ticklabel_position('t') + ax.set_ylabel('Declination (J2000)', labelpad=-1) - #ax.coords.grid(True, color='white', ls='dotted', alpha=0.5) - ax.set_xlabel('Right Ascension (J2000)') - ax.set_ylabel('Declination (J2000)',labelpad=-1) - - if not savename is None: - if not savename[-4:] in ['.png', '.jpg', '.pdf']: + if savename is not None: + if savename[-4:] not in ['.png', '.jpg', '.pdf']: savename += '.pdf' - fig.savefig(path_join(plots_folder,savename),bbox_inches='tight',dpi=300) + fig.savefig(path_join(plots_folder, savename), bbox_inches='tight', dpi=300) plt.show() return fig, ax @@ -491,14 +490,15 @@ class align_maps(object): """ Class to interactively align maps with different WCS. """ + def __init__(self, map, other_map, **kwargs): self.aligned = False - + self.map = map self.other = other_map self.map_path = self.map.fileinfo(0)['filename'] self.other_path = self.other.fileinfo(0)['filename'] - + self.map_header = fits.getheader(self.map_path) self.other_header = fits.getheader(self.other_path) self.map_data = fits.getdata(self.map_path) @@ -506,89 +506,99 @@ class align_maps(object): self.map_wcs = deepcopy(WCS(self.map_header)).celestial if len(self.map_data.shape) == 4: - self.map_data = self.map_data[0,0] + self.map_data = self.map_data[0, 0] elif len(self.map_data.shape) == 3: self.map_data = self.map_data[0] self.other_wcs = deepcopy(WCS(self.other_header)).celestial if len(self.other_data.shape) == 4: - self.other_data = self.other_data[0,0] + self.other_data = self.other_data[0, 0] elif len(self.other_data.shape) == 3: self.other_data = self.other_data[0] - self.map_convert, self.map_unit = (float(self.map_header['photflam']), r"$ergs \cdot cm^{-2} \cdot s^{-1} \cdot \AA^{-1}$") if "PHOTFLAM" in list(self.map_header.keys()) else (1., self.map_header['bunit'] if 'BUNIT' in list(self.map_header.keys()) else "Arbitray Units") - self.other_convert, self.other_unit = (float(self.other_map[0].header['photflam']), r"$ergs \cdot cm^{-2} \cdot s^{-1} \cdot \AA^{-1}$") if "PHOTFLAM" in list(self.other_header.keys()) else (1., self.other_header['bunit'] if 'BUNIT' in list(self.other_header.keys()) else "Arbitray Units") - self.map_observer = "/".join([self.map_header['telescop'],self.map_header['instrume']]) if "INSTRUME" in list(self.map_header.keys()) else self.map_header['telescop'] - self.other_observer = "/".join([self.other_header['telescop'],self.other_header['instrume']]) if "INSTRUME" in list(self.other_header.keys()) else self.other_header['telescop'] + self.map_convert, self.map_unit = (float(self.map_header['photflam']), r"$ergs \cdot cm^{-2} \cdot s^{-1} \cdot \AA^{-1}$") if "PHOTFLAM" in list( self.map_header.keys()) else (1., self.map_header['bunit'] if 'BUNIT' in list(self.map_header.keys()) else "Arbitrary Units") + self.other_convert, self.other_unit = (float(self.other_header['photflam']), r"$ergs \cdot cm^{-2} \cdot s^{-1} \cdot \AA^{-1}$") if "PHOTFLAM" in list( self.other_header.keys()) else (1., self.other_header['bunit'] if 'BUNIT' in list(self.other_header.keys()) else "Arbitrary Units") + self.map_observer = "/".join([self.map_header['telescop'], self.map_header['instrume']] ) if "INSTRUME" in list(self.map_header.keys()) else
self.map_header['telescop'] + self.other_observer = "/".join([self.other_header['telescop'], self.other_header['instrume']] + ) if "INSTRUME" in list(self.other_header.keys()) else self.other_header['telescop'] plt.rcParams.update({'font.size': 10}) fontprops = fm.FontProperties(size=16) - self.fig_align = plt.figure(figsize=(20,10)) + self.fig_align = plt.figure(figsize=(20, 10)) self.map_ax = self.fig_align.add_subplot(121, projection=self.map_wcs) self.other_ax = self.fig_align.add_subplot(122, projection=self.other_wcs) - #Plot the UV map + # Plot the UV map other_kwargs = deepcopy(kwargs) vmin, vmax = self.map_data[self.map_data > 0.].max()/1e3*self.map_convert, self.map_data[self.map_data > 0.].max()*self.map_convert - for key, value in [["cmap",[["cmap","inferno"]]], ["norm",[["norm",LogNorm(vmin,vmax)]]]]: + for key, value in [["cmap", [["cmap", "inferno"]]], ["norm", [["norm", LogNorm(vmin, vmax)]]]]: try: - test = kwargs[key] + _ = kwargs[key] except KeyError: for key_i, val_i in value: kwargs[key_i] = val_i - im1 = self.map_ax.imshow(self.map_data*self.map_convert, aspect='equal', **kwargs) + self.map_ax.imshow(self.map_data*self.map_convert, aspect='equal', **kwargs) - if kwargs['cmap'] in ['inferno','magma','Greys_r','binary_r','gist_yarg_r','gist_gray','gray','bone','pink','hot','afmhot','gist_heat','copper','gist_earth','gist_stern','gnuplot','gnuplot2','CMRmap','cubehelix','nipy_spectral','gist_ncar','viridis']: + if kwargs['cmap'] in ['inferno', 'magma', 'Greys_r', 'binary_r', 'gist_yarg_r', 'gist_gray', 'gray', 'bone', 'pink', 'hot', 'afmhot', 'gist_heat', 'copper', 'gist_earth', 'gist_stern', 'gnuplot', 'gnuplot2', 'CMRmap', 'cubehelix', 'nipy_spectral', 'gist_ncar', 'viridis']: self.map_ax.set_facecolor('black') self.other_ax.set_facecolor('black') - font_color="white" + font_color = "white" else: self.map_ax.set_facecolor('white') self.other_ax.set_facecolor('white') - font_color="black" + font_color = "black" px_size1 = self.map_wcs.wcs.get_cdelt()[0]*3600. 
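# Quick numerical sketch of the size-bar arithmetic above (values assumed):
# cdelt is in degrees per pixel, so one arcsecond spans 1/(cdelt*3600) pixels,
# which is the length handed to AnchoredSizeBar in data coordinates.
cdelt_deg = 0.014 / 3600.          # hypothetical FOC-like scale of 0.014 arcsec/px
px_size = cdelt_deg * 3600.        # arcsec per pixel -> 0.014
bar_length_px = 1. / px_size       # ~71 pixels span 1 arcsec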
- px_sc1 = AnchoredSizeBar(self.map_ax.transData, 1./px_size1, '1 arcsec', 3, pad=0.5, sep=5, borderpad=0.5, frameon=False, size_vertical=0.005, color=font_color, fontproperties=fontprops) + px_sc1 = AnchoredSizeBar(self.map_ax.transData, 1./px_size1, '1 arcsec', 3, pad=0.5, sep=5, borderpad=0.5, + frameon=False, size_vertical=0.005, color=font_color, fontproperties=fontprops) self.map_ax.add_artist(px_sc1) if 'PHOTPLAM' in list(self.map_header.keys()): - annote1 = self.map_ax.annotate(r"$\lambda$ = {0:.0f} $\AA$".format(self.map_header['photplam']), color=font_color, fontsize=12, xy=(0.01, 0.93), xycoords='axes fraction',path_effects=[pe.withStroke(linewidth=0.5,foreground='k')]) + self.map_ax.annotate(r"$\lambda$ = {0:.0f} $\AA$".format(self.map_header['photplam']), color=font_color, fontsize=12, xy=( + 0.01, 0.93), xycoords='axes fraction', path_effects=[pe.withStroke(linewidth=0.5, foreground='k')]) if 'ORIENTAT' in list(self.map_header.keys()): - north_dir1 = AnchoredDirectionArrows(self.map_ax.transAxes, "E", "N", length=-0.08, fontsize=0.03, loc=1, aspect_ratio=-1, sep_y=0.01, sep_x=0.01, angle=-self.map_header['orientat'], color=font_color, arrow_props={'ec': 'k', 'fc': 'w', 'alpha': 1,'lw': 0.5}) + north_dir1 = AnchoredDirectionArrows(self.map_ax.transAxes, "E", "N", length=-0.08, fontsize=0.03, loc=1, aspect_ratio=-1, sep_y=0.01, + sep_x=0.01, angle=-self.map_header['orientat'], color=font_color, arrow_props={'ec': 'k', 'fc': 'w', 'alpha': 1, 'lw': 0.5}) self.map_ax.add_artist(north_dir1) - self.cr_map, = self.map_ax.plot(*(self.map_wcs.wcs.crpix-(1.,1.)), 'r+') - + self.cr_map, = self.map_ax.plot(*(self.map_wcs.wcs.crpix-(1., 1.)), 'r+') + self.map_ax.set_title("{0:s} observation\nClick on selected point of reference.".format(self.map_observer)) self.map_ax.set_xlabel(label="Right Ascension (J2000)") - self.map_ax.set_ylabel(label="Declination (J2000)",labelpad=-1) + self.map_ax.set_ylabel(label="Declination (J2000)", labelpad=-1) - #Plot the other map + # Plot the other map vmin, vmax = self.other_data[self.other_data > 0.].max()/1e3*self.other_convert, self.other_data[self.other_data > 0.].max()*self.other_convert - for key, value in [["cmap",[["cmap","inferno"]]], ["norm",[["norm",LogNorm(vmin,vmax)]]]]: + for key, value in [["cmap", [["cmap", "inferno"]]], ["norm", [["norm", LogNorm(vmin, vmax)]]]]: try: - test = other_kwargs[key] + _ = other_kwargs[key] except KeyError: for key_i, val_i in value: other_kwargs[key_i] = val_i - im2 = self.other_ax.imshow(self.other_data*self.other_convert, aspect='equal', **other_kwargs) + self.other_ax.imshow(self.other_data*self.other_convert, aspect='equal', **other_kwargs) px_size2 = self.other_wcs.wcs.get_cdelt()[0]*3600. 
- px_sc2 = AnchoredSizeBar(self.other_ax.transData, 1./px_size2, '1 arcsec', 3, pad=0.5, sep=5, borderpad=0.5, frameon=False, size_vertical=0.005, color=font_color, fontproperties=fontprops)
+ px_sc2 = AnchoredSizeBar(self.other_ax.transData, 1./px_size2, '1 arcsec', 3, pad=0.5, sep=5, borderpad=0.5,
+ frameon=False, size_vertical=0.005, color=font_color, fontproperties=fontprops)
self.other_ax.add_artist(px_sc2)
if 'PHOTPLAM' in list(self.other_header.keys()):
- annote2 = self.other_ax.annotate(r"$\lambda$ = {0:.0f} $\AA$".format(self.other_header['photplam']), color='white', fontsize=12, xy=(0.01, 0.93), xycoords='axes fraction',path_effects=[pe.withStroke(linewidth=0.5,foreground='k')])
+ self.other_ax.annotate(r"$\lambda$ = {0:.0f} $\AA$".format(self.other_header['photplam']), color=font_color, fontsize=12, xy=(
+ 0.01, 0.93), xycoords='axes fraction', path_effects=[pe.withStroke(linewidth=0.5, foreground='k')])
if 'ORIENTAT' in list(self.other_header.keys()):
- north_dir2 = AnchoredDirectionArrows(self.map_ax.transAxes, "E", "N", length=-0.08, fontsize=0.03, loc=1, aspect_ratio=-1, sep_y=0.01, sep_x=0.01, angle=-self.other_header['orientat'], color=font_color, arrow_props={'ec': 'k', 'fc': 'w', 'alpha': 1,'lw': 0.5})
+ north_dir2 = AnchoredDirectionArrows(self.other_ax.transAxes, "E", "N", length=-0.08, fontsize=0.03, loc=1, aspect_ratio=-1, sep_y=0.01,
+ sep_x=0.01, angle=-self.other_header['orientat'], color=font_color, arrow_props={'ec': 'k', 'fc': 'w', 'alpha': 1, 'lw': 0.5})
self.other_ax.add_artist(north_dir2)
- self.cr_other, = self.other_ax.plot(*(self.other_wcs.wcs.crpix-(1.,1.)), 'r+')
+ self.cr_other, = self.other_ax.plot(*(self.other_wcs.wcs.crpix-(1., 1.)), 'r+')
self.other_ax.set_title("{0:s} observation\nClick on selected point of reference.".format(self.other_observer))
self.other_ax.set_xlabel(label="Right Ascension (J2000)")
- self.other_ax.set_ylabel(label="Declination (J2000)",labelpad=-1)
+ self.other_ax.set_ylabel(label="Declination (J2000)", labelpad=-1)
- #Selection button
+ # Selection button
self.axapply = self.fig_align.add_axes([0.80, 0.01, 0.1, 0.04])
self.bapply = Button(self.axapply, 'Apply reference')
self.bapply.label.set_fontsize(8)
@@ -610,14 +620,14 @@ class align_maps(object):
x = event.xdata
y = event.ydata
- self.cr_map.set(data=[x,y])
+ self.cr_map.set(data=[x, y])
self.fig_align.canvas.draw_idle()
if (event.inaxes is not None) and (event.inaxes == self.other_ax):
x = event.xdata
y = event.ydata
- self.cr_other.set(data=[x,y])
+ self.cr_other.set(data=[x, y])
self.fig_align.canvas.draw_idle()
def reset_align(self, event):
@@ -631,14 +641,14 @@ class align_maps(object):
self.aligned = True
def apply_align(self, event=None):
- if np.array(self.cr_map.get_data()).shape == (2,1):
- self.map_wcs.wcs.crpix = np.array(self.cr_map.get_data())[:,0]+(1.,1.)
+ if np.array(self.cr_map.get_data()).shape == (2, 1):
+ self.map_wcs.wcs.crpix = np.array(self.cr_map.get_data())[:, 0]+(1., 1.)
else:
- self.map_wcs.wcs.crpix = np.array(self.cr_map.get_data())+(1.,1.)
- if np.array(self.cr_other.get_data()).shape == (2,1):
- self.other_wcs.wcs.crpix = np.array(self.cr_other.get_data())[:,0]+(1.,1.)
+ self.map_wcs.wcs.crpix = np.array(self.cr_map.get_data())+(1., 1.)
+ if np.array(self.cr_other.get_data()).shape == (2, 1):
+ self.other_wcs.wcs.crpix = np.array(self.cr_other.get_data())[:, 0]+(1., 1.)
else:
- self.other_wcs.wcs.crpix = np.array(self.cr_other.get_data())+(1.,1.)
+ self.other_wcs.wcs.crpix = np.array(self.cr_other.get_data())+(1., 1.)
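The ±(1., 1.) offsets above bridge two conventions: FITS CRPIX is 1-based while matplotlib pixel coordinates are 0-based, so a clicked position must be shifted when written into the WCS and shifted back when drawn. A minimal sketch with illustrative values:

import numpy as np

clicked = np.array([120.3, 85.7])   # illustrative 0-based pixel coordinates from a click event
crpix = clicked + (1., 1.)          # FITS CRPIX convention is 1-based
marker_xy = crpix - (1., 1.)        # back to 0-based when drawing the reference cross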
self.map_wcs.wcs.crval = np.array(self.map_wcs.pixel_to_world_values(*self.map_wcs.wcs.crpix)) self.other_wcs.wcs.crval = self.map_wcs.wcs.crval self.fig_align.canvas.draw_idle() @@ -662,34 +672,36 @@ class align_maps(object): plt.show(block=True) return self.get_aligned_wcs() - def write_map_to(self,path="map.fits",suffix="aligned",data_dir="."): + def write_map_to(self, path="map.fits", suffix="aligned", data_dir="."): new_head = deepcopy(self.map_header) new_head.update(self.map_wcs.to_header()) - new_hdul = fits.HDUList(fits.PrimaryHDU(self.map_data,new_head)) - new_hdul.writeto("_".join([path[:-5],suffix])+".fits") + new_hdul = fits.HDUList(fits.PrimaryHDU(self.map_data, new_head)) + new_hdul.writeto("_".join([path[:-5], suffix])+".fits") return 0 - def write_other_to(self,path="other_map.fits",suffix="aligned",data_dir="."): + def write_other_to(self, path="other_map.fits", suffix="aligned", data_dir="."): new_head = deepcopy(self.other_header) new_head.update(self.other_wcs.to_header()) - new_hdul = fits.HDUList(fits.PrimaryHDU(self.other_data,new_head)) - new_hdul.writeto("_".join([path[:-5],suffix])+".fits") + new_hdul = fits.HDUList(fits.PrimaryHDU(self.other_data, new_head)) + new_hdul.writeto("_".join([path[:-5], suffix])+".fits") return 0 - def write_to(self,path1="map.fits",path2="other_map.fits",suffix="aligned",data_dir="."): - self.write_map_to(path=path1,suffix=suffix,data_dir=data_dir) - self.write_other_to(path=path2,suffix=suffix,data_dir=data_dir) + def write_to(self, path1="map.fits", path2="other_map.fits", suffix="aligned", data_dir="."): + self.write_map_to(path=path1, suffix=suffix, data_dir=data_dir) + self.write_other_to(path=path2, suffix=suffix, data_dir=data_dir) return 0 + class overplot_radio(align_maps): """ Class to overplot maps from different observations. Inherit from class align_maps in order to get the same WCS on both maps. """ + def overplot(self, levels=None, SNRp_cut=3., SNRi_cut=30., vec_scale=2, savename=None, **kwargs): self.Stokes_UV = self.map self.wcs_UV = self.map_wcs - #Get Data + # Get Data obj = self.Stokes_UV[0].header['targname'] stkI = self.Stokes_UV['I_STOKES'].data stk_cov = self.Stokes_UV['IQU_COV_MATRIX'].data @@ -706,194 +718,210 @@ class overplot_radio(align_maps): self.map_convert = self.Stokes_UV[0].header['photflam'] - #Compute SNR and apply cuts + # Compute SNR and apply cuts pol[pol == 0.] = np.nan SNRp = pol/pol_err SNRp[np.isnan(SNRp)] = 0. pol[SNRp < SNRp_cut] = np.nan - SNRi = stkI/np.sqrt(stk_cov[0,0]) + SNRi = stkI/np.sqrt(stk_cov[0, 0]) SNRi[np.isnan(SNRi)] = 0. 
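For reference, the signal-to-noise cuts computed here (and applied to pol just below) reduce to the following pattern; the arrays are illustrative stand-ins for the pipeline's Stokes products:

import numpy as np

rng = np.random.default_rng(0)
stkI = np.abs(rng.normal(size=(8, 8)))          # stand-in for Stokes I
sigma_I = np.full((8, 8), 1e-2)                 # stand-in for sqrt(cov[0, 0])
pol = rng.random((8, 8))
pol_err = np.full((8, 8), 5e-2)
SNRi, SNRp = stkI / sigma_I, pol / pol_err
pol_shown = np.where((SNRi > 30.) & (SNRp > 3.), pol, np.nan)  # NaN entries are skipped by quiver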
pol[SNRi < SNRi_cut] = np.nan plt.rcParams.update({'font.size': 16}) - self.fig_overplot, self.ax_overplot = plt.subplots(figsize=(10,10), subplot_kw=dict(projection=self.wcs_UV)) - self.fig_overplot.subplots_adjust(hspace=0,wspace=0,bottom=0.1,left=0.1,top=0.8,right=1) + self.fig_overplot, self.ax_overplot = plt.subplots(figsize=(10, 10), subplot_kw=dict(projection=self.wcs_UV)) + self.fig_overplot.subplots_adjust(hspace=0, wspace=0, bottom=0.1, left=0.1, top=0.8, right=1) - #Display UV intensity map with polarisation vectors - vmin, vmax = stkI[np.isfinite(stkI)].max()/1e3*self.map_convert,stkI[np.isfinite(stkI)].max()*self.map_convert - for key, value in [["cmap",[["cmap","inferno"]]], ["norm",[["norm",LogNorm(vmin,vmax)]]]]: + # Display UV intensity map with polarisation vectors + vmin, vmax = stkI[np.isfinite(stkI)].max()/1e3*self.map_convert, stkI[np.isfinite(stkI)].max()*self.map_convert + for key, value in [["cmap", [["cmap", "inferno"]]], ["norm", [["norm", LogNorm(vmin, vmax)]]]]: try: - test = kwargs[key] + _ = kwargs[key] except KeyError: for key_i, val_i in value: kwargs[key_i] = val_i - if kwargs['cmap'] in ['inferno','magma','Greys_r','binary_r','gist_yarg_r','gist_gray','gray','bone','pink','hot','afmhot','gist_heat','copper','gist_earth','gist_stern','gnuplot','gnuplot2','CMRmap','cubehelix','nipy_spectral','gist_ncar','viridis']: + if kwargs['cmap'] in ['inferno', 'magma', 'Greys_r', 'binary_r', 'gist_yarg_r', 'gist_gray', 'gray', 'bone', 'pink', 'hot', 'afmhot', 'gist_heat', 'copper', 'gist_earth', 'gist_stern', 'gnuplot', 'gnuplot2', 'CMRmap', 'cubehelix', 'nipy_spectral', 'gist_ncar', 'viridis']: self.ax_overplot.set_facecolor('black') - font_color="white" + font_color = "white" else: self.ax_overplot.set_facecolor('white') - font_color="black" - self.im = self.ax_overplot.imshow(stkI*self.map_convert, aspect='equal',label="{0:s} observation".format(self.map_observer), **kwargs) - self.cbar = self.fig_overplot.colorbar(self.im, ax=self.ax_overplot, aspect=50, shrink=0.75, pad=0.025, label=r"$F_{{\lambda}}$ [{0:s}]".format(self.map_unit)) + font_color = "black" + self.im = self.ax_overplot.imshow(stkI*self.map_convert, aspect='equal', label="{0:s} observation".format(self.map_observer), **kwargs) + self.cbar = self.fig_overplot.colorbar(self.im, ax=self.ax_overplot, aspect=50, shrink=0.75, pad=0.025, + label=r"$F_{{\lambda}}$ [{0:s}]".format(self.map_unit)) - #Display full size polarisation vectors + # Display full size polarisation vectors if vec_scale is None: self.vec_scale = 2. pol[np.isfinite(pol)] = 1./2. else: self.vec_scale = vec_scale step_vec = 1 - px_scale = self.other_wcs.wcs.get_cdelt()[0]/self.wcs_UV.wcs.get_cdelt()[0] self.X, self.Y = np.meshgrid(np.arange(stkI.shape[1]), np.arange(stkI.shape[0])) self.U, self.V = pol*np.cos(np.pi/2.+pang*np.pi/180.), pol*np.sin(np.pi/2.+pang*np.pi/180.) 
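The components just computed follow from the polarisation angle being measured East of North, i.e. from the +y axis, hence the π/2 offset when converting to matplotlib's x/y components; schematically:

import numpy as np

P, PA = 0.15, 30.                              # illustrative degree (fraction) and angle (deg, East of North)
u = P * np.cos(np.pi / 2. + np.radians(PA))    # x-component of the pseudo-vector
v = P * np.sin(np.pi / 2. + np.radians(PA))    # y-component; length encodes P, orientation encodes PA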
- self.Q = self.ax_overplot.quiver(self.X[::step_vec,::step_vec],self.Y[::step_vec,::step_vec],self.U[::step_vec,::step_vec],self.V[::step_vec,::step_vec],units='xy',angles='uv',scale=1./self.vec_scale,scale_units='xy',pivot='mid',headwidth=0.,headlength=0.,headaxislength=0.,width=0.1,linewidth=0.5,color='white',edgecolor='black',label="{0:s} polarisation map".format(self.map_observer)) + self.Q = self.ax_overplot.quiver(self.X[::step_vec, ::step_vec], self.Y[::step_vec, ::step_vec], self.U[::step_vec, ::step_vec], self.V[::step_vec, ::step_vec], units='xy', angles='uv', scale=1./self.vec_scale, + scale_units='xy', pivot='mid', headwidth=0., headlength=0., headaxislength=0., width=0.1, linewidth=0.5, color='white', edgecolor='black', label="{0:s} polarisation map".format(self.map_observer)) self.ax_overplot.autoscale(False) - #Display other map as contours + # Display other map as contours if levels is None: - levels = np.logspace(np.log(3)/np.log(10),2.,5)/100.*other_data[other_data > 0.].max() - other_cont = self.ax_overplot.contour(other_data*self.other_convert, transform=self.ax_overplot.get_transform(self.other_wcs.celestial), levels=levels*self.other_convert, colors='grey') + levels = np.logspace(np.log(3)/np.log(10), 2., 5)/100.*other_data[other_data > 0.].max() + other_cont = self.ax_overplot.contour( + other_data*self.other_convert, transform=self.ax_overplot.get_transform(self.other_wcs.celestial), levels=levels*self.other_convert, colors='grey') self.ax_overplot.clabel(other_cont, inline=True, fontsize=5) - other_proxy = Rectangle((0,0),1,1,fc='w',ec=other_cont.collections[0].get_edgecolor()[0], label=r"{0:s} contour".format(self.other_observer)) + other_proxy = Rectangle((0, 0), 1, 1, fc='w', ec=other_cont.collections[0].get_edgecolor()[0], label=r"{0:s} contour".format(self.other_observer)) self.ax_overplot.add_patch(other_proxy) self.ax_overplot.set_xlabel(label="Right Ascension (J2000)") - self.ax_overplot.set_ylabel(label="Declination (J2000)",labelpad=-1) - self.fig_overplot.suptitle("{0:s} polarisation map of {1:s} overplotted with {2:s} {3:.2f}GHz map in {4:s}.".format(self.map_observer, obj, self.other_observer, other_freq*1e-9, self.other_unit),wrap=True) + self.ax_overplot.set_ylabel(label="Declination (J2000)", labelpad=-1) + self.fig_overplot.suptitle("{0:s} polarisation map of {1:s} overplotted with {2:s} {3:.2f}GHz map in {4:s}.".format( + self.map_observer, obj, self.other_observer, other_freq*1e-9, self.other_unit), wrap=True) - #Display pixel scale and North direction + # Display pixel scale and North direction fontprops = fm.FontProperties(size=16) px_size = self.wcs_UV.wcs.get_cdelt()[0]*3600. 
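CDELT is in degrees per pixel, so px_size is the sky scale in arcsec per pixel and 1./px_size is the length in pixels of the 1-arcsec bar passed to AnchoredSizeBar just below. With illustrative numbers:

cdelt_deg = 2.75e-5              # illustrative CDELT1 in deg/pixel
px_size = cdelt_deg * 3600.      # ~0.099 arcsec per pixel
bar_length = 1. / px_size        # ~10.1 pixels span 1 arcsec on sky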
- px_sc = AnchoredSizeBar(self.ax_overplot.transData, 1./px_size, '1 arcsec', 3, pad=0.5, sep=5, borderpad=0.5, frameon=False, size_vertical=0.005, color=font_color, fontproperties=fontprops)
+ px_sc = AnchoredSizeBar(self.ax_overplot.transData, 1./px_size, '1 arcsec', 3, pad=0.5, sep=5, borderpad=0.5,
+ frameon=False, size_vertical=0.005, color=font_color, fontproperties=fontprops)
self.ax_overplot.add_artist(px_sc)
- north_dir = AnchoredDirectionArrows(self.ax_overplot.transAxes, "E", "N", length=-0.08, fontsize=0.03, loc=1, aspect_ratio=-1, sep_y=0.01, sep_x=0.01, angle=-self.Stokes_UV[0].header['orientat'], color=font_color, arrow_props={'ec': 'k', 'fc': 'w', 'alpha': 1,'lw': 0.5})
+ north_dir = AnchoredDirectionArrows(self.ax_overplot.transAxes, "E", "N", length=-0.08, fontsize=0.03, loc=1, aspect_ratio=-1, sep_y=0.01,
+ sep_x=0.01, angle=-self.Stokes_UV[0].header['orientat'], color=font_color, arrow_props={'ec': 'k', 'fc': 'w', 'alpha': 1, 'lw': 0.5})
self.ax_overplot.add_artist(north_dir)
- pol_sc = AnchoredSizeBar(self.ax_overplot.transData, self.vec_scale, r"$P$= 100%", 4, pad=0.5, sep=5, borderpad=0.5, frameon=False, size_vertical=0.005, color=font_color, fontproperties=fontprops)
+ pol_sc = AnchoredSizeBar(self.ax_overplot.transData, self.vec_scale, r"$P$= 100%", 4, pad=0.5, sep=5, borderpad=0.5,
+ frameon=False, size_vertical=0.005, color=font_color, fontproperties=fontprops)
self.ax_overplot.add_artist(pol_sc)
- self.cr_map, = self.ax_overplot.plot(*(self.map_wcs.celestial.wcs.crpix-(1.,1.)), 'r+')
- self.cr_other, = self.ax_overplot.plot(*(self.other_wcs.celestial.wcs.crpix-(1.,1.)), 'g+', transform=self.ax_overplot.get_transform(self.other_wcs))
+ self.cr_map, = self.ax_overplot.plot(*(self.map_wcs.celestial.wcs.crpix-(1., 1.)), 'r+')
+ self.cr_other, = self.ax_overplot.plot(*(self.other_wcs.celestial.wcs.crpix-(1., 1.)), 'g+', transform=self.ax_overplot.get_transform(self.other_wcs))
- h,l = self.ax_overplot.get_legend_handles_labels()
- h[np.argmax([li=="{0:s} polarisation map".format(self.map_observer) for li in l])] = FancyArrowPatch((0,0),(0,1),arrowstyle='-',fc='w',ec='k',lw=2)
- self.legend = self.ax_overplot.legend(handles=h,labels=l,bbox_to_anchor=(0., 1.02, 1., .102), loc='lower left', mode="expand", borderaxespad=0.)
+ handles, labels = self.ax_overplot.get_legend_handles_labels()
+ idx = np.argmax([li == "{0:s} polarisation map".format(self.map_observer) for li in labels])
+ handles[idx] = FancyArrowPatch((0, 0), (0, 1), arrowstyle='-', fc='w', ec='k', lw=2)
+ self.legend = self.ax_overplot.legend(handles=handles, labels=labels, bbox_to_anchor=(0., 1.02, 1., .102), loc='lower left', mode="expand", borderaxespad=0.)
- if not(savename is None):
+ if savename is not None:
if not savename[-4:] in ['.png', '.jpg', '.pdf']:
savename += '.pdf'
- self.fig_overplot.savefig(savename,bbox_inches='tight',dpi=200)
+ self.fig_overplot.savefig(savename, bbox_inches='tight', dpi=200)
self.fig_overplot.canvas.draw()
-
def plot(self, levels=None, SNRp_cut=3., SNRi_cut=30., savename=None, **kwargs) -> None:
while not self.aligned:
self.align()
self.overplot(levels=levels, SNRp_cut=SNRp_cut, SNRi_cut=SNRi_cut, savename=savename, **kwargs)
plt.show(block=True)
+
class overplot_chandra(align_maps):
"""
Class to overplot maps from different observations.
Inherit from class align_maps in order to get the same WCS on both maps.
""" + def overplot(self, levels=None, SNRp_cut=3., SNRi_cut=30., vec_scale=2, zoom=1, savename=None, **kwargs): self.Stokes_UV = self.map self.wcs_UV = self.map_wcs - #Get Data + # Get Data obj = self.Stokes_UV[0].header['targname'] stkI = self.Stokes_UV['I_STOKES'].data stk_cov = self.Stokes_UV['IQU_COV_MATRIX'].data pol = deepcopy(self.Stokes_UV['POL_DEG_DEBIASED'].data) pol_err = self.Stokes_UV['POL_DEG_ERR'].data pang = self.Stokes_UV['POL_ANG'].data - + other_data = deepcopy(self.other_data) other_wcs = deepcopy(self.other_wcs) if zoom != 1: - other_data = sc_zoom(other_data,zoom) + other_data = sc_zoom(other_data, zoom) other_wcs.wcs.crpix *= zoom other_wcs.wcs.cdelt /= zoom - other_unit = 'counts' + self.other_unit = 'counts' - #Compute SNR and apply cuts + # Compute SNR and apply cuts pol[pol == 0.] = np.nan SNRp = pol/pol_err SNRp[np.isnan(SNRp)] = 0. pol[SNRp < SNRp_cut] = np.nan - SNRi = stkI/np.sqrt(stk_cov[0,0]) + SNRi = stkI/np.sqrt(stk_cov[0, 0]) SNRi[np.isnan(SNRi)] = 0. pol[SNRi < SNRi_cut] = np.nan plt.rcParams.update({'font.size': 16}) - self.fig_overplot, self.ax_overplot = plt.subplots(figsize=(11,10), subplot_kw=dict(projection=self.wcs_UV)) - self.fig_overplot.subplots_adjust(hspace=0,wspace=0,bottom=0.1,left=0.1,top=0.8,right=1) + self.fig_overplot, self.ax_overplot = plt.subplots(figsize=(11, 10), subplot_kw=dict(projection=self.wcs_UV)) + self.fig_overplot.subplots_adjust(hspace=0, wspace=0, bottom=0.1, left=0.1, top=0.8, right=1) - #Display UV intensity map with polarisation vectors - vmin, vmax = stkI[np.isfinite(stkI)].max()/1e3*self.map_convert,stkI[np.isfinite(stkI)].max()*self.map_convert - for key, value in [["cmap",[["cmap","inferno"]]], ["norm",[["norm",LogNorm(vmin,vmax)]]]]: + # Display UV intensity map with polarisation vectors + vmin, vmax = stkI[np.isfinite(stkI)].max()/1e3*self.map_convert, stkI[np.isfinite(stkI)].max()*self.map_convert + for key, value in [["cmap", [["cmap", "inferno"]]], ["norm", [["norm", LogNorm(vmin, vmax)]]]]: try: - test = kwargs[key] + _ = kwargs[key] except KeyError: for key_i, val_i in value: kwargs[key_i] = val_i - if kwargs['cmap'] in ['inferno','magma','Greys_r','binary_r','gist_yarg_r','gist_gray','gray','bone','pink','hot','afmhot','gist_heat','copper','gist_earth','gist_stern','gnuplot','gnuplot2','CMRmap','cubehelix','nipy_spectral','gist_ncar','viridis']: + if kwargs['cmap'] in ['inferno', 'magma', 'Greys_r', 'binary_r', 'gist_yarg_r', 'gist_gray', 'gray', 'bone', 'pink', 'hot', 'afmhot', 'gist_heat', 'copper', 'gist_earth', 'gist_stern', 'gnuplot', 'gnuplot2', 'CMRmap', 'cubehelix', 'nipy_spectral', 'gist_ncar', 'viridis']: self.ax_overplot.set_facecolor('black') - font_color="white" + font_color = "white" else: self.ax_overplot.set_facecolor('white') - font_color="black" + font_color = "black" self.im = self.ax_overplot.imshow(stkI*self.map_convert, aspect='equal', **kwargs) - self.cbar = self.fig_overplot.colorbar(self.im, ax=self.ax_overplot, aspect=50, shrink=0.75, pad=0.025, label=r"$F_{{\lambda}}$ [{0:s}]".format(self.map_unit)) + self.cbar = self.fig_overplot.colorbar(self.im, ax=self.ax_overplot, aspect=50, shrink=0.75, pad=0.025, + label=r"$F_{{\lambda}}$ [{0:s}]".format(self.map_unit)) - #Display full size polarisation vectors + # Display full size polarisation vectors if vec_scale is None: self.vec_scale = 2. pol[np.isfinite(pol)] = 1./2. 
else: self.vec_scale = vec_scale step_vec = 1 - px_scale = 1./self.wcs_UV.wcs.get_cdelt()[0] self.X, self.Y = np.meshgrid(np.arange(stkI.shape[1]), np.arange(stkI.shape[0])) self.U, self.V = pol*np.cos(np.pi/2.+pang*np.pi/180.), pol*np.sin(np.pi/2.+pang*np.pi/180.) - self.Q = self.ax_overplot.quiver(self.X[::step_vec,::step_vec],self.Y[::step_vec,::step_vec],self.U[::step_vec,::step_vec],self.V[::step_vec,::step_vec],units='xy',angles='uv',scale=1./self.vec_scale,scale_units='xy',pivot='mid',headwidth=0.,headlength=0.,headaxislength=0.,width=0.1,linewidth=0.5,color='white',edgecolor='black',label="{0:s} polarisation map".format(self.map_observer)) - proxy_Q = FancyArrowPatch((0,0),(0,1),arrowstyle='-',fc='w',ec='k',lw=3) + self.Q = self.ax_overplot.quiver(self.X[::step_vec, ::step_vec], self.Y[::step_vec, ::step_vec], self.U[::step_vec, ::step_vec], self.V[::step_vec, ::step_vec], units='xy', angles='uv', scale=1./self.vec_scale, + scale_units='xy', pivot='mid', headwidth=0., headlength=0., headaxislength=0., width=0.1, linewidth=0.5, color='white', edgecolor='black', label="{0:s} polarisation map".format(self.map_observer)) self.ax_overplot.autoscale(False) - #Display other map as contours + # Display other map as contours if levels is None: - levels = np.logspace(np.log(3)/np.log(10),2.,5)/100.*other_data[other_data > 0.].max()*self.other_convert + levels = np.logspace(np.log(3)/np.log(10), 2., 5)/100.*other_data[other_data > 0.].max()*self.other_convert elif zoom != 1: levels *= other_data.max()/self.other_data.max() other_cont = self.ax_overplot.contour(other_data*self.other_convert, transform=self.ax_overplot.get_transform(other_wcs), levels=levels, colors='grey') self.ax_overplot.clabel(other_cont, inline=True, fontsize=8) - other_proxy = Rectangle((0,0),1.,1.,fc='w',ec=other_cont.collections[0].get_edgecolor()[0], lw=2, label=r"{0:s} contour in counts".format(self.other_observer)) + other_proxy = Rectangle((0, 0), 1., 1., fc='w', ec=other_cont.collections[0].get_edgecolor()[ + 0], lw=2, label=r"{0:s} contour in counts".format(self.other_observer)) self.ax_overplot.add_patch(other_proxy) self.ax_overplot.set_xlabel(label="Right Ascension (J2000)") - self.ax_overplot.set_ylabel(label="Declination (J2000)",labelpad=-1) - self.fig_overplot.suptitle("{0:s} polarisation map of {1:s} overplotted\nwith {2:s} contour in counts.".format(self.map_observer,obj,self.other_observer),wrap=True) + self.ax_overplot.set_ylabel(label="Declination (J2000)", labelpad=-1) + self.fig_overplot.suptitle("{0:s} polarisation map of {1:s} overplotted\nwith {2:s} contour in counts.".format( + self.map_observer, obj, self.other_observer), wrap=True) - #Display pixel scale and North direction + # Display pixel scale and North direction fontprops = fm.FontProperties(size=16) px_size = self.wcs_UV.wcs.get_cdelt()[0]*3600. 
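The default contour levels built a few lines above are five logarithmically spaced steps from 3% to 100% of the map peak; note that np.log(3)/np.log(10) is simply np.log10(3). A sketch with an illustrative peak:

import numpy as np

peak = 1.0                                               # illustrative peak of the contoured map
levels = np.logspace(np.log10(3.), 2., 5) / 100. * peak
# ~[0.03, 0.072, 0.173, 0.416, 1.0]: 3%, 7%, 17%, 42% and 100% of the peak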
- px_sc = AnchoredSizeBar(self.ax_overplot.transData, 1./px_size, '1 arcsec', 3, pad=0.5, sep=5, borderpad=0.5, frameon=False, size_vertical=0.005, color=font_color, fontproperties=fontprops)
+ px_sc = AnchoredSizeBar(self.ax_overplot.transData, 1./px_size, '1 arcsec', 3, pad=0.5, sep=5, borderpad=0.5,
+ frameon=False, size_vertical=0.005, color=font_color, fontproperties=fontprops)
self.ax_overplot.add_artist(px_sc)
- north_dir = AnchoredDirectionArrows(self.ax_overplot.transAxes, "E", "N", length=-0.08, fontsize=0.03, loc=1, aspect_ratio=-1, sep_y=0.01, sep_x=0.01, angle=-self.Stokes_UV[0].header['orientat'], color=font_color, arrow_props={'ec': 'k', 'fc': 'w', 'alpha': 1,'lw': 0.5})
+ north_dir = AnchoredDirectionArrows(self.ax_overplot.transAxes, "E", "N", length=-0.08, fontsize=0.03, loc=1, aspect_ratio=-1, sep_y=0.01,
+ sep_x=0.01, angle=-self.Stokes_UV[0].header['orientat'], color=font_color, arrow_props={'ec': 'k', 'fc': 'w', 'alpha': 1, 'lw': 0.5})
self.ax_overplot.add_artist(north_dir)
- pol_sc = AnchoredSizeBar(self.ax_overplot.transData, self.vec_scale, r"$P$= 100%", 4, pad=0.5, sep=5, borderpad=0.5, frameon=False, size_vertical=0.005, color=font_color, fontproperties=fontprops)
+ pol_sc = AnchoredSizeBar(self.ax_overplot.transData, self.vec_scale, r"$P$= 100%", 4, pad=0.5, sep=5, borderpad=0.5,
+ frameon=False, size_vertical=0.005, color=font_color, fontproperties=fontprops)
self.ax_overplot.add_artist(pol_sc)
- self.cr_map, = self.ax_overplot.plot(*(self.map_wcs.celestial.wcs.crpix-(1.,1.)), 'r+')
- self.cr_other, = self.ax_overplot.plot(*(other_wcs.celestial.wcs.crpix-(1.,1.)), 'g+', transform=self.ax_overplot.get_transform(other_wcs))
- h,l = self.ax_overplot.get_legend_handles_labels()
- h[np.argmax([li=="{0:s} polarisation map".format(self.map_observer) for li in l])] = FancyArrowPatch((0,0),(0,1),arrowstyle='-',fc='w',ec='k',lw=2)
- self.legend = self.ax_overplot.legend(handles=h,labels=l,bbox_to_anchor=(0., 1.02, 1., .102), loc='lower left', mode="expand", borderaxespad=0.)
+ self.cr_map, = self.ax_overplot.plot(*(self.map_wcs.celestial.wcs.crpix-(1., 1.)), 'r+')
+ self.cr_other, = self.ax_overplot.plot(*(other_wcs.celestial.wcs.crpix-(1., 1.)), 'g+', transform=self.ax_overplot.get_transform(other_wcs))
+ handles, labels = self.ax_overplot.get_legend_handles_labels()
+ idx = np.argmax([li == "{0:s} polarisation map".format(self.map_observer) for li in labels])
+ handles[idx] = FancyArrowPatch((0, 0), (0, 1), arrowstyle='-', fc='w', ec='k', lw=2)
+ self.legend = self.ax_overplot.legend(handles=handles, labels=labels, bbox_to_anchor=(0., 1.02, 1., .102), loc='lower left', mode="expand", borderaxespad=0.)
- if not(savename is None):
+ if savename is not None:
if not savename[-4:] in ['.png', '.jpg', '.pdf']:
savename += '.pdf'
- self.fig_overplot.savefig(savename,bbox_inches='tight',dpi=200)
+ self.fig_overplot.savefig(savename, bbox_inches='tight', dpi=200)
self.fig_overplot.canvas.draw()
@@ -909,10 +937,11 @@ class overplot_pol(align_maps):
"""
Class to overplot maps from different observations.
Inherit from class align_maps in order to get the same WCS on both maps.
""" + def overplot(self, levels=None, SNRp_cut=3., SNRi_cut=30., vec_scale=2., savename=None, **kwargs): self.Stokes_UV = self.map self.wcs_UV = self.map_wcs - #Get Data + # Get Data obj = self.Stokes_UV[0].header['targname'] stkI = self.Stokes_UV['I_STOKES'].data stk_cov = self.Stokes_UV['IQU_COV_MATRIX'].data @@ -922,41 +951,43 @@ class overplot_pol(align_maps): other_data = self.other_data - #Compute SNR and apply cuts + # Compute SNR and apply cuts pol[pol == 0.] = np.nan SNRp = pol/pol_err SNRp[np.isnan(SNRp)] = 0. pol[SNRp < SNRp_cut] = np.nan - SNRi = stkI/np.sqrt(stk_cov[0,0]) + SNRi = stkI/np.sqrt(stk_cov[0, 0]) SNRi[np.isnan(SNRi)] = 0. pol[SNRi < SNRi_cut] = np.nan plt.rcParams.update({'font.size': 16}) - self.fig_overplot, self.ax_overplot = plt.subplots(figsize=(11,10), subplot_kw=dict(projection=self.other_wcs)) - self.fig_overplot.subplots_adjust(hspace=0,wspace=0,bottom=0.1,left=0.1,top=0.80,right=1.02) + self.fig_overplot, self.ax_overplot = plt.subplots(figsize=(11, 10), subplot_kw=dict(projection=self.other_wcs)) + self.fig_overplot.subplots_adjust(hspace=0, wspace=0, bottom=0.1, left=0.1, top=0.80, right=1.02) self.ax_overplot.set_xlabel(label="Right Ascension (J2000)") - self.ax_overplot.set_ylabel(label="Declination (J2000)",labelpad=-1) - self.fig_overplot.suptitle("{0:s} observation from {1:s} overplotted with polarisation vectors and Stokes I contours from {2:s}".format(obj,self.other_observer,self.map_observer),wrap=True) + self.ax_overplot.set_ylabel(label="Declination (J2000)", labelpad=-1) + self.fig_overplot.suptitle("{0:s} observation from {1:s} overplotted with polarisation vectors and Stokes I contours from {2:s}".format( + obj, self.other_observer, self.map_observer), wrap=True) - #Display "other" intensity map + # Display "other" intensity map vmin, vmax = other_data[other_data > 0.].max()/1e3*self.other_convert, other_data[other_data > 0.].max()*self.other_convert - for key, value in [["cmap",[["cmap","inferno"]]], ["norm",[["vmin",vmin],["vmax",vmax]]]]: + for key, value in [["cmap", [["cmap", "inferno"]]], ["norm", [["vmin", vmin], ["vmax", vmax]]]]: try: - test = kwargs[key] + _ = kwargs[key] except KeyError: for key_i, val_i in value: kwargs[key_i] = val_i - if kwargs['cmap'] in ['inferno','magma','Greys_r','binary_r','gist_yarg_r','gist_gray','gray','bone','pink','hot','afmhot','gist_heat','copper','gist_earth','gist_stern','gnuplot','gnuplot2','CMRmap','cubehelix','nipy_spectral','gist_ncar','viridis']: + if kwargs['cmap'] in ['inferno', 'magma', 'Greys_r', 'binary_r', 'gist_yarg_r', 'gist_gray', 'gray', 'bone', 'pink', 'hot', 'afmhot', 'gist_heat', 'copper', 'gist_earth', 'gist_stern', 'gnuplot', 'gnuplot2', 'CMRmap', 'cubehelix', 'nipy_spectral', 'gist_ncar', 'viridis']: self.ax_overplot.set_facecolor('black') - font_color="white" + font_color = "white" else: self.ax_overplot.set_facecolor('white') - font_color="black" + font_color = "black" self.im = self.ax_overplot.imshow(other_data*self.other_convert, alpha=1., label="{0:s} observation".format(self.other_observer), **kwargs) - self.cbar = self.fig_overplot.colorbar(self.im, ax=self.ax_overplot, aspect=80, shrink=0.75, pad=0.025, label=r"$F_{{\lambda}}$ [{0:s}]".format(self.other_unit)) + self.cbar = self.fig_overplot.colorbar(self.im, ax=self.ax_overplot, aspect=80, shrink=0.75, pad=0.025, + label=r"$F_{{\lambda}}$ [{0:s}]".format(self.other_unit)) - #Display full size polarisation vectors + # Display full size polarisation vectors if vec_scale is None: self.vec_scale = 2. 
pol[np.isfinite(pol)] = 1./2. @@ -966,44 +997,51 @@ class overplot_pol(align_maps): px_scale = self.other_wcs.wcs.get_cdelt()[0]/self.wcs_UV.wcs.get_cdelt()[0] self.X, self.Y = np.meshgrid(np.arange(stkI.shape[1]), np.arange(stkI.shape[0])) self.U, self.V = pol*np.cos(np.pi/2.+pang*np.pi/180.), pol*np.sin(np.pi/2.+pang*np.pi/180.) - self.Q = self.ax_overplot.quiver(self.X[::step_vec,::step_vec],self.Y[::step_vec,::step_vec],self.U[::step_vec,::step_vec],self.V[::step_vec,::step_vec],units='xy',angles='uv',scale=px_scale/self.vec_scale,scale_units='xy',pivot='mid',headwidth=0.,headlength=0.,headaxislength=0.,width=0.1/px_scale,linewidth=0.5,color='white',edgecolor='black', transform=self.ax_overplot.get_transform(self.wcs_UV),label="{0:s} polarisation map".format(self.map_observer)) + self.Q = self.ax_overplot.quiver(self.X[::step_vec, ::step_vec], self.Y[::step_vec, ::step_vec], self.U[::step_vec, ::step_vec], self.V[::step_vec, ::step_vec], units='xy', angles='uv', scale=px_scale/self.vec_scale, scale_units='xy', pivot='mid', + headwidth=0., headlength=0., headaxislength=0., width=0.1/px_scale, linewidth=0.5, color='white', edgecolor='black', transform=self.ax_overplot.get_transform(self.wcs_UV), label="{0:s} polarisation map".format(self.map_observer)) - #Display Stokes I as contours + # Display Stokes I as contours if levels is None: - levels = np.logspace(np.log(3)/np.log(10),2.,5)/100.*np.max(stkI[stkI > 0.])*self.map_convert - cont_stkI = self.ax_overplot.contour(stkI*self.map_convert, levels=levels, colors='grey', alpha=0.75, transform=self.ax_overplot.get_transform(self.wcs_UV)) - #self.ax_overplot.clabel(cont_stkI, inline=True, fontsize=5) - cont_proxy = Rectangle((0,0),1,1,fc='w',ec=cont_stkI.collections[0].get_edgecolor()[0], label="{0:s} Stokes I contour".format(self.map_observer)) + levels = np.logspace(np.log(3)/np.log(10), 2., 5)/100.*np.max(stkI[stkI > 0.])*self.map_convert + cont_stkI = self.ax_overplot.contour(stkI*self.map_convert, levels=levels, colors='grey', alpha=0.75, + transform=self.ax_overplot.get_transform(self.wcs_UV)) + # self.ax_overplot.clabel(cont_stkI, inline=True, fontsize=5) + cont_proxy = Rectangle((0, 0), 1, 1, fc='w', ec=cont_stkI.collections[0].get_edgecolor()[0], label="{0:s} Stokes I contour".format(self.map_observer)) self.ax_overplot.add_patch(cont_proxy) - #Display pixel scale and North direction + # Display pixel scale and North direction fontprops = fm.FontProperties(size=16) px_size = self.other_wcs.wcs.get_cdelt()[0]*3600. 
- px_sc = AnchoredSizeBar(self.ax_overplot.transData, 1./px_size, '1 arcsec', 3, pad=0.5, sep=5, borderpad=0.5, frameon=False, size_vertical=0.005, color=font_color, fontproperties=fontprops)
+ px_sc = AnchoredSizeBar(self.ax_overplot.transData, 1./px_size, '1 arcsec', 3, pad=0.5, sep=5, borderpad=0.5,
+ frameon=False, size_vertical=0.005, color=font_color, fontproperties=fontprops)
self.ax_overplot.add_artist(px_sc)
- north_dir = AnchoredDirectionArrows(self.ax_overplot.transAxes, "E", "N", length=-0.08, fontsize=0.03, loc=1, aspect_ratio=-1, sep_y=0.01, sep_x=0.01, angle=-self.Stokes_UV[0].header['orientat'], color=font_color, arrow_props={'ec': 'k', 'fc': 'w', 'alpha': 1,'lw': 0.5})
+ north_dir = AnchoredDirectionArrows(self.ax_overplot.transAxes, "E", "N", length=-0.08, fontsize=0.03, loc=1, aspect_ratio=-1, sep_y=0.01,
+ sep_x=0.01, angle=-self.Stokes_UV[0].header['orientat'], color=font_color, arrow_props={'ec': 'k', 'fc': 'w', 'alpha': 1, 'lw': 0.5})
self.ax_overplot.add_artist(north_dir)
- pol_sc = AnchoredSizeBar(self.ax_overplot.transData, self.vec_scale/px_scale, r"$P$= 100%", 4, pad=0.5, sep=5, borderpad=0.5, frameon=False, size_vertical=0.005, color=font_color, fontproperties=fontprops)
+ pol_sc = AnchoredSizeBar(self.ax_overplot.transData, self.vec_scale/px_scale, r"$P$= 100%", 4, pad=0.5, sep=5,
+ borderpad=0.5, frameon=False, size_vertical=0.005, color=font_color, fontproperties=fontprops)
self.ax_overplot.add_artist(pol_sc)
- self.cr_map, = self.ax_overplot.plot(*(self.map_wcs.celestial.wcs.crpix-(1.,1.)), 'r+', transform=self.ax_overplot.get_transform(self.wcs_UV))
- self.cr_other, = self.ax_overplot.plot(*(self.other_wcs.celestial.wcs.crpix-(1.,1.)), 'g+')
+ self.cr_map, = self.ax_overplot.plot(*(self.map_wcs.celestial.wcs.crpix-(1., 1.)), 'r+', transform=self.ax_overplot.get_transform(self.wcs_UV))
+ self.cr_other, = self.ax_overplot.plot(*(self.other_wcs.celestial.wcs.crpix-(1., 1.)), 'g+')
if "PHOTPLAM" in list(self.other_header.keys()):
- self.legend_title = r"{0:s} image at $\lambda$ = {1:.0f} $\AA$".format(self.other_map_observer,float(self.other_header['photplam']))
+ self.legend_title = r"{0:s} image at $\lambda$ = {1:.0f} $\AA$".format(self.other_observer, float(self.other_header['photplam']))
elif "CRVAL3" in list(self.other_header.keys()):
- self.legend_title = "{0:s} image at {1:.2f} GHz".format(self.other_observer,float(self.other_header['crval3'])*1e-9)
+ self.legend_title = "{0:s} image at {1:.2f} GHz".format(self.other_observer, float(self.other_header['crval3'])*1e-9)
else:
self.legend_title = r"{0:s} image".format(self.other_observer)
- h,l = self.ax_overplot.get_legend_handles_labels()
- h[np.argmax([li=="{0:s} polarisation map".format(self.map_observer) for li in l])] = FancyArrowPatch((0,0),(0,1),arrowstyle='-',fc='w',ec='k',lw=2)
- self.legend = self.ax_overplot.legend(handles=h,labels=l,bbox_to_anchor=(0., 1.02, 1., .102), loc='lower left', mode="expand", borderaxespad=0.)
-
- if not(savename is None):
+ handles, labels = self.ax_overplot.get_legend_handles_labels()
+ idx = np.argmax([li == "{0:s} polarisation map".format(self.map_observer) for li in labels])
+ handles[idx] = FancyArrowPatch((0, 0), (0, 1), arrowstyle='-', fc='w', ec='k', lw=2)
+ self.legend = self.ax_overplot.legend(handles=handles, labels=labels, bbox_to_anchor=(0., 1.02, 1., .102), loc='lower left', mode="expand", borderaxespad=0.)
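Quiver objects make poor legend handles, which is why these hunks swap the quiver's handle for a plain line-like proxy before building the legend. The pattern in isolation (idx is hypothetical here):

from matplotlib.patches import FancyArrowPatch
import matplotlib.pyplot as plt

fig, ax = plt.subplots()
handles, labels = ax.get_legend_handles_labels()   # empty here; populated in the real figure
proxy = FancyArrowPatch((0, 0), (0, 1), arrowstyle='-', fc='w', ec='k', lw=2)
# handles[idx] = proxy   # idx locates the quiver entry by its label, as in the hunk above
# ax.legend(handles=handles, labels=labels)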
+
+ if savename is not None:
if not savename[-4:] in ['.png', '.jpg', '.pdf']:
savename += '.pdf'
- self.fig_overplot.savefig(savename,bbox_inches='tight',dpi=200)
+ self.fig_overplot.savefig(savename, bbox_inches='tight', dpi=200)
self.fig_overplot.canvas.draw()
@@ -1013,25 +1051,27 @@ class overplot_pol(align_maps):
self.overplot(levels=levels, SNRp_cut=SNRp_cut, SNRi_cut=SNRi_cut, vec_scale=vec_scale, savename=savename, **kwargs)
plt.show(block=True)
- def add_vector(self,position='center',pol_deg=1.,pol_ang=0.,**kwargs):
+ def add_vector(self, position='center', pol_deg=1., pol_ang=0., **kwargs):
if position == 'center':
position = np.array(self.X.shape)/2.
- if type(position) == SkyCoord:
+ if isinstance(position, SkyCoord):
position = self.other_wcs.world_to_pixel(position)
u, v = pol_deg*np.cos(np.radians(pol_ang)+np.pi/2.), pol_deg*np.sin(np.radians(pol_ang)+np.pi/2.)
- for key, value in [["scale",[["scale",self.vec_scale]]], ["width",[["width",0.1]]], ["color",[["color",'k']]]]:
+ for key, value in [["scale", [["scale", self.vec_scale]]], ["width", [["width", 0.1]]], ["color", [["color", 'k']]]]:
try:
- test = kwargs[key]
+ _ = kwargs[key]
except KeyError:
for key_i, val_i in value:
kwargs[key_i] = val_i
- new_vec = self.ax_overplot.quiver(*position,u,v,units='xy',angles='uv',scale_units='xy',pivot='mid',headwidth=0.,headlength=0.,headaxislength=0.,**kwargs)
+ new_vec = self.ax_overplot.quiver(*position, u, v, units='xy', angles='uv', scale_units='xy',
+ pivot='mid', headwidth=0., headlength=0., headaxislength=0., **kwargs)
self.legend.remove()
- self.legend = self.ax_overplot.legend(title=self.legend_title,bbox_to_anchor=(0., 1.02, 1., .102), loc='lower left', mode="expand", borderaxespad=0.)
+ self.legend = self.ax_overplot.legend(title=self.legend_title, bbox_to_anchor=(0., 1.02, 1., .102), loc='lower left', mode="expand", borderaxespad=0.)
self.fig_overplot.canvas.draw()
return new_vec
+
class align_pol(object):
def __init__(self, maps, **kwargs):
order = np.argsort(np.array([curr[0].header['mjd-obs'] for curr in maps]))
@@ -1044,88 +1084,88 @@ class align_pol(object):
self.aligned = np.zeros(self.other_maps.shape[0], dtype=bool)
self.kwargs = kwargs
-
+
def single_plot(self, curr_map, wcs, v_lim=None, ax_lim=None, SNRp_cut=3., SNRi_cut=30., savename=None, **kwargs):
- #Get data
- stkI = deepcopy(curr_map['I_STOKES'].data)
- stkQ = deepcopy(curr_map['Q_STOKES'].data)
- stkU = deepcopy(curr_map['U_STOKES'].data)
- stk_cov = deepcopy(curr_map['IQU_COV_MATRIX'].data)
+ # Get data
+ stkI = curr_map['I_STOKES'].data
+ stk_cov = curr_map['IQU_COV_MATRIX'].data
pol = deepcopy(curr_map['POL_DEG_DEBIASED'].data)
- pol_err = deepcopy(curr_map['POL_DEG_ERR'].data)
- pang = deepcopy(curr_map['POL_ANG'].data)
+ pol_err = curr_map['POL_DEG_ERR'].data
+ pang = curr_map['POL_ANG'].data
try:
data_mask = curr_map['DATA_MASK'].data.astype(bool)
except KeyError:
data_mask = np.ones(stkI.shape).astype(bool)
- pivot_wav = curr_map[0].header['photplam']
convert_flux = curr_map[0].header['photflam']
- #Compute SNR and apply cuts
- pol[pol == 0.] = np.nan
- pol_err[pol_err == 0.] = np.nan
- SNRp = pol/pol_err
- SNRp[np.isnan(SNRp)] = 0.
- pol[SNRp < SNRp_cut] = np.nan
+ # Compute SNR and apply cuts
+ maskpol = np.logical_and(pol_err > 0., data_mask)
+ SNRp = np.zeros(pol.shape)
+ SNRp[maskpol] = pol[maskpol]/pol_err[maskpol]
- maskI = stk_cov[0,0] > 0
+ maskI = np.logical_and(stk_cov[0, 0] > 0, data_mask)
SNRi = np.zeros(stkI.shape)
- SNRi[maskI] = stkI[maskI]/np.sqrt(stk_cov[0,0][maskI])
- pol[SNRi < SNRi_cut] = np.nan
+ SNRi[maskI] = stkI[maskI]/np.sqrt(stk_cov[0, 0][maskI])
- mask = (SNRp > SNRp_cut) * (SNRi > SNRi_cut)
+ mask = (SNRp > SNRp_cut) * (SNRi > SNRi_cut) * (pol >= 0.)
+ pol[~mask] = np.nan
- #Plot the map
+ # Plot the map
plt.rcParams.update({'font.size': 10})
plt.rcdefaults()
- fig = plt.figure(figsize=(10,10))
+ fig = plt.figure(figsize=(10, 10))
ax = fig.add_subplot(111, projection=wcs)
ax.set(xlabel="Right Ascension (J2000)", ylabel="Declination (J2000)", facecolor='k',
- title="target {0:s} observed on {1:s}".format(curr_map[0].header['targname'], curr_map[0].header['date-obs']))
- fig.subplots_adjust(hspace=0, wspace=0, right=0.9)
- cbar_ax = fig.add_axes([0.95, 0.12, 0.01, 0.75])
+ title="target {0:s} observed on {1:s}".format(curr_map[0].header['targname'], curr_map[0].header['date-obs']))
+ fig.subplots_adjust(hspace=0, wspace=0, right=1.02)
- if not ax_lim is None:
+ if ax_lim is not None:
lim = np.concatenate([wcs.world_to_pixel(ax_lim[i]) for i in range(len(ax_lim))])
x_lim, y_lim = lim[0::2], lim[1::2]
- ax.set(xlim=x_lim,ylim=y_lim)
+ ax.set(xlim=x_lim, ylim=y_lim)
if v_lim is None:
vmin, vmax = 0., np.max(stkI[stkI > 0.]*convert_flux)
else:
vmin, vmax = v_lim*convert_flux
-
+
for key, value in [["cmap", [["cmap", "inferno"]]], ["norm", [["vmin", vmin], ["vmax", vmax]]]]:
try:
test = kwargs[key]
- if str(type(test)) == "<class 'matplotlib.colors.LogNorm'>":
+ if isinstance(test, LogNorm):
kwargs[key] = LogNorm(vmin, vmax)
except KeyError:
for key_i, val_i in value:
kwargs[key_i] = val_i
im = ax.imshow(stkI*convert_flux, aspect='equal', **kwargs)
- cbar = plt.colorbar(im, cax=cbar_ax, label=r"$F_{\lambda}$ [$ergs \cdot cm^{-2} \cdot s^{-1} \cdot \AA^{-1}$]")
+ fig.colorbar(im, ax=ax, aspect=50, shrink=0.75, pad=0.025, label=r"$F_{\lambda}$ [$ergs \cdot cm^{-2} \cdot s^{-1} \cdot \AA^{-1}$]")
px_size = wcs.wcs.get_cdelt()[0]*3600.
px_sc = AnchoredSizeBar(ax.transData, 1./px_size, '1 arcsec', 3, pad=0.5, sep=5, borderpad=0.5, frameon=False, size_vertical=0.005, color='w')
ax.add_artist(px_sc)
- north_dir = AnchoredDirectionArrows(ax.transAxes, "E", "N", length=-0.08, fontsize=0.025, loc=1, aspect_ratio=-1, sep_y=0.01, sep_x=0.01, back_length=0., head_length=10., head_width=10., angle=curr_map[0].header['orientat'], color='white', text_props={'ec': None, 'fc': 'w', 'alpha': 1, 'lw': 0.4}, arrow_props={'ec': None,'fc':'w','alpha': 1,'lw': 1})
+ north_dir = AnchoredDirectionArrows(ax.transAxes, "E", "N", length=-0.08, fontsize=0.025, loc=1, aspect_ratio=-1, sep_y=0.01, sep_x=0.01, back_length=0., head_length=10., head_width=10.,
+ angle=curr_map[0].header['orientat'], color='white', text_props={'ec': None, 'fc': 'w', 'alpha': 1, 'lw': 0.4}, arrow_props={'ec': None, 'fc': 'w', 'alpha': 1, 'lw': 1})
ax.add_artist(north_dir)
-
+
step_vec = 1
X, Y = np.meshgrid(np.arange(stkI.shape[1]), np.arange(stkI.shape[0]))
U, V = pol*np.cos(np.pi/2.+pang*np.pi/180.), pol*np.sin(np.pi/2.+pang*np.pi/180.)
- Q = ax.quiver(X[::step_vec,::step_vec],Y[::step_vec,::step_vec],U[::step_vec,::step_vec],V[::step_vec,::step_vec],units='xy',angles='uv',scale=0.5,scale_units='xy',pivot='mid',headwidth=0.,headlength=0.,headaxislength=0.,width=0.1,color='w') + ax.quiver(X[::step_vec, ::step_vec], Y[::step_vec, ::step_vec], U[::step_vec, ::step_vec], V[::step_vec, ::step_vec], units='xy', + angles='uv', scale=0.5, scale_units='xy', pivot='mid', headwidth=0., headlength=0., headaxislength=0., width=0.1, color='w') pol_sc = AnchoredSizeBar(ax.transData, 2., r"$P$= 100 %", 4, pad=0.5, sep=5, borderpad=0.5, frameon=False, size_vertical=0.005, color='w') ax.add_artist(pol_sc) - if not savename is None: - if not savename[-4:] in ['.png', '.jpg', '.pdf']: + if 'PHOTPLAM' in list(curr_map[0].header.keys()): + ax.annotate(r"$\lambda$ = {0:.0f} $\AA$".format(curr_map[0].header['photplam']), color='white', fontsize=12, xy=( + 0.01, 0.93), xycoords='axes fraction', path_effects=[pe.withStroke(linewidth=0.5, foreground='k')]) + + if savename is not None: + if savename[-4:] not in ['.png', '.jpg', '.pdf']: savename += '.pdf' - fig.savefig(savename,bbox_inches='tight',dpi=300) + fig.savefig(savename, bbox_inches='tight', dpi=300) plt.show(block=True) return fig, ax @@ -1135,32 +1175,38 @@ class align_pol(object): curr_align = align_maps(self.ref_map, curr_map, **self.kwargs) self.wcs, self.wcs_other[i] = curr_align.align() self.aligned[i] = curr_align.aligned - + def plot(self, SNRp_cut=3., SNRi_cut=30., savename=None, **kwargs): while not self.aligned.all(): self.align() eps = 1e-35 - vmin = np.min([np.min(curr_map[0].data[curr_map[0].data > SNRi_cut*np.max([eps*np.ones(curr_map[0].data.shape),np.sqrt(curr_map[3].data[0,0])],axis=0)]) for curr_map in self.other_maps])/2.5 - vmax = np.max([np.max(curr_map[0].data[curr_map[0].data > SNRi_cut*np.max([eps*np.ones(curr_map[0].data.shape),np.sqrt(curr_map[3].data[0,0])],axis=0)]) for curr_map in self.other_maps]) - vmin = np.min([vmin, np.min(self.ref_map[0].data[self.ref_map[0].data > SNRi_cut*np.max([eps*np.ones(self.ref_map[0].data.shape),np.sqrt(self.ref_map[3].data[0,0])],axis=0)])])/2.5 - vmax = np.max([vmax, np.max(self.ref_map[0].data[self.ref_map[0].data > SNRi_cut*np.max([eps*np.ones(self.ref_map[0].data.shape),np.sqrt(self.ref_map[3].data[0,0])],axis=0)])]) + vmin = np.min([np.min(curr_map[0].data[curr_map[0].data > SNRi_cut*np.max([eps*np.ones(curr_map[0].data.shape), + np.sqrt(curr_map[3].data[0, 0])], axis=0)]) for curr_map in self.other_maps])/2.5 + vmax = np.max([np.max(curr_map[0].data[curr_map[0].data > SNRi_cut*np.max([eps*np.ones(curr_map[0].data.shape), + np.sqrt(curr_map[3].data[0, 0])], axis=0)]) for curr_map in self.other_maps]) + vmin = np.min([vmin, np.min(self.ref_map[0].data[self.ref_map[0].data > SNRi_cut * + np.max([eps*np.ones(self.ref_map[0].data.shape), np.sqrt(self.ref_map[3].data[0, 0])], axis=0)])])/2.5 + vmax = np.max([vmax, np.max(self.ref_map[0].data[self.ref_map[0].data > SNRi_cut * + np.max([eps*np.ones(self.ref_map[0].data.shape), np.sqrt(self.ref_map[3].data[0, 0])], axis=0)])]) v_lim = np.array([vmin, vmax]) - fig, ax = self.single_plot(self.ref_map, self.wcs, v_lim = v_lim, SNRp_cut=SNRp_cut, SNRi_cut=SNRi_cut, savename=savename+'_0', **kwargs) + fig, ax = self.single_plot(self.ref_map, self.wcs, v_lim=v_lim, SNRp_cut=SNRp_cut, SNRi_cut=SNRi_cut, savename=savename+'_0', **kwargs) x_lim, y_lim = ax.get_xlim(), ax.get_ylim() ax_lim = np.array([self.wcs.pixel_to_world(x_lim[i], y_lim[i]) for i in range(len(x_lim))]) - + for i, 
curr_map in enumerate(self.other_maps): - self.single_plot(curr_map, self.wcs_other[i], v_lim=v_lim, ax_lim=ax_lim, SNRp_cut=SNRp_cut, SNRi_cut=SNRi_cut, savename=savename+'_'+str(i+1), **kwargs) + self.single_plot(curr_map, self.wcs_other[i], v_lim=v_lim, ax_lim=ax_lim, SNRp_cut=SNRp_cut, + SNRi_cut=SNRi_cut, savename=savename+'_'+str(i+1), **kwargs) class crop_map(object): """ Class to interactively crop a map to desired Region of Interest """ + def __init__(self, hdul, fig=None, ax=None, **kwargs): - #Get data - self.cropped=False + # Get data + self.cropped = False self.hdul = hdul self.header = deepcopy(self.hdul[0].header) self.wcs = WCS(self.header).deepcopy() @@ -1175,17 +1221,17 @@ class crop_map(object): except AttributeError: self.kwargs = {} - #Plot the map + # Plot the map plt.rcParams.update({'font.size': 12}) if fig is None: - self.fig = plt.figure(figsize=(15,15)) + self.fig = plt.figure(figsize=(15, 15)) self.fig.suptitle("Click and drag to crop to desired Region of Interest.") else: self.fig = fig if ax is None: self.ax = self.fig.add_subplot(111, projection=self.wcs) - self.mask_alpha=1. - #Selection button + self.mask_alpha = 1. + # Selection button self.axapply = self.fig.add_axes([0.80, 0.01, 0.1, 0.04]) self.bapply = Button(self.axapply, 'Apply') self.axreset = self.fig.add_axes([0.60, 0.01, 0.1, 0.04]) @@ -1195,16 +1241,15 @@ class crop_map(object): self.ax = ax self.mask_alpha = 0.75 self.rect_selector = RectangleSelector(self.ax, self.onselect_crop, - button=[1]) + button=[1]) self.embedded = True self.display(self.data, self.wcs, self.map_convert, **self.kwargs) - self.extent = np.array([0.,self.data.shape[0],0., self.data.shape[1]]) + self.extent = np.array([0., self.data.shape[0], 0., self.data.shape[1]]) self.center = np.array(self.data.shape)/2 self.RSextent = deepcopy(self.extent) self.RScenter = deepcopy(self.center) - def display(self, data=None, wcs=None, convert_flux=None, **kwargs): if data is None: data = self.data @@ -1218,9 +1263,9 @@ class crop_map(object): kwargs = {**self.kwargs, **kwargs} vmin, vmax = np.min(data[data > 0.]*convert_flux), np.max(data[data > 0.]*convert_flux) - for key, value in [["cmap",[["cmap","inferno"]]], ["origin",[["origin","lower"]]], ["aspect",[["aspect","equal"]]], ["alpha",[["alpha",self.mask_alpha]]], ["norm",[["vmin",vmin],["vmax",vmax]]]]: + for key, value in [["cmap", [["cmap", "inferno"]]], ["origin", [["origin", "lower"]]], ["aspect", [["aspect", "equal"]]], ["alpha", [["alpha", self.mask_alpha]]], ["norm", [["vmin", vmin], ["vmax", vmax]]]]: try: - test = kwargs[key] + _ = kwargs[key] except KeyError: for key_i, val_i in value: kwargs[key_i] = val_i @@ -1251,7 +1296,7 @@ class crop_map(object): if self.fig.canvas.manager.toolbar.mode == '': self.rect_selector = RectangleSelector(self.ax, self.onselect_crop, - button=[1]) + button=[1]) self.RSextent = deepcopy(self.extent) self.RScenter = deepcopy(self.center) @@ -1282,35 +1327,35 @@ class crop_map(object): extent = np.array(self.im.get_extent()) shape_im = extent[1::2] - extent[0::2] if (shape_im.astype(int) != shape).any() and (self.RSextent != self.extent).any(): - #Update WCS and header in new cropped image + # Update WCS and header in new cropped image crpix = np.array(wcs.wcs.crpix) self.wcs_crop = wcs.deepcopy() self.wcs_crop.array_shape = shape if self.crpix_in_RS: self.wcs_crop.wcs.crpix = np.array(self.wcs_crop.wcs.crpix) - self.RSextent[::2] else: - self.wcs_crop.wcs.crval = wcs.wcs_pix2world([self.RScenter],1)[0] + self.wcs_crop.wcs.crval = 
wcs.wcs_pix2world([self.RScenter], 1)[0] self.wcs_crop.wcs.crpix = self.RScenter-self.RSextent[::2] # Crop dataset self.data_crop = deepcopy(data[vertex[2]:vertex[3], vertex[0]:vertex[1]]) - #Write cropped map to new HDUList + # Write cropped map to new HDUList self.header_crop = deepcopy(header) self.header_crop.update(self.wcs_crop.to_header()) - self.hdul_crop = fits.HDUList([fits.PrimaryHDU(self.data_crop,self.header_crop)]) + self.hdul_crop = fits.HDUList([fits.PrimaryHDU(self.data_crop, self.header_crop)]) self.rect_selector.clear() self.ax.reset_wcs(self.wcs_crop) self.display(data=self.data_crop, wcs=self.wcs_crop) xlim, ylim = self.RSextent[1::2]-self.RSextent[0::2] - self.ax.set_xlim(0,xlim) - self.ax.set_ylim(0,ylim) + self.ax.set_xlim(0, xlim) + self.ax.set_ylim(0, ylim) if self.fig.canvas.manager.toolbar.mode == '': self.rect_selector = RectangleSelector(self.ax, self.onselect_crop, - button=[1]) + button=[1]) self.fig.canvas.draw_idle() @@ -1323,14 +1368,14 @@ class crop_map(object): def crop(self) -> None: if self.fig.canvas.manager.toolbar.mode == '': self.rect_selector = RectangleSelector(self.ax, self.onselect_crop, - button=[1]) + button=[1]) self.bapply.on_clicked(self.apply_crop) self.breset.on_clicked(self.reset_crop) self.fig.canvas.mpl_connect('close_event', self.on_close) plt.show() def writeto(self, filename): - self.hdul_crop.writeto(filename,overwrite=True) + self.hdul_crop.writeto(filename, overwrite=True) class crop_Stokes(crop_map): @@ -1338,7 +1383,8 @@ class crop_Stokes(crop_map): Class to interactively crop a polarisation map to desired Region of Interest. Inherit from crop_map. """ - def apply_crop(self,event): + + def apply_crop(self, event): """ Redefine apply_crop method for the Stokes HDUList. """ @@ -1357,7 +1403,7 @@ class crop_Stokes(crop_map): extent = np.array(self.im.get_extent()) shape_im = extent[1::2] - extent[0::2] if (shape_im.astype(int) != shape).any() and (self.RSextent != self.extent).any(): - #Update WCS and header in new cropped image + # Update WCS and header in new cropped image self.hdul_crop = deepcopy(hdul) crpix = np.array(wcs.wcs.crpix) self.wcs_crop = wcs.deepcopy() @@ -1365,16 +1411,16 @@ class crop_Stokes(crop_map): if self.crpix_in_RS: self.wcs_crop.wcs.crpix = np.array(self.wcs_crop.wcs.crpix) - self.RSextent[::2] else: - self.wcs_crop.wcs.crval = wcs.wcs_pix2world([self.RScenter],1)[0] + self.wcs_crop.wcs.crval = wcs.wcs_pix2world([self.RScenter], 1)[0] self.wcs_crop.wcs.crpix = self.RScenter-self.RSextent[::2] # Crop dataset for dataset in self.hdul_crop: - if dataset.header['datatype']=='IQU_cov_matrix': - stokes_cov = np.zeros((3,3,shape[1],shape[0])) + if dataset.header['datatype'] == 'IQU_cov_matrix': + stokes_cov = np.zeros((3, 3, shape[1], shape[0])) for i in range(3): for j in range(3): - stokes_cov[i,j] = deepcopy(dataset.data[i,j][vertex[2]:vertex[3], vertex[0]:vertex[1]]) + stokes_cov[i, j] = deepcopy(dataset.data[i, j][vertex[2]:vertex[3], vertex[0]:vertex[1]]) dataset.data = stokes_cov else: dataset.data = deepcopy(dataset.data[vertex[2]:vertex[3], vertex[0]:vertex[1]]) @@ -1387,32 +1433,34 @@ class crop_Stokes(crop_map): self.display(data=self.data_crop, wcs=self.wcs_crop) xlim, ylim = self.RSextent[1::2]-self.RSextent[0::2] - self.ax.set_xlim(0,xlim) - self.ax.set_ylim(0,ylim) + self.ax.set_xlim(0, xlim) + self.ax.set_ylim(0, ylim) else: self.on_close(event) if self.fig.canvas.manager.toolbar.mode == '': self.rect_selector = RectangleSelector(self.ax, self.onselect_crop, - button=[1]) + button=[1]) # Update 
integrated values - mask = np.logical_and(self.hdul_crop[-1].data.astype(bool), self.hdul_crop[0].data >0) + mask = np.logical_and(self.hdul_crop[-1].data.astype(bool), self.hdul_crop[0].data > 0) I_diluted = self.hdul_crop[0].data[mask].sum() Q_diluted = self.hdul_crop[1].data[mask].sum() U_diluted = self.hdul_crop[2].data[mask].sum() - I_diluted_err = np.sqrt(np.sum(self.hdul_crop[3].data[0,0][mask])) - Q_diluted_err = np.sqrt(np.sum(self.hdul_crop[3].data[1,1][mask])) - U_diluted_err = np.sqrt(np.sum(self.hdul_crop[3].data[2,2][mask])) - IQ_diluted_err = np.sqrt(np.sum(self.hdul_crop[3].data[0,1][mask]**2)) - IU_diluted_err = np.sqrt(np.sum(self.hdul_crop[3].data[0,2][mask]**2)) - QU_diluted_err = np.sqrt(np.sum(self.hdul_crop[3].data[1,2][mask]**2)) + I_diluted_err = np.sqrt(np.sum(self.hdul_crop[3].data[0, 0][mask])) + Q_diluted_err = np.sqrt(np.sum(self.hdul_crop[3].data[1, 1][mask])) + U_diluted_err = np.sqrt(np.sum(self.hdul_crop[3].data[2, 2][mask])) + IQ_diluted_err = np.sqrt(np.sum(self.hdul_crop[3].data[0, 1][mask]**2)) + IU_diluted_err = np.sqrt(np.sum(self.hdul_crop[3].data[0, 2][mask]**2)) + QU_diluted_err = np.sqrt(np.sum(self.hdul_crop[3].data[1, 2][mask]**2)) P_diluted = np.sqrt(Q_diluted**2+U_diluted**2)/I_diluted - P_diluted_err = (1./I_diluted)*np.sqrt((Q_diluted**2*Q_diluted_err**2 + U_diluted**2*U_diluted_err**2 + 2.*Q_diluted*U_diluted*QU_diluted_err)/(Q_diluted**2 + U_diluted**2) + ((Q_diluted/I_diluted)**2 + (U_diluted/I_diluted)**2)*I_diluted_err**2 - 2.*(Q_diluted/I_diluted)*IQ_diluted_err - 2.*(U_diluted/I_diluted)*IU_diluted_err) + P_diluted_err = (1./I_diluted)*np.sqrt((Q_diluted**2*Q_diluted_err**2 + U_diluted**2*U_diluted_err**2 + 2.*Q_diluted*U_diluted*QU_diluted_err)/(Q_diluted**2 + U_diluted ** + 2) + ((Q_diluted/I_diluted)**2 + (U_diluted/I_diluted)**2)*I_diluted_err**2 - 2.*(Q_diluted/I_diluted)*IQ_diluted_err - 2.*(U_diluted/I_diluted)*IU_diluted_err) + + PA_diluted = princ_angle((90./np.pi)*np.arctan2(U_diluted, Q_diluted)) + PA_diluted_err = (90./(np.pi*(Q_diluted**2 + U_diluted**2)))*np.sqrt(U_diluted**2*Q_diluted_err ** + 2 + Q_diluted**2*U_diluted_err**2 - 2.*Q_diluted*U_diluted*QU_diluted_err) - PA_diluted = princ_angle((90./np.pi)*np.arctan2(U_diluted,Q_diluted)) - PA_diluted_err = (90./(np.pi*(Q_diluted**2 + U_diluted**2)))*np.sqrt(U_diluted**2*Q_diluted_err**2 + Q_diluted**2*U_diluted_err**2 - 2.*Q_diluted*U_diluted*QU_diluted_err) - for dataset in self.hdul_crop: dataset.header['P_int'] = (P_diluted, 'Integrated polarisation degree') dataset.header['P_int_err'] = (np.ceil(P_diluted_err*1000.)/1000., 'Integrated polarisation degree error') @@ -1432,10 +1480,10 @@ class image_lasso_selector(object): """ self.selected = False self.img = img - self.vmin, self.vmax = 0., np.max(self.img[self.img>0.]) - plt.ioff() # see https://github.com/matplotlib/matplotlib/issues/17013 + self.vmin, self.vmax = 0., np.max(self.img[self.img > 0.]) + plt.ioff() # see https://github.com/matplotlib/matplotlib/issues/17013 if fig is None: - self.fig = plt.figure(figsize=(15,15)) + self.fig = plt.figure(figsize=(15, 15)) else: self.fig = fig if ax is None: @@ -1446,7 +1494,7 @@ class image_lasso_selector(object): self.ax = ax self.mask_alpha = 0.1 self.embedded = True - self.displayed = self.ax.imshow(self.img, vmin=self.vmin, vmax=self.vmax, aspect='equal', cmap='inferno',alpha=self.mask_alpha) + self.displayed = self.ax.imshow(self.img, vmin=self.vmin, vmax=self.vmax, aspect='equal', cmap='inferno', alpha=self.mask_alpha) plt.ion() lineprops = {'color': 'grey', 
'linewidth': 1, 'alpha': 0.8} @@ -1455,15 +1503,15 @@ class image_lasso_selector(object): pix_x = np.arange(self.img.shape[0]) pix_y = np.arange(self.img.shape[1]) - xv, yv = np.meshgrid(pix_y,pix_x) - self.pix = np.vstack( (xv.flatten(), yv.flatten()) ).T + xv, yv = np.meshgrid(pix_y, pix_x) + self.pix = np.vstack((xv.flatten(), yv.flatten())).T self.fig.canvas.mpl_connect('close_event', self.on_close) plt.show() def on_close(self, event=None) -> None: if not hasattr(self, 'mask'): - self.mask = np.zeros(self.img.shape[:2],dtype=bool) + self.mask = np.zeros(self.img.shape[:2], dtype=bool) self.lasso.disconnect_events() self.selected = True @@ -1475,15 +1523,15 @@ class image_lasso_selector(object): def update_mask(self): self.displayed.remove() - self.displayed = self.ax.imshow(self.img, vmin=self.vmin, vmax=self.vmax, aspect='equal', cmap='inferno',alpha=self.mask_alpha) + self.displayed = self.ax.imshow(self.img, vmin=self.vmin, vmax=self.vmax, aspect='equal', cmap='inferno', alpha=self.mask_alpha) array = self.displayed.get_array().data - self.mask = np.zeros(self.img.shape[:2],dtype=bool) + self.mask = np.zeros(self.img.shape[:2], dtype=bool) self.mask[self.indices] = True if hasattr(self, 'cont'): for coll in self.cont.collections: coll.remove() - self.cont = self.ax.contour(self.mask.astype(float),levels=[0.5], colors='white', linewidths=1) + self.cont = self.ax.contour(self.mask.astype(float), levels=[0.5], colors='white', linewidths=1) if not self.embedded: self.displayed.set_data(array) self.fig.canvas.draw_idle() @@ -1492,16 +1540,16 @@ class image_lasso_selector(object): class aperture(object): - def __init__(self, img, cdelt=np.array([1.,1.]), radius=1., fig=None, ax=None): + def __init__(self, img, cdelt=np.array([1., 1.]), radius=1., fig=None, ax=None): """ img must have shape (X, Y) """ self.selected = False self.img = img - self.vmin, self.vmax = 0., np.max(self.img[self.img>0.]) - plt.ioff() # see https://github.com/matplotlib/matplotlib/issues/17013 + self.vmin, self.vmax = 0., np.max(self.img[self.img > 0.]) + plt.ioff() # see https://github.com/matplotlib/matplotlib/issues/17013 if fig is None: - self.fig = plt.figure(figsize=(15,15)) + self.fig = plt.figure(figsize=(15, 15)) else: self.fig = fig if ax is None: @@ -1512,21 +1560,21 @@ class aperture(object): self.ax = ax self.mask_alpha = 0.1 self.embedded = True - - self.displayed = self.ax.imshow(self.img, vmin=self.vmin, vmax=self.vmax, aspect='equal', cmap='inferno',alpha=self.mask_alpha) + + self.displayed = self.ax.imshow(self.img, vmin=self.vmin, vmax=self.vmax, aspect='equal', cmap='inferno', alpha=self.mask_alpha) plt.ion() xx, yy = np.indices(self.img.shape) - self.pix = np.vstack( (xx.flatten(), yy.flatten()) ).T + self.pix = np.vstack((xx.flatten(), yy.flatten())).T self.x0, self.y0 = np.array(self.img.shape)/2. if np.abs(cdelt).max() != 1.: self.cdelt = cdelt self.radius = radius/np.abs(self.cdelt).max()/3600. 
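The conversion above turns an aperture radius given in arcseconds into pixels through the largest CDELT component; numerically, under illustrative values:

import numpy as np

radius_arcsec = 1.0
cdelt = np.array([-2.75e-5, 2.75e-5])                    # illustrative CDELT pair in deg/pixel
radius_px = radius_arcsec / np.abs(cdelt).max() / 3600.  # ~10.1 pixels at ~0.099 arcsec/pixel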
- self.circ = Circle((self.x0, self.y0), self.radius, alpha=0.8, ec='grey',fc='none') - self.ax.add_patch(self.circ) - + self.circ = Circle((self.x0, self.y0), self.radius, alpha=0.8, ec='grey', fc='none') + self.ax.add_patch(self.circ) + self.fig.canvas.mpl_connect('button_press_event', self.on_press) self.fig.canvas.mpl_connect('button_release_event', self.on_release) self.fig.canvas.mpl_connect('motion_notify_event', self.on_move) @@ -1537,7 +1585,7 @@ class aperture(object): def on_close(self, event=None) -> None: if not hasattr(self, 'mask'): - self.mask = np.zeros(self.img.shape[:2],dtype=bool) + self.mask = np.zeros(self.img.shape[:2], dtype=bool) self.selected = True def on_press(self, event): @@ -1562,7 +1610,7 @@ class aperture(object): dy = event.ydata - self.pressevent.ydata self.circ.center = self.x0 + dx, self.y0 + dy self.fig.canvas.draw_idle() - + def update_radius(self, radius): self.radius = radius/np.abs(self.cdelt).max()/3600 self.circ.set_radius(self.radius) @@ -1572,9 +1620,9 @@ class aperture(object): if hasattr(self, 'displayed'): try: self.displayed.remove() - except: + except AttributeError: return - self.displayed = self.ax.imshow(self.img, vmin=self.vmin, vmax=self.vmax, aspect='equal', cmap='inferno',alpha=self.mask_alpha) + self.displayed = self.ax.imshow(self.img, vmin=self.vmin, vmax=self.vmax, aspect='equal', cmap='inferno', alpha=self.mask_alpha) array = self.displayed.get_array().data yy, xx = np.indices(self.img.shape[:2]) @@ -1584,9 +1632,9 @@ class aperture(object): for coll in self.cont.collections: try: coll.remove() - except: + except AttributeError: return - self.cont = self.ax.contour(self.mask.astype(float),levels=[0.5], colors='white', linewidths=1) + self.cont = self.ax.contour(self.mask.astype(float), levels=[0.5], colors='white', linewidths=1) if not self.embedded: self.displayed.set_data(array) self.fig.canvas.draw_idle() @@ -1598,9 +1646,10 @@ class pol_map(object): """ Class to interactively study polarisation maps. """ + def __init__(self, Stokes, SNRp_cut=3., SNRi_cut=30., flux_lim=None, selection=None): - if type(Stokes) == str: + if isinstance(Stokes, str): Stokes = fits.open(Stokes) self.Stokes = deepcopy(Stokes) self.SNRp_cut = SNRp_cut @@ -1613,35 +1662,35 @@ class pol_map(object): self.display_selection = selection self.vec_scale = 2. 
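# Editor's note, a minimal sketch of the angle convention used throughout this
# class: the recurring factor 90./np.pi applied to np.arctan2(U, Q) is just the
# polarisation half-angle theta_P = 0.5*arctan2(U, Q) expressed in degrees.
import numpy as np

Q, U = 0.02, -0.01                  # illustrative integrated Stokes fluxes
PA = (90./np.pi)*np.arctan2(U, Q)   # pattern as written in the pipeline
assert np.isclose(PA, np.degrees(0.5*np.arctan2(U, Q)))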
- #Get data + # Get data self.targ = self.Stokes[0].header['targname'] self.pivot_wav = self.Stokes[0].header['photplam'] self.map_convert = self.Stokes[0].header['photflam'] - #Create figure + # Create figure plt.rcParams.update({'font.size': 10}) - self.fig, self.ax = plt.subplots(figsize=(10,10),subplot_kw=dict(projection=self.wcs)) + self.fig, self.ax = plt.subplots(figsize=(10, 10), subplot_kw=dict(projection=self.wcs)) self.fig.subplots_adjust(hspace=0, wspace=0, right=1.02) self.ax_cosmetics() - #Display selected data (Default to total flux) + # Display selected data (Default to total flux) self.display() - #Display polarisation vectors in SNR_cut + # Display polarisation vectors in SNR_cut self.pol_vector() - #Display integrated values in ROI + # Display integrated values in ROI self.pol_int() - #Set axes for sliders (SNRp_cut, SNRi_cut) + # Set axes for sliders (SNRp_cut, SNRi_cut) ax_I_cut = self.fig.add_axes([0.125, 0.080, 0.35, 0.01]) ax_P_cut = self.fig.add_axes([0.125, 0.055, 0.35, 0.01]) ax_vec_sc = self.fig.add_axes([0.300, 0.030, 0.175, 0.01]) ax_snr_reset = self.fig.add_axes([0.125, 0.020, 0.05, 0.02]) - SNRi_max = np.max(self.I[self.IQU_cov[0,0]>0.]/np.sqrt(self.IQU_cov[0,0][self.IQU_cov[0,0]>0.])) - SNRp_max = np.max(self.P[self.s_P>0.]/self.s_P[self.s_P > 0.]) - s_I_cut = Slider(ax_I_cut,r"$SNR^{I}_{cut}$",1.,int(SNRi_max*0.95),valstep=1,valinit=self.SNRi_cut) - s_P_cut = Slider(ax_P_cut,r"$SNR^{P}_{cut}$",1.,int(SNRp_max*0.95),valstep=1,valinit=self.SNRp_cut) - s_vec_sc = Slider(ax_vec_sc,r"Vectors scale",1.,10.,valstep=1,valinit=self.vec_scale) - b_snr_reset = Button(ax_snr_reset,"Reset") + SNRi_max = np.max(self.I[self.IQU_cov[0, 0] > 0.]/np.sqrt(self.IQU_cov[0, 0][self.IQU_cov[0, 0] > 0.])) + SNRp_max = np.max(self.P[self.s_P > 0.]/self.s_P[self.s_P > 0.]) + s_I_cut = Slider(ax_I_cut, r"$SNR^{I}_{cut}$", 1., int(SNRi_max*0.95), valstep=1, valinit=self.SNRi_cut) + s_P_cut = Slider(ax_P_cut, r"$SNR^{P}_{cut}$", 1., int(SNRp_max*0.95), valstep=1, valinit=self.SNRp_cut) + s_vec_sc = Slider(ax_vec_sc, r"Vectors scale", 1., 10., valstep=1, valinit=self.vec_scale) + b_snr_reset = Button(ax_snr_reset, "Reset") b_snr_reset.label.set_fontsize(8) def update_snri(val): @@ -1672,15 +1721,14 @@ class pol_map(object): s_vec_sc.on_changed(update_vecsc) b_snr_reset.on_clicked(reset_snr) - - #Set axe for Aperture selection + # Set axe for Aperture selection ax_aper = self.fig.add_axes([0.55, 0.040, 0.05, 0.02]) ax_aper_reset = self.fig.add_axes([0.605, 0.040, 0.05, 0.02]) ax_aper_radius = self.fig.add_axes([0.55, 0.020, 0.10, 0.01]) self.selected = False - b_aper = Button(ax_aper,"Aperture") + b_aper = Button(ax_aper, "Aperture") b_aper.label.set_fontsize(8) - b_aper_reset = Button(ax_aper_reset,"Reset") + b_aper_reset = Button(ax_aper_reset, "Reset") b_aper_reset.label.set_fontsize(8) s_aper_radius = Slider(ax_aper_radius, r"$R_{aper}$", np.ceil(self.wcs.wcs.cdelt.max()/1.33*3.6e5)/1e2, 3.5, valstep=1e-2, valinit=1.) 
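# Editor's note: the SNR sliders above drive the joint mask exposed by the
# `cut` property further down; here is a hedged, stand-alone sketch of that
# cut with illustrative arrays (not the class API). Note also that, per the
# caveat in the Matplotlib widgets documentation, the Slider/Button objects
# must stay referenced for their callbacks to keep firing.
import numpy as np

I = np.array([[10., 0.5], [30., 2.]])
s_I = np.array([[1., 1.], [1., 0.]])
P = np.array([[0.10, 0.20], [0.30, 0.40]])
s_P = np.array([[0.01, 0.20], [0.05, 0.]])
SNRi_cut, SNRp_cut = 3., 3.

SNRi_mask = np.zeros(I.shape, dtype=bool)
SNRi_mask[s_I > 0.] = I[s_I > 0.]/s_I[s_I > 0.] > SNRi_cut  # guard against sigma == 0
SNRp_mask = np.zeros(P.shape, dtype=bool)
SNRp_mask[s_P > 0.] = P[s_P > 0.]/s_P[s_P > 0.] > SNRp_cut
cut = np.logical_and(SNRi_mask, SNRp_mask)  # vectors are drawn only where both pass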
@@ -1704,7 +1752,7 @@ class pol_map(object): self.select_instance.circ.set_visible(True) self.fig.canvas.draw_idle() - + def update_aperture(val): if hasattr(self, 'select_instance'): if hasattr(self.select_instance, 'radius'): @@ -1716,7 +1764,6 @@ class pol_map(object): self.selected = True self.select_instance = aperture(self.data, fig=self.fig, ax=self.ax, cdelt=self.wcs.wcs.cdelt, radius=val) self.fig.canvas.draw_idle() - def reset_aperture(event): self.region = None @@ -1728,13 +1775,13 @@ class pol_map(object): b_aper_reset.on_clicked(reset_aperture) s_aper_radius.on_changed(update_aperture) - #Set axe for ROI selection + # Set axe for ROI selection ax_select = self.fig.add_axes([0.55, 0.070, 0.05, 0.02]) ax_roi_reset = self.fig.add_axes([0.605, 0.070, 0.05, 0.02]) - b_select = Button(ax_select,"Select") + b_select = Button(ax_select, "Select") b_select.label.set_fontsize(8) self.selected = False - b_roi_reset = Button(ax_roi_reset,"Reset") + b_roi_reset = Button(ax_roi_reset, "Reset") b_roi_reset.label.set_fontsize(8) def select_roi(event): @@ -1755,9 +1802,9 @@ class pol_map(object): self.select_instance = image_lasso_selector(self.data, fig=self.fig, ax=self.ax) self.select_instance.lasso.set_active(True) k = 0 - while not self.select_instance.selected and k<60: + while not self.select_instance.selected and k < 60: self.fig.canvas.start_event_loop(timeout=1) - k+=1 + k += 1 select_roi(event) self.fig.canvas.draw_idle() @@ -1769,13 +1816,13 @@ class pol_map(object): b_select.on_clicked(select_roi) b_roi_reset.on_clicked(reset_roi) - #Set axe for crop Stokes + # Set axe for crop Stokes ax_crop = self.fig.add_axes([0.70, 0.070, 0.05, 0.02]) ax_crop_reset = self.fig.add_axes([0.755, 0.070, 0.05, 0.02]) - b_crop = Button(ax_crop,"Crop") + b_crop = Button(ax_crop, "Crop") b_crop.label.set_fontsize(8) self.cropped = False - b_crop_reset = Button(ax_crop_reset,"Reset") + b_crop_reset = Button(ax_crop_reset, "Reset") b_crop_reset.label.set_fontsize(8) def crop(event): @@ -1790,17 +1837,17 @@ class pol_map(object): self.ax.reset_wcs(self.wcs) self.ax_cosmetics() self.display() - self.ax.set_xlim(0,self.I.shape[1]) - self.ax.set_ylim(0,self.I.shape[0]) + self.ax.set_xlim(0, self.I.shape[1]) + self.ax.set_ylim(0, self.I.shape[0]) self.pol_vector() else: self.cropped = True self.crop_instance = crop_Stokes(self.Stokes, fig=self.fig, ax=self.ax) self.crop_instance.rect_selector.set_active(True) k = 0 - while not self.crop_instance.cropped and k<60: + while not self.crop_instance.cropped and k < 60: self.fig.canvas.start_event_loop(timeout=1) - k+=1 + k += 1 crop(event) self.fig.canvas.draw_idle() @@ -1816,11 +1863,11 @@ class pol_map(object): b_crop.on_clicked(crop) b_crop_reset.on_clicked(reset_crop) - #Set axe for saving plot + # Set axe for saving plot ax_save = self.fig.add_axes([0.850, 0.070, 0.05, 0.02]) b_save = Button(ax_save, "Save") b_save.label.set_fontsize(8) - ax_text_save = self.fig.add_axes([0.3, 0.020, 0.5, 0.025],visible=False) + ax_text_save = self.fig.add_axes([0.3, 0.020, 0.5, 0.025], visible=False) text_save = TextBox(ax_text_save, "Save to:", initial='') def saveplot(event): @@ -1837,12 +1884,12 @@ class pol_map(object): ax_text_save.set(visible=False) if expression != '': plt.rcParams.update({'font.size': 15}) - save_fig = plt.figure(figsize=(15,15)) + save_fig = plt.figure(figsize=(15, 15)) save_ax = save_fig.add_subplot(111, projection=self.wcs) self.ax_cosmetics(ax=save_ax) - self.display(fig=save_fig,ax=save_ax) - self.pol_vector(fig=save_fig,ax=save_ax) - 
self.pol_int(fig=save_fig,ax=save_ax) + self.display(fig=save_fig, ax=save_ax) + self.pol_vector(fig=save_fig, ax=save_ax) + self.pol_int(fig=save_fig, ax=save_ax) save_fig.suptitle(r"{0:s} with $SNR_{{p}} \geq$ {1:d} and $SNR_{{I}} \geq$ {2:d}".format(self.targ, int(self.SNRp), int(self.SNRi))) if not expression[-4:] in ['.png', '.jpg', '.pdf']: expression += '.pdf' @@ -1858,11 +1905,11 @@ class pol_map(object): text_save.on_submit(submit_save) - #Set axe for data dump + # Set axe for data dump ax_dump = self.fig.add_axes([0.850, 0.045, 0.05, 0.02]) b_dump = Button(ax_dump, "Dump") b_dump.label.set_fontsize(8) - ax_text_dump = self.fig.add_axes([0.3, 0.020, 0.5, 0.025],visible=False) + ax_text_dump = self.fig.add_axes([0.3, 0.020, 0.5, 0.025], visible=False) text_dump = TextBox(ax_text_dump, "Dump to:", initial='') def dump(event): @@ -1885,7 +1932,8 @@ class pol_map(object): dump_list = [] for i in range(shape[0]): for j in range(shape[1]): - dump_list.append([x[i,j], y[i,j], self.I[i,j]*self.map_convert, self.Q[i,j]*self.map_convert, self.U[i,j]*self.map_convert, P[i,j], PA[i,j]]) + dump_list.append([x[i, j], y[i, j], self.I[i, j]*self.map_convert, self.Q[i, j] * + self.map_convert, self.U[i, j]*self.map_convert, P[i, j], PA[i, j]]) self.data_dump = np.array(dump_list) b_dump.on_clicked(dump) @@ -1905,19 +1953,19 @@ class pol_map(object): text_dump.on_submit(submit_dump) - #Set axes for display buttons + # Set axes for display buttons ax_tf = self.fig.add_axes([0.925, 0.105, 0.05, 0.02]) ax_pf = self.fig.add_axes([0.925, 0.085, 0.05, 0.02]) ax_p = self.fig.add_axes([0.925, 0.065, 0.05, 0.02]) ax_pa = self.fig.add_axes([0.925, 0.045, 0.05, 0.02]) ax_snri = self.fig.add_axes([0.925, 0.025, 0.05, 0.02]) ax_snrp = self.fig.add_axes([0.925, 0.005, 0.05, 0.02]) - b_tf = Button(ax_tf,r"$F_{\lambda}$") - b_pf = Button(ax_pf,r"$F_{\lambda} \cdot P$") - b_p = Button(ax_p,r"$P$") - b_pa = Button(ax_pa,r"$\theta_{P}$") - b_snri = Button(ax_snri,r"$I / \sigma_{I}$") - b_snrp = Button(ax_snrp,r"$P / \sigma_{P}$") + b_tf = Button(ax_tf, r"$F_{\lambda}$") + b_pf = Button(ax_pf, r"$F_{\lambda} \cdot P$") + b_p = Button(ax_p, r"$P$") + b_pa = Button(ax_pa, r"$\theta_{P}$") + b_snri = Button(ax_snri, r"$I / \sigma_{I}$") + b_snrp = Button(ax_snrp, r"$P / \sigma_{P}$") def d_tf(event): self.display_selection = 'total_flux' @@ -1960,65 +2008,78 @@ class pol_map(object): @property def wcs(self): return WCS(self.Stokes[0].header).celestial + @property def I(self): return self.Stokes['I_STOKES'].data + @property def Q(self): return self.Stokes['Q_STOKES'].data + @property def U(self): return self.Stokes['U_STOKES'].data + @property def IQU_cov(self): return self.Stokes['IQU_COV_MATRIX'].data + @property def P(self): return self.Stokes['POL_DEG_DEBIASED'].data + @property def s_P(self): return self.Stokes['POL_DEG_ERR'].data + @property def PA(self): return self.Stokes['POL_ANG'].data + @property def data_mask(self): return self.Stokes['DATA_MASK'].data def set_data_mask(self, mask): - self.Stokes[np.argmax([self.Stokes[i].header['datatype']=='Data_mask' for i in range(len(self.Stokes))])].data = mask.astype(float) + self.Stokes[np.argmax([self.Stokes[i].header['datatype'] == 'Data_mask' for i in range(len(self.Stokes))])].data = mask.astype(float) @property def cut(self): - s_I = np.sqrt(self.IQU_cov[0,0]) + s_I = np.sqrt(self.IQU_cov[0, 0]) SNRp_mask, SNRi_mask = np.zeros(self.P.shape).astype(bool), np.zeros(self.I.shape).astype(bool) SNRp_mask[self.s_P > 0.] = self.P[self.s_P > 0.] / self.s_P[self.s_P > 0.] 
> self.SNRp SNRi_mask[s_I > 0.] = self.I[s_I > 0.] / s_I[s_I > 0.] > self.SNRi - return np.logical_and(SNRi_mask,SNRp_mask) + return np.logical_and(SNRi_mask, SNRp_mask) def ax_cosmetics(self, ax=None): if ax is None: ax = self.ax - ax.set(aspect='equal',fc='black') + ax.set(aspect='equal', fc='black') ax.coords.grid(True, color='white', ls='dotted', alpha=0.5) - ax.set_xlabel('Right Ascension (J2000)') - ax.set_ylabel('Declination (J2000)',labelpad=-1) + ax.coords[0].set_axislabel('Right Ascension (J2000)') + ax.coords[0].set_axislabel_position('t') + ax.coords[0].set_ticklabel_position('t') + ax.set_ylabel('Declination (J2000)', labelpad=-1) - #Display scales and orientation + # Display scales and orientation fontprops = fm.FontProperties(size=14) px_size = self.wcs.wcs.cdelt[0]*3600. - if hasattr(self,'px_sc'): + if hasattr(self, 'px_sc'): self.px_sc.remove() - self.px_sc = AnchoredSizeBar(ax.transData, 1./px_size, '1 arcsec', 3, pad=0.5, sep=5, borderpad=0.5, frameon=False, size_vertical=0.005, color='white', fontproperties=fontprops) + self.px_sc = AnchoredSizeBar(ax.transData, 1./px_size, '1 arcsec', 3, pad=0.5, sep=5, borderpad=0.5, + frameon=False, size_vertical=0.005, color='white', fontproperties=fontprops) ax.add_artist(self.px_sc) - if hasattr(self,'pol_sc'): + if hasattr(self, 'pol_sc'): self.pol_sc.remove() - self.pol_sc = AnchoredSizeBar(ax.transData, self.vec_scale, r"$P$= 100%", 4, pad=0.5, sep=5, borderpad=0.5, frameon=False, size_vertical=0.005, color='white', fontproperties=fontprops) + self.pol_sc = AnchoredSizeBar(ax.transData, self.vec_scale, r"$P$= 100%", 4, pad=0.5, sep=5, borderpad=0.5, + frameon=False, size_vertical=0.005, color='white', fontproperties=fontprops) ax.add_artist(self.pol_sc) - if hasattr(self,'north_dir'): + if hasattr(self, 'north_dir'): self.north_dir.remove() - self.north_dir = AnchoredDirectionArrows(ax.transAxes, "E", "N", length=-0.08, fontsize=0.025, loc=1, aspect_ratio=-1, sep_y=0.01, sep_x=0.01, back_length=0., head_length=10., head_width=10., angle=-self.Stokes[0].header['orientat'], color='white', text_props={'ec': None, 'fc': 'w', 'alpha': 1, 'lw': 0.4}, arrow_props={'ec': None,'fc':'w','alpha': 1,'lw': 1}) + self.north_dir = AnchoredDirectionArrows(ax.transAxes, "E", "N", length=-0.08, fontsize=0.025, loc=1, aspect_ratio=-1, sep_y=0.01, sep_x=0.01, back_length=0., head_length=10., head_width=10., + angle=-self.Stokes[0].header['orientat'], color='white', text_props={'ec': None, 'fc': 'w', 'alpha': 1, 'lw': 0.4}, arrow_props={'ec': None, 'fc': 'w', 'alpha': 1, 'lw': 1}) ax.add_artist(self.north_dir) def display(self, fig=None, ax=None, flux_lim=None): @@ -2052,7 +2113,7 @@ class pol_map(object): vmin, vmax = 0, 180. label = r"$\theta_{P}$ [°]" elif self.display_selection.lower() in ['snri']: - s_I = np.sqrt(self.IQU_cov[0,0]) + s_I = np.sqrt(self.IQU_cov[0, 0]) SNRi = np.zeros(self.I.shape) SNRi[s_I > 0.] = self.I[s_I > 0.]/s_I[s_I > 0.] 
self.data = SNRi @@ -2073,7 +2134,7 @@ class pol_map(object): self.cbar.remove() if hasattr(self, 'im'): self.im.remove() - if not norm is None: + if norm is not None: self.im = ax.imshow(self.data, norm=norm, aspect='equal', cmap='inferno') else: self.im = ax.imshow(self.data, vmin=vmin, vmax=vmax, aspect='equal', cmap='inferno') @@ -2081,19 +2142,19 @@ class pol_map(object): fig.canvas.draw_idle() return self.im else: - if not norm is None: + if norm is not None: im = ax.imshow(self.data, norm=norm, aspect='equal', cmap='inferno') else: im = ax.imshow(self.data, vmin=vmin, vmax=vmax, aspect='equal', cmap='inferno') - ax.set_xlim(0,self.data.shape[1]) - ax.set_ylim(0,self.data.shape[0]) + ax.set_xlim(0, self.data.shape[1]) + ax.set_ylim(0, self.data.shape[0]) plt.colorbar(im, pad=0.025, aspect=80, label=label) fig.canvas.draw_idle() def pol_vector(self, fig=None, ax=None): P_cut = np.ones(self.P.shape)*np.nan P_cut[self.cut] = self.P[self.cut] - X, Y = np.meshgrid(np.arange(self.I.shape[1]),np.arange(self.I.shape[0])) + X, Y = np.meshgrid(np.arange(self.I.shape[1]), np.arange(self.I.shape[0])) XY_U, XY_V = P_cut*np.cos(np.pi/2. + self.PA*np.pi/180.), P_cut*np.sin(np.pi/2. + self.PA*np.pi/180.) if fig is None: @@ -2102,17 +2163,19 @@ class pol_map(object): ax = self.ax if hasattr(self, 'quiver'): self.quiver.remove() - self.quiver = ax.quiver(X, Y, XY_U, XY_V, units='xy', scale=1./self.vec_scale, scale_units='xy', pivot='mid', headwidth=0., headlength=0., headaxislength=0., width=0.15, linewidth=0.5, color='white',edgecolor='black') + self.quiver = ax.quiver(X, Y, XY_U, XY_V, units='xy', scale=1./self.vec_scale, scale_units='xy', pivot='mid', headwidth=0., + headlength=0., headaxislength=0., width=0.15, linewidth=0.5, color='white', edgecolor='black') fig.canvas.draw_idle() return self.quiver else: - ax.quiver(X, Y, XY_U, XY_V, units='xy', scale=1./self.vec_scale, scale_units='xy', pivot='mid', headwidth=0., headlength=0., headaxislength=0., width=0.15, linewidth=0.5, color='white',edgecolor='black') + ax.quiver(X, Y, XY_U, XY_V, units='xy', scale=1./self.vec_scale, scale_units='xy', pivot='mid', headwidth=0., + headlength=0., headaxislength=0., width=0.15, linewidth=0.5, color='white', edgecolor='black') fig.canvas.draw_idle() def pol_int(self, fig=None, ax=None): if self.region is None: n_pix = self.I.size - s_I = np.sqrt(self.IQU_cov[0,0]) + s_I = np.sqrt(self.IQU_cov[0, 0]) I_reg = self.I.sum() I_reg_err = np.sqrt(n_pix)*np.sqrt(np.sum(s_I**2)) P_reg = self.Stokes[0].header['P_int'] @@ -2120,12 +2183,12 @@ class pol_map(object): PA_reg = self.Stokes[0].header['PA_int'] PA_reg_err = self.Stokes[0].header['PA_int_err'] - s_I = np.sqrt(self.IQU_cov[0,0]) - s_Q = np.sqrt(self.IQU_cov[1,1]) - s_U = np.sqrt(self.IQU_cov[2,2]) - s_IQ = self.IQU_cov[0,1] - s_IU = self.IQU_cov[0,2] - s_QU = self.IQU_cov[1,2] + s_I = np.sqrt(self.IQU_cov[0, 0]) + s_Q = np.sqrt(self.IQU_cov[1, 1]) + s_U = np.sqrt(self.IQU_cov[2, 2]) + s_IQ = self.IQU_cov[0, 1] + s_IU = self.IQU_cov[0, 2] + s_QU = self.IQU_cov[1, 2] I_cut = self.I[self.cut].sum() Q_cut = self.Q[self.cut].sum() @@ -2138,19 +2201,21 @@ class pol_map(object): QU_cut_err = np.sqrt(np.sum(s_QU[self.cut]**2)) P_cut = np.sqrt(Q_cut**2+U_cut**2)/I_cut - P_cut_err = np.sqrt((Q_cut**2*Q_cut_err**2 + U_cut**2*U_cut_err**2 + 2.*Q_cut*U_cut*QU_cut_err)/(Q_cut**2 + U_cut**2) + ((Q_cut/I_cut)**2 + (U_cut/I_cut)**2)*I_cut_err**2 - 2.*(Q_cut/I_cut)*IQ_cut_err - 2.*(U_cut/I_cut)*IU_cut_err)/I_cut + P_cut_err = np.sqrt((Q_cut**2*Q_cut_err**2 + U_cut**2*U_cut_err**2 
+ 2.*Q_cut*U_cut*QU_cut_err)/(Q_cut**2 + U_cut**2) + + ((Q_cut/I_cut)**2 + (U_cut/I_cut)**2)*I_cut_err**2 - 2.*(Q_cut/I_cut)*IQ_cut_err - 2.*(U_cut/I_cut)*IU_cut_err)/I_cut - PA_cut = princ_angle(np.degrees((1./2.)*np.arctan2(U_cut,Q_cut))) - PA_cut_err = princ_angle(np.degrees((1./(2.*(Q_cut**2+U_cut**2)))*np.sqrt(U_cut**2*Q_cut_err**2 + Q_cut**2*U_cut_err**2 - 2.*Q_cut*U_cut*QU_cut_err))) + PA_cut = princ_angle(np.degrees((1./2.)*np.arctan2(U_cut, Q_cut))) + PA_cut_err = princ_angle(np.degrees((1./(2.*(Q_cut**2+U_cut**2)))*np.sqrt(U_cut**2 * + Q_cut_err**2 + Q_cut**2*U_cut_err**2 - 2.*Q_cut*U_cut*QU_cut_err))) else: n_pix = self.I[self.region].size - s_I = np.sqrt(self.IQU_cov[0,0]) - s_Q = np.sqrt(self.IQU_cov[1,1]) - s_U = np.sqrt(self.IQU_cov[2,2]) - s_IQ = self.IQU_cov[0,1] - s_IU = self.IQU_cov[0,2] - s_QU = self.IQU_cov[1,2] + s_I = np.sqrt(self.IQU_cov[0, 0]) + s_Q = np.sqrt(self.IQU_cov[1, 1]) + s_U = np.sqrt(self.IQU_cov[2, 2]) + s_IQ = self.IQU_cov[0, 1] + s_IU = self.IQU_cov[0, 2] + s_QU = self.IQU_cov[1, 2] I_reg = self.I[self.region].sum() Q_reg = self.Q[self.region].sum() @@ -2163,9 +2228,10 @@ class pol_map(object): QU_reg_err = np.sqrt(np.sum(s_QU[self.region]**2)) P_reg = np.sqrt(Q_reg**2+U_reg**2)/I_reg - P_reg_err = np.sqrt((Q_reg**2*Q_reg_err**2 + U_reg**2*U_reg_err**2 + 2.*Q_reg*U_reg*QU_reg_err)/(Q_reg**2 + U_reg**2) + ((Q_reg/I_reg)**2 + (U_reg/I_reg)**2)*I_reg_err**2 - 2.*(Q_reg/I_reg)*IQ_reg_err - 2.*(U_reg/I_reg)*IU_reg_err)/I_reg + P_reg_err = np.sqrt((Q_reg**2*Q_reg_err**2 + U_reg**2*U_reg_err**2 + 2.*Q_reg*U_reg*QU_reg_err)/(Q_reg**2 + U_reg**2) + + ((Q_reg/I_reg)**2 + (U_reg/I_reg)**2)*I_reg_err**2 - 2.*(Q_reg/I_reg)*IQ_reg_err - 2.*(U_reg/I_reg)*IU_reg_err)/I_reg - PA_reg = princ_angle((90./np.pi)*np.arctan2(U_reg,Q_reg)) + PA_reg = princ_angle((90./np.pi)*np.arctan2(U_reg, Q_reg)) PA_reg_err = (90./(np.pi*(Q_reg**2+U_reg**2)))*np.sqrt(U_reg**2*Q_reg_err**2 + Q_reg**2*U_reg_err**2 - 2.*Q_reg*U_reg*QU_reg_err) new_cut = np.logical_and(self.region, self.cut) @@ -2180,17 +2246,18 @@ class pol_map(object): QU_cut_err = np.sqrt(np.sum(s_QU[new_cut]**2)) P_cut = np.sqrt(Q_cut**2+U_cut**2)/I_cut - P_cut_err = np.sqrt((Q_cut**2*Q_cut_err**2 + U_cut**2*U_cut_err**2 + 2.*Q_cut*U_cut*QU_cut_err)/(Q_cut**2 + U_cut**2) + ((Q_cut/I_cut)**2 + (U_cut/I_cut)**2)*I_cut_err**2 - 2.*(Q_cut/I_cut)*IQ_cut_err - 2.*(U_cut/I_cut)*IU_cut_err)/I_cut + P_cut_err = np.sqrt((Q_cut**2*Q_cut_err**2 + U_cut**2*U_cut_err**2 + 2.*Q_cut*U_cut*QU_cut_err)/(Q_cut**2 + U_cut**2) + + ((Q_cut/I_cut)**2 + (U_cut/I_cut)**2)*I_cut_err**2 - 2.*(Q_cut/I_cut)*IQ_cut_err - 2.*(U_cut/I_cut)*IU_cut_err)/I_cut - PA_cut = 360.-princ_angle((90./np.pi)*np.arctan2(U_cut,Q_cut)) + PA_cut = 360.-princ_angle((90./np.pi)*np.arctan2(U_cut, Q_cut)) PA_cut_err = (90./(np.pi*(Q_cut**2+U_cut**2)))*np.sqrt(U_cut**2*Q_cut_err**2 + Q_cut**2*U_cut_err**2 - 2.*Q_cut*U_cut*QU_cut_err) if hasattr(self, 'cont'): for coll in self.cont.collections: try: coll.remove() - except: - return + except AttributeError: + del coll del self.cont if fig is None: fig = self.fig @@ -2198,13 +2265,15 @@ class pol_map(object): ax = self.ax if hasattr(self, 'an_int'): self.an_int.remove() - self.an_int = ax.annotate(r"$F_{{\lambda}}^{{int}}$({0:.0f} $\AA$) = {1} $ergs \cdot cm^{{-2}} \cdot s^{{-1}} \cdot \AA^{{-1}}$".format(self.pivot_wav,sci_not(I_reg*self.map_convert,I_reg_err*self.map_convert,2))+"\n"+r"$P^{{int}}$ = {0:.1f} $\pm$ {1:.1f} %".format(P_reg*100.,np.ceil(P_reg_err*1000.)/10.)+"\n"+r"$\theta_{{P}}^{{int}}$ = {0:.1f} $\pm$ {1:.1f} 
°".format(PA_reg,np.ceil(PA_reg_err*10.)/10.), color='white', fontsize=12, xy=(0.01, 1.00), xycoords='axes fraction',path_effects=[pe.withStroke(linewidth=0.5,foreground='k')], verticalalignment='top', horizontalalignment='left') - if not self.region is None: - self.cont = ax.contour(self.region.astype(float),levels=[0.5], colors='white', linewidths=0.8) + self.an_int = ax.annotate(r"$F_{{\lambda}}^{{int}}$({0:.0f} $\AA$) = {1} $ergs \cdot cm^{{-2}} \cdot s^{{-1}} \cdot \AA^{{-1}}$".format(self.pivot_wav, sci_not(I_reg*self.map_convert, I_reg_err*self.map_convert, 2))+"\n"+r"$P^{{int}}$ = {0:.1f} $\pm$ {1:.1f} %".format(P_reg*100., np.ceil( + P_reg_err*1000.)/10.)+"\n"+r"$\theta_{{P}}^{{int}}$ = {0:.1f} $\pm$ {1:.1f} °".format(PA_reg, np.ceil(PA_reg_err*10.)/10.), color='white', fontsize=12, xy=(0.01, 1.00), xycoords='axes fraction', path_effects=[pe.withStroke(linewidth=0.5, foreground='k')], verticalalignment='top', horizontalalignment='left') + if self.region is not None: + self.cont = ax.contour(self.region.astype(float), levels=[0.5], colors='white', linewidths=0.8) fig.canvas.draw_idle() return self.an_int else: - ax.annotate(r"$F_{{\lambda}}^{{int}}$({0:.0f} $\AA$) = {1} $ergs \cdot cm^{{-2}} \cdot s^{{-1}} \cdot \AA^{{-1}}$".format(self.pivot_wav,sci_not(I_reg*self.map_convert,I_reg_err*self.map_convert,2))+"\n"+r"$P^{{int}}$ = {0:.1f} $\pm$ {1:.1f} %".format(P_reg*100.,np.ceil(P_reg_err*1000.)/10.)+"\n"+r"$\theta_{{P}}^{{int}}$ = {0:.1f} $\pm$ {1:.1f} °".format(PA_reg,np.ceil(PA_reg_err*10.)/10.), color='white', fontsize=12, xy=(0.01, 1.00), xycoords='axes fraction',path_effects=[pe.withStroke(linewidth=0.5,foreground='k')], verticalalignment='top', horizontalalignment='left') - if not self.region is None: - ax.contour(self.region.astype(float),levels=[0.5], colors='white', linewidths=0.8) + ax.annotate(r"$F_{{\lambda}}^{{int}}$({0:.0f} $\AA$) = {1} $ergs \cdot cm^{{-2}} \cdot s^{{-1}} \cdot \AA^{{-1}}$".format(self.pivot_wav, sci_not(I_reg*self.map_convert, I_reg_err*self.map_convert, 2))+"\n"+r"$P^{{int}}$ = {0:.1f} $\pm$ {1:.1f} %".format(P_reg*100., np.ceil(P_reg_err*1000.)/10.) 
+ + "\n"+r"$\theta_{{P}}^{{int}}$ = {0:.1f} $\pm$ {1:.1f} °".format(PA_reg, np.ceil(PA_reg_err*10.)/10.), color='white', fontsize=12, xy=(0.01, 1.00), xycoords='axes fraction', path_effects=[pe.withStroke(linewidth=0.5, foreground='k')], verticalalignment='top', horizontalalignment='left')
+ if self.region is not None:
+ ax.contour(self.region.astype(float), levels=[0.5], colors='white', linewidths=0.8)
 fig.canvas.draw_idle()
diff --git a/src/lib/query.py b/src/lib/query.py
index bf76650..856e736 100755
--- a/src/lib/query.py
+++ b/src/lib/query.py
@@ -17,17 +17,20 @@ def divide_proposal(products):
 """
 Divide observation in proposals by time or filter
 """
 for pid in np.unique(products['Proposal ID']):
- obs = products[products['Proposal ID']==pid].copy()
- close_date = np.unique(np.array([TimeDelta(np.abs(Time(obs['Start']).unix-date.unix),format='sec') < 7.*u.d for date in obs['Start']], dtype=bool), axis=0)
- if len(close_date)>1:
+ obs = products[products['Proposal ID'] == pid].copy()
+ close_date = np.unique(np.array([TimeDelta(np.abs(Time(obs['Start']).unix-date.unix), format='sec') < 7.*u.d for date in obs['Start']], dtype=bool), axis=0)
+ if len(close_date) > 1:
 for date in close_date:
- products['Proposal ID'][np.any([products['Dataset']==dataset for dataset in obs['Dataset'][date]],axis=0)] = "_".join([obs['Proposal ID'][date][0],str(obs['Start'][date][0])[:10]])
+ products['Proposal ID'][np.any([products['Dataset'] == dataset for dataset in obs['Dataset'][date]], axis=0)] = "_".join([obs['Proposal ID'][date][0], str(obs['Start'][date][0])[:10]])
 for pid in np.unique(products['Proposal ID']):
- obs = products[products['Proposal ID']==pid].copy()
- same_filt = np.unique(np.array(np.sum([obs['Filters'][:,1:]==filt[1:] for filt in obs['Filters']],axis=2)<3,dtype=bool),axis=0)
- if len(same_filt)>1:
+ obs = products[products['Proposal ID'] == pid].copy()
+ same_filt = np.unique(np.array(np.sum([obs['Filters'][:, 1:] == filt[1:] for filt in obs['Filters']], axis=2) < 3, dtype=bool), axis=0)
+ if len(same_filt) > 1:
 for filt in same_filt:
- products['Proposal ID'][np.any([products['Dataset']==dataset for dataset in obs['Dataset'][filt]],axis=0)] = "_".join([obs['Proposal ID'][filt][0],"_".join([fi for fi in obs['Filters'][filt][0][1:] if fi[:-1]!="CLEAR"])])
+ products['Proposal ID'][np.any([products['Dataset'] == dataset for dataset in obs['Dataset'][filt]], axis=0)] = "_".join([obs['Proposal ID'][filt][0], "_".join([fi for fi in obs['Filters'][filt][0][1:] if fi[:-1] != "CLEAR"])])
 return products
@@ -78,22 +81,22 @@ def get_product_list(target=None, proposal_id=None):
 for c, n_c in zip(select_cols, cols):
 results.rename_column(c, n_c)
- results['Proposal ID'] = Column(results['Proposal ID'],dtype='U35')
- results['Filters'] = Column(np.array([filt.split(";") for filt in results['Filters']],dtype=str))
+ results['Proposal ID'] = Column(results['Proposal ID'], dtype='U35')
+ results['Filters'] = Column(np.array([filt.split(";") for filt in results['Filters']], dtype=str))
 results['Start'] = Column(Time(results['Start']))
 results['Stop'] = Column(Time(results['Stop']))
 results = divide_proposal(results)
 obs = results.copy()
- ### Remove single observations for which a FIND filter is used
- to_remove=[]
+ # Remove single observations in which the F1ND filter is used
+ to_remove = []
 for i in range(len(obs)):
 if "F1ND" in obs[i]['Filters']:
 to_remove.append(i)
 obs.remove_rows(to_remove)
- ### Remove observations for which a polarization filter is missing
- polfilt =
{"POL0":0,"POL60":1,"POL120":2} + # Remove observations for which a polarization filter is missing + polfilt = {"POL0": 0, "POL60": 1, "POL120": 2} for pid in np.unique(obs['Proposal ID']): used_pol = np.zeros(3) for dataset in obs[obs['Proposal ID'] == pid]: @@ -102,26 +105,26 @@ def get_product_list(target=None, proposal_id=None): obs.remove_rows(np.arange(len(obs))[obs['Proposal ID'] == pid]) tab = unique(obs, ['Target name', 'Proposal ID']) - obs["Obs"] = [np.argmax(np.logical_and(tab['Proposal ID']==data['Proposal ID'],tab['Target name']==data['Target name']))+1 for data in obs] + obs["Obs"] = [np.argmax(np.logical_and(tab['Proposal ID'] == data['Proposal ID'], tab['Target name'] == data['Target name']))+1 for data in obs] try: - n_obs = unique(obs[["Obs", "Filters", "Start", "Central wavelength", "Instrument", - "Size", "Target name", "Proposal ID", "PI last name"]], 'Obs') + n_obs = unique(obs[["Obs", "Filters", "Start", "Central wavelength", "Instrument", "Size", "Target name", "Proposal ID", "PI last name"]], 'Obs') except IndexError: raise ValueError( "There is no observation with POL0, POL60 and POL120 for {0:s} in HST/FOC Legacy Archive".format(target)) b = np.zeros(len(results), dtype=bool) - if not proposal_id is None and str(proposal_id) in obs['Proposal ID']: + if proposal_id is not None and str(proposal_id) in obs['Proposal ID']: b[results['Proposal ID'] == str(proposal_id)] = True else: n_obs.pprint(len(n_obs)+2) - a = [np.array(i.split(":"), dtype=str) for i in input("select observations to be downloaded ('1,3,4,5' or '1,3:5' or 'all','*' default to 1)\n>").split(',')] - if a[0][0]=='': + a = [np.array(i.split(":"), dtype=str) + for i in input("select observations to be downloaded ('1,3,4,5' or '1,3:5' or 'all','*' default to 1)\n>").split(',')] + if a[0][0] == '': a = [[1]] - if a[0][0] in ['a','all','*']: - b = np.ones(len(results),dtype=bool) + if a[0][0] in ['a', 'all', '*']: + b = np.ones(len(results), dtype=bool) else: - a = [np.array(i,dtype=int) for i in a] + a = [np.array(i, dtype=int) for i in a] for i in a: if len(i) > 1: for j in range(i[0], i[1]+1): @@ -135,19 +138,19 @@ def get_product_list(target=None, proposal_id=None): dataproduct_type=['image'], calib_level=[2], description="DADS C0F file - Calibrated exposure WFPC/WFPC2/FOC/FOS/GHRS/HSP") - products['proposal_id'] = Column(products['proposal_id'],dtype='U35') + products['proposal_id'] = Column(products['proposal_id'], dtype='U35') products['target_name'] = Column(observations['target_name']) - + for prod in products: - prod['proposal_id'] = results['Proposal ID'][results['Dataset']==prod['productFilename'][:len(results['Dataset'][0])].upper()][0] - + prod['proposal_id'] = results['Proposal ID'][results['Dataset'] == prod['productFilename'][:len(results['Dataset'][0])].upper()][0] + for prod in products: - prod['target_name'] = observations['target_name'][observations['obsid']==prod['obsID']][0] + prod['target_name'] = observations['target_name'][observations['obsid'] == prod['obsID']][0] tab = unique(products, ['target_name', 'proposal_id']) - if len(tab)>1 and np.all(tab['target_name']==tab['target_name'][0]): + if len(tab) > 1 and np.all(tab['target_name'] == tab['target_name'][0]): target = tab['target_name'][0] - - products["Obs"] = [np.argmax(np.logical_and(tab['proposal_id']==data['proposal_id'],tab['target_name']==data['target_name']))+1 for data in products] + + products["Obs"] = [np.argmax(np.logical_and(tab['proposal_id'] == data['proposal_id'], tab['target_name'] == data['target_name']))+1 
for data in products] return target, products @@ -155,17 +158,17 @@ def retrieve_products(target=None, proposal_id=None, output_dir='./data'): """ Given a target name and a proposal_id, create the local directories and retrieve the fits files from the MAST Archive """ - target, products = get_product_list(target=target,proposal_id=proposal_id) + target, products = get_product_list(target=target, proposal_id=proposal_id) prodpaths = [] - data_dir = path_join(output_dir, target) + # data_dir = path_join(output_dir, target) out = "" - for obs in unique(products,'Obs'): + for obs in unique(products, 'Obs'): filepaths = [] - #obs_dir = path_join(data_dir, obs['prodposal_id']) - #if obs['target_name']!=target: + # obs_dir = path_join(data_dir, obs['prodposal_id']) + # if obs['target_name']!=target: obs_dir = path_join(path_join(output_dir, target), obs['proposal_id']) if not path_exists(obs_dir): - system("mkdir -p {0:s} {1:s}".format(obs_dir,obs_dir.replace("data","plots"))) + system("mkdir -p {0:s} {1:s}".format(obs_dir, obs_dir.replace("data", "plots"))) for file in products['productFilename'][products['Obs'] == obs['Obs']]: fpath = path_join(obs_dir, file) if not path_exists(fpath): @@ -173,8 +176,8 @@ def retrieve_products(target=None, proposal_id=None, output_dir='./data'): products['dataURI'][products['productFilename'] == file][0], local_path=fpath)[0]) else: out += "{0:s} : Exists\n".format(file) - filepaths.append([obs_dir,file]) - prodpaths.append(np.array(filepaths,dtype=str)) + filepaths.append([obs_dir, file]) + prodpaths.append(np.array(filepaths, dtype=str)) return target, prodpaths @@ -183,12 +186,12 @@ if __name__ == "__main__": import argparse parser = argparse.ArgumentParser(description='Query MAST for target products') - parser.add_argument('-t','--target', metavar='targetname', required=False, + parser.add_argument('-t', '--target', metavar='targetname', required=False, help='the name of the target', type=str, default=None) - parser.add_argument('-p','--proposal_id', metavar='proposal_id', required=False, + parser.add_argument('-p', '--proposal_id', metavar='proposal_id', required=False, help='the proposal id of the data products', type=int, default=None) - parser.add_argument('-o','--output_dir', metavar='directory_path', required=False, + parser.add_argument('-o', '--output_dir', metavar='directory_path', required=False, help='output directory path for the data products', type=str, default="./data") args = parser.parse_args() prodpaths = retrieve_products(target=args.target, proposal_id=args.proposal_id) - print(prodpaths) \ No newline at end of file + print(prodpaths) diff --git a/src/lib/reduction.py b/src/lib/reduction.py index 3b00c78..0a2104d 100755 --- a/src/lib/reduction.py +++ b/src/lib/reduction.py @@ -34,7 +34,7 @@ prototypes : - rotate_Stokes(I_stokes, Q_stokes, U_stokes, Stokes_cov, data_mask, headers, ang, SNRi_cut) -> I_stokes, Q_stokes, U_stokes, Stokes_cov, data_mask, headers Rotate I, Q, U given an angle in degrees using scipy functions. - + - rotate_data(data_array, error_array, data_mask, headers, ang) -> data_array, error_array, data_mask, headers Rotate data before reduction given an angle in degrees using scipy functions. 
""" @@ -42,54 +42,37 @@ prototypes : from copy import deepcopy import numpy as np import matplotlib.pyplot as plt -import matplotlib.dates as mdates from matplotlib.patches import Rectangle from matplotlib.colors import LogNorm from scipy.ndimage import rotate as sc_rotate, shift as sc_shift from scipy.signal import fftconvolve from astropy.wcs import WCS from astropy import log -log.setLevel('ERROR') import warnings from lib.deconvolve import deconvolve_im, gaussian_psf, gaussian2d, zeropad from lib.convex_hull import image_hull, clean_ROI from lib.background import bkg_fit, bkg_hist, bkg_mini -from lib.plots import plot_obs +from lib.plots import plot_obs, princ_angle from lib.cross_correlation import phase_cross_correlation +log.setLevel('ERROR') # Useful tabulated values -#FOC instrument -globals()['trans2'] = {'f140w' : 0.21, 'f175w' : 0.24, 'f220w' : 0.39, 'f275w' : 0.40, 'f320w' : 0.89, 'f342w' : 0.81, 'f430w' : 0.74, 'f370lp' : 0.83, 'f486n' : 0.63, 'f501n' : 0.68, 'f480lp' : 0.82, 'clear2' : 1.0} -globals()['trans3'] = {'f120m' : 0.10, 'f130m' : 0.10, 'f140m' : 0.08, 'f152m' : 0.08, 'f165w' : 0.28, 'f170m' : 0.18, 'f195w' : 0.42, 'f190m' : 0.15, 'f210m' : 0.18, 'f231m' : 0.18, 'clear3' : 1.0} -globals()['trans4'] = {'f253m' : 0.18, 'f278m' : 0.26, 'f307m' : 0.26, 'f130lp' : 0.92, 'f346m' : 0.58, 'f372m' : 0.73, 'f410m' : 0.58, 'f437m' : 0.71, 'f470m' : 0.79, 'f502m' : 0.82, 'f550m' : 0.77, 'clear4' : 1.0} -globals()['pol_efficiency'] = {'pol0' : 0.92, 'pol60' : 0.92, 'pol120' : 0.91} +# FOC instrument +globals()['trans2'] = {'f140w': 0.21, 'f175w': 0.24, 'f220w': 0.39, 'f275w': 0.40, 'f320w': 0.89, 'f342w': 0.81, + 'f430w': 0.74, 'f370lp': 0.83, 'f486n': 0.63, 'f501n': 0.68, 'f480lp': 0.82, 'clear2': 1.0} +globals()['trans3'] = {'f120m': 0.10, 'f130m': 0.10, 'f140m': 0.08, 'f152m': 0.08, 'f165w': 0.28, + 'f170m': 0.18, 'f195w': 0.42, 'f190m': 0.15, 'f210m': 0.18, 'f231m': 0.18, 'clear3': 1.0} +globals()['trans4'] = {'f253m': 0.18, 'f278m': 0.26, 'f307m': 0.26, 'f130lp': 0.92, 'f346m': 0.58, + 'f372m': 0.73, 'f410m': 0.58, 'f437m': 0.71, 'f470m': 0.79, 'f502m': 0.82, 'f550m': 0.77, 'clear4': 1.0} +globals()['pol_efficiency'] = {'pol0': 0.92, 'pol60': 0.92, 'pol120': 0.91} # POL0 = 0deg, POL60 = 60deg, POL120=120deg globals()['theta'] = np.array([180.*np.pi/180., 60.*np.pi/180., 120.*np.pi/180.]) # Uncertainties on the orientation of the polarizers' axes taken to be 3deg (see Nota et. al 1996, p36; Robinson & Thomson 1995) globals()['sigma_theta'] = np.array([3.*np.pi/180., 3.*np.pi/180., 3.*np.pi/180.]) # Image shift between polarizers as measured by Hodge (1995) -globals()['pol_shift'] = {'pol0' : np.array([0.,0.])*1., 'pol60' : np.array([3.63,-0.68])*1., 'pol120' : np.array([0.65,0.20])*1.} -globals()['sigma_shift'] = {'pol0' : [0.3,0.3], 'pol60' : [0.3,0.3], 'pol120' : [0.3,0.3]} - - -def princ_angle(ang): - """ - Return the principal angle in the 0° to 180° quadrant. - as PA is always defined at p/m 180°. - """ - if type(ang) != np.ndarray: - A = np.array([ang]) - else: - A = np.array(ang) - while np.any(A < 0.): - A[A<0.] = A[A<0.]+360. - while np.any(A >= 180.): - A[A>=180.] = A[A>=180.]-180. 
- if type(ang) == type(A): - return A - else: - return A[0] +globals()['pol_shift'] = {'pol0': np.array([0., 0.])*1., 'pol60': np.array([3.63, -0.68])*1., 'pol120': np.array([0.65, 0.20])*1.} +globals()['sigma_shift'] = {'pol0': [0.3, 0.3], 'pol60': [0.3, 0.3], 'pol120': [0.3, 0.3]} def get_row_compressor(old_dimension, new_dimension, operation='sum'): @@ -185,8 +168,8 @@ def bin_ndarray(ndarray, new_shape, operation='sum'): if ndarray.ndim != len(new_shape): raise ValueError("Shape mismatch: {} -> {}".format(ndarray.shape, new_shape)) - if (np.array(ndarray.shape)%np.array(new_shape) == np.array([0.,0.])).all(): - compression_pairs = [(d, c//d) for d,c in zip(new_shape, ndarray.shape)] + if (np.array(ndarray.shape) % np.array(new_shape) == np.array([0., 0.])).all(): + compression_pairs = [(d, c//d) for d, c in zip(new_shape, ndarray.shape)] flattened = [l for p in compression_pairs for l in p] ndarray = ndarray.reshape(flattened) @@ -203,9 +186,7 @@ def bin_ndarray(ndarray, new_shape, operation='sum'): return ndarray -def crop_array(data_array, headers, error_array=None, data_mask=None, step=5, - null_val=None, inside=False, display=False, savename=None, - plots_folder=""): +def crop_array(data_array, headers, error_array=None, data_mask=None, step=5, null_val=None, inside=False, display=False, savename=None, plots_folder=""): """ Homogeneously crop an array: all contained images will have the same shape. 'inside' parameter will decide how much should be cropped. @@ -262,31 +243,31 @@ def crop_array(data_array, headers, error_array=None, data_mask=None, step=5, elif type(null_val) is float: null_val = [null_val,]*error_array.shape[0] - vertex = np.zeros((data_array.shape[0],4),dtype=int) - for i,image in enumerate(data_array): # Get vertex of the rectangular convex hull of each image - vertex[i] = image_hull(image,step=step,null_val=null_val[i],inside=inside) - v_array = np.zeros(4,dtype=int) + vertex = np.zeros((data_array.shape[0], 4), dtype=int) + for i, image in enumerate(data_array): # Get vertex of the rectangular convex hull of each image + vertex[i] = image_hull(image, step=step, null_val=null_val[i], inside=inside) + v_array = np.zeros(4, dtype=int) if inside: # Get vertex of the maximum convex hull for all images - v_array[0] = np.max(vertex[:,0]).astype(int) - v_array[1] = np.min(vertex[:,1]).astype(int) - v_array[2] = np.max(vertex[:,2]).astype(int) - v_array[3] = np.min(vertex[:,3]).astype(int) + v_array[0] = np.max(vertex[:, 0]).astype(int) + v_array[1] = np.min(vertex[:, 1]).astype(int) + v_array[2] = np.max(vertex[:, 2]).astype(int) + v_array[3] = np.min(vertex[:, 3]).astype(int) else: # Get vertex of the minimum convex hull for all images - v_array[0] = np.min(vertex[:,0]).astype(int) - v_array[1] = np.max(vertex[:,1]).astype(int) - v_array[2] = np.min(vertex[:,2]).astype(int) - v_array[3] = np.max(vertex[:,3]).astype(int) + v_array[0] = np.min(vertex[:, 0]).astype(int) + v_array[1] = np.max(vertex[:, 1]).astype(int) + v_array[2] = np.min(vertex[:, 2]).astype(int) + v_array[3] = np.max(vertex[:, 3]).astype(int) - new_shape = np.array([v_array[1]-v_array[0],v_array[3]-v_array[2]]) + new_shape = np.array([v_array[1]-v_array[0], v_array[3]-v_array[2]]) rectangle = [v_array[2], v_array[0], new_shape[1], new_shape[0], 0., 'b'] crop_headers = deepcopy(headers) - crop_array = np.zeros((data_array.shape[0],new_shape[0],new_shape[1])) - crop_error_array = np.zeros((data_array.shape[0],new_shape[0],new_shape[1])) - for i,image in enumerate(data_array): - #Put the image data in the 
cropped array - crop_array[i] = image[v_array[0]:v_array[1],v_array[2]:v_array[3]] - crop_error_array[i] = error_array[i][v_array[0]:v_array[1],v_array[2]:v_array[3]] - #Update CRPIX value in the associated header + crop_array = np.zeros((data_array.shape[0], new_shape[0], new_shape[1])) + crop_error_array = np.zeros((data_array.shape[0], new_shape[0], new_shape[1])) + for i, image in enumerate(data_array): + # Put the image data in the cropped array + crop_array[i] = image[v_array[0]:v_array[1], v_array[2]:v_array[3]] + crop_error_array[i] = error_array[i][v_array[0]:v_array[1], v_array[2]:v_array[3]] + # Update CRPIX value in the associated header curr_wcs = deepcopy(WCS(crop_headers[i])) curr_wcs.wcs.crpix[:2] = curr_wcs.wcs.crpix[:2] - np.array([v_array[2], v_array[0]]) crop_headers[i].update(curr_wcs.to_header()) @@ -294,57 +275,57 @@ def crop_array(data_array, headers, error_array=None, data_mask=None, step=5, if display: plt.rcParams.update({'font.size': 15}) - fig, ax = plt.subplots(figsize=(10,10)) + fig, ax = plt.subplots(figsize=(10, 10)) convert_flux = headers[0]['photflam'] data = deepcopy(data_array[0]*convert_flux) - data[data <= data[data>0.].min()] = data[data > 0.].min() + data[data <= data[data > 0.].min()] = data[data > 0.].min() crop = crop_array[0]*convert_flux instr = headers[0]['instrume'] rootname = headers[0]['rootname'] exptime = headers[0]['exptime'] filt = headers[0]['filtnam1'] - #plots - #im = ax.imshow(data, vmin=data.min(), vmax=data.max(), origin='lower', cmap='gray') - im = ax.imshow(data, norm=LogNorm(crop[crop>0.].mean()/5.,crop.max()), origin='lower', cmap='gray') + # plots + # im = ax.imshow(data, vmin=data.min(), vmax=data.max(), origin='lower', cmap='gray') + im = ax.imshow(data, norm=LogNorm(crop[crop > 0.].mean()/5., crop.max()), origin='lower', cmap='gray') x, y, width, height, angle, color = rectangle - ax.add_patch(Rectangle((x, y),width,height,edgecolor=color,fill=False)) - #position of centroid - ax.plot([data.shape[1]/2, data.shape[1]/2], [0,data.shape[0]-1], '--', lw=1, + ax.add_patch(Rectangle((x, y), width, height, edgecolor=color, fill=False)) + # position of centroid + ax.plot([data.shape[1]/2, data.shape[1]/2], [0, data.shape[0]-1], '--', lw=1, color='grey', alpha=0.3) - ax.plot([0,data.shape[1]-1], [data.shape[1]/2, data.shape[1]/2], '--', lw=1, + ax.plot([0, data.shape[1]-1], [data.shape[1]/2, data.shape[1]/2], '--', lw=1, color='grey', alpha=0.3) ax.annotate(instr+":"+rootname, color='white', fontsize=10, - xy=(0.02, 0.95), xycoords='axes fraction') + xy=(0.02, 0.95), xycoords='axes fraction') ax.annotate(filt, color='white', fontsize=14, xy=(0.02, 0.02), - xycoords='axes fraction') + xycoords='axes fraction') ax.annotate(str(exptime)+" s", color='white', fontsize=10, xy=(0.80, 0.02), - xycoords='axes fraction') - ax.set(#title="Location of cropped image.", - xlabel='pixel offset', - ylabel='pixel offset') + xycoords='axes fraction') + ax.set( # title="Location of cropped image.", + xlabel='pixel offset', + ylabel='pixel offset') fig.subplots_adjust(hspace=0, wspace=0, right=0.85) cbar_ax = fig.add_axes([0.9, 0.12, 0.02, 0.75]) fig.colorbar(im, cax=cbar_ax, label=r"Flux [$ergs \cdot cm^{-2} \cdot s^{-1} \cdot \AA^{-1}$]") - if not(savename is None): - #fig.suptitle(savename+'_'+filt+'_crop_region') - fig.savefig("/".join([plots_folder,savename+'_'+filt+'_crop_region.png']), - bbox_inches='tight') - plot_obs(data_array, headers, vmin=convert_flux*data_array[data_array>0.].mean()/5., - 
vmax=convert_flux*data_array[data_array>0.].max(), rectangle=[rectangle,]*len(headers), - savename=savename+'_crop_region',plots_folder=plots_folder) + if savename is not None: + # fig.suptitle(savename+'_'+filt+'_crop_region') + fig.savefig("/".join([plots_folder, savename+'_'+filt+'_crop_region.png']), + bbox_inches='tight') + plot_obs(data_array, headers, vmin=convert_flux*data_array[data_array > 0.].mean()/5., + vmax=convert_flux*data_array[data_array > 0.].max(), rectangle=[rectangle,]*len(headers), + savename=savename+'_crop_region', plots_folder=plots_folder) plt.show() - if not data_mask is None: - crop_mask = data_mask[v_array[0]:v_array[1],v_array[2]:v_array[3]] + if data_mask is not None: + crop_mask = data_mask[v_array[0]:v_array[1], v_array[2]:v_array[3]] return crop_array, crop_error_array, crop_mask, crop_headers else: return crop_array, crop_error_array, crop_headers def deconvolve_array(data_array, headers, psf='gaussian', FWHM=1., scale='px', - shape=(9,9), iterations=20, algo='richardson'): + shape=(9, 9), iterations=20, algo='richardson'): """ Homogeneously deconvolve a data array using Richardson-Lucy iterative algorithm. ---------- @@ -374,7 +355,7 @@ def deconvolve_array(data_array, headers, psf='gaussian', FWHM=1., scale='px', as a regulation of the process. Defaults to 20. algo : str, optional - Name of the deconvolution algorithm that will be used. Implemented + Name of the deconvolution algorithm that will be used. Implemented algorithms are the following : 'Wiener', 'Van-Cittert', 'One Step Gradient', 'Conjugate Gradient' and 'Richardson-Lucy'. Defaults to 'Richardson-Lucy'. @@ -385,36 +366,33 @@ def deconvolve_array(data_array, headers, psf='gaussian', FWHM=1., scale='px', point spread function. """ # If chosen FWHM scale is 'arcsec', compute FWHM in pixel scale - if scale.lower() in ['arcsec','arcseconds']: - pxsize = np.zeros((data_array.shape[0],2)) - for i,header in enumerate(headers): + if scale.lower() in ['arcsec', 'arcseconds']: + pxsize = np.zeros((data_array.shape[0], 2)) + for i, header in enumerate(headers): # Get current pixel size w = WCS(header).deepcopy() - pxsize[i] = np.round(w.wcs.cdelt/3600.,15) + pxsize[i] = np.round(w.wcs.cdelt/3600., 15) if (pxsize != pxsize[0]).any(): raise ValueError("Not all images in array have same pixel size") FWHM /= pxsize[0].min() # Define Point-Spread-Function kernel - if psf.lower() in ['gauss','gaussian']: + if psf.lower() in ['gauss', 'gaussian']: kernel = gaussian_psf(FWHM=FWHM, shape=shape) - elif (type(psf) == np.ndarray) and (len(psf.shape) == 2): + elif isinstance(psf, np.ndarray) and (len(psf.shape) == 2): kernel = psf else: raise ValueError("{} is not a valid value for 'psf'".format(psf)) # Deconvolve images in the array using given PSF deconv_array = np.zeros(data_array.shape) - for i,image in enumerate(data_array): - deconv_array[i] = deconvolve_im(image, kernel, iterations=iterations, - clip=True, filter_epsilon=None, algo='richardson') + for i, image in enumerate(data_array): + deconv_array[i] = deconvolve_im(image, kernel, iterations=iterations, clip=True, filter_epsilon=None, algo='richardson') return deconv_array -def get_error(data_array, headers, error_array=None, data_mask=None, - sub_type=None, subtract_error=True, display=False, savename=None, - plots_folder="", return_background=False): +def get_error(data_array, headers, error_array=None, data_mask=None, sub_type=None, subtract_error=True, display=False, savename=None, plots_folder="", return_background=False): """ Look for sub-image of 
shape sub_shape that have the smallest integrated flux (no source assumption) and define the background on the image by the @@ -478,38 +456,42 @@ def get_error(data_array, headers, error_array=None, data_mask=None, if error_array is None: error_array = np.zeros(data_array.shape) data, error = deepcopy(data_array), deepcopy(error_array) - if not data_mask is None: + if data_mask is not None: mask = deepcopy(data_mask) else: data_c, error_c, _ = crop_array(data, headers, error, step=5, null_val=0., inside=False) mask_c = np.ones(data_c[0].shape, dtype=bool) - for i,(data_ci, error_ci) in enumerate(zip(data_c, error_c)): + for i, (data_ci, error_ci) in enumerate(zip(data_c, error_c)): data[i], error[i] = zeropad(data_ci, data[i].shape), zeropad(error_ci, error[i].shape) mask = zeropad(mask_c, data[0].shape).astype(bool) background = np.zeros((data.shape[0])) - - #wavelength dependence of the polariser filters - #estimated to less than 1% + + # wavelength dependence of the polariser filters + # estimated to less than 1% err_wav = data*0.01 - #difference in PSFs through each polarizers - #estimated to less than 3% + # difference in PSFs through each polarizers + # estimated to less than 3% err_psf = data*0.03 - #flatfielding uncertainties - #estimated to less than 3% + # flatfielding uncertainties + # estimated to less than 3% err_flat = data*0.03 - + if (sub_type is None): - n_data_array, c_error_bkg, headers, background = bkg_hist(data, error, mask, headers, subtract_error=subtract_error, display=display, savename=savename, plots_folder=plots_folder) - elif type(sub_type)==str: + n_data_array, c_error_bkg, headers, background = bkg_hist( + data, error, mask, headers, subtract_error=subtract_error, display=display, savename=savename, plots_folder=plots_folder) + elif isinstance(sub_type, str): if sub_type.lower() in ['auto']: - n_data_array, c_error_bkg, headers, background = bkg_fit(data, error, mask, headers, subtract_error=subtract_error, display=display, savename=savename, plots_folder=plots_folder) + n_data_array, c_error_bkg, headers, background = bkg_fit( + data, error, mask, headers, subtract_error=subtract_error, display=display, savename=savename, plots_folder=plots_folder) else: - n_data_array, c_error_bkg, headers, background = bkg_hist(data, error, mask, headers, sub_type=sub_type, subtract_error=subtract_error, display=display, savename=savename, plots_folder=plots_folder) - elif type(sub_type)==tuple: - n_data_array, c_error_bkg, headers, background = bkg_mini(data, error, mask, headers, sub_shape=sub_type, subtract_error=subtract_error, display=display, savename=savename, plots_folder=plots_folder) + n_data_array, c_error_bkg, headers, background = bkg_hist( + data, error, mask, headers, sub_type=sub_type, subtract_error=subtract_error, display=display, savename=savename, plots_folder=plots_folder) + elif isinstance(sub_type, tuple): + n_data_array, c_error_bkg, headers, background = bkg_mini( + data, error, mask, headers, sub_shape=sub_type, subtract_error=subtract_error, display=display, savename=savename, plots_folder=plots_folder) else: print("Warning: Invalid subtype.") - + # Quadratically add uncertainties in the "correction factors" (see Kishimoto 1999) n_error_array = np.sqrt(err_wav**2+err_psf**2+err_flat**2+c_error_bkg**2) @@ -519,8 +501,7 @@ def get_error(data_array, headers, error_array=None, data_mask=None, return n_data_array, n_error_array, headers -def rebin_array(data_array, error_array, headers, pxsize, scale, - operation='sum', data_mask=None): +def 
rebin_array(data_array, error_array, headers, pxsize, scale, operation='sum', data_mask=None): """ Homogeneously rebin a data array to get a new pixel size equal to pxsize where pxsize is given in arcsec. @@ -564,7 +545,7 @@ def rebin_array(data_array, error_array, headers, pxsize, scale, if not same_instr: raise ValueError("All images in data_array are not from the same\ instrument, cannot proceed.") - if not instr in ['FOC']: + if instr not in ['FOC']: raise ValueError("Cannot reduce images from {0:s} instrument\ (yet)".format(instr)) @@ -574,7 +555,7 @@ def rebin_array(data_array, error_array, headers, pxsize, scale, # Routine for the FOC instrument if instr == 'FOC': HST_aper = 2400. # HST aperture in mm - Dxy_arr = np.ones((data_array.shape[0],2)) + Dxy_arr = np.ones((data_array.shape[0], 2)) for i, (image, error, header) in enumerate(list(zip(data_array, error_array, headers))): # Get current pixel size w = WCS(header).deepcopy() @@ -583,13 +564,13 @@ def rebin_array(data_array, error_array, headers, pxsize, scale, # Compute binning ratio if scale.lower() in ['px', 'pixel']: Dxy_arr[i] = np.array([pxsize,]*2) - elif scale.lower() in ['arcsec','arcseconds']: + elif scale.lower() in ['arcsec', 'arcseconds']: Dxy_arr[i] = np.array(pxsize/np.abs(w.wcs.cdelt)/3600.) - elif scale.lower() in ['full','integrate']: + elif scale.lower() in ['full', 'integrate']: Dxy_arr[i] = image.shape else: raise ValueError("'{0:s}' invalid scale for binning.".format(scale)) - new_shape = np.ceil(min(image.shape/Dxy_arr,key=lambda x:x[0]+x[1])).astype(int) + new_shape = np.ceil(min(image.shape/Dxy_arr, key=lambda x: x[0]+x[1])).astype(int) for i, (image, error, header) in enumerate(list(zip(data_array, error_array, headers))): # Get current pixel size @@ -601,23 +582,18 @@ def rebin_array(data_array, error_array, headers, pxsize, scale, raise ValueError("Requested pixel size is below resolution.") # Rebin data - rebin_data = bin_ndarray(image, new_shape=new_shape, - operation=operation) + rebin_data = bin_ndarray(image, new_shape=new_shape, operation=operation) rebinned_data.append(rebin_data) # Propagate error - rms_image = np.sqrt(bin_ndarray(image**2, new_shape=new_shape, - operation='average')) - sum_image = bin_ndarray(image, new_shape=new_shape, - operation='sum') + rms_image = np.sqrt(bin_ndarray(image**2, new_shape=new_shape, operation='average')) + sum_image = bin_ndarray(image, new_shape=new_shape, operation='sum') mask = sum_image > 0. 
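# Editor's note, a hedged sketch of the reshape trick behind bin_ndarray when
# the old shape is an exact multiple of the new one: fold each block into
# extra axes, then reduce over them (here a sum, as with rebin_operation='sum').
import numpy as np

a = np.arange(16.).reshape(4, 4)
binned = a.reshape(2, 2, 2, 2).sum(axis=(1, 3))  # (4, 4) -> (2, 2) block sums
assert binned[0, 0] == a[:2, :2].sum()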
new_error = np.zeros(rms_image.shape) if operation.lower() in ["mean", "average", "avg"]: - new_error = np.sqrt(bin_ndarray(error**2, - new_shape=new_shape, operation='average')) + new_error = np.sqrt(bin_ndarray(error**2, new_shape=new_shape, operation='average')) else: - new_error = np.sqrt(bin_ndarray(error**2, - new_shape=new_shape, operation='sum')) + new_error = np.sqrt(bin_ndarray(error**2, new_shape=new_shape, operation='sum')) rebinned_error.append(np.sqrt(rms_image**2 + new_error**2)) # Update header @@ -625,12 +601,12 @@ def rebin_array(data_array, error_array, headers, pxsize, scale, nw.wcs.cdelt *= Dxy nw.wcs.crpix /= Dxy nw.array_shape = new_shape - new_header['NAXIS1'],new_header['NAXIS2'] = nw.array_shape + new_header['NAXIS1'], new_header['NAXIS2'] = nw.array_shape for key, val in nw.to_header().items(): - new_header.set(key,val) + new_header.set(key, val) rebinned_headers.append(new_header) - if not data_mask is None: - data_mask = bin_ndarray(data_mask,new_shape=new_shape,operation='average') > 0.80 + if data_mask is not None: + data_mask = bin_ndarray(data_mask, new_shape=new_shape, operation='average') > 0.80 rebinned_data = np.array(rebinned_data) rebinned_error = np.array(rebinned_error) @@ -641,8 +617,7 @@ def rebin_array(data_array, error_array, headers, pxsize, scale, return rebinned_data, rebinned_error, rebinned_headers, Dxy, data_mask -def align_data(data_array, headers, error_array=None, background=None, - upsample_factor=1., ref_data=None, ref_center=None, return_shifts=False): +def align_data(data_array, headers, error_array=None, background=None, upsample_factor=1., ref_data=None, ref_center=None, return_shifts=False): """ Align images in data_array using cross correlation, and rescale them to wider images able to contain any rotation of the reference image. @@ -696,12 +671,12 @@ def align_data(data_array, headers, error_array=None, background=None, """ if ref_data is None: # Define the reference to be the first image of the inputed array - #if None have been specified + # if None have been specified ref_data = data_array[0] same = 1 for array in data_array: # Check if all images have the same shape. If not, cross-correlation - #cannot be computed. + # cannot be computed. same *= (array.shape == ref_data.shape) if not same: raise ValueError("All images in data_array must have same shape as\ @@ -710,76 +685,72 @@ def align_data(data_array, headers, error_array=None, background=None, _, error_array, headers, background = get_error(data_array, headers, return_background=True) # Crop out any null edges - #(ref_data must be cropped as well) - full_array = np.concatenate((data_array,[ref_data]),axis=0) + # (ref_data must be cropped as well) + full_array = np.concatenate((data_array, [ref_data]), axis=0) full_headers = deepcopy(headers) full_headers.append(headers[0]) - err_array = np.concatenate((error_array,[np.zeros(ref_data.shape)]),axis=0) + err_array = np.concatenate((error_array, [np.zeros(ref_data.shape)]), axis=0) - full_array, err_array, full_headers = crop_array(full_array, full_headers, - err_array, step=5, inside=False, null_val=0.) + full_array, err_array, full_headers = crop_array(full_array, full_headers, err_array, step=5, inside=False, null_val=0.) 
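# Editor's note, a hedged sketch of the registration step below: sub-pixel
# shifts are estimated by phase cross-correlation and applied with
# scipy.ndimage.shift. skimage's routine stands in here for the project's
# lib.cross_correlation.phase_cross_correlation, assuming a compatible
# return signature.
import numpy as np
from scipy.ndimage import shift as sc_shift
from skimage.registration import phase_cross_correlation

rng = np.random.default_rng(0)
ref = rng.random((64, 64))
img = sc_shift(ref, (2.0, -3.0), order=1, cval=0.)           # misaligned copy
shift, error, _ = phase_cross_correlation(ref, img, upsample_factor=10)
aligned = sc_shift(img, shift, order=1, cval=0.)             # back onto the ref grid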
data_array, ref_data, headers = full_array[:-1], full_array[-1], full_headers[:-1] error_array = err_array[:-1] do_shift = True if ref_center is None: # Define the center of the reference image to be the center pixel - #if None have been specified + # if none was specified ref_center = (np.array(ref_data.shape)/2).astype(int) do_shift = False elif ref_center.lower() in ['max', 'flux', 'maxflux', 'max_flux']: # Define the center of the reference image to be the pixel of max flux. - ref_center = np.unravel_index(np.argmax(ref_data),ref_data.shape) + ref_center = np.unravel_index(np.argmax(ref_data), ref_data.shape) else: # Default to image center. ref_center = (np.array(ref_data.shape)/2).astype(int) # Create a rescaled null array that can contain any rotation of the - #original image (and shifted images) + # original image (and shifted images) shape = data_array.shape res_shape = int(np.ceil(np.sqrt(2.)*np.max(shape[1:]))) - rescaled_image = np.zeros((shape[0],res_shape,res_shape)) - rescaled_error = np.ones((shape[0],res_shape,res_shape)) - rescaled_mask = np.zeros((shape[0],res_shape,res_shape),dtype=bool) + rescaled_image = np.zeros((shape[0], res_shape, res_shape)) + rescaled_error = np.ones((shape[0], res_shape, res_shape)) + rescaled_mask = np.zeros((shape[0], res_shape, res_shape), dtype=bool) res_center = (np.array(rescaled_image.shape[1:])/2).astype(int) res_shift = res_center-ref_center - res_mask = np.zeros((res_shape,res_shape),dtype=bool) + res_mask = np.zeros((res_shape, res_shape), dtype=bool) res_mask[res_shift[0]:res_shift[0]+shape[1], res_shift[1]:res_shift[1]+shape[2]] = True shifts, errors = [], [] - for i,image in enumerate(data_array): + for i, image in enumerate(data_array): # Initialize rescaled images to background values rescaled_error[i] *= 0.01*background[i] # Get shifts and error by cross-correlation to ref_data if do_shift: - shift, error, _ = phase_cross_correlation(ref_data/ref_data.max(), image/image.max(), - upsample_factor=upsample_factor) + shift, error, _ = phase_cross_correlation(ref_data/ref_data.max(), image/image.max(), upsample_factor=upsample_factor) else: shift = pol_shift[headers[i]['filtnam1'].lower()] error = sigma_shift[headers[i]['filtnam1'].lower()] # Rescale image to requested output - rescaled_image[i,res_shift[0]:res_shift[0]+shape[1], - res_shift[1]:res_shift[1]+shape[2]] = deepcopy(image) - rescaled_error[i,res_shift[0]:res_shift[0]+shape[1], - res_shift[1]:res_shift[1]+shape[2]] = deepcopy(error_array[i]) + rescaled_image[i, res_shift[0]:res_shift[0]+shape[1], res_shift[1]:res_shift[1]+shape[2]] = deepcopy(image) + rescaled_error[i, res_shift[0]:res_shift[0]+shape[1], res_shift[1]:res_shift[1]+shape[2]] = deepcopy(error_array[i]) # Shift images to align rescaled_image[i] = sc_shift(rescaled_image[i], shift, order=1, cval=0.) rescaled_error[i] = sc_shift(rescaled_error[i], shift, order=1, cval=background[i]) - + curr_mask = sc_shift(res_mask, shift, order=1, cval=False) mask_vertex = clean_ROI(curr_mask) - rescaled_mask[i,mask_vertex[2]:mask_vertex[3],mask_vertex[0]:mask_vertex[1]] = True + rescaled_mask[i, mask_vertex[2]:mask_vertex[3], mask_vertex[0]:mask_vertex[1]] = True rescaled_image[i][rescaled_image[i] < 0.] = 0. rescaled_image[i][(1-rescaled_mask[i]).astype(bool)] = 0. # Uncertainties from shifting - prec_shift = np.array([1.,1.])/upsample_factor + prec_shift = np.array([1., 1.])/upsample_factor shifted_image = sc_shift(rescaled_image[i], prec_shift, cval=0.) error_shift = np.abs(rescaled_image[i] - shifted_image)/2.
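`clean_ROI` is imported from the library; judging from the call above, it returns the (xmin, xmax, ymin, ymax) pixel bounds of the valid region of the shifted mask. A hedged sketch of that inferred behaviour:

    import numpy as np

    def clean_ROI_sketch(mask):
        # Bounding box of the non-zero region of a 2D mask, returned as
        # (xmin, xmax, ymin, ymax) so that mask[ymin:ymax, xmin:xmax]
        # covers the valid pixels; inferred semantics, not the library code.
        rows = np.any(mask > 0.5, axis=1)
        cols = np.any(mask > 0.5, axis=0)
        ymin, ymax = np.where(rows)[0][[0, -1]]
        xmin, xmax = np.where(cols)[0][[0, -1]]
        return xmin, xmax + 1, ymin, ymax + 1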
- #sum quadratically the errors + # sum quadratically the errors rescaled_error[i] = np.sqrt(rescaled_error[i]**2 + error_shift**2) - + shifts.append(shift) errors.append(error) @@ -788,7 +759,7 @@ def align_data(data_array, headers, error_array=None, background=None, # Update headers CRPIX value headers_wcs = [deepcopy(WCS(header)) for header in headers] - new_crpix = np.array([wcs.wcs.crpix for wcs in headers_wcs]) + shifts[:,::-1] + res_shift[::-1] + new_crpix = np.array([wcs.wcs.crpix for wcs in headers_wcs]) + shifts[:, ::-1] + res_shift[::-1] for i in range(len(headers_wcs)): headers_wcs[i].wcs.crpix = new_crpix[0] headers[i].update(headers_wcs[i].to_header()) @@ -802,8 +773,7 @@ def align_data(data_array, headers, error_array=None, background=None, return data_array, error_array, headers, data_mask -def smooth_data(data_array, error_array, data_mask, headers, FWHM=1., - scale='pixel', smoothing='gaussian'): +def smooth_data(data_array, error_array, data_mask, headers, FWHM=1., scale='pixel', smoothing='gaussian'): """ Smooth a data_array using selected function. ---------- @@ -839,12 +809,12 @@ def smooth_data(data_array, error_array, data_mask, headers, FWHM=1., smoothed_array. """ # If chosen FWHM scale is 'arcsec', compute FWHM in pixel scale - if scale.lower() in ['arcsec','arcseconds']: - pxsize = np.zeros((data_array.shape[0],2)) - for i,header in enumerate(headers): + if scale.lower() in ['arcsec', 'arcseconds']: + pxsize = np.zeros((data_array.shape[0], 2)) + for i, header in enumerate(headers): # Get current pixel size w = WCS(header).deepcopy() - pxsize[i] = np.round(w.wcs.cdelt*3600.,4) + pxsize[i] = np.round(w.wcs.cdelt*3600., 4) if (pxsize != pxsize[0]).any(): raise ValueError("Not all images in array have same pixel size") FWHM /= pxsize[0].min() @@ -853,7 +823,7 @@ def smooth_data(data_array, error_array, data_mask, headers, FWHM=1., stdev = FWHM/(2.*np.sqrt(2.*np.log(2))) fmax = np.finfo(np.double).max - if smoothing.lower() in ['combine','combining']: + if smoothing.lower() in ['combine', 'combining']: # Smooth using N images combination algorithm # Weight array weight = 1./error_array**2 @@ -872,19 +842,20 @@ def smooth_data(data_array, error_array, data_mask, headers, FWHM=1., with warnings.catch_warnings(record=True) as w: g_rc = np.array([np.exp(-0.5*(dist_rc/stdev)**2)/(2.*np.pi*stdev**2),]*data_array.shape[0]) # Apply weighted combination - smoothed[r,c] = np.where(data_mask[r,c], np.sum(data_array*weight*g_rc)/np.sum(weight*g_rc), data_array.mean(axis=0)[r,c]) - error[r,c] = np.where(data_mask[r,c], np.sqrt(np.sum(weight*g_rc**2))/np.sum(weight*g_rc), (np.sqrt(np.sum(error_array**2,axis=0)/error_array.shape[0]))[r,c]) + smoothed[r, c] = np.where(data_mask[r, c], np.sum(data_array*weight*g_rc)/np.sum(weight*g_rc), data_array.mean(axis=0)[r, c]) + error[r, c] = np.where(data_mask[r, c], np.sqrt(np.sum(weight*g_rc**2))/np.sum(weight*g_rc), + (np.sqrt(np.sum(error_array**2, axis=0)/error_array.shape[0]))[r, c]) # Nan handling - error[np.logical_or(np.isnan(smoothed*error),1-data_mask)] = 0. - smoothed[np.logical_or(np.isnan(smoothed*error),1-data_mask)] = 0. + error[np.logical_or(np.isnan(smoothed*error), 1-data_mask)] = 0. + smoothed[np.logical_or(np.isnan(smoothed*error), 1-data_mask)] = 0. 
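For reference, the `stdev` used by both smoothing branches comes from the Gaussian relation FWHM = 2*sqrt(2*ln 2)*sigma (about 2.355*sigma):

    import numpy as np

    def fwhm_to_sigma(fwhm):
        # FWHM = 2*sqrt(2*ln(2)) * sigma for a Gaussian kernel
        return fwhm / (2. * np.sqrt(2. * np.log(2.)))

    print(fwhm_to_sigma(0.10))  # ~0.0425, in the same units as the FWHM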
- elif smoothing.lower() in ['weight_gauss','weighted_gaussian','gauss','gaussian']: + elif smoothing.lower() in ['weight_gauss', 'weighted_gaussian', 'gauss', 'gaussian']: # Convolution with gaussian function smoothed = np.zeros(data_array.shape) error = np.zeros(error_array.shape) - for i,(image,image_error) in enumerate(zip(data_array, error_array)): - x, y = np.meshgrid(np.arange(-image.shape[1]/2,image.shape[1]/2),np.arange(-image.shape[0]/2,image.shape[0]/2)) + for i, (image, image_error) in enumerate(zip(data_array, error_array)): + x, y = np.meshgrid(np.arange(-image.shape[1]/2, image.shape[1]/2), np.arange(-image.shape[0]/2, image.shape[0]/2)) weights = np.ones(image_error.shape) if smoothing.lower()[:6] in ['weight']: weights = 1./image_error**2 @@ -893,12 +864,12 @@ def smooth_data(data_array, error_array, data_mask, headers, FWHM=1., weights /= weights.sum() kernel = gaussian2d(x, y, stdev) kernel /= kernel.sum() - smoothed[i] = np.where(data_mask, fftconvolve(image*weights,kernel,'same')/fftconvolve(weights,kernel,'same'), image) - error[i] = np.where(data_mask, np.sqrt(fftconvolve(image_error**2*weights**2,kernel**2,'same'))/fftconvolve(weights,kernel,'same'), image_error) + smoothed[i] = np.where(data_mask, fftconvolve(image*weights, kernel, 'same')/fftconvolve(weights, kernel, 'same'), image) + error[i] = np.where(data_mask, np.sqrt(fftconvolve(image_error**2*weights**2, kernel**2, 'same'))/fftconvolve(weights, kernel, 'same'), image_error) # Nan handling - error[i][np.logical_or(np.isnan(smoothed[i]*error[i]),1-data_mask)] = 0. - smoothed[i][np.logical_or(np.isnan(smoothed[i]*error[i]),1-data_mask)] = 0. + error[i][np.logical_or(np.isnan(smoothed[i]*error[i]), 1-data_mask)] = 0. + smoothed[i][np.logical_or(np.isnan(smoothed[i]*error[i]), 1-data_mask)] = 0. else: raise ValueError("{} is not a valid smoothing option".format(smoothing)) @@ -906,8 +877,7 @@ def smooth_data(data_array, error_array, data_mask, headers, FWHM=1., return smoothed, error -def polarizer_avg(data_array, error_array, data_mask, headers, FWHM=None, - scale='pixel', smoothing='gaussian'): +def polarizer_avg(data_array, error_array, data_mask, headers, FWHM=None, scale='pixel', smoothing='gaussian'): """ Make the average image from a single polarizer for a given instrument. 
----------- @@ -950,29 +920,29 @@ def polarizer_avg(data_array, error_array, data_mask, headers, FWHM=None, if not same_instr: raise ValueError("All images in data_array are not from the same\ instrument, cannot proceed.") - if not instr in ['FOC']: + if instr not in ['FOC']: raise ValueError("Cannot reduce images from {0:s} instrument\ (yet)".format(instr)) # Routine for the FOC instrument if instr == 'FOC': # Sort images by polarizer filter : can be 0deg, 60deg, 120deg for the FOC - is_pol0 = np.array([header['filtnam1']=='POL0' for header in headers]) + is_pol0 = np.array([header['filtnam1'] == 'POL0' for header in headers]) if (1-is_pol0).all(): print("Warning : no image for POL0 of FOC found, averaged data\ will be NAN") - is_pol60 = np.array([header['filtnam1']=='POL60' for header in headers]) + is_pol60 = np.array([header['filtnam1'] == 'POL60' for header in headers]) if (1-is_pol60).all(): print("Warning : no image for POL60 of FOC found, averaged data\ will be NAN") - is_pol120 = np.array([header['filtnam1']=='POL120' for header in headers]) + is_pol120 = np.array([header['filtnam1'] == 'POL120' for header in headers]) if (1-is_pol120).all(): print("Warning : no image for POL120 of FOC found, averaged data\ will be NAN") # Put each polarizer images in separate arrays - headers0 = [header for header in headers if header['filtnam1']=='POL0'] - headers60 = [header for header in headers if header['filtnam1']=='POL60'] - headers120 = [header for header in headers if header['filtnam1']=='POL120'] + headers0 = [header for header in headers if header['filtnam1'] == 'POL0'] + headers60 = [header for header in headers if header['filtnam1'] == 'POL60'] + headers120 = [header for header in headers if header['filtnam1'] == 'POL120'] pol0_array = data_array[is_pol0] pol60_array = data_array[is_pol60] @@ -982,14 +952,11 @@ def polarizer_avg(data_array, error_array, data_mask, headers, FWHM=None, err60_array = error_array[is_pol60] err120_array = error_array[is_pol120] - if not(FWHM is None) and (smoothing.lower() in ['combine','combining']): + if (FWHM is not None) and (smoothing.lower() in ['combine', 'combining']): # Smooth by combining each polarizer images - pol0, err0 = smooth_data(pol0_array, err0_array, data_mask, headers0, - FWHM=FWHM, scale=scale, smoothing=smoothing) - pol60, err60 = smooth_data(pol60_array, err60_array, data_mask, headers60, - FWHM=FWHM, scale=scale, smoothing=smoothing) - pol120, err120 = smooth_data(pol120_array, err120_array, data_mask, headers120, - FWHM=FWHM, scale=scale, smoothing=smoothing) + pol0, err0 = smooth_data(pol0_array, err0_array, data_mask, headers0, FWHM=FWHM, scale=scale, smoothing=smoothing) + pol60, err60 = smooth_data(pol60_array, err60_array, data_mask, headers60, FWHM=FWHM, scale=scale, smoothing=smoothing) + pol120, err120 = smooth_data(pol120_array, err120_array, data_mask, headers120, FWHM=FWHM, scale=scale, smoothing=smoothing) else: # Sum on each polarisation filter. 
@@ -1014,25 +981,24 @@ def polarizer_avg(data_array, error_array, data_mask, headers, FWHM=None, pol_headers = [headers0[0], headers60[0], headers120[0]] # Propagate uncertainties quadratically - err0 = np.sqrt(np.sum(err0_array**2,axis=0))/pol0_t - err60 = np.sqrt(np.sum(err60_array**2,axis=0))/pol60_t - err120 = np.sqrt(np.sum(err120_array**2,axis=0))/pol120_t + err0 = np.sqrt(np.sum(err0_array**2, axis=0))/pol0_t + err60 = np.sqrt(np.sum(err60_array**2, axis=0))/pol60_t + err120 = np.sqrt(np.sum(err120_array**2, axis=0))/pol120_t polerr_array = np.array([err0, err60, err120]) - if not(FWHM is None) and (smoothing.lower() in ['gaussian','gauss','weighted_gaussian','weight_gauss']): + if (FWHM is not None) and (smoothing.lower() in ['gaussian', 'gauss', 'weighted_gaussian', 'weight_gauss']): # Smooth by convolving each polX image with a gaussian. - pol_array, polerr_array = smooth_data(pol_array, polerr_array, - data_mask, pol_headers, FWHM=FWHM, scale=scale, smoothing=smoothing) + pol_array, polerr_array = smooth_data(pol_array, polerr_array, data_mask, pol_headers, FWHM=FWHM, scale=scale, smoothing=smoothing) pol0, pol60, pol120 = pol_array err0, err60, err120 = polerr_array - + # Update headers for header in headers: - if header['filtnam1']=='POL0': + if header['filtnam1'] == 'POL0': list_head = headers0 - elif header['filtnam1']=='POL60': + elif header['filtnam1'] == 'POL60': list_head = headers60 - elif header['filtnam1']=='POL120': + elif header['filtnam1'] == 'POL120': list_head = headers120 header['exptime'] = np.sum([head['exptime'] for head in list_head]) pol_headers = [headers0[0], headers60[0], headers120[0]] @@ -1041,23 +1007,22 @@ def polarizer_avg(data_array, error_array, data_mask, headers, FWHM=None, shape = pol0.shape # Construct the polarizer array - polarizer_array = np.zeros((3,shape[0],shape[1])) + polarizer_array = np.zeros((3, shape[0], shape[1])) polarizer_array[0] = pol0 polarizer_array[1] = pol60 polarizer_array[2] = pol120 # Define the covariance matrix for the polarizer images - #We assume cross terms are null - polarizer_cov = np.zeros((3,3,shape[0],shape[1])) - polarizer_cov[0,0] = err0**2 - polarizer_cov[1,1] = err60**2 - polarizer_cov[2,2] = err120**2 + # We assume cross terms are null + polarizer_cov = np.zeros((3, 3, shape[0], shape[1])) + polarizer_cov[0, 0] = err0**2 + polarizer_cov[1, 1] = err60**2 + polarizer_cov[2, 2] = err120**2 return polarizer_array, polarizer_cov, pol_headers -def compute_Stokes(data_array, error_array, data_mask, headers, - FWHM=None, scale='pixel', smoothing='combine', transmitcorr=False): +def compute_Stokes(data_array, error_array, data_mask, headers, FWHM=None, scale='pixel', smoothing='combine', transmitcorr=False): """ Compute the Stokes parameters I, Q and U for a given data_set ---------- @@ -1114,23 +1079,22 @@ def compute_Stokes(data_array, error_array, data_mask, headers, if not same_instr: raise ValueError("All images in data_array are not from the same\ instrument, cannot proceed.") - if not instr in ['FOC']: + if instr not in ['FOC']: raise ValueError("Cannot reduce images from {0:s} instrument\ (yet)".format(instr)) # Routine for the FOC instrument if instr == 'FOC': # Get image from each polarizer and covariance matrix - pol_array, pol_cov, pol_headers = polarizer_avg(data_array, error_array, data_mask, - headers, FWHM=FWHM, scale=scale, smoothing=smoothing) + pol_array, pol_cov, pol_headers = polarizer_avg(data_array, error_array, data_mask, headers, FWHM=FWHM, scale=scale, smoothing=smoothing) pol0, pol60,
pol120 = pol_array if (pol0 < 0.).any() or (pol60 < 0.).any() or (pol120 < 0.).any(): print("WARNING : Negative value in polarizer array.") # Stokes parameters - #transmittance corrected - transmit = np.ones((3,)) #will be filter dependant + # transmittance corrected + transmit = np.ones((3,)) # will be filter dependent filt2, filt3, filt4 = headers[0]['filtnam2'], headers[0]['filtnam3'], headers[0]['filtnam4'] same_filt2 = np.array([filt2 == header['filtnam2'] for header in headers]).all() same_filt3 = np.array([filt3 == header['filtnam3'] for header in headers]).all() @@ -1147,114 +1111,124 @@ def compute_Stokes(data_array, error_array, data_mask, headers, transmit *= transmit2*transmit3*transmit4 pol_eff = np.array([pol_efficiency['pol0'], pol_efficiency['pol60'], pol_efficiency['pol120']]) - #Calculating correction factor + # Calculating correction factor corr = np.array([1.0*h['photflam']/h['exptime'] for h in pol_headers])*pol_headers[0]['exptime']/pol_headers[0]['photflam'] # Orientation and error for each polarizer fmax = np.finfo(np.float64).max pol_flux = np.array([corr[0]*pol0, corr[1]*pol60, corr[2]*pol120]) - coeff_stokes = np.zeros((3,3)) + coeff_stokes = np.zeros((3, 3)) # Coefficients linking each polarizer flux to each Stokes parameter for i in range(3): - coeff_stokes[0,i] = pol_eff[(i+1)%3]*pol_eff[(i+2)%3]*np.sin(-2.*theta[(i+1)%3]+2.*theta[(i+2)%3])*2./transmit[i] - coeff_stokes[1,i] = (-pol_eff[(i+1)%3]*np.sin(2.*theta[(i+1)%3]) + pol_eff[(i+2)%3]*np.sin(2.*theta[(i+2)%3]))*2./transmit[i] - coeff_stokes[2,i] = (pol_eff[(i+1)%3]*np.cos(2.*theta[(i+1)%3]) - pol_eff[(i+2)%3]*np.cos(2.*theta[(i+2)%3]))*2./transmit[i] + coeff_stokes[0, i] = pol_eff[(i+1) % 3]*pol_eff[(i+2) % 3]*np.sin(-2.*theta[(i+1) % 3]+2.*theta[(i+2) % 3])*2./transmit[i] + coeff_stokes[1, i] = (-pol_eff[(i+1) % 3]*np.sin(2.*theta[(i+1) % 3]) + pol_eff[(i+2) % 3]*np.sin(2.*theta[(i+2) % 3]))*2./transmit[i] + coeff_stokes[2, i] = (pol_eff[(i+1) % 3]*np.cos(2.*theta[(i+1) % 3]) - pol_eff[(i+2) % 3]*np.cos(2.*theta[(i+2) % 3]))*2./transmit[i] # Normalization parameter for Stokes parameters computation - A = (coeff_stokes[0,:]*transmit/2.).sum() + A = (coeff_stokes[0, :]*transmit/2.).sum() coeff_stokes = coeff_stokes/A I_stokes = np.zeros(pol_array[0].shape) Q_stokes = np.zeros(pol_array[0].shape) U_stokes = np.zeros(pol_array[0].shape) - Stokes_cov = np.zeros((3,3,I_stokes.shape[0],I_stokes.shape[1])) + Stokes_cov = np.zeros((3, 3, I_stokes.shape[0], I_stokes.shape[1])) for i in range(I_stokes.shape[0]): for j in range(I_stokes.shape[1]): - I_stokes[i,j], Q_stokes[i,j], U_stokes[i,j] = np.dot(coeff_stokes, pol_flux[:,i,j]).T - Stokes_cov[:,:,i,j] = np.dot(coeff_stokes, np.dot(pol_cov[:,:,i,j], coeff_stokes.T)) + I_stokes[i, j], Q_stokes[i, j], U_stokes[i, j] = np.dot(coeff_stokes, pol_flux[:, i, j]).T + Stokes_cov[:, :, i, j] = np.dot(coeff_stokes, np.dot(pol_cov[:, :, i, j], coeff_stokes.T)) - if not(FWHM is None) and (smoothing.lower() in ['weighted_gaussian_after','weight_gauss_after','gaussian_after','gauss_after']): + if (FWHM is not None) and (smoothing.lower() in ['weighted_gaussian_after', 'weight_gauss_after', 'gaussian_after', 'gauss_after']): smoothing = smoothing.lower()[:-6] Stokes_array = np.array([I_stokes, Q_stokes, U_stokes]) - Stokes_error = np.array([np.sqrt(Stokes_cov[i,i]) for i in range(3)]) + Stokes_error = np.array([np.sqrt(Stokes_cov[i, i]) for i in range(3)]) Stokes_headers = headers[0:3] - Stokes_array, Stokes_error = smooth_data(Stokes_array, Stokes_error, data_mask, -
headers=Stokes_headers, FWHM=FWHM, scale=scale, smoothing=smoothing) + Stokes_array, Stokes_error = smooth_data(Stokes_array, Stokes_error, data_mask, headers=Stokes_headers, FWHM=FWHM, scale=scale, smoothing=smoothing) I_stokes, Q_stokes, U_stokes = Stokes_array - Stokes_cov[0,0], Stokes_cov[1,1], Stokes_cov[2,2] = deepcopy(Stokes_error**2) + Stokes_cov[0, 0], Stokes_cov[1, 1], Stokes_cov[2, 2] = deepcopy(Stokes_error**2) sStokes_array = np.array([I_stokes*Q_stokes, I_stokes*U_stokes, Q_stokes*U_stokes]) - sStokes_error = np.array([Stokes_cov[0,1], Stokes_cov[0,2], Stokes_cov[1,2]]) - uStokes_error = np.array([Stokes_cov[1,0], Stokes_cov[2,0], Stokes_cov[2,1]]) + sStokes_error = np.array([Stokes_cov[0, 1], Stokes_cov[0, 2], Stokes_cov[1, 2]]) + uStokes_error = np.array([Stokes_cov[1, 0], Stokes_cov[2, 0], Stokes_cov[2, 1]]) sStokes_array, sStokes_error = smooth_data(sStokes_array, sStokes_error, data_mask, - headers=Stokes_headers, FWHM=FWHM, scale=scale, smoothing=smoothing) + headers=Stokes_headers, FWHM=FWHM, scale=scale, smoothing=smoothing) uStokes_array, uStokes_error = smooth_data(sStokes_array, uStokes_error, data_mask, - headers=Stokes_headers, FWHM=FWHM, scale=scale, smoothing=smoothing) + headers=Stokes_headers, FWHM=FWHM, scale=scale, smoothing=smoothing) - Stokes_cov[0,1], Stokes_cov[0,2], Stokes_cov[1,2] = deepcopy(sStokes_error) - Stokes_cov[1,0], Stokes_cov[2,0], Stokes_cov[2,1] = deepcopy(uStokes_error) + Stokes_cov[0, 1], Stokes_cov[0, 2], Stokes_cov[1, 2] = deepcopy(sStokes_error) + Stokes_cov[1, 0], Stokes_cov[2, 0], Stokes_cov[2, 1] = deepcopy(uStokes_error) mask = (Q_stokes**2 + U_stokes**2) > I_stokes**2 if mask.any(): print("WARNING : found {0:d} pixels for which I_pol > I_stokes".format(I_stokes[mask].size)) # Statistical error: Poisson noise is assumed - sigma_flux = np.array([np.sqrt(flux/head['exptime']) for flux,head in zip(pol_flux,pol_headers)]) - s_I2_stat = np.sum([coeff_stokes[0,i]**2*sigma_flux[i]**2 for i in range(len(sigma_flux))],axis=0) - s_Q2_stat = np.sum([coeff_stokes[1,i]**2*sigma_flux[i]**2 for i in range(len(sigma_flux))],axis=0) - s_U2_stat = np.sum([coeff_stokes[2,i]**2*sigma_flux[i]**2 for i in range(len(sigma_flux))],axis=0) + sigma_flux = np.array([np.sqrt(flux/head['exptime']) for flux, head in zip(pol_flux, pol_headers)]) + s_I2_stat = np.sum([coeff_stokes[0, i]**2*sigma_flux[i]**2 for i in range(len(sigma_flux))], axis=0) + s_Q2_stat = np.sum([coeff_stokes[1, i]**2*sigma_flux[i]**2 for i in range(len(sigma_flux))], axis=0) + s_U2_stat = np.sum([coeff_stokes[2, i]**2*sigma_flux[i]**2 for i in range(len(sigma_flux))], axis=0) # Compute the derivative of each Stokes parameter with respect to the polarizer orientation - dI_dtheta1 = 2.*pol_eff[0]/A*(pol_eff[2]*np.cos(-2.*theta[2]+2.*theta[0])*(pol_flux[1]-I_stokes) - pol_eff[1]*np.cos(-2.*theta[0]+2.*theta[1])*(pol_flux[2]-I_stokes)) - dI_dtheta2 = 2.*pol_eff[1]/A*(pol_eff[0]*np.cos(-2.*theta[0]+2.*theta[1])*(pol_flux[2]-I_stokes) - pol_eff[2]*np.cos(-2.*theta[1]+2.*theta[2])*(pol_flux[0]-I_stokes)) - dI_dtheta3 = 2.*pol_eff[2]/A*(pol_eff[1]*np.cos(-2.*theta[1]+2.*theta[2])*(pol_flux[0]-I_stokes) - pol_eff[0]*np.cos(-2.*theta[2]+2.*theta[0])*(pol_flux[1]-I_stokes)) + dI_dtheta1 = 2.*pol_eff[0]/A*(pol_eff[2]*np.cos(-2.*theta[2]+2.*theta[0])*(pol_flux[1]-I_stokes) - + pol_eff[1]*np.cos(-2.*theta[0]+2.*theta[1])*(pol_flux[2]-I_stokes)) + dI_dtheta2 = 2.*pol_eff[1]/A*(pol_eff[0]*np.cos(-2.*theta[0]+2.*theta[1])*(pol_flux[2]-I_stokes) - + 
pol_eff[2]*np.cos(-2.*theta[1]+2.*theta[2])*(pol_flux[0]-I_stokes)) + dI_dtheta3 = 2.*pol_eff[2]/A*(pol_eff[1]*np.cos(-2.*theta[1]+2.*theta[2])*(pol_flux[0]-I_stokes) - + pol_eff[0]*np.cos(-2.*theta[2]+2.*theta[0])*(pol_flux[1]-I_stokes)) dI_dtheta = np.array([dI_dtheta1, dI_dtheta2, dI_dtheta3]) - dQ_dtheta1 = 2.*pol_eff[0]/A*(np.cos(2.*theta[0])*(pol_flux[1]-pol_flux[2]) - (pol_eff[2]*np.cos(-2.*theta[2]+2.*theta[0]) - pol_eff[1]*np.cos(-2.*theta[0]+2.*theta[1]))*Q_stokes) - dQ_dtheta2 = 2.*pol_eff[1]/A*(np.cos(2.*theta[1])*(pol_flux[2]-pol_flux[0]) - (pol_eff[0]*np.cos(-2.*theta[0]+2.*theta[1]) - pol_eff[2]*np.cos(-2.*theta[1]+2.*theta[2]))*Q_stokes) - dQ_dtheta3 = 2.*pol_eff[2]/A*(np.cos(2.*theta[2])*(pol_flux[0]-pol_flux[1]) - (pol_eff[1]*np.cos(-2.*theta[1]+2.*theta[2]) - pol_eff[0]*np.cos(-2.*theta[2]+2.*theta[0]))*Q_stokes) + dQ_dtheta1 = 2.*pol_eff[0]/A*(np.cos(2.*theta[0])*(pol_flux[1]-pol_flux[2]) - (pol_eff[2]*np.cos(-2. * + theta[2]+2.*theta[0]) - pol_eff[1]*np.cos(-2.*theta[0]+2.*theta[1]))*Q_stokes) + dQ_dtheta2 = 2.*pol_eff[1]/A*(np.cos(2.*theta[1])*(pol_flux[2]-pol_flux[0]) - (pol_eff[0]*np.cos(-2. * + theta[0]+2.*theta[1]) - pol_eff[2]*np.cos(-2.*theta[1]+2.*theta[2]))*Q_stokes) + dQ_dtheta3 = 2.*pol_eff[2]/A*(np.cos(2.*theta[2])*(pol_flux[0]-pol_flux[1]) - (pol_eff[1]*np.cos(-2. * + theta[1]+2.*theta[2]) - pol_eff[0]*np.cos(-2.*theta[2]+2.*theta[0]))*Q_stokes) dQ_dtheta = np.array([dQ_dtheta1, dQ_dtheta2, dQ_dtheta3]) - dU_dtheta1 = 2.*pol_eff[0]/A*(np.sin(2.*theta[0])*(pol_flux[1]-pol_flux[2]) - (pol_eff[2]*np.cos(-2.*theta[2]+2.*theta[0]) - pol_eff[1]*np.cos(-2.*theta[0]+2.*theta[1]))*U_stokes) - dU_dtheta2 = 2.*pol_eff[1]/A*(np.sin(2.*theta[1])*(pol_flux[2]-pol_flux[0]) - (pol_eff[0]*np.cos(-2.*theta[0]+2.*theta[1]) - pol_eff[2]*np.cos(-2.*theta[1]+2.*theta[2]))*U_stokes) - dU_dtheta3 = 2.*pol_eff[2]/A*(np.sin(2.*theta[2])*(pol_flux[0]-pol_flux[1]) - (pol_eff[1]*np.cos(-2.*theta[1]+2.*theta[2]) - pol_eff[0]*np.cos(-2.*theta[2]+2.*theta[0]))*U_stokes) + dU_dtheta1 = 2.*pol_eff[0]/A*(np.sin(2.*theta[0])*(pol_flux[1]-pol_flux[2]) - (pol_eff[2]*np.cos(-2. * + theta[2]+2.*theta[0]) - pol_eff[1]*np.cos(-2.*theta[0]+2.*theta[1]))*U_stokes) + dU_dtheta2 = 2.*pol_eff[1]/A*(np.sin(2.*theta[1])*(pol_flux[2]-pol_flux[0]) - (pol_eff[0]*np.cos(-2. * + theta[0]+2.*theta[1]) - pol_eff[2]*np.cos(-2.*theta[1]+2.*theta[2]))*U_stokes) + dU_dtheta3 = 2.*pol_eff[2]/A*(np.sin(2.*theta[2])*(pol_flux[0]-pol_flux[1]) - (pol_eff[1]*np.cos(-2. 
* + theta[1]+2.*theta[2]) - pol_eff[0]*np.cos(-2.*theta[2]+2.*theta[0]))*U_stokes) dU_dtheta = np.array([dU_dtheta1, dU_dtheta2, dU_dtheta3]) # Compute the uncertainty associated with the polarizers' orientation (see Kishimoto 1999) - s_I2_axis = np.sum([dI_dtheta[i]**2 * sigma_theta[i]**2 for i in range(len(sigma_theta))],axis=0) - s_Q2_axis = np.sum([dQ_dtheta[i]**2 * sigma_theta[i]**2 for i in range(len(sigma_theta))],axis=0) - s_U2_axis = np.sum([dU_dtheta[i]**2 * sigma_theta[i]**2 for i in range(len(sigma_theta))],axis=0) - #np.savetxt("output/sI_dir.txt", np.sqrt(s_I2_axis)) - #np.savetxt("output/sQ_dir.txt", np.sqrt(s_Q2_axis)) - #np.savetxt("output/sU_dir.txt", np.sqrt(s_U2_axis)) + s_I2_axis = np.sum([dI_dtheta[i]**2 * sigma_theta[i]**2 for i in range(len(sigma_theta))], axis=0) + s_Q2_axis = np.sum([dQ_dtheta[i]**2 * sigma_theta[i]**2 for i in range(len(sigma_theta))], axis=0) + s_U2_axis = np.sum([dU_dtheta[i]**2 * sigma_theta[i]**2 for i in range(len(sigma_theta))], axis=0) + # np.savetxt("output/sI_dir.txt", np.sqrt(s_I2_axis)) + # np.savetxt("output/sQ_dir.txt", np.sqrt(s_Q2_axis)) + # np.savetxt("output/sU_dir.txt", np.sqrt(s_U2_axis)) # Add quadratically the uncertainty to the Stokes covariance matrix - Stokes_cov[0,0] += s_I2_axis + s_I2_stat - Stokes_cov[1,1] += s_Q2_axis + s_Q2_stat - Stokes_cov[2,2] += s_U2_axis + s_U2_stat + Stokes_cov[0, 0] += s_I2_axis + s_I2_stat + Stokes_cov[1, 1] += s_Q2_axis + s_Q2_stat + Stokes_cov[2, 2] += s_U2_axis + s_U2_stat - #Compute integrated values for P, PA before any rotation + # Compute integrated values for P, PA before any rotation mask = np.logical_and(data_mask.astype(bool), (I_stokes > 0.)) n_pix = I_stokes[mask].size I_diluted = I_stokes[mask].sum() Q_diluted = Q_stokes[mask].sum() U_diluted = U_stokes[mask].sum() - I_diluted_err = np.sqrt(np.sum(Stokes_cov[0,0][mask])) - Q_diluted_err = np.sqrt(np.sum(Stokes_cov[1,1][mask])) - U_diluted_err = np.sqrt(np.sum(Stokes_cov[2,2][mask])) - IQ_diluted_err = np.sqrt(np.sum(Stokes_cov[0,1][mask]**2)) - IU_diluted_err = np.sqrt(np.sum(Stokes_cov[0,2][mask]**2)) - QU_diluted_err = np.sqrt(np.sum(Stokes_cov[1,2][mask]**2)) + I_diluted_err = np.sqrt(np.sum(Stokes_cov[0, 0][mask])) + Q_diluted_err = np.sqrt(np.sum(Stokes_cov[1, 1][mask])) + U_diluted_err = np.sqrt(np.sum(Stokes_cov[2, 2][mask])) + IQ_diluted_err = np.sqrt(np.sum(Stokes_cov[0, 1][mask]**2)) + IU_diluted_err = np.sqrt(np.sum(Stokes_cov[0, 2][mask]**2)) + QU_diluted_err = np.sqrt(np.sum(Stokes_cov[1, 2][mask]**2)) P_diluted = np.sqrt(Q_diluted**2+U_diluted**2)/I_diluted - P_diluted_err = (1./I_diluted)*np.sqrt((Q_diluted**2*Q_diluted_err**2 + U_diluted**2*U_diluted_err**2 + 2.*Q_diluted*U_diluted*QU_diluted_err)/(Q_diluted**2 + U_diluted**2) + ((Q_diluted/I_diluted)**2 + (U_diluted/I_diluted)**2)*I_diluted_err**2 - 2.*(Q_diluted/I_diluted)*IQ_diluted_err - 2.*(U_diluted/I_diluted)*IU_diluted_err) + P_diluted_err = (1./I_diluted)*np.sqrt((Q_diluted**2*Q_diluted_err**2 + U_diluted**2*U_diluted_err**2 + 2.*Q_diluted*U_diluted*QU_diluted_err)/(Q_diluted**2 + U_diluted ** + 2) + ((Q_diluted/I_diluted)**2 + (U_diluted/I_diluted)**2)*I_diluted_err**2 - 2.*(Q_diluted/I_diluted)*IQ_diluted_err - 2.*(U_diluted/I_diluted)*IU_diluted_err) - PA_diluted = princ_angle((90./np.pi)*np.arctan2(U_diluted,Q_diluted)) - PA_diluted_err = (90./(np.pi*(Q_diluted**2 + U_diluted**2)))*np.sqrt(U_diluted**2*Q_diluted_err**2 + Q_diluted**2*U_diluted_err**2 - 2.*Q_diluted*U_diluted*QU_diluted_err) + PA_diluted = princ_angle((90./np.pi)*np.arctan2(U_diluted, 
Q_diluted)) + PA_diluted_err = (90./(np.pi*(Q_diluted**2 + U_diluted**2)))*np.sqrt(U_diluted**2*Q_diluted_err ** + 2 + Q_diluted**2*U_diluted_err**2 - 2.*Q_diluted*U_diluted*QU_diluted_err) for header in headers: header['P_int'] = (P_diluted, 'Integrated polarisation degree') @@ -1306,26 +1280,28 @@ def compute_pol(I_stokes, Q_stokes, U_stokes, Stokes_cov, headers): Updated list of headers corresponding to the reduced images accounting for the new orientation angle. """ - #Polarization degree and angle computation - mask = I_stokes>0. + # Polarization degree and angle computation + mask = I_stokes > 0. I_pol = np.zeros(I_stokes.shape) I_pol[mask] = np.sqrt(Q_stokes[mask]**2 + U_stokes[mask]**2) P = np.zeros(I_stokes.shape) P[mask] = I_pol[mask]/I_stokes[mask] PA = np.zeros(I_stokes.shape) - PA[mask] = (90./np.pi)*np.arctan2(U_stokes[mask],Q_stokes[mask]) + PA[mask] = (90./np.pi)*np.arctan2(U_stokes[mask], Q_stokes[mask]) - if (P>1).any(): - print("WARNING : found {0:d} pixels for which P > 1".format(P[P>1.].size)) + if (P > 1).any(): + print("WARNING : found {0:d} pixels for which P > 1".format(P[P > 1.].size)) - #Associated errors + # Associated errors fmax = np.finfo(np.float64).max s_P = np.ones(I_stokes.shape)*fmax s_PA = np.ones(I_stokes.shape)*fmax # Propagate previously computed errors - s_P[mask] = (1/I_stokes[mask])*np.sqrt((Q_stokes[mask]**2*Stokes_cov[1,1][mask] + U_stokes[mask]**2*Stokes_cov[2,2][mask] + 2.*Q_stokes[mask]*U_stokes[mask]*Stokes_cov[1,2][mask])/(Q_stokes[mask]**2 + U_stokes[mask]**2) + ((Q_stokes[mask]/I_stokes[mask])**2 + (U_stokes[mask]/I_stokes[mask])**2)*Stokes_cov[0,0][mask] - 2.*(Q_stokes[mask]/I_stokes[mask])*Stokes_cov[0,1][mask] - 2.*(U_stokes[mask]/I_stokes[mask])*Stokes_cov[0,2][mask]) - s_PA[mask] = (90./(np.pi*(Q_stokes[mask]**2 + U_stokes[mask]**2)))*np.sqrt(U_stokes[mask]**2*Stokes_cov[1,1][mask] + Q_stokes[mask]**2*Stokes_cov[2,2][mask] - 2.*Q_stokes[mask]*U_stokes[mask]*Stokes_cov[1,2][mask]) + s_P[mask] = (1/I_stokes[mask])*np.sqrt((Q_stokes[mask]**2*Stokes_cov[1, 1][mask] + U_stokes[mask]**2*Stokes_cov[2, 2][mask] + 2.*Q_stokes[mask]*U_stokes[mask]*Stokes_cov[1, 2][mask])/(Q_stokes[mask]**2 + U_stokes[mask]**2) + + ((Q_stokes[mask]/I_stokes[mask])**2 + (U_stokes[mask]/I_stokes[mask])**2)*Stokes_cov[0, 0][mask] - 2.*(Q_stokes[mask]/I_stokes[mask])*Stokes_cov[0, 1][mask] - 2.*(U_stokes[mask]/I_stokes[mask])*Stokes_cov[0, 2][mask]) + s_PA[mask] = (90./(np.pi*(Q_stokes[mask]**2 + U_stokes[mask]**2)))*np.sqrt(U_stokes[mask]**2*Stokes_cov[1, 1][mask] + + Q_stokes[mask]**2*Stokes_cov[2, 2][mask] - 2.*Q_stokes[mask]*U_stokes[mask]*Stokes_cov[1, 2][mask]) s_P[np.isnan(s_P)] = fmax s_PA[np.isnan(s_PA)] = fmax @@ -1335,16 +1311,16 @@ def compute_pol(I_stokes, Q_stokes, U_stokes, Stokes_cov, headers): debiased_P = np.zeros(I_stokes.shape) debiased_P[mask2] = np.sqrt(P[mask2]**2 - s_P[mask2]**2) - if (debiased_P>1.).any(): - print("WARNING : found {0:d} pixels for which debiased_P > 100%".format(debiased_P[debiased_P>1.].size)) + if (debiased_P > 1.).any(): + print("WARNING : found {0:d} pixels for which debiased_P > 100%".format(debiased_P[debiased_P > 1.].size)) - #Compute the total exposure time so that - #I_stokes*exp_tot = N_tot the total number of events + # Compute the total exposure time so that + # I_stokes*exp_tot = N_tot the total number of events exp_tot = np.array([header['exptime'] for header in headers]).sum() - #print("Total exposure time : {} sec".format(exp_tot)) + # print("Total exposure time : {} sec".format(exp_tot)) N_obs = 
I_stokes*exp_tot - #Errors on P, PA supposing Poisson noise + # Errors on P, PA supposing Poisson noise s_P_P = np.ones(I_stokes.shape)*fmax s_P_P[mask] = np.sqrt(2.)/np.sqrt(N_obs[mask])*100. s_PA_P = np.ones(I_stokes.shape)*fmax @@ -1361,8 +1337,7 @@ def compute_pol(I_stokes, Q_stokes, U_stokes, Stokes_cov, headers): return P, debiased_P, s_P, s_P_P, PA, s_PA, s_PA_P -def rotate_Stokes(I_stokes, Q_stokes, U_stokes, Stokes_cov, data_mask, headers, - ang=None, SNRi_cut=None): +def rotate_Stokes(I_stokes, Q_stokes, U_stokes, Stokes_cov, data_mask, headers, ang=None, SNRi_cut=None): """ Use scipy.ndimage.rotate to rotate I_stokes to an angle, and a rotation matrix to rotate Q, U of a given angle in degrees and update header @@ -1411,22 +1386,22 @@ def rotate_Stokes(I_stokes, Q_stokes, U_stokes, Stokes_cov, data_mask, headers, new_data_mask : numpy.ndarray Updated 2D boolean array delimiting the data to work on. """ - #Apply cuts - if not(SNRi_cut is None): - SNRi = I_stokes/np.sqrt(Stokes_cov[0,0]) + # Apply cuts + if SNRi_cut is not None: + SNRi = I_stokes/np.sqrt(Stokes_cov[0, 0]) mask = SNRi < SNRi_cut eps = 1e-5 for i in range(I_stokes.shape[0]): for j in range(I_stokes.shape[1]): - if mask[i,j]: - I_stokes[i,j] = eps*np.sqrt(Stokes_cov[0,0][i,j]) - Q_stokes[i,j] = eps*np.sqrt(Stokes_cov[1,1][i,j]) - U_stokes[i,j] = eps*np.sqrt(Stokes_cov[2,2][i,j]) + if mask[i, j]: + I_stokes[i, j] = eps*np.sqrt(Stokes_cov[0, 0][i, j]) + Q_stokes[i, j] = eps*np.sqrt(Stokes_cov[1, 1][i, j]) + U_stokes[i, j] = eps*np.sqrt(Stokes_cov[2, 2][i, j]) - #Rotate I_stokes, Q_stokes, U_stokes using rotation matrix + # Rotate I_stokes, Q_stokes, U_stokes using rotation matrix if ang is None: ang = np.zeros((len(headers),)) - for i,head in enumerate(headers): + for i, head in enumerate(headers): ang[i] = -head['orientat'] ang = ang.mean() alpha = np.radians(ang) @@ -1442,13 +1417,13 @@ def rotate_Stokes(I_stokes, Q_stokes, U_stokes, Stokes_cov, data_mask, headers, Q_stokes = zeropad(Q_stokes, shape) U_stokes = zeropad(U_stokes, shape) data_mask = zeropad(data_mask, shape) - Stokes_cov = zeropad(Stokes_cov, [*Stokes_cov.shape[:-2],*shape]) + Stokes_cov = zeropad(Stokes_cov, [*Stokes_cov.shape[:-2], *shape]) new_I_stokes = np.zeros(shape) new_Q_stokes = np.zeros(shape) new_U_stokes = np.zeros(shape) - new_Stokes_cov = np.zeros((*Stokes_cov.shape[:-2],*shape)) + new_Stokes_cov = np.zeros((*Stokes_cov.shape[:-2], *shape)) - #Rotate original images using scipy.ndimage.rotate + # Rotate original images using scipy.ndimage.rotate new_I_stokes = sc_rotate(I_stokes, ang, order=1, reshape=False, cval=0.) new_Q_stokes = sc_rotate(Q_stokes, ang, order=1, reshape=False, cval=0.) new_U_stokes = sc_rotate(U_stokes, ang, order=1, reshape=False, cval=0.) @@ -1457,15 +1432,15 @@ def rotate_Stokes(I_stokes, Q_stokes, U_stokes, Stokes_cov, data_mask, headers, new_data_mask = new_data_mask.astype(bool) for i in range(3): for j in range(3): - new_Stokes_cov[i,j] = sc_rotate(Stokes_cov[i,j], ang, order=1, reshape=False, cval=0.) - new_Stokes_cov[i,i] = np.abs(new_Stokes_cov[i,i]) + new_Stokes_cov[i, j] = sc_rotate(Stokes_cov[i, j], ang, order=1, reshape=False, cval=0.) 
+ new_Stokes_cov[i, i] = np.abs(new_Stokes_cov[i, i]) for i in range(shape[0]): for j in range(shape[1]): - new_I_stokes[i,j], new_Q_stokes[i,j], new_U_stokes[i,j] = np.dot(mrot, np.array([new_I_stokes[i,j], new_Q_stokes[i,j], new_U_stokes[i,j]])).T - new_Stokes_cov[:,:,i,j] = np.dot(mrot, np.dot(new_Stokes_cov[:,:,i,j], mrot.T)) + new_I_stokes[i, j], new_Q_stokes[i, j], new_U_stokes[i, j] = np.dot(mrot, np.array([new_I_stokes[i, j], new_Q_stokes[i, j], new_U_stokes[i, j]])).T + new_Stokes_cov[:, :, i, j] = np.dot(mrot, np.dot(new_Stokes_cov[:, :, i, j], mrot.T)) - #Update headers to new angle + # Update headers to new angle new_headers = [] mrot = np.array([[np.cos(-alpha), -np.sin(-alpha)], [np.sin(-alpha), np.cos(-alpha)]]) @@ -1478,11 +1453,11 @@ def rotate_Stokes(I_stokes, Q_stokes, U_stokes, Stokes_cov, data_mask, headers, new_wcs.wcs.crpix = np.dot(mrot, new_wcs.wcs.crpix - old_center[::-1]) + new_center[::-1] new_wcs.wcs.set() for key, val in new_wcs.to_header().items(): - new_header.set(key,val) - if new_wcs.wcs.pc[0,0] == 1.: - new_header.set('PC1_1',1.) - if new_wcs.wcs.pc[1,1] == 1.: - new_header.set('PC2_2',1.) + new_header.set(key, val) + if new_wcs.wcs.pc[0, 0] == 1.: + new_header.set('PC1_1', 1.) + if new_wcs.wcs.pc[1, 1] == 1.: + new_header.set('PC2_2', 1.) new_headers.append(new_header) @@ -1496,24 +1471,26 @@ def rotate_Stokes(I_stokes, Q_stokes, U_stokes, Stokes_cov, data_mask, headers, new_U_stokes[np.isnan(new_U_stokes)] = 0. new_Stokes_cov[np.isnan(new_Stokes_cov)] = fmax - #Compute updated integrated values for P, PA + # Compute updated integrated values for P, PA mask = deepcopy(new_data_mask).astype(bool) n_pix = new_I_stokes[mask].size I_diluted = new_I_stokes[mask].sum() Q_diluted = new_Q_stokes[mask].sum() U_diluted = new_U_stokes[mask].sum() - I_diluted_err = np.sqrt(np.sum(new_Stokes_cov[0,0][mask])) - Q_diluted_err = np.sqrt(np.sum(new_Stokes_cov[1,1][mask])) - U_diluted_err = np.sqrt(np.sum(new_Stokes_cov[2,2][mask])) - IQ_diluted_err = np.sqrt(np.sum(new_Stokes_cov[0,1][mask]**2)) - IU_diluted_err = np.sqrt(np.sum(new_Stokes_cov[0,2][mask]**2)) - QU_diluted_err = np.sqrt(np.sum(new_Stokes_cov[1,2][mask]**2)) + I_diluted_err = np.sqrt(np.sum(new_Stokes_cov[0, 0][mask])) + Q_diluted_err = np.sqrt(np.sum(new_Stokes_cov[1, 1][mask])) + U_diluted_err = np.sqrt(np.sum(new_Stokes_cov[2, 2][mask])) + IQ_diluted_err = np.sqrt(np.sum(new_Stokes_cov[0, 1][mask]**2)) + IU_diluted_err = np.sqrt(np.sum(new_Stokes_cov[0, 2][mask]**2)) + QU_diluted_err = np.sqrt(np.sum(new_Stokes_cov[1, 2][mask]**2)) P_diluted = np.sqrt(Q_diluted**2+U_diluted**2)/I_diluted - P_diluted_err = (1./I_diluted)*np.sqrt((Q_diluted**2*Q_diluted_err**2 + U_diluted**2*U_diluted_err**2 + 2.*Q_diluted*U_diluted*QU_diluted_err)/(Q_diluted**2 + U_diluted**2) + ((Q_diluted/I_diluted)**2 + (U_diluted/I_diluted)**2)*I_diluted_err**2 - 2.*(Q_diluted/I_diluted)*IQ_diluted_err - 2.*(U_diluted/I_diluted)*IU_diluted_err) + P_diluted_err = (1./I_diluted)*np.sqrt((Q_diluted**2*Q_diluted_err**2 + U_diluted**2*U_diluted_err**2 + 2.*Q_diluted*U_diluted*QU_diluted_err)/(Q_diluted**2 + U_diluted ** + 2) + ((Q_diluted/I_diluted)**2 + (U_diluted/I_diluted)**2)*I_diluted_err**2 - 2.*(Q_diluted/I_diluted)*IQ_diluted_err - 2.*(U_diluted/I_diluted)*IU_diluted_err) - PA_diluted = princ_angle((90./np.pi)*np.arctan2(U_diluted,Q_diluted)) - PA_diluted_err = (90./(np.pi*(Q_diluted**2 + U_diluted**2)))*np.sqrt(U_diluted**2*Q_diluted_err**2 + Q_diluted**2*U_diluted_err**2 - 2.*Q_diluted*U_diluted*QU_diluted_err) + PA_diluted = 
princ_angle((90./np.pi)*np.arctan2(U_diluted, Q_diluted)) + PA_diluted_err = (90./(np.pi*(Q_diluted**2 + U_diluted**2)))*np.sqrt(U_diluted**2*Q_diluted_err ** + 2 + Q_diluted**2*U_diluted_err**2 - 2.*Q_diluted*U_diluted*QU_diluted_err) for header in new_headers: header['P_int'] = (P_diluted, 'Integrated polarisation degree') @@ -1521,7 +1498,6 @@ def rotate_Stokes(I_stokes, Q_stokes, U_stokes, Stokes_cov, data_mask, headers, header['PA_int'] = (PA_diluted, 'Integrated polarisation angle') header['PA_int_err'] = (np.ceil(PA_diluted_err*10.)/10., 'Integrated polarisation angle error') - return new_I_stokes, new_Q_stokes, new_U_stokes, new_Stokes_cov, new_data_mask, new_headers @@ -1555,24 +1531,22 @@ def rotate_data(data_array, error_array, data_mask, headers, ang): Updated list of headers corresponding to the reduced images accounting for the new orientation angle. """ - #Rotate I_stokes, Q_stokes, U_stokes using rotation matrix + # Rotate the data and error arrays to the given angle alpha = ang*np.pi/180. old_center = np.array(data_array[0].shape)/2 shape = np.fix(np.array(data_array[0].shape)*np.sqrt(2.5)).astype(int) new_center = np.array(shape)/2 - data_array = zeropad(data_array, [data_array.shape[0],*shape]) - error_array = zeropad(error_array, [error_array.shape[0],*shape]) + data_array = zeropad(data_array, [data_array.shape[0], *shape]) + error_array = zeropad(error_array, [error_array.shape[0], *shape]) data_mask = zeropad(data_mask, shape) - #Rotate original images using scipy.ndimage.rotate + # Rotate original images using scipy.ndimage.rotate new_data_array = [] new_error_array = [] for i in range(data_array.shape[0]): - new_data_array.append(sc_rotate(data_array[i], ang, order=1, reshape=False, - cval=0.)) - new_error_array.append(sc_rotate(error_array[i], ang, order=1, reshape=False, - cval=0.)) + new_data_array.append(sc_rotate(data_array[i], ang, order=1, reshape=False, cval=0.)) + new_error_array.append(sc_rotate(error_array[i], ang, order=1, reshape=False, cval=0.)) new_data_array = np.array(new_data_array) new_error_array = np.array(new_error_array) new_data_mask = sc_rotate(data_mask*10., ang, order=1, reshape=False, cval=0.) @@ -1582,17 +1556,16 @@ def rotate_data(data_array, error_array, data_mask, headers, ang): for i in range(new_data_array.shape[0]): new_data_array[i][new_data_array[i] < 0.] = 0.
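`zeropad` is another library helper; from its call sites above it appears to centre an array inside a zero-filled array of the requested shape. A minimal sketch under that assumption:

    import numpy as np

    def zeropad_sketch(arr, shape):
        # Centre arr inside a zero array of the target shape; leading axes
        # whose sizes already match (e.g. the image index) stay in place.
        out = np.zeros(shape, dtype=arr.dtype)
        start = [(s - a) // 2 for s, a in zip(shape, arr.shape)]
        sl = tuple(slice(st, st + a) for st, a in zip(start, arr.shape))
        out[sl] = arr
        return out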
- #Update headers to new angle + # Update headers to new angle new_headers = [] - mrot = np.array([[np.cos(-alpha), -np.sin(-alpha)], - [np.sin(-alpha), np.cos(-alpha)]]) + mrot = np.array([[np.cos(-alpha), -np.sin(-alpha)], [np.sin(-alpha), np.cos(-alpha)]]) for header in headers: new_header = deepcopy(header) new_header['orientat'] = header['orientat'] + ang new_wcs = WCS(header).deepcopy() - new_wcs.wcs.pc[:2,:2] = np.dot(mrot, new_wcs.wcs.pc[:2,:2]) + new_wcs.wcs.pc[:2, :2] = np.dot(mrot, new_wcs.wcs.pc[:2, :2]) new_wcs.wcs.crpix[:2] = np.dot(mrot, new_wcs.wcs.crpix[:2] - old_center[::-1]) + new_center[::-1] new_wcs.wcs.set() for key, val in new_wcs.to_header().items(): diff --git a/src/overplot_IC5063.py b/src/overplot_IC5063.py index 67c033a..61d7962 100755 --- a/src/overplot_IC5063.py +++ b/src/overplot_IC5063.py @@ -7,65 +7,66 @@ from lib.plots import overplot_radio, overplot_pol, align_pol from matplotlib.colors import LogNorm Stokes_UV = fits.open("./data/IC5063/5918/IC5063_FOC_b0.10arcsec_c0.20arcsec.fits") -#Stokes_18GHz = fits.open("./data/IC5063/radio/IC5063_18GHz.fits") -#Stokes_24GHz = fits.open("./data/IC5063/radio/IC5063_24GHz.fits") -#Stokes_103GHz = fits.open("./data/IC5063/radio/IC5063_103GHz.fits") -#Stokes_229GHz = fits.open("./data/IC5063/radio/IC5063_229GHz.fits") -#Stokes_357GHz = fits.open("./data/IC5063/radio/IC5063_357GHz.fits") -#Stokes_S2 = fits.open("./data/IC5063/POLARIZATION_COMPARISON/S2_rot_crop.fits") +# Stokes_18GHz = fits.open("./data/IC5063/radio/IC5063_18GHz.fits") +# Stokes_24GHz = fits.open("./data/IC5063/radio/IC5063_24GHz.fits") +# Stokes_103GHz = fits.open("./data/IC5063/radio/IC5063_103GHz.fits") +# Stokes_229GHz = fits.open("./data/IC5063/radio/IC5063_229GHz.fits") +# Stokes_357GHz = fits.open("./data/IC5063/radio/IC5063_357GHz.fits") +# Stokes_S2 = fits.open("./data/IC5063/POLARIZATION_COMPARISON/S2_rot_crop.fits") Stokes_IR = fits.open("./data/IC5063/IR/u2e65g01t_c0f_rot.fits") -##levelsMorganti = np.array([1.,2.,3.,8.,16.,32.,64.,128.]) -#levelsMorganti = np.logspace(0.,1.97,5)/100. +# levelsMorganti = np.array([1.,2.,3.,8.,16.,32.,64.,128.]) +# levelsMorganti = np.logspace(0.,1.97,5)/100. 
# -#levels18GHz = levelsMorganti*Stokes_18GHz[0].data.max() -#A = overplot_radio(Stokes_UV, Stokes_18GHz) -#A.plot(levels=levels18GHz, SNRp_cut=2.0, SNRi_cut=10.0, savename='./plots/IC5063/18GHz_overplot_forced.pdf',vec_scale=None) +# levels18GHz = levelsMorganti*Stokes_18GHz[0].data.max() +# A = overplot_radio(Stokes_UV, Stokes_18GHz) +# A.plot(levels=levels18GHz, SNRp_cut=2.0, SNRi_cut=10.0, savename='./plots/IC5063/18GHz_overplot_forced.pdf',vec_scale=None) ## -#levels24GHz = levelsMorganti*Stokes_24GHz[0].data.max() -#B = overplot_radio(Stokes_UV, Stokes_24GHz) -#B.plot(levels=levels24GHz, SNRp_cut=2.0, SNRi_cut=10.0, savename='./plots/IC5063/24GHz_overplot_forced.pdf',vec_scale=None) +# levels24GHz = levelsMorganti*Stokes_24GHz[0].data.max() +# B = overplot_radio(Stokes_UV, Stokes_24GHz) +# B.plot(levels=levels24GHz, SNRp_cut=2.0, SNRi_cut=10.0, savename='./plots/IC5063/24GHz_overplot_forced.pdf',vec_scale=None) ## -#levels103GHz = levelsMorganti*Stokes_103GHz[0].data.max() -#C = overplot_radio(Stokes_UV, Stokes_103GHz) -#C.plot(levels=levels103GHz, SNRp_cut=2.0, SNRi_cut=10.0, savename='./plots/IC5063/103GHz_overplot_forced.pdf',vec_scale=None) +# levels103GHz = levelsMorganti*Stokes_103GHz[0].data.max() +# C = overplot_radio(Stokes_UV, Stokes_103GHz) +# C.plot(levels=levels103GHz, SNRp_cut=2.0, SNRi_cut=10.0, savename='./plots/IC5063/103GHz_overplot_forced.pdf',vec_scale=None) ## -#levels229GHz = levelsMorganti*Stokes_229GHz[0].data.max() -#D = overplot_radio(Stokes_UV, Stokes_229GHz) -#D.plot(levels=levels229GHz, SNRp_cut=2.0, SNRi_cut=10.0, savename='./plots/IC5063/229GHz_overplot_forced.pdf',vec_scale=None) +# levels229GHz = levelsMorganti*Stokes_229GHz[0].data.max() +# D = overplot_radio(Stokes_UV, Stokes_229GHz) +# D.plot(levels=levels229GHz, SNRp_cut=2.0, SNRi_cut=10.0, savename='./plots/IC5063/229GHz_overplot_forced.pdf',vec_scale=None) ## -#levels357GHz = levelsMorganti*Stokes_357GHz[0].data.max() -#E = overplot_radio(Stokes_UV, Stokes_357GHz) -#E.plot(levels=levels357GHz, SNRp_cut=2.0, SNRi_cut=10.0, savename='./plots/IC5063/357GHz_overplot_forced.pdf',vec_scale=None) +# levels357GHz = levelsMorganti*Stokes_357GHz[0].data.max() +# E = overplot_radio(Stokes_UV, Stokes_357GHz) +# E.plot(levels=levels357GHz, SNRp_cut=2.0, SNRi_cut=10.0, savename='./plots/IC5063/357GHz_overplot_forced.pdf',vec_scale=None) ## -#F = overplot_pol(Stokes_UV, Stokes_S2) -#F.plot(SNRp_cut=3.0, SNRi_cut=80.0, savename='./plots/IC5063/S2_overplot_forced.pdf', norm=LogNorm(vmin=5e-20,vmax=5e-18)) +# F = overplot_pol(Stokes_UV, Stokes_S2) +# F.plot(SNRp_cut=3.0, SNRi_cut=80.0, savename='./plots/IC5063/S2_overplot_forced.pdf', norm=LogNorm(vmin=5e-20,vmax=5e-18)) G = overplot_pol(Stokes_UV, Stokes_IR, cmap='inferno') -G.plot(SNRp_cut=2.0, SNRi_cut=10.0, savename='./plots/IC5063/IR_overplot_forced.pdf',vec_scale=None,norm=LogNorm(Stokes_IR[0].data.max()*Stokes_IR[0].header['photflam']/1e3,Stokes_IR[0].data.max()*Stokes_IR[0].header['photflam']),cmap='inferno_r') +G.plot(SNRp_cut=2.0, SNRi_cut=10.0, savename='./plots/IC5063/IR_overplot_forced.pdf', vec_scale=None, + norm=LogNorm(Stokes_IR[0].data.max()*Stokes_IR[0].header['photflam']/1e3, Stokes_IR[0].data.max()*Stokes_IR[0].header['photflam']), cmap='inferno_r') -#data_folder1 = "./data/M87/POS1/" -#plots_folder1 = "./plots/M87/POS1/" -#basename1 = "M87_020_log" -#M87_1_95 = fits.open(data_folder1+"M87_POS1_1995_FOC_combine_FWHM020.fits") -#M87_1_96 = fits.open(data_folder1+"M87_POS1_1996_FOC_combine_FWHM020.fits") -#M87_1_97 = 
fits.open(data_folder1+"M87_POS1_1997_FOC_combine_FWHM020.fits") -#M87_1_98 = fits.open(data_folder1+"M87_POS1_1998_FOC_combine_FWHM020.fits") -#M87_1_99 = fits.open(data_folder1+"M87_POS1_1999_FOC_combine_FWHM020.fits") +# data_folder1 = "./data/M87/POS1/" +# plots_folder1 = "./plots/M87/POS1/" +# basename1 = "M87_020_log" +# M87_1_95 = fits.open(data_folder1+"M87_POS1_1995_FOC_combine_FWHM020.fits") +# M87_1_96 = fits.open(data_folder1+"M87_POS1_1996_FOC_combine_FWHM020.fits") +# M87_1_97 = fits.open(data_folder1+"M87_POS1_1997_FOC_combine_FWHM020.fits") +# M87_1_98 = fits.open(data_folder1+"M87_POS1_1998_FOC_combine_FWHM020.fits") +# M87_1_99 = fits.open(data_folder1+"M87_POS1_1999_FOC_combine_FWHM020.fits") -#H = align_pol(np.array([M87_1_95,M87_1_96,M87_1_97,M87_1_98,M87_1_99]), norm=LogNorm()) -#H.plot(SNRp_cut=5.0, SNRi_cut=50.0, savename=plots_folder1+'animated_loop/'+basename1, norm=LogNorm()) -#command("convert -delay 50 -loop 0 {0:s}animated_loop/{1:s}*.pdf {0:s}animated_loop/{1:s}.gif".format(plots_folder1, basename1)) +# H = align_pol(np.array([M87_1_95,M87_1_96,M87_1_97,M87_1_98,M87_1_99]), norm=LogNorm()) +# H.plot(SNRp_cut=5.0, SNRi_cut=50.0, savename=plots_folder1+'animated_loop/'+basename1, norm=LogNorm()) +# command("convert -delay 50 -loop 0 {0:s}animated_loop/{1:s}*.pdf {0:s}animated_loop/{1:s}.gif".format(plots_folder1, basename1)) -#data_folder3 = "./data/M87/POS3/" -#plots_folder3 = "./plots/M87/POS3/" -#basename3 = "M87_020_log" -#M87_3_95 = fits.open(data_folder3+"M87_POS3_1995_FOC_combine_FWHM020.fits") -#M87_3_96 = fits.open(data_folder3+"M87_POS3_1996_FOC_combine_FWHM020.fits") -#M87_3_97 = fits.open(data_folder3+"M87_POS3_1997_FOC_combine_FWHM020.fits") -#M87_3_98 = fits.open(data_folder3+"M87_POS3_1998_FOC_combine_FWHM020.fits") -#M87_3_99 = fits.open(data_folder3+"M87_POS3_1999_FOC_combine_FWHM020.fits") +# data_folder3 = "./data/M87/POS3/" +# plots_folder3 = "./plots/M87/POS3/" +# basename3 = "M87_020_log" +# M87_3_95 = fits.open(data_folder3+"M87_POS3_1995_FOC_combine_FWHM020.fits") +# M87_3_96 = fits.open(data_folder3+"M87_POS3_1996_FOC_combine_FWHM020.fits") +# M87_3_97 = fits.open(data_folder3+"M87_POS3_1997_FOC_combine_FWHM020.fits") +# M87_3_98 = fits.open(data_folder3+"M87_POS3_1998_FOC_combine_FWHM020.fits") +# M87_3_99 = fits.open(data_folder3+"M87_POS3_1999_FOC_combine_FWHM020.fits") -#I = align_pol(np.array([M87_3_95,M87_3_96,M87_3_97,M87_3_98,M87_3_99]), norm=LogNorm()) -#I.plot(SNRp_cut=5.0, SNRi_cut=50.0, savename=plots_folder3+'animated_loop/'+basename3, norm=LogNorm()) -#command("convert -delay 20 -loop 0 {0:s}animated_loop/{1:s}*.pdf {0:s}animated_loop/{1:s}.gif".format(plots_folder3, basename3)) +# I = align_pol(np.array([M87_3_95,M87_3_96,M87_3_97,M87_3_98,M87_3_99]), norm=LogNorm()) +# I.plot(SNRp_cut=5.0, SNRi_cut=50.0, savename=plots_folder3+'animated_loop/'+basename3, norm=LogNorm()) +# command("convert -delay 20 -loop 0 {0:s}animated_loop/{1:s}*.pdf {0:s}animated_loop/{1:s}.gif".format(plots_folder3, basename3)) diff --git a/src/overplot_MRK463E.py b/src/overplot_MRK463E.py index c94f0ea..905881e 100755 --- a/src/overplot_MRK463E.py +++ b/src/overplot_MRK463E.py @@ -1,23 +1,23 @@ #!/usr/bin/python3 from astropy.io import fits import numpy as np -from lib.plots import overplot_chandra, overplot_pol, align_pol +from lib.plots import overplot_chandra, overplot_pol from matplotlib.colors import LogNorm Stokes_UV = fits.open("./data/MRK463E/5960/MRK463E_FOC_b0.05arcsec_c0.10arcsec.fits") Stokes_IR = 
fits.open("./data/MRK463E/WFPC2/IR_rot_crop.fits") Stokes_Xr = fits.open("./data/MRK463E/Chandra/4913/primary/acisf04913N004_cntr_img2.fits") -levels = np.geomspace(1.,99.,10) +levels = np.geomspace(1., 99., 10) -#A = overplot_chandra(Stokes_UV, Stokes_Xr) -#A.plot(levels=levels, SNRp_cut=3.0, SNRi_cut=20.0, zoom=1, savename='./plots/MRK463E/Chandra_overplot.pdf') +# A = overplot_chandra(Stokes_UV, Stokes_Xr) +# A.plot(levels=levels, SNRp_cut=3.0, SNRi_cut=20.0, zoom=1, savename='./plots/MRK463E/Chandra_overplot.pdf') -#B = overplot_chandra(Stokes_UV, Stokes_Xr, norm=LogNorm()) -#B.plot(levels=levels, SNRp_cut=3.0, SNRi_cut=20.0, zoom=1, savename='./plots/MRK463E/Chandra_overplot_forced.pdf') +B = overplot_chandra(Stokes_UV, Stokes_Xr, norm=LogNorm()) +B.plot(levels=levels, SNRp_cut=3.0, SNRi_cut=20.0, zoom=1, savename='./plots/MRK463E/Chandra_overplot_forced.pdf') -#C = overplot_pol(Stokes_UV, Stokes_IR) -#C.plot(SNRp_cut=3.0, SNRi_cut=20.0, savename='./plots/MRK463E/IR_overplot.pdf') +# C = overplot_pol(Stokes_UV, Stokes_IR) +# C.plot(SNRp_cut=3.0, SNRi_cut=20.0, savename='./plots/MRK463E/IR_overplot.pdf') D = overplot_pol(Stokes_UV, Stokes_IR, norm=LogNorm()) -D.plot(SNRp_cut=3.0, SNRi_cut=30.0, vec_scale=2, norm=LogNorm(1e-18,1e-15), savename='./plots/MRK463E/IR_overplot_forced.pdf') +D.plot(SNRp_cut=3.0, SNRi_cut=30.0, vec_scale=2, norm=LogNorm(1e-18, 1e-15), savename='./plots/MRK463E/IR_overplot_forced.pdf')