diff --git a/package/Combine.py b/package/Combine.py new file mode 100755 index 0000000..b3871f1 --- /dev/null +++ b/package/Combine.py @@ -0,0 +1,237 @@ +#!/usr/bin/python +# -*- coding:utf-8 -*- +# Project libraries + +import numpy as np + + +def same_reduction(infiles): + """ + Test whether infiles are pipeline products reduced with the same parameters. + """ + from astropy.io.fits import open as fits_open + from astropy.wcs import WCS + + params = {"IQU": [], "ROT": [], "SIZE": [], "TARGNAME": [], "BKG_SUB": [], "SAMPLING": [], "SMOOTH": []} + for file in infiles: + with fits_open(file) as f: + # test for presence of I, Q, U images + datatype = [] + for hdu in f: + try: + datatype.append(hdu.header["datatype"]) + except KeyError: + pass + test_IQU = True + for look in ["I_stokes", "Q_stokes", "U_stokes", "IQU_cov_matrix"]: + test_IQU *= look in datatype + params["IQU"].append(test_IQU) + # test for orientation and pixel size + wcs = WCS(f[0].header).celestial + # only read wcs.wcs.cd when it is present: a PC-only WCS with unit cdelt has no cd attribute + if wcs.wcs.has_cd(): + cdelt = np.linalg.eig(wcs.wcs.cd)[0] + pc = np.dot(wcs.wcs.cd, np.diag(1.0 / cdelt)) + elif (wcs.wcs.cdelt[:2] == np.array([1.0, 1.0])).all(): + cdelt = np.linalg.eig(wcs.wcs.pc)[0] + pc = np.dot(wcs.wcs.pc, np.diag(1.0 / cdelt)) + else: + cdelt = wcs.wcs.cdelt + pc = wcs.wcs.pc + params["ROT"].append(np.round(np.arccos(pc[0, 0]), 2) if np.abs(pc[0, 0]) < 1.0 else 0.0) + params["SIZE"].append(np.round(np.max(np.abs(cdelt * 3600.0)), 2)) + # look for information on reduction procedure + for key in [k for k in params.keys() if k not in ["IQU", "ROT", "SIZE"]]: + try: + params[key].append(f[0].header[key]) + except KeyError: + params[key].append("null") + result = np.all(params["IQU"]) + for key in [k for k in params.keys() if k != "IQU"]: + result *= np.unique(params[key]).size == 1 + if np.all(params["IQU"]) and not result: + print(np.unique(params["SIZE"])) + raise ValueError("Not all observations were reduced with the same parameters, please provide the raw files.") + + return result + + +def same_obs(infiles, data_folder): + """ + Group infiles by observation: same proposal ID and exposure start times within a week. + """ + + import astropy.units as u + from astropy.io.fits import getheader + from astropy.table import Table + from astropy.time import Time, TimeDelta + + headers = [getheader("/".join([data_folder, file])) for file in infiles] + files = {} + files["PROPOSID"] = np.array([str(head["PROPOSID"]) for head in headers], dtype=str) + files["ROOTNAME"] = np.array([head["ROOTNAME"].lower() + "_c0f.fits" for head in headers], dtype=str) + files["EXPSTART"] = np.array([Time(head["EXPSTART"], format="mjd") for head in headers]) + products = Table(files) + + new_infiles = [] + for pid in np.unique(products["PROPOSID"]): + obs = products[products["PROPOSID"] == pid].copy() + close_date = np.unique( + [[np.abs(TimeDelta(obs["EXPSTART"][i].unix - date.unix, format="sec")) < 7.0 * u.d for i in range(len(obs))] for date in obs["EXPSTART"]], axis=0 + ) + if len(close_date) > 1: + for date in close_date: + new_infiles.append(list(products["ROOTNAME"][np.any([products["ROOTNAME"] == dataset for dataset in obs["ROOTNAME"][date]], axis=0)])) + else: + new_infiles.append(list(products["ROOTNAME"][products["PROPOSID"] == pid])) + return new_infiles + + +def combine_Stokes(infiles): + """ + Combine I, Q, U from different observations of the same object.
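+
+    Notes
+    -----
+    Images are zero-padded to a common shape and re-aligned, then combined
+    as exposure-time weighted means, e.g. I = sum_k(t_k * I_k) / sum_k(t_k);
+    the Stokes variances are propagated with the same weights squared.
+
+    Example (hypothetical file names):
+    >>> I, Q, U, cov, mask, head = combine_Stokes(["obs1_FOC.fits", "obs2_FOC.fits"])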
+ """ + from astropy.io.fits import open as fits_open + from lib.reduction import align_data, zeropad + from scipy.ndimage import shift as sc_shift + + I_array, Q_array, U_array, IQU_cov_array, data_mask, headers = [], [], [], [], [], [] + shape = np.array([0, 0]) + for file in infiles: + with fits_open(file) as f: + headers.append(f[0].header) + I_array.append(f["I_stokes"].data) + Q_array.append(f["Q_stokes"].data) + U_array.append(f["U_stokes"].data) + IQU_cov_array.append(f["IQU_cov_matrix"].data) + data_mask.append(f["data_mask"].data.astype(bool)) + shape[0] = np.max([shape[0], f["I_stokes"].data.shape[0]]) + shape[1] = np.max([shape[1], f["I_stokes"].data.shape[1]]) + + exposure_array = np.array([float(head["EXPTIME"]) for head in headers]) + + shape += np.array([5, 5]) + data_mask = np.sum([zeropad(mask, shape) for mask in data_mask], axis=0).astype(bool) + I_array = np.array([zeropad(I, shape) for I in I_array]) + Q_array = np.array([zeropad(Q, shape) for Q in Q_array]) + U_array = np.array([zeropad(U, shape) for U in U_array]) + IQU_cov_array = np.array([[[zeropad(cov[i, j], shape) for j in range(3)] for i in range(3)] for cov in IQU_cov_array]) + + sI_array = np.sqrt(IQU_cov_array[:, 0, 0]) + sQ_array = np.sqrt(IQU_cov_array[:, 1, 1]) + sU_array = np.sqrt(IQU_cov_array[:, 2, 2]) + + _, _, _, _, shifts, errors = align_data(I_array, headers, error_array=sI_array, data_mask=data_mask, ref_center="center", return_shifts=True) + data_mask_aligned = np.sum([sc_shift(data_mask, s, order=1, cval=0.0) for s in shifts], axis=0).astype(bool) + I_aligned, sI_aligned = ( + np.array([sc_shift(I, s, order=1, cval=0.0) for I, s in zip(I_array, shifts)]), + np.array([sc_shift(sI, s, order=1, cval=0.0) for sI, s in zip(sI_array, shifts)]), + ) + Q_aligned, sQ_aligned = ( + np.array([sc_shift(Q, s, order=1, cval=0.0) for Q, s in zip(Q_array, shifts)]), + np.array([sc_shift(sQ, s, order=1, cval=0.0) for sQ, s in zip(sQ_array, shifts)]), + ) + U_aligned, sU_aligned = ( + np.array([sc_shift(U, s, order=1, cval=0.0) for U, s in zip(U_array, shifts)]), + np.array([sc_shift(sU, s, order=1, cval=0.0) for sU, s in zip(sU_array, shifts)]), + ) + IQU_cov_aligned = np.array([[[sc_shift(cov[i, j], s, order=1, cval=0.0) for j in range(3)] for i in range(3)] for cov, s in zip(IQU_cov_array, shifts)]) + + I_combined = np.sum([exp * I for exp, I in zip(exposure_array, I_aligned)], axis=0) / exposure_array.sum() + Q_combined = np.sum([exp * Q for exp, Q in zip(exposure_array, Q_aligned)], axis=0) / exposure_array.sum() + U_combined = np.sum([exp * U for exp, U in zip(exposure_array, U_aligned)], axis=0) / exposure_array.sum() + + IQU_cov_combined = np.zeros((3, 3, shape[0], shape[1])) + for i in range(3): + IQU_cov_combined[i, i] = np.sum([exp**2 * cov for exp, cov in zip(exposure_array, IQU_cov_aligned[:, i, i])], axis=0) / exposure_array.sum() ** 2 + for j in [x for x in range(3) if x != i]: + IQU_cov_combined[i, j] = np.sqrt( + np.sum([exp**2 * cov**2 for exp, cov in zip(exposure_array, IQU_cov_aligned[:, i, j])], axis=0) / exposure_array.sum() ** 2 + ) + IQU_cov_combined[j, i] = np.sqrt( + np.sum([exp**2 * cov**2 for exp, cov in zip(exposure_array, IQU_cov_aligned[:, j, i])], axis=0) / exposure_array.sum() ** 2 + ) + + header_combined = headers[0] + header_combined["EXPTIME"] = exposure_array.sum() + + return I_combined, Q_combined, U_combined, IQU_cov_combined, data_mask_aligned, header_combined + + +def main(infiles, target=None, output_dir="./data/"): + """ """ + from lib.fits import save_Stokes + from 
lib.plots import pol_map + from lib.reduction import compute_pol, rotate_Stokes + + if target is None: + target = input("Target name:\n>") + + prod = np.array([["/".join(filepath.split("/")[:-1]), filepath.split("/")[-1]] for filepath in infiles], dtype=str) + data_folder = prod[0][0] + files = [p[1] for p in prod] + + # Reduction parameters + kwargs = {} + # Polarization map output + kwargs["SNRp_cut"] = 3.0 + kwargs["SNRi_cut"] = 1.0 + kwargs["flux_lim"] = 1e-19, 3e-17 + kwargs["scale_vec"] = 5 + kwargs["step_vec"] = 1 + + if not same_reduction(infiles): + from FOC_reduction import main as FOC_reduction + + grouped_infiles = same_obs(files, data_folder) + + new_infiles = [] + for i, group in enumerate(grouped_infiles): + new_infiles.append( + FOC_reduction(target=target + "-" + str(i + 1), infiles=["/".join([data_folder, file]) for file in group], interactive=True)[0] + ) + + infiles = new_infiles + + I_combined, Q_combined, U_combined, IQU_cov_combined, data_mask_combined, header_combined = combine_Stokes(infiles=infiles) + I_combined, Q_combined, U_combined, IQU_cov_combined, data_mask_combined, header_combined = rotate_Stokes( + I_stokes=I_combined, Q_stokes=Q_combined, U_stokes=U_combined, Stokes_cov=IQU_cov_combined, data_mask=data_mask_combined, header_stokes=header_combined + ) + + P, debiased_P, s_P, s_P_P, PA, s_PA, s_PA_P = compute_pol( + I_stokes=I_combined, Q_stokes=Q_combined, U_stokes=U_combined, Stokes_cov=IQU_cov_combined, header_stokes=header_combined + ) + filename = header_combined["FILENAME"] + figname = "_".join([target, filename[filename.find("FOC_") :], "combined"]) + Stokes_combined = save_Stokes( + I_stokes=I_combined, + Q_stokes=Q_combined, + U_stokes=U_combined, + Stokes_cov=IQU_cov_combined, + P=P, + debiased_P=debiased_P, + s_P=s_P, + s_P_P=s_P_P, + PA=PA, + s_PA=s_PA, + s_PA_P=s_PA_P, + header_stokes=header_combined, + data_mask=data_mask_combined, + filename=figname, + data_folder=data_folder, + return_hdul=True, + ) + + pol_map(Stokes_combined, **kwargs) + + return "/".join([data_folder, figname + ".fits"]) + + +if __name__ == "__main__": + import argparse + + parser = argparse.ArgumentParser(description="Combine different observations of a single object") + parser.add_argument("-t", "--target", metavar="targetname", required=False, help="the name of the target", type=str, default=None) + parser.add_argument("-f", "--files", metavar="path", required=False, nargs="*", help="the full or relative path to the data products", default=None) + parser.add_argument( + "-o", "--output_dir", metavar="directory_path", required=False, help="output directory path for the data products", type=str, default="./data" + ) + args = parser.parse_args() + exitcode = main(target=args.target, infiles=args.files, output_dir=args.output_dir) + print("Written to: ", exitcode) diff --git a/package/FOC_reduction.py b/package/FOC_reduction.py index ea0c60e..e1ae8da 100755 --- a/package/FOC_reduction.py +++ b/package/FOC_reduction.py @@ -22,16 +22,17 @@ from lib.utils import sci_not, princ_angle + def main(target=None, proposal_id=None, data_dir=None, infiles=None, output_dir="./data", crop=False, interactive=False): # Reduction parameters # Deconvolution deconvolve = False if deconvolve: # from lib.deconvolve import from_file_psf - psf = 'gaussian' # Can be user-defined as well + psf = "gaussian" # Can be user-defined as well # psf = from_file_psf(data_folder+psf_file) psf_FWHM = 3.1 - psf_scale = 'px' + psf_scale = "px" psf_shape = None # (151, 151) iterations = 1 algo = "conjgrad" @@ 
-40,18 +41,20 @@ def main(target=None, proposal_id=None, data_dir=None, infiles=None, output_dir= display_crop = False # Background estimation - error_sub_type = 'freedman-diaconis' # sqrt, sturges, rice, scott, freedman-diaconis (default) or shape (example (51, 51)) - subtract_error = 0.01 + + error_sub_type = "freedman-diaconis" # sqrt, sturges, rice, scott, freedman-diaconis (default) or shape (example (51, 51)) + subtract_error = 1.0 + display_bkg = False # Data binning - rebin = True pxsize = 2 - px_scale = 'px' # pixel, arcsec or full - rebin_operation = 'sum' # sum or average + pxscale = "px" # pixel, arcsec or full + rebin_operation = "sum" # sum or average # Alignement - align_center = 'center' # If None will not align the images + align_center = "center" # If None will not align the images + display_align = False display_data = False @@ -59,20 +62,19 @@ def main(target=None, proposal_id=None, data_dir=None, infiles=None, output_dir= transmitcorr = True # Smoothing - smoothing_function = 'combine' # gaussian_after, weighted_gaussian_after, gaussian, weighted_gaussian or combine - smoothing_FWHM = None # If None, no smoothing is done - smoothing_scale = 'px' # pixel or arcsec + smoothing_function = "combine" # gaussian_after, weighted_gaussian_after, gaussian, weighted_gaussian or combine + smoothing_FWHM = 2.0 # If None, no smoothing is done + smoothing_scale = "px" # pixel or arcsec # Rotation - rotate_data = False # rotation to North convention can give erroneous results - rotate_stokes = True + rotate_North = True # Polarization map output - SNRp_cut = 3. # P measurments with SNR>3 - SNRi_cut = 3. # I measurments with SNR>30, which implies an uncertainty in P of 4.7%. - flux_lim = None # lowest and highest flux displayed on plot, defaults to bkg and maximum in cut if None - vec_scale = 5 - step_vec = 1 # plot all vectors in the array. if step_vec = 2, then every other vector will be plotted if step_vec = 0 then all vectors are displayed at full length + SNRp_cut = 3.0 # P measurements with SNR > 3 + SNRi_cut = 1.0 # I measurements with SNR > 1 + flux_lim = None # lowest and highest flux displayed on plot, defaults to bkg and maximum in cut if None + scale_vec = 5 + step_vec = 1 # plot every vector in the array; if step_vec = 2, every other vector is plotted; if step_vec = 0, all vectors are displayed at full length # Adaptive binning # in order to perfrom optimal binning, there are several steps to follow: @@ -85,9 +87,10 @@ def main(target=None, proposal_id=None, data_dir=None, infiles=None, output_dir= optimize = False # Pipeline start + # Step 1: # Get data from fits files and translate to flux in erg/cm²/s/Angstrom.
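+    # (count rates are converted using the PHOTFLAM inverse-sensitivity
+    #  keyword carried by each header)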
- + if data_dir is None: if infiles is not None: prod = np.array([["/".join(filepath.split('/')[:-1]), filepath.split('/')[-1]] for filepath in infiles], dtype=str) @@ -114,6 +117,7 @@ def main(target=None, proposal_id=None, data_dir=None, infiles=None, output_dir= target = input("Target name:\n>") data_array, headers = proj_fits.get_obs_data(infiles, data_folder=data_folder, compute_flux=True) + try: plots_folder = data_folder.replace("data", "plots") except ValueError: @@ -123,18 +127,20 @@ def main(target=None, proposal_id=None, data_dir=None, infiles=None, output_dir= figname = "_".join([target, "FOC"]) figtype = "" - if rebin: - if px_scale not in ['full']: - figtype = "".join(["b", "{0:.2f}".format(pxsize), px_scale]) # additionnal informations + if (pxsize is not None) and not (pxsize == 1 and pxscale.lower() in ["px", "pixel", "pixels"]): + if pxscale not in ["full"]: + figtype = "".join(["b", "{0:.2f}".format(pxsize), pxscale]) # additional information else: figtype = "full" - if smoothing_FWHM is not None: - figtype += "_"+"".join(["".join([s[0] for s in smoothing_function.split("_")]), - "{0:.2f}".format(smoothing_FWHM), smoothing_scale]) # additionnal informations + + if smoothing_FWHM is not None and smoothing_scale is not None: + smoothstr = "".join([*[s[0] for s in smoothing_function.split("_")], "{0:.2f}".format(smoothing_FWHM), smoothing_scale]) + figtype = "_".join([figtype, smoothstr] if figtype != "" else [smoothstr]) + if deconvolve: - figtype += "_deconv" + figtype = "_".join([figtype, "deconv"] if figtype != "" else ["deconv"]) if align_center is None: - figtype += "_not_aligned" + figtype = "_".join([figtype, "not_aligned"] if figtype != "" else ["not_aligned"]) if optimal_binning: options = {'optimize': optimize, 'optimal_binning': True} @@ -337,12 +343,14 @@ def main(target=None, proposal_id=None, data_dir=None, infiles=None, output_dir= elif px_scale.lower() not in ['full', 'integrate']: proj_plots.pol_map(Stokes_test, SNRp_cut=SNRp_cut, SNRi_cut=SNRi_cut, flux_lim=flux_lim) - return 0 + + return outfiles if __name__ == "__main__": import argparse + parser = argparse.ArgumentParser(description='Query MAST for target products') parser.add_argument('-t', '--target', metavar='targetname', required=False, help='the name of the target', type=str, default=None) parser.add_argument('-p', '--proposal_id', metavar='proposal_id', required=False, help='the proposal id of the data products', type=int, default=None) @@ -355,4 +363,4 @@ if __name__ == "__main__": args = parser.parse_args() exitcode = main(target=args.target, proposal_id=args.proposal_id, data_dir=args.data_dir, infiles=args.files, output_dir=args.output_dir, crop=args.crop, interactive=args.interactive) - print("Finished with ExitCode: ", exitcode) + print("Finished with ExitCode: ", exitcode) \ No newline at end of file diff --git a/package/__init__.py b/package/__init__.py index 80d0eb7..094aa13 100644 --- a/package/__init__.py +++ b/package/__init__.py @@ -1,2 +1,3 @@ from . import lib from . import src +from . import FOC_reduction diff --git a/package/lib/background.py b/package/lib/background.py index 7d5c4bc..a869135 100755 --- a/package/lib/background.py +++ b/package/lib/background.py @@ -9,139 +9,155 @@ prototypes : - bkg_mini(data, error, mask, headers, sub_shape, display, savename, plots_folder) -> n_data_array, n_error_array, headers, background) Compute the error (noise) of the input array by looking at the sub-region of minimal flux in every image and of shape sub_shape.
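+
+Example call (hypothetical arrays; bkg_hist is assumed to return the same
+tuple as bkg_mini above):
+    >>> n_data, n_error, headers, background = bkg_hist(
+    ...     data, error, mask, headers, sub_type="freedman-diaconis", subtract_error=1.0
+    ... )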
""" -from os.path import join as path_join + from copy import deepcopy -import numpy as np -import matplotlib.pyplot as plt -import matplotlib.dates as mdates -from matplotlib.colors import LogNorm -from matplotlib.patches import Rectangle from datetime import datetime, timedelta +from os.path import join as path_join + +import matplotlib.dates as mdates +import matplotlib.pyplot as plt +import numpy as np from astropy.time import Time from lib.plots import plot_obs +from matplotlib.colors import LogNorm +from matplotlib.patches import Rectangle from scipy.optimize import curve_fit def gauss(x, *p): N, mu, sigma = p - return N*np.exp(-(x-mu)**2/(2.*sigma**2)) + return N * np.exp(-((x - mu) ** 2) / (2.0 * sigma**2)) def gausspol(x, *p): N, mu, sigma, a, b, c, d = p - return N*np.exp(-(x-mu)**2/(2.*sigma**2)) + a*np.log(x) + b/x + c*x + d + return N * np.exp(-((x - mu) ** 2) / (2.0 * sigma**2)) + a * np.log(x) + b / x + c * x + d def bin_centers(edges): - return (edges[1:]+edges[:-1])/2. + return (edges[1:] + edges[:-1]) / 2.0 def display_bkg(data, background, std_bkg, headers, histograms=None, binning=None, coeff=None, rectangle=None, savename=None, plots_folder="./"): - plt.rcParams.update({'font.size': 15}) - convert_flux = np.array([head['photflam'] for head in headers]) - date_time = np.array([Time((headers[i]['expstart']+headers[i]['expend'])/2., format='mjd', precision=0).iso for i in range(len(headers))]) - date_time = np.array([datetime.strptime(d, '%Y-%m-%d %H:%M:%S') for d in date_time]) - date_err = np.array([timedelta(seconds=headers[i]['exptime']/2.) for i in range(len(headers))]) - filt = np.array([headers[i]['filtnam1'] for i in range(len(headers))]) - dict_filt = {"POL0": 'r', "POL60": 'g', "POL120": 'b'} + plt.rcParams.update({"font.size": 15}) + convert_flux = np.array([head["photflam"] for head in headers]) + date_time = np.array([Time((headers[i]["expstart"] + headers[i]["expend"]) / 2.0, format="mjd", precision=0).iso for i in range(len(headers))]) + date_time = np.array([datetime.strptime(d, "%Y-%m-%d %H:%M:%S") for d in date_time]) + date_err = np.array([timedelta(seconds=headers[i]["exptime"] / 2.0) for i in range(len(headers))]) + filt = np.array([headers[i]["filtnam1"] for i in range(len(headers))]) + dict_filt = {"POL0": "r", "POL60": "g", "POL120": "b"} c_filt = np.array([dict_filt[f] for f in filt]) fig, ax = plt.subplots(figsize=(10, 6), constrained_layout=True) for f in np.unique(filt): mask = [fil == f for fil in filt] - ax.scatter(date_time[mask], background[mask]*convert_flux[mask], color=dict_filt[f], - label="{0:s}".format(f)) - ax.errorbar(date_time, background*convert_flux, xerr=date_err, yerr=std_bkg*convert_flux, fmt='+k', - markersize=0, ecolor=c_filt) + ax.scatter(date_time[mask], background[mask] * convert_flux[mask], color=dict_filt[f], label="{0:s}".format(f)) + ax.errorbar(date_time, background * convert_flux, xerr=date_err, yerr=std_bkg * convert_flux, fmt="+k", markersize=0, ecolor=c_filt) # Date handling locator = mdates.AutoDateLocator() formatter = mdates.ConciseDateFormatter(locator) ax.xaxis.set_major_locator(locator) ax.xaxis.set_major_formatter(formatter) # ax.set_ylim(bottom=0.) 
- ax.set_yscale('log') + ax.set_yscale("log") ax.set_xlabel("Observation date and time") ax.set_ylabel(r"Flux [$ergs \cdot cm^{-2} \cdot s^{-1} \cdot \AA^{-1}$]") plt.legend() - if not (savename is None): + if savename is not None: this_savename = deepcopy(savename) - if not savename[-4:] in ['.png', '.jpg', '.pdf']: - this_savename += '_background_flux.pdf' + if savename[-4:] not in [".png", ".jpg", ".pdf"]: + this_savename += "_background_flux.pdf" else: - this_savename = savename[:-4]+"_background_flux"+savename[-4:] - fig.savefig(path_join(plots_folder, this_savename), bbox_inches='tight') + this_savename = savename[:-4] + "_background_flux" + savename[-4:] + fig.savefig(path_join(plots_folder, this_savename), bbox_inches="tight") - if not (histograms is None): + if histograms is not None: filt_obs = {"POL0": 0, "POL60": 0, "POL120": 0} - fig_h, ax_h = plt.subplots(figsize=(10, 6), constrained_layout=True) + fig_h, ax_h = plt.subplots(figsize=(10, 8), constrained_layout=True) for i, (hist, bins) in enumerate(zip(histograms, binning)): - filt_obs[headers[i]['filtnam1']] += 1 - ax_h.plot(bins*convert_flux[i], hist, '+', color="C{0:d}".format(i), alpha=0.8, - label=headers[i]['filtnam1']+' (Obs '+str(filt_obs[headers[i]['filtnam1']])+')') - ax_h.plot([background[i]*convert_flux[i], background[i]*convert_flux[i]], [hist.min(), hist.max()], 'x--', color="C{0:d}".format(i), alpha=0.8) - if not (coeff is None): + filt_obs[headers[i]["filtnam1"]] += 1 + ax_h.plot( + bins * convert_flux[i], + hist, + "+", + color="C{0:d}".format(i), + alpha=0.8, + label=headers[i]["filtnam1"] + " (Obs " + str(filt_obs[headers[i]["filtnam1"]]) + ")", + ) + ax_h.plot([background[i] * convert_flux[i], background[i] * convert_flux[i]], [hist.min(), hist.max()], "x--", color="C{0:d}".format(i), alpha=0.8) + if coeff is not None: # ax_h.plot(bins*convert_flux[i], gausspol(bins, *coeff[i]), '--', color="C{0:d}".format(i), alpha=0.8) - ax_h.plot(bins*convert_flux[i], gauss(bins, *coeff[i]), '--', color="C{0:d}".format(i), alpha=0.8) - ax_h.set_xscale('log') - ax_h.set_ylim([0., np.max([hist.max() for hist in histograms])]) - ax_h.set_xlim([np.min(background*convert_flux)*1e-2, np.max(background*convert_flux)*1e2]) + ax_h.plot(bins * convert_flux[i], gauss(bins, *coeff[i]), "--", color="C{0:d}".format(i), alpha=0.8) + ax_h.set_xscale("log") + ax_h.set_ylim([0.0, np.max([hist.max() for hist in histograms])]) + ax_h.set_xlim([np.min(background * convert_flux) * 1e-2, np.max(background * convert_flux) * 1e2]) ax_h.set_xlabel(r"Flux [$ergs \cdot cm^{-2} \cdot s^{-1} \cdot \AA^{-1}$]") ax_h.set_ylabel(r"Number of pixels in bin") ax_h.set_title("Histogram for each observation") plt.legend() - if not (savename is None): + if savename is not None: this_savename = deepcopy(savename) - if not savename[-4:] in ['.png', '.jpg', '.pdf']: - this_savename += '_histograms.pdf' + if savename[-4:] not in [".png", ".jpg", ".pdf"]: + this_savename += "_histograms.pdf" else: - this_savename = savename[:-4]+"_histograms"+savename[-4:] - fig_h.savefig(path_join(plots_folder, this_savename), bbox_inches='tight') + this_savename = savename[:-4] + "_histograms" + savename[-4:] + fig_h.savefig(path_join(plots_folder, this_savename), bbox_inches="tight") fig2, ax2 = plt.subplots(figsize=(10, 10)) - data0 = data[0]*convert_flux[0] - bkg_data0 = data0 <= background[0]*convert_flux[0] - instr = headers[0]['instrume'] - rootname = headers[0]['rootname'] - exptime = headers[0]['exptime'] - filt = headers[0]['filtnam1'] + data0 = data[0] * 
convert_flux[0] + bkg_data0 = data0 <= background[0] * convert_flux[0] + instr = headers[0]["instrume"] + rootname = headers[0]["rootname"] + exptime = headers[0]["exptime"] + filt = headers[0]["filtnam1"] # plots - im2 = ax2.imshow(data0, norm=LogNorm(data0[data0 > 0.].mean()/10., data0.max()), origin='lower', cmap='gray') - ax2.imshow(bkg_data0, origin='lower', cmap='Reds', alpha=0.5) - if not (rectangle is None): + im2 = ax2.imshow(data0, norm=LogNorm(data0[data0 > 0.0].mean() / 10.0, data0.max()), origin="lower", cmap="gray") + ax2.imshow(bkg_data0, origin="lower", cmap="Reds", alpha=0.5) + if rectangle is not None: x, y, width, height, angle, color = rectangle[0] ax2.add_patch(Rectangle((x, y), width, height, edgecolor=color, fill=False, lw=2)) - ax2.annotate(instr+":"+rootname, color='white', fontsize=10, xy=(0.01, 1.00), xycoords='axes fraction', verticalalignment='top', horizontalalignment='left') - ax2.annotate(filt, color='white', fontsize=14, xy=(0.01, 0.01), xycoords='axes fraction', verticalalignment='bottom', horizontalalignment='left') - ax2.annotate(str(exptime)+" s", color='white', fontsize=10, xy=(1.00, 0.01), - xycoords='axes fraction', verticalalignment='bottom', horizontalalignment='right') - ax2.set(xlabel='pixel offset', ylabel='pixel offset', aspect='equal') + ax2.annotate( + instr + ":" + rootname, color="white", fontsize=10, xy=(0.01, 1.00), xycoords="axes fraction", verticalalignment="top", horizontalalignment="left" + ) + ax2.annotate(filt, color="white", fontsize=14, xy=(0.01, 0.01), xycoords="axes fraction", verticalalignment="bottom", horizontalalignment="left") + ax2.annotate( + str(exptime) + " s", color="white", fontsize=10, xy=(1.00, 0.01), xycoords="axes fraction", verticalalignment="bottom", horizontalalignment="right" + ) + ax2.set(xlabel="pixel offset", ylabel="pixel offset", aspect="equal") fig2.subplots_adjust(hspace=0, wspace=0, right=1.0) - fig2.colorbar(im2, ax=ax2, location='right', aspect=50, pad=0.025, label=r"Flux [$ergs \cdot cm^{-2} \cdot s^{-1} \cdot \AA^{-1}$]") + fig2.colorbar(im2, ax=ax2, location="right", aspect=50, pad=0.025, label=r"Flux [$ergs \cdot cm^{-2} \cdot s^{-1} \cdot \AA^{-1}$]") - if not (savename is None): + if savename is not None: this_savename = deepcopy(savename) - if not savename[-4:] in ['.png', '.jpg', '.pdf']: - this_savename += '_'+filt+'_background_location.pdf' + if savename[-4:] not in [".png", ".jpg", ".pdf"]: + this_savename += "_" + filt + "_background_location.pdf" else: - this_savename = savename[:-4]+'_'+filt+'_background_location'+savename[-4:] - fig2.savefig(path_join(plots_folder, this_savename), bbox_inches='tight') - if not (rectangle is None): - plot_obs(data, headers, vmin=data[data > 0.].min()*convert_flux.mean(), vmax=data[data > 0.].max()*convert_flux.mean(), rectangle=rectangle, - savename=savename+"_background_location", plots_folder=plots_folder) - elif not (rectangle is None): - plot_obs(data, headers, vmin=data[data > 0.].min(), vmax=data[data > 0.].max(), rectangle=rectangle) + this_savename = savename[:-4] + "_" + filt + "_background_location" + savename[-4:] + fig2.savefig(path_join(plots_folder, this_savename), bbox_inches="tight") + if rectangle is not None: + plot_obs( + data, + headers, + vmin=data[data > 0.0].min() * convert_flux.mean(), + vmax=data[data > 0.0].max() * convert_flux.mean(), + rectangle=rectangle, + savename=savename + "_background_location", + plots_folder=plots_folder, + ) + elif rectangle is not None: + plot_obs(data, headers, vmin=data[data > 0.0].min(), 
vmax=data[data > 0.0].max(), rectangle=rectangle) plt.show() def sky_part(img): - rand_ind = np.unique((np.random.rand(np.floor(img.size/4).astype(int))*2*img.size).astype(int) % img.size) + rand_ind = np.unique((np.random.rand(np.floor(img.size / 4).astype(int)) * 2 * img.size).astype(int) % img.size) rand_pix = img.flatten()[rand_ind] # Intensity range sky_med = np.median(rand_pix) sig = np.min([img[img < sky_med].std(), img[img > sky_med].std()]) - sky_range = [sky_med-2.*sig, np.max([sky_med+sig, 7e-4])] # Detector background average FOC Data Handbook Sec. 7.6 + sky_range = [sky_med - 2.0 * sig, np.max([sky_med + sig, 7e-4])] # Detector background average FOC Data Handbook Sec. 7.6 sky = img[np.logical_and(img >= sky_range[0], img <= sky_range[1])] return sky, sky_range @@ -152,14 +168,14 @@ def bkg_estimate(img, bins=None, chi2=None, coeff=None): bins, chi2, coeff = [8], [], [] else: try: - bins.append(int(3./2.*bins[-1])) + bins.append(int(3.0 / 2.0 * bins[-1])) except IndexError: bins, chi2, coeff = [8], [], [] hist, bin_edges = np.histogram(img[img > 0], bins=bins[-1]) binning = bin_centers(bin_edges) peak = binning[np.argmax(hist)] - bins_stdev = binning[hist > hist.max()/2.] - stdev = bins_stdev[-1]-bins_stdev[0] + bins_stdev = binning[hist > hist.max() / 2.0] + stdev = bins_stdev[-1] - bins_stdev[0] # p0 = [hist.max(), peak, stdev, 1e-3, 1e-3, 1e-3, 1e-3] p0 = [hist.max(), peak, stdev] try: @@ -168,7 +184,7 @@ def bkg_estimate(img, bins=None, chi2=None, coeff=None): except RuntimeError: popt = p0 # chi2.append(np.sum((hist - gausspol(binning, *popt))**2)/hist.size) - chi2.append(np.sum((hist - gauss(binning, *popt))**2)/hist.size) + chi2.append(np.sum((hist - gauss(binning, *popt)) ** 2) / hist.size) coeff.append(popt) return bins, chi2, coeff @@ -223,7 +239,7 @@ def bkg_fit(data, error, mask, headers, subtract_error=True, display=False, save for i, image in enumerate(data): # Compute the Count-rate histogram for the image - sky, sky_range = sky_part(image[image > 0.]) + sky, sky_range = sky_part(image[image > 0.0]) bins, chi2, coeff = bkg_estimate(sky) while bins[-1] < 256: @@ -232,9 +248,10 @@ def bkg_fit(data, error, mask, headers, subtract_error=True, display=False, save histograms.append(hist) binning.append(bin_centers(bin_edges)) chi2, coeff = np.array(chi2), np.array(coeff) - weights = 1/chi2**2 + weights = 1 / chi2**2 weights /= weights.sum() + bkg = np.sum(weights*(coeff[:, 1]+np.abs(coeff[:, 2]) * 0.01)) # why not just use 0.01 error_bkg[i] *= bkg @@ -246,7 +263,8 @@ def bkg_fit(data, error, mask, headers, subtract_error=True, display=False, save # n_data_array[i][mask] = n_data_array[i][mask] - bkg # n_data_array[i][np.logical_and(mask, n_data_array[i] <= 1e-3*bkg)] = 1e-3*bkg - std_bkg[i] = image[np.abs(image-bkg)/bkg < 1.].std() + + std_bkg[i] = image[np.abs(image - bkg) / bkg < 1.0].std() background[i] = bkg if subtract_error > 0: @@ -311,37 +329,43 @@ def bkg_hist(data, error, mask, headers, sub_type=None, subtract_error=True, dis for i, image in enumerate(data): # Compute the Count-rate histogram for the image - n_mask = np.logical_and(mask, image > 0.) 
- if not (sub_type is None): + n_mask = np.logical_and(mask, image > 0.0) + if sub_type is not None: if isinstance(sub_type, int): n_bins = sub_type - elif sub_type.lower() in ['sqrt']: + elif sub_type.lower() in ["sqrt"]: n_bins = np.fix(np.sqrt(image[n_mask].size)).astype(int) # Square-root - elif sub_type.lower() in ['sturges']: - n_bins = np.ceil(np.log2(image[n_mask].size)).astype(int)+1 # Sturges - elif sub_type.lower() in ['rice']: - n_bins = 2*np.fix(np.power(image[n_mask].size, 1/3)).astype(int) # Rice - elif sub_type.lower() in ['scott']: - n_bins = np.fix((image[n_mask].max()-image[n_mask].min())/(3.5*image[n_mask].std()/np.power(image[n_mask].size, 1/3))).astype(int) # Scott + elif sub_type.lower() in ["sturges"]: + n_bins = np.ceil(np.log2(image[n_mask].size)).astype(int) + 1 # Sturges + elif sub_type.lower() in ["rice"]: + n_bins = 2 * np.fix(np.power(image[n_mask].size, 1 / 3)).astype(int) # Rice + elif sub_type.lower() in ["scott"]: + n_bins = np.fix((image[n_mask].max() - image[n_mask].min()) / (3.5 * image[n_mask].std() / np.power(image[n_mask].size, 1 / 3))).astype( + int + ) # Scott else: - n_bins = np.fix((image[n_mask].max()-image[n_mask].min())/(2*np.subtract(*np.percentile(image[n_mask], [75, 25])) / - np.power(image[n_mask].size, 1/3))).astype(int) # Freedman-Diaconis + n_bins = np.fix( + (image[n_mask].max() - image[n_mask].min()) + / (2 * np.subtract(*np.percentile(image[n_mask], [75, 25])) / np.power(image[n_mask].size, 1 / 3)) + ).astype(int) # Freedman-Diaconis else: - n_bins = np.fix((image[n_mask].max()-image[n_mask].min())/(2*np.subtract(*np.percentile(image[n_mask], [75, 25])) / - np.power(image[n_mask].size, 1/3))).astype(int) # Freedman-Diaconis + n_bins = np.fix( + (image[n_mask].max() - image[n_mask].min()) / (2 * np.subtract(*np.percentile(image[n_mask], [75, 25])) / np.power(image[n_mask].size, 1 / 3)) + ).astype(int) # Freedman-Diaconis hist, bin_edges = np.histogram(np.log(image[n_mask]), bins=n_bins) histograms.append(hist) binning.append(np.exp(bin_centers(bin_edges))) # Fit a gaussian to the log-intensity histogram - bins_stdev = binning[-1][hist > hist.max()/2.] 
- stdev = bins_stdev[-1]-bins_stdev[0] + bins_stdev = binning[-1][hist > hist.max() / 2.0] + stdev = bins_stdev[-1] - bins_stdev[0] # p0 = [hist.max(), binning[-1][np.argmax(hist)], stdev, 1e-3, 1e-3, 1e-3, 1e-3] p0 = [hist.max(), binning[-1][np.argmax(hist)], stdev] # popt, pcov = curve_fit(gausspol, binning[-1], hist, p0=p0) popt, pcov = curve_fit(gauss, binning[-1], hist, p0=p0) coeff.append(popt) + bkg = popt[1]+np.abs(popt[2]) * 0.01 # why not just use 0.01 error_bkg[i] *= bkg @@ -353,7 +377,8 @@ def bkg_hist(data, error, mask, headers, sub_type=None, subtract_error=True, dis # n_data_array[i][mask] = n_data_array[i][mask] - bkg # n_data_array[i][np.logical_and(mask, n_data_array[i] <= 1e-3*bkg)] = 1e-3*bkg - std_bkg[i] = image[np.abs(image-bkg)/bkg < 1.].std() + + std_bkg[i] = image[np.abs(image - bkg) / bkg < 1.0].std() background[i] = bkg if subtract_error > 0: @@ -415,10 +440,10 @@ def bkg_mini(data, error, mask, headers, sub_shape=(15, 15), subtract_error=True sub_shape = np.array(sub_shape) # Make sub_shape of odd values if not (np.all(sub_shape % 2)): - sub_shape += 1-sub_shape % 2 + sub_shape += 1 - sub_shape % 2 shape = np.array(data.shape) - diff = (sub_shape-1).astype(int) - temp = np.zeros((shape[0], shape[1]-diff[0], shape[2]-diff[1])) + diff = (sub_shape - 1).astype(int) + temp = np.zeros((shape[0], shape[1] - diff[0], shape[2] - diff[1])) n_data_array, n_error_array = deepcopy(data), deepcopy(error) error_bkg = np.ones(n_data_array.shape) @@ -431,18 +456,19 @@ def bkg_mini(data, error, mask, headers, sub_shape=(15, 15), subtract_error=True # sub-image dominated by background fmax = np.finfo(np.double).max img = deepcopy(image) - img[1-mask] = fmax/(diff[0]*diff[1]) + img[1 - mask] = fmax / (diff[0] * diff[1]) for r in range(temp.shape[1]): for c in range(temp.shape[2]): - temp[i][r, c] = np.where(mask[r, c], img[r:r+diff[0], c:c+diff[1]].sum(), fmax/(diff[0]*diff[1])) + temp[i][r, c] = np.where(mask[r, c], img[r : r + diff[0], c : c + diff[1]].sum(), fmax / (diff[0] * diff[1])) minima = np.unravel_index(np.argmin(temp.sum(axis=0)), temp.shape[1:]) for i, image in enumerate(data): - rectangle.append([minima[1], minima[0], sub_shape[1], sub_shape[0], 0., 'r']) + rectangle.append([minima[1], minima[0], sub_shape[1], sub_shape[0], 0.0, "r"]) # Compute error : root mean square of the background - sub_image = image[minima[0]:minima[0]+sub_shape[0], minima[1]:minima[1]+sub_shape[1]] + sub_image = image[minima[0] : minima[0] + sub_shape[0], minima[1] : minima[1] + sub_shape[1]] # bkg = np.std(sub_image) # Previously computed using standard deviation over the background + bkg = np.sqrt(np.sum(sub_image**2)/sub_image.size)*0.01 if subtract_error > 0 else np.sqrt(np.sum(sub_image**2)/sub_image.size) error_bkg[i] *= bkg @@ -453,7 +479,8 @@ def bkg_mini(data, error, mask, headers, sub_shape=(15, 15), subtract_error=True # n_data_array[i][mask] = n_data_array[i][mask] - bkg # n_data_array[i][np.logical_and(mask, n_data_array[i] <= 1e-3*bkg)] = 1e-3*bkg - std_bkg[i] = image[np.abs(image-bkg)/bkg < 1.].std() + + std_bkg[i] = image[np.abs(image - bkg) / bkg < 1.0].std() background[i] = bkg if subtract_error > 0: diff --git a/package/lib/convex_hull.py b/package/lib/convex_hull.py index 5e576fe..0ace8ee 100755 --- a/package/lib/convex_hull.py +++ b/package/lib/convex_hull.py @@ -3,6 +3,7 @@ Library functions for graham algorithm implementation (find the convex hull of a """ from copy import deepcopy + import numpy as np @@ -16,23 +17,23 @@ def clean_ROI(image): row, col = 
np.indices(shape) for i in range(0, shape[0]): - r = row[i, :][image[i, :] > 0.] - c = col[i, :][image[i, :] > 0.] + r = row[i, :][image[i, :] > 0.0] + c = col[i, :][image[i, :] > 0.0] if len(r) > 1 and len(c) > 1: H.append((r[0], c[0])) H.append((r[-1], c[-1])) H = np.array(H) for j in range(0, shape[1]): - r = row[:, j][image[:, j] > 0.] - c = col[:, j][image[:, j] > 0.] + r = row[:, j][image[:, j] > 0.0] + c = col[:, j][image[:, j] > 0.0] if len(r) > 1 and len(c) > 1: J.append((r[0], c[0])) J.append((r[-1], c[-1])) J = np.array(J) xmin = np.min([H[:, 1].min(), J[:, 1].min()]) - xmax = np.max([H[:, 1].max(), J[:, 1].max()])+1 + xmax = np.max([H[:, 1].max(), J[:, 1].max()]) + 1 ymin = np.min([H[:, 0].min(), J[:, 0].min()]) - ymax = np.max([H[:, 0].max(), J[:, 0].max()])+1 + ymax = np.max([H[:, 0].max(), J[:, 0].max()]) + 1 return np.array([xmin, xmax, ymin, ymax]) @@ -81,7 +82,7 @@ def distance(A, B): Euclidian distance between A, B. """ x, y = vector(A, B) - return np.sqrt(x ** 2 + y ** 2) + return np.sqrt(x**2 + y**2) # Define lexicographic and composition order @@ -174,8 +175,8 @@ def partition(s, left, right, order): temp = deepcopy(s[i]) s[i] = deepcopy(s[j]) s[j] = deepcopy(temp) - temp = deepcopy(s[i+1]) - s[i+1] = deepcopy(s[right]) + temp = deepcopy(s[i + 1]) + s[i + 1] = deepcopy(s[right]) s[right] = deepcopy(temp) return i + 1 @@ -206,16 +207,32 @@ def sort_angles_distances(Omega, s): Sort the list of points 's' for the composition order given reference point Omega. """ - def order(A, B): return comp(Omega, A, B) + + def order(A, B): + return comp(Omega, A, B) + quicksort(s, order) # Define fuction for stacks (use here python lists with stack operations). -def empty_stack(): return [] -def stack(S, A): S.append(A) -def unstack(S): S.pop() -def stack_top(S): return S[-1] -def stack_sub_top(S): return S[-2] +def empty_stack(): + return [] + + +def stack(S, A): + S.append(A) + + +def unstack(S): + S.pop() + + +def stack_top(S): + return S[-1] + + +def stack_sub_top(S): + return S[-2] # Alignement handling @@ -299,7 +316,7 @@ def convex_hull(H): return S -def image_hull(image, step=5, null_val=0., inside=True): +def image_hull(image, step=5, null_val=0.0, inside=True): """ Compute the convex hull of a 2D image and return the 4 relevant coordinates of the maximum included rectangle (ie. crop image to maximum rectangle). 
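+
+    A minimal call on a hypothetical image array; the four returned values
+    are the crop coordinates of that rectangle:
+    >>> vertex = image_hull(image, step=5, null_val=0.0, inside=True)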
@@ -331,7 +348,7 @@ def image_hull(image, step=5, null_val=0., inside=True): H = [] shape = np.array(image.shape) row, col = np.indices(shape) - for i in range(0, int(min(shape)/2), step): + for i in range(0, int(min(shape) / 2), step): r1, r2 = row[i, :][image[i, :] > null_val], row[-i, :][image[-i, :] > null_val] c1, c2 = col[i, :][image[i, :] > null_val], col[-i, :][image[-i, :] > null_val] if r1.shape[0] > 1: @@ -349,10 +366,10 @@ def image_hull(image, step=5, null_val=0., inside=True): # S1 = S[x_min*y_max][np.argmax(S[x_min*y_max][:, 1])] # S2 = S[x_max*y_min][np.argmin(S[x_max*y_min][:, 1])] # S3 = S[x_max*y_max][np.argmax(S[x_max*y_max][:, 0])] - S0 = S[x_min*y_min][np.abs(0-S[x_min*y_min].sum(axis=1)).min() == np.abs(0-S[x_min*y_min].sum(axis=1))][0] - S1 = S[x_min*y_max][np.abs(shape[1]-S[x_min*y_max].sum(axis=1)).min() == np.abs(shape[1]-S[x_min*y_max].sum(axis=1))][0] - S2 = S[x_max*y_min][np.abs(shape[0]-S[x_max*y_min].sum(axis=1)).min() == np.abs(shape[0]-S[x_max*y_min].sum(axis=1))][0] - S3 = S[x_max*y_max][np.abs(shape.sum()-S[x_max*y_max].sum(axis=1)).min() == np.abs(shape.sum()-S[x_max*y_max].sum(axis=1))][0] + S0 = S[x_min * y_min][np.abs(0 - S[x_min * y_min].sum(axis=1)).min() == np.abs(0 - S[x_min * y_min].sum(axis=1))][0] + S1 = S[x_min * y_max][np.abs(shape[1] - S[x_min * y_max].sum(axis=1)).min() == np.abs(shape[1] - S[x_min * y_max].sum(axis=1))][0] + S2 = S[x_max * y_min][np.abs(shape[0] - S[x_max * y_min].sum(axis=1)).min() == np.abs(shape[0] - S[x_max * y_min].sum(axis=1))][0] + S3 = S[x_max * y_max][np.abs(shape.sum() - S[x_max * y_max].sum(axis=1)).min() == np.abs(shape.sum() - S[x_max * y_max].sum(axis=1))][0] # Get the vertex of the biggest included rectangle if inside: f0 = np.max([S0[0], S1[0]]) diff --git a/package/lib/cross_correlation.py b/package/lib/cross_correlation.py index d963123..5613c15 100755 --- a/package/lib/cross_correlation.py +++ b/package/lib/cross_correlation.py @@ -1,6 +1,7 @@ """ Library functions for phase cross-correlation computation. """ + # Prefer FFTs via the new scipy.fft module when available (SciPy 1.4+) # Otherwise fall back to numpy.fft. # Like numpy 1.15+ scipy 1.3+ is also using pocketfft, but a newer @@ -13,8 +14,7 @@ except ImportError: import numpy as np -def _upsampled_dft(data, upsampled_region_size, upsample_factor=1, - axis_offsets=None): +def _upsampled_dft(data, upsampled_region_size, upsample_factor=1, axis_offsets=None): """ Upsampled DFT by matrix multiplication. 
This code is intended to provide the same result as if the following @@ -48,26 +48,27 @@ def _upsampled_dft(data, upsampled_region_size, upsample_factor=1, """ # if people pass in an integer, expand it to a list of equal-sized sections if not hasattr(upsampled_region_size, "__iter__"): - upsampled_region_size = [upsampled_region_size, ] * data.ndim + upsampled_region_size = [ + upsampled_region_size, + ] * data.ndim else: if len(upsampled_region_size) != data.ndim: - raise ValueError("shape of upsampled region sizes must be equal " - "to input data's number of dimensions.") + raise ValueError("shape of upsampled region sizes must be equal " "to input data's number of dimensions.") if axis_offsets is None: - axis_offsets = [0, ] * data.ndim + axis_offsets = [ + 0, + ] * data.ndim else: if len(axis_offsets) != data.ndim: - raise ValueError("number of axis offsets must be equal to input " - "data's number of dimensions.") + raise ValueError("number of axis offsets must be equal to input " "data's number of dimensions.") im2pi = 1j * 2 * np.pi dim_properties = list(zip(data.shape, upsampled_region_size, axis_offsets)) - for (n_items, ups_size, ax_offset) in dim_properties[::-1]: - kernel = ((np.arange(ups_size) - ax_offset)[:, None] - * fft.fftfreq(n_items, upsample_factor)) + for n_items, ups_size, ax_offset in dim_properties[::-1]: + kernel = (np.arange(ups_size) - ax_offset)[:, None] * fft.fftfreq(n_items, upsample_factor) kernel = np.exp(-im2pi * kernel) # Equivalent to: @@ -100,14 +101,11 @@ def _compute_error(cross_correlation_max, src_amp, target_amp): target_amp : float The normalized average image intensity of the target image """ - error = 1.0 - cross_correlation_max * cross_correlation_max.conj() /\ - (src_amp * target_amp) + error = 1.0 - cross_correlation_max * cross_correlation_max.conj() / (src_amp * target_amp) return np.sqrt(np.abs(error)) -def phase_cross_correlation(reference_image, moving_image, *, - upsample_factor=1, space="real", - return_error=True, overlap_ratio=0.3): +def phase_cross_correlation(reference_image, moving_image, *, upsample_factor=1, space="real", return_error=True, overlap_ratio=0.3): """ Efficient subpixel image translation registration by cross-correlation. This code gives the same precision as the FFT upsampled cross-correlation @@ -174,11 +172,11 @@ def phase_cross_correlation(reference_image, moving_image, *, raise ValueError("images must be same shape") # assume complex data is already in Fourier space - if space.lower() == 'fourier': + if space.lower() == "fourier": src_freq = reference_image target_freq = moving_image # real data needs to be fft'd. 
- elif space.lower() == 'real': + elif space.lower() == "real": src_freq = fft.fftn(reference_image) target_freq = fft.fftn(moving_image) else: @@ -190,8 +188,7 @@ def phase_cross_correlation(reference_image, moving_image, *, cross_correlation = fft.ifftn(image_product) # Locate maximum - maxima = np.unravel_index(np.argmax(np.abs(cross_correlation)), - cross_correlation.shape) + maxima = np.unravel_index(np.argmax(np.abs(cross_correlation)), cross_correlation.shape) midpoints = np.array([np.fix(axis_size / 2) for axis_size in shape]) shifts = np.stack(maxima).astype(np.float64) @@ -213,14 +210,10 @@ def phase_cross_correlation(reference_image, moving_image, *, dftshift = np.fix(upsampled_region_size / 2.0) upsample_factor = np.array(upsample_factor, dtype=np.float64) # Matrix multiply DFT around the current shift estimate - sample_region_offset = dftshift - shifts*upsample_factor - cross_correlation = _upsampled_dft(image_product.conj(), - upsampled_region_size, - upsample_factor, - sample_region_offset).conj() + sample_region_offset = dftshift - shifts * upsample_factor + cross_correlation = _upsampled_dft(image_product.conj(), upsampled_region_size, upsample_factor, sample_region_offset).conj() # Locate maximum and map back to original pixel grid - maxima = np.unravel_index(np.argmax(np.abs(cross_correlation)), - cross_correlation.shape) + maxima = np.unravel_index(np.argmax(np.abs(cross_correlation)), cross_correlation.shape) CCmax = cross_correlation[maxima] maxima = np.stack(maxima).astype(np.float64) - dftshift @@ -240,10 +233,8 @@ def phase_cross_correlation(reference_image, moving_image, *, if return_error: # Redirect user to masked_phase_cross_correlation if NaNs are observed if np.isnan(CCmax) or np.isnan(src_amp) or np.isnan(target_amp): - raise ValueError( - "NaN values found, please remove NaNs from your input data") + raise ValueError("NaN values found, please remove NaNs from your input data") - return shifts, _compute_error(CCmax, src_amp, target_amp), \ - _compute_phasediff(CCmax) + return shifts, _compute_error(CCmax, src_amp, target_amp), _compute_phasediff(CCmax) else: return shifts diff --git a/package/lib/deconvolve.py b/package/lib/deconvolve.py index 78417a4..f89eee5 100755 --- a/package/lib/deconvolve.py +++ b/package/lib/deconvolve.py @@ -28,8 +28,8 @@ prototypes : """ import numpy as np -from scipy.signal import convolve from astropy.io import fits +from scipy.signal import convolve def abs2(x): @@ -37,9 +37,9 @@ def abs2(x): if np.iscomplexobj(x): x_re = x.real x_im = x.imag - return x_re*x_re + x_im*x_im + return x_re * x_re + x_im * x_im else: - return x*x + return x * x def zeropad(arr, shape): @@ -53,7 +53,7 @@ def zeropad(arr, shape): diff = np.asarray(shape) - np.asarray(arr.shape) if diff.min() < 0: raise ValueError("output dimensions must be larger or equal input dimensions") - offset = diff//2 + offset = diff // 2 z = np.zeros(shape, dtype=arr.dtype) if rank == 1: i0 = offset[0] @@ -115,10 +115,10 @@ def zeropad(arr, shape): def gaussian2d(x, y, sigma): - return np.exp(-(x**2+y**2)/(2*sigma**2))/(2*np.pi*sigma**2) + return np.exp(-(x**2 + y**2) / (2 * sigma**2)) / (2 * np.pi * sigma**2) -def gaussian_psf(FWHM=1., shape=(5, 5)): +def gaussian_psf(FWHM=1.0, shape=(5, 5)): """ Define the gaussian Point-Spread-Function of chosen shape and FWHM. ---------- @@ -136,13 +136,13 @@ def gaussian_psf(FWHM=1., shape=(5, 5)): Kernel containing the weights of the desired gaussian PSF. 
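+
+    Note: the kernel standard deviation is derived from the FWHM below as
+    stdev = FWHM / (2 * sqrt(2 * ln(2))), i.e. roughly FWHM / 2.355.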
""" # Compute standard deviation from FWHM - stdev = FWHM/(2.*np.sqrt(2.*np.log(2.))) + stdev = FWHM / (2.0 * np.sqrt(2.0 * np.log(2.0))) # Create kernel of desired shape - x, y = np.meshgrid(np.arange(-shape[0]/2, shape[0]/2), np.arange(-shape[1]/2, shape[1]/2)) + x, y = np.meshgrid(np.arange(-shape[0] / 2, shape[0] / 2), np.arange(-shape[1] / 2, shape[1] / 2)) kernel = gaussian2d(x, y, stdev) - return kernel/kernel.sum() + return kernel / kernel.sum() def from_file_psf(filename): @@ -164,7 +164,7 @@ def from_file_psf(filename): if isinstance(psf, np.ndarray) or len(psf) != 2: raise ValueError("Invalid PSF image in PrimaryHDU at {0:s}".format(filename)) # Return the normalized Point Spread Function - kernel = psf/psf.max() + kernel = psf / psf.max() return kernel @@ -199,14 +199,14 @@ def wiener(image, psf, alpha=0.1, clip=True): ft_y = np.fft.fftn(im_deconv) ft_h = np.fft.fftn(np.fft.ifftshift(psf)) - ft_x = ft_h.conj()*ft_y / (abs2(ft_h) + alpha) + ft_x = ft_h.conj() * ft_y / (abs2(ft_h) + alpha) im_deconv = np.fft.ifftn(ft_x).real if clip: im_deconv[im_deconv > 1] = 1 im_deconv[im_deconv < -1] = -1 - return im_deconv/im_deconv.max() + return im_deconv / im_deconv.max() def van_cittert(image, psf, alpha=0.1, iterations=20, clip=True, filter_epsilon=None): @@ -241,12 +241,12 @@ def van_cittert(image, psf, alpha=0.1, iterations=20, clip=True, filter_epsilon= im_deconv = image.copy() for _ in range(iterations): - conv = convolve(im_deconv, psf, mode='same') + conv = convolve(im_deconv, psf, mode="same") if filter_epsilon: relative_blur = np.where(conv < filter_epsilon, 0, image - conv) else: relative_blur = image - conv - im_deconv += alpha*relative_blur + im_deconv += alpha * relative_blur if clip: im_deconv[im_deconv > 1] = 1 @@ -290,12 +290,12 @@ def richardson_lucy(image, psf, iterations=20, clip=True, filter_epsilon=None): psf_mirror = np.flip(psf) for _ in range(iterations): - conv = convolve(im_deconv, psf, mode='same') + conv = convolve(im_deconv, psf, mode="same") if filter_epsilon: relative_blur = np.where(conv < filter_epsilon, 0, image / conv) else: relative_blur = image / conv - im_deconv *= convolve(relative_blur, psf_mirror, mode='same') + im_deconv *= convolve(relative_blur, psf_mirror, mode="same") if clip: im_deconv[im_deconv > 1] = 1 @@ -335,12 +335,12 @@ def one_step_gradient(image, psf, iterations=20, clip=True, filter_epsilon=None) psf_mirror = np.flip(psf) for _ in range(iterations): - conv = convolve(im_deconv, psf, mode='same') + conv = convolve(im_deconv, psf, mode="same") if filter_epsilon: relative_blur = np.where(conv < filter_epsilon, 0, image - conv) else: relative_blur = image - conv - im_deconv += convolve(relative_blur, psf_mirror, mode='same') + im_deconv += convolve(relative_blur, psf_mirror, mode="same") if clip: im_deconv[im_deconv > 1] = 1 @@ -387,20 +387,20 @@ def conjgrad(image, psf, alpha=0.1, error=None, iterations=20): if error is None: wgt = np.ones(image.shape) else: - wgt = image/error + wgt = image / error wgt /= wgt.max() def W(x): """Define W operator : apply weights""" - return wgt*x + return wgt * x def H(x): """Define H operator : convolution with PSF""" - return np.fft.ifftn(ft_h*np.fft.fftn(x)).real + return np.fft.ifftn(ft_h * np.fft.fftn(x)).real def Ht(x): """Define Ht operator : transpose of H""" - return np.fft.ifftn(ft_h.conj()*np.fft.fftn(x)).real + return np.fft.ifftn(ft_h.conj() * np.fft.fftn(x)).real def DtD(x): """Returns the result of D'.D.x where D is a (multi-dimensional) @@ -444,7 +444,7 @@ def conjgrad(image, psf, 
alpha=0.1, error=None, iterations=20): def A(x): """Define symetric positive semi definite operator A""" - return Ht(W(H(x)))+alpha*DtD(x) + return Ht(W(H(x))) + alpha * DtD(x) # Define obtained vector A.x = b b = Ht(W(image)) @@ -458,7 +458,7 @@ def conjgrad(image, psf, alpha=0.1, error=None, iterations=20): r = np.copy(b) x = np.zeros(b.shape, dtype=b.dtype) rho = inner(r, r) - epsilon = np.max([0., 1e-5*np.sqrt(rho)]) + epsilon = np.max([0.0, 1e-5 * np.sqrt(rho)]) # Conjugate gradient iterations. beta = 0.0 @@ -476,26 +476,25 @@ def conjgrad(image, psf, alpha=0.1, error=None, iterations=20): if beta == 0.0: p = r else: - p = r + beta*p + p = r + beta * p # Make optimal step along search direction. q = A(p) gamma = inner(p, q) if gamma <= 0.0: raise ValueError("Operator A is not positive definite") - alpha = rho/gamma - x += alpha*p - r -= alpha*q + alpha = rho / gamma + x += alpha * p + r -= alpha * q rho_prev, rho = rho, inner(r, r) - beta = rho/rho_prev + beta = rho / rho_prev # Return normalized solution - im_deconv = x/x.max() + im_deconv = x / x.max() return im_deconv -def deconvolve_im(image, psf, alpha=0.1, error=None, iterations=20, clip=True, - filter_epsilon=None, algo='richardson'): +def deconvolve_im(image, psf, alpha=0.1, error=None, iterations=20, clip=True, filter_epsilon=None, algo="richardson"): """ Prepare an image for deconvolution using a chosen algorithm and return results. @@ -537,27 +536,23 @@ def deconvolve_im(image, psf, alpha=0.1, error=None, iterations=20, clip=True, """ # Normalize image to highest pixel value pxmax = image[np.isfinite(image)].max() - if pxmax == 0.: + if pxmax == 0.0: raise ValueError("Invalid image") - norm_image = image/pxmax + norm_image = image / pxmax # Deconvolve normalized image - if algo.lower() in ['wiener', 'wiener simple']: + if algo.lower() in ["wiener", "wiener simple"]: norm_deconv = wiener(image=norm_image, psf=psf, alpha=alpha, clip=clip) - elif algo.lower() in ['van-cittert', 'vancittert', 'cittert']: - norm_deconv = van_cittert(image=norm_image, psf=psf, alpha=alpha, - iterations=iterations, clip=clip, filter_epsilon=filter_epsilon) - elif algo.lower() in ['1grad', 'one_step_grad', 'one step grad']: - norm_deconv = one_step_gradient(image=norm_image, psf=psf, - iterations=iterations, clip=clip, filter_epsilon=filter_epsilon) - elif algo.lower() in ['conjgrad', 'conj_grad', 'conjugate gradient']: - norm_deconv = conjgrad(image=norm_image, psf=psf, alpha=alpha, - error=error, iterations=iterations) + elif algo.lower() in ["van-cittert", "vancittert", "cittert"]: + norm_deconv = van_cittert(image=norm_image, psf=psf, alpha=alpha, iterations=iterations, clip=clip, filter_epsilon=filter_epsilon) + elif algo.lower() in ["1grad", "one_step_grad", "one step grad"]: + norm_deconv = one_step_gradient(image=norm_image, psf=psf, iterations=iterations, clip=clip, filter_epsilon=filter_epsilon) + elif algo.lower() in ["conjgrad", "conj_grad", "conjugate gradient"]: + norm_deconv = conjgrad(image=norm_image, psf=psf, alpha=alpha, error=error, iterations=iterations) else: # Defaults to Richardson-Lucy - norm_deconv = richardson_lucy(image=norm_image, psf=psf, - iterations=iterations, clip=clip, filter_epsilon=filter_epsilon) + norm_deconv = richardson_lucy(image=norm_image, psf=psf, iterations=iterations, clip=clip, filter_epsilon=filter_epsilon) # Output deconvolved image with original pxmax value - im_deconv = pxmax*norm_deconv + im_deconv = pxmax * norm_deconv return im_deconv diff --git a/package/lib/fits.py b/package/lib/fits.py index 
811afa5..1506a29 100755 --- a/package/lib/fits.py +++ b/package/lib/fits.py @@ -9,11 +9,14 @@ prototypes : Save computed polarimetry parameters to a single fits file (and return HDUList) """ -import numpy as np from os.path import join as path_join + +import numpy as np from astropy.io import fits from astropy.wcs import WCS + from .convex_hull import clean_ROI +from .utils import wcs_PA def get_obs_data(infiles, data_folder="", compute_flux=False): @@ -36,59 +39,61 @@ def get_obs_data(infiles, data_folder="", compute_flux=False): headers : header list List of headers objects corresponding to each image in data_array. """ - data_array, headers = [], [] + data_array, headers, wcs_array = [], [], [] for i in range(len(infiles)): - with fits.open(path_join(data_folder, infiles[i])) as f: + with fits.open(path_join(data_folder, infiles[i]), mode="update") as f: headers.append(f[0].header) data_array.append(f[0].data) + wcs_array.append(WCS(header=f[0].header, fobj=f).celestial) + f.flush() data_array = np.array(data_array, dtype=np.double) # Prevent negative count value in imported data for i in range(len(data_array)): - data_array[i][data_array[i] < 0.] = 0. + data_array[i][data_array[i] < 0.0] = 0.0 # force WCS to convention PCi_ja unitary, cdelt in deg - for header in headers: - new_wcs = WCS(header).celestial.deepcopy() - if new_wcs.wcs.has_cd() or (new_wcs.wcs.cdelt[:2] == np.array([1., 1.])).all(): + for wcs, header in zip(wcs_array, headers): + new_wcs = wcs.deepcopy() + if new_wcs.wcs.has_cd() or (new_wcs.wcs.cdelt[:2] == np.array([1.0, 1.0])).all(): # Update WCS with relevant information if new_wcs.wcs.has_cd(): - old_cd = new_wcs.wcs.cd del new_wcs.wcs.cd - keys = list(new_wcs.to_header().keys())+['CD1_1', 'CD1_2', 'CD1_3', 'CD2_1', 'CD2_2', 'CD2_3', 'CD3_1', 'CD3_2', 'CD3_3'] + keys = list(new_wcs.to_header().keys()) + ["CD1_1", "CD1_2", "CD1_3", "CD2_1", "CD2_2", "CD2_3", "CD3_1", "CD3_2", "CD3_3"] for key in keys: header.remove(key, ignore_missing=True) - new_cdelt = np.linalg.eig(old_cd)[0] - elif (new_wcs.wcs.cdelt == np.array([1., 1.])).all() and \ - (new_wcs.array_shape in [(512, 512), (1024, 512), (512, 1024), (1024, 1024)]): - old_cd = new_wcs.wcs.pc - new_wcs.wcs.pc = np.dot(old_cd, np.diag(1./new_cdelt)) + new_cdelt = np.linalg.eigvals(wcs.wcs.cd) + new_cdelt.sort() + new_wcs.wcs.pc = wcs.wcs.cd.dot(np.diag(1.0 / new_cdelt)) new_wcs.wcs.cdelt = new_cdelt for key, val in new_wcs.to_header().items(): header[key] = val - # header['orientat'] = princ_angle(float(header['orientat'])) + try: + _ = header["ORIENTAT"] + except KeyError: + header["ORIENTAT"] = wcs_PA(new_wcs.wcs.pc[1, 0], np.diag(new_wcs.wcs.pc).mean()) # force WCS for POL60 to have same pixel size as POL0 and POL120 - is_pol60 = np.array([head['filtnam1'].lower() == 'pol60' for head in headers], dtype=bool) - cdelt = np.round(np.array([WCS(head).wcs.cdelt[:2] for head in headers]), 14) + is_pol60 = np.array([head["filtnam1"].lower() == "pol60" for head in headers], dtype=bool) + cdelt = np.round(np.array([WCS(head).wcs.cdelt[:2] for head in headers]), 10) if np.unique(cdelt[np.logical_not(is_pol60)], axis=0).size != 2: print(np.unique(cdelt[np.logical_not(is_pol60)], axis=0)) raise ValueError("Not all images have same pixel size") else: for i in np.arange(len(headers))[is_pol60]: - headers[i]['cdelt1'], headers[i]['cdelt2'] = np.unique(cdelt[np.logical_not(is_pol60)], axis=0)[0] + headers[i]["cdelt1"], headers[i]["cdelt2"] = np.unique(cdelt[np.logical_not(is_pol60)], axis=0)[0] if compute_flux: for i in 
range(len(infiles)): # Compute the flux in counts/sec - data_array[i] /= headers[i]['EXPTIME'] + data_array[i] /= headers[i]["EXPTIME"] return data_array, headers -def save_Stokes(I_stokes, Q_stokes, U_stokes, Stokes_cov, P, debiased_P, s_P, - s_P_P, PA, s_PA, s_PA_P, headers, data_mask, filename, data_folder="", - return_hdul=False): +def save_Stokes( + I_stokes, Q_stokes, U_stokes, Stokes_cov, P, debiased_P, s_P, s_P_P, PA, s_PA, s_PA_P, header_stokes, data_mask, filename, data_folder="", return_hdul=False +): """ Save computed polarimetry parameters to a single fits file, updating header accordingly. @@ -124,81 +129,90 @@ def save_Stokes(I_stokes, Q_stokes, U_stokes, Stokes_cov, P, debiased_P, s_P, Only returned if return_hdul is True. """ # Create new WCS object given the modified images - ref_header = headers[0] - exp_tot = np.array([header['exptime'] for header in headers]).sum() - new_wcs = WCS(ref_header).deepcopy() + new_wcs = WCS(header_stokes).deepcopy() if data_mask.shape != (1, 1): vertex = clean_ROI(data_mask) - shape = vertex[1::2]-vertex[0::2] + shape = vertex[1::2] - vertex[0::2] new_wcs.array_shape = shape new_wcs.wcs.crpix = np.array(new_wcs.wcs.crpix) - vertex[0::-2] header = new_wcs.to_header() - header['telescop'] = (ref_header['telescop'] if 'TELESCOP' in list(ref_header.keys()) else 'HST', 'telescope used to acquire data') - header['instrume'] = (ref_header['instrume'] if 'INSTRUME' in list(ref_header.keys()) else 'FOC', 'identifier for instrument used to acuire data') - header['photplam'] = (ref_header['photplam'], 'Pivot Wavelength') - header['photflam'] = (ref_header['photflam'], 'Inverse Sensitivity in DN/sec/cm**2/Angst') - header['exptot'] = (exp_tot, 'Total exposure time in sec') - header['proposid'] = (ref_header['proposid'], 'PEP proposal identifier for observation') - header['targname'] = (ref_header['targname'], 'Target name') - header['orientat'] = (ref_header['orientat'], 'Angle between North and the y-axis of the image') - header['filename'] = (filename, 'Original filename') - header['P_int'] = (ref_header['P_int'], 'Integrated polarization degree') - header['P_int_err'] = (ref_header['P_int_err'], 'Integrated polarization degree error') - header['PA_int'] = (ref_header['PA_int'], 'Integrated polarization angle') - header['PA_int_err'] = (ref_header['PA_int_err'], 'Integrated polarization angle error') + header["TELESCOP"] = (header_stokes["TELESCOP"] if "TELESCOP" in list(header_stokes.keys()) else "HST", "telescope used to acquire data") + header["INSTRUME"] = (header_stokes["INSTRUME"] if "INSTRUME" in list(header_stokes.keys()) else "FOC", "identifier for instrument used to acquire data") + header["PHOTPLAM"] = (header_stokes["PHOTPLAM"], "Pivot Wavelength") + header["PHOTFLAM"] = (header_stokes["PHOTFLAM"], "Inverse Sensitivity in DN/sec/cm**2/Angst") + header["EXPTIME"] = (header_stokes["EXPTIME"], "Total exposure time in sec") + header["PROPOSID"] = (header_stokes["PROPOSID"], "PEP proposal identifier for observation") + header["TARGNAME"] = (header_stokes["TARGNAME"], "Target name") + header["ORIENTAT"] = (header_stokes["ORIENTAT"], "Angle between North and the y-axis of the image") + header["FILENAME"] = (filename, "Original filename") + header["BKG_TYPE"] = (header_stokes["BKG_TYPE"], "Bkg estimation method used during reduction") + header["BKG_SUB"] = (header_stokes["BKG_SUB"], "Amount of bkg subtracted from images") + header["SMOOTH"] = (header_stokes["SMOOTH"] if "SMOOTH" in list(header_stokes.keys()) else "None", "Smoothing method used
during reduction") + header["SAMPLING"] = (header_stokes["SAMPLING"] if "SAMPLING" in list(header_stokes.keys()) else "None", "Resampling performed during reduction") + header["P_INT"] = (header_stokes["P_INT"], "Integrated polarization degree") + header["sP_INT"] = (header_stokes["sP_INT"], "Integrated polarization degree error") + header["PA_INT"] = (header_stokes["PA_INT"], "Integrated polarization angle") + header["sPA_INT"] = (header_stokes["sPA_INT"], "Integrated polarization angle error") # Crop Data to mask if data_mask.shape != (1, 1): - I_stokes = I_stokes[vertex[2]:vertex[3], vertex[0]:vertex[1]] - Q_stokes = Q_stokes[vertex[2]:vertex[3], vertex[0]:vertex[1]] - U_stokes = U_stokes[vertex[2]:vertex[3], vertex[0]:vertex[1]] - P = P[vertex[2]:vertex[3], vertex[0]:vertex[1]] - debiased_P = debiased_P[vertex[2]:vertex[3], vertex[0]:vertex[1]] - s_P = s_P[vertex[2]:vertex[3], vertex[0]:vertex[1]] - s_P_P = s_P_P[vertex[2]:vertex[3], vertex[0]:vertex[1]] - PA = PA[vertex[2]:vertex[3], vertex[0]:vertex[1]] - s_PA = s_PA[vertex[2]:vertex[3], vertex[0]:vertex[1]] - s_PA_P = s_PA_P[vertex[2]:vertex[3], vertex[0]:vertex[1]] + I_stokes = I_stokes[vertex[2] : vertex[3], vertex[0] : vertex[1]] + Q_stokes = Q_stokes[vertex[2] : vertex[3], vertex[0] : vertex[1]] + U_stokes = U_stokes[vertex[2] : vertex[3], vertex[0] : vertex[1]] + P = P[vertex[2] : vertex[3], vertex[0] : vertex[1]] + debiased_P = debiased_P[vertex[2] : vertex[3], vertex[0] : vertex[1]] + s_P = s_P[vertex[2] : vertex[3], vertex[0] : vertex[1]] + s_P_P = s_P_P[vertex[2] : vertex[3], vertex[0] : vertex[1]] + PA = PA[vertex[2] : vertex[3], vertex[0] : vertex[1]] + s_PA = s_PA[vertex[2] : vertex[3], vertex[0] : vertex[1]] + s_PA_P = s_PA_P[vertex[2] : vertex[3], vertex[0] : vertex[1]] new_Stokes_cov = np.zeros((*Stokes_cov.shape[:-2], *shape[::-1])) for i in range(3): for j in range(3): - Stokes_cov[i, j][(1-data_mask).astype(bool)] = 0. - new_Stokes_cov[i, j] = Stokes_cov[i, j][vertex[2]:vertex[3], vertex[0]:vertex[1]] + Stokes_cov[i, j][(1 - data_mask).astype(bool)] = 0.0 + new_Stokes_cov[i, j] = Stokes_cov[i, j][vertex[2] : vertex[3], vertex[0] : vertex[1]] Stokes_cov = new_Stokes_cov - data_mask = data_mask[vertex[2]:vertex[3], vertex[0]:vertex[1]] + data_mask = data_mask[vertex[2] : vertex[3], vertex[0] : vertex[1]] data_mask = data_mask.astype(float, copy=False) # Create HDUList object hdul = fits.HDUList([]) # Add I_stokes as PrimaryHDU - header['datatype'] = ('I_stokes', 'type of data stored in the HDU') - I_stokes[(1-data_mask).astype(bool)] = 0. 
+ header["datatype"] = ("I_stokes", "type of data stored in the HDU") + I_stokes[(1 - data_mask).astype(bool)] = 0.0 primary_hdu = fits.PrimaryHDU(data=I_stokes, header=header) - primary_hdu.name = 'I_stokes' + primary_hdu.name = "I_stokes" hdul.append(primary_hdu) # Add Q, U, Stokes_cov, P, s_P, PA, s_PA to the HDUList - for data, name in [[Q_stokes, 'Q_stokes'], [U_stokes, 'U_stokes'], - [Stokes_cov, 'IQU_cov_matrix'], [P, 'Pol_deg'], - [debiased_P, 'Pol_deg_debiased'], [s_P, 'Pol_deg_err'], - [s_P_P, 'Pol_deg_err_Poisson_noise'], [PA, 'Pol_ang'], - [s_PA, 'Pol_ang_err'], [s_PA_P, 'Pol_ang_err_Poisson_noise'], - [data_mask, 'Data_mask']]: + for data, name in [ + [Q_stokes, "Q_stokes"], + [U_stokes, "U_stokes"], + [Stokes_cov, "IQU_cov_matrix"], + [P, "Pol_deg"], + [debiased_P, "Pol_deg_debiased"], + [s_P, "Pol_deg_err"], + [s_P_P, "Pol_deg_err_Poisson_noise"], + [PA, "Pol_ang"], + [s_PA, "Pol_ang_err"], + [s_PA_P, "Pol_ang_err_Poisson_noise"], + [data_mask, "Data_mask"], + ]: hdu_header = header.copy() - hdu_header['datatype'] = name - if not name == 'IQU_cov_matrix': - data[(1-data_mask).astype(bool)] = 0. + hdu_header["datatype"] = name + if not name == "IQU_cov_matrix": + data[(1 - data_mask).astype(bool)] = 0.0 hdu = fits.ImageHDU(data=data, header=hdu_header) hdu.name = name hdul.append(hdu) # Save fits file to designated filepath - hdul.writeto(path_join(data_folder, filename+".fits"), overwrite=True) + hdul.writeto(path_join(data_folder, filename + ".fits"), overwrite=True) if return_hdul: return hdul diff --git a/package/lib/plots.py b/package/lib/plots.py index 6c4dad2..ae31ef5 100755 --- a/package/lib/plots.py +++ b/package/lib/plots.py @@ -43,24 +43,29 @@ prototypes : from copy import deepcopy from os.path import join as path_join -from astropy.wcs import WCS -from astropy.io import fits -from astropy.coordinates import SkyCoord -import matplotlib.pyplot as plt -from matplotlib.patches import Rectangle, Circle, FancyArrowPatch -from matplotlib.path import Path -from matplotlib.widgets import RectangleSelector, LassoSelector, Button, Slider, TextBox -from matplotlib.colors import LogNorm + import matplotlib.font_manager as fm import matplotlib.patheffects as pe -from mpl_toolkits.axes_grid1.anchored_artists import AnchoredSizeBar, AnchoredDirectionArrows +import matplotlib.pyplot as plt import numpy as np +from astropy.coordinates import SkyCoord +from astropy.io import fits +from astropy.wcs import WCS +from matplotlib.colors import LogNorm +from matplotlib.patches import Circle, FancyArrowPatch, Rectangle +from matplotlib.path import Path +from matplotlib.widgets import Button, LassoSelector, RectangleSelector, Slider, TextBox +from mpl_toolkits.axes_grid1.anchored_artists import ( + AnchoredDirectionArrows, + AnchoredSizeBar, +) + from scipy.ndimage import zoom as sc_zoom try: - from .utils import rot2D, princ_angle, sci_not + from .utils import princ_angle, rot2D, sci_not except ImportError: - from utils import rot2D, princ_angle, sci_not + from utils import princ_angle, rot2D, sci_not def adaptive_binning(I_stokes, Q_stokes, U_stokes, Stokes_cov): shape = I_stokes.shape @@ -155,19 +160,21 @@ def plot_obs(data_array, headers, rectangle=None, savename=None, plots_folder="" be saved. Not used if savename is None. Defaults to current folder. 
""" - plt.rcParams.update({'font.size': 10}) - nb_obs = np.max([np.sum([head['filtnam1'] == curr_pol for head in headers]) for curr_pol in ['POL0', 'POL60', 'POL120']]) + plt.rcParams.update({"font.size": 10}) + nb_obs = np.max([np.sum([head["filtnam1"] == curr_pol for head in headers]) for curr_pol in ["POL0", "POL60", "POL120"]]) shape = np.array((3, nb_obs)) + fig, ax = plt.subplots(shape[0], shape[1], figsize=(3*shape[1], 3*shape[0]), dpi=200, layout='constrained', sharex=True, sharey=True) + r_pol = dict(pol0=0, pol60=1, pol120=2) c_pol = dict(pol0=0, pol60=0, pol120=0) for i, (data, head) in enumerate(zip(data_array, headers)): - instr = head['instrume'] - rootname = head['rootname'] - exptime = head['exptime'] - filt = head['filtnam1'] - convert = head['photflam'] + instr = head["instrume"] + rootname = head["rootname"] + exptime = head["exptime"] + filt = head["filtnam1"] + convert = head["photflam"] r_ax, c_ax = r_pol[filt.lower()], c_pol[filt.lower()] c_pol[filt.lower()] += 1 if shape[1] != 1: @@ -175,11 +182,11 @@ def plot_obs(data_array, headers, rectangle=None, savename=None, plots_folder="" else: ax_curr = ax[r_ax] # plots - if ('vmin' in kwargs.keys() or 'vmax' in kwargs.keys()): - vmin, vmax = kwargs['vmin'], kwargs['vmax'] - del kwargs['vmin'], kwargs['vmax'] + if "vmin" in kwargs.keys() or "vmax" in kwargs.keys(): + vmin, vmax = kwargs["vmin"], kwargs["vmax"] + del kwargs["vmin"], kwargs["vmax"] else: - vmin, vmax = convert*data[data > 0.].min()/10., convert*data[data > 0.].max() + vmin, vmax = convert * data[data > 0.0].min() / 10.0, convert * data[data > 0.0].max() for key, value in [["cmap", [["cmap", "gray"]]], ["norm", [["norm", LogNorm(vmin, vmax)]]]]: try: _ = kwargs[key] @@ -187,26 +194,28 @@ def plot_obs(data_array, headers, rectangle=None, savename=None, plots_folder="" for key_i, val_i in value: kwargs[key_i] = val_i # im = ax[r_ax][c_ax].imshow(convert*data, origin='lower', **kwargs) - data[data*convert < vmin*10.] 
= vmin*10./convert - im = ax_curr.imshow(convert*data, origin='lower', **kwargs) + data[data * convert < vmin * 10.0] = vmin * 10.0 / convert + im = ax_curr.imshow(convert * data, origin="lower", **kwargs) if rectangle is not None: x, y, width, height, angle, color = rectangle[i] ax_curr.add_patch(Rectangle((x, y), width, height, angle=angle, edgecolor=color, fill=False)) # position of centroid - ax_curr.plot([data.shape[1]/2, data.shape[1]/2], [0, data.shape[0]-1], '--', lw=1, color='grey', alpha=0.5) - ax_curr.plot([0, data.shape[1]-1], [data.shape[1]/2, data.shape[1]/2], '--', lw=1, color='grey', alpha=0.5) - ax_curr.annotate(instr+":"+rootname, color='white', fontsize=5, xy=(0.01, 1.00), xycoords='axes fraction', verticalalignment='top', horizontalalignment='left') - ax_curr.annotate(filt, color='white', fontsize=10, xy=(0.01, 0.01), xycoords='axes fraction', verticalalignment='bottom', horizontalalignment='left') - ax_curr.annotate(exptime, color='white', fontsize=5, xy=(1.00, 0.01), xycoords='axes fraction', verticalalignment='bottom', horizontalalignment='right') + ax_curr.plot([data.shape[1] / 2, data.shape[1] / 2], [0, data.shape[0] - 1], "--", lw=1, color="grey", alpha=0.5) + ax_curr.plot([0, data.shape[1] - 1], [data.shape[1] / 2, data.shape[1] / 2], "--", lw=1, color="grey", alpha=0.5) + ax_curr.annotate( + instr + ":" + rootname, color="white", fontsize=5, xy=(0.01, 1.00), xycoords="axes fraction", verticalalignment="top", horizontalalignment="left" + ) + ax_curr.annotate(filt, color="white", fontsize=10, xy=(0.01, 0.01), xycoords="axes fraction", verticalalignment="bottom", horizontalalignment="left") + ax_curr.annotate(exptime, color="white", fontsize=5, xy=(1.00, 0.01), xycoords="axes fraction", verticalalignment="bottom", horizontalalignment="right") # fig.subplots_adjust(hspace=0.01, wspace=0.01, right=1.02) - fig.colorbar(im, ax=ax, location='right', shrink=0.75, aspect=50, pad=0.025, label=r"Flux [$ergs \cdot cm^{-2} \cdot s^{-1} \cdot \AA^{-1}$]") + fig.colorbar(im, ax=ax, location="right", shrink=0.75, aspect=50, pad=0.025, label=r"Flux [$ergs \cdot cm^{-2} \cdot s^{-1} \cdot \AA^{-1}$]") - if not (savename is None): + if savename is not None: # fig.suptitle(savename) - if not savename[-4:] in ['.png', '.jpg', '.pdf']: - savename += '.pdf' - fig.savefig(path_join(plots_folder, savename), bbox_inches='tight') + if savename[-4:] not in [".png", ".jpg", ".pdf"]: + savename += ".pdf" + fig.savefig(path_join(plots_folder, savename), bbox_inches="tight") plt.show() return 0 @@ -229,10 +238,10 @@ def plot_Stokes(Stokes, savename=None, plots_folder=""): Defaults to current folder. 
""" # Get data - stkI = Stokes['I_stokes'].data.copy() - stkQ = Stokes['Q_stokes'].data.copy() - stkU = Stokes['U_stokes'].data.copy() - data_mask = Stokes['Data_mask'].data.astype(bool) + stkI = Stokes["I_stokes"].data.copy() + stkQ = Stokes["Q_stokes"].data.copy() + stkU = Stokes["U_stokes"].data.copy() + data_mask = Stokes["Data_mask"].data.astype(bool) for dataset in [stkI, stkQ, stkU]: dataset[np.logical_not(data_mask)] = np.nan @@ -240,36 +249,52 @@ def plot_Stokes(Stokes, savename=None, plots_folder=""): wcs = WCS(Stokes[0]).deepcopy() # Plot figure - plt.rcParams.update({'font.size': 10}) - fig, (axI, axQ, axU) = plt.subplots(ncols=3, figsize=(20, 6), subplot_kw=dict(projection=wcs)) - fig.subplots_adjust(hspace=0, wspace=0.75, bottom=0.01, top=0.99, left=0.08, right=0.95) + plt.rcParams.update({"font.size": 14}) + ratiox = max(int(stkI.shape[1]/stkI.shape[0]),1) + ratioy = max(int(stkI.shape[0]/stkI.shape[1]),1) + fig, (axI, axQ, axU) = plt.subplots(ncols=3, figsize=(15*ratiox, 6*ratioy), subplot_kw=dict(projection=wcs)) + fig.subplots_adjust(hspace=0, wspace=0.50, bottom=0.01, top=0.99, left=0.07, right=0.97) fig.suptitle("I, Q, U Stokes parameters") - imI = axI.imshow(stkI, origin='lower', cmap='inferno') - fig.colorbar(imI, ax=axI, aspect=50, shrink=0.50, pad=0.025, label='counts/sec') - axI.set(xlabel="RA", ylabel='DEC', title=r"$I_{stokes}$") + imI = axI.imshow(stkI, origin="lower", cmap="inferno") + fig.colorbar(imI, ax=axI, aspect=30, shrink=0.50, pad=0.025, label="counts/sec") + axI.set(xlabel="RA", ylabel="DEC", title=r"$I_{stokes}$") - imQ = axQ.imshow(stkQ, origin='lower', cmap='inferno') - fig.colorbar(imQ, ax=axQ, aspect=50, shrink=0.50, pad=0.025, label='counts/sec') - axQ.set(xlabel="RA", ylabel='DEC', title=r"$Q_{stokes}$") + imQ = axQ.imshow(stkQ, origin="lower", cmap="inferno") + fig.colorbar(imQ, ax=axQ, aspect=30, shrink=0.50, pad=0.025, label="counts/sec") + axQ.set(xlabel="RA", ylabel="DEC", title=r"$Q_{stokes}$") - imU = axU.imshow(stkU, origin='lower', cmap='inferno') - fig.colorbar(imU, ax=axU, aspect=50, shrink=0.50, pad=0.025, label='counts/sec') - axU.set(xlabel="RA", ylabel='DEC', title=r"$U_{stokes}$") + imU = axU.imshow(stkU, origin="lower", cmap="inferno") + fig.colorbar(imU, ax=axU, aspect=30, shrink=0.50, pad=0.025, label="counts/sec") + axU.set(xlabel="RA", ylabel="DEC", title=r"$U_{stokes}$") - if not (savename is None): + if savename is not None: # fig.suptitle(savename+"_IQU") - if not savename[-4:] in ['.png', '.jpg', '.pdf']: - savename += '_IQU.pdf' + if savename[-4:] not in [".png", ".jpg", ".pdf"]: + savename += "_IQU.pdf" else: - savename = savename[:-4]+"_IQU"+savename[-4:] - fig.savefig(path_join(plots_folder, savename), bbox_inches='tight') + savename = savename[:-4] + "_IQU" + savename[-4:] + fig.savefig(path_join(plots_folder, savename), bbox_inches="tight") plt.show() return 0 -def polarization_map(Stokes, data_mask=None, rectangle=None, SNRp_cut=3., SNRi_cut=3., - flux_lim=None, step_vec=1, vec_scale=2., savename=None, plots_folder="", display="default", **kwargs): + +def polarization_map( + Stokes, + data_mask=None, + rectangle=None, + SNRp_cut=3.0, + SNRi_cut=3.0, + flux_lim=None, + step_vec=1, + scale_vec=2.0, + savename=None, + plots_folder="", + display="default", + **kwargs +): + """ Plots polarization map from Stokes HDUList. ---------- @@ -297,9 +322,9 @@ def polarization_map(Stokes, data_mask=None, rectangle=None, SNRp_cut=3., SNRi_c Number of steps between each displayed polarization vector. 
If step_vec = 2, every other vector will be displayed. Defaults to 1 - vec_scale : float, optional + scale_vec : float, optional Pixel length of displayed 100% polarization vector. - If vec_scale = 2, a vector of 50% polarization will be 1 pixel wide. + If scale_vec = 2, a vector of 50% polarization will be 1 pixel wide. Defaults to 2. savename : str, optional Name of the figure the map should be saved to. If None, the map won't @@ -320,6 +345,7 @@ def polarization_map(Stokes, data_mask=None, rectangle=None, SNRp_cut=3., SNRi_c The figure and ax created for interactive contour maps. """ # Get data + optimal_binning = kwargs.get('optimal_binning', False) stkI = Stokes['I_stokes'].data.copy() @@ -329,9 +355,10 @@ def polarization_map(Stokes, data_mask=None, rectangle=None, SNRp_cut=3., SNRi_c pol = Stokes['Pol_deg_debiased'].data.copy() pol_err = Stokes['Pol_deg_err'].data.copy() pang = Stokes['Pol_ang'].data.copy() + try: if data_mask is None: - data_mask = Stokes['Data_mask'].data.astype(bool).copy() + data_mask = Stokes["Data_mask"].data.astype(bool).copy() except KeyError: data_mask = np.ones(stkI.shape).astype(bool) @@ -341,23 +368,23 @@ def polarization_map(Stokes, data_mask=None, rectangle=None, SNRp_cut=3., SNRi_c for j in range(3): stk_cov[i][j][np.logical_not(data_mask)] = np.nan - pivot_wav = Stokes[0].header['photplam'] - convert_flux = Stokes[0].header['photflam'] + pivot_wav = Stokes[0].header["photplam"] + convert_flux = Stokes[0].header["photflam"] wcs = WCS(Stokes[0]).deepcopy() # Plot Stokes parameters map - if display is None or display.lower() in ['default']: + if display is None or display.lower() in ["default"]: plot_Stokes(Stokes, savename=savename, plots_folder=plots_folder) # Compute SNR and apply cuts poldata, pangdata = pol.copy(), pang.copy() maskP = pol_err > 0 - SNRp = np.ones(pol.shape)*np.nan - SNRp[maskP] = pol[maskP]/pol_err[maskP] + SNRp = np.ones(pol.shape) * np.nan + SNRp[maskP] = pol[maskP] / pol_err[maskP] maskI = stk_cov[0, 0] > 0 - SNRi = np.ones(stkI.shape)*np.nan - SNRi[maskI] = stkI[maskI]/np.sqrt(stk_cov[0, 0][maskI]) + SNRi = np.ones(stkI.shape) * np.nan + SNRi[maskI] = stkI[maskI] / np.sqrt(stk_cov[0, 0][maskI]) mask = (SNRp > SNRp_cut) * (SNRi > SNRi_cut) poldata[np.logical_not(mask)] = np.nan @@ -371,125 +398,152 @@ def polarization_map(Stokes, data_mask=None, rectangle=None, SNRp_cut=3., SNRi_c print("No pixel with polarization information above requested SNR.") # Plot the map - plt.rcParams.update({'font.size': 10}) + plt.rcParams.update({"font.size": 14}) plt.rcdefaults() - fig, ax = plt.subplots(figsize=(10, 10), layout='constrained', subplot_kw=dict(projection=wcs)) - ax.set(aspect='equal', fc='k') + ratiox = max(int(stkI.shape[1]/(stkI.shape[0])),1) + ratioy = max(int((stkI.shape[0])/stkI.shape[1]),1) + fig, ax = plt.subplots(figsize=(6*ratiox, 6*ratioy), layout="compressed", subplot_kw=dict(projection=wcs)) + ax.set(aspect="equal", fc="k", ylim=[-stkI.shape[0]*0.10,stkI.shape[0]*1.15]) # fig.subplots_adjust(hspace=0, wspace=0, left=0.102, right=1.02) - if display.lower() in ['intensity']: + # ax.coords.grid(True, color='white', ls='dotted', alpha=0.5) + ax.coords[0].set_axislabel("Right Ascension (J2000)") + ax.coords[0].set_axislabel_position("t") + ax.coords[0].set_ticklabel_position("t") + ax.set_ylabel("Declination (J2000)", labelpad=-1) + + if display.lower() in ["intensity"]: # If no display selected, show intensity map - display = 'i' + display = "i" if flux_lim is None: - if mask.sum() > 0.: - vmin, vmax = 
1./2.*np.median(np.sqrt(stk_cov[0, 0][mask])*convert_flux), np.max(stkI[stkI > 0.]*convert_flux) + if mask.sum() > 0.0: + vmin, vmax = 1.0 / 2.0 * np.median(np.sqrt(stk_cov[0, 0][mask]) * convert_flux), np.max(stkI[stkI > 0.0] * convert_flux) else: - vmin, vmax = 1./2.*np.median(np.sqrt(stk_cov[0, 0][stkI > 0.])*convert_flux), np.max(stkI[stkI > 0.]*convert_flux) + vmin, vmax = 1.0 / 2.0 * np.median(np.sqrt(stk_cov[0, 0][stkI > 0.0]) * convert_flux), np.max(stkI[stkI > 0.0] * convert_flux) else: vmin, vmax = flux_lim - im = ax.imshow(stkI*convert_flux, norm=LogNorm(vmin, vmax), aspect='equal', cmap='inferno', alpha=1.) - fig.colorbar(im, ax=ax, aspect=50, shrink=0.75, pad=0.025, label=r"$F_{\lambda}$ [$ergs \cdot cm^{-2} \cdot s^{-1} \cdot \AA^{-1}$]") - levelsI = np.array([0.8, 2., 5., 10., 20., 50.])/100.*vmax + im = ax.imshow(stkI * convert_flux, norm=LogNorm(vmin, vmax), aspect="equal", cmap="inferno", alpha=1.0) + fig.colorbar(im, ax=ax, aspect=30, shrink=0.75, pad=0.025, label=r"$F_{\lambda}$ [$ergs \cdot cm^{-2} \cdot s^{-1} \cdot \AA^{-1}$]") + levelsI = np.array([0.8, 2.0, 5.0, 10.0, 20.0, 50.0]) / 100.0 * vmax print("Total flux contour levels : ", levelsI) - ax.contour(stkI*convert_flux, levels=levelsI, colors='grey', linewidths=0.5) + ax.contour(stkI * convert_flux, levels=levelsI, colors="grey", linewidths=0.5) - elif display.lower() in ['pol_flux']: + elif display.lower() in ["pol_flux"]: # Display polarization flux - display = 'pf' + display = "pf" if flux_lim is None: - if mask.sum() > 0.: - vmin, vmax = 1./2.*np.median(np.sqrt(stk_cov[0, 0][mask])*convert_flux), np.max(stkI[stkI > 0.]*convert_flux) + if mask.sum() > 0.0: + vmin, vmax = 1.0 / 2.0 * np.median(np.sqrt(stk_cov[0, 0][mask]) * convert_flux), np.max(stkI[stkI > 0.0] * convert_flux) else: - vmin, vmax = 1./2.*np.median(np.sqrt(stk_cov[0, 0][stkI > 0.])*convert_flux), np.max(stkI[stkI > 0.]*convert_flux) + vmin, vmax = 1.0 / 2.0 * np.median(np.sqrt(stk_cov[0, 0][stkI > 0.0]) * convert_flux), np.max(stkI[stkI > 0.0] * convert_flux) else: vmin, vmax = flux_lim - im = ax.imshow(stkI*convert_flux*pol, norm=LogNorm(vmin, vmax), aspect='equal', cmap='inferno', alpha=1.) + im = ax.imshow(stkI * convert_flux * pol, norm=LogNorm(vmin, vmax), aspect="equal", cmap="inferno", alpha=1.0) fig.colorbar(im, ax=ax, aspect=50, shrink=0.75, pad=0.025, label=r"$F_{\lambda} \cdot P$ [$ergs \cdot cm^{-2} \cdot s^{-1} \cdot \AA^{-1}$]") - levelsPf = np.linspace(vmax*0.01, vmax*0.99, 10) + levelsPf = np.linspace(vmax * 0.01, vmax * 0.99, 10) print("Polarized flux contour levels : ", levelsPf) - ax.contour(stkI*convert_flux*pol, levels=levelsPf, colors='grey', linewidths=0.5) + ax.contour(stkI * convert_flux * pol, levels=levelsPf, colors="grey", linewidths=0.5) - elif display.lower() in ['p', 'pol', 'pol_deg']: + elif display.lower() in ["p", "pol", "pol_deg"]: # Display polarization degree map - display = 'p' - vmin, vmax = 0., 100. - im = ax.imshow(pol*100., vmin=vmin, vmax=vmax, aspect='equal', cmap='inferno', alpha=1.) + display = "p" + vmin, vmax = 0.0, 100.0 + im = ax.imshow(pol * 100.0, vmin=vmin, vmax=vmax, aspect="equal", cmap="inferno", alpha=1.0) fig.colorbar(im, ax=ax, aspect=50, shrink=0.75, pad=0.025, label=r"$P$ [%]") - elif display.lower() in ['pa', 'pang', 'pol_ang']: + elif display.lower() in ["pa", "pang", "pol_ang"]: # Display polarization angle map - display = 'pa' - vmin, vmax = 0., 180. - im = ax.imshow(princ_angle(pang), vmin=vmin, vmax=vmax, aspect='equal', cmap='inferno', alpha=1.)
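
The cuts applied in polarization_map keep a pixel only if it passes both the intensity and the polarization signal-to-noise thresholds before any vector is drawn. The same logic in isolation; snr_mask is an illustrative name:

import numpy as np

def snr_mask(stkI, stk_cov, pol, pol_err, snri_cut=3.0, snrp_cut=3.0):
    # A pixel survives only if it passes both the intensity cut I / sigma_I
    # and the polarization cut P / sigma_P, as in the hunks above.
    snr_i = np.full(stkI.shape, np.nan)
    good_i = stk_cov[0, 0] > 0
    snr_i[good_i] = stkI[good_i] / np.sqrt(stk_cov[0, 0][good_i])
    snr_p = np.full(pol.shape, np.nan)
    good_p = pol_err > 0
    snr_p[good_p] = pol[good_p] / pol_err[good_p]
    return (snr_i > snri_cut) & (snr_p > snrp_cut)
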
+ display = "pa" + vmin, vmax = 0.0, 180.0 + im = ax.imshow(princ_angle(pang), vmin=vmin, vmax=vmax, aspect="equal", cmap="inferno", alpha=1.0) fig.colorbar(im, ax=ax, aspect=50, shrink=0.75, pad=0.025, label=r"$\theta_P$ [°]") - elif display.lower() in ['s_p', 'pol_err', 'pol_deg_err']: + elif display.lower() in ["s_p", "pol_err", "pol_deg_err"]: # Display polarization degree error map - display = 's_p' + display = "s_p" if (SNRp > SNRp_cut).any(): - vmin, vmax = 0., np.max([pol_err[SNRp > SNRp_cut].max(), 1.])*100. - im = ax.imshow(pol_err*100., vmin=vmin, vmax=vmax, aspect='equal', cmap='inferno_r', alpha=1.) + vmin, vmax = 0.0, np.max([pol_err[SNRp > SNRp_cut].max(), 1.0]) * 100.0 + im = ax.imshow(pol_err * 100.0, vmin=vmin, vmax=vmax, aspect="equal", cmap="inferno_r", alpha=1.0) else: - vmin, vmax = 0., 100. - im = ax.imshow(pol_err*100., vmin=vmin, vmax=vmax, aspect='equal', cmap='inferno_r', alpha=1.) + vmin, vmax = 0.0, 100.0 + im = ax.imshow(pol_err * 100.0, vmin=vmin, vmax=vmax, aspect="equal", cmap="inferno_r", alpha=1.0) fig.colorbar(im, ax=ax, aspect=50, shrink=0.75, pad=0.025, label=r"$\sigma_P$ [%]") - elif display.lower() in ['s_i', 'i_err']: + elif display.lower() in ["s_i", "i_err"]: # Display intensity error map - display = 's_i' + display = "s_i" if (SNRi > SNRi_cut).any(): - vmin, vmax = 1./2.*np.median(np.sqrt(stk_cov[0, 0][stk_cov[0, 0] > 0.]) * - convert_flux), np.max(np.sqrt(stk_cov[0, 0][stk_cov[0, 0] > 0.])*convert_flux) - im = ax.imshow(np.sqrt(stk_cov[0, 0])*convert_flux, norm=LogNorm(vmin, vmax), aspect='equal', cmap='inferno_r', alpha=1.) + + vmin, vmax = ( + 1.0 / 2.0 * np.median(np.sqrt(stk_cov[0, 0][stk_cov[0, 0] > 0.0]) * convert_flux), + np.max(np.sqrt(stk_cov[0, 0][stk_cov[0, 0] > 0.0]) * convert_flux), + ) + im = ax.imshow(np.sqrt(stk_cov[0, 0]) * convert_flux, norm=LogNorm(vmin, vmax), aspect="equal", cmap="inferno_r", alpha=1.0) + else: - im = ax.imshow(np.sqrt(stk_cov[0, 0])*convert_flux, aspect='equal', cmap='inferno', alpha=1.) + im = ax.imshow(np.sqrt(stk_cov[0, 0]) * convert_flux, aspect="equal", cmap="inferno", alpha=1.0) fig.colorbar(im, ax=ax, aspect=50, shrink=0.75, pad=0.025, label=r"$\sigma_I$ [$ergs \cdot cm^{-2} \cdot s^{-1} \cdot \AA^{-1}$]") - elif display.lower() in ['snr', 'snri']: + elif display.lower() in ["snr", "snri"]: # Display I_stokes signal-to-noise map - display = 'snri' - vmin, vmax = 0., np.max(SNRi[np.isfinite(SNRi)]) - if vmax*0.99 > SNRi_cut: - im = ax.imshow(SNRi, vmin=vmin, vmax=vmax, aspect='equal', cmap='inferno', alpha=1.) - levelsSNRi = np.linspace(SNRi_cut, vmax*0.99, 5) + display = "snri" + vmin, vmax = 0.0, np.max(SNRi[np.isfinite(SNRi)]) + if vmax * 0.99 > SNRi_cut: + im = ax.imshow(SNRi, vmin=vmin, vmax=vmax, aspect="equal", cmap="inferno", alpha=1.0) + levelsSNRi = np.linspace(SNRi_cut, vmax * 0.99, 5) print("SNRi contour levels : ", levelsSNRi) - ax.contour(SNRi, levels=levelsSNRi, colors='grey', linewidths=0.5) + ax.contour(SNRi, levels=levelsSNRi, colors="grey", linewidths=0.5) else: - im = ax.imshow(SNRi, aspect='equal', cmap='inferno', alpha=1.) + im = ax.imshow(SNRi, aspect="equal", cmap="inferno", alpha=1.0) fig.colorbar(im, ax=ax, aspect=50, shrink=0.75, pad=0.025, label=r"$I_{Stokes}/\sigma_{I}$") - elif display.lower() in ['snrp']: + elif display.lower() in ["snrp"]: # Display polarization degree signal-to-noise map - display = 'snrp' - vmin, vmax = 0., np.max(SNRp[np.isfinite(SNRp)]) - if vmax*0.99 > SNRp_cut: - im = ax.imshow(SNRp, vmin=vmin, vmax=vmax, aspect='equal', cmap='inferno', alpha=1.) 
- levelsSNRp = np.linspace(SNRp_cut, vmax*0.99, 5) + display = "snrp" + vmin, vmax = 0.0, np.max(SNRp[np.isfinite(SNRp)]) + if vmax * 0.99 > SNRp_cut: + im = ax.imshow(SNRp, vmin=vmin, vmax=vmax, aspect="equal", cmap="inferno", alpha=1.0) + levelsSNRp = np.linspace(SNRp_cut, vmax * 0.99, 5) print("SNRp contour levels : ", levelsSNRp) - ax.contour(SNRp, levels=levelsSNRp, colors='grey', linewidths=0.5) + ax.contour(SNRp, levels=levelsSNRp, colors="grey", linewidths=0.5) else: - im = ax.imshow(SNRp, aspect='equal', cmap='inferno', alpha=1.) + im = ax.imshow(SNRp, aspect="equal", cmap="inferno", alpha=1.0) fig.colorbar(im, ax=ax, aspect=50, shrink=0.75, pad=0.025, label=r"$P/\sigma_{P}$") else: # Defaults to intensity map - if mask.sum() > 0.: - vmin, vmax = 1.*np.mean(np.sqrt(stk_cov[0, 0][mask])*convert_flux), np.max(stkI[stkI > 0.]*convert_flux) + if mask.sum() > 0.0: + vmin, vmax = 1.0 * np.mean(np.sqrt(stk_cov[0, 0][mask]) * convert_flux), np.max(stkI[stkI > 0.0] * convert_flux) else: - vmin, vmax = 1.*np.mean(np.sqrt(stk_cov[0, 0][stkI > 0.])*convert_flux), np.max(stkI[stkI > 0.]*convert_flux) - im = ax.imshow(stkI*convert_flux, norm=LogNorm(vmin, vmax), aspect='equal', cmap='inferno', alpha=1.) + vmin, vmax = 1.0 * np.mean(np.sqrt(stk_cov[0, 0][stkI > 0.0]) * convert_flux), np.max(stkI[stkI > 0.0] * convert_flux) + im = ax.imshow(stkI * convert_flux, norm=LogNorm(vmin, vmax), aspect="equal", cmap="inferno", alpha=1.0) fig.colorbar(im, ax=ax, aspect=50, shrink=0.75, pad=0.025, label=r"$F_{\lambda}$ [$ergs \cdot cm^{-2} \cdot s^{-1} \cdot \AA$]") # Get integrated values from header I_diluted = stkI[data_mask].sum() I_diluted_err = np.sqrt(np.sum(stk_cov[0, 0][data_mask])) - P_diluted = Stokes[0].header['P_int'] - P_diluted_err = Stokes[0].header['P_int_err'] - PA_diluted = Stokes[0].header['PA_int'] - PA_diluted_err = Stokes[0].header['PA_int_err'] + P_diluted = Stokes[0].header["P_int"] + P_diluted_err = Stokes[0].header["sP_int"] + PA_diluted = Stokes[0].header["PA_int"] + PA_diluted_err = Stokes[0].header["sPA_int"] - plt.rcParams.update({'font.size': 12}) - px_size = wcs.wcs.get_cdelt()[0]*3600. - px_sc = AnchoredSizeBar(ax.transData, 1./px_size, '1 arcsec', 3, pad=0.5, sep=5, borderpad=0.5, frameon=False, size_vertical=0.005, color='w') - north_dir = AnchoredDirectionArrows(ax.transAxes, "E", "N", length=-0.08, fontsize=0.025, loc=1, aspect_ratio=-1, sep_y=0.01, sep_x=0.01, back_length=0., head_length=10., head_width=10., - angle=-Stokes[0].header['orientat'], text_props={'ec': 'k', 'fc': 'w', 'alpha': 1, 'lw': -0.2}, arrow_props={'ec': 'k', 'fc': 'w', 'alpha': 1, 'lw': 1}) + plt.rcParams.update({"font.size": 10}) + px_size = wcs.wcs.get_cdelt()[0] * 3600.0 + px_sc = AnchoredSizeBar(ax.transData, 1.0 / px_size, "1 arcsec", 3, pad=0.25, sep=5, borderpad=0.25, frameon=False, size_vertical=0.005, color="w") + north_dir = AnchoredDirectionArrows( + ax.transAxes, + "E", + "N", + length=-0.05, + fontsize=0.02, + loc=1, + aspect_ratio=-(stkI.shape[1]/(stkI.shape[0]*1.25)), + sep_y=0.01, + sep_x=0.01, + back_length=0.0, + head_length=10.0, + head_width=10.0, + angle=-Stokes[0].header["orientat"], + text_props={"ec": "k", "fc": "w", "alpha": 1, "lw": 0.4}, + arrow_props={"ec": "k", "fc": "w", "alpha": 1, "lw": 1}, + ) - if display.lower() in ['i', 's_i', 'snri', 'pf', 'p', 'pa', 's_p', 'snrp']: + if display.lower() in ["i", "s_i", "snri", "pf", "p", "pa", "s_p", "snrp"]: if step_vec == 0: - poldata[np.isfinite(poldata)] = 1./2. 
+ poldata[np.isfinite(poldata)] = 1.0 / 2.0 step_vec = 1 vec_scale = 2. # X, Y = np.meshgrid(np.arange(stkI.shape[1]), np.arange(stkI.shape[0])) @@ -499,36 +553,53 @@ def polarization_map(Stokes, data_mask=None, rectangle=None, SNRp_cut=3., SNRi_c plot_quiver(ax, stkI, stkQ, stkU, stk_cov, poldata, pangdata, step_vec=step_vec, vec_scale=vec_scale, optimal_binning=optimal_binning) pol_sc = AnchoredSizeBar(ax.transData, vec_scale, r"$P$= 100 %", 4, pad=0.5, sep=5, borderpad=0.5, frameon=False, size_vertical=0.005, color='w') + ax.add_artist(pol_sc) ax.add_artist(px_sc) ax.add_artist(north_dir) - ax.annotate(r"$F_{{\lambda}}^{{int}}$({0:.0f} $\AA$) = {1} $ergs \cdot cm^{{-2}} \cdot s^{{-1}} \cdot \AA^{{-1}}$".format(pivot_wav, sci_not(I_diluted*convert_flux, I_diluted_err*convert_flux, 2))+"\n"+r"$P^{{int}}$ = {0:.1f} $\pm$ {1:.1f} %".format(P_diluted*100., P_diluted_err * - 100.)+"\n"+r"$\theta_{{P}}^{{int}}$ = {0:.1f} $\pm$ {1:.1f} °".format(PA_diluted, PA_diluted_err), color='white', xy=(0.01, 1.00), xycoords='axes fraction', path_effects=[pe.withStroke(linewidth=0.5, foreground='k')], verticalalignment='top', horizontalalignment='left') + ax.annotate( + r"$F_{{\lambda}}^{{int}}$({0:.0f} $\AA$) = {1} $ergs \cdot cm^{{-2}} \cdot s^{{-1}} \cdot \AA^{{-1}}$".format( + pivot_wav, sci_not(I_diluted * convert_flux, I_diluted_err * convert_flux, 2) + ) + + "\n" + + r"$P^{{int}}$ = {0:.1f} $\pm$ {1:.1f} %".format(P_diluted * 100.0, P_diluted_err * 100.0) + + "\n" + + r"$\theta_{{P}}^{{int}}$ = {0:.1f} $\pm$ {1:.1f} °".format(PA_diluted, PA_diluted_err), + color="white", + xy=(0.01, 1.00), + xycoords="axes fraction", + path_effects=[pe.withStroke(linewidth=0.5, foreground="k")], + verticalalignment="top", + horizontalalignment="left", + ) else: - if display.lower() == 'default': + if display.lower() == "default": ax.add_artist(px_sc) ax.add_artist(north_dir) - ax.annotate(r"$F_{{\lambda}}^{{int}}$({0:.0f} $\AA$) = {1} $ergs \cdot cm^{{-2}} \cdot s^{{-1}} \cdot \AA^{{-1}}$".format(pivot_wav, sci_not(I_diluted*convert_flux, I_diluted_err*convert_flux, 2)), - color='white', xy=(0.01, 1.00), xycoords='axes fraction', path_effects=[pe.withStroke(linewidth=0.5, foreground='k')], verticalalignment='top', horizontalalignment='left') + ax.annotate( + r"$F_{{\lambda}}^{{int}}$({0:.0f} $\AA$) = {1} $ergs \cdot cm^{{-2}} \cdot s^{{-1}} \cdot \AA^{{-1}}$".format( + pivot_wav, sci_not(I_diluted * convert_flux, I_diluted_err * convert_flux, 2) + ), + color="white", + xy=(0.01, 1.00), + xycoords="axes fraction", + path_effects=[pe.withStroke(linewidth=0.5, foreground="k")], + verticalalignment="top", + horizontalalignment="left", + ) # Display instrument FOV - if not (rectangle is None): + if rectangle is not None: x, y, width, height, angle, color = rectangle - x, y = np.array([x, y]) - np.array(stkI.shape)/2. 
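
Note on the rename: the new signature and docstring above use scale_vec, yet this part of polarization_map still reads vec_scale in the plot_quiver and AnchoredSizeBar calls, where the name is only bound when step_vec == 0. A consistent version of the fragment could look as follows, assuming plot_quiver keeps its vec_scale keyword (its definition is not part of this diff); this is a hypothetical fragment for illustration, not a runnable unit:

# Hypothetical fix sketch inside polarization_map():
if step_vec == 0:
    poldata[np.isfinite(poldata)] = 1.0 / 2.0
    step_vec = 1
    scale_vec = 2.0
plot_quiver(ax, stkI, stkQ, stkU, stk_cov, poldata, pangdata, step_vec=step_vec, vec_scale=scale_vec, optimal_binning=optimal_binning)
pol_sc = AnchoredSizeBar(ax.transData, scale_vec, r"$P$= 100 %", 4, pad=0.5, sep=5, borderpad=0.5, frameon=False, size_vertical=0.005, color="w")
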
- ax.add_patch(Rectangle((x, y), width, height, angle=angle, - edgecolor=color, fill=False)) - # ax.coords.grid(True, color='white', ls='dotted', alpha=0.5) - ax.coords[0].set_axislabel('Right Ascension (J2000)') - ax.coords[0].set_axislabel_position('t') - ax.coords[0].set_ticklabel_position('t') - ax.set_ylabel('Declination (J2000)', labelpad=-1) + x, y = np.array([x, y]) - np.array(stkI.shape) / 2.0 + ax.add_patch(Rectangle((x, y), width, height, angle=angle, edgecolor=color, fill=False)) if savename is not None: - if savename[-4:] not in ['.png', '.jpg', '.pdf']: - savename += '.pdf' - fig.savefig(path_join(plots_folder, savename), bbox_inches='tight', dpi=200) + if savename[-4:] not in [".png", ".jpg", ".pdf"]: + savename += ".pdf" + fig.savefig(path_join(plots_folder, savename), bbox_inches="tight", dpi=200) plt.show() return fig, ax @@ -544,8 +615,8 @@ class align_maps(object): self.map = map self.other = other_map - self.map_path = self.map.fileinfo(0)['filename'] - self.other_path = self.other.fileinfo(0)['filename'] + self.map_path = self.map.fileinfo(0)["filename"] + self.other_path = self.other.fileinfo(0)["filename"] self.map_header = fits.getheader(self.map_path) self.other_header = fits.getheader(self.other_path) @@ -564,16 +635,27 @@ class align_maps(object): elif len(self.other_data.shape) == 3: self.other_data = self.other_data[0] - self.map_convert, self.map_unit = (float(self.map_header['photflam']), r"$ergs \cdot cm^{-2} \cdot s^{-1} \cdot \AA^{-1}$") if "PHOTFLAM" in list( - self.map_header.keys()) else (1., self.map_header['bunit'] if 'BUNIT' in list(self.map_header.keys()) else "Arbitray Units") - self.other_convert, self.other_unit = (float(self.other_header['photflam']), r"$ergs \cdot cm^{-2} \cdot s^{-1} \cdot \AA^{-1}$") if "PHOTFLAM" in list( - self.other_header.keys()) else (1., self.other_header['bunit'] if 'BUNIT' in list(self.other_header.keys()) else "Arbitray Units") - self.map_observer = "/".join([self.map_header['telescop'], self.map_header['instrume']] - ) if "INSTRUME" in list(self.map_header.keys()) else self.map_header['telescop'] - self.other_observer = "/".join([self.other_header['telescop'], self.other_header['instrume']] - ) if "INSTRUME" in list(self.other_header.keys()) else self.other_header['telescop'] + self.map_convert, self.map_unit = ( + (float(self.map_header["photflam"]), r"$ergs \cdot cm^{-2} \cdot s^{-1} \cdot \AA^{-1}$") + if "PHOTFLAM" in list(self.map_header.keys()) + else (1.0, self.map_header["bunit"] if "BUNIT" in list(self.map_header.keys()) else "Arbitrary Units") + ) + self.other_convert, self.other_unit = ( + (float(self.other_header["photflam"]), r"$ergs \cdot cm^{-2} \cdot s^{-1} \cdot \AA^{-1}$") + if "PHOTFLAM" in list(self.other_header.keys()) + else (1.0, self.other_header["bunit"] if "BUNIT" in list(self.other_header.keys()) else "Arbitrary Units") + ) + self.map_observer = ( + "/".join([self.map_header["telescop"], self.map_header["instrume"]]) if "INSTRUME" in list(self.map_header.keys()) else self.map_header["telescop"] + ) + self.other_observer = ( + "/".join([self.other_header["telescop"], self.other_header["instrume"]]) + if "INSTRUME" in list(self.other_header.keys()) + else self.other_header["telescop"] + ) + + plt.rcParams.update({"font.size": 10}) - plt.rcParams.update({'font.size': 10}) fontprops = fm.FontProperties(size=16) self.fig_align = plt.figure(figsize=(20, 10)) self.map_ax = self.fig_align.add_subplot(121, projection=self.map_wcs) @@ -581,66 +663,147 @@ class align_maps(object): # Plot the UV map
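
align_maps picks its display scaling above with a PHOTFLAM-first fallback chain; condensed into one helper for reference (flux_conversion is an illustrative name, not part of the package):

def flux_conversion(header):
    # PHOTFLAM (HST inverse sensitivity) wins, then BUNIT, then a generic
    # label; the same fallback chain as align_maps above.
    if "PHOTFLAM" in header:
        return float(header["PHOTFLAM"]), r"$ergs \cdot cm^{-2} \cdot s^{-1} \cdot \AA^{-1}$"
    return 1.0, header.get("BUNIT", "Arbitrary Units")

# works on an astropy Header or a plain dict:
# convert, unit = flux_conversion({"BUNIT": "Jy/beam"})
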
other_kwargs = deepcopy(kwargs) - vmin, vmax = self.map_data[self.map_data > 0.].max()/1e3*self.map_convert, self.map_data[self.map_data > 0.].max()*self.map_convert + vmin, vmax = self.map_data[self.map_data > 0.0].max() / 1e3 * self.map_convert, self.map_data[self.map_data > 0.0].max() * self.map_convert for key, value in [["cmap", [["cmap", "inferno"]]], ["norm", [["norm", LogNorm(vmin, vmax)]]]]: try: _ = kwargs[key] except KeyError: for key_i, val_i in value: kwargs[key_i] = val_i - self.map_ax.imshow(self.map_data*self.map_convert, aspect='equal', **kwargs) + self.map_ax.imshow(self.map_data * self.map_convert, aspect="equal", **kwargs) - if kwargs['cmap'] in ['inferno', 'magma', 'Greys_r', 'binary_r', 'gist_yarg_r', 'gist_gray', 'gray', 'bone', 'pink', 'hot', 'afmhot', 'gist_heat', 'copper', 'gist_earth', 'gist_stern', 'gnuplot', 'gnuplot2', 'CMRmap', 'cubehelix', 'nipy_spectral', 'gist_ncar', 'viridis']: - self.map_ax.set_facecolor('black') - self.other_ax.set_facecolor('black') + if kwargs["cmap"] in [ + "inferno", + "magma", + "Greys_r", + "binary_r", + "gist_yarg_r", + "gist_gray", + "gray", + "bone", + "pink", + "hot", + "afmhot", + "gist_heat", + "copper", + "gist_earth", + "gist_stern", + "gnuplot", + "gnuplot2", + "CMRmap", + "cubehelix", + "nipy_spectral", + "gist_ncar", + "viridis", + ]: + self.map_ax.set_facecolor("black") + self.other_ax.set_facecolor("black") font_color = "white" else: - self.map_ax.set_facecolor('white') - self.other_ax.set_facecolor('white') + self.map_ax.set_facecolor("white") + self.other_ax.set_facecolor("white") font_color = "black" - px_size1 = self.map_wcs.wcs.get_cdelt()[0]*3600. - px_sc1 = AnchoredSizeBar(self.map_ax.transData, 1./px_size1, '1 arcsec', 3, pad=0.5, sep=5, borderpad=0.5, - frameon=False, size_vertical=0.005, color=font_color, fontproperties=fontprops) + px_size1 = self.map_wcs.wcs.get_cdelt()[0] * 3600.0 + px_sc1 = AnchoredSizeBar( + self.map_ax.transData, + 1.0 / px_size1, + "1 arcsec", + 3, + pad=0.5, + sep=5, + borderpad=0.5, + frameon=False, + size_vertical=0.005, + color=font_color, + fontproperties=fontprops, + ) self.map_ax.add_artist(px_sc1) - if 'PHOTPLAM' in list(self.map_header.keys()): - self.map_ax.annotate(r"$\lambda$ = {0:.0f} $\AA$".format(self.map_header['photplam']), color=font_color, fontsize=12, xy=( - 0.01, 0.93), xycoords='axes fraction', path_effects=[pe.withStroke(linewidth=0.5, foreground='k')]) - if 'ORIENTAT' in list(self.map_header.keys()): - north_dir1 = AnchoredDirectionArrows(self.map_ax.transAxes, "E", "N", length=-0.08, fontsize=0.03, loc=1, aspect_ratio=-1, sep_y=0.01, - sep_x=0.01, angle=-self.map_header['orientat'], color=font_color, arrow_props={'ec': 'k', 'fc': 'w', 'alpha': 1, 'lw': 0.5}) + if "PHOTPLAM" in list(self.map_header.keys()): + self.map_ax.annotate( + r"$\lambda$ = {0:.0f} $\AA$".format(self.map_header["photplam"]), + color=font_color, + fontsize=12, + xy=(0.01, 0.93), + xycoords="axes fraction", + path_effects=[pe.withStroke(linewidth=0.5, foreground="k")], + ) + if "ORIENTAT" in list(self.map_header.keys()): + north_dir1 = AnchoredDirectionArrows( + self.map_ax.transAxes, + "E", + "N", + length=-0.08, + fontsize=0.03, + loc=1, + aspect_ratio=-(self.map_data.shape[1]/self.map_data.shape[0]), + sep_y=0.01, + sep_x=0.01, + angle=-self.map_header["orientat"], + color=font_color, + arrow_props={"ec": "k", "fc": "w", "alpha": 1, "lw": 0.5}, + ) self.map_ax.add_artist(north_dir1) - self.cr_map, = self.map_ax.plot(*(self.map_wcs.wcs.crpix-(1., 1.)), 'r+') + (self.cr_map,) = 
self.map_ax.plot(*(self.map_wcs.wcs.crpix - (1.0, 1.0)), "r+") self.map_ax.set_title("{0:s} observation\nClick on selected point of reference.".format(self.map_observer)) self.map_ax.set_xlabel(label="Right Ascension (J2000)") self.map_ax.set_ylabel(label="Declination (J2000)", labelpad=-1) # Plot the other map - vmin, vmax = self.other_data[self.other_data > 0.].max()/1e3*self.other_convert, self.other_data[self.other_data > 0.].max()*self.other_convert + vmin, vmax = self.other_data[self.other_data > 0.0].max() / 1e3 * self.other_convert, self.other_data[self.other_data > 0.0].max() * self.other_convert for key, value in [["cmap", [["cmap", "inferno"]]], ["norm", [["norm", LogNorm(vmin, vmax)]]]]: try: _ = other_kwargs[key] except KeyError: for key_i, val_i in value: other_kwargs[key_i] = val_i - self.other_ax.imshow(self.other_data*self.other_convert, aspect='equal', **other_kwargs) + self.other_ax.imshow(self.other_data * self.other_convert, aspect="equal", **other_kwargs) - px_size2 = self.other_wcs.wcs.get_cdelt()[0]*3600. - px_sc2 = AnchoredSizeBar(self.other_ax.transData, 1./px_size2, '1 arcsec', 3, pad=0.5, sep=5, borderpad=0.5, - frameon=False, size_vertical=0.005, color=font_color, fontproperties=fontprops) + px_size2 = self.other_wcs.wcs.get_cdelt()[0] * 3600.0 + px_sc2 = AnchoredSizeBar( + self.other_ax.transData, + 1.0 / px_size2, + "1 arcsec", + 3, + pad=0.5, + sep=5, + borderpad=0.5, + frameon=False, + size_vertical=0.005, + color=font_color, + fontproperties=fontprops, + ) self.other_ax.add_artist(px_sc2) - if 'PHOTPLAM' in list(self.other_header.keys()): - self.other_ax.annotate(r"$\lambda$ = {0:.0f} $\AA$".format(self.other_header['photplam']), color='white', fontsize=12, xy=( - 0.01, 0.93), xycoords='axes fraction', path_effects=[pe.withStroke(linewidth=0.5, foreground='k')]) - if 'ORIENTAT' in list(self.other_header.keys()): - north_dir2 = AnchoredDirectionArrows(self.map_ax.transAxes, "E", "N", length=-0.08, fontsize=0.03, loc=1, aspect_ratio=-1, sep_y=0.01, - sep_x=0.01, angle=-self.other_header['orientat'], color=font_color, arrow_props={'ec': 'k', 'fc': 'w', 'alpha': 1, 'lw': 0.5}) + if "PHOTPLAM" in list(self.other_header.keys()): + self.other_ax.annotate( + r"$\lambda$ = {0:.0f} $\AA$".format(self.other_header["photplam"]), + color="white", + fontsize=12, + xy=(0.01, 0.93), + xycoords="axes fraction", + path_effects=[pe.withStroke(linewidth=0.5, foreground="k")], + ) + if "ORIENTAT" in list(self.other_header.keys()): + north_dir2 = AnchoredDirectionArrows( + self.map_ax.transAxes, + "E", + "N", + length=-0.08, + fontsize=0.03, + loc=1, + aspect_ratio=-(self.other_data.shape[1]/self.other_data.shape[0]), + sep_y=0.01, + sep_x=0.01, + angle=-self.other_header["orientat"], + color=font_color, + arrow_props={"ec": "k", "fc": "w", "alpha": 1, "lw": 0.5}, + ) self.other_ax.add_artist(north_dir2) - self.cr_other, = self.other_ax.plot(*(self.other_wcs.wcs.crpix-(1., 1.)), 'r+') + (self.cr_other,) = self.other_ax.plot(*(self.other_wcs.wcs.crpix - (1.0, 1.0)), "r+") self.other_ax.set_title("{0:s} observation\nClick on selected point of reference.".format(self.other_observer)) self.other_ax.set_xlabel(label="Right Ascension (J2000)") @@ -648,12 +811,12 @@ class align_maps(object): # Selection button self.axapply = self.fig_align.add_axes([0.80, 0.01, 0.1, 0.04]) - self.bapply = Button(self.axapply, 'Apply reference') + self.bapply = Button(self.axapply, "Apply reference") self.bapply.label.set_fontsize(8) self.axreset = self.fig_align.add_axes([0.60, 0.01, 0.1, 0.04]) - 
self.breset = Button(self.axreset, 'Leave as is') + self.breset = Button(self.axreset, "Leave as is") self.breset.label.set_fontsize(8) - self.enter = self.fig_align.canvas.mpl_connect('key_press_event', self.on_key) + self.enter = self.fig_align.canvas.mpl_connect("key_press_event", self.on_key) def on_key(self, event): if event.key.lower() == "enter": @@ -663,7 +826,7 @@ class align_maps(object): return self.map_wcs, self.other_wcs def onclick_ref(self, event) -> None: - if self.fig_align.canvas.manager.toolbar.mode == '': + if self.fig_align.canvas.manager.toolbar.mode == "": if (event.inaxes is not None) and (event.inaxes == self.map_ax): x = event.xdata y = event.ydata @@ -690,13 +853,13 @@ class align_maps(object): def apply_align(self, event=None): if np.array(self.cr_map.get_data()).shape == (2, 1): - self.map_wcs.wcs.crpix = np.array(self.cr_map.get_data())[:, 0]+(1., 1.) + self.map_wcs.wcs.crpix = np.array(self.cr_map.get_data())[:, 0] + (1.0, 1.0) else: - self.map_wcs.wcs.crpix = np.array(self.cr_map.get_data())+(1., 1.) + self.map_wcs.wcs.crpix = np.array(self.cr_map.get_data()) + (1.0, 1.0) if np.array(self.cr_other.get_data()).shape == (2, 1): - self.other_wcs.wcs.crpix = np.array(self.cr_other.get_data())[:, 0]+(1., 1.) + self.other_wcs.wcs.crpix = np.array(self.cr_other.get_data())[:, 0] + (1.0, 1.0) else: - self.other_wcs.wcs.crpix = np.array(self.cr_other.get_data())+(1., 1.) + self.other_wcs.wcs.crpix = np.array(self.cr_other.get_data()) + (1.0, 1.0) self.map_wcs.wcs.crval = np.array(self.map_wcs.pixel_to_world_values(*self.map_wcs.wcs.crpix)) self.other_wcs.wcs.crval = self.map_wcs.wcs.crval self.fig_align.canvas.draw_idle() @@ -713,10 +876,10 @@ class align_maps(object): def align(self): self.fig_align.canvas.draw() - self.fig_align.canvas.mpl_connect('button_press_event', self.onclick_ref) + self.fig_align.canvas.mpl_connect("button_press_event", self.onclick_ref) self.bapply.on_clicked(self.apply_align) self.breset.on_clicked(self.reset_align) - self.fig_align.canvas.mpl_connect('close_event', self.on_close_align) + self.fig_align.canvas.mpl_connect("close_event", self.on_close_align) plt.show(block=True) return self.get_aligned_wcs() @@ -724,14 +887,14 @@ class align_maps(object): new_head = deepcopy(self.map_header) new_head.update(self.map_wcs.to_header()) new_hdul = fits.HDUList(fits.PrimaryHDU(self.map_data, new_head)) - new_hdul.writeto("_".join([path[:-5], suffix])+".fits", overwrite=True) + new_hdul.writeto("_".join([path[:-5], suffix]) + ".fits", overwrite=True) return 0 def write_other_to(self, path="other_map.fits", suffix="aligned", data_dir="."): new_head = deepcopy(self.other_header) new_head.update(self.other_wcs.to_header()) new_hdul = fits.HDUList(fits.PrimaryHDU(self.other_data, new_head)) - new_hdul.writeto("_".join([path[:-5], suffix])+".fits", overwrite=True) + new_hdul.writeto("_".join([path[:-5], suffix]) + ".fits", overwrite=True) return 0 def write_to(self, path1="map.fits", path2="other_map.fits", suffix="aligned", data_dir="."): @@ -746,114 +909,201 @@ class overplot_radio(align_maps): Inherit from class align_maps in order to get the same WCS on both maps. 
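
apply_align above ties the two frames together at a single matched feature: both reference pixels are moved to the clicked positions (0-based matplotlib data coordinates, hence the +1.0 to reach the FITS convention) and the second WCS inherits the sky coordinate of the first, so the feature lands on the same RA/DEC in both frames. The same logic without the widget machinery, mirroring the class above (align_on_point is an illustrative name):

import numpy as np

def align_on_point(map_wcs, other_wcs, xy_map, xy_other):
    # Move both reference pixels to the clicked positions, then let the
    # second WCS inherit the sky coordinate of the first, as apply_align() does.
    map_wcs.wcs.crpix = np.array(xy_map) + (1.0, 1.0)
    other_wcs.wcs.crpix = np.array(xy_other) + (1.0, 1.0)
    map_wcs.wcs.crval = np.array(map_wcs.pixel_to_world_values(*map_wcs.wcs.crpix))
    other_wcs.wcs.crval = map_wcs.wcs.crval
    return map_wcs, other_wcs
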
""" - def overplot(self, levels=None, SNRp_cut=3., SNRi_cut=3., vec_scale=2, savename=None, **kwargs): + def overplot(self, levels=None, SNRp_cut=3.0, SNRi_cut=3.0, scale_vec=2, savename=None, **kwargs): self.Stokes_UV = self.map self.wcs_UV = self.map_wcs # Get Data - obj = self.Stokes_UV[0].header['targname'] - stkI = self.Stokes_UV['I_STOKES'].data - stk_cov = self.Stokes_UV['IQU_COV_MATRIX'].data - pol = deepcopy(self.Stokes_UV['POL_DEG_DEBIASED'].data) - pol_err = self.Stokes_UV['POL_DEG_ERR'].data - pang = self.Stokes_UV['POL_ANG'].data + obj = self.Stokes_UV[0].header["targname"] + stkI = self.Stokes_UV["I_STOKES"].data + stk_cov = self.Stokes_UV["IQU_COV_MATRIX"].data + pol = deepcopy(self.Stokes_UV["POL_DEG_DEBIASED"].data) + pol_err = self.Stokes_UV["POL_DEG_ERR"].data + pang = self.Stokes_UV["POL_ANG"].data other_data = self.other_data - self.other_convert = 1. - if self.other_unit.lower() == 'jy/beam': + self.other_convert = 1.0 + if self.other_unit.lower() == "jy/beam": self.other_unit = r"mJy/Beam" self.other_convert = 1e3 - other_freq = self.other_header['crval3'] if 'CRVAL3' in list(self.other_header.keys()) else 1. + other_freq = self.other_header["crval3"] if "CRVAL3" in list(self.other_header.keys()) else 1.0 - self.map_convert = self.Stokes_UV[0].header['photflam'] + self.map_convert = self.Stokes_UV[0].header["photflam"] # Compute SNR and apply cuts - pol[pol == 0.] = np.nan - SNRp = pol/pol_err - SNRp[np.isnan(SNRp)] = 0. + pol[pol == 0.0] = np.nan + SNRp = pol / pol_err + SNRp[np.isnan(SNRp)] = 0.0 pol[SNRp < SNRp_cut] = np.nan - SNRi = stkI/np.sqrt(stk_cov[0, 0]) - SNRi[np.isnan(SNRi)] = 0. + SNRi = stkI / np.sqrt(stk_cov[0, 0]) + SNRi[np.isnan(SNRi)] = 0.0 pol[SNRi < SNRi_cut] = np.nan - plt.rcParams.update({'font.size': 16}) + plt.rcParams.update({"font.size": 16}) self.fig_overplot, self.ax_overplot = plt.subplots(figsize=(10, 10), subplot_kw=dict(projection=self.wcs_UV)) self.fig_overplot.subplots_adjust(hspace=0, wspace=0, bottom=0.1, left=0.1, top=0.8, right=1) # Display UV intensity map with polarization vectors - vmin, vmax = stkI[np.isfinite(stkI)].max()/1e3*self.map_convert, stkI[np.isfinite(stkI)].max()*self.map_convert + vmin, vmax = stkI[np.isfinite(stkI)].max() / 1e3 * self.map_convert, stkI[np.isfinite(stkI)].max() * self.map_convert for key, value in [["cmap", [["cmap", "inferno"]]], ["norm", [["norm", LogNorm(vmin, vmax)]]]]: try: _ = kwargs[key] except KeyError: for key_i, val_i in value: kwargs[key_i] = val_i - if kwargs['cmap'] in ['inferno', 'magma', 'Greys_r', 'binary_r', 'gist_yarg_r', 'gist_gray', 'gray', 'bone', 'pink', 'hot', 'afmhot', 'gist_heat', 'copper', 'gist_earth', 'gist_stern', 'gnuplot', 'gnuplot2', 'CMRmap', 'cubehelix', 'nipy_spectral', 'gist_ncar', 'viridis']: - self.ax_overplot.set_facecolor('black') + if kwargs["cmap"] in [ + "inferno", + "magma", + "Greys_r", + "binary_r", + "gist_yarg_r", + "gist_gray", + "gray", + "bone", + "pink", + "hot", + "afmhot", + "gist_heat", + "copper", + "gist_earth", + "gist_stern", + "gnuplot", + "gnuplot2", + "CMRmap", + "cubehelix", + "nipy_spectral", + "gist_ncar", + "viridis", + ]: + self.ax_overplot.set_facecolor("black") font_color = "white" else: - self.ax_overplot.set_facecolor('white') + self.ax_overplot.set_facecolor("white") font_color = "black" - self.im = self.ax_overplot.imshow(stkI*self.map_convert, aspect='equal', label="{0:s} observation".format(self.map_observer), **kwargs) - self.cbar = self.fig_overplot.colorbar(self.im, ax=self.ax_overplot, aspect=50, shrink=0.75, pad=0.025, 
- label=r"$F_{{\lambda}}$ [{0:s}]".format(self.map_unit)) + self.im = self.ax_overplot.imshow(stkI * self.map_convert, aspect="equal", label="{0:s} observation".format(self.map_observer), **kwargs) + self.cbar = self.fig_overplot.colorbar( + self.im, ax=self.ax_overplot, aspect=50, shrink=0.75, pad=0.025, label=r"$F_{{\lambda}}$ [{0:s}]".format(self.map_unit) + ) # Display full size polarization vectors - if vec_scale is None: - self.vec_scale = 2. - pol[np.isfinite(pol)] = 1./2. + if scale_vec is None: + self.scale_vec = 2.0 + pol[np.isfinite(pol)] = 1.0 / 2.0 else: - self.vec_scale = vec_scale + self.scale_vec = scale_vec step_vec = 1 self.X, self.Y = np.meshgrid(np.arange(stkI.shape[1]), np.arange(stkI.shape[0])) - self.U, self.V = pol*np.cos(np.pi/2.+pang*np.pi/180.), pol*np.sin(np.pi/2.+pang*np.pi/180.) - self.Q = self.ax_overplot.quiver(self.X[::step_vec, ::step_vec], self.Y[::step_vec, ::step_vec], self.U[::step_vec, ::step_vec], self.V[::step_vec, ::step_vec], units='xy', angles='uv', scale=1./self.vec_scale, - scale_units='xy', pivot='mid', headwidth=0., headlength=0., headaxislength=0., width=0.5, linewidth=0.75, color='white', edgecolor='black', label="{0:s} polarization map".format(self.map_observer)) + self.U, self.V = pol * np.cos(np.pi / 2.0 + pang * np.pi / 180.0), pol * np.sin(np.pi / 2.0 + pang * np.pi / 180.0) + self.Q = self.ax_overplot.quiver( + self.X[::step_vec, ::step_vec], + self.Y[::step_vec, ::step_vec], + self.U[::step_vec, ::step_vec], + self.V[::step_vec, ::step_vec], + units="xy", + angles="uv", + scale=1.0 / self.scale_vec, + scale_units="xy", + pivot="mid", + headwidth=0.0, + headlength=0.0, + headaxislength=0.0, + width=0.5, + linewidth=0.75, + color="white", + edgecolor="black", + label="{0:s} polarization map".format(self.map_observer), + ) self.ax_overplot.autoscale(False) # Display other map as contours if levels is None: - levels = np.logspace(0., 1.9, 5)/100.*other_data[other_data > 0.].max() + levels = np.logspace(0.0, 1.9, 5) / 100.0 * other_data[other_data > 0.0].max() other_cont = self.ax_overplot.contour( - other_data*self.other_convert, transform=self.ax_overplot.get_transform(self.other_wcs.celestial), levels=levels*self.other_convert, colors='grey') + other_data * self.other_convert, + transform=self.ax_overplot.get_transform(self.other_wcs.celestial), + levels=levels * self.other_convert, + colors="grey", + ) self.ax_overplot.clabel(other_cont, inline=True, fontsize=5) self.ax_overplot.set_xlabel(label="Right Ascension (J2000)") self.ax_overplot.set_ylabel(label="Declination (J2000)", labelpad=-1) - self.fig_overplot.suptitle("{0:s} polarization map of {1:s} overplotted with {2:s} {3:.2f}GHz map in {4:s}.".format( - self.map_observer, obj, self.other_observer, other_freq*1e-9, self.other_unit), wrap=True) + self.fig_overplot.suptitle( + "{0:s} polarization map of {1:s} overplotted with {2:s} {3:.2f}GHz map in {4:s}.".format( + self.map_observer, obj, self.other_observer, other_freq * 1e-9, self.other_unit + ), + wrap=True, + ) # Display pixel scale and North direction fontprops = fm.FontProperties(size=16) - px_size = self.wcs_UV.wcs.get_cdelt()[0]*3600. 
- px_sc = AnchoredSizeBar(self.ax_overplot.transData, 1./px_size, '1 arcsec', 3, pad=0.5, sep=5, borderpad=0.5, - frameon=False, size_vertical=0.005, color=font_color, fontproperties=fontprops) + px_size = self.wcs_UV.wcs.get_cdelt()[0] * 3600.0 + px_sc = AnchoredSizeBar( + self.ax_overplot.transData, + 1.0 / px_size, + "1 arcsec", + 3, + pad=0.5, + sep=5, + borderpad=0.5, + frameon=False, + size_vertical=0.005, + color=font_color, + fontproperties=fontprops, + ) self.ax_overplot.add_artist(px_sc) - north_dir = AnchoredDirectionArrows(self.ax_overplot.transAxes, "E", "N", length=-0.08, fontsize=0.03, loc=1, aspect_ratio=-1, sep_y=0.01, - sep_x=0.01, angle=-self.Stokes_UV[0].header['orientat'], color=font_color, arrow_props={'ec': 'k', 'fc': 'w', 'alpha': 1, 'lw': 0.5}) + north_dir = AnchoredDirectionArrows( + self.ax_overplot.transAxes, + "E", + "N", + length=-0.08, + fontsize=0.03, + loc=1, + aspect_ratio=-(stkI.shape[1]/stkI.shape[0]), + sep_y=0.01, + sep_x=0.01, + angle=-self.Stokes_UV[0].header["orientat"], + color=font_color, + arrow_props={"ec": "k", "fc": "w", "alpha": 1, "lw": 0.5}, + ) self.ax_overplot.add_artist(north_dir) - pol_sc = AnchoredSizeBar(self.ax_overplot.transData, self.vec_scale, r"$P$= 100%", 4, pad=0.5, sep=5, borderpad=0.5, - frameon=False, size_vertical=0.005, color=font_color, fontproperties=fontprops) + pol_sc = AnchoredSizeBar( + self.ax_overplot.transData, + self.scale_vec, + r"$P$= 100%", + 4, + pad=0.5, + sep=5, + borderpad=0.5, + frameon=False, + size_vertical=0.005, + color=font_color, + fontproperties=fontprops, + ) self.ax_overplot.add_artist(pol_sc) - self.cr_map, = self.ax_overplot.plot(*(self.map_wcs.celestial.wcs.crpix-(1., 1.)), 'r+') - self.cr_other, = self.ax_overplot.plot(*(self.other_wcs.celestial.wcs.crpix-(1., 1.)), 'g+', transform=self.ax_overplot.get_transform(self.other_wcs)) + (self.cr_map,) = self.ax_overplot.plot(*(self.map_wcs.celestial.wcs.crpix - (1.0, 1.0)), "r+") + (self.cr_other,) = self.ax_overplot.plot( + *(self.other_wcs.celestial.wcs.crpix - (1.0, 1.0)), "g+", transform=self.ax_overplot.get_transform(self.other_wcs) + ) handles, labels = self.ax_overplot.get_legend_handles_labels() - handles[np.argmax([li == "{0:s} polarization map".format(self.map_observer) for li in labels]) - ] = FancyArrowPatch((0, 0), (0, 1), arrowstyle='-', fc='w', ec='k', lw=2) + handles[np.argmax([li == "{0:s} polarization map".format(self.map_observer) for li in labels])] = FancyArrowPatch( + (0, 0), (0, 1), arrowstyle="-", fc="w", ec="k", lw=2 + ) labels.append("{0:s} contour".format(self.other_observer)) handles.append(Rectangle((0, 0), 1, 1, fill=False, lw=2, ec=other_cont.collections[0].get_edgecolor()[0])) - self.legend = self.ax_overplot.legend(handles=handles, labels=labels, bbox_to_anchor=( - 0., 1.02, 1., .102), loc='lower left', mode="expand", borderaxespad=0.) 
+ self.legend = self.ax_overplot.legend( + handles=handles, labels=labels, bbox_to_anchor=(0.0, 1.02, 1.0, 0.102), loc="lower left", mode="expand", borderaxespad=0.0 + ) - if not (savename is None): - if not savename[-4:] in ['.png', '.jpg', '.pdf']: - savename += '.pdf' - self.fig_overplot.savefig(savename, bbox_inches='tight', dpi=200) + if savename is not None: + if savename[-4:] not in [".png", ".jpg", ".pdf"]: + savename += ".pdf" + self.fig_overplot.savefig(savename, bbox_inches="tight", dpi=200) self.fig_overplot.canvas.draw() - def plot(self, levels=None, SNRp_cut=3., SNRi_cut=3., savename=None, **kwargs) -> None: + def plot(self, levels=None, SNRp_cut=3.0, SNRi_cut=3.0, savename=None, **kwargs) -> None: while not self.aligned: self.align() self.overplot(levels=levels, SNRp_cut=SNRp_cut, SNRi_cut=SNRi_cut, savename=savename, **kwargs) @@ -866,16 +1116,16 @@ class overplot_chandra(align_maps): Inherit from class align_maps in order to get the same WCS on both maps. """ - def overplot(self, levels=None, SNRp_cut=3., SNRi_cut=3., vec_scale=2, zoom=1, savename=None, **kwargs): + def overplot(self, levels=None, SNRp_cut=3.0, SNRi_cut=3.0, scale_vec=2, zoom=1, savename=None, **kwargs): self.Stokes_UV = self.map self.wcs_UV = self.map_wcs # Get Data - obj = self.Stokes_UV[0].header['targname'] - stkI = self.Stokes_UV['I_STOKES'].data - stk_cov = self.Stokes_UV['IQU_COV_MATRIX'].data - pol = deepcopy(self.Stokes_UV['POL_DEG_DEBIASED'].data) - pol_err = self.Stokes_UV['POL_DEG_ERR'].data - pang = self.Stokes_UV['POL_ANG'].data + obj = self.Stokes_UV[0].header["targname"] + stkI = self.Stokes_UV["I_STOKES"].data + stk_cov = self.Stokes_UV["IQU_COV_MATRIX"].data + pol = deepcopy(self.Stokes_UV["POL_DEG_DEBIASED"].data) + pol_err = self.Stokes_UV["POL_DEG_ERR"].data + pang = self.Stokes_UV["POL_ANG"].data other_data = deepcopy(self.other_data) other_wcs = self.other_wcs.deepcopy() @@ -883,96 +1133,176 @@ class overplot_chandra(align_maps): other_data = sc_zoom(other_data, zoom) other_wcs.wcs.crpix *= zoom other_wcs.wcs.cdelt /= zoom - self.other_unit = 'counts' + self.other_unit = "counts" # Compute SNR and apply cuts - pol[pol == 0.] = np.nan - SNRp = pol/pol_err - SNRp[np.isnan(SNRp)] = 0. + pol[pol == 0.0] = np.nan + SNRp = pol / pol_err + SNRp[np.isnan(SNRp)] = 0.0 pol[SNRp < SNRp_cut] = np.nan - SNRi = stkI/np.sqrt(stk_cov[0, 0]) - SNRi[np.isnan(SNRi)] = 0. 
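+        # Per-pixel S/N on Stokes I: the (0, 0) element of the IQU covariance
+        # matrix is the variance of I, so its square root is sigma_I
+        # (e.g. a pixel with I = 5 sigma_I survives the default SNRi_cut = 3).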
+ SNRi = stkI / np.sqrt(stk_cov[0, 0]) + SNRi[np.isnan(SNRi)] = 0.0 pol[SNRi < SNRi_cut] = np.nan - plt.rcParams.update({'font.size': 16}) + plt.rcParams.update({"font.size": 16}) self.fig_overplot, self.ax_overplot = plt.subplots(figsize=(11, 10), subplot_kw=dict(projection=self.wcs_UV)) self.fig_overplot.subplots_adjust(hspace=0, wspace=0, bottom=0.1, left=0.1, top=0.8, right=1) # Display UV intensity map with polarization vectors - vmin, vmax = stkI[np.isfinite(stkI)].max()/1e3*self.map_convert, stkI[np.isfinite(stkI)].max()*self.map_convert + vmin, vmax = stkI[np.isfinite(stkI)].max() / 1e3 * self.map_convert, stkI[np.isfinite(stkI)].max() * self.map_convert for key, value in [["cmap", [["cmap", "inferno"]]], ["norm", [["norm", LogNorm(vmin, vmax)]]]]: try: _ = kwargs[key] except KeyError: for key_i, val_i in value: kwargs[key_i] = val_i - if kwargs['cmap'] in ['inferno', 'magma', 'Greys_r', 'binary_r', 'gist_yarg_r', 'gist_gray', 'gray', 'bone', 'pink', 'hot', 'afmhot', 'gist_heat', 'copper', 'gist_earth', 'gist_stern', 'gnuplot', 'gnuplot2', 'CMRmap', 'cubehelix', 'nipy_spectral', 'gist_ncar', 'viridis']: - self.ax_overplot.set_facecolor('black') + if kwargs["cmap"] in [ + "inferno", + "magma", + "Greys_r", + "binary_r", + "gist_yarg_r", + "gist_gray", + "gray", + "bone", + "pink", + "hot", + "afmhot", + "gist_heat", + "copper", + "gist_earth", + "gist_stern", + "gnuplot", + "gnuplot2", + "CMRmap", + "cubehelix", + "nipy_spectral", + "gist_ncar", + "viridis", + ]: + self.ax_overplot.set_facecolor("black") font_color = "white" else: - self.ax_overplot.set_facecolor('white') + self.ax_overplot.set_facecolor("white") font_color = "black" - self.im = self.ax_overplot.imshow(stkI*self.map_convert, aspect='equal', **kwargs) - self.cbar = self.fig_overplot.colorbar(self.im, ax=self.ax_overplot, aspect=50, shrink=0.75, pad=0.025, - label=r"$F_{{\lambda}}$ [{0:s}]".format(self.map_unit)) + self.im = self.ax_overplot.imshow(stkI * self.map_convert, aspect="equal", **kwargs) + self.cbar = self.fig_overplot.colorbar( + self.im, ax=self.ax_overplot, aspect=50, shrink=0.75, pad=0.025, label=r"$F_{{\lambda}}$ [{0:s}]".format(self.map_unit) + ) # Display full size polarization vectors - if vec_scale is None: - self.vec_scale = 2. - pol[np.isfinite(pol)] = 1./2. + if scale_vec is None: + self.scale_vec = 2.0 + pol[np.isfinite(pol)] = 1.0 / 2.0 else: - self.vec_scale = vec_scale + self.scale_vec = scale_vec step_vec = 1 self.X, self.Y = np.meshgrid(np.arange(stkI.shape[1]), np.arange(stkI.shape[0])) - self.U, self.V = pol*np.cos(np.pi/2.+pang*np.pi/180.), pol*np.sin(np.pi/2.+pang*np.pi/180.) 
- self.Q = self.ax_overplot.quiver(self.X[::step_vec, ::step_vec], self.Y[::step_vec, ::step_vec], self.U[::step_vec, ::step_vec], self.V[::step_vec, ::step_vec], units='xy', angles='uv', scale=1./self.vec_scale, - scale_units='xy', pivot='mid', headwidth=0., headlength=0., headaxislength=0., width=0.5, linewidth=0.75, color='white', edgecolor='black', label="{0:s} polarization map".format(self.map_observer)) + self.U, self.V = pol * np.cos(np.pi / 2.0 + pang * np.pi / 180.0), pol * np.sin(np.pi / 2.0 + pang * np.pi / 180.0) + self.Q = self.ax_overplot.quiver( + self.X[::step_vec, ::step_vec], + self.Y[::step_vec, ::step_vec], + self.U[::step_vec, ::step_vec], + self.V[::step_vec, ::step_vec], + units="xy", + angles="uv", + scale=1.0 / self.scale_vec, + scale_units="xy", + pivot="mid", + headwidth=0.0, + headlength=0.0, + headaxislength=0.0, + width=0.5, + linewidth=0.75, + color="white", + edgecolor="black", + label="{0:s} polarization map".format(self.map_observer), + ) self.ax_overplot.autoscale(False) # Display other map as contours if levels is None: - levels = np.logspace(np.log(3)/np.log(10), 2., 5)/100.*other_data[other_data > 0.].max()*self.other_convert + levels = np.logspace(np.log(3) / np.log(10), 2.0, 5) / 100.0 * other_data[other_data > 0.0].max() * self.other_convert elif zoom != 1: - levels *= other_data.max()/self.other_data.max() - other_cont = self.ax_overplot.contour(other_data*self.other_convert, transform=self.ax_overplot.get_transform(other_wcs), levels=levels, colors='grey') + levels *= other_data.max() / self.other_data.max() + other_cont = self.ax_overplot.contour( + other_data * self.other_convert, transform=self.ax_overplot.get_transform(other_wcs), levels=levels, colors="grey" + ) self.ax_overplot.clabel(other_cont, inline=True, fontsize=8) self.ax_overplot.set_xlabel(label="Right Ascension (J2000)") self.ax_overplot.set_ylabel(label="Declination (J2000)", labelpad=-1) - self.fig_overplot.suptitle("{0:s} polarization map of {1:s} overplotted\nwith {2:s} contour in counts.".format( - self.map_observer, obj, self.other_observer), wrap=True) + self.fig_overplot.suptitle( + "{0:s} polarization map of {1:s} overplotted\nwith {2:s} contour in counts.".format(self.map_observer, obj, self.other_observer), wrap=True + ) # Display pixel scale and North direction fontprops = fm.FontProperties(size=16) - px_size = self.wcs_UV.wcs.get_cdelt()[0]*3600. 
- px_sc = AnchoredSizeBar(self.ax_overplot.transData, 1./px_size, '1 arcsec', 3, pad=0.5, sep=5, borderpad=0.5, - frameon=False, size_vertical=0.005, color=font_color, fontproperties=fontprops) + px_size = self.wcs_UV.wcs.get_cdelt()[0] * 3600.0 + px_sc = AnchoredSizeBar( + self.ax_overplot.transData, + 1.0 / px_size, + "1 arcsec", + 3, + pad=0.5, + sep=5, + borderpad=0.5, + frameon=False, + size_vertical=0.005, + color=font_color, + fontproperties=fontprops, + ) self.ax_overplot.add_artist(px_sc) - north_dir = AnchoredDirectionArrows(self.ax_overplot.transAxes, "E", "N", length=-0.08, fontsize=0.03, loc=1, aspect_ratio=-1, sep_y=0.01, - sep_x=0.01, angle=-self.Stokes_UV[0].header['orientat'], color=font_color, arrow_props={'ec': 'k', 'fc': 'w', 'alpha': 1, 'lw': 0.5}) + north_dir = AnchoredDirectionArrows( + self.ax_overplot.transAxes, + "E", + "N", + length=-0.08, + fontsize=0.03, + loc=1, + aspect_ratio=-(stkI.shape[1]/stkI.shape[0]), + sep_y=0.01, + sep_x=0.01, + angle=-self.Stokes_UV[0].header["orientat"], + color=font_color, + arrow_props={"ec": "k", "fc": "w", "alpha": 1, "lw": 0.5}, + ) self.ax_overplot.add_artist(north_dir) - pol_sc = AnchoredSizeBar(self.ax_overplot.transData, self.vec_scale, r"$P$= 100%", 4, pad=0.5, sep=5, borderpad=0.5, - frameon=False, size_vertical=0.005, color=font_color, fontproperties=fontprops) + pol_sc = AnchoredSizeBar( + self.ax_overplot.transData, + self.scale_vec, + r"$P$= 100%", + 4, + pad=0.5, + sep=5, + borderpad=0.5, + frameon=False, + size_vertical=0.005, + color=font_color, + fontproperties=fontprops, + ) self.ax_overplot.add_artist(pol_sc) - self.cr_map, = self.ax_overplot.plot(*(self.map_wcs.celestial.wcs.crpix-(1., 1.)), 'r+') - self.cr_other, = self.ax_overplot.plot(*(other_wcs.celestial.wcs.crpix-(1., 1.)), 'g+', transform=self.ax_overplot.get_transform(other_wcs)) + (self.cr_map,) = self.ax_overplot.plot(*(self.map_wcs.celestial.wcs.crpix - (1.0, 1.0)), "r+") + (self.cr_other,) = self.ax_overplot.plot(*(other_wcs.celestial.wcs.crpix - (1.0, 1.0)), "g+", transform=self.ax_overplot.get_transform(other_wcs)) handles, labels = self.ax_overplot.get_legend_handles_labels() - handles[np.argmax([li == "{0:s} polarization map".format(self.map_observer) for li in labels]) - ] = FancyArrowPatch((0, 0), (0, 1), arrowstyle='-', fc='w', ec='k', lw=2) + handles[np.argmax([li == "{0:s} polarization map".format(self.map_observer) for li in labels])] = FancyArrowPatch( + (0, 0), (0, 1), arrowstyle="-", fc="w", ec="k", lw=2 + ) labels.append("{0:s} contour in counts".format(self.other_observer)) handles.append(Rectangle((0, 0), 1, 1, fill=False, lw=2, ec=other_cont.collections[0].get_edgecolor()[0])) - self.legend = self.ax_overplot.legend(handles=handles, labels=labels, bbox_to_anchor=( - 0., 1.02, 1., .102), loc='lower left', mode="expand", borderaxespad=0.) 
+ self.legend = self.ax_overplot.legend( + handles=handles, labels=labels, bbox_to_anchor=(0.0, 1.02, 1.0, 0.102), loc="lower left", mode="expand", borderaxespad=0.0 + ) - if not (savename is None): - if not savename[-4:] in ['.png', '.jpg', '.pdf']: - savename += '.pdf' - self.fig_overplot.savefig(savename, bbox_inches='tight', dpi=200) + if savename is not None: + if savename[-4:] not in [".png", ".jpg", ".pdf"]: + savename += ".pdf" + self.fig_overplot.savefig(savename, bbox_inches="tight", dpi=200) self.fig_overplot.canvas.draw() - def plot(self, levels=None, SNRp_cut=3., SNRi_cut=3., zoom=1, savename=None, **kwargs) -> None: + def plot(self, levels=None, SNRp_cut=3.0, SNRi_cut=3.0, zoom=1, savename=None, **kwargs) -> None: while not self.aligned: self.align() self.overplot(levels=levels, SNRp_cut=SNRp_cut, SNRi_cut=SNRi_cut, zoom=zoom, savename=savename, **kwargs) @@ -985,143 +1315,228 @@ class overplot_pol(align_maps): Inherit from class align_maps in order to get the same WCS on both maps. """ - def overplot(self, levels=None, SNRp_cut=3., SNRi_cut=3., vec_scale=2., savename=None, **kwargs): + def overplot(self, levels=None, SNRp_cut=3.0, SNRi_cut=3.0, scale_vec=2.0, savename=None, **kwargs): self.Stokes_UV = self.map self.wcs_UV = self.map_wcs # Get Data - obj = self.Stokes_UV[0].header['targname'] - stkI = self.Stokes_UV['I_STOKES'].data - stk_cov = self.Stokes_UV['IQU_COV_MATRIX'].data - pol = deepcopy(self.Stokes_UV['POL_DEG_DEBIASED'].data) - pol_err = self.Stokes_UV['POL_DEG_ERR'].data - pang = self.Stokes_UV['POL_ANG'].data + obj = self.Stokes_UV[0].header["targname"] + stkI = self.Stokes_UV["I_STOKES"].data + stk_cov = self.Stokes_UV["IQU_COV_MATRIX"].data + pol = deepcopy(self.Stokes_UV["POL_DEG_DEBIASED"].data) + pol_err = self.Stokes_UV["POL_DEG_ERR"].data + pang = self.Stokes_UV["POL_ANG"].data other_data = self.other_data # Compute SNR and apply cuts - pol[pol == 0.] = np.nan - SNRp = pol/pol_err - SNRp[np.isnan(SNRp)] = 0. + pol[pol == 0.0] = np.nan + SNRp = pol / pol_err + SNRp[np.isnan(SNRp)] = 0.0 pol[SNRp < SNRp_cut] = np.nan - SNRi = stkI/np.sqrt(stk_cov[0, 0]) - SNRi[np.isnan(SNRi)] = 0. 
+ SNRi = stkI / np.sqrt(stk_cov[0, 0]) + SNRi[np.isnan(SNRi)] = 0.0 pol[SNRi < SNRi_cut] = np.nan - plt.rcParams.update({'font.size': 16}) + plt.rcParams.update({"font.size": 16}) self.fig_overplot, self.ax_overplot = plt.subplots(figsize=(11, 10), subplot_kw=dict(projection=self.other_wcs)) self.fig_overplot.subplots_adjust(hspace=0, wspace=0, bottom=0.1, left=0.1, top=0.80, right=1.02) self.ax_overplot.set_xlabel(label="Right Ascension (J2000)") self.ax_overplot.set_ylabel(label="Declination (J2000)", labelpad=-1) - self.fig_overplot.suptitle("{0:s} observation from {1:s} overplotted with polarization vectors and Stokes I contours from {2:s}".format( - obj, self.other_observer, self.map_observer), wrap=True) + self.fig_overplot.suptitle( + "{0:s} observation from {1:s} overplotted with polarization vectors and Stokes I contours from {2:s}".format( + obj, self.other_observer, self.map_observer + ), + wrap=True, + ) # Display "other" intensity map - vmin, vmax = other_data[other_data > 0.].max()/1e3*self.other_convert, other_data[other_data > 0.].max()*self.other_convert + vmin, vmax = other_data[other_data > 0.0].max() / 1e3 * self.other_convert, other_data[other_data > 0.0].max() * self.other_convert for key, value in [["cmap", [["cmap", "inferno"]]], ["norm", [["vmin", vmin], ["vmax", vmax]]]]: try: _ = kwargs[key] except KeyError: for key_i, val_i in value: kwargs[key_i] = val_i - if kwargs['cmap'] in ['inferno', 'magma', 'Greys_r', 'binary_r', 'gist_yarg_r', 'gist_gray', 'gray', 'bone', 'pink', 'hot', 'afmhot', 'gist_heat', 'copper', 'gist_earth', 'gist_stern', 'gnuplot', 'gnuplot2', 'CMRmap', 'cubehelix', 'nipy_spectral', 'gist_ncar', 'viridis']: - self.ax_overplot.set_facecolor('black') + if kwargs["cmap"] in [ + "inferno", + "magma", + "Greys_r", + "binary_r", + "gist_yarg_r", + "gist_gray", + "gray", + "bone", + "pink", + "hot", + "afmhot", + "gist_heat", + "copper", + "gist_earth", + "gist_stern", + "gnuplot", + "gnuplot2", + "CMRmap", + "cubehelix", + "nipy_spectral", + "gist_ncar", + "viridis", + ]: + self.ax_overplot.set_facecolor("black") font_color = "white" else: - self.ax_overplot.set_facecolor('white') + self.ax_overplot.set_facecolor("white") font_color = "black" - self.im = self.ax_overplot.imshow(other_data*self.other_convert, alpha=1., label="{0:s} observation".format(self.other_observer), **kwargs) - self.cbar = self.fig_overplot.colorbar(self.im, ax=self.ax_overplot, aspect=80, shrink=0.75, pad=0.025, - label=r"$F_{{\lambda}}$ [{0:s}]".format(self.other_unit)) + self.im = self.ax_overplot.imshow(other_data * self.other_convert, alpha=1.0, label="{0:s} observation".format(self.other_observer), **kwargs) + self.cbar = self.fig_overplot.colorbar( + self.im, ax=self.ax_overplot, aspect=80, shrink=0.75, pad=0.025, label=r"$F_{{\lambda}}$ [{0:s}]".format(self.other_unit) + ) # Display full size polarization vectors - if vec_scale is None: - self.vec_scale = 2. - pol[np.isfinite(pol)] = 1./2. + if scale_vec is None: + self.scale_vec = 2.0 + pol[np.isfinite(pol)] = 1.0 / 2.0 else: - self.vec_scale = vec_scale + self.scale_vec = scale_vec step_vec = 1 - px_scale = np.abs(self.wcs_UV.wcs.get_cdelt()[0]/self.other_wcs.wcs.get_cdelt()[0]) self.X, self.Y = np.meshgrid(np.arange(stkI.shape[1]), np.arange(stkI.shape[0])) - self.U, self.V = pol*np.cos(np.pi/2.+pang*np.pi/180.), pol*np.sin(np.pi/2.+pang*np.pi/180.) 
- self.Q = self.ax_overplot.quiver(self.X[::step_vec, ::step_vec], self.Y[::step_vec, ::step_vec], self.U[::step_vec, ::step_vec], self.V[::step_vec, ::step_vec], units='xy', angles='uv', scale=px_scale/self.vec_scale, scale_units='xy', pivot='mid', - headwidth=0., headlength=0., headaxislength=0., width=0.5, linewidth=0.75, color='white', edgecolor='black', transform=self.ax_overplot.get_transform(self.wcs_UV), label="{0:s} polarization map".format(self.map_observer)) + self.U, self.V = pol * np.cos(np.pi / 2.0 + pang * np.pi / 180.0), pol * np.sin(np.pi / 2.0 + pang * np.pi / 180.0) + self.Q = self.ax_overplot.quiver( + self.X[::step_vec, ::step_vec], + self.Y[::step_vec, ::step_vec], + self.U[::step_vec, ::step_vec], + self.V[::step_vec, ::step_vec], + units="xy", + angles="uv", + scale=1. / self.scale_vec, + scale_units="xy", + pivot="mid", + headwidth=0.0, + headlength=0.0, + headaxislength=0.0, + width=0.5, + linewidth=0.75, + color="white", + edgecolor="black", + transform=self.ax_overplot.get_transform(self.wcs_UV), + label="{0:s} polarization map".format(self.map_observer), + ) # Display Stokes I as contours if levels is None: - levels = np.array([2., 5., 10., 20., 90.])/100.*np.max(stkI[stkI > 0.])*self.map_convert - cont_stkI = self.ax_overplot.contour(stkI*self.map_convert, levels=levels, colors='grey', alpha=0.75, - transform=self.ax_overplot.get_transform(self.wcs_UV)) + levels = np.array([2.0, 5.0, 10.0, 20.0, 90.0]) / 100.0 * np.max(stkI[stkI > 0.0]) * self.map_convert + cont_stkI = self.ax_overplot.contour( + stkI * self.map_convert, levels=levels, colors="grey", alpha=0.75, transform=self.ax_overplot.get_transform(self.wcs_UV) + ) # self.ax_overplot.clabel(cont_stkI, inline=True, fontsize=5) # Display pixel scale and North direction fontprops = fm.FontProperties(size=16) - px_size = self.other_wcs.wcs.get_cdelt()[0]*3600. 
- px_sc = AnchoredSizeBar(self.ax_overplot.transData, 1./px_size, '1 arcsec', 3, pad=0.5, sep=5, borderpad=0.5, - frameon=False, size_vertical=0.005, color=font_color, fontproperties=fontprops) + px_size = self.other_wcs.wcs.get_cdelt()[0] * 3600.0 + px_sc = AnchoredSizeBar( + self.ax_overplot.transData, + 1.0 / px_size, + "1 arcsec", + 3, + pad=0.5, + sep=5, + borderpad=0.5, + frameon=False, + size_vertical=0.005, + color=font_color, + fontproperties=fontprops, + ) self.ax_overplot.add_artist(px_sc) - north_dir = AnchoredDirectionArrows(self.ax_overplot.transAxes, "E", "N", length=-0.08, fontsize=0.03, loc=1, aspect_ratio=-1, sep_y=0.01, - sep_x=0.01, angle=-self.Stokes_UV[0].header['orientat'], color=font_color, arrow_props={'ec': 'k', 'fc': 'w', 'alpha': 1, 'lw': 0.5}) + north_dir = AnchoredDirectionArrows( + self.ax_overplot.transAxes, + "E", + "N", + length=-0.08, + fontsize=0.03, + loc=1, + aspect_ratio=-(stkI.shape[1]/stkI.shape[0]), + sep_y=0.01, + sep_x=0.01, + angle=-self.Stokes_UV[0].header["orientat"], + color=font_color, + arrow_props={"ec": "k", "fc": "w", "alpha": 1, "lw": 0.5}, + ) self.ax_overplot.add_artist(north_dir) - pol_sc = AnchoredSizeBar(self.ax_overplot.transData, self.vec_scale/px_scale, r"$P$= 100%", 4, pad=0.5, sep=5, - borderpad=0.5, frameon=False, size_vertical=0.005, color=font_color, fontproperties=fontprops) + pol_sc = AnchoredSizeBar( + self.ax_overplot.transData, + self.scale_vec, + r"$P$= 100%", + 4, + pad=0.5, + sep=5, + borderpad=0.5, + frameon=False, + size_vertical=0.005, + color=font_color, + fontproperties=fontprops, + ) self.ax_overplot.add_artist(pol_sc) - self.cr_map, = self.ax_overplot.plot(*(self.map_wcs.celestial.wcs.crpix-(1., 1.)), 'r+', transform=self.ax_overplot.get_transform(self.wcs_UV)) - self.cr_other, = self.ax_overplot.plot(*(self.other_wcs.celestial.wcs.crpix-(1., 1.)), 'g+') + (self.cr_map,) = self.ax_overplot.plot(*(self.map_wcs.celestial.wcs.crpix - (1.0, 1.0)), "r+", transform=self.ax_overplot.get_transform(self.wcs_UV)) + (self.cr_other,) = self.ax_overplot.plot(*(self.other_wcs.celestial.wcs.crpix - (1.0, 1.0)), "g+") if "PHOTPLAM" in list(self.other_header.keys()): - self.legend_title = r"{0:s} image at $\lambda$ = {1:.0f} $\AA$".format(self.other_observer, float(self.other_header['photplam'])) + self.legend_title = r"{0:s} image at $\lambda$ = {1:.0f} $\AA$".format(self.other_observer, float(self.other_header["photplam"])) elif "CRVAL3" in list(self.other_header.keys()): - self.legend_title = "{0:s} image at {1:.2f} GHz".format(self.other_observer, float(self.other_header['crval3'])*1e-9) + self.legend_title = "{0:s} image at {1:.2f} GHz".format(self.other_observer, float(self.other_header["crval3"]) * 1e-9) else: self.legend_title = r"{0:s} image".format(self.other_observer) handles, labels = self.ax_overplot.get_legend_handles_labels() - handles[np.argmax([li == "{0:s} polarization map".format(self.map_observer) for li in labels]) - ] = FancyArrowPatch((0, 0), (0, 1), arrowstyle='-', fc='w', ec='k', lw=2) + handles[np.argmax([li == "{0:s} polarization map".format(self.map_observer) for li in labels])] = FancyArrowPatch( + (0, 0), (0, 1), arrowstyle="-", fc="w", ec="k", lw=2 + ) labels.append("{0:s} Stokes I contour".format(self.map_observer)) handles.append(Rectangle((0, 0), 1, 1, fill=False, ec=cont_stkI.collections[0].get_edgecolor()[0])) - self.legend = self.ax_overplot.legend(handles=handles, labels=labels, bbox_to_anchor=( - 0., 1.02, 1., .102), loc='lower left', mode="expand", borderaxespad=0.) 
+ self.legend = self.ax_overplot.legend( + handles=handles, labels=labels, bbox_to_anchor=(0.0, 1.02, 1.0, 0.102), loc="lower left", mode="expand", borderaxespad=0.0 + ) - if not (savename is None): - if not savename[-4:] in ['.png', '.jpg', '.pdf']: - savename += '.pdf' - self.fig_overplot.savefig(savename, bbox_inches='tight', dpi=200) + if savename is not None: + if savename[-4:] not in [".png", ".jpg", ".pdf"]: + savename += ".pdf" + self.fig_overplot.savefig(savename, bbox_inches="tight", dpi=200) self.fig_overplot.canvas.draw() - def plot(self, levels=None, SNRp_cut=3., SNRi_cut=3., vec_scale=2., savename=None, **kwargs) -> None: + def plot(self, levels=None, SNRp_cut=3.0, SNRi_cut=3.0, scale_vec=2.0, savename=None, **kwargs) -> None: while not self.aligned: self.align() - self.overplot(levels=levels, SNRp_cut=SNRp_cut, SNRi_cut=SNRi_cut, vec_scale=vec_scale, savename=savename, **kwargs) + self.overplot(levels=levels, SNRp_cut=SNRp_cut, SNRi_cut=SNRi_cut, scale_vec=scale_vec, savename=savename, **kwargs) plt.show(block=True) - def add_vector(self, position='center', pol_deg=1., pol_ang=0., **kwargs): - if position == 'center': - position = np.array(self.X.shape)/2. + def add_vector(self, position="center", pol_deg=1.0, pol_ang=0.0, **kwargs): + if position == "center": + position = np.array(self.X.shape) / 2.0 if isinstance(position, SkyCoord): position = self.other_wcs.world_to_pixel(position) - u, v = pol_deg*np.cos(np.radians(pol_ang)+np.pi/2.), pol_deg*np.sin(np.radians(pol_ang)+np.pi/2.) - for key, value in [["scale", [["scale", self.vec_scale]]], ["width", [["width", 0.1]]], ["color", [["color", 'k']]]]: + u, v = pol_deg * np.cos(np.radians(pol_ang) + np.pi / 2.0), pol_deg * np.sin(np.radians(pol_ang) + np.pi / 2.0) + for key, value in [["scale", [["scale", self.scale_vec]]], ["width", [["width", 0.1]]], ["color", [["color", "k"]]]]: try: _ = kwargs[key] except KeyError: for key_i, val_i in value: kwargs[key_i] = val_i - new_vec = self.ax_overplot.quiver(*position, u, v, units='xy', angles='uv', scale_units='xy', - pivot='mid', headwidth=0., headlength=0., headaxislength=0., **kwargs) + new_vec = self.ax_overplot.quiver( + *position, u, v, units="xy", angles="uv", scale_units="xy", pivot="mid", headwidth=0.0, headlength=0.0, headaxislength=0.0, **kwargs + ) self.legend.remove() - self.legend = self.ax_overplot.legend(title=self.legend_title, bbox_to_anchor=(0., 1.02, 1., .102), loc='lower left', mode="expand", borderaxespad=0.) 
+ self.legend = self.ax_overplot.legend( + title=self.legend_title, bbox_to_anchor=(0.0, 1.02, 1.0, 0.102), loc="lower left", mode="expand", borderaxespad=0.0 + ) self.fig_overplot.canvas.draw() return new_vec class align_pol(object): def __init__(self, maps, **kwargs): - order = np.argsort(np.array([curr[0].header['mjd-obs'] for curr in maps])) + order = np.argsort(np.array([curr[0].header["mjd-obs"] for curr in maps])) maps = np.array(maps)[order] self.ref_map, self.other_maps = maps[0], maps[1:] @@ -1132,39 +1547,43 @@ class align_pol(object): self.kwargs = kwargs - def single_plot(self, curr_map, wcs, v_lim=None, ax_lim=None, SNRp_cut=3., SNRi_cut=3., savename=None, **kwargs): + def single_plot(self, curr_map, wcs, v_lim=None, ax_lim=None, SNRp_cut=3.0, SNRi_cut=3.0, savename=None, **kwargs): # Get data - stkI = curr_map['I_STOKES'].data - stk_cov = curr_map['IQU_COV_MATRIX'].data - pol = deepcopy(curr_map['POL_DEG_DEBIASED'].data) - pol_err = curr_map['POL_DEG_ERR'].data - pang = curr_map['POL_ANG'].data + stkI = curr_map["I_STOKES"].data + stk_cov = curr_map["IQU_COV_MATRIX"].data + pol = deepcopy(curr_map["POL_DEG_DEBIASED"].data) + pol_err = curr_map["POL_DEG_ERR"].data + pang = curr_map["POL_ANG"].data try: - data_mask = curr_map['DATA_MASK'].data.astype(bool) + data_mask = curr_map["DATA_MASK"].data.astype(bool) except KeyError: data_mask = np.ones(stkI.shape).astype(bool) - convert_flux = curr_map[0].header['photflam'] + convert_flux = curr_map[0].header["photflam"] # Compute SNR and apply cuts - maskpol = np.logical_and(pol_err > 0., data_mask) + maskpol = np.logical_and(pol_err > 0.0, data_mask) SNRp = np.zeros(pol.shape) - SNRp[maskpol] = pol[maskpol]/pol_err[maskpol] + SNRp[maskpol] = pol[maskpol] / pol_err[maskpol] maskI = np.logical_and(stk_cov[0, 0] > 0, data_mask) SNRi = np.zeros(stkI.shape) - SNRi[maskI] = stkI[maskI]/np.sqrt(stk_cov[0, 0][maskI]) + SNRi[maskI] = stkI[maskI] / np.sqrt(stk_cov[0, 0][maskI]) - mask = (SNRp > SNRp_cut) * (SNRi > SNRi_cut) * (pol >= 0.) 
+        mask = np.logical_not((SNRp > SNRp_cut) * (SNRi > SNRi_cut) * (pol >= 0.0))  # flag pixels *failing* the cuts: these are blanked below
         pol[mask] = np.nan

         # Plot the map
-        plt.rcParams.update({'font.size': 10})
         plt.rcdefaults()
+        plt.rcParams.update({"font.size": 10})  # applied after rcdefaults() so the setting survives the reset
        fig = plt.figure(figsize=(10, 10))
        ax = fig.add_subplot(111, projection=wcs)
-        ax.set(xlabel="Right Ascension (J2000)", ylabel="Declination (J2000)", facecolor='k',
-               title="target {0:s} observed on {1:s}".format(curr_map[0].header['targname'], curr_map[0].header['date-obs']))
+        ax.set(
+            xlabel="Right Ascension (J2000)",
+            ylabel="Declination (J2000)",
+            facecolor="k",
+            title="target {0:s} observed on {1:s}".format(curr_map[0].header["targname"], curr_map[0].header["date-obs"]),
+        )
        fig.subplots_adjust(hspace=0, wspace=0, right=0.102)

        if ax_lim is not None:
@@ -1173,9 +1592,9 @@ class align_pol(object):
            ax.set(xlim=x_lim, ylim=y_lim)

        if v_lim is None:
-            vmin, vmax = 0., np.max(stkI[stkI > 0.]*convert_flux)
+            vmin, vmax = 0.0, np.max(stkI[stkI > 0.0] * convert_flux)
        else:
-            vmin, vmax = v_lim*convert_flux
+            vmin, vmax = v_lim * convert_flux

        for key, value in [["cmap", [["cmap", "inferno"]]], ["norm", [["vmin", vmin], ["vmax", vmax]]]]:
            try:
@@ -1186,33 +1605,70 @@ class align_pol(object):
                for key_i, val_i in value:
                    kwargs[key_i] = val_i
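+        # The try/except loop above only fills in display defaults (an "inferno"
+        # colormap and data-driven vmin/vmax) for keywords the caller did not
+        # supply, e.g. a hypothetical call
+        #     self.single_plot(curr_map, wcs, cmap="viridis")
+        # keeps the caller's colormap but still receives the computed flux limits.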
- ax.quiver(X[::step_vec, ::step_vec], Y[::step_vec, ::step_vec], U[::step_vec, ::step_vec], V[::step_vec, ::step_vec], units='xy', - angles='uv', scale=0.5, scale_units='xy', pivot='mid', headwidth=0., headlength=0., headaxislength=0., width=0.5, linewidth=0.75, color='w') - pol_sc = AnchoredSizeBar(ax.transData, 2., r"$P$= 100 %", 4, pad=0.5, sep=5, borderpad=0.5, frameon=False, size_vertical=0.005, color='w') + U, V = pol * np.cos(np.pi / 2.0 + pang * np.pi / 180.0), pol * np.sin(np.pi / 2.0 + pang * np.pi / 180.0) + ax.quiver( + X[::step_vec, ::step_vec], + Y[::step_vec, ::step_vec], + U[::step_vec, ::step_vec], + V[::step_vec, ::step_vec], + units="xy", + angles="uv", + scale=0.5, + scale_units="xy", + pivot="mid", + headwidth=0.0, + headlength=0.0, + headaxislength=0.0, + width=0.5, + linewidth=0.75, + color="w", + ) + pol_sc = AnchoredSizeBar(ax.transData, 2.0, r"$P$= 100 %", 4, pad=0.5, sep=5, borderpad=0.5, frameon=False, size_vertical=0.005, color="w") ax.add_artist(pol_sc) - if 'PHOTPLAM' in list(curr_map[0].header.keys()): - ax.annotate(r"$\lambda$ = {0:.0f} $\AA$".format(curr_map[0].header['photplam']), color='white', fontsize=12, xy=( - 0.01, 0.93), xycoords='axes fraction', path_effects=[pe.withStroke(linewidth=0.5, foreground='k')]) + if "PHOTPLAM" in list(curr_map[0].header.keys()): + ax.annotate( + r"$\lambda$ = {0:.0f} $\AA$".format(curr_map[0].header["photplam"]), + color="white", + fontsize=12, + xy=(0.01, 0.93), + xycoords="axes fraction", + path_effects=[pe.withStroke(linewidth=0.5, foreground="k")], + ) if savename is not None: - if savename[-4:] not in ['.png', '.jpg', '.pdf']: - savename += '.pdf' - fig.savefig(savename, bbox_inches='tight', dpi=300) + if savename[-4:] not in [".png", ".jpg", ".pdf"]: + savename += ".pdf" + fig.savefig(savename, bbox_inches="tight", dpi=300) plt.show(block=True) return fig, ax @@ -1223,27 +1679,60 @@ class align_pol(object): self.wcs, self.wcs_other[i] = curr_align.align() self.aligned[i] = curr_align.aligned - def plot(self, SNRp_cut=3., SNRi_cut=3., savename=None, **kwargs): + def plot(self, SNRp_cut=3.0, SNRi_cut=3.0, savename=None, **kwargs): while not self.aligned.all(): self.align() eps = 1e-35 - vmin = np.min([np.min(curr_map[0].data[curr_map[0].data > SNRi_cut*np.max([eps*np.ones(curr_map[0].data.shape), - np.sqrt(curr_map[3].data[0, 0])], axis=0)]) for curr_map in self.other_maps])/2.5 - vmax = np.max([np.max(curr_map[0].data[curr_map[0].data > SNRi_cut*np.max([eps*np.ones(curr_map[0].data.shape), - np.sqrt(curr_map[3].data[0, 0])], axis=0)]) for curr_map in self.other_maps]) - vmin = np.min([vmin, np.min(self.ref_map[0].data[self.ref_map[0].data > SNRi_cut * - np.max([eps*np.ones(self.ref_map[0].data.shape), np.sqrt(self.ref_map[3].data[0, 0])], axis=0)])])/2.5 - vmax = np.max([vmax, np.max(self.ref_map[0].data[self.ref_map[0].data > SNRi_cut * - np.max([eps*np.ones(self.ref_map[0].data.shape), np.sqrt(self.ref_map[3].data[0, 0])], axis=0)])]) + vmin = ( + np.min( + [ + np.min( + curr_map[0].data[curr_map[0].data > SNRi_cut * np.max([eps * np.ones(curr_map[0].data.shape), np.sqrt(curr_map[3].data[0, 0])], axis=0)] + ) + for curr_map in self.other_maps + ] + ) + / 2.5 + ) + vmax = np.max( + [ + np.max(curr_map[0].data[curr_map[0].data > SNRi_cut * np.max([eps * np.ones(curr_map[0].data.shape), np.sqrt(curr_map[3].data[0, 0])], axis=0)]) + for curr_map in self.other_maps + ] + ) + vmin = ( + np.min( + [ + vmin, + np.min( + self.ref_map[0].data[ + self.ref_map[0].data > SNRi_cut * np.max([eps * 
np.ones(self.ref_map[0].data.shape), np.sqrt(self.ref_map[3].data[0, 0])], axis=0)
+                        ]
+                    ),
+                ]
+            )
+            / 2.5
+        )
+        vmax = np.max(
+            [
+                vmax,
+                np.max(
+                    self.ref_map[0].data[
+                        self.ref_map[0].data > SNRi_cut * np.max([eps * np.ones(self.ref_map[0].data.shape), np.sqrt(self.ref_map[3].data[0, 0])], axis=0)
+                    ]
+                ),
+            ]
+        )
        v_lim = np.array([vmin, vmax])

-        fig, ax = self.single_plot(self.ref_map, self.wcs, v_lim=v_lim, SNRp_cut=SNRp_cut, SNRi_cut=SNRi_cut, savename=savename+'_0', **kwargs)
+        # guard against the default savename=None, for which savename + "_0" would raise a TypeError
+        fig, ax = self.single_plot(
+            self.ref_map, self.wcs, v_lim=v_lim, SNRp_cut=SNRp_cut, SNRi_cut=SNRi_cut, savename=savename + "_0" if savename is not None else None, **kwargs
+        )
        x_lim, y_lim = ax.get_xlim(), ax.get_ylim()
        ax_lim = np.array([self.wcs.pixel_to_world(x_lim[i], y_lim[i]) for i in range(len(x_lim))])
        for i, curr_map in enumerate(self.other_maps):
-            self.single_plot(curr_map, self.wcs_other[i], v_lim=v_lim, ax_lim=ax_lim, SNRp_cut=SNRp_cut,
-                             SNRi_cut=SNRi_cut, savename=savename+'_'+str(i+1), **kwargs)
+            self.single_plot(
+                curr_map, self.wcs_other[i], v_lim=v_lim, ax_lim=ax_lim, SNRp_cut=SNRp_cut, SNRi_cut=SNRi_cut, savename=savename + "_" + str(i + 1) if savename is not None else None, **kwargs
+            )


class crop_map(object):
@@ -1260,16 +1749,16 @@ class crop_map(object):
        self.data = deepcopy(self.hdul[0].data)

        try:
-            self.map_convert = self.header['photflam']
+            self.map_convert = self.header["photflam"]
        except KeyError:
-            self.map_convert = 1.
+            self.map_convert = 1.0
        try:
            self.kwargs = kwargs
        except AttributeError:
            self.kwargs = {}

        # Plot the map
-        plt.rcParams.update({'font.size': 12})
+        plt.rcParams.update({"font.size": 12})
        if fig is None:
            self.fig = plt.figure(figsize=(15, 15))
            self.fig.suptitle("Click and drag to crop to desired Region of Interest.")
@@ -1277,23 +1766,22 @@ class crop_map(object):
            self.fig = fig
        if ax is None:
            self.ax = self.fig.add_subplot(111, projection=self.wcs)
-            self.mask_alpha = 1.
+ self.mask_alpha = 1.0 # Selection button self.axapply = self.fig.add_axes([0.80, 0.01, 0.1, 0.04]) - self.bapply = Button(self.axapply, 'Apply') + self.bapply = Button(self.axapply, "Apply") self.axreset = self.fig.add_axes([0.60, 0.01, 0.1, 0.04]) - self.breset = Button(self.axreset, 'Reset') + self.breset = Button(self.axreset, "Reset") self.embedded = False else: self.ax = ax self.mask_alpha = 0.75 - self.rect_selector = RectangleSelector(self.ax, self.onselect_crop, - button=[1]) + self.rect_selector = RectangleSelector(self.ax, self.onselect_crop, button=[1]) self.embedded = True self.display(self.data, self.wcs, self.map_convert, **self.kwargs) - self.extent = np.array([0., self.data.shape[0], 0., self.data.shape[1]]) - self.center = np.array(self.data.shape)/2 + self.extent = np.array([0.0, self.data.shape[0], 0.0, self.data.shape[1]]) + self.center = np.array(self.data.shape) / 2 self.RSextent = deepcopy(self.extent) self.RScenter = deepcopy(self.center) @@ -1309,20 +1797,26 @@ class crop_map(object): else: kwargs = {**self.kwargs, **kwargs} - vmin, vmax = np.min(data[data > 0.]*convert_flux), np.max(data[data > 0.]*convert_flux) - for key, value in [["cmap", [["cmap", "inferno"]]], ["origin", [["origin", "lower"]]], ["aspect", [["aspect", "equal"]]], ["alpha", [["alpha", self.mask_alpha]]], ["norm", [["vmin", vmin], ["vmax", vmax]]]]: + vmin, vmax = np.min(data[data > 0.0] * convert_flux), np.max(data[data > 0.0] * convert_flux) + for key, value in [ + ["cmap", [["cmap", "inferno"]]], + ["origin", [["origin", "lower"]]], + ["aspect", [["aspect", "equal"]]], + ["alpha", [["alpha", self.mask_alpha]]], + ["norm", [["vmin", vmin], ["vmax", vmax]]], + ]: try: _ = kwargs[key] except KeyError: for key_i, val_i in value: kwargs[key_i] = val_i - if hasattr(self, 'im'): + if hasattr(self, "im"): self.im.remove() - self.im = self.ax.imshow(data*convert_flux, **kwargs) - if hasattr(self, 'cr'): + self.im = self.ax.imshow(data * convert_flux, **kwargs) + if hasattr(self, "cr"): self.cr[0].set_data(*wcs.wcs.crpix) else: - self.cr = self.ax.plot(*wcs.wcs.crpix, 'r+') + self.cr = self.ax.plot(*wcs.wcs.crpix, "r+") self.fig.canvas.draw_idle() return self.im @@ -1330,20 +1824,19 @@ class crop_map(object): def crpix_in_RS(self): crpix = self.wcs.wcs.crpix x_lim, y_lim = self.RSextent[:2], self.RSextent[2:] - if (crpix[0] > x_lim[0] and crpix[0] < x_lim[1]): - if (crpix[1] > y_lim[0] and crpix[1] < y_lim[1]): + if crpix[0] > x_lim[0] and crpix[0] < x_lim[1]: + if crpix[1] > y_lim[0] and crpix[1] < y_lim[1]: return True return False def reset_crop(self, event): self.ax.reset_wcs(self.wcs) - if hasattr(self, 'hdul_crop'): + if hasattr(self, "hdul_crop"): del self.hdul_crop, self.data_crop self.display() - if self.fig.canvas.manager.toolbar.mode == '': - self.rect_selector = RectangleSelector(self.ax, self.onselect_crop, - button=[1]) + if self.fig.canvas.manager.toolbar.mode == "": + self.rect_selector = RectangleSelector(self.ax, self.onselect_crop, button=[1]) self.RSextent = deepcopy(self.extent) self.RScenter = deepcopy(self.center) @@ -1359,7 +1852,7 @@ class crop_map(object): self.apply_crop(erelease) def apply_crop(self, event): - if hasattr(self, 'hdul_crop'): + if hasattr(self, "hdul_crop"): header = self.header_crop data = self.data_crop wcs = self.wcs_crop @@ -1375,50 +1868,50 @@ class crop_map(object): shape_im = extent[1::2] - extent[0::2] if (shape_im.astype(int) != shape).any() and (self.RSextent != self.extent).any(): # Update WCS and header in new cropped image - crpix = 
np.array(wcs.wcs.crpix) + # crpix = np.array(wcs.wcs.crpix) self.wcs_crop = wcs.deepcopy() self.wcs_crop.array_shape = shape if self.crpix_in_RS: self.wcs_crop.wcs.crpix = np.array(self.wcs_crop.wcs.crpix) - self.RSextent[::2] else: self.wcs_crop.wcs.crval = wcs.wcs_pix2world([self.RScenter], 1)[0] - self.wcs_crop.wcs.crpix = self.RScenter-self.RSextent[::2] + self.wcs_crop.wcs.crpix = self.RScenter - self.RSextent[::2] # Crop dataset - self.data_crop = deepcopy(data[vertex[2]:vertex[3], vertex[0]:vertex[1]]) + self.data_crop = deepcopy(data[vertex[2] : vertex[3], vertex[0] : vertex[1]]) # Write cropped map to new HDUList self.header_crop = deepcopy(header) self.header_crop.update(self.wcs_crop.to_header()) + if self.header_crop["FILENAME"][-4:] != "crop": + self.header_crop["FILENAME"] += "_crop" self.hdul_crop = fits.HDUList([fits.PrimaryHDU(self.data_crop, self.header_crop)]) self.rect_selector.clear() self.ax.reset_wcs(self.wcs_crop) self.display(data=self.data_crop, wcs=self.wcs_crop) - xlim, ylim = self.RSextent[1::2]-self.RSextent[0::2] + xlim, ylim = self.RSextent[1::2] - self.RSextent[0::2] self.ax.set_xlim(0, xlim) self.ax.set_ylim(0, ylim) - if self.fig.canvas.manager.toolbar.mode == '': - self.rect_selector = RectangleSelector(self.ax, self.onselect_crop, - button=[1]) + if self.fig.canvas.manager.toolbar.mode == "": + self.rect_selector = RectangleSelector(self.ax, self.onselect_crop, button=[1]) self.fig.canvas.draw_idle() def on_close(self, event) -> None: - if not hasattr(self, 'hdul_crop'): + if not hasattr(self, "hdul_crop"): self.hdul_crop = self.hdul self.rect_selector.disconnect_events() self.cropped = True def crop(self) -> None: - if self.fig.canvas.manager.toolbar.mode == '': - self.rect_selector = RectangleSelector(self.ax, self.onselect_crop, - button=[1]) + if self.fig.canvas.manager.toolbar.mode == "": + self.rect_selector = RectangleSelector(self.ax, self.onselect_crop, button=[1]) self.bapply.on_clicked(self.apply_crop) self.breset.on_clicked(self.reset_crop) - self.fig.canvas.mpl_connect('close_event', self.on_close) + self.fig.canvas.mpl_connect("close_event", self.on_close) plt.show() def write_to(self, filename): @@ -1435,7 +1928,7 @@ class crop_Stokes(crop_map): """ Redefine apply_crop method for the Stokes HDUList. 
""" - if hasattr(self, 'hdul_crop'): + if hasattr(self, "hdul_crop"): hdul = self.hdul_crop data = self.data_crop wcs = self.wcs_crop @@ -1452,25 +1945,26 @@ class crop_Stokes(crop_map): if (shape_im.astype(int) != shape).any() and (self.RSextent != self.extent).any(): # Update WCS and header in new cropped image self.hdul_crop = deepcopy(hdul) - crpix = np.array(wcs.wcs.crpix) + self.data_crop = deepcopy(data) + # crpix = np.array(wcs.wcs.crpix) self.wcs_crop = wcs.deepcopy() self.wcs_crop.array_shape = shape if self.crpix_in_RS: self.wcs_crop.wcs.crpix = np.array(self.wcs_crop.wcs.crpix) - self.RSextent[::2] else: self.wcs_crop.wcs.crval = wcs.wcs_pix2world([self.RScenter], 1)[0] - self.wcs_crop.wcs.crpix = self.RScenter-self.RSextent[::2] + self.wcs_crop.wcs.crpix = self.RScenter - self.RSextent[::2] # Crop dataset for dataset in self.hdul_crop: - if dataset.header['datatype'] == 'IQU_cov_matrix': + if dataset.header["datatype"] == "IQU_cov_matrix": stokes_cov = np.zeros((3, 3, shape[1], shape[0])) for i in range(3): for j in range(3): - stokes_cov[i, j] = deepcopy(dataset.data[i, j][vertex[2]:vertex[3], vertex[0]:vertex[1]]) + stokes_cov[i, j] = deepcopy(dataset.data[i, j][vertex[2] : vertex[3], vertex[0] : vertex[1]]) dataset.data = stokes_cov else: - dataset.data = deepcopy(dataset.data[vertex[2]:vertex[3], vertex[0]:vertex[1]]) + dataset.data = deepcopy(dataset.data[vertex[2] : vertex[3], vertex[0] : vertex[1]]) dataset.header.update(self.wcs_crop.to_header()) self.data_crop = self.hdul_crop[0].data @@ -1479,45 +1973,51 @@ class crop_Stokes(crop_map): self.ax.reset_wcs(self.wcs_crop) self.display(data=self.data_crop, wcs=self.wcs_crop) - xlim, ylim = self.RSextent[1::2]-self.RSextent[0::2] + xlim, ylim = self.RSextent[1::2] - self.RSextent[0::2] self.ax.set_xlim(0, xlim) self.ax.set_ylim(0, ylim) else: self.on_close(event) - if self.fig.canvas.manager.toolbar.mode == '': - self.rect_selector = RectangleSelector(self.ax, self.onselect_crop, - button=[1]) + if self.fig.canvas.manager.toolbar.mode == "": + self.rect_selector = RectangleSelector(self.ax, self.onselect_crop, button=[1]) # Update integrated values - mask = np.logical_and(self.hdul_crop['data_mask'].data.astype(bool), self.hdul_crop[0].data > 0) - I_diluted = self.hdul_crop['i_stokes'].data[mask].sum() - Q_diluted = self.hdul_crop['q_stokes'].data[mask].sum() - U_diluted = self.hdul_crop['u_stokes'].data[mask].sum() - I_diluted_err = np.sqrt(np.sum(self.hdul_crop['iqu_cov_matrix'].data[0, 0][mask])) - Q_diluted_err = np.sqrt(np.sum(self.hdul_crop['iqu_cov_matrix'].data[1, 1][mask])) - U_diluted_err = np.sqrt(np.sum(self.hdul_crop['iqu_cov_matrix'].data[2, 2][mask])) - IQ_diluted_err = np.sqrt(np.sum(self.hdul_crop['iqu_cov_matrix'].data[0, 1][mask]**2)) - IU_diluted_err = np.sqrt(np.sum(self.hdul_crop['iqu_cov_matrix'].data[0, 2][mask]**2)) - QU_diluted_err = np.sqrt(np.sum(self.hdul_crop['iqu_cov_matrix'].data[1, 2][mask]**2)) + mask = np.logical_and(self.hdul_crop["data_mask"].data.astype(bool), self.hdul_crop[0].data > 0) + I_diluted = self.hdul_crop["i_stokes"].data[mask].sum() + Q_diluted = self.hdul_crop["q_stokes"].data[mask].sum() + U_diluted = self.hdul_crop["u_stokes"].data[mask].sum() + I_diluted_err = np.sqrt(np.sum(self.hdul_crop["iqu_cov_matrix"].data[0, 0][mask])) + Q_diluted_err = np.sqrt(np.sum(self.hdul_crop["iqu_cov_matrix"].data[1, 1][mask])) + U_diluted_err = np.sqrt(np.sum(self.hdul_crop["iqu_cov_matrix"].data[2, 2][mask])) + IQ_diluted_err = np.sqrt(np.sum(self.hdul_crop["iqu_cov_matrix"].data[0, 
1][mask] ** 2)) + IU_diluted_err = np.sqrt(np.sum(self.hdul_crop["iqu_cov_matrix"].data[0, 2][mask] ** 2)) + QU_diluted_err = np.sqrt(np.sum(self.hdul_crop["iqu_cov_matrix"].data[1, 2][mask] ** 2)) - P_diluted = np.sqrt(Q_diluted**2+U_diluted**2)/I_diluted - P_diluted_err = (1./I_diluted)*np.sqrt((Q_diluted**2*Q_diluted_err**2 + U_diluted**2*U_diluted_err**2 + 2.*Q_diluted*U_diluted*QU_diluted_err)/(Q_diluted**2 + U_diluted ** - 2) + ((Q_diluted/I_diluted)**2 + (U_diluted/I_diluted)**2)*I_diluted_err**2 - 2.*(Q_diluted/I_diluted)*IQ_diluted_err - 2.*(U_diluted/I_diluted)*IU_diluted_err) + P_diluted = np.sqrt(Q_diluted**2 + U_diluted**2) / I_diluted + P_diluted_err = (1.0 / I_diluted) * np.sqrt( + (Q_diluted**2 * Q_diluted_err**2 + U_diluted**2 * U_diluted_err**2 + 2.0 * Q_diluted * U_diluted * QU_diluted_err) / (Q_diluted**2 + U_diluted**2) + + ((Q_diluted / I_diluted) ** 2 + (U_diluted / I_diluted) ** 2) * I_diluted_err**2 + - 2.0 * (Q_diluted / I_diluted) * IQ_diluted_err + - 2.0 * (U_diluted / I_diluted) * IU_diluted_err + ) - PA_diluted = princ_angle((90./np.pi)*np.arctan2(U_diluted, Q_diluted)) - PA_diluted_err = (90./(np.pi*(Q_diluted**2 + U_diluted**2)))*np.sqrt(U_diluted**2*Q_diluted_err ** - 2 + Q_diluted**2*U_diluted_err**2 - 2.*Q_diluted*U_diluted*QU_diluted_err) + PA_diluted = princ_angle((90.0 / np.pi) * np.arctan2(U_diluted, Q_diluted)) + PA_diluted_err = (90.0 / (np.pi * (Q_diluted**2 + U_diluted**2))) * np.sqrt( + U_diluted**2 * Q_diluted_err**2 + Q_diluted**2 * U_diluted_err**2 - 2.0 * Q_diluted * U_diluted * QU_diluted_err + ) for dataset in self.hdul_crop: - dataset.header['P_int'] = (P_diluted, 'Integrated polarization degree') - dataset.header['P_int_err'] = (np.ceil(P_diluted_err*1000.)/1000., 'Integrated polarization degree error') - dataset.header['PA_int'] = (PA_diluted, 'Integrated polarization angle') - dataset.header['PA_int_err'] = (np.ceil(PA_diluted_err*10.)/10., 'Integrated polarization angle error') + if dataset.header["FILENAME"][-4:] != "crop": + dataset.header["FILENAME"] += "_crop" + dataset.header["P_int"] = (P_diluted, "Integrated polarization degree") + dataset.header["sP_int"] = (np.ceil(P_diluted_err * 1000.0) / 1000.0, "Integrated polarization degree error") + dataset.header["PA_int"] = (PA_diluted, "Integrated polarization angle") + dataset.header["sPA_int"] = (np.ceil(PA_diluted_err * 10.0) / 10.0, "Integrated polarization angle error") self.fig.canvas.draw_idle() @property def data_mask(self): - return self.hdul_crop['data_mask'].data.astype(int) + return self.hdul_crop["data_mask"].data.astype(int) class image_lasso_selector(object): @@ -1527,7 +2027,7 @@ class image_lasso_selector(object): """ self.selected = False self.img = img - self.vmin, self.vmax = 0., np.max(self.img[self.img > 0.]) + self.vmin, self.vmax = 0.0, np.max(self.img[self.img > 0.0]) plt.ioff() # see https://github.com/matplotlib/matplotlib/issues/17013 if fig is None: self.fig = plt.figure(figsize=(15, 15)) @@ -1535,16 +2035,16 @@ class image_lasso_selector(object): self.fig = fig if ax is None: self.ax = self.fig.gca() - self.mask_alpha = 1. 
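+            # fully opaque when the selector owns the figure; embedded instances
+            # (else branch below) use a low alpha so the parent figure's own
+            # display stays visible under the selection overlay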
+ self.mask_alpha = 1.0 self.embedded = False else: self.ax = ax self.mask_alpha = 0.1 self.embedded = True - self.displayed = self.ax.imshow(self.img, vmin=self.vmin, vmax=self.vmax, aspect='equal', cmap='inferno', alpha=self.mask_alpha) + self.displayed = self.ax.imshow(self.img, vmin=self.vmin, vmax=self.vmax, aspect="equal", cmap="inferno", alpha=self.mask_alpha) plt.ion() - lineprops = {'color': 'grey', 'linewidth': 1, 'alpha': 0.8} + lineprops = {"color": "grey", "linewidth": 1, "alpha": 0.8} self.lasso = LassoSelector(self.ax, self.onselect, props=lineprops, useblit=False) self.lasso.set_visible(True) @@ -1553,11 +2053,11 @@ class image_lasso_selector(object): xv, yv = np.meshgrid(pix_y, pix_x) self.pix = np.vstack((xv.flatten(), yv.flatten())).T - self.fig.canvas.mpl_connect('close_event', self.on_close) + self.fig.canvas.mpl_connect("close_event", self.on_close) plt.show() def on_close(self, event=None) -> None: - if not hasattr(self, 'mask'): + if not hasattr(self, "mask"): self.mask = np.zeros(self.img.shape[:2], dtype=bool) self.lasso.disconnect_events() self.selected = True @@ -1570,15 +2070,15 @@ class image_lasso_selector(object): def update_mask(self): self.displayed.remove() - self.displayed = self.ax.imshow(self.img, vmin=self.vmin, vmax=self.vmax, aspect='equal', cmap='inferno', alpha=self.mask_alpha) + self.displayed = self.ax.imshow(self.img, vmin=self.vmin, vmax=self.vmax, aspect="equal", cmap="inferno", alpha=self.mask_alpha) array = self.displayed.get_array().data self.mask = np.zeros(self.img.shape[:2], dtype=bool) self.mask[self.indices] = True - if hasattr(self, 'cont'): + if hasattr(self, "cont"): for coll in self.cont.collections: coll.remove() - self.cont = self.ax.contour(self.mask.astype(float), levels=[0.5], colors='white', linewidths=1) + self.cont = self.ax.contour(self.mask.astype(float), levels=[0.5], colors="white", linewidths=1) if not self.embedded: self.displayed.set_data(array) self.fig.canvas.draw_idle() @@ -1587,13 +2087,13 @@ class image_lasso_selector(object): class slit(object): - def __init__(self, img, cdelt=np.array([1., 1.]), width=1., height=2., angle=0., fig=None, ax=None): + def __init__(self, img, cdelt=np.array([1.0, 1.0]), width=1.0, height=2.0, angle=0.0, fig=None, ax=None): """ img must have shape (X, Y) """ self.selected = False self.img = img - self.vmin, self.vmax = 0., np.max(self.img[self.img > 0.]) + self.vmin, self.vmax = 0.0, np.max(self.img[self.img > 0.0]) plt.ioff() # see https://github.com/matplotlib/matplotlib/issues/17013 if fig is None: self.fig = plt.figure(figsize=(15, 15)) @@ -1601,40 +2101,40 @@ class slit(object): self.fig = fig if ax is None: self.ax = self.fig.gca() - self.mask_alpha = 1. + self.mask_alpha = 1.0 self.embedded = False else: self.ax = ax self.mask_alpha = 0.1 self.embedded = True - self.displayed = self.ax.imshow(self.img, vmin=self.vmin, vmax=self.vmax, aspect='equal', cmap='inferno', alpha=self.mask_alpha) + self.displayed = self.ax.imshow(self.img, vmin=self.vmin, vmax=self.vmax, aspect="equal", cmap="inferno", alpha=self.mask_alpha) plt.ion() xx, yy = np.indices(self.img.shape) self.pix = np.vstack((xx.flatten(), yy.flatten())).T - self.x0, self.y0 = np.array(self.img.shape)/2. + self.x0, self.y0 = np.array(self.img.shape) / 2.0 self.cdelt = cdelt - self.width = width/np.abs(self.cdelt).max()/3600. - self.height = height/np.abs(self.cdelt).max()/3600. 
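+        # width and height are given in arcsec; |cdelt| is in deg/pixel, so
+        # dividing by |cdelt| * 3600 (arcsec per pixel) converts them to pixels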
+ self.width = width / np.abs(self.cdelt).max() / 3600.0 + self.height = height / np.abs(self.cdelt).max() / 3600.0 self.angle = angle - self.rect_center = (self.x0, self.y0)-np.dot(rot2D(self.angle), (self.width/2, self.height/2)) - self.rect = Rectangle(self.rect_center, self.width, self.height, angle=self.angle, alpha=0.8, ec='grey', fc='none') + self.rect_center = (self.x0, self.y0) - np.dot(rot2D(self.angle), (self.width / 2, self.height / 2)) + self.rect = Rectangle(self.rect_center, self.width, self.height, angle=self.angle, alpha=0.8, ec="grey", fc="none") self.ax.add_patch(self.rect) - self.fig.canvas.mpl_connect('button_press_event', self.on_press) - self.fig.canvas.mpl_connect('button_release_event', self.on_release) - self.fig.canvas.mpl_connect('motion_notify_event', self.on_move) - self.fig.canvas.mpl_connect('close_event', self.on_close) + self.fig.canvas.mpl_connect("button_press_event", self.on_press) + self.fig.canvas.mpl_connect("button_release_event", self.on_release) + self.fig.canvas.mpl_connect("motion_notify_event", self.on_move) + self.fig.canvas.mpl_connect("close_event", self.on_close) self.x0, self.y0 = self.rect.xy self.pressevent = None plt.show() def on_close(self, event=None) -> None: - if not hasattr(self, 'mask'): + if not hasattr(self, "mask"): self.mask = np.zeros(self.img.shape[:2], dtype=bool) self.selected = True @@ -1662,12 +2162,12 @@ class slit(object): self.fig.canvas.draw_idle() def update_width(self, width): - self.width = width/np.abs(self.cdelt).max()/3600 + self.width = width / np.abs(self.cdelt).max() / 3600 self.rect.set_width(self.width) self.fig.canvas.draw_idle() def update_height(self, height): - self.height = height/np.abs(self.cdelt).max()/3600 + self.height = height / np.abs(self.cdelt).max() / 3600 self.rect.set_height(self.height) self.fig.canvas.draw_idle() @@ -1677,24 +2177,24 @@ class slit(object): self.fig.canvas.draw_idle() def update_mask(self): - if hasattr(self, 'displayed'): + if hasattr(self, "displayed"): try: self.displayed.remove() except ValueError: return - self.displayed = self.ax.imshow(self.img, vmin=self.vmin, vmax=self.vmax, aspect='equal', cmap='inferno', alpha=self.mask_alpha) + self.displayed = self.ax.imshow(self.img, vmin=self.vmin, vmax=self.vmax, aspect="equal", cmap="inferno", alpha=self.mask_alpha) array = self.displayed.get_array().data self.mask = np.zeros(array.shape, dtype=bool) for p in self.pix: - self.mask[tuple(p)] = (np.abs(np.dot(rot2D(-self.angle), p-self.rect.get_center()[::-1])) < (self.height/2., self.width/2.)).all() - if hasattr(self, 'cont'): + self.mask[tuple(p)] = (np.abs(np.dot(rot2D(-self.angle), p - self.rect.get_center()[::-1])) < (self.height / 2.0, self.width / 2.0)).all() + if hasattr(self, "cont"): for coll in self.cont.collections: try: coll.remove() except AttributeError: return - self.cont = self.ax.contour(self.mask.astype(float), levels=[0.5], colors='white', linewidths=1) + self.cont = self.ax.contour(self.mask.astype(float), levels=[0.5], colors="white", linewidths=1) if not self.embedded: self.displayed.set_data(array) self.fig.canvas.draw_idle() @@ -1703,13 +2203,13 @@ class slit(object): class aperture(object): - def __init__(self, img, cdelt=np.array([1., 1.]), radius=1., fig=None, ax=None): + def __init__(self, img, cdelt=np.array([1.0, 1.0]), radius=1.0, fig=None, ax=None): """ img must have shape (X, Y) """ self.selected = False self.img = img - self.vmin, self.vmax = 0., np.max(self.img[self.img > 0.]) + self.vmin, self.vmax = 0.0, np.max(self.img[self.img > 
0.0]) plt.ioff() # see https://github.com/matplotlib/matplotlib/issues/17013 if fig is None: self.fig = plt.figure(figsize=(15, 15)) @@ -1717,37 +2217,37 @@ class aperture(object): self.fig = fig if ax is None: self.ax = self.fig.gca() - self.mask_alpha = 1. + self.mask_alpha = 1.0 self.embedded = False else: self.ax = ax self.mask_alpha = 0.1 self.embedded = True - self.displayed = self.ax.imshow(self.img, vmin=self.vmin, vmax=self.vmax, aspect='equal', cmap='inferno', alpha=self.mask_alpha) + self.displayed = self.ax.imshow(self.img, vmin=self.vmin, vmax=self.vmax, aspect="equal", cmap="inferno", alpha=self.mask_alpha) plt.ion() xx, yy = np.indices(self.img.shape) self.pix = np.vstack((xx.flatten(), yy.flatten())).T - self.x0, self.y0 = np.array(self.img.shape)/2. - if np.abs(cdelt).max() != 1.: + self.x0, self.y0 = np.array(self.img.shape) / 2.0 + if np.abs(cdelt).max() != 1.0: self.cdelt = cdelt - self.radius = radius/np.abs(self.cdelt).max()/3600. + self.radius = radius / np.abs(self.cdelt).max() / 3600.0 - self.circ = Circle((self.x0, self.y0), self.radius, alpha=0.8, ec='grey', fc='none') + self.circ = Circle((self.x0, self.y0), self.radius, alpha=0.8, ec="grey", fc="none") self.ax.add_patch(self.circ) - self.fig.canvas.mpl_connect('button_press_event', self.on_press) - self.fig.canvas.mpl_connect('button_release_event', self.on_release) - self.fig.canvas.mpl_connect('motion_notify_event', self.on_move) - self.fig.canvas.mpl_connect('close_event', self.on_close) + self.fig.canvas.mpl_connect("button_press_event", self.on_press) + self.fig.canvas.mpl_connect("button_release_event", self.on_release) + self.fig.canvas.mpl_connect("motion_notify_event", self.on_move) + self.fig.canvas.mpl_connect("close_event", self.on_close) self.x0, self.y0 = self.circ.center self.pressevent = None plt.show() def on_close(self, event=None) -> None: - if not hasattr(self, 'mask'): + if not hasattr(self, "mask"): self.mask = np.zeros(self.img.shape[:2], dtype=bool) self.selected = True @@ -1775,29 +2275,29 @@ class aperture(object): self.fig.canvas.draw_idle() def update_radius(self, radius): - self.radius = radius/np.abs(self.cdelt).max()/3600 + self.radius = radius / np.abs(self.cdelt).max() / 3600 self.circ.set_radius(self.radius) self.fig.canvas.draw_idle() def update_mask(self): - if hasattr(self, 'displayed'): + if hasattr(self, "displayed"): try: self.displayed.remove() except ValueError: return - self.displayed = self.ax.imshow(self.img, vmin=self.vmin, vmax=self.vmax, aspect='equal', cmap='inferno', alpha=self.mask_alpha) + self.displayed = self.ax.imshow(self.img, vmin=self.vmin, vmax=self.vmax, aspect="equal", cmap="inferno", alpha=self.mask_alpha) array = self.displayed.get_array().data yy, xx = np.indices(self.img.shape[:2]) x0, y0 = self.circ.center - self.mask = np.sqrt((xx-x0)**2+(yy-y0)**2) < self.radius - if hasattr(self, 'cont'): + self.mask = np.sqrt((xx - x0) ** 2 + (yy - y0) ** 2) < self.radius + if hasattr(self, "cont"): for coll in self.cont.collections: try: coll.remove() except AttributeError: return - self.cont = self.ax.contour(self.mask.astype(float), levels=[0.5], colors='white', linewidths=1) + self.cont = self.ax.contour(self.mask.astype(float), levels=[0.5], colors="white", linewidths=1) if not self.embedded: self.displayed.set_data(array) self.fig.canvas.draw_idle() @@ -1810,8 +2310,7 @@ class pol_map(object): Class to interactively study polarization maps. 
""" - def __init__(self, Stokes, SNRp_cut=3., SNRi_cut=3., flux_lim=None, selection=None): - + def __init__(self, Stokes, SNRp_cut=3.0, SNRi_cut=3.0, step_vec=1, scale_vec=3.0, flux_lim=None, selection=None, pa_err=False): if isinstance(Stokes, str): Stokes = fits.open(Stokes) self.Stokes = deepcopy(Stokes) @@ -1823,15 +2322,17 @@ class pol_map(object): self.region = None self.data = None self.display_selection = selection - self.vec_scale = 2. + self.step_vec = step_vec + self.scale_vec = scale_vec + self.pa_err = pa_err # Get data - self.targ = self.Stokes[0].header['targname'] - self.pivot_wav = self.Stokes[0].header['photplam'] - self.map_convert = self.Stokes[0].header['photflam'] + self.targ = self.Stokes[0].header["targname"] + self.pivot_wav = self.Stokes[0].header["photplam"] + self.map_convert = self.Stokes[0].header["photflam"] # Create figure - plt.rcParams.update({'font.size': 10}) + plt.rcParams.update({"font.size": 10}) self.fig, self.ax = plt.subplots(figsize=(10, 10), subplot_kw=dict(projection=self.wcs)) self.fig.subplots_adjust(hspace=0, wspace=0, right=1.02) self.ax_cosmetics() @@ -1848,11 +2349,11 @@ class pol_map(object): ax_P_cut = self.fig.add_axes([0.120, 0.055, 0.230, 0.01]) ax_vec_sc = self.fig.add_axes([0.240, 0.030, 0.110, 0.01]) ax_snr_reset = self.fig.add_axes([0.080, 0.020, 0.05, 0.02]) - SNRi_max = np.max(self.I[self.IQU_cov[0, 0] > 0.]/np.sqrt(self.IQU_cov[0, 0][self.IQU_cov[0, 0] > 0.])) - SNRp_max = np.max(self.P[self.s_P > 0.]/self.s_P[self.s_P > 0.]) - s_I_cut = Slider(ax_I_cut, r"$SNR^{I}_{cut}$", 1., int(SNRi_max*0.95), valstep=1, valinit=self.SNRi_cut) - s_P_cut = Slider(ax_P_cut, r"$SNR^{P}_{cut}$", 1., int(SNRp_max*0.95), valstep=1, valinit=self.SNRp_cut) - s_vec_sc = Slider(ax_vec_sc, r"Vectors scale", 1., 6., valstep=1, valinit=self.vec_scale) + SNRi_max = np.max(self.I[self.IQU_cov[0, 0] > 0.0] / np.sqrt(self.IQU_cov[0, 0][self.IQU_cov[0, 0] > 0.0])) + SNRp_max = np.max(self.P[self.s_P > 0.0] / self.s_P[self.s_P > 0.0]) + s_I_cut = Slider(ax_I_cut, r"$SNR^{I}_{cut}$", 1.0, int(SNRi_max * 0.95), valstep=1, valinit=self.SNRi_cut) + s_P_cut = Slider(ax_P_cut, r"$SNR^{P}_{cut}$", 1.0, int(SNRp_max * 0.95), valstep=1, valinit=self.SNRp_cut) + s_vec_sc = Slider(ax_vec_sc, r"Vectors scale", 0.0, 10.0, valstep=1, valinit=self.scale_vec) b_snr_reset = Button(ax_snr_reset, "Reset") b_snr_reset.label.set_fontsize(8) @@ -1869,7 +2370,7 @@ class pol_map(object): self.fig.canvas.draw_idle() def update_vecsc(val): - self.vec_scale = val + self.scale_vec = val self.pol_vector() self.ax_cosmetics() self.fig.canvas.draw_idle() @@ -1934,7 +2435,7 @@ class pol_map(object): b_aper.label.set_fontsize(8) b_aper_reset = Button(ax_aper_reset, "Reset") b_aper_reset.label.set_fontsize(8) - s_aper_radius = Slider(ax_aper_radius, r"$R_{aper}$", np.ceil(self.wcs.wcs.cdelt.max()/1.33*3.6e5)/1e2, 3.5, valstep=1e-2, valinit=1.) 
+ s_aper_radius = Slider(ax_aper_radius, r"$R_{aper}$", np.ceil(self.wcs.wcs.cdelt.max() / 1.33 * 3.6e5) / 1e2, 3.5, valstep=1e-2, valinit=1.0) def select_aperture(event): if self.data is None: @@ -1958,8 +2459,8 @@ class pol_map(object): self.fig.canvas.draw_idle() def update_aperture(val): - if hasattr(self, 'select_instance'): - if hasattr(self.select_instance, 'radius'): + if hasattr(self, "select_instance"): + if hasattr(self.select_instance, "radius"): self.select_instance.update_radius(val) else: self.selected = True @@ -1990,9 +2491,9 @@ class pol_map(object): b_slit.label.set_fontsize(8) b_slit_reset = Button(ax_slit_reset, "Reset") b_slit_reset.label.set_fontsize(8) - s_slit_width = Slider(ax_slit_width, r"$W_{slit}$", np.ceil(self.wcs.wcs.cdelt.max()/1.33*3.6e5)/1e2, 7., valstep=1e-2, valinit=1.) - s_slit_height = Slider(ax_slit_height, r"$H_{slit}$", np.ceil(self.wcs.wcs.cdelt.max()/1.33*3.6e5)/1e2, 7., valstep=1e-2, valinit=1.) - s_slit_angle = Slider(ax_slit_angle, r"$\theta_{slit}$", 0., 90., valstep=1., valinit=0.) + s_slit_width = Slider(ax_slit_width, r"$W_{slit}$", np.ceil(self.wcs.wcs.cdelt.max() / 1.33 * 3.6e5) / 1e2, 7.0, valstep=1e-2, valinit=1.0) + s_slit_height = Slider(ax_slit_height, r"$H_{slit}$", np.ceil(self.wcs.wcs.cdelt.max() / 1.33 * 3.6e5) / 1e2, 7.0, valstep=1e-2, valinit=1.0) + s_slit_angle = Slider(ax_slit_angle, r"$\theta_{slit}$", 0.0, 90.0, valstep=1.0, valinit=0.0) def select_slit(event): if self.data is None: @@ -2010,52 +2511,59 @@ class pol_map(object): else: self.selected = True self.region = None - self.select_instance = slit(self.data, fig=self.fig, ax=self.ax, cdelt=self.wcs.wcs.cdelt, - width=s_slit_width.val, height=s_slit_height.val, angle=s_slit_angle.val) + self.select_instance = slit( + self.data, fig=self.fig, ax=self.ax, cdelt=self.wcs.wcs.cdelt, width=s_slit_width.val, height=s_slit_height.val, angle=s_slit_angle.val + ) self.select_instance.rect.set_visible(True) self.fig.canvas.draw_idle() def update_slit_w(val): - if hasattr(self, 'select_instance'): - if hasattr(self.select_instance, 'width'): + if hasattr(self, "select_instance"): + if hasattr(self.select_instance, "width"): self.select_instance.update_width(val) else: self.selected = True - self.select_instance = slit(self.data, fig=self.fig, ax=self.ax, cdelt=self.wcs.wcs.cdelt, - width=val, height=s_slit_height.val, angle=s_slit_angle.val) + self.select_instance = slit( + self.data, fig=self.fig, ax=self.ax, cdelt=self.wcs.wcs.cdelt, width=val, height=s_slit_height.val, angle=s_slit_angle.val + ) else: self.selected = True - self.select_instance = slit(self.data, fig=self.fig, ax=self.ax, cdelt=self.wcs.wcs.cdelt, - width=val, height=s_slit_height.val, angle=s_slit_angle.val) + self.select_instance = slit( + self.data, fig=self.fig, ax=self.ax, cdelt=self.wcs.wcs.cdelt, width=val, height=s_slit_height.val, angle=s_slit_angle.val + ) self.fig.canvas.draw_idle() def update_slit_h(val): - if hasattr(self, 'select_instance'): - if hasattr(self.select_instance, 'height'): + if hasattr(self, "select_instance"): + if hasattr(self.select_instance, "height"): self.select_instance.update_height(val) else: self.selected = True - self.select_instance = slit(self.data, fig=self.fig, ax=self.ax, cdelt=self.wcs.wcs.cdelt, - width=s_slit_width.val, height=val, angle=s_slit_angle.val) + self.select_instance = slit( + self.data, fig=self.fig, ax=self.ax, cdelt=self.wcs.wcs.cdelt, width=s_slit_width.val, height=val, angle=s_slit_angle.val + ) else: self.selected = True - self.select_instance = 
slit(self.data, fig=self.fig, ax=self.ax, cdelt=self.wcs.wcs.cdelt, - width=s_slit_width.val, height=val, angle=s_slit_angle.val) + self.select_instance = slit( + self.data, fig=self.fig, ax=self.ax, cdelt=self.wcs.wcs.cdelt, width=s_slit_width.val, height=val, angle=s_slit_angle.val + ) self.fig.canvas.draw_idle() def update_slit_a(val): - if hasattr(self, 'select_instance'): - if hasattr(self.select_instance, 'angle'): + if hasattr(self, "select_instance"): + if hasattr(self.select_instance, "angle"): self.select_instance.update_angle(val) else: self.selected = True - self.select_instance = slit(self.data, fig=self.fig, ax=self.ax, cdelt=self.wcs.wcs.cdelt, - width=s_slit_width.val, height=s_slit_height.val, angle=val) + self.select_instance = slit( + self.data, fig=self.fig, ax=self.ax, cdelt=self.wcs.wcs.cdelt, width=s_slit_width.val, height=s_slit_height.val, angle=val + ) else: self.selected = True - self.select_instance = slit(self.data, fig=self.fig, ax=self.ax, cdelt=self.wcs.wcs.cdelt, - width=s_slit_width.val, height=s_slit_height.val, angle=val) + self.select_instance = slit( + self.data, fig=self.fig, ax=self.ax, cdelt=self.wcs.wcs.cdelt, width=s_slit_width.val, height=s_slit_height.val, angle=val + ) self.fig.canvas.draw_idle() def reset_slit(event): @@ -2124,7 +2632,7 @@ class pol_map(object): b_save = Button(ax_save, "Save") b_save.label.set_fontsize(8) ax_text_save = self.fig.add_axes([0.3, 0.020, 0.5, 0.025], visible=False) - text_save = TextBox(ax_text_save, "Save to:", initial='') + text_save = TextBox(ax_text_save, "Save to:", initial="") def saveplot(event): ax_text_save.set(visible=True) @@ -2138,18 +2646,18 @@ class pol_map(object): def submit_save(expression): ax_text_save.set(visible=False) - if expression != '': - save_fig, save_ax = plt.subplots(figsize=(12, 10), layout='constrained', subplot_kw=dict(projection=self.wcs)) + if expression != "": + save_fig, save_ax = plt.subplots(figsize=(12, 10), layout="constrained", subplot_kw=dict(projection=self.wcs)) self.ax_cosmetics(ax=save_ax) self.display(fig=save_fig, ax=save_ax) self.pol_vector(fig=save_fig, ax=save_ax) self.pol_int(fig=save_fig, ax=save_ax) save_fig.suptitle(r"{0:s} with $SNR_{{p}} \geq$ {1:d} and $SNR_{{I}} \geq$ {2:d}".format(self.targ, int(self.SNRp), int(self.SNRi))) - if not expression[-4:] in ['.png', '.jpg', '.pdf']: - expression += '.pdf' - save_fig.savefig(expression, bbox_inches='tight', dpi=200) + if expression[-4:] not in [".png", ".jpg", ".pdf"]: + expression += ".pdf" + save_fig.savefig(expression, bbox_inches="tight", dpi=200) plt.close(save_fig) - text_save.set_val('') + text_save.set_val("") ax_snr_reset.set(visible=True) ax_vec_sc.set(visible=True) ax_save.set(visible=True) @@ -2163,7 +2671,7 @@ class pol_map(object): b_dump = Button(ax_dump, "Dump") b_dump.label.set_fontsize(8) ax_text_dump = self.fig.add_axes([0.3, 0.020, 0.5, 0.025], visible=False) - text_dump = TextBox(ax_text_dump, "Dump to:", initial='') + text_dump = TextBox(ax_text_dump, "Dump to:", initial="") def dump(event): ax_text_dump.set(visible=True) @@ -2174,10 +2682,10 @@ class pol_map(object): self.fig.canvas.draw_idle() shape = np.array(self.I.shape) - center = (shape/2).astype(int) - cdelt_arcsec = self.wcs.wcs.cdelt*3600 + center = (shape / 2).astype(int) + cdelt_arcsec = self.wcs.wcs.cdelt * 3600 xx, yy = np.indices(shape) - x, y = (xx-center[0])*cdelt_arcsec[0], (yy-center[1])*cdelt_arcsec[1] + x, y = (xx - center[0]) * cdelt_arcsec[0], (yy - center[1]) * cdelt_arcsec[1] P, PA = np.zeros(shape), 
np.zeros(shape) P[self.cut] = self.P[self.cut] @@ -2185,19 +2693,20 @@ class pol_map(object): dump_list = [] for i in range(shape[0]): for j in range(shape[1]): - dump_list.append([x[i, j], y[i, j], self.I[i, j]*self.map_convert, self.Q[i, j] * - self.map_convert, self.U[i, j]*self.map_convert, P[i, j], PA[i, j]]) + dump_list.append( + [x[i, j], y[i, j], self.I[i, j] * self.map_convert, self.Q[i, j] * self.map_convert, self.U[i, j] * self.map_convert, P[i, j], PA[i, j]] + ) self.data_dump = np.array(dump_list) b_dump.on_clicked(dump) def submit_dump(expression): ax_text_dump.set(visible=False) - if expression != '': - if not expression[-4:] in ['.txt', '.dat']: - expression += '.txt' + if expression != "": + if expression[-4:] not in [".txt", ".dat"]: + expression += ".txt" np.savetxt(expression, self.data_dump) - text_dump.set_val('') + text_dump.set_val("") ax_snr_reset.set(visible=True) ax_vec_sc.set(visible=True) ax_save.set(visible=True) @@ -2221,39 +2730,45 @@ class pol_map(object): b_snrp = Button(ax_snrp, r"$P / \sigma_{P}$") def d_tf(event): - self.display_selection = 'total_flux' + self.display_selection = "total_flux" self.display() self.pol_int() + b_tf.on_clicked(d_tf) def d_pf(event): - self.display_selection = 'pol_flux' + self.display_selection = "pol_flux" self.display() self.pol_int() + b_pf.on_clicked(d_pf) def d_p(event): - self.display_selection = 'pol_deg' + self.display_selection = "pol_deg" self.display() self.pol_int() + b_p.on_clicked(d_p) def d_pa(event): - self.display_selection = 'pol_ang' + self.display_selection = "pol_ang" self.display() self.pol_int() + b_pa.on_clicked(d_pa) def d_snri(event): - self.display_selection = 'snri' + self.display_selection = "snri" self.display() self.pol_int() + b_snri.on_clicked(d_snri) def d_snrp(event): - self.display_selection = 'snrp' + self.display_selection = "snrp" self.display() self.pol_int() + b_snrp.on_clicked(d_snrp) plt.show() @@ -2264,75 +2779,111 @@ class pol_map(object): @property def I(self): - return self.Stokes['I_STOKES'].data + return self.Stokes["I_STOKES"].data @property def Q(self): - return self.Stokes['Q_STOKES'].data + return self.Stokes["Q_STOKES"].data @property def U(self): - return self.Stokes['U_STOKES'].data + return self.Stokes["U_STOKES"].data @property def IQU_cov(self): - return self.Stokes['IQU_COV_MATRIX'].data + return self.Stokes["IQU_COV_MATRIX"].data @property def P(self): - return self.Stokes['POL_DEG_DEBIASED'].data + return self.Stokes["POL_DEG_DEBIASED"].data @property def s_P(self): - return self.Stokes['POL_DEG_ERR'].data + return self.Stokes["POL_DEG_ERR"].data @property def PA(self): - return self.Stokes['POL_ANG'].data + return self.Stokes["POL_ANG"].data + + @property + def s_PA(self): + return self.Stokes["POL_ANG_ERR"].data @property def data_mask(self): - return self.Stokes['DATA_MASK'].data + return self.Stokes["DATA_MASK"].data def set_data_mask(self, mask): - self.Stokes[np.argmax([self.Stokes[i].header['datatype'] == 'Data_mask' for i in range(len(self.Stokes))])].data = mask.astype(float) + self.Stokes[np.argmax([self.Stokes[i].header["datatype"] == "Data_mask" for i in range(len(self.Stokes))])].data = mask.astype(float) @property def cut(self): s_I = np.sqrt(self.IQU_cov[0, 0]) SNRp_mask, SNRi_mask = np.zeros(self.P.shape).astype(bool), np.zeros(self.I.shape).astype(bool) - SNRp_mask[self.s_P > 0.] = self.P[self.s_P > 0.] / self.s_P[self.s_P > 0.] > self.SNRp - SNRi_mask[s_I > 0.] = self.I[s_I > 0.] / s_I[s_I > 0.] 
> self.SNRi + SNRp_mask[self.s_P > 0.0] = self.P[self.s_P > 0.0] / self.s_P[self.s_P > 0.0] > self.SNRp + SNRi_mask[s_I > 0.0] = self.I[s_I > 0.0] / s_I[s_I > 0.0] > self.SNRi return np.logical_and(SNRi_mask, SNRp_mask) def ax_cosmetics(self, ax=None): if ax is None: ax = self.ax - ax.set(aspect='equal', fc='black') + ax.set(aspect="equal", fc="black") - ax.coords.grid(True, color='white', ls='dotted', alpha=0.5) - ax.coords[0].set_axislabel('Right Ascension (J2000)') - ax.coords[0].set_axislabel_position('t') - ax.coords[0].set_ticklabel_position('t') - ax.set_ylabel('Declination (J2000)', labelpad=-1) + ax.coords.grid(True, color="white", ls="dotted", alpha=0.5) + ax.coords[0].set_axislabel("Right Ascension (J2000)") + ax.coords[0].set_axislabel_position("t") + ax.coords[0].set_ticklabel_position("t") + ax.set_ylabel("Declination (J2000)", labelpad=-1) # Display scales and orientation fontprops = fm.FontProperties(size=14) - px_size = self.wcs.wcs.cdelt[0]*3600. - if hasattr(self, 'px_sc'): + px_size = self.wcs.wcs.cdelt[0] * 3600.0 + if hasattr(self, "px_sc"): self.px_sc.remove() - self.px_sc = AnchoredSizeBar(ax.transData, 1./px_size, '1 arcsec', 3, pad=0.5, sep=5, borderpad=0.5, - frameon=False, size_vertical=0.005, color='white', fontproperties=fontprops) + self.px_sc = AnchoredSizeBar( + ax.transData, + 1.0 / px_size, + "1 arcsec", + 3, + pad=0.5, + sep=5, + borderpad=0.5, + frameon=False, + size_vertical=0.005, + color="white", + fontproperties=fontprops, + ) ax.add_artist(self.px_sc) - if hasattr(self, 'pol_sc'): + if hasattr(self, "pol_sc"): self.pol_sc.remove() - self.pol_sc = AnchoredSizeBar(ax.transData, self.vec_scale, r"$P$= 100%", 4, pad=0.5, sep=5, borderpad=0.5, - frameon=False, size_vertical=0.005, color='white', fontproperties=fontprops) + if self.scale_vec != 0: + scale_vec = self.scale_vec + else: + scale_vec = 2.0 + self.pol_sc = AnchoredSizeBar( + ax.transData, scale_vec, r"$P$= 100%", 4, pad=0.5, sep=5, borderpad=0.5, frameon=False, size_vertical=0.005, color="white", fontproperties=fontprops + ) ax.add_artist(self.pol_sc) - if hasattr(self, 'north_dir'): + if hasattr(self, "north_dir"): self.north_dir.remove() - self.north_dir = AnchoredDirectionArrows(ax.transAxes, "E", "N", length=-0.08, fontsize=0.025, loc=1, aspect_ratio=-1, sep_y=0.01, sep_x=0.01, back_length=0., head_length=10., head_width=10., - angle=-self.Stokes[0].header['orientat'], color='white', text_props={'ec': None, 'fc': 'w', 'alpha': 1, 'lw': 0.4}, arrow_props={'ec': None, 'fc': 'w', 'alpha': 1, 'lw': 1}) + self.north_dir = AnchoredDirectionArrows( + ax.transAxes, + "E", + "N", + length=-0.05, + fontsize=0.02, + loc=1, + aspect_ratio=-(self.I.shape[1]/self.I.shape[0]), + sep_y=0.01, + sep_x=0.01, + back_length=0.0, + head_length=10.0, + head_width=10.0, + angle=-self.Stokes[0].header["orientat"], + color="white", + text_props={"ec": None, "fc": "w", "alpha": 1, "lw": 0.4}, + arrow_props={"ec": None, "fc": "w", "alpha": 1, "lw": 1}, + ) ax.add_artist(self.north_dir) def display(self, fig=None, ax=None, flux_lim=None): @@ -2341,93 +2892,222 @@ class pol_map(object): self.display_selection = "total_flux" if flux_lim is None: flux_lim = self.flux_lim - if self.display_selection.lower() in ['total_flux']: - self.data = self.I*self.map_convert + if self.display_selection.lower() in ["total_flux"]: + self.data = self.I * self.map_convert if flux_lim is None: - vmin, vmax = 1./2.*np.median(self.data[self.data > 0.]), np.max(self.data[self.data > 0.]) + vmin, vmax = 1.0 / 2.0 * 
np.median(self.data[self.data > 0.0]), np.max(self.data[self.data > 0.0]) else: vmin, vmax = flux_lim norm = LogNorm(vmin, vmax) label = r"$F_{\lambda}$ [$ergs \cdot cm^{-2} \cdot s^{-1} \cdot \AA^{-1}$]" - elif self.display_selection.lower() in ['pol_flux']: - self.data = self.I*self.map_convert*self.P + elif self.display_selection.lower() in ["pol_flux"]: + self.data = self.I * self.map_convert * self.P if flux_lim is None: - vmin, vmax = 1./2.*np.median(self.I[self.I > 0.]*self.map_convert), np.max(self.I[self.I > 0.]*self.map_convert) + vmin, vmax = 1.0 / 2.0 * np.median(self.I[self.I > 0.0] * self.map_convert), np.max(self.I[self.I > 0.0] * self.map_convert) else: vmin, vmax = flux_lim norm = LogNorm(vmin, vmax) label = r"$P \cdot F_{\lambda}$ [$ergs \cdot cm^{-2} \cdot s^{-1} \cdot \AA^{-1}$]" - elif self.display_selection.lower() in ['pol_deg']: - self.data = self.P*100. - vmin, vmax = 0., np.max(self.data[self.P > self.s_P]) + elif self.display_selection.lower() in ["pol_deg"]: + self.data = self.P * 100.0 + vmin, vmax = 0.0, np.max(self.data[self.P > self.s_P]) label = r"$P$ [%]" - elif self.display_selection.lower() in ['pol_ang']: + elif self.display_selection.lower() in ["pol_ang"]: self.data = princ_angle(self.PA) - vmin, vmax = 0, 180. + vmin, vmax = 0, 180.0 label = r"$\theta_{P}$ [°]" - elif self.display_selection.lower() in ['snri']: + elif self.display_selection.lower() in ["snri"]: s_I = np.sqrt(self.IQU_cov[0, 0]) SNRi = np.zeros(self.I.shape) - SNRi[s_I > 0.] = self.I[s_I > 0.]/s_I[s_I > 0.] + SNRi[s_I > 0.0] = self.I[s_I > 0.0] / s_I[s_I > 0.0] self.data = SNRi - vmin, vmax = 0., np.max(self.data[self.data > 0.]) + vmin, vmax = 0.0, np.max(self.data[self.data > 0.0]) label = r"$I_{Stokes}/\sigma_{I}$" - elif self.display_selection.lower() in ['snrp']: + elif self.display_selection.lower() in ["snrp"]: SNRp = np.zeros(self.P.shape) - SNRp[self.s_P > 0.] = self.P[self.s_P > 0.]/self.s_P[self.s_P > 0.] 
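Aside (illustrative, not patch code): the "snri" and "snrp" branches of display() compute signal-to-noise maps only where the uncertainty is strictly positive, leaving every other pixel at zero. Condensed into a standalone helper; the name snr_map is hypothetical.

import numpy as np

def snr_map(data, err):
    snr = np.zeros(data.shape)
    good = err > 0.0                    # pixels with a defined, non-zero uncertainty
    snr[good] = data[good] / err[good]  # everything else stays at SNR = 0
    return snr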
+ SNRp[self.s_P > 0.0] = self.P[self.s_P > 0.0] / self.s_P[self.s_P > 0.0] self.data = SNRp - vmin, vmax = 0., np.max(self.data[self.data > 0.]) + vmin, vmax = 0.0, np.max(self.data[self.data > 0.0]) label = r"$P/\sigma_{P}$" if fig is None: fig = self.fig if ax is None: ax = self.ax - if hasattr(self, 'cbar'): + if hasattr(self, "cbar"): self.cbar.remove() - if hasattr(self, 'im'): + if hasattr(self, "im"): self.im.remove() if norm is not None: - self.im = ax.imshow(self.data, norm=norm, aspect='equal', cmap='inferno') + self.im = ax.imshow(self.data, norm=norm, aspect="equal", cmap="inferno") else: - self.im = ax.imshow(self.data, vmin=vmin, vmax=vmax, aspect='equal', cmap='inferno') - plt.rcParams.update({'font.size': 14}) + self.im = ax.imshow(self.data, vmin=vmin, vmax=vmax, aspect="equal", cmap="inferno") + plt.rcParams.update({"font.size": 14}) self.cbar = fig.colorbar(self.im, ax=ax, aspect=50, shrink=0.75, pad=0.025, label=label) - plt.rcParams.update({'font.size': 10}) + plt.rcParams.update({"font.size": 10}) fig.canvas.draw_idle() return self.im else: if norm is not None: - im = ax.imshow(self.data, norm=norm, aspect='equal', cmap='inferno') + im = ax.imshow(self.data, norm=norm, aspect="equal", cmap="inferno") else: - im = ax.imshow(self.data, vmin=vmin, vmax=vmax, aspect='equal', cmap='inferno') + im = ax.imshow(self.data, vmin=vmin, vmax=vmax, aspect="equal", cmap="inferno") ax.set_xlim(0, self.data.shape[1]) ax.set_ylim(0, self.data.shape[0]) - plt.rcParams.update({'font.size': 14}) + plt.rcParams.update({"font.size": 14}) fig.colorbar(im, ax=ax, aspect=50, shrink=0.75, pad=0.025, label=label) - plt.rcParams.update({'font.size': 10}) + plt.rcParams.update({"font.size": 10}) fig.canvas.draw_idle() return im def pol_vector(self, fig=None, ax=None): - P_cut = np.ones(self.P.shape)*np.nan - P_cut[self.cut] = self.P[self.cut] + P_cut = np.ones(self.P.shape) * np.nan + if self.scale_vec != 0.0: + scale_vec = self.scale_vec + P_cut[self.cut] = self.P[self.cut] + else: + scale_vec = 2.0 + P_cut[self.cut] = 1.0 / scale_vec X, Y = np.meshgrid(np.arange(self.I.shape[1]), np.arange(self.I.shape[0])) - XY_U, XY_V = P_cut*np.cos(np.pi/2. + self.PA*np.pi/180.), P_cut*np.sin(np.pi/2. + self.PA*np.pi/180.) 
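Aside (sketch, not patch code): pol_vector draws each pixel's polarization as a headless segment of length P at position angle PA, measured in degrees from north, hence the pi/2 offset below. The helper name pol_vectors is hypothetical; the quiver keywords mirror the ones used in the patch.

import numpy as np

def pol_vectors(ax, P, PA, scale_vec=3.0, step=1):
    X, Y = np.meshgrid(np.arange(P.shape[1]), np.arange(P.shape[0]))
    theta = np.pi / 2.0 + np.deg2rad(PA)   # rotate so PA = 0 points north
    U, V = P * np.cos(theta), P * np.sin(theta)
    return ax.quiver(
        X[::step, ::step], Y[::step, ::step], U[::step, ::step], V[::step, ::step],
        units="xy", scale=1.0 / scale_vec, scale_units="xy", pivot="mid",
        headwidth=0.0, headlength=0.0, headaxislength=0.0,  # headless: segments, not arrows
        color="white", edgecolor="black",
    )

Subsampling with step thins the field (step=2 keeps every other vector), which is exactly what the new step_vec option exposes.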
+ XY_U, XY_V = P_cut * np.cos(np.pi / 2.0 + self.PA * np.pi / 180.0), P_cut * np.sin(np.pi / 2.0 + self.PA * np.pi / 180.0) if fig is None: fig = self.fig if ax is None: ax = self.ax - if hasattr(self, 'quiver'): + if hasattr(self, "quiver"): self.quiver.remove() - self.quiver = ax.quiver(X, Y, XY_U, XY_V, units='xy', scale=1./self.vec_scale, scale_units='xy', pivot='mid', headwidth=0., - headlength=0., headaxislength=0., width=0.5, linewidth=0.75, color='white', edgecolor='black') + self.quiver = ax.quiver( + X[:: self.step_vec, :: self.step_vec], + Y[:: self.step_vec, :: self.step_vec], + XY_U[:: self.step_vec, :: self.step_vec], + XY_V[:: self.step_vec, :: self.step_vec], + units="xy", + scale=1.0 / scale_vec, + scale_units="xy", + pivot="mid", + headwidth=0.0, + headlength=0.0, + headaxislength=0.0, + width=0.3, + linewidth=0.6, + color="white", + edgecolor="black", + ) + if self.pa_err: + XY_U_err1, XY_V_err1 = ( + P_cut * np.cos(np.pi / 2.0 + (self.PA + 3.0 * self.s_PA) * np.pi / 180.0), + P_cut * np.sin(np.pi / 2.0 + (self.PA + 3.0 * self.s_PA) * np.pi / 180.0), + ) + XY_U_err2, XY_V_err2 = ( + P_cut * np.cos(np.pi / 2.0 + (self.PA - 3.0 * self.s_PA) * np.pi / 180.0), + P_cut * np.sin(np.pi / 2.0 + (self.PA - 3.0 * self.s_PA) * np.pi / 180.0), + ) + if hasattr(self, "quiver_err1"): + self.quiver_err1.remove() + if hasattr(self, "quiver_err2"): + self.quiver_err2.remove() + self.quiver_err1 = ax.quiver( + X[:: self.step_vec, :: self.step_vec], + Y[:: self.step_vec, :: self.step_vec], + XY_U_err1[:: self.step_vec, :: self.step_vec], + XY_V_err1[:: self.step_vec, :: self.step_vec], + units="xy", + scale=1.0 / scale_vec, + scale_units="xy", + pivot="mid", + headwidth=0.0, + headlength=0.0, + headaxislength=0.0, + width=0.05, + # linewidth=0.5, + color="black", + edgecolor="black", + ls="dashed", + ) + self.quiver_err2 = ax.quiver( + X[:: self.step_vec, :: self.step_vec], + Y[:: self.step_vec, :: self.step_vec], + XY_U_err2[:: self.step_vec, :: self.step_vec], + XY_V_err2[:: self.step_vec, :: self.step_vec], + units="xy", + scale=1.0 / scale_vec, + scale_units="xy", + pivot="mid", + headwidth=0.0, + headlength=0.0, + headaxislength=0.0, + width=0.05, + # linewidth=0.5, + color="black", + edgecolor="black", + ls="dashed", + ) fig.canvas.draw_idle() return self.quiver else: - ax.quiver(X, Y, XY_U, XY_V, units='xy', scale=1./self.vec_scale, scale_units='xy', pivot='mid', headwidth=0., - headlength=0., headaxislength=0., width=0.5, linewidth=0.75, color='white', edgecolor='black') + ax.quiver( + X[:: self.step_vec, :: self.step_vec], + Y[:: self.step_vec, :: self.step_vec], + XY_U[:: self.step_vec, :: self.step_vec], + XY_V[:: self.step_vec, :: self.step_vec], + units="xy", + scale=1.0 / scale_vec, + scale_units="xy", + pivot="mid", + headwidth=0.0, + headlength=0.0, + headaxislength=0.0, + width=0.3, + linewidth=0.6, + color="white", + edgecolor="black", + ) + if self.pa_err: + XY_U_err1, XY_V_err1 = ( + P_cut * np.cos(np.pi / 2.0 + (self.PA + 3.0 * self.s_PA) * np.pi / 180.0), + P_cut * np.sin(np.pi / 2.0 + (self.PA + 3.0 * self.s_PA) * np.pi / 180.0), + ) + XY_U_err2, XY_V_err2 = ( + P_cut * np.cos(np.pi / 2.0 + (self.PA - 3.0 * self.s_PA) * np.pi / 180.0), + P_cut * np.sin(np.pi / 2.0 + (self.PA - 3.0 * self.s_PA) * np.pi / 180.0), + ) + ax.quiver( + X[:: self.step_vec, :: self.step_vec], + Y[:: self.step_vec, :: self.step_vec], + XY_U_err1[:: self.step_vec, :: self.step_vec], + XY_V_err1[:: self.step_vec, :: self.step_vec], + units="xy", + scale=1.0 / scale_vec, + scale_units="xy", + 
pivot="mid", + headwidth=0.0, + headlength=0.0, + headaxislength=0.0, + width=0.05, + # linewidth=0.5, + color="black", + edgecolor="black", + ls="dashed", + ) + ax.quiver( + X[:: self.step_vec, :: self.step_vec], + Y[:: self.step_vec, :: self.step_vec], + XY_U_err2[:: self.step_vec, :: self.step_vec], + XY_V_err2[:: self.step_vec, :: self.step_vec], + units="xy", + scale=1.0 / scale_vec, + scale_units="xy", + pivot="mid", + headwidth=0.0, + headlength=0.0, + headaxislength=0.0, + width=0.05, + # linewidth=0.5, + color="black", + edgecolor="black", + ls="dashed", + ) fig.canvas.draw_idle() def pol_int(self, fig=None, ax=None): @@ -2435,10 +3115,10 @@ class pol_map(object): s_I = np.sqrt(self.IQU_cov[0, 0]) I_reg = self.I.sum() I_reg_err = np.sqrt(np.sum(s_I**2)) - P_reg = self.Stokes[0].header['P_int'] - P_reg_err = self.Stokes[0].header['P_int_err'] - PA_reg = self.Stokes[0].header['PA_int'] - PA_reg_err = self.Stokes[0].header['PA_int_err'] + P_reg = self.Stokes[0].header["P_int"] + P_reg_err = self.Stokes[0].header["sP_int"] + PA_reg = self.Stokes[0].header["PA_int"] + PA_reg_err = self.Stokes[0].header["sPA_int"] s_I = np.sqrt(self.IQU_cov[0, 0]) s_Q = np.sqrt(self.IQU_cov[1, 1]) @@ -2450,19 +3130,29 @@ class pol_map(object): I_cut = self.I[self.cut].sum() Q_cut = self.Q[self.cut].sum() U_cut = self.U[self.cut].sum() - I_cut_err = np.sqrt(np.sum(s_I[self.cut]**2)) - Q_cut_err = np.sqrt(np.sum(s_Q[self.cut]**2)) - U_cut_err = np.sqrt(np.sum(s_U[self.cut]**2)) - IQ_cut_err = np.sqrt(np.sum(s_IQ[self.cut]**2)) - IU_cut_err = np.sqrt(np.sum(s_IU[self.cut]**2)) - QU_cut_err = np.sqrt(np.sum(s_QU[self.cut]**2)) + I_cut_err = np.sqrt(np.sum(s_I[self.cut] ** 2)) + Q_cut_err = np.sqrt(np.sum(s_Q[self.cut] ** 2)) + U_cut_err = np.sqrt(np.sum(s_U[self.cut] ** 2)) + IQ_cut_err = np.sqrt(np.sum(s_IQ[self.cut] ** 2)) + IU_cut_err = np.sqrt(np.sum(s_IU[self.cut] ** 2)) + QU_cut_err = np.sqrt(np.sum(s_QU[self.cut] ** 2)) - P_cut = np.sqrt(Q_cut**2+U_cut**2)/I_cut - P_cut_err = np.sqrt((Q_cut**2*Q_cut_err**2 + U_cut**2*U_cut_err**2 + 2.*Q_cut*U_cut*QU_cut_err)/(Q_cut**2 + U_cut**2) + - ((Q_cut/I_cut)**2 + (U_cut/I_cut)**2)*I_cut_err**2 - 2.*(Q_cut/I_cut)*IQ_cut_err - 2.*(U_cut/I_cut)*IU_cut_err)/I_cut + with np.errstate(divide="ignore", invalid="ignore"): + P_cut = np.sqrt(Q_cut**2 + U_cut**2) / I_cut + P_cut_err = ( + np.sqrt( + (Q_cut**2 * Q_cut_err**2 + U_cut**2 * U_cut_err**2 + 2.0 * Q_cut * U_cut * QU_cut_err) / (Q_cut**2 + U_cut**2) + + ((Q_cut / I_cut) ** 2 + (U_cut / I_cut) ** 2) * I_cut_err**2 + - 2.0 * (Q_cut / I_cut) * IQ_cut_err + - 2.0 * (U_cut / I_cut) * IU_cut_err + ) + / I_cut + ) - PA_cut = princ_angle((90./np.pi)*np.arctan2(U_cut, Q_cut)) - PA_cut_err = (90./(np.pi*(Q_cut**2+U_cut**2)))*np.sqrt(U_cut**2*Q_cut_err**2 + Q_cut**2*U_cut_err**2 - 2.*Q_cut*U_cut*QU_cut_err) + PA_cut = princ_angle((90.0 / np.pi) * np.arctan2(U_cut, Q_cut)) + PA_cut_err = (90.0 / (np.pi * (Q_cut**2 + U_cut**2))) * np.sqrt( + U_cut**2 * Q_cut_err**2 + Q_cut**2 * U_cut_err**2 - 2.0 * Q_cut * U_cut * QU_cut_err + ) else: s_I = np.sqrt(self.IQU_cov[0, 0]) @@ -2475,39 +3165,59 @@ class pol_map(object): I_reg = self.I[self.region].sum() Q_reg = self.Q[self.region].sum() U_reg = self.U[self.region].sum() - I_reg_err = np.sqrt(np.sum(s_I[self.region]**2)) - Q_reg_err = np.sqrt(np.sum(s_Q[self.region]**2)) - U_reg_err = np.sqrt(np.sum(s_U[self.region]**2)) - IQ_reg_err = np.sqrt(np.sum(s_IQ[self.region]**2)) - IU_reg_err = np.sqrt(np.sum(s_IU[self.region]**2)) - QU_reg_err = np.sqrt(np.sum(s_QU[self.region]**2)) + 
I_reg_err = np.sqrt(np.sum(s_I[self.region] ** 2)) + Q_reg_err = np.sqrt(np.sum(s_Q[self.region] ** 2)) + U_reg_err = np.sqrt(np.sum(s_U[self.region] ** 2)) + IQ_reg_err = np.sqrt(np.sum(s_IQ[self.region] ** 2)) + IU_reg_err = np.sqrt(np.sum(s_IU[self.region] ** 2)) + QU_reg_err = np.sqrt(np.sum(s_QU[self.region] ** 2)) - P_reg = np.sqrt(Q_reg**2+U_reg**2)/I_reg - P_reg_err = np.sqrt((Q_reg**2*Q_reg_err**2 + U_reg**2*U_reg_err**2 + 2.*Q_reg*U_reg*QU_reg_err)/(Q_reg**2 + U_reg**2) + - ((Q_reg/I_reg)**2 + (U_reg/I_reg)**2)*I_reg_err**2 - 2.*(Q_reg/I_reg)*IQ_reg_err - 2.*(U_reg/I_reg)*IU_reg_err)/I_reg + with np.errstate(divide="ignore", invalid="ignore"): + P_reg = np.sqrt(Q_reg**2 + U_reg**2) / I_reg + P_reg_err = ( + np.sqrt( + (Q_reg**2 * Q_reg_err**2 + U_reg**2 * U_reg_err**2 + 2.0 * Q_reg * U_reg * QU_reg_err) / (Q_reg**2 + U_reg**2) + + ((Q_reg / I_reg) ** 2 + (U_reg / I_reg) ** 2) * I_reg_err**2 + - 2.0 * (Q_reg / I_reg) * IQ_reg_err + - 2.0 * (U_reg / I_reg) * IU_reg_err + ) + / I_reg + ) - PA_reg = princ_angle((90./np.pi)*np.arctan2(U_reg, Q_reg)) - PA_reg_err = (90./(np.pi*(Q_reg**2+U_reg**2)))*np.sqrt(U_reg**2*Q_reg_err**2 + Q_reg**2*U_reg_err**2 - 2.*Q_reg*U_reg*QU_reg_err) + PA_reg = princ_angle((90.0 / np.pi) * np.arctan2(U_reg, Q_reg)) + PA_reg_err = (90.0 / (np.pi * (Q_reg**2 + U_reg**2))) * np.sqrt( + U_reg**2 * Q_reg_err**2 + Q_reg**2 * U_reg_err**2 - 2.0 * Q_reg * U_reg * QU_reg_err + ) new_cut = np.logical_and(self.region, self.cut) I_cut = self.I[new_cut].sum() Q_cut = self.Q[new_cut].sum() U_cut = self.U[new_cut].sum() - I_cut_err = np.sqrt(np.sum(s_I[new_cut]**2)) - Q_cut_err = np.sqrt(np.sum(s_Q[new_cut]**2)) - U_cut_err = np.sqrt(np.sum(s_U[new_cut]**2)) - IQ_cut_err = np.sqrt(np.sum(s_IQ[new_cut]**2)) - IU_cut_err = np.sqrt(np.sum(s_IU[new_cut]**2)) - QU_cut_err = np.sqrt(np.sum(s_QU[new_cut]**2)) + I_cut_err = np.sqrt(np.sum(s_I[new_cut] ** 2)) + Q_cut_err = np.sqrt(np.sum(s_Q[new_cut] ** 2)) + U_cut_err = np.sqrt(np.sum(s_U[new_cut] ** 2)) + IQ_cut_err = np.sqrt(np.sum(s_IQ[new_cut] ** 2)) + IU_cut_err = np.sqrt(np.sum(s_IU[new_cut] ** 2)) + QU_cut_err = np.sqrt(np.sum(s_QU[new_cut] ** 2)) - P_cut = np.sqrt(Q_cut**2+U_cut**2)/I_cut - P_cut_err = np.sqrt((Q_cut**2*Q_cut_err**2 + U_cut**2*U_cut_err**2 + 2.*Q_cut*U_cut*QU_cut_err)/(Q_cut**2 + U_cut**2) + - ((Q_cut/I_cut)**2 + (U_cut/I_cut)**2)*I_cut_err**2 - 2.*(Q_cut/I_cut)*IQ_cut_err - 2.*(U_cut/I_cut)*IU_cut_err)/I_cut + with np.errstate(divide="ignore", invalid="ignore"): + P_cut = np.sqrt(Q_cut**2 + U_cut**2) / I_cut + P_cut_err = ( + np.sqrt( + (Q_cut**2 * Q_cut_err**2 + U_cut**2 * U_cut_err**2 + 2.0 * Q_cut * U_cut * QU_cut_err) / (Q_cut**2 + U_cut**2) + + ((Q_cut / I_cut) ** 2 + (U_cut / I_cut) ** 2) * I_cut_err**2 + - 2.0 * (Q_cut / I_cut) * IQ_cut_err + - 2.0 * (U_cut / I_cut) * IU_cut_err + ) + / I_cut + ) - PA_cut = princ_angle((90./np.pi)*np.arctan2(U_cut, Q_cut)) - PA_cut_err = (90./(np.pi*(Q_cut**2+U_cut**2)))*np.sqrt(U_cut**2*Q_cut_err**2 + Q_cut**2*U_cut_err**2 - 2.*Q_cut*U_cut*QU_cut_err) + PA_cut = princ_angle((90.0 / np.pi) * np.arctan2(U_cut, Q_cut)) + PA_cut_err = (90.0 / (np.pi * (Q_cut**2 + U_cut**2))) * np.sqrt( + U_cut**2 * Q_cut_err**2 + Q_cut**2 * U_cut_err**2 - 2.0 * Q_cut * U_cut * QU_cut_err + ) - if hasattr(self, 'cont'): + if hasattr(self, "cont"): for coll in self.cont.collections: try: coll.remove() @@ -2518,39 +3228,192 @@ class pol_map(object): fig = self.fig if ax is None: ax = self.ax - if hasattr(self, 'an_int'): + if hasattr(self, "an_int"): self.an_int.remove() - self.str_int 
= r"$F_{{\lambda}}^{{int}}$({0:.0f} $\AA$) = {1} $ergs \cdot cm^{{-2}} \cdot s^{{-1}} \cdot \AA^{{-1}}$".format(self.pivot_wav, sci_not(I_reg*self.map_convert, I_reg_err*self.map_convert, 2))+"\n"+r"$P^{{int}}$ = {0:.1f} $\pm$ {1:.1f} %".format(P_reg*100., np.ceil(P_reg_err*1000.)/10.)+"\n"+r"$\theta_{{P}}^{{int}}$ = {0:.1f} $\pm$ {1:.1f} °".format(PA_reg, np.ceil(PA_reg_err*10.)/10.) + self.str_int = ( + r"$F_{{\lambda}}^{{int}}$({0:.0f} $\AA$) = {1} $ergs \cdot cm^{{-2}} \cdot s^{{-1}} \cdot \AA^{{-1}}$".format( + self.pivot_wav, sci_not(I_reg * self.map_convert, I_reg_err * self.map_convert, 2) + ) + + "\n" + + r"$P^{{int}}$ = {0:.1f} $\pm$ {1:.1f} %".format(P_reg * 100.0, np.ceil(P_reg_err * 1000.0) / 10.0) + + "\n" + + r"$\theta_{{P}}^{{int}}$ = {0:.1f} $\pm$ {1:.1f} °".format(PA_reg, np.ceil(PA_reg_err * 10.0) / 10.0) + ) self.str_cut = "" # self.str_cut = "\n"+r"$F_{{\lambda}}^{{cut}}$({0:.0f} $\AA$) = {1} $ergs \cdot cm^{{-2}} \cdot s^{{-1}} \cdot \AA^{{-1}}$".format(self.pivot_wav, sci_not(I_cut*self.map_convert, I_cut_err*self.map_convert, 2))+"\n"+r"$P^{{cut}}$ = {0:.1f} $\pm$ {1:.1f} %".format(P_cut*100., np.ceil(P_cut_err*1000.)/10.)+"\n"+r"$\theta_{{P}}^{{cut}}$ = {0:.1f} $\pm$ {1:.1f} °".format(PA_cut, np.ceil(PA_cut_err*10.)/10.) - self.an_int = ax.annotate(self.str_int+self.str_cut, color='white', fontsize=12, xy=(0.01, 1.00), xycoords='axes fraction', path_effects=[pe.withStroke(linewidth=0.5, foreground='k')], verticalalignment='top', horizontalalignment='left') + self.an_int = ax.annotate( + self.str_int + self.str_cut, + color="white", + fontsize=12, + xy=(0.01, 1.00), + xycoords="axes fraction", + path_effects=[pe.withStroke(linewidth=0.5, foreground="k")], + verticalalignment="top", + horizontalalignment="left", + ) if self.region is not None: - self.cont = ax.contour(self.region.astype(float), levels=[0.5], colors='white', linewidths=0.8) + self.cont = ax.contour(self.region.astype(float), levels=[0.5], colors="white", linewidths=0.8) fig.canvas.draw_idle() return self.an_int else: - str_int = r"$F_{{\lambda}}^{{int}}$({0:.0f} $\AA$) = {1} $ergs \cdot cm^{{-2}} \cdot s^{{-1}} \cdot \AA^{{-1}}$".format(self.pivot_wav, sci_not(I_reg*self.map_convert, I_reg_err*self.map_convert, 2))+"\n"+r"$P^{{int}}$ = {0:.1f} $\pm$ {1:.1f} %".format(P_reg*100., np.ceil(P_reg_err*1000.)/10.)+"\n"+r"$\theta_{{P}}^{{int}}$ = {0:.1f} $\pm$ {1:.1f} °".format(PA_reg, np.ceil(PA_reg_err*10.)/10.) + str_int = ( + r"$F_{{\lambda}}^{{int}}$({0:.0f} $\AA$) = {1} $ergs \cdot cm^{{-2}} \cdot s^{{-1}} \cdot \AA^{{-1}}$".format( + self.pivot_wav, sci_not(I_reg * self.map_convert, I_reg_err * self.map_convert, 2) + ) + + "\n" + + r"$P^{{int}}$ = {0:.1f} $\pm$ {1:.1f} %".format(P_reg * 100.0, np.ceil(P_reg_err * 1000.0) / 10.0) + + "\n" + + r"$\theta_{{P}}^{{int}}$ = {0:.1f} $\pm$ {1:.1f} °".format(PA_reg, np.ceil(PA_reg_err * 10.0) / 10.0) + ) str_cut = "" # str_cut = "\n"+r"$F_{{\lambda}}^{{cut}}$({0:.0f} $\AA$) = {1} $ergs \cdot cm^{{-2}} \cdot s^{{-1}} \cdot \AA^{{-1}}$".format(self.pivot_wav, sci_not(I_cut*self.map_convert, I_cut_err*self.map_convert, 2))+"\n"+r"$P^{{cut}}$ = {0:.1f} $\pm$ {1:.1f} %".format(P_cut*100., np.ceil(P_cut_err*1000.)/10.)+"\n"+r"$\theta_{{P}}^{{cut}}$ = {0:.1f} $\pm$ {1:.1f} °".format(PA_cut, np.ceil(PA_cut_err*10.)/10.) 
-            ax.annotate(str_int+str_cut, color='white', fontsize=12, xy=(0.01, 1.00), xycoords='axes fraction', path_effects=[pe.withStroke(linewidth=0.5, foreground='k')], verticalalignment='top', horizontalalignment='left')
+            ax.annotate(
+                str_int + str_cut,
+                color="white",
+                fontsize=12,
+                xy=(0.01, 1.00),
+                xycoords="axes fraction",
+                path_effects=[pe.withStroke(linewidth=0.5, foreground="k")],
+                verticalalignment="top",
+                horizontalalignment="left",
+            )
             if self.region is not None:
-                ax.contour(self.region.astype(float), levels=[0.5], colors='white', linewidths=0.8)
+                ax.contour(self.region.astype(float), levels=[0.5], colors="white", linewidths=0.8)
             fig.canvas.draw_idle()
 
 
 if __name__ == "__main__":
     import argparse
 
-    parser = argparse.ArgumentParser(description='Interactively plot the pipeline products')
-    parser.add_argument('-f', '--file', metavar='path', required=False, help='the full or relative path to the data product', type=str, default=None)
-    parser.add_argument('-p', '--snrp', metavar='snrp_cut', required=False, help='the cut in signal-to-noise for the polarization degree', type=float, default=3.)
-    parser.add_argument('-i', '--snri', metavar='snri_cut', required=False, help='the cut in signal-to-noise for the intensity', type=float, default=3.)
-    parser.add_argument('-l', '--lim', metavar='flux_lim', nargs=2, required=False, help='limits for the intensity map', default=None)
+    parser = argparse.ArgumentParser(description="Interactively plot the pipeline products")
+    parser.add_argument("-f", "--file", metavar="path", required=False, help="The full or relative path to the data product", type=str, default=None)
+    parser.add_argument(
+        "-p", "--snrp", metavar="snrp_cut", required=False, help="The cut in signal-to-noise for the polarization degree", type=float, default=3.0
+    )
+    parser.add_argument("-i", "--snri", metavar="snri_cut", required=False, help="The cut in signal-to-noise for the intensity", type=float, default=3.0)
+    parser.add_argument(
+        "-st", "--step-vec", metavar="step_vec", required=False, help="Quantity of vectors to be shown, 1 is all, 2 is every other, etc.", type=int, default=1
+    )
+    parser.add_argument(
+        # NB: "%" must be doubled here, argparse interpolates "%"-codes into help strings.
+        "-sc", "--scale-vec", metavar="scale_vec", required=False, help="Size of the 100%% polarization vector in pixel units", type=float, default=3.0
+    )
+    parser.add_argument("-pa", "--pang-err", action="store_true", required=False, help="Whether the polarization angle uncertainties should be displayed")
+    parser.add_argument("-l", "--lim", metavar="flux_lim", nargs=2, required=False, help="Limits for the intensity map", type=float, default=None)
+    parser.add_argument(
+        "-pdf", "--static-pdf", metavar="static_pdf", required=False, help="Whether the analysis tool or the static PDFs should be output", default=None
+    )
     args = parser.parse_args()
     if args.file is not None:
-        Stokes_UV = fits.open(args.file, mode='readonly')
-        p = pol_map(Stokes_UV, SNRp_cut=args.snrp, SNRi_cut=args.snri, flux_lim=args.lim)
+        Stokes_UV = fits.open(args.file, mode="readonly")
+        if args.static_pdf is not None:
+            # Render one static map per display mode (the suffix is appended to the save name).
+            for suffix, display in [
+                ("", None),
+                ("I", "Intensity"),
+                ("P_flux", "Pol_Flux"),
+                ("P", "Pol_deg"),
+                ("PA", "Pol_ang"),
+                ("I_err", "I_err"),
+                ("P_err", "Pol_deg_err"),
+                ("SNRi", "SNRi"),
+                ("SNRp", "SNRp"),
+            ]:
+                kwargs = dict(
+                    SNRp_cut=args.snrp,
+                    SNRi_cut=args.snri,
+                    flux_lim=args.lim,
+                    step_vec=args.step_vec,
+                    scale_vec=args.scale_vec,
+                    savename="_".join(filter(None, [Stokes_UV[0].header["FILENAME"], suffix])),
+                    plots_folder=args.static_pdf,
+                )
+                if display is not None:
+                    # "display" is only passed when set, so the first map keeps polarization_map's default view.
+                    kwargs["display"] = display
+                polarization_map(Stokes_UV, Stokes_UV["DATA_MASK"].data.astype(bool), **kwargs)
+        else:
+            pol_map(Stokes_UV, SNRp_cut=args.snrp, SNRi_cut=args.snri, step_vec=args.step_vec, scale_vec=args.scale_vec, flux_lim=args.lim, pa_err=args.pang_err)
     else:
-        print("python3 plots.py -f <path> -p <snrp_cut> -i <snri_cut> -l <flux_lim>")
+        print("python3 plots.py -f <path> -p <snrp_cut> -i <snri_cut> -st <step_vec> -sc <scale_vec> -l <flux_lim> -pa -pdf <static_pdf>")
diff --git a/package/lib/query.py b/package/lib/query.py
index 5c244c5..ba99d49 100755
--- a/package/lib/query.py
+++ b/package/lib/query.py
@@ -3,34 +3,44 @@
 """
 Library function to query and download datasets from the MAST API.
""" + from os import system -from os.path import join as path_join, exists as path_exists -from astroquery.mast import MastMissions, Observations -from astropy.table import unique, Column -from astropy.time import Time, TimeDelta +from os.path import exists as path_exists +from os.path import join as path_join +from warnings import filterwarnings + import astropy.units as u import numpy as np +from astropy.table import Column, unique +from astropy.time import Time, TimeDelta +from astroquery.exceptions import NoResultsWarning +from astroquery.mast import MastMissions, Observations + +filterwarnings("error", category=NoResultsWarning) def divide_proposal(products): """ Divide observation in proposals by time or filter """ - for pid in np.unique(products['Proposal ID']): - obs = products[products['Proposal ID'] == pid].copy() - same_filt = np.unique(np.array(np.sum([obs['Filters'][:, 1:] == filt[1:] for filt in obs['Filters']], axis=2) < 3, dtype=bool), axis=0) + for pid in np.unique(products["Proposal ID"]): + obs = products[products["Proposal ID"] == pid].copy() + same_filt = np.unique(np.array(np.sum([obs["Filters"][:, 1:] == filt[1:] for filt in obs["Filters"]], axis=2) < 3, dtype=bool), axis=0) if len(same_filt) > 1: for filt in same_filt: - products['Proposal ID'][np.any([products['Dataset'] == dataset for dataset in obs['Dataset'][filt]], axis=0)] = "_".join( - [obs['Proposal ID'][filt][0], "_".join([fi for fi in obs['Filters'][filt][0][1:] if fi[:-1] != "CLEAR"])]) - for pid in np.unique(products['Proposal ID']): - obs = products[products['Proposal ID'] == pid].copy() - close_date = np.unique([[np.abs(TimeDelta(obs['Start'][i].unix-date.unix, format='sec')) - < 7.*u.d for i in range(len(obs))] for date in obs['Start']], axis=0) + products["Proposal ID"][np.any([products["Dataset"] == dataset for dataset in obs["Dataset"][filt]], axis=0)] = "_".join( + [obs["Proposal ID"][filt][0], "_".join([fi for fi in obs["Filters"][filt][0][1:] if fi[:-1] != "CLEAR"])] + ) + for pid in np.unique(products["Proposal ID"]): + obs = products[products["Proposal ID"] == pid].copy() + close_date = np.unique( + [[np.abs(TimeDelta(obs["Start"][i].unix - date.unix, format="sec")) < 7.0 * u.d for i in range(len(obs))] for date in obs["Start"]], axis=0 + ) if len(close_date) > 1: for date in close_date: - products['Proposal ID'][np.any([products['Dataset'] == dataset for dataset in obs['Dataset'][date]], axis=0) - ] = "_".join([obs['Proposal ID'][date][0], str(obs['Start'][date][0])[:10]]) + products["Proposal ID"][np.any([products["Dataset"] == dataset for dataset in obs["Dataset"][date]], axis=0)] = "_".join( + [obs["Proposal ID"][date][0], str(obs["Start"][date][0])[:10]] + ) return products @@ -38,53 +48,36 @@ def get_product_list(target=None, proposal_id=None): """ Retrieve products list for a given target from the MAST archive """ - mission = MastMissions(mission='hst') - radius = '3' + mission = MastMissions(mission="hst") + radius = "3" select_cols = [ - 'sci_data_set_name', - 'sci_spec_1234', - 'sci_actual_duration', - 'sci_start_time', - 'sci_stop_time', - 'sci_central_wavelength', - 'sci_instrume', - 'sci_aper_1234', - 'sci_targname', - 'sci_pep_id', - 'sci_pi_last_name'] + "sci_data_set_name", + "sci_spec_1234", + "sci_actual_duration", + "sci_start_time", + "sci_stop_time", + "sci_central_wavelength", + "sci_instrume", + "sci_aper_1234", + "sci_targname", + "sci_pep_id", + "sci_pi_last_name", + ] - cols = [ - 'Dataset', - 'Filters', - 'Exptime', - 'Start', - 'Stop', - 'Central wavelength', - 
'Instrument', - 'Size', - 'Target name', - 'Proposal ID', - 'PI last name'] + cols = ["Dataset", "Filters", "Exptime", "Start", "Stop", "Central wavelength", "Instrument", "Size", "Target name", "Proposal ID", "PI last name"] if target is None: target = input("Target name:\n>") # Use query_object method to resolve the object name into coordinates - results = mission.query_object( - target, - radius=radius, - select_cols=select_cols, - sci_spec_1234='POL*', - sci_obs_type='image', - sci_aec='S', - sci_instrume='foc') + results = mission.query_object(target, radius=radius, select_cols=select_cols, sci_spec_1234="POL*", sci_obs_type="image", sci_aec="S", sci_instrume="foc") for c, n_c in zip(select_cols, cols): results.rename_column(c, n_c) - results['Proposal ID'] = Column(results['Proposal ID'], dtype='U35') - results['Filters'] = Column(np.array([filt.split(";") for filt in results['Filters']], dtype=str)) - results['Start'] = Column(Time(results['Start'])) - results['Stop'] = Column(Time(results['Stop'])) + results["Proposal ID"] = Column(results["Proposal ID"], dtype="U35") + results["Filters"] = Column(np.array([filt.split(";") for filt in results["Filters"]], dtype=str)) + results["Start"] = Column(Time(results["Start"])) + results["Stop"] = Column(Time(results["Stop"])) results = divide_proposal(results) obs = results.copy() @@ -92,67 +85,70 @@ def get_product_list(target=None, proposal_id=None): # Remove single observations for which a FIND filter is used to_remove = [] for i in range(len(obs)): - if "F1ND" in obs[i]['Filters']: + if "F1ND" in obs[i]["Filters"]: to_remove.append(i) obs.remove_rows(to_remove) # Remove observations for which a polarization filter is missing polfilt = {"POL0": 0, "POL60": 1, "POL120": 2} - for pid in np.unique(obs['Proposal ID']): + for pid in np.unique(obs["Proposal ID"]): used_pol = np.zeros(3) - for dataset in obs[obs['Proposal ID'] == pid]: - used_pol[polfilt[dataset['Filters'][0]]] += 1 + for dataset in obs[obs["Proposal ID"] == pid]: + used_pol[polfilt[dataset["Filters"][0]]] += 1 if np.any(used_pol < 1): - obs.remove_rows(np.arange(len(obs))[obs['Proposal ID'] == pid]) + obs.remove_rows(np.arange(len(obs))[obs["Proposal ID"] == pid]) - tab = unique(obs, ['Target name', 'Proposal ID']) - obs["Obs"] = [np.argmax(np.logical_and(tab['Proposal ID'] == data['Proposal ID'], tab['Target name'] == data['Target name']))+1 for data in obs] + tab = unique(obs, ["Target name", "Proposal ID"]) + obs["Obs"] = [np.argmax(np.logical_and(tab["Proposal ID"] == data["Proposal ID"], tab["Target name"] == data["Target name"])) + 1 for data in obs] try: - n_obs = unique(obs[["Obs", "Filters", "Start", "Central wavelength", "Instrument", "Size", "Target name", "Proposal ID", "PI last name"]], 'Obs') + n_obs = unique(obs[["Obs", "Filters", "Start", "Central wavelength", "Instrument", "Size", "Target name", "Proposal ID", "PI last name"]], "Obs") except IndexError: - raise ValueError( - "There is no observation with POL0, POL60 and POL120 for {0:s} in HST/FOC Legacy Archive".format(target)) + raise ValueError("There is no observation with POL0, POL60 and POL120 for {0:s} in HST/FOC Legacy Archive".format(target)) b = np.zeros(len(results), dtype=bool) - if proposal_id is not None and str(proposal_id) in obs['Proposal ID']: - b[results['Proposal ID'] == str(proposal_id)] = True + if proposal_id is not None and str(proposal_id) in obs["Proposal ID"]: + b[results["Proposal ID"] == str(proposal_id)] = True else: - n_obs.pprint(len(n_obs)+2) - a = [np.array(i.split(":"), 
dtype=str) - for i in input("select observations to be downloaded ('1,3,4,5' or '1,3:5' or 'all','*' default to 1)\n>").split(',')] - if a[0][0] == '': + n_obs.pprint(len(n_obs) + 2) + a = [ + np.array(i.split(":"), dtype=str) + for i in input("select observations to be downloaded ('1,3,4,5' or '1,3:5' or 'all','*' default to 1)\n>").split(",") + ] + if a[0][0] == "": a = [[1]] - if a[0][0] in ['a', 'all', '*']: + if a[0][0] in ["a", "all", "*"]: b = np.ones(len(results), dtype=bool) else: a = [np.array(i, dtype=int) for i in a] for i in a: if len(i) > 1: - for j in range(i[0], i[1]+1): - b[np.array([dataset in obs['Dataset'][obs["Obs"] == j] for dataset in results['Dataset']])] = True + for j in range(i[0], i[1] + 1): + b[np.array([dataset in obs["Dataset"][obs["Obs"] == j] for dataset in results["Dataset"]])] = True else: - b[np.array([dataset in obs['Dataset'][obs['Obs'] == i[0]] for dataset in results['Dataset']])] = True + b[np.array([dataset in obs["Dataset"][obs["Obs"] == i[0]] for dataset in results["Dataset"]])] = True - observations = Observations.query_criteria(obs_id=list(results['Dataset'][b])) - products = Observations.filter_products(Observations.get_product_list(observations), - productType=['SCIENCE'], - dataproduct_type=['image'], - calib_level=[2], - description="DADS C0F file - Calibrated exposure WFPC/WFPC2/FOC/FOS/GHRS/HSP") - products['proposal_id'] = Column(products['proposal_id'], dtype='U35') - products['target_name'] = Column(observations['target_name']) + observations = Observations.query_criteria(obs_id=list(results["Dataset"][b])) + products = Observations.filter_products( + Observations.get_product_list(observations), + productType=["SCIENCE"], + dataproduct_type=["image"], + calib_level=[2], + description="DADS C0F file - Calibrated exposure WFPC/WFPC2/FOC/FOS/GHRS/HSP", + ) + products["proposal_id"] = Column(products["proposal_id"], dtype="U35") + products["target_name"] = Column(observations["target_name"]) for prod in products: - prod['proposal_id'] = results['Proposal ID'][results['Dataset'] == prod['productFilename'][:len(results['Dataset'][0])].upper()][0] + prod["proposal_id"] = results["Proposal ID"][results["Dataset"] == prod["productFilename"][: len(results["Dataset"][0])].upper()][0] for prod in products: - prod['target_name'] = observations['target_name'][observations['obsid'] == prod['obsID']][0] - tab = unique(products, ['target_name', 'proposal_id']) + prod["target_name"] = observations["target_name"][observations["obsid"] == prod["obsID"]][0] + tab = unique(products, ["target_name", "proposal_id"]) - products["Obs"] = [np.argmax(np.logical_and(tab['proposal_id'] == data['proposal_id'], tab['target_name'] == data['target_name']))+1 for data in products] + products["Obs"] = [np.argmax(np.logical_and(tab["proposal_id"] == data["proposal_id"], tab["target_name"] == data["target_name"])) + 1 for data in products] return target, products -def retrieve_products(target=None, proposal_id=None, output_dir='./data'): +def retrieve_products(target=None, proposal_id=None, output_dir="./data"): """ Given a target name and a proposal_id, create the local directories and retrieve the fits files from the MAST Archive """ @@ -160,18 +156,19 @@ def retrieve_products(target=None, proposal_id=None, output_dir='./data'): prodpaths = [] # data_dir = path_join(output_dir, target) out = "" - for obs in unique(products, 'Obs'): + for obs in unique(products, "Obs"): filepaths = [] # obs_dir = path_join(data_dir, obs['prodposal_id']) # if obs['target_name']!=target: - 
obs_dir = path_join(path_join(output_dir, target), obs['proposal_id']) + obs_dir = path_join(path_join(output_dir, target), obs["proposal_id"]) if not path_exists(obs_dir): system("mkdir -p {0:s} {1:s}".format(obs_dir, obs_dir.replace("data", "plots"))) - for file in products['productFilename'][products['Obs'] == obs['Obs']]: + for file in products["productFilename"][products["Obs"] == obs["Obs"]]: fpath = path_join(obs_dir, file) if not path_exists(fpath): - out += "{0:s} : {1:s}\n".format(file, Observations.download_file( - products['dataURI'][products['productFilename'] == file][0], local_path=fpath)[0]) + out += "{0:s} : {1:s}\n".format( + file, Observations.download_file(products["dataURI"][products["productFilename"] == file][0], local_path=fpath)[0] + ) else: out += "{0:s} : Exists\n".format(file) filepaths.append([obs_dir, file]) @@ -183,13 +180,13 @@ def retrieve_products(target=None, proposal_id=None, output_dir='./data'): if __name__ == "__main__": import argparse - parser = argparse.ArgumentParser(description='Query MAST for target products') - parser.add_argument('-t', '--target', metavar='targetname', required=False, - help='the name of the target', type=str, default=None) - parser.add_argument('-p', '--proposal_id', metavar='proposal_id', required=False, - help='the proposal id of the data products', type=int, default=None) - parser.add_argument('-o', '--output_dir', metavar='directory_path', required=False, - help='output directory path for the data products', type=str, default="./data") + parser = argparse.ArgumentParser(description="Query MAST for target products") + parser.add_argument("-t", "--target", metavar="targetname", required=False, help="the name of the target", type=str, default=None) + parser.add_argument("-p", "--proposal_id", metavar="proposal_id", required=False, help="the proposal id of the data products", type=int, default=None) + parser.add_argument( + "-o", "--output_dir", metavar="directory_path", required=False, help="output directory path for the data products", type=str, default="./data" + ) args = parser.parse_args() + print(args.target) prodpaths = retrieve_products(target=args.target, proposal_id=args.proposal_id) print(prodpaths) diff --git a/package/lib/reduction.py b/package/lib/reduction.py index 38b9836..a265158 100755 --- a/package/lib/reduction.py +++ b/package/lib/reduction.py @@ -39,44 +39,83 @@ prototypes : Rotate data before reduction given an angle in degrees using scipy functions. 
""" -from copy import deepcopy -import numpy as np -import matplotlib.pyplot as plt -from matplotlib.patches import Rectangle -from matplotlib.colors import LogNorm -from scipy.ndimage import rotate as sc_rotate, shift as sc_shift -from scipy.signal import fftconvolve -from astropy.wcs import WCS -from astropy import log import warnings -from .deconvolve import deconvolve_im, gaussian_psf, gaussian2d, zeropad -from .convex_hull import image_hull, clean_ROI +from copy import deepcopy + +import matplotlib.pyplot as plt +import numpy as np +from astropy import log +from astropy.wcs import WCS +from matplotlib.colors import LogNorm +from matplotlib.patches import Rectangle +from scipy.ndimage import rotate as sc_rotate +from scipy.ndimage import shift as sc_shift +from scipy.signal import fftconvolve + from .background import bkg_fit, bkg_hist, bkg_mini +from .convex_hull import image_hull +from .cross_correlation import phase_cross_correlation +from .deconvolve import deconvolve_im, gaussian2d, gaussian_psf, zeropad from .plots import plot_obs from .utils import princ_angle -from .cross_correlation import phase_cross_correlation -log.setLevel('ERROR') + +log.setLevel("ERROR") # Useful tabulated values # FOC instrument -globals()['trans2'] = {'f140w': 0.21, 'f175w': 0.24, 'f220w': 0.39, 'f275w': 0.40, 'f320w': 0.89, 'f342w': 0.81, - 'f430w': 0.74, 'f370lp': 0.83, 'f486n': 0.63, 'f501n': 0.68, 'f480lp': 0.82, 'clear2': 1.0} -globals()['trans3'] = {'f120m': 0.10, 'f130m': 0.10, 'f140m': 0.08, 'f152m': 0.08, 'f165w': 0.28, - 'f170m': 0.18, 'f195w': 0.42, 'f190m': 0.15, 'f210m': 0.18, 'f231m': 0.18, 'clear3': 1.0} -globals()['trans4'] = {'f253m': 0.18, 'f278m': 0.26, 'f307m': 0.26, 'f130lp': 0.92, 'f346m': 0.58, - 'f372m': 0.73, 'f410m': 0.58, 'f437m': 0.71, 'f470m': 0.79, 'f502m': 0.82, 'f550m': 0.77, 'clear4': 1.0} -globals()['pol_efficiency'] = {'pol0': 0.92, 'pol60': 0.92, 'pol120': 0.91} +globals()["trans2"] = { + "f140w": 0.21, + "f175w": 0.24, + "f220w": 0.39, + "f275w": 0.40, + "f320w": 0.89, + "f342w": 0.81, + "f430w": 0.74, + "f370lp": 0.83, + "f486n": 0.63, + "f501n": 0.68, + "f480lp": 0.82, + "clear2": 1.0, +} +globals()["trans3"] = { + "f120m": 0.10, + "f130m": 0.10, + "f140m": 0.08, + "f152m": 0.08, + "f165w": 0.28, + "f170m": 0.18, + "f195w": 0.42, + "f190m": 0.15, + "f210m": 0.18, + "f231m": 0.18, + "clear3": 1.0, +} +globals()["trans4"] = { + "f253m": 0.18, + "f278m": 0.26, + "f307m": 0.26, + "f130lp": 0.92, + "f346m": 0.58, + "f372m": 0.73, + "f410m": 0.58, + "f437m": 0.71, + "f470m": 0.79, + "f502m": 0.82, + "f550m": 0.77, + "clear4": 1.0, +} +globals()["pol_efficiency"] = {"pol0": 0.92, "pol60": 0.92, "pol120": 0.91} # POL0 = 0deg, POL60 = 60deg, POL120=120deg -globals()['theta'] = np.array([180.*np.pi/180., 60.*np.pi/180., 120.*np.pi/180.]) +globals()["theta"] = np.array([180.0 * np.pi / 180.0, 60.0 * np.pi / 180.0, 120.0 * np.pi / 180.0]) # Uncertainties on the orientation of the polarizers' axes taken to be 3deg (see Nota et. 
al 1996, p36; Robinson & Thomson 1995) -globals()['sigma_theta'] = np.array([3.*np.pi/180., 3.*np.pi/180., 3.*np.pi/180.]) +globals()["sigma_theta"] = np.array([3.0 * np.pi / 180.0, 3.0 * np.pi / 180.0, 3.0 * np.pi / 180.0]) # Image shift between polarizers as measured by Hodge (1995) -globals()['pol_shift'] = {'pol0': np.array([0., 0.])*1., 'pol60': np.array([3.63, -0.68])*1., 'pol120': np.array([0.65, 0.20])*1.} -globals()['sigma_shift'] = {'pol0': [0.3, 0.3], 'pol60': [0.3, 0.3], 'pol120': [0.3, 0.3]} +globals()["pol_shift"] = {"pol0": np.array([0.0, 0.0]) * 1.0, "pol60": np.array([3.63, -0.68]) * 1.0, "pol120": np.array([0.65, 0.20]) * 1.0} +globals()["sigma_shift"] = {"pol0": [0.3, 0.3], "pol60": [0.3, 0.3], "pol120": [0.3, 0.3]} -def get_row_compressor(old_dimension, new_dimension, operation='sum'): +def get_row_compressor(old_dimension, new_dimension, operation="sum"): """ Return the matrix that allows to compress an array from an old dimension of rows to a new dimension of rows, can be done by summing the original @@ -105,7 +144,6 @@ def get_row_compressor(old_dimension, new_dimension, operation='sum'): dim_compressor[which_row, which_column] = 1 which_column += 1 elif next_bin_break == which_column: - which_row += 1 next_bin_break += bin_size else: @@ -122,7 +160,7 @@ def get_row_compressor(old_dimension, new_dimension, operation='sum'): return dim_compressor -def get_column_compressor(old_dimension, new_dimension, operation='sum'): +def get_column_compressor(old_dimension, new_dimension, operation="sum"): """ Return the matrix that allows to compress an array from an old dimension of columns to a new dimension of columns, can be done by summing the original @@ -144,7 +182,7 @@ def get_column_compressor(old_dimension, new_dimension, operation='sum'): return get_row_compressor(old_dimension, new_dimension, operation).transpose() -def bin_ndarray(ndarray, new_shape, operation='sum'): +def bin_ndarray(ndarray, new_shape, operation="sum"): """ Bins an ndarray in all axes based on the target shape, by summing or averaging. 
@@ -164,21 +202,20 @@ def bin_ndarray(ndarray, new_shape, operation='sum'): [342 350 358 366 374]] """ - if operation.lower() not in ['sum', 'mean', 'average', 'avg']: + if operation.lower() not in ["sum", "mean", "average", "avg"]: raise ValueError("Operation not supported.") if ndarray.ndim != len(new_shape): - raise ValueError("Shape mismatch: {} -> {}".format(ndarray.shape, - new_shape)) - if (np.array(ndarray.shape) % np.array(new_shape) == np.array([0., 0.])).all(): - compression_pairs = [(d, c//d) for d, c in zip(new_shape, ndarray.shape)] + raise ValueError("Shape mismatch: {} -> {}".format(ndarray.shape, new_shape)) + if (np.array(ndarray.shape) % np.array(new_shape) == np.array([0.0, 0.0])).all(): + compression_pairs = [(d, c // d) for d, c in zip(new_shape, ndarray.shape)] flattened = [l for p in compression_pairs for l in p] ndarray = ndarray.reshape(flattened) for i in range(len(new_shape)): if operation.lower() == "sum": - ndarray = ndarray.sum(-1*(i+1)) + ndarray = ndarray.sum(-1 * (i + 1)) elif operation.lower() in ["mean", "average", "avg"]: - ndarray = ndarray.mean(-1*(i+1)) + ndarray = ndarray.mean(-1 * (i + 1)) else: row_comp = np.mat(get_row_compressor(ndarray.shape[0], new_shape[0], operation)) col_comp = np.mat(get_column_compressor(ndarray.shape[1], new_shape[1], operation)) @@ -240,12 +277,14 @@ def crop_array(data_array, headers, error_array=None, data_mask=None, step=5, nu if error_array is None: error_array = np.zeros(data_array.shape) if null_val is None: - null_val = [1.00*error.mean() for error in error_array] + null_val = [1.00 * error.mean() for error in error_array] elif type(null_val) is float: - null_val = [null_val,]*error_array.shape[0] + null_val = [ + null_val, + ] * error_array.shape[0] vertex = np.zeros((data_array.shape[0], 4), dtype=int) - for i, image in enumerate(data_array): # Get vertex of the rectangular convex hull of each image + for i, image in enumerate(data_array): # Get vertex of the rectangular convex hull of each image vertex[i] = image_hull(image, step=step, null_val=null_val[i], inside=inside) v_array = np.zeros(4, dtype=int) if inside: # Get vertex of the maximum convex hull for all images @@ -253,77 +292,79 @@ def crop_array(data_array, headers, error_array=None, data_mask=None, step=5, nu v_array[1] = np.min(vertex[:, 1]).astype(int) v_array[2] = np.max(vertex[:, 2]).astype(int) v_array[3] = np.min(vertex[:, 3]).astype(int) - else: # Get vertex of the minimum convex hull for all images + else: # Get vertex of the minimum convex hull for all images v_array[0] = np.min(vertex[:, 0]).astype(int) v_array[1] = np.max(vertex[:, 1]).astype(int) v_array[2] = np.min(vertex[:, 2]).astype(int) v_array[3] = np.max(vertex[:, 3]).astype(int) - new_shape = np.array([v_array[1]-v_array[0], v_array[3]-v_array[2]]) - rectangle = [v_array[2], v_array[0], new_shape[1], new_shape[0], 0., 'b'] + new_shape = np.array([v_array[1] - v_array[0], v_array[3] - v_array[2]]) + rectangle = [v_array[2], v_array[0], new_shape[1], new_shape[0], 0.0, "b"] crop_headers = deepcopy(headers) crop_array = np.zeros((data_array.shape[0], new_shape[0], new_shape[1])) crop_error_array = np.zeros((data_array.shape[0], new_shape[0], new_shape[1])) for i, image in enumerate(data_array): # Put the image data in the cropped array - crop_array[i] = image[v_array[0]:v_array[1], v_array[2]:v_array[3]] - crop_error_array[i] = error_array[i][v_array[0]:v_array[1], v_array[2]:v_array[3]] + crop_array[i] = image[v_array[0] : v_array[1], v_array[2] : v_array[3]] + crop_error_array[i] 
= error_array[i][v_array[0] : v_array[1], v_array[2] : v_array[3]] # Update CRPIX value in the associated header curr_wcs = WCS(crop_headers[i]).celestial.deepcopy() curr_wcs.wcs.crpix[:2] = curr_wcs.wcs.crpix[:2] - np.array([v_array[2], v_array[0]]) crop_headers[i].update(curr_wcs.to_header()) - crop_headers[i]['naxis1'], crop_headers[i]['naxis2'] = crop_array[i].shape + crop_headers[i]["naxis1"], crop_headers[i]["naxis2"] = crop_array[i].shape if display: - plt.rcParams.update({'font.size': 15}) - fig, ax = plt.subplots(figsize=(10, 10), layout='constrained') - convert_flux = headers[0]['photflam'] - data = deepcopy(data_array[0]*convert_flux) - data[data <= data[data > 0.].min()] = data[data > 0.].min() - crop = crop_array[0]*convert_flux - instr = headers[0]['instrume'] - rootname = headers[0]['rootname'] - exptime = headers[0]['exptime'] - filt = headers[0]['filtnam1'] + plt.rcParams.update({"font.size": 15}) + fig, ax = plt.subplots(figsize=(10, 10), layout="constrained") + convert_flux = headers[0]["photflam"] + data = deepcopy(data_array[0] * convert_flux) + data[data <= data[data > 0.0].min()] = data[data > 0.0].min() + crop = crop_array[0] * convert_flux + instr = headers[0]["instrume"] + rootname = headers[0]["rootname"] + exptime = headers[0]["exptime"] + filt = headers[0]["filtnam1"] # plots # im = ax.imshow(data, vmin=data.min(), vmax=data.max(), origin='lower', cmap='gray') - im = ax.imshow(data, norm=LogNorm(crop[crop > 0.].mean()/5., crop.max()), origin='lower', cmap='gray') + im = ax.imshow(data, norm=LogNorm(crop[crop > 0.0].mean() / 5.0, crop.max()), origin="lower", cmap="gray") x, y, width, height, angle, color = rectangle ax.add_patch(Rectangle((x, y), width, height, edgecolor=color, fill=False)) # position of centroid - ax.plot([data.shape[1]/2, data.shape[1]/2], [0, data.shape[0]-1], '--', lw=1, - color='grey', alpha=0.3) - ax.plot([0, data.shape[1]-1], [data.shape[1]/2, data.shape[1]/2], '--', lw=1, - color='grey', alpha=0.3) - ax.annotate(instr+":"+rootname, color='white', fontsize=10, - xy=(0.02, 0.95), xycoords='axes fraction') - ax.annotate(filt, color='white', fontsize=14, xy=(0.02, 0.02), - xycoords='axes fraction') - ax.annotate(str(exptime)+" s", color='white', fontsize=10, xy=(0.80, 0.02), - xycoords='axes fraction') - ax.set(title="Location of cropped image.", xlabel='pixel offset', ylabel='pixel offset') + ax.plot([data.shape[1] / 2, data.shape[1] / 2], [0, data.shape[0] - 1], "--", lw=1, color="grey", alpha=0.3) + ax.plot([0, data.shape[1] - 1], [data.shape[1] / 2, data.shape[1] / 2], "--", lw=1, color="grey", alpha=0.3) + ax.annotate(instr + ":" + rootname, color="white", fontsize=10, xy=(0.02, 0.95), xycoords="axes fraction") + ax.annotate(filt, color="white", fontsize=14, xy=(0.02, 0.02), xycoords="axes fraction") + ax.annotate(str(exptime) + " s", color="white", fontsize=10, xy=(0.80, 0.02), xycoords="axes fraction") + ax.set(title="Location of cropped image.", xlabel="pixel offset", ylabel="pixel offset") # fig.subplots_adjust(hspace=0, wspace=0, right=0.85) # cbar_ax = fig.add_axes([0.9, 0.12, 0.02, 0.75]) fig.colorbar(im, ax=ax, label=r"Flux [$ergs \cdot cm^{-2} \cdot s^{-1} \cdot \AA^{-1}$]") if savename is not None: - fig.savefig("/".join([plots_folder, savename+'_'+filt+'_crop_region.pdf']), - bbox_inches='tight', dpi=200) - plot_obs(data_array, headers, vmin=convert_flux*data_array[data_array > 0.].mean()/5., - vmax=convert_flux*data_array[data_array > 0.].max(), rectangle=[rectangle,]*len(headers), - savename=savename+'_crop_region', 
plots_folder=plots_folder) + fig.savefig("/".join([plots_folder, savename + "_" + filt + "_crop_region.pdf"]), bbox_inches="tight", dpi=200) + plot_obs( + data_array, + headers, + vmin=convert_flux * data_array[data_array > 0.0].mean() / 5.0, + vmax=convert_flux * data_array[data_array > 0.0].max(), + rectangle=[ + rectangle, + ] + * len(headers), + savename=savename + "_crop_region", + plots_folder=plots_folder, + ) plt.show() if data_mask is not None: - crop_mask = data_mask[v_array[0]:v_array[1], v_array[2]:v_array[3]] + crop_mask = data_mask[v_array[0] : v_array[1], v_array[2] : v_array[3]] return crop_array, crop_error_array, crop_mask, crop_headers else: return crop_array, crop_error_array, crop_headers -def deconvolve_array(data_array, headers, psf='gaussian', FWHM=1., scale='px', - shape=None, iterations=20, algo='richardson'): +def deconvolve_array(data_array, headers, psf="gaussian", FWHM=1.0, scale="px", shape=None, iterations=20, algo="richardson"): """ Homogeneously deconvolve a data array using Richardson-Lucy iterative algorithm. ---------- @@ -364,20 +405,20 @@ def deconvolve_array(data_array, headers, psf='gaussian', FWHM=1., scale='px', point spread function. """ # If chosen FWHM scale is 'arcsec', compute FWHM in pixel scale - if scale.lower() in ['arcsec', 'arcseconds']: + if scale.lower() in ["arcsec", "arcseconds"]: pxsize = np.zeros((data_array.shape[0], 2)) for i, header in enumerate(headers): # Get current pixel size w = WCS(header).celestial.deepcopy() - pxsize[i] = np.round(w.wcs.cdelt/3600., 15) + pxsize[i] = np.round(w.wcs.cdelt / 3600.0, 15) if (pxsize != pxsize[0]).any(): raise ValueError("Not all images in array have same pixel size") FWHM /= pxsize[0].min() # Define Point-Spread-Function kernel - if psf.lower() in ['gauss', 'gaussian']: + if psf.lower() in ["gauss", "gaussian"]: if shape is None: - shape = np.min(data_array[0].shape)-2, np.min(data_array[0].shape)-2 + shape = np.min(data_array[0].shape) - 2, np.min(data_array[0].shape) - 2 kernel = gaussian_psf(FWHM=FWHM, shape=shape) elif isinstance(psf, np.ndarray) and (len(psf.shape) == 2): kernel = psf @@ -392,7 +433,18 @@ def deconvolve_array(data_array, headers, psf='gaussian', FWHM=1., scale='px', return deconv_array -def get_error(data_array, headers, error_array=None, data_mask=None, sub_type=None, subtract_error=True, display=False, savename=None, plots_folder="", return_background=False): +def get_error( + data_array, + headers, + error_array=None, + data_mask=None, + sub_type=None, + subtract_error=0.5, + display=False, + savename=None, + plots_folder="", + return_background=False, +): """ Look for sub-image of shape sub_shape that have the smallest integrated flux (no source assumption) and define the background on the image by the @@ -459,7 +511,7 @@ def get_error(data_array, headers, error_array=None, data_mask=None, sub_type=No if data_mask is not None: mask = deepcopy(data_mask) else: - data_c, error_c, _ = crop_array(data, headers, error, step=5, null_val=0., inside=False) + data_c, error_c, _ = crop_array(data, headers, error, step=5, null_val=0.0, inside=False) mask_c = np.ones(data_c[0].shape, dtype=bool) for i, (data_ci, error_ci) in enumerate(zip(data_c, error_c)): data[i], error[i] = zeropad(data_ci, data[i].shape), zeropad(error_ci, error[i].shape) @@ -468,33 +520,42 @@ def get_error(data_array, headers, error_array=None, data_mask=None, sub_type=No # wavelength dependence of the polarizer filters # estimated to less than 1% - err_wav = data*0.01 + err_wav = data * 0.01 # difference 
in PSFs through each polarizers
     # estimated to less than 3%
-    err_psf = data*0.03
+    err_psf = data * 0.03

     # flatfielding uncertainties
     # estimated to less than 3%
-    err_flat = data*0.03
+    err_flat = data * 0.03

+    if sub_type is None:
         n_data_array, c_error_bkg, headers, background, error_bkg = bkg_hist(
             data, error, mask, headers, subtract_error=subtract_error, display=display, savename=savename, plots_folder=plots_folder)
-
+        sub_type, subtract_error = "histogram ", str(int(subtract_error > 0.0))
     elif isinstance(sub_type, str):
         if sub_type.lower() in ['auto']:
             n_data_array, c_error_bkg, headers, background, error_bkg = bkg_fit(
                 data, error, mask, headers, subtract_error=subtract_error, display=display, savename=savename, plots_folder=plots_folder)
+            sub_type, subtract_error = "histogram fit ", "mean+%.1fsigma" % subtract_error if subtract_error != 0.0 else 0.0
         else:
             n_data_array, c_error_bkg, headers, background, error_bkg = bkg_hist(
                 data, error, mask, headers, sub_type=sub_type, subtract_error=subtract_error, display=display, savename=savename, plots_folder=plots_folder)
+            sub_type, subtract_error = "histogram ", "mean+%.1fsigma" % subtract_error if subtract_error != 0.0 else 0.0
     elif isinstance(sub_type, tuple):
         n_data_array, c_error_bkg, headers, background, error_bkg = bkg_mini(
             data, error, mask, headers, sub_shape=sub_type, subtract_error=subtract_error, display=display, savename=savename, plots_folder=plots_folder)
+        sub_type, subtract_error = "minimal flux ", "mean+%.1fsigma" % subtract_error if subtract_error != 0.0 else 0.0
+
+    else:
+        # Raise instead of printing a warning: the arrays below would otherwise be undefined
+        raise ValueError("Invalid sub_type '{0}'.".format(sub_type))
+
+    for header in headers:
+        header["BKG_TYPE"] = (sub_type, "Bkg estimation method used during reduction")
+        header["BKG_SUB"] = (subtract_error, "Amount of bkg subtracted from images")
+
     # Quadratically add uncertainties in the "correction factors" (see Kishimoto 1999)
-    n_error_array = np.sqrt(err_wav**2+err_psf**2+err_flat**2+c_error_bkg**2)
+    n_error_array = np.sqrt(err_wav**2 + err_psf**2 + err_flat**2 + c_error_bkg**2)

     if return_background:
         return n_data_array, n_error_array, headers, background, error_bkg  # return background error as well
@@ -502,7 +563,7 @@ def get_error(data_array, headers, error_array=None, data_mask=None, sub_type=No
     return n_data_array, n_error_array, headers


-def rebin_array(data_array, error_array, headers, pxsize, scale, operation='sum', data_mask=None):
+def rebin_array(data_array, error_array, headers, pxsize=2, scale="px", operation="sum", data_mask=None):
     """
     Homogeneously rebin a data array to get a new pixel size equal to pxsize
     where pxsize is given in arcsec.
@@ -541,73 +602,84 @@ def rebin_array(data_array, error_array, headers, pxsize, scale, operation='sum'
     """
     # Check that all images are from the same instrument
     ref_header = headers[0]
-    instr = ref_header['instrume']
-    same_instr = np.array([instr == header['instrume'] for header in headers]).all()
+    instr = ref_header["instrume"]
+    same_instr = np.array([instr == header["instrume"] for header in headers]).all()
     if not same_instr:
-        raise ValueError("All images in data_array are not from the same\
-                instrument, cannot proceed.")
-    if instr not in ['FOC']:
-        raise ValueError("Cannot reduce images from {0:s} instrument\
-                (yet)".format(instr))
+        raise ValueError(
+            "All images in data_array are not from the same\
+            instrument, cannot proceed."
+ ) + if instr not in ["FOC"]: + raise ValueError( + "Cannot reduce images from {0:s} instrument\ + (yet)".format(instr) + ) rebinned_data, rebinned_error, rebinned_headers = [], [], [] - Dxy = np.array([1., 1.]) + Dxy = np.array([1.0, 1.0]) # Routine for the FOC instrument - if instr == 'FOC': - HST_aper = 2400. # HST aperture in mm - Dxy_arr = np.ones((data_array.shape[0], 2)) - for i, (image, error, header) in enumerate(list(zip(data_array, error_array, headers))): - # Get current pixel size - w = WCS(header).celestial.deepcopy() - new_header = deepcopy(header) + Dxy_arr = np.ones((data_array.shape[0], 2)) + for i, (image, error, header) in enumerate(list(zip(data_array, error_array, headers))): + # Get current pixel size + w = WCS(header).celestial.deepcopy() + new_header = deepcopy(header) - # Compute binning ratio - if scale.lower() in ['px', 'pixel']: - Dxy_arr[i] = np.array([pxsize,]*2) - elif scale.lower() in ['arcsec', 'arcseconds']: - Dxy_arr[i] = np.array(pxsize/np.abs(w.wcs.cdelt)/3600.) - elif scale.lower() in ['full', 'integrate']: - Dxy_arr[i] = image.shape - else: - raise ValueError("'{0:s}' invalid scale for binning.".format(scale)) - new_shape = np.ceil(min(image.shape/Dxy_arr, key=lambda x: x[0]+x[1])).astype(int) + # Compute binning ratio + if scale.lower() in ["px", "pixel"]: + Dxy_arr[i] = np.array( + [ + pxsize, + ] + * 2 + ) + scale = "px" + elif scale.lower() in ["arcsec", "arcseconds"]: + Dxy_arr[i] = np.array(pxsize / np.abs(w.wcs.cdelt) / 3600.0) + scale = "arcsec" + elif scale.lower() in ["full", "integrate"]: + Dxy_arr[i] = image.shape + pxsize, scale = "", "full" + else: + raise ValueError("'{0:s}' invalid scale for binning.".format(scale)) + new_shape = np.ceil(min(image.shape / Dxy_arr, key=lambda x: x[0] + x[1])).astype(int) - for i, (image, error, header) in enumerate(list(zip(data_array, error_array, headers))): - # Get current pixel size - w = WCS(header).celestial.deepcopy() - new_header = deepcopy(header) + for i, (image, error, header) in enumerate(list(zip(data_array, error_array, headers))): + # Get current pixel size + w = WCS(header).celestial.deepcopy() + new_header = deepcopy(header) - Dxy = image.shape/new_shape - if (Dxy < 1.).any(): - raise ValueError("Requested pixel size is below resolution.") + Dxy = image.shape / new_shape + if (Dxy < 1.0).any(): + raise ValueError("Requested pixel size is below resolution.") - # Rebin data - rebin_data = bin_ndarray(image, new_shape=new_shape, operation=operation) - rebinned_data.append(rebin_data) + # Rebin data + rebin_data = bin_ndarray(image, new_shape=new_shape, operation=operation) + rebinned_data.append(rebin_data) - # Propagate error - rms_image = np.sqrt(bin_ndarray(image**2, new_shape=new_shape, operation='average')) - sum_image = bin_ndarray(image, new_shape=new_shape, operation='sum') - mask = sum_image > 0. 
- new_error = np.zeros(rms_image.shape) - if operation.lower() in ["mean", "average", "avg"]: - new_error = np.sqrt(bin_ndarray(error**2, new_shape=new_shape, operation='average')) - else: - new_error = np.sqrt(bin_ndarray(error**2, new_shape=new_shape, operation='sum')) - rebinned_error.append(np.sqrt(rms_image**2 + new_error**2)) + # Propagate error + rms_image = np.sqrt(bin_ndarray(image**2, new_shape=new_shape, operation="average")) + # sum_image = bin_ndarray(image, new_shape=new_shape, operation="sum") + # mask = sum_image > 0.0 + new_error = np.zeros(rms_image.shape) + if operation.lower() in ["mean", "average", "avg"]: + new_error = np.sqrt(bin_ndarray(error**2, new_shape=new_shape, operation="average")) + else: + new_error = np.sqrt(bin_ndarray(error**2, new_shape=new_shape, operation="sum")) + rebinned_error.append(np.sqrt(rms_image**2 + new_error**2)) - # Update header - nw = w.deepcopy() - nw.wcs.cdelt *= Dxy - nw.wcs.crpix /= Dxy - nw.array_shape = new_shape - new_header['NAXIS1'], new_header['NAXIS2'] = nw.array_shape - for key, val in nw.to_header().items(): - new_header.set(key, val) - rebinned_headers.append(new_header) - if data_mask is not None: - data_mask = bin_ndarray(data_mask, new_shape=new_shape, operation='average') > 0.80 + # Update header + nw = w.deepcopy() + nw.wcs.cdelt *= Dxy + nw.wcs.crpix /= Dxy + nw.array_shape = new_shape + new_header["NAXIS1"], new_header["NAXIS2"] = nw.array_shape + for key, val in nw.to_header().items(): + new_header.set(key, val) + new_header["SAMPLING"] = (str(pxsize) + scale, "Resampling performed during reduction") + rebinned_headers.append(new_header) + if data_mask is not None: + data_mask = bin_ndarray(data_mask, new_shape=new_shape, operation="average") > 0.80 rebinned_data = np.array(rebinned_data) rebinned_error = np.array(rebinned_error) @@ -618,7 +690,9 @@ def rebin_array(data_array, error_array, headers, pxsize, scale, operation='sum' return rebinned_data, rebinned_error, rebinned_headers, Dxy, data_mask -def align_data(data_array, headers, error_array=None, background=None, upsample_factor=1., ref_data=None, ref_center=None, return_shifts=False): +def align_data( + data_array, headers, error_array=None, data_mask=None, background=None, upsample_factor=1.0, ref_data=None, ref_center=None, return_shifts=False +): """ Align images in data_array using cross correlation, and rescale them to wider images able to contain any rotation of the reference image. @@ -660,6 +734,8 @@ def align_data(data_array, headers, error_array=None, background=None, upsample_ image with margins of value 0. rescaled_error : numpy.ndarray Array containing the errors on the aligned images in the rescaled array. + headers : header list + List of headers corresponding to the images in data_array. data_mask : numpy.ndarray 2D boolean array delimiting the data to work on. shifts : numpy.ndarray @@ -678,10 +754,12 @@ def align_data(data_array, headers, error_array=None, background=None, upsample_ for array in data_array: # Check if all images have the same shape. If not, cross-correlation # cannot be computed. 
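# A minimal, self-contained sketch of shift estimation by phase cross-correlation,
# the technique align_data relies on. The pipeline uses its internal
# lib.cross_correlation.phase_cross_correlation helper; the plain-numpy version
# below is only illustrative:
import numpy as np

ref = np.zeros((32, 32))
ref[10, 12] = 1.0
img = np.roll(ref, (3, -2), axis=(0, 1))  # ref displaced by (+3, -2) pixels

# the peak of the normalized cross-power spectrum gives the translation
xps = np.fft.fft2(ref) * np.conj(np.fft.fft2(img))
cc = np.fft.ifft2(xps / np.abs(xps)).real
shift = np.array(np.unravel_index(np.argmax(cc), cc.shape), dtype=float)
wrap = shift > np.array(cc.shape) / 2
shift[wrap] -= np.array(cc.shape)[wrap]
# shift is now [-3., 2.]: applying it to img (e.g. with scipy.ndimage.shift)
# registers img onto ref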
- same *= (array.shape == ref_data.shape) + same *= array.shape == ref_data.shape if not same: - raise ValueError("All images in data_array must have same shape as\ - ref_data") + raise ValueError( + "All images in data_array must have same shape as\ + ref_data" + ) if (error_array is None) or (background is None): _, error_array, headers, background = get_error(data_array, headers, return_background=True) @@ -692,65 +770,76 @@ def align_data(data_array, headers, error_array=None, background=None, upsample_ full_headers.append(headers[0]) err_array = np.concatenate((error_array, [np.zeros(ref_data.shape)]), axis=0) - # full_array, err_array, full_headers = crop_array(full_array, full_headers, err_array, step=5, inside=False, null_val=0.) + + if data_mask is None: + full_array, err_array, full_headers = crop_array(full_array, full_headers, err_array, step=5, inside=False, null_val=0.0) + else: + full_array, err_array, data_mask, full_headers = crop_array( + full_array, full_headers, err_array, data_mask=data_mask, step=5, inside=False, null_val=0.0 + ) data_array, ref_data, headers = full_array[:-1], full_array[-1], full_headers[:-1] error_array = err_array[:-1] + do_shift = True if ref_center is None: # Define the center of the reference image to be the center pixel # if None have been specified - ref_center = (np.array(ref_data.shape)/2).astype(int) + ref_center = (np.array(ref_data.shape) / 2).astype(int) do_shift = False - elif ref_center.lower() in ['max', 'flux', 'maxflux', 'max_flux']: + elif ref_center.lower() in ["max", "flux", "maxflux", "max_flux"]: # Define the center of the reference image to be the pixel of max flux. ref_center = np.unravel_index(np.argmax(ref_data), ref_data.shape) else: # Default to image center. - ref_center = (np.array(ref_data.shape)/2).astype(int) + ref_center = (np.array(ref_data.shape) / 2).astype(int) # Create a rescaled null array that can contain any rotation of the # original image (and shifted images) shape = data_array.shape - res_shape = int(np.ceil(np.sqrt(2.)*np.max(shape[1:]))) + res_shape = int(np.ceil(np.sqrt(2.0) * np.max(shape[1:]))) rescaled_image = np.zeros((shape[0], res_shape, res_shape)) rescaled_error = np.ones((shape[0], res_shape, res_shape)) rescaled_mask = np.zeros((shape[0], res_shape, res_shape), dtype=bool) - res_center = (np.array(rescaled_image.shape[1:])/2).astype(int) - res_shift = res_center-ref_center + res_center = (np.array(rescaled_image.shape[1:]) / 2).astype(int) + res_shift = res_center - ref_center res_mask = np.zeros((res_shape, res_shape), dtype=bool) - res_mask[res_shift[0]:res_shift[0]+shape[1], res_shift[1]:res_shift[1]+shape[2]] = True + res_mask[res_shift[0] : res_shift[0] + shape[1], res_shift[1] : res_shift[1] + shape[2]] = True + if data_mask is not None: + res_mask = np.logical_and(res_mask, zeropad(data_mask, (res_shape, res_shape)).astype(bool)) shifts, errors = [], [] for i, image in enumerate(data_array): # Initialize rescaled images to background values - rescaled_error[i] *= 0.01*background[i] + rescaled_error[i] *= 0.01 * background[i] # Get shifts and error by cross-correlation to ref_data if do_shift: - shift, error, _ = phase_cross_correlation(ref_data/ref_data.max(), image/image.max(), upsample_factor=upsample_factor) + shift, error, _ = phase_cross_correlation(ref_data / ref_data.max(), image / image.max(), upsample_factor=upsample_factor) else: - shift = globals["pol_shift"][headers[i]['filtnam1'].lower()] - error = globals["sigma_shift"][headers[i]['filtnam1'].lower()] + shift = 
globals()["pol_shift"][headers[i]["filtnam1"].lower()] + error = globals()["sigma_shift"][headers[i]["filtnam1"].lower()] # Rescale image to requested output - rescaled_image[i, res_shift[0]:res_shift[0]+shape[1], res_shift[1]:res_shift[1]+shape[2]] = deepcopy(image) - rescaled_error[i, res_shift[0]:res_shift[0]+shape[1], res_shift[1]:res_shift[1]+shape[2]] = deepcopy(error_array[i]) + rescaled_image[i, res_shift[0] : res_shift[0] + shape[1], res_shift[1] : res_shift[1] + shape[2]] = deepcopy(image) + rescaled_error[i, res_shift[0] : res_shift[0] + shape[1], res_shift[1] : res_shift[1] + shape[2]] = deepcopy(error_array[i]) # Shift images to align - rescaled_image[i] = sc_shift(rescaled_image[i], shift, order=1, cval=0.) + rescaled_image[i] = sc_shift(rescaled_image[i], shift, order=1, cval=0.0) rescaled_error[i] = sc_shift(rescaled_error[i], shift, order=1, cval=background[i]) - curr_mask = sc_shift(res_mask, shift, order=1, cval=False) - mask_vertex = clean_ROI(curr_mask) - rescaled_mask[i, mask_vertex[2]:mask_vertex[3], mask_vertex[0]:mask_vertex[1]] = True + curr_mask = sc_shift(res_mask * 10.0, shift, order=1, cval=0.0) + curr_mask[curr_mask < curr_mask.max() * 2.0 / 3.0] = 0.0 + rescaled_mask[i] = curr_mask.astype(bool) + # mask_vertex = clean_ROI(curr_mask) + # rescaled_mask[i, mask_vertex[2] : mask_vertex[3], mask_vertex[0] : mask_vertex[1]] = True - rescaled_image[i][rescaled_image[i] < 0.] = 0. - rescaled_image[i][(1-rescaled_mask[i]).astype(bool)] = 0. + rescaled_image[i][rescaled_image[i] < 0.0] = 0.0 + rescaled_image[i][(1 - rescaled_mask[i]).astype(bool)] = 0.0 # Uncertainties from shifting - prec_shift = np.array([1., 1.])/upsample_factor - shifted_image = sc_shift(rescaled_image[i], prec_shift, cval=0.) - error_shift = np.abs(rescaled_image[i] - shifted_image)/2. + prec_shift = np.array([1.0, 1.0]) / upsample_factor + shifted_image = sc_shift(rescaled_image[i], prec_shift, cval=0.0) + error_shift = np.abs(rescaled_image[i] - shifted_image) / 2.0 # sum quadratically the errors - rescaled_error[i] = np.sqrt(rescaled_error[i]**2 + error_shift**2) + rescaled_error[i] = np.sqrt(rescaled_error[i] ** 2 + error_shift**2) shifts.append(shift) errors.append(error) @@ -768,13 +857,14 @@ def align_data(data_array, headers, error_array=None, background=None, upsample_ data_mask = rescaled_mask.all(axis=0) # data_array, error_array, data_mask, headers = crop_array(rescaled_image, headers, rescaled_error, data_mask, null_val=0.01*background) + if return_shifts: return data_array, error_array, headers, data_mask, shifts, errors else: return data_array, error_array, headers, data_mask -def smooth_data(data_array, error_array, data_mask, headers, FWHM=1., scale='pixel', smoothing='gaussian'): +def smooth_data(data_array, error_array, data_mask, headers, FWHM=1.5, scale="pixel", smoothing="weighted_gaussian"): """ Smooth a data_array using selected function. ---------- @@ -810,24 +900,30 @@ def smooth_data(data_array, error_array, data_mask, headers, FWHM=1., scale='pix smoothed_array. 
""" # If chosen FWHM scale is 'arcsec', compute FWHM in pixel scale - if scale.lower() in ['arcsec', 'arcseconds']: + if scale.lower() in ["arcsec", "arcseconds"]: pxsize = np.zeros((data_array.shape[0], 2)) for i, header in enumerate(headers): # Get current pixel size w = WCS(header).celestial.deepcopy() - pxsize[i] = np.round(w.wcs.cdelt*3600., 4) + pxsize[i] = np.round(w.wcs.cdelt * 3600.0, 4) if (pxsize != pxsize[0]).any(): raise ValueError("Not all images in array have same pixel size") + FWHM_size = str(FWHM) + FWHM_scale = "arcsec" FWHM /= pxsize[0].min() + else: + FWHM_size = str(FWHM) + FWHM_scale = "px" # Define gaussian stdev - stdev = FWHM/(2.*np.sqrt(2.*np.log(2))) + stdev = FWHM / (2.0 * np.sqrt(2.0 * np.log(2))) fmax = np.finfo(np.double).max - if smoothing.lower() in ['combine', 'combining']: + if smoothing.lower() in ["combine", "combining"]: + smoothing = "combine" # Smooth using N images combination algorithm # Weight array - weight = 1./error_array**2 + weight = 1.0 / error_array**2 # Prepare pixel distance matrix xx, yy = np.indices(data_array[0].shape) # Initialize smoothed image and error arrays @@ -838,47 +934,62 @@ def smooth_data(data_array, error_array, data_mask, headers, FWHM=1., scale='pix for r in range(smoothed.shape[0]): for c in range(smoothed.shape[1]): # Compute distance from current pixel - dist_rc = np.where(data_mask, np.sqrt((r-xx)**2+(c-yy)**2), fmax) + dist_rc = np.where(data_mask, np.sqrt((r - xx) ** 2 + (c - yy) ** 2), fmax) # Catch expected "OverflowWarning" as we overflow values that are not in the image with warnings.catch_warnings(record=True) as w: - g_rc = np.array([np.exp(-0.5*(dist_rc/stdev)**2)/(2.*np.pi*stdev**2),]*data_array.shape[0]) + g_rc = np.array( + [ + np.exp(-0.5 * (dist_rc / stdev) ** 2) / (2.0 * np.pi * stdev**2), + ] + * data_array.shape[0] + ) # Apply weighted combination - smoothed[r, c] = np.where(data_mask[r, c], np.sum(data_array*weight*g_rc)/np.sum(weight*g_rc), data_array.mean(axis=0)[r, c]) - error[r, c] = np.where(data_mask[r, c], np.sqrt(np.sum(weight*g_rc**2))/np.sum(weight*g_rc), - (np.sqrt(np.sum(error_array**2, axis=0)/error_array.shape[0]))[r, c]) + smoothed[r, c] = np.where(data_mask[r, c], np.sum(data_array * weight * g_rc) / np.sum(weight * g_rc), data_array.mean(axis=0)[r, c]) + error[r, c] = np.where( + data_mask[r, c], + np.sqrt(np.sum(weight * g_rc**2)) / np.sum(weight * g_rc), + (np.sqrt(np.sum(error_array**2, axis=0) / error_array.shape[0]))[r, c], + ) # Nan handling - error[np.logical_or(np.isnan(smoothed*error), 1-data_mask)] = 0. - smoothed[np.logical_or(np.isnan(smoothed*error), 1-data_mask)] = 0. 
+        error[np.logical_or(np.isnan(smoothed * error), 1 - data_mask)] = 0.0
+        smoothed[np.logical_or(np.isnan(smoothed * error), 1 - data_mask)] = 0.0

-    elif smoothing.lower() in ['weight_gauss', 'weighted_gaussian', 'gauss', 'gaussian']:
+    elif smoothing.lower() in ["weight_gauss", "weighted_gaussian", "gauss", "gaussian"]:
+        # Record the requested weighting before normalising the name written to the SMOOTH keyword,
+        # otherwise the weighted branch below can never trigger
+        weighted = smoothing.lower().startswith("weight")
+        smoothing = "weighted gaussian" if weighted else "gaussian"
         # Convolution with gaussian function
         smoothed = np.zeros(data_array.shape)
         error = np.zeros(error_array.shape)
         for i, (image, image_error) in enumerate(zip(data_array, error_array)):
-            x, y = np.meshgrid(np.arange(-image.shape[1]/2, image.shape[1]/2), np.arange(-image.shape[0]/2, image.shape[0]/2))
+            x, y = np.meshgrid(np.arange(-image.shape[1] / 2, image.shape[1] / 2), np.arange(-image.shape[0] / 2, image.shape[0] / 2))
             weights = np.ones(image_error.shape)
-            if smoothing.lower()[:6] in ['weight']:
-                weights = 1./image_error**2
-                weights[(1-np.isfinite(weights)).astype(bool)] = 0.
-                weights[(1-data_mask).astype(bool)] = 0.
+            if weighted:
+                weights = 1.0 / image_error**2
+                weights[(1 - np.isfinite(weights)).astype(bool)] = 0.0
+                weights[(1 - data_mask).astype(bool)] = 0.0
             weights /= weights.sum()
             kernel = gaussian2d(x, y, stdev)
             kernel /= kernel.sum()
-            smoothed[i] = np.where(data_mask, fftconvolve(image*weights, kernel, 'same')/fftconvolve(weights, kernel, 'same'), image)
-            error[i] = np.where(data_mask, np.sqrt(fftconvolve(image_error**2*weights**2, kernel**2, 'same'))/fftconvolve(weights, kernel, 'same'), image_error)
+            smoothed[i] = np.where(data_mask, fftconvolve(image * weights, kernel, "same") / fftconvolve(weights, kernel, "same"), image)
+            error[i] = np.where(
+                data_mask, np.sqrt(fftconvolve(image_error**2 * weights**2, kernel**2, "same")) / fftconvolve(weights, kernel, "same"), image_error
+            )
             # Nan handling
-            error[i][np.logical_or(np.isnan(smoothed[i]*error[i]), 1-data_mask)] = 0.
-            smoothed[i][np.logical_or(np.isnan(smoothed[i]*error[i]), 1-data_mask)] = 0.
+            error[i][np.logical_or(np.isnan(smoothed[i] * error[i]), 1 - data_mask)] = 0.0
+            smoothed[i][np.logical_or(np.isnan(smoothed[i] * error[i]), 1 - data_mask)] = 0.0

     else:
         raise ValueError("{} is not a valid smoothing option".format(smoothing))

+    for header in headers:
+        header["SMOOTH"] = (" ".join([smoothing, FWHM_size, FWHM_scale]), "Smoothing method used during reduction")
+
     return smoothed, error


-def polarizer_avg(data_array, error_array, data_mask, headers, FWHM=None, scale='pixel', smoothing='gaussian'):
+def polarizer_avg(data_array, error_array, data_mask, headers, FWHM=1.5, scale="pixel", smoothing="weighted_gaussian"):
     """
     Make the average image from a single polarizer for a given instrument.
     -----------
@@ -916,34 +1027,44 @@ def polarizer_avg(data_array, error_array, data_mask, headers, FWHM=None, scale=
         Covariance matrix between the polarizer images in polarizer_array
     """
     # Check that all images are from the same instrument
-    instr = headers[0]['instrume']
-    same_instr = np.array([instr == header['instrume'] for header in headers]).all()
+    instr = headers[0]["instrume"]
+    same_instr = np.array([instr == header["instrume"] for header in headers]).all()
     if not same_instr:
-        raise ValueError("All images in data_array are not from the same\
-                instrument, cannot proceed.")
+        raise ValueError(
+            "All images in data_array are not from the same\
+            instrument, cannot proceed."
+ ) + if instr not in ["FOC"]: + raise ValueError( + "Cannot reduce images from {0:s} instrument\ + (yet)".format(instr) + ) # Routine for the FOC instrument - if instr == 'FOC': + if instr == "FOC": # Sort images by polarizer filter : can be 0deg, 60deg, 120deg for the FOC - is_pol0 = np.array([header['filtnam1'] == 'POL0' for header in headers]) - if (1-is_pol0).all(): - print("Warning : no image for POL0 of FOC found, averaged data\ - will be NAN") - is_pol60 = np.array([header['filtnam1'] == 'POL60' for header in headers]) - if (1-is_pol60).all(): - print("Warning : no image for POL60 of FOC found, averaged data\ - will be NAN") - is_pol120 = np.array([header['filtnam1'] == 'POL120' for header in headers]) - if (1-is_pol120).all(): - print("Warning : no image for POL120 of FOC found, averaged data\ - will be NAN") + is_pol0 = np.array([header["filtnam1"] == "POL0" for header in headers]) + if (1 - is_pol0).all(): + print( + "Warning : no image for POL0 of FOC found, averaged data\ + will be NAN" + ) + is_pol60 = np.array([header["filtnam1"] == "POL60" for header in headers]) + if (1 - is_pol60).all(): + print( + "Warning : no image for POL60 of FOC found, averaged data\ + will be NAN" + ) + is_pol120 = np.array([header["filtnam1"] == "POL120" for header in headers]) + if (1 - is_pol120).all(): + print( + "Warning : no image for POL120 of FOC found, averaged data\ + will be NAN" + ) # Put each polarizer images in separate arrays - headers0 = [header for header in headers if header['filtnam1'] == 'POL0'] - headers60 = [header for header in headers if header['filtnam1'] == 'POL60'] - headers120 = [header for header in headers if header['filtnam1'] == 'POL120'] + headers0 = [header for header in headers if header["filtnam1"] == "POL0"] + headers60 = [header for header in headers if header["filtnam1"] == "POL60"] + headers120 = [header for header in headers if header["filtnam1"] == "POL120"] pol0_array = data_array[is_pol0] pol60_array = data_array[is_pol60] @@ -954,10 +1075,10 @@ def polarizer_avg(data_array, error_array, data_mask, headers, FWHM=None, scale= err120_array = error_array[is_pol120] # For a single observation, combination amount to a weighted gaussian - if np.max([is_pol0.sum(), is_pol60.sum(), is_pol120.sum()]) == 1 and smoothing.lower() in ['combine', 'combining']: - smoothing = 'weighted_gaussian' + if np.max([is_pol0.sum(), is_pol60.sum(), is_pol120.sum()]) == 1 and smoothing.lower() in ["combine", "combining"]: + smoothing = "weighted_gaussian" - if (FWHM is not None) and (smoothing.lower() in ['combine', 'combining']): + if (FWHM is not None) and (smoothing.lower() in ["combine", "combining"]): # Smooth by combining each polarizer images pol0, err0 = smooth_data(pol0_array, err0_array, data_mask, headers0, FWHM=FWHM, scale=scale, smoothing=smoothing) pol60, err60 = smooth_data(pol60_array, err60_array, data_mask, headers60, FWHM=FWHM, scale=scale, smoothing=smoothing) @@ -965,33 +1086,33 @@ def polarizer_avg(data_array, error_array, data_mask, headers, FWHM=None, scale= else: # Sum on each polarization filter. 
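# A minimal sketch of the exposure-time weighted co-addition performed below,
# using made-up frames f1, f2 (count-rate images) and exposures t1, t2; the
# names are illustrative only and not part of the pipeline:
import numpy as np

t1, t2 = 1200.0, 800.0
f1, f2 = np.ones((2, 2)), 2.0 * np.ones((2, 2))
s1, s2 = 0.1 * f1, 0.1 * f2  # per-frame uncertainties
pol_sum = (t1 * f1 + t2 * f2) / (t1 + t2)  # exposure-weighted mean rate
err_sum = np.sqrt((t1 * s1) ** 2 + (t2 * s2) ** 2) / (t1 + t2)  # quadrature propagation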
- pol0_t = np.sum([header['exptime'] for header in headers0]) - pol60_t = np.sum([header['exptime'] for header in headers60]) - pol120_t = np.sum([header['exptime'] for header in headers120]) + pol0_t = np.sum([header["exptime"] for header in headers0]) + pol60_t = np.sum([header["exptime"] for header in headers60]) + pol120_t = np.sum([header["exptime"] for header in headers120]) for i in range(pol0_array.shape[0]): - pol0_array[i] *= headers0[i]['exptime'] - err0_array[i] *= headers0[i]['exptime'] + pol0_array[i] *= headers0[i]["exptime"] + err0_array[i] *= headers0[i]["exptime"] for i in range(pol60_array.shape[0]): - pol60_array[i] *= headers60[i]['exptime'] - err60_array[i] *= headers60[i]['exptime'] + pol60_array[i] *= headers60[i]["exptime"] + err60_array[i] *= headers60[i]["exptime"] for i in range(pol120_array.shape[0]): - pol120_array[i] *= headers120[i]['exptime'] - err120_array[i] *= headers120[i]['exptime'] + pol120_array[i] *= headers120[i]["exptime"] + err120_array[i] *= headers120[i]["exptime"] - pol0 = pol0_array.sum(axis=0)/pol0_t - pol60 = pol60_array.sum(axis=0)/pol60_t - pol120 = pol120_array.sum(axis=0)/pol120_t + pol0 = pol0_array.sum(axis=0) / pol0_t + pol60 = pol60_array.sum(axis=0) / pol60_t + pol120 = pol120_array.sum(axis=0) / pol120_t pol_array = np.array([pol0, pol60, pol120]) pol_headers = [headers0[0], headers60[0], headers120[0]] # Propagate uncertainties quadratically - err0 = np.sqrt(np.sum(err0_array**2, axis=0))/pol0_t - err60 = np.sqrt(np.sum(err60_array**2, axis=0))/pol60_t - err120 = np.sqrt(np.sum(err120_array**2, axis=0))/pol120_t + err0 = np.sqrt(np.sum(err0_array**2, axis=0)) / pol0_t + err60 = np.sqrt(np.sum(err60_array**2, axis=0)) / pol60_t + err120 = np.sqrt(np.sum(err120_array**2, axis=0)) / pol120_t polerr_array = np.array([err0, err60, err120]) - if not (FWHM is None) and (smoothing.lower() in ['gaussian', 'gauss', 'weighted_gaussian', 'weight_gauss']): + if (FWHM is not None) and (smoothing.lower() in ["gaussian", "gauss", "weighted_gaussian", "weight_gauss"]): # Smooth by convoluting with a gaussian each polX image. 
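# A self-contained sketch of the (optionally weighted) gaussian convolution that
# smooth_data applies to each polarizer image. The pipeline builds its kernel
# with the internal gaussian2d helper; this plain-numpy stand-in is only
# illustrative:
import numpy as np
from scipy.signal import fftconvolve

FWHM_demo = 1.5  # assumed FWHM in pixels
stdev = FWHM_demo / (2.0 * np.sqrt(2.0 * np.log(2)))  # FWHM -> gaussian sigma
x, y = np.meshgrid(np.arange(-10, 10), np.arange(-10, 10))
kernel = np.exp(-0.5 * (x**2 + y**2) / stdev**2)
kernel /= kernel.sum()  # unit-sum kernel preserves total flux
image = np.random.rand(20, 20)
weights = np.ones_like(image)  # would be 1 / sigma**2 for the weighted variant
smoothed = fftconvolve(image * weights, kernel, "same") / fftconvolve(weights, kernel, "same")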
pol_array, polerr_array = smooth_data(pol_array, polerr_array, data_mask, pol_headers, FWHM=FWHM, scale=scale, smoothing=smoothing) pol0, pol60, pol120 = pol_array @@ -999,13 +1120,13 @@ def polarizer_avg(data_array, error_array, data_mask, headers, FWHM=None, scale= # Update headers for header in headers: - if header['filtnam1'] == 'POL0': + if header["filtnam1"] == "POL0": list_head = headers0 - elif header['filtnam1'] == 'POL60': + elif header["filtnam1"] == "POL60": list_head = headers60 - elif header['filtnam1'] == 'POL120': + elif header["filtnam1"] == "POL120": list_head = headers120 - header['exptime'] = np.sum([head['exptime'] for head in list_head]) + header["exptime"] = np.sum([head["exptime"] for head in list_head]) pol_headers = [headers0[0], headers60[0], headers120[0]] # Get image shape @@ -1027,7 +1148,7 @@ def polarizer_avg(data_array, error_array, data_mask, headers, FWHM=None, scale= return polarizer_array, polarizer_cov, pol_headers -def compute_Stokes(data_array, error_array, data_mask, headers, FWHM=None, scale='pixel', smoothing='combine', transmitcorr=True): +def compute_Stokes(data_array, error_array, data_mask, headers, FWHM=None, scale="pixel", smoothing="combine", transmitcorr=True, integrate=True): """ Compute the Stokes parameters I, Q and U for a given data_set ---------- @@ -1079,62 +1200,78 @@ def compute_Stokes(data_array, error_array, data_mask, headers, FWHM=None, scale Covariance matrix of the Stokes parameters I, Q, U. """ # Check that all images are from the same instrument - instr = headers[0]['instrume'] - same_instr = np.array([instr == header['instrume'] for header in headers]).all() + instr = headers[0]["instrume"] + same_instr = np.array([instr == header["instrume"] for header in headers]).all() if not same_instr: - raise ValueError("All images in data_array are not from the same\ - instrument, cannot proceed.") - if instr not in ['FOC']: - raise ValueError("Cannot reduce images from {0:s} instrument\ - (yet)".format(instr)) + raise ValueError( + "All images in data_array are not from the same\ + instrument, cannot proceed." 
+ ) + if instr not in ["FOC"]: + raise ValueError( + "Cannot reduce images from {0:s} instrument\ + (yet)".format(instr) + ) + rotate = np.zeros(len(headers)) + for i, head in enumerate(headers): + try: + rotate[i] = head["ROTATE"] + except KeyError: + rotate[i] = 0.0 - # Routine for the FOC instrument - if instr == 'FOC': + if (np.unique(rotate) == rotate[0]).all(): + theta = globals()["theta"] - rotate[0] * np.pi / 180.0 # Get image from each polarizer and covariance matrix pol_array, pol_cov, pol_headers = polarizer_avg(data_array, error_array, data_mask, headers, FWHM=FWHM, scale=scale, smoothing=smoothing) pol0, pol60, pol120 = pol_array - if (pol0 < 0.).any() or (pol60 < 0.).any() or (pol120 < 0.).any(): + if (pol0 < 0.0).any() or (pol60 < 0.0).any() or (pol120 < 0.0).any(): print("WARNING : Negative value in polarizer array.") # Stokes parameters # transmittance corrected transmit = np.ones((3,)) # will be filter dependant - filt2, filt3, filt4 = headers[0]['filtnam2'], headers[0]['filtnam3'], headers[0]['filtnam4'] - same_filt2 = np.array([filt2 == header['filtnam2'] for header in headers]).all() - same_filt3 = np.array([filt3 == header['filtnam3'] for header in headers]).all() - same_filt4 = np.array([filt4 == header['filtnam4'] for header in headers]).all() - if (same_filt2 and same_filt3 and same_filt4): + filt2, filt3, filt4 = headers[0]["filtnam2"], headers[0]["filtnam3"], headers[0]["filtnam4"] + same_filt2 = np.array([filt2 == header["filtnam2"] for header in headers]).all() + same_filt3 = np.array([filt3 == header["filtnam3"] for header in headers]).all() + same_filt4 = np.array([filt4 == header["filtnam4"] for header in headers]).all() + if same_filt2 and same_filt3 and same_filt4: transmit2, transmit3, transmit4 = globals()["trans2"][filt2.lower()], globals()["trans3"][filt3.lower()], globals()["trans4"][filt4.lower()] else: - print("WARNING : All images in data_array are not from the same \ - band filter, the limiting transmittance will be taken.") - transmit2 = np.min([globals()["trans2"][header['filtnam2'].lower()] for header in headers]) - transmit3 = np.min([globals()["trans3"][header['filtnam3'].lower()] for header in headers]) - transmit4 = np.min([globals()["trans4"][header['filtnam4'].lower()] for header in headers]) + print( + "WARNING : All images in data_array are not from the same \ + band filter, the limiting transmittance will be taken." 
+            )
+            transmit2 = np.min([globals()["trans2"][header["filtnam2"].lower()] for header in headers])
+            transmit3 = np.min([globals()["trans3"][header["filtnam3"].lower()] for header in headers])
+            transmit4 = np.min([globals()["trans4"][header["filtnam4"].lower()] for header in headers])
         if transmitcorr:
-            transmit *= transmit2*transmit3*transmit4
-        pol_eff = np.array([globals()["pol_efficiency"]['pol0'], globals()["pol_efficiency"]['pol60'], globals()["pol_efficiency"]['pol120']])
+            transmit *= transmit2 * transmit3 * transmit4
+        pol_eff = np.array([globals()["pol_efficiency"]["pol0"], globals()["pol_efficiency"]["pol60"], globals()["pol_efficiency"]["pol120"]])

-        # Calculating correction factor
-        corr = np.array([1.0*h['photflam']/h['exptime'] for h in pol_headers])*pol_headers[0]['exptime']/pol_headers[0]['photflam']
+        # Calculating correction factor: allows all pol_filt to share same exptime and inverse sensitivity (taken to be the one from POL0)
+        corr = np.array([1.0 * h["photflam"] / h["exptime"] for h in pol_headers]) * pol_headers[0]["exptime"] / pol_headers[0]["photflam"]
+        pol_headers[1]["photflam"], pol_headers[1]["exptime"] = pol_headers[0]["photflam"], pol_headers[0]["exptime"]
+        pol_headers[2]["photflam"], pol_headers[2]["exptime"] = pol_headers[0]["photflam"], pol_headers[0]["exptime"]

         # Orientation and error for each polarizer
-        fmax = np.finfo(np.float64).max
-        pol_flux = np.array([corr[0]*pol0, corr[1]*pol60, corr[2]*pol120])
+        # fmax = np.finfo(np.float64).max
+        pol_flux = np.array([corr[0] * pol0, corr[1] * pol60, corr[2] * pol120])

         coeff_stokes = np.zeros((3, 3))
         # Coefficients linking each polarizer flux to each Stokes parameter
         for i in range(3):
-            coeff_stokes[0, i] = pol_eff[(i+1) % 3]*pol_eff[(i+2) % 3]*np.sin(-2.*globals()["theta"][(i+1) % 3]+2.*globals()["theta"][(i+2) % 3])*2./transmit[i]
-            coeff_stokes[1, i] = (-pol_eff[(i+1) % 3]*np.sin(2.*globals()["theta"][(i+1) % 3]) +
-                                  pol_eff[(i+2) % 3]*np.sin(2.*globals()["theta"][(i+2) % 3]))*2./transmit[i]
-            coeff_stokes[2, i] = (pol_eff[(i+1) % 3]*np.cos(2.*globals()["theta"][(i+1) % 3]) -
-                                  pol_eff[(i+2) % 3]*np.cos(2.*globals()["theta"][(i+2) % 3]))*2./transmit[i]
+            coeff_stokes[0, i] = pol_eff[(i + 1) % 3] * pol_eff[(i + 2) % 3] * np.sin(-2.0 * theta[(i + 1) % 3] + 2.0 * theta[(i + 2) % 3]) * 2.0 / transmit[i]
+            coeff_stokes[1, i] = (
+                (-pol_eff[(i + 1) % 3] * np.sin(2.0 * theta[(i + 1) % 3]) + pol_eff[(i + 2) % 3] * np.sin(2.0 * theta[(i + 2) % 3])) * 2.0 / transmit[i]
+            )
+            coeff_stokes[2, i] = (
+                (pol_eff[(i + 1) % 3] * np.cos(2.0 * theta[(i + 1) % 3]) - pol_eff[(i + 2) % 3] * np.cos(2.0 * theta[(i + 2) % 3])) * 2.0 / transmit[i]
+            )

         # Normalization parameter for Stokes parameters computation
-        N = (coeff_stokes[0, :]*transmit/2.).sum()
-        coeff_stokes = coeff_stokes/N
+        N = (coeff_stokes[0, :] * transmit / 2.0).sum()
+        coeff_stokes = coeff_stokes / N
         I_stokes = np.zeros(pol_array[0].shape)
         Q_stokes = np.zeros(pol_array[0].shape)
         U_stokes = np.zeros(pol_array[0].shape)
@@ -1145,7 +1282,7 @@ def compute_Stokes(data_array, error_array, data_mask, headers, FWHM=None, scale
                 I_stokes[i, j], Q_stokes[i, j], U_stokes[i, j] = np.dot(coeff_stokes, pol_flux[:, i, j]).T
                 Stokes_cov[:, :, i, j] = np.dot(coeff_stokes, np.dot(pol_cov[:, :, i, j], coeff_stokes.T))

-        if not (FWHM is None) and (smoothing.lower() in ['weighted_gaussian_after', 'weight_gauss_after', 'gaussian_after', 'gauss_after']):
+        if (FWHM is not None) and (smoothing.lower() in ["weighted_gaussian_after", "weight_gauss_after", "gaussian_after",
"gauss_after"]): smoothing = smoothing.lower()[:-6] Stokes_array = np.array([I_stokes, Q_stokes, U_stokes]) Stokes_error = np.array([np.sqrt(Stokes_cov[i, i]) for i in range(3)]) @@ -1156,14 +1293,16 @@ def compute_Stokes(data_array, error_array, data_mask, headers, FWHM=None, scale I_stokes, Q_stokes, U_stokes = Stokes_array Stokes_cov[0, 0], Stokes_cov[1, 1], Stokes_cov[2, 2] = deepcopy(Stokes_error**2) - sStokes_array = np.array([I_stokes*Q_stokes, I_stokes*U_stokes, Q_stokes*U_stokes]) + sStokes_array = np.array([I_stokes * Q_stokes, I_stokes * U_stokes, Q_stokes * U_stokes]) sStokes_error = np.array([Stokes_cov[0, 1], Stokes_cov[0, 2], Stokes_cov[1, 2]]) uStokes_error = np.array([Stokes_cov[1, 0], Stokes_cov[2, 0], Stokes_cov[2, 1]]) - sStokes_array, sStokes_error = smooth_data(sStokes_array, sStokes_error, data_mask, - headers=Stokes_headers, FWHM=FWHM, scale=scale, smoothing=smoothing) - uStokes_array, uStokes_error = smooth_data(sStokes_array, uStokes_error, data_mask, - headers=Stokes_headers, FWHM=FWHM, scale=scale, smoothing=smoothing) + sStokes_array, sStokes_error = smooth_data( + sStokes_array, sStokes_error, data_mask, headers=Stokes_headers, FWHM=FWHM, scale=scale, smoothing=smoothing + ) + uStokes_array, uStokes_error = smooth_data( + sStokes_array, uStokes_error, data_mask, headers=Stokes_headers, FWHM=FWHM, scale=scale, smoothing=smoothing + ) Stokes_cov[0, 1], Stokes_cov[0, 2], Stokes_cov[1, 2] = deepcopy(sStokes_error) Stokes_cov[1, 0], Stokes_cov[2, 0], Stokes_cov[2, 1] = deepcopy(uStokes_error) @@ -1173,51 +1312,114 @@ def compute_Stokes(data_array, error_array, data_mask, headers, FWHM=None, scale print("WARNING : found {0:d} pixels for which I_pol > I_stokes".format(I_stokes[mask].size)) # Statistical error: Poisson noise is assumed - sigma_flux = np.array([np.sqrt(flux/head['exptime']) for flux, head in zip(pol_flux, pol_headers)]) - s_I2_stat = np.sum([coeff_stokes[0, i]**2*sigma_flux[i]**2 for i in range(len(sigma_flux))], axis=0) - s_Q2_stat = np.sum([coeff_stokes[1, i]**2*sigma_flux[i]**2 for i in range(len(sigma_flux))], axis=0) - s_U2_stat = np.sum([coeff_stokes[2, i]**2*sigma_flux[i]**2 for i in range(len(sigma_flux))], axis=0) + sigma_flux = np.array([np.sqrt(flux / head["exptime"]) for flux, head in zip(pol_flux, pol_headers)]) + s_I2_stat = np.sum([coeff_stokes[0, i] ** 2 * sigma_flux[i] ** 2 for i in range(len(sigma_flux))], axis=0) + s_Q2_stat = np.sum([coeff_stokes[1, i] ** 2 * sigma_flux[i] ** 2 for i in range(len(sigma_flux))], axis=0) + s_U2_stat = np.sum([coeff_stokes[2, i] ** 2 * sigma_flux[i] ** 2 for i in range(len(sigma_flux))], axis=0) - pol_flux_corr = np.array([pf*2./t for (pf, t) in zip(pol_flux, transmit)]) - coeff_stokes_corr = np.array([cs*t/2. 
for (cs, t) in zip(coeff_stokes.T, transmit)]).T + pol_flux_corr = np.array([pf * 2.0 / t for (pf, t) in zip(pol_flux, transmit)]) + coeff_stokes_corr = np.array([cs * t / 2.0 for (cs, t) in zip(coeff_stokes.T, transmit)]).T # Compute the derivative of each Stokes parameter with respect to the polarizer orientation - dI_dtheta1 = 2.*pol_eff[0]/N*(pol_eff[2]*np.cos(-2.*globals()["theta"][2]+2.*globals()["theta"][0])*(pol_flux_corr[1]-I_stokes) - - pol_eff[1]*np.cos(-2.*globals()["theta"][0]+2.*globals()["theta"][1])*(pol_flux_corr[2]-I_stokes) + - coeff_stokes_corr[0, 0]*(np.sin(2.*globals()["theta"][0])*Q_stokes-np.cos(2*globals()["theta"][0])*U_stokes)) - dI_dtheta2 = 2.*pol_eff[1]/N*(pol_eff[0]*np.cos(-2.*globals()["theta"][0]+2.*globals()["theta"][1])*(pol_flux_corr[2]-I_stokes) - - pol_eff[2]*np.cos(-2.*globals()["theta"][1]+2.*globals()["theta"][2])*(pol_flux_corr[0]-I_stokes) + - coeff_stokes_corr[0, 1]*(np.sin(2.*globals()["theta"][1])*Q_stokes-np.cos(2*globals()["theta"][1])*U_stokes)) - dI_dtheta3 = 2.*pol_eff[2]/N*(pol_eff[1]*np.cos(-2.*globals()["theta"][1]+2.*globals()["theta"][2])*(pol_flux_corr[0]-I_stokes) - - pol_eff[0]*np.cos(-2.*globals()["theta"][2]+2.*globals()["theta"][0])*(pol_flux_corr[1]-I_stokes) + - coeff_stokes_corr[0, 2]*(np.sin(2.*globals()["theta"][2])*Q_stokes-np.cos(2*globals()["theta"][2])*U_stokes)) + dI_dtheta1 = ( + 2.0 + * pol_eff[0] + / N + * ( + pol_eff[2] * np.cos(-2.0 * theta[2] + 2.0 * theta[0]) * (pol_flux_corr[1] - I_stokes) + - pol_eff[1] * np.cos(-2.0 * theta[0] + 2.0 * theta[1]) * (pol_flux_corr[2] - I_stokes) + + coeff_stokes_corr[0, 0] * (np.sin(2.0 * theta[0]) * Q_stokes - np.cos(2 * theta[0]) * U_stokes) + ) + ) + dI_dtheta2 = ( + 2.0 + * pol_eff[1] + / N + * ( + pol_eff[0] * np.cos(-2.0 * theta[0] + 2.0 * theta[1]) * (pol_flux_corr[2] - I_stokes) + - pol_eff[2] * np.cos(-2.0 * theta[1] + 2.0 * theta[2]) * (pol_flux_corr[0] - I_stokes) + + coeff_stokes_corr[0, 1] * (np.sin(2.0 * theta[1]) * Q_stokes - np.cos(2 * theta[1]) * U_stokes) + ) + ) + dI_dtheta3 = ( + 2.0 + * pol_eff[2] + / N + * ( + pol_eff[1] * np.cos(-2.0 * theta[1] + 2.0 * theta[2]) * (pol_flux_corr[0] - I_stokes) + - pol_eff[0] * np.cos(-2.0 * theta[2] + 2.0 * theta[0]) * (pol_flux_corr[1] - I_stokes) + + coeff_stokes_corr[0, 2] * (np.sin(2.0 * theta[2]) * Q_stokes - np.cos(2 * theta[2]) * U_stokes) + ) + ) dI_dtheta = np.array([dI_dtheta1, dI_dtheta2, dI_dtheta3]) - dQ_dtheta1 = 2.*pol_eff[0]/N*(np.cos(2.*globals()["theta"][0])*(pol_flux_corr[1]-pol_flux_corr[2]) - (pol_eff[2]*np.cos(-2.*globals() - ["theta"][2]+2.*globals()["theta"][0]) - pol_eff[1]*np.cos(-2.*globals()["theta"][0]+2.*globals()["theta"][1]))*Q_stokes + - coeff_stokes_corr[1, 0]*(np.sin(2.*globals()["theta"][0])*Q_stokes-np.cos(2*globals()["theta"][0])*U_stokes)) - dQ_dtheta2 = 2.*pol_eff[1]/N*(np.cos(2.*globals()["theta"][1])*(pol_flux_corr[2]-pol_flux_corr[0]) - (pol_eff[0]*np.cos(-2.*globals() - ["theta"][0]+2.*globals()["theta"][1]) - pol_eff[2]*np.cos(-2.*globals()["theta"][1]+2.*globals()["theta"][2]))*Q_stokes + - coeff_stokes_corr[1, 1]*(np.sin(2.*globals()["theta"][1])*Q_stokes-np.cos(2*globals()["theta"][1])*U_stokes)) - dQ_dtheta3 = 2.*pol_eff[2]/N*(np.cos(2.*globals()["theta"][2])*(pol_flux_corr[0]-pol_flux_corr[1]) - (pol_eff[1]*np.cos(-2.*globals() - ["theta"][1]+2.*globals()["theta"][2]) - pol_eff[0]*np.cos(-2.*globals()["theta"][2]+2.*globals()["theta"][0]))*Q_stokes + - coeff_stokes_corr[1, 2]*(np.sin(2.*globals()["theta"][2])*Q_stokes-np.cos(2*globals()["theta"][2])*U_stokes)) + dQ_dtheta1 
= ( + 2.0 + * pol_eff[0] + / N + * ( + np.cos(2.0 * theta[0]) * (pol_flux_corr[1] - pol_flux_corr[2]) + - (pol_eff[2] * np.cos(-2.0 * theta[2] + 2.0 * theta[0]) - pol_eff[1] * np.cos(-2.0 * theta[0] + 2.0 * theta[1])) * Q_stokes + + coeff_stokes_corr[1, 0] * (np.sin(2.0 * theta[0]) * Q_stokes - np.cos(2 * theta[0]) * U_stokes) + ) + ) + dQ_dtheta2 = ( + 2.0 + * pol_eff[1] + / N + * ( + np.cos(2.0 * theta[1]) * (pol_flux_corr[2] - pol_flux_corr[0]) + - (pol_eff[0] * np.cos(-2.0 * theta[0] + 2.0 * theta[1]) - pol_eff[2] * np.cos(-2.0 * theta[1] + 2.0 * theta[2])) * Q_stokes + + coeff_stokes_corr[1, 1] * (np.sin(2.0 * theta[1]) * Q_stokes - np.cos(2 * theta[1]) * U_stokes) + ) + ) + dQ_dtheta3 = ( + 2.0 + * pol_eff[2] + / N + * ( + np.cos(2.0 * theta[2]) * (pol_flux_corr[0] - pol_flux_corr[1]) + - (pol_eff[1] * np.cos(-2.0 * theta[1] + 2.0 * theta[2]) - pol_eff[0] * np.cos(-2.0 * theta[2] + 2.0 * theta[0])) * Q_stokes + + coeff_stokes_corr[1, 2] * (np.sin(2.0 * theta[2]) * Q_stokes - np.cos(2 * theta[2]) * U_stokes) + ) + ) dQ_dtheta = np.array([dQ_dtheta1, dQ_dtheta2, dQ_dtheta3]) - dU_dtheta1 = 2.*pol_eff[0]/N*(np.sin(2.*globals()["theta"][0])*(pol_flux_corr[1]-pol_flux_corr[2]) - (pol_eff[2]*np.cos(-2.*globals() - ["theta"][2]+2.*globals()["theta"][0]) - pol_eff[1]*np.cos(-2.*globals()["theta"][0]+2.*globals()["theta"][1]))*U_stokes + - coeff_stokes_corr[2, 0]*(np.sin(2.*globals()["theta"][0])*Q_stokes-np.cos(2*globals()["theta"][0])*U_stokes)) - dU_dtheta2 = 2.*pol_eff[1]/N*(np.sin(2.*globals()["theta"][1])*(pol_flux_corr[2]-pol_flux_corr[0]) - (pol_eff[0]*np.cos(-2.*globals() - ["theta"][0]+2.*globals()["theta"][1]) - pol_eff[2]*np.cos(-2.*globals()["theta"][1]+2.*globals()["theta"][2]))*U_stokes + - coeff_stokes_corr[2, 1]*(np.sin(2.*globals()["theta"][1])*Q_stokes-np.cos(2*globals()["theta"][1])*U_stokes)) - dU_dtheta3 = 2.*pol_eff[2]/N*(np.sin(2.*globals()["theta"][2])*(pol_flux_corr[0]-pol_flux_corr[1]) - (pol_eff[1]*np.cos(-2.*globals() - ["theta"][1]+2.*globals()["theta"][2]) - pol_eff[0]*np.cos(-2.*globals()["theta"][2]+2.*globals()["theta"][0]))*U_stokes + - coeff_stokes_corr[2, 2]*(np.sin(2.*globals()["theta"][2])*Q_stokes-np.cos(2*globals()["theta"][2])*U_stokes)) + dU_dtheta1 = ( + 2.0 + * pol_eff[0] + / N + * ( + np.sin(2.0 * theta[0]) * (pol_flux_corr[1] - pol_flux_corr[2]) + - (pol_eff[2] * np.cos(-2.0 * theta[2] + 2.0 * theta[0]) - pol_eff[1] * np.cos(-2.0 * theta[0] + 2.0 * theta[1])) * U_stokes + + coeff_stokes_corr[2, 0] * (np.sin(2.0 * theta[0]) * Q_stokes - np.cos(2 * theta[0]) * U_stokes) + ) + ) + dU_dtheta2 = ( + 2.0 + * pol_eff[1] + / N + * ( + np.sin(2.0 * theta[1]) * (pol_flux_corr[2] - pol_flux_corr[0]) + - (pol_eff[0] * np.cos(-2.0 * theta[0] + 2.0 * theta[1]) - pol_eff[2] * np.cos(-2.0 * theta[1] + 2.0 * theta[2])) * U_stokes + + coeff_stokes_corr[2, 1] * (np.sin(2.0 * theta[1]) * Q_stokes - np.cos(2 * theta[1]) * U_stokes) + ) + ) + dU_dtheta3 = ( + 2.0 + * pol_eff[2] + / N + * ( + np.sin(2.0 * theta[2]) * (pol_flux_corr[0] - pol_flux_corr[1]) + - (pol_eff[1] * np.cos(-2.0 * theta[1] + 2.0 * theta[2]) - pol_eff[0] * np.cos(-2.0 * theta[2] + 2.0 * theta[0])) * U_stokes + + coeff_stokes_corr[2, 2] * (np.sin(2.0 * theta[2]) * Q_stokes - np.cos(2 * theta[2]) * U_stokes) + ) + ) dU_dtheta = np.array([dU_dtheta1, dU_dtheta2, dU_dtheta3]) # Compute the uncertainty associated with the polarizers' orientation (see Kishimoto 1999) - s_I2_axis = np.sum([dI_dtheta[i]**2 * globals()["sigma_theta"][i]**2 for i in range(len(globals()["sigma_theta"]))], axis=0) - s_Q2_axis 

     # Compute the uncertainty associated with the polarizers' orientation (see Kishimoto 1999)
-    s_I2_axis = np.sum([dI_dtheta[i]**2 * globals()["sigma_theta"][i]**2 for i in range(len(globals()["sigma_theta"]))], axis=0)
-    s_Q2_axis = np.sum([dQ_dtheta[i]**2 * globals()["sigma_theta"][i]**2 for i in range(len(globals()["sigma_theta"]))], axis=0)
-    s_U2_axis = np.sum([dU_dtheta[i]**2 * globals()["sigma_theta"][i]**2 for i in range(len(globals()["sigma_theta"]))], axis=0)
+    s_I2_axis = np.sum([dI_dtheta[i] ** 2 * globals()["sigma_theta"][i] ** 2 for i in range(len(globals()["sigma_theta"]))], axis=0)
+    s_Q2_axis = np.sum([dQ_dtheta[i] ** 2 * globals()["sigma_theta"][i] ** 2 for i in range(len(globals()["sigma_theta"]))], axis=0)
+    s_U2_axis = np.sum([dU_dtheta[i] ** 2 * globals()["sigma_theta"][i] ** 2 for i in range(len(globals()["sigma_theta"]))], axis=0)
     # np.savetxt("output/sI_dir.txt", np.sqrt(s_I2_axis))
     # np.savetxt("output/sQ_dir.txt", np.sqrt(s_Q2_axis))
     # np.savetxt("output/sU_dir.txt", np.sqrt(s_U2_axis))
@@ -1227,34 +1429,92 @@ def compute_Stokes(data_array, error_array, data_mask, headers, FWHM=None, scale
         Stokes_cov[1, 1] += s_Q2_axis + s_Q2_stat
         Stokes_cov[2, 2] += s_U2_axis + s_U2_stat

+        # Save values to single header
+        header_stokes = pol_headers[0]
+
+    else:
+        all_I_stokes = np.zeros((np.unique(rotate).size, data_array.shape[1], data_array.shape[2]))
+        all_Q_stokes = np.zeros((np.unique(rotate).size, data_array.shape[1], data_array.shape[2]))
+        all_U_stokes = np.zeros((np.unique(rotate).size, data_array.shape[1], data_array.shape[2]))
+        all_Stokes_cov = np.zeros((np.unique(rotate).size, 3, 3, data_array.shape[1], data_array.shape[2]))
+        all_header_stokes = [
+            {},
+        ] * np.unique(rotate).size
+
+        for i, rot in enumerate(np.unique(rotate)):
+            rot_mask = rotate == rot
+            all_I_stokes[i], all_Q_stokes[i], all_U_stokes[i], all_Stokes_cov[i], all_header_stokes[i] = compute_Stokes(
+                data_array[rot_mask],
+                error_array[rot_mask],
+                data_mask,
+                [headers[i] for i in np.arange(len(headers))[rot_mask]],
+                FWHM=FWHM,
+                scale=scale,
+                smoothing=smoothing,
+                transmitcorr=transmitcorr,
+                integrate=False,
+            )
+        all_exp = np.array([float(h["exptime"]) for h in all_header_stokes])
+
+        I_stokes = np.sum([exp * I for exp, I in zip(all_exp, all_I_stokes)], axis=0) / all_exp.sum()
+        Q_stokes = np.sum([exp * Q for exp, Q in zip(all_exp, all_Q_stokes)], axis=0) / all_exp.sum()
+        U_stokes = np.sum([exp * U for exp, U in zip(all_exp, all_U_stokes)], axis=0) / all_exp.sum()
+        Stokes_cov = np.zeros((3, 3, I_stokes.shape[0], I_stokes.shape[1]))
+        for i in range(3):
+            Stokes_cov[i, i] = np.sum([exp**2 * cov for exp, cov in zip(all_exp, all_Stokes_cov[:, i, i])], axis=0) / all_exp.sum() ** 2
+            for j in [x for x in range(3) if x != i]:
+                Stokes_cov[i, j] = np.sqrt(np.sum([exp**2 * cov**2 for exp, cov in zip(all_exp, all_Stokes_cov[:, i, j])], axis=0) / all_exp.sum() ** 2)
+                Stokes_cov[j, i] = np.sqrt(np.sum([exp**2 * cov**2 for exp, cov in zip(all_exp, all_Stokes_cov[:, j, i])], axis=0) / all_exp.sum() ** 2)
+
+        # Save values to single header
+        header_stokes = all_header_stokes[0]
+        header_stokes["exptime"] = all_exp.sum()
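Each rotation group is reduced independently and the resulting Stokes maps are then averaged with exposure-time weights; the variance of such a weighted mean scales with the squared weights. A minimal sketch of the diagonal-term logic (illustrative names; the off-diagonal combination above follows a different, more conservative quadrature rule):

import numpy as np

def combine_by_exposure(maps, variances, exptimes):
    # Exposure-weighted mean of maps and the variance of that mean:
    # Var(sum w_i X_i / sum w_i) = sum w_i^2 Var(X_i) / (sum w_i)^2
    w = np.asarray(exptimes, dtype=float)
    mean = np.tensordot(w, maps, axes=1) / w.sum()
    var = np.tensordot(w**2, variances, axes=1) / w.sum() ** 2
    return mean, var

I_maps = np.ones((2, 4, 4))  # two toy observations
I_vars = np.full((2, 4, 4), 0.04)
I_comb, I_var = combine_by_exposure(I_maps, I_vars, [1200.0, 2400.0])
print(I_comb[0, 0], round(I_var[0, 0], 4))  # 1.0 0.0222 (the longer exposure dominates)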
+
+    # Nan handling :
+    fmax = np.finfo(np.float64).max
+
+    I_stokes[np.isnan(I_stokes)] = 0.0
+    Q_stokes[I_stokes == 0.0] = 0.0
+    U_stokes[I_stokes == 0.0] = 0.0
+    Q_stokes[np.isnan(Q_stokes)] = 0.0
+    U_stokes[np.isnan(U_stokes)] = 0.0
+    Stokes_cov[np.isnan(Stokes_cov)] = fmax
+
+    if integrate:
         # Compute integrated values for P, PA before any rotation
-        mask = np.logical_and(data_mask.astype(bool), (I_stokes > 0.))
+        mask = deepcopy(data_mask).astype(bool)
         I_diluted = I_stokes[mask].sum()
         Q_diluted = Q_stokes[mask].sum()
         U_diluted = U_stokes[mask].sum()
         I_diluted_err = np.sqrt(np.sum(Stokes_cov[0, 0][mask]))
         Q_diluted_err = np.sqrt(np.sum(Stokes_cov[1, 1][mask]))
         U_diluted_err = np.sqrt(np.sum(Stokes_cov[2, 2][mask]))
-        IQ_diluted_err = np.sqrt(np.sum(Stokes_cov[0, 1][mask]**2))
-        IU_diluted_err = np.sqrt(np.sum(Stokes_cov[0, 2][mask]**2))
-        QU_diluted_err = np.sqrt(np.sum(Stokes_cov[1, 2][mask]**2))
+        IQ_diluted_err = np.sqrt(np.sum(Stokes_cov[0, 1][mask] ** 2))
+        IU_diluted_err = np.sqrt(np.sum(Stokes_cov[0, 2][mask] ** 2))
+        QU_diluted_err = np.sqrt(np.sum(Stokes_cov[1, 2][mask] ** 2))

-        P_diluted = np.sqrt(Q_diluted**2+U_diluted**2)/I_diluted
-        P_diluted_err = (1./I_diluted)*np.sqrt((Q_diluted**2*Q_diluted_err**2 + U_diluted**2*U_diluted_err**2 + 2.*Q_diluted*U_diluted*QU_diluted_err)/(Q_diluted**2 + U_diluted**2) + ((Q_diluted/I_diluted)**2 + (U_diluted/I_diluted)**2)*I_diluted_err**2 - 2.*(Q_diluted/I_diluted)*IQ_diluted_err - 2.*(U_diluted/I_diluted)*IU_diluted_err)
+        P_diluted = np.sqrt(Q_diluted**2 + U_diluted**2) / I_diluted
+        # first-order error propagation for P = sqrt(Q^2 + U^2) / I
+        P_diluted_err = (1.0 / I_diluted) * np.sqrt(
+            (Q_diluted**2 * Q_diluted_err**2 + U_diluted**2 * U_diluted_err**2 + 2.0 * Q_diluted * U_diluted * QU_diluted_err) / (Q_diluted**2 + U_diluted**2)
+            + ((Q_diluted / I_diluted) ** 2 + (U_diluted / I_diluted) ** 2) * I_diluted_err**2
+            - 2.0 * (Q_diluted / I_diluted) * IQ_diluted_err
+            - 2.0 * (U_diluted / I_diluted) * IU_diluted_err
+        )

-        PA_diluted = princ_angle((90./np.pi)*np.arctan2(U_diluted, Q_diluted))
-        PA_diluted_err = (90./(np.pi*(Q_diluted**2 + U_diluted**2)))*np.sqrt(U_diluted**2*Q_diluted_err**2 + Q_diluted**2*U_diluted_err**2 - 2.*Q_diluted*U_diluted*QU_diluted_err)
+        PA_diluted = princ_angle((90.0 / np.pi) * np.arctan2(U_diluted, Q_diluted))
+        PA_diluted_err = (90.0 / (np.pi * (Q_diluted**2 + U_diluted**2))) * np.sqrt(
+            U_diluted**2 * Q_diluted_err**2 + Q_diluted**2 * U_diluted_err**2 - 2.0 * Q_diluted * U_diluted * QU_diluted_err
+        )

-        for header in headers:
-            header['P_int'] = (P_diluted, 'Integrated polarization degree')
-            header['P_int_err'] = (np.ceil(P_diluted_err*1000.)/1000., 'Integrated polarization degree error')
-            header['PA_int'] = (PA_diluted, 'Integrated polarization angle')
-            header['PA_int_err'] = (np.ceil(PA_diluted_err*10.)/10., 'Integrated polarization angle error')
+        header_stokes["P_int"] = (P_diluted, "Integrated polarization degree")
+        header_stokes["sP_int"] = (np.ceil(P_diluted_err * 1000.0) / 1000.0, "Integrated polarization degree error")
+        header_stokes["PA_int"] = (PA_diluted, "Integrated polarization angle")
+        header_stokes["sPA_int"] = (np.ceil(PA_diluted_err * 10.0) / 10.0, "Integrated polarization angle error")

-    return I_stokes, Q_stokes, U_stokes, Stokes_cov
+    return I_stokes, Q_stokes, U_stokes, Stokes_cov, header_stokes

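The integrated ("diluted") degree and angle above come from summing I, Q, U over the mask and propagating the summed covariances to first order. A simplified, self-contained sketch with scalar inputs (the I-Q and I-U cross terms kept by the pipeline are dropped here for brevity):

import numpy as np

def integrated_pol(I, Q, U, var_I, var_Q, var_U, cov_QU=0.0):
    P = np.sqrt(Q**2 + U**2) / I
    PA = (90.0 / np.pi) * np.arctan2(U, Q)  # half the arctan angle, in degrees
    s_P = (1.0 / I) * np.sqrt(
        (Q**2 * var_Q + U**2 * var_U + 2.0 * Q * U * cov_QU) / (Q**2 + U**2)
        + P**2 * var_I  # since (Q/I)^2 + (U/I)^2 = P^2
    )
    s_PA = (90.0 / (np.pi * (Q**2 + U**2))) * np.sqrt(U**2 * var_Q + Q**2 * var_U - 2.0 * Q * U * cov_QU)
    return P, PA, s_P, s_PA

print(integrated_pol(100.0, 3.0, 4.0, var_I=1.0, var_Q=0.25, var_U=0.25))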

-def compute_pol(I_stokes, Q_stokes, U_stokes, Stokes_cov, headers):
+def compute_pol(I_stokes, Q_stokes, U_stokes, Stokes_cov, header_stokes):
     """
     Compute the polarization degree (in %) and angle (in deg) and their
     respective errors from given Stokes parameters.
@@ -1271,8 +1531,8 @@ def compute_pol(I_stokes, Q_stokes, U_stokes, Stokes_cov, headers):
         +45/-45deg linear polarization intensity
     Stokes_cov : numpy.ndarray
         Covariance matrix of the Stokes parameters I, Q, U.
-    headers : header list
-        List of headers corresponding to the images in data_array.
+    header_stokes : astropy.io.fits.Header
+        Header associated with the Stokes fluxes.
     ----------
     Returns:
     P : numpy.ndarray
@@ -1291,32 +1551,41 @@ def compute_pol(I_stokes, Q_stokes, U_stokes, Stokes_cov, headers):
     s_PA_P : numpy.ndarray
         Image (2D floats) containing the Poisson noise error on the
         polarization angle.
-    new_headers : header list
-        Updated list of headers corresponding to the reduced images accounting
-        for the new orientation angle.
     """
     # Polarization degree and angle computation
-    mask = I_stokes > 0.
+    mask = I_stokes > 0.0
     I_pol = np.zeros(I_stokes.shape)
-    I_pol[mask] = np.sqrt(Q_stokes[mask]**2 + U_stokes[mask]**2)
+    I_pol[mask] = np.sqrt(Q_stokes[mask] ** 2 + U_stokes[mask] ** 2)
     P = np.zeros(I_stokes.shape)
-    P[mask] = I_pol[mask]/I_stokes[mask]
+    P[mask] = I_pol[mask] / I_stokes[mask]
     PA = np.zeros(I_stokes.shape)
-    PA[mask] = (90./np.pi)*np.arctan2(U_stokes[mask], Q_stokes[mask])
+    PA[mask] = (90.0 / np.pi) * np.arctan2(U_stokes[mask], Q_stokes[mask])

     if (P > 1).any():
-        print("WARNING : found {0:d} pixels for which P > 1".format(P[P > 1.].size))
+        print("WARNING : found {0:d} pixels for which P > 1".format(P[P > 1.0].size))

     # Associated errors
     fmax = np.finfo(np.float64).max
-    s_P = np.ones(I_stokes.shape)*fmax
-    s_PA = np.ones(I_stokes.shape)*fmax
+    s_P = np.ones(I_stokes.shape) * fmax
+    s_PA = np.ones(I_stokes.shape) * fmax

     # Propagate previously computed errors
-    s_P[mask] = (1/I_stokes[mask])*np.sqrt((Q_stokes[mask]**2*Stokes_cov[1, 1][mask] + U_stokes[mask]**2*Stokes_cov[2, 2][mask] + 2.*Q_stokes[mask]*U_stokes[mask]*Stokes_cov[1, 2][mask])/(Q_stokes[mask]**2 + U_stokes[mask]**2) +
-                ((Q_stokes[mask]/I_stokes[mask])**2 + (U_stokes[mask]/I_stokes[mask])**2)*Stokes_cov[0, 0][mask] - 2.*(Q_stokes[mask]/I_stokes[mask])*Stokes_cov[0, 1][mask] - 2.*(U_stokes[mask]/I_stokes[mask])*Stokes_cov[0, 2][mask])
-    s_PA[mask] = (90./(np.pi*(Q_stokes[mask]**2 + U_stokes[mask]**2)))*np.sqrt(U_stokes[mask]**2*Stokes_cov[1, 1][mask] +
-                 Q_stokes[mask]**2*Stokes_cov[2, 2][mask] - 2.*Q_stokes[mask]*U_stokes[mask]*Stokes_cov[1, 2][mask])
+    s_P[mask] = (1 / I_stokes[mask]) * np.sqrt(
+        (
+            Q_stokes[mask] ** 2 * Stokes_cov[1, 1][mask]
+            + U_stokes[mask] ** 2 * Stokes_cov[2, 2][mask]
+            + 2.0 * Q_stokes[mask] * U_stokes[mask] * Stokes_cov[1, 2][mask]
+        )
+        / (Q_stokes[mask] ** 2 + U_stokes[mask] ** 2)
+        + ((Q_stokes[mask] / I_stokes[mask]) ** 2 + (U_stokes[mask] / I_stokes[mask]) ** 2) * Stokes_cov[0, 0][mask]
+        - 2.0 * (Q_stokes[mask] / I_stokes[mask]) * Stokes_cov[0, 1][mask]
+        - 2.0 * (U_stokes[mask] / I_stokes[mask]) * Stokes_cov[0, 2][mask]
+    )
+    s_PA[mask] = (90.0 / (np.pi * (Q_stokes[mask] ** 2 + U_stokes[mask] ** 2))) * np.sqrt(
+        U_stokes[mask] ** 2 * Stokes_cov[1, 1][mask]
+        + Q_stokes[mask] ** 2 * Stokes_cov[2, 2][mask]
+        - 2.0 * Q_stokes[mask] * U_stokes[mask] * Stokes_cov[1, 2][mask]
+    )

     s_P[np.isnan(s_P)] = fmax
     s_PA[np.isnan(s_PA)] = fmax
@@ -1324,35 +1593,35 @@ def compute_pol(I_stokes, Q_stokes, U_stokes, Stokes_cov, headers):
     with warnings.catch_warnings(record=True) as _:
         mask2 = P**2 >= s_P**2
         debiased_P = np.zeros(I_stokes.shape)
-        debiased_P[mask2] = np.sqrt(P[mask2]**2 - s_P[mask2]**2)
+        debiased_P[mask2] = np.sqrt(P[mask2] ** 2 - s_P[mask2] ** 2)

-    if (debiased_P > 1.).any():
-        print("WARNING : found {0:d} pixels for which debiased_P > 100%".format(debiased_P[debiased_P > 1.].size))
+    if (debiased_P > 1.0).any():
+        print("WARNING : found {0:d} pixels for which debiased_P > 100%".format(debiased_P[debiased_P > 1.0].size))
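debiased_P applies the usual square-root debiasing: P is positive-definite, so noise biases it high, and the estimator subtracts the error in quadrature where it can. A standalone sketch of the same convention:

import numpy as np

def debias_P(P, s_P):
    # Where P exceeds its error, subtract the error in quadrature;
    # otherwise the debiased value is clipped to zero.
    out = np.zeros_like(P)
    ok = P**2 >= s_P**2
    out[ok] = np.sqrt(P[ok] ** 2 - s_P[ok] ** 2)
    return out

print(debias_P(np.array([0.05, 0.02]), np.array([0.01, 0.03])))  # [~0.049  0.]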

     # Compute the total exposure time so that
     # I_stokes*exp_tot = N_tot the total number of events
-    exp_tot = np.array([header['exptime'] for header in headers]).sum()
+    exp_tot = header_stokes["exptime"]
     # print("Total exposure time : {} sec".format(exp_tot))
-    N_obs = I_stokes*exp_tot
+    N_obs = I_stokes * exp_tot

     # Errors on P, PA supposing Poisson noise
-    s_P_P = np.ones(I_stokes.shape)*fmax
-    s_P_P[mask] = np.sqrt(2.)/np.sqrt(N_obs[mask])*100.
-    s_PA_P = np.ones(I_stokes.shape)*fmax
-    s_PA_P[mask2] = s_P_P[mask2]/(2.*P[mask2])*180./np.pi
+    s_P_P = np.ones(I_stokes.shape) * fmax
+    s_P_P[mask] = np.sqrt(2.0) / np.sqrt(N_obs[mask]) * 100.0
+    s_PA_P = np.ones(I_stokes.shape) * fmax
+    s_PA_P[mask2] = s_P_P[mask2] / (2.0 * P[mask2]) * 180.0 / np.pi

     # Nan handling :
-    P[np.isnan(P)] = 0.
+    P[np.isnan(P)] = 0.0
     s_P[np.isnan(s_P)] = fmax
     s_PA[np.isnan(s_PA)] = fmax
-    debiased_P[np.isnan(debiased_P)] = 0.
+    debiased_P[np.isnan(debiased_P)] = 0.0
     s_P_P[np.isnan(s_P_P)] = fmax
     s_PA_P[np.isnan(s_PA_P)] = fmax

     return P, debiased_P, s_P, s_P_P, PA, s_PA, s_PA_P

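The Poisson floor above treats I_stokes * exp_tot as the detected event count N, giving sigma_P = sqrt(2)/sqrt(N). A sketch mirroring the same conventions (s_P in percent while P stays a fraction, as in the code above):

import numpy as np

def poisson_pol_errors(I_stokes, exp_tot, P):
    N_obs = I_stokes * exp_tot  # approximate event count
    s_P_P = np.sqrt(2.0) / np.sqrt(N_obs) * 100.0
    s_PA_P = s_P_P / (2.0 * P) * 180.0 / np.pi
    return s_P_P, s_PA_P

s_P_P, s_PA_P = poisson_pol_errors(I_stokes=0.5, exp_tot=2000.0, P=0.05)
print(round(s_P_P, 2))  # 4.47 (percent, for N = 1000 events)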
""" # Apply cuts if SNRi_cut is not None: - SNRi = I_stokes/np.sqrt(Stokes_cov[0, 0]) + SNRi = I_stokes / np.sqrt(Stokes_cov[0, 0]) mask = SNRi < SNRi_cut eps = 1e-5 for i in range(I_stokes.shape[0]): for j in range(I_stokes.shape[1]): if mask[i, j]: - I_stokes[i, j] = eps*np.sqrt(Stokes_cov[0, 0][i, j]) - Q_stokes[i, j] = eps*np.sqrt(Stokes_cov[1, 1][i, j]) - U_stokes[i, j] = eps*np.sqrt(Stokes_cov[2, 2][i, j]) + I_stokes[i, j] = eps * np.sqrt(Stokes_cov[0, 0][i, j]) + Q_stokes[i, j] = eps * np.sqrt(Stokes_cov[1, 1][i, j]) + U_stokes[i, j] = eps * np.sqrt(Stokes_cov[2, 2][i, j]) # Rotate I_stokes, Q_stokes, U_stokes using rotation matrix - if ang is None: - ang = np.zeros((len(headers),)) - for i, head in enumerate(headers): - ang[i] = -head['orientat'] - ang = ang.mean() - alpha = np.pi/180.*ang - mrot = np.array([[1., 0., 0.], - [0., np.cos(2.*alpha), np.sin(2.*alpha)], - [0, -np.sin(2.*alpha), np.cos(2.*alpha)]]) + ang = -float(header_stokes["ORIENTAT"]) + alpha = np.pi / 180.0 * ang + mrot = np.array([[1.0, 0.0, 0.0], [0.0, np.cos(2.0 * alpha), np.sin(2.0 * alpha)], [0, -np.sin(2.0 * alpha), np.cos(2.0 * alpha)]]) - old_center = np.array(I_stokes.shape)/2 - shape = np.fix(np.array(I_stokes.shape)*np.sqrt(2.5)).astype(int) - new_center = np.array(shape)/2 + old_center = np.array(I_stokes.shape) / 2 + shape = np.fix(np.array(I_stokes.shape) * np.sqrt(2.5)).astype(int) + new_center = np.array(shape) / 2 I_stokes = zeropad(I_stokes, shape) Q_stokes = zeropad(Q_stokes, shape) @@ -1439,15 +1698,15 @@ def rotate_Stokes(I_stokes, Q_stokes, U_stokes, Stokes_cov, data_mask, headers, new_Stokes_cov = np.zeros((*Stokes_cov.shape[:-2], *shape)) # Rotate original images using scipy.ndimage.rotate - new_I_stokes = sc_rotate(I_stokes, ang, order=1, reshape=False, cval=0.) - new_Q_stokes = sc_rotate(Q_stokes, ang, order=1, reshape=False, cval=0.) - new_U_stokes = sc_rotate(U_stokes, ang, order=1, reshape=False, cval=0.) - new_data_mask = sc_rotate(data_mask.astype(float)*10., ang, order=1, reshape=False, cval=0.) - new_data_mask[new_data_mask < 2] = 0. + new_I_stokes = sc_rotate(I_stokes, ang, order=1, reshape=False, cval=0.0) + new_Q_stokes = sc_rotate(Q_stokes, ang, order=1, reshape=False, cval=0.0) + new_U_stokes = sc_rotate(U_stokes, ang, order=1, reshape=False, cval=0.0) + new_data_mask = sc_rotate(data_mask.astype(float) * 10.0, ang, order=1, reshape=False, cval=0.0) + new_data_mask[new_data_mask < 1.0] = 0.0 new_data_mask = new_data_mask.astype(bool) for i in range(3): for j in range(3): - new_Stokes_cov[i, j] = sc_rotate(Stokes_cov[i, j], ang, order=1, reshape=False, cval=0.) 

-    old_center = np.array(I_stokes.shape)/2
-    shape = np.fix(np.array(I_stokes.shape)*np.sqrt(2.5)).astype(int)
-    new_center = np.array(shape)/2
+    old_center = np.array(I_stokes.shape) / 2
+    shape = np.fix(np.array(I_stokes.shape) * np.sqrt(2.5)).astype(int)
+    new_center = np.array(shape) / 2

     I_stokes = zeropad(I_stokes, shape)
     Q_stokes = zeropad(Q_stokes, shape)
@@ -1439,15 +1698,15 @@ def rotate_Stokes(I_stokes, Q_stokes, U_stokes, Stokes_cov, data_mask, headers,
     new_Stokes_cov = np.zeros((*Stokes_cov.shape[:-2], *shape))

     # Rotate original images using scipy.ndimage.rotate
-    new_I_stokes = sc_rotate(I_stokes, ang, order=1, reshape=False, cval=0.)
-    new_Q_stokes = sc_rotate(Q_stokes, ang, order=1, reshape=False, cval=0.)
-    new_U_stokes = sc_rotate(U_stokes, ang, order=1, reshape=False, cval=0.)
-    new_data_mask = sc_rotate(data_mask.astype(float)*10., ang, order=1, reshape=False, cval=0.)
-    new_data_mask[new_data_mask < 2] = 0.
+    new_I_stokes = sc_rotate(I_stokes, ang, order=1, reshape=False, cval=0.0)
+    new_Q_stokes = sc_rotate(Q_stokes, ang, order=1, reshape=False, cval=0.0)
+    new_U_stokes = sc_rotate(U_stokes, ang, order=1, reshape=False, cval=0.0)
+    new_data_mask = sc_rotate(data_mask.astype(float) * 10.0, ang, order=1, reshape=False, cval=0.0)
+    new_data_mask[new_data_mask < 1.0] = 0.0
     new_data_mask = new_data_mask.astype(bool)
     for i in range(3):
         for j in range(3):
-            new_Stokes_cov[i, j] = sc_rotate(Stokes_cov[i, j], ang, order=1, reshape=False, cval=0.)
+            new_Stokes_cov[i, j] = sc_rotate(Stokes_cov[i, j], ang, order=1, reshape=False, cval=0.0)
         new_Stokes_cov[i, i] = np.abs(new_Stokes_cov[i, i])

     for i in range(shape[0]):
@@ -1456,34 +1715,30 @@ def rotate_Stokes(I_stokes, Q_stokes, U_stokes, Stokes_cov, data_mask, headers,
         for j in range(shape[1]):
             new_Stokes_cov[:, :, i, j] = np.dot(mrot, np.dot(new_Stokes_cov[:, :, i, j], mrot.T))
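The per-pixel double loop applies the congruence transform C' = M C M^T to every 3x3 covariance. The same operation can be written as one vectorized einsum call; a sketch (an alternative formulation, not how the patch does it):

import numpy as np

def rotate_cov(cov, mrot):
    # C'_{ad} = M_{ab} C_{bc} M_{dc}, applied at every pixel of a (3, 3, ny, nx) stack
    return np.einsum("ab,bcyx,dc->adyx", mrot, cov, mrot)

a = np.deg2rad(30.0)
mrot = np.array([[1.0, 0.0, 0.0], [0.0, np.cos(2 * a), np.sin(2 * a)], [0.0, -np.sin(2 * a), np.cos(2 * a)]])
cov = np.tile(np.eye(3)[:, :, None, None], (1, 1, 2, 2))  # isotropic toy covariance
print(np.allclose(rotate_cov(cov, mrot)[:, :, 0, 0], np.eye(3)))  # True: invariant under rotation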

     # Update headers to new angle
-    new_headers = []
-    mrot = np.array([[np.cos(-alpha), -np.sin(-alpha)],
-                     [np.sin(-alpha), np.cos(-alpha)]])
-    for header in headers:
-        new_header = deepcopy(header)
-        new_header['orientat'] = header['orientat'] + ang
-        new_wcs = WCS(header).celestial.deepcopy()
+    mrot = np.array([[np.cos(-alpha), -np.sin(-alpha)], [np.sin(-alpha), np.cos(-alpha)]])

-        new_wcs.wcs.pc = np.dot(mrot, new_wcs.wcs.pc)
-        new_wcs.wcs.crpix = np.dot(mrot, new_wcs.wcs.crpix - old_center[::-1]) + new_center[::-1]
-        new_wcs.wcs.set()
-        for key, val in new_wcs.to_header().items():
-            new_header.set(key, val)
-        if new_wcs.wcs.pc[0, 0] == 1.:
-            new_header.set('PC1_1', 1.)
-        if new_wcs.wcs.pc[1, 1] == 1.:
-            new_header.set('PC2_2', 1.)
+    new_header_stokes = deepcopy(header_stokes)
+    new_wcs = WCS(header_stokes).celestial.deepcopy()

-        new_headers.append(new_header)
+    new_wcs.wcs.pc = np.dot(mrot, new_wcs.wcs.pc)
+    new_wcs.wcs.crpix = np.dot(mrot, new_wcs.wcs.crpix - old_center[::-1]) + new_center[::-1]
+    new_wcs.wcs.set()
+    for key, val in new_wcs.to_header().items():
+        new_header_stokes.set(key, val)
+    if new_wcs.wcs.pc[0, 0] == 1.0:
+        new_header_stokes.set("PC1_1", 1.0)
+    if new_wcs.wcs.pc[1, 1] == 1.0:
+        new_header_stokes.set("PC2_2", 1.0)
+    new_header_stokes["ORIENTAT"] += ang

     # Nan handling :
     fmax = np.finfo(np.float64).max
-    new_I_stokes[np.isnan(new_I_stokes)] = 0.
-    new_Q_stokes[new_I_stokes == 0.] = 0.
-    new_U_stokes[new_I_stokes == 0.] = 0.
-    new_Q_stokes[np.isnan(new_Q_stokes)] = 0.
-    new_U_stokes[np.isnan(new_U_stokes)] = 0.
+    new_I_stokes[np.isnan(new_I_stokes)] = 0.0
+    new_Q_stokes[new_I_stokes == 0.0] = 0.0
+    new_U_stokes[new_I_stokes == 0.0] = 0.0
+    new_Q_stokes[np.isnan(new_Q_stokes)] = 0.0
+    new_U_stokes[np.isnan(new_U_stokes)] = 0.0
     new_Stokes_cov[np.isnan(new_Stokes_cov)] = fmax

     # Compute updated integrated values for P, PA
@@ -1494,28 +1749,32 @@ def rotate_Stokes(I_stokes, Q_stokes, U_stokes, Stokes_cov, data_mask, headers,
     I_diluted_err = np.sqrt(np.sum(new_Stokes_cov[0, 0][mask]))
     Q_diluted_err = np.sqrt(np.sum(new_Stokes_cov[1, 1][mask]))
     U_diluted_err = np.sqrt(np.sum(new_Stokes_cov[2, 2][mask]))
-    IQ_diluted_err = np.sqrt(np.sum(new_Stokes_cov[0, 1][mask]**2))
-    IU_diluted_err = np.sqrt(np.sum(new_Stokes_cov[0, 2][mask]**2))
-    QU_diluted_err = np.sqrt(np.sum(new_Stokes_cov[1, 2][mask]**2))
+    IQ_diluted_err = np.sqrt(np.sum(new_Stokes_cov[0, 1][mask] ** 2))
+    IU_diluted_err = np.sqrt(np.sum(new_Stokes_cov[0, 2][mask] ** 2))
+    QU_diluted_err = np.sqrt(np.sum(new_Stokes_cov[1, 2][mask] ** 2))

-    P_diluted = np.sqrt(Q_diluted**2+U_diluted**2)/I_diluted
-    P_diluted_err = (1./I_diluted)*np.sqrt((Q_diluted**2*Q_diluted_err**2 + U_diluted**2*U_diluted_err**2 + 2.*Q_diluted*U_diluted*QU_diluted_err)/(Q_diluted**2 + U_diluted **
-                    2) + ((Q_diluted/I_diluted)**2 + (U_diluted/I_diluted)**2)*I_diluted_err**2 - 2.*(Q_diluted/I_diluted)*IQ_diluted_err - 2.*(U_diluted/I_diluted)*IU_diluted_err)
+    P_diluted = np.sqrt(Q_diluted**2 + U_diluted**2) / I_diluted
+    P_diluted_err = (1.0 / I_diluted) * np.sqrt(
+        (Q_diluted**2 * Q_diluted_err**2 + U_diluted**2 * U_diluted_err**2 + 2.0 * Q_diluted * U_diluted * QU_diluted_err) / (Q_diluted**2 + U_diluted**2)
+        + ((Q_diluted / I_diluted) ** 2 + (U_diluted / I_diluted) ** 2) * I_diluted_err**2
+        - 2.0 * (Q_diluted / I_diluted) * IQ_diluted_err
+        - 2.0 * (U_diluted / I_diluted) * IU_diluted_err
+    )

-    PA_diluted = princ_angle((90./np.pi)*np.arctan2(U_diluted, Q_diluted))
-    PA_diluted_err = (90./(np.pi*(Q_diluted**2 + U_diluted**2)))*np.sqrt(U_diluted**2*Q_diluted_err **
-                     2 + Q_diluted**2*U_diluted_err**2 - 2.*Q_diluted*U_diluted*QU_diluted_err)
+    PA_diluted = princ_angle((90.0 / np.pi) * np.arctan2(U_diluted, Q_diluted))
+    PA_diluted_err = (90.0 / (np.pi * (Q_diluted**2 + U_diluted**2))) * np.sqrt(
+        U_diluted**2 * Q_diluted_err**2 + Q_diluted**2 * U_diluted_err**2 - 2.0 * Q_diluted * U_diluted * QU_diluted_err
+    )

-    for header in new_headers:
-        header['P_int'] = (P_diluted, 'Integrated polarization degree')
-        header['P_int_err'] = (np.ceil(P_diluted_err*1000.)/1000., 'Integrated polarization degree error')
-        header['PA_int'] = (PA_diluted, 'Integrated polarization angle')
-        header['PA_int_err'] = (np.ceil(PA_diluted_err*10.)/10., 'Integrated polarization angle error')
+    new_header_stokes["P_int"] = (P_diluted, "Integrated polarization degree")
+    new_header_stokes["sP_int"] = (np.ceil(P_diluted_err * 1000.0) / 1000.0, "Integrated polarization degree error")
+    new_header_stokes["PA_int"] = (PA_diluted, "Integrated polarization angle")
+    new_header_stokes["sPA_int"] = (np.ceil(PA_diluted_err * 10.0) / 10.0, "Integrated polarization angle error")

-    return new_I_stokes, new_Q_stokes, new_U_stokes, new_Stokes_cov, new_data_mask, new_headers
+    return new_I_stokes, new_Q_stokes, new_U_stokes, new_Stokes_cov, new_data_mask, new_header_stokes

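The WCS update above left-multiplies the PC matrix by the inverse rotation and moves CRPIX with the padded image center (the [::-1] handles numpy's (y, x) versus FITS (x, y) axis order). A bare-bones sketch of those two operations, with illustrative values:

import numpy as np

def rotate_wcs_terms(pc, crpix, old_center, new_center, ang_deg):
    a = -np.deg2rad(ang_deg)  # inverse rotation, as with -alpha above
    m = np.array([[np.cos(a), -np.sin(a)], [np.sin(a), np.cos(a)]])
    return m @ pc, m @ (crpix - old_center) + new_center

new_pc, new_crpix = rotate_wcs_terms(np.eye(2), np.array([50.0, 50.0]), np.array([50.0, 50.0]), np.array([64.0, 64.0]), 30.0)
print(new_crpix)  # the reference pixel follows the new, padded center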

-def rotate_data(data_array, error_array, data_mask, headers, ang):
+def rotate_data(data_array, error_array, data_mask, headers):
     """
     Use scipy.ndimage.rotate to rotate each image of the dataset to its
     North-up orientation and update the corresponding header.
@@ -1530,9 +1789,6 @@ def rotate_data(data_array, error_array, data_mask, headers, ang):
         2D boolean array delimiting the data to work on.
     headers : header list
         List of headers corresponding to the reduced images.
-    ang : float
-        Rotation angle (in degrees) that should be applied to the Stokes
-        parameters
     ----------
     Returns:
     new_data_array : numpy.ndarray
@@ -1546,46 +1802,49 @@ def rotate_data(data_array, error_array, data_mask, headers, ang):
         for the new orientation angle.
     """
     # Pad the images so that the rotation cannot clip the data
-    alpha = ang*np.pi/180.
-    old_center = np.array(data_array[0].shape)/2
-    shape = np.fix(np.array(data_array[0].shape)*np.sqrt(2.5)).astype(int)
-    new_center = np.array(shape)/2
+    old_center = np.array(data_array[0].shape) / 2
+    shape = np.fix(np.array(data_array[0].shape) * np.sqrt(2.5)).astype(int)
+    new_center = np.array(shape) / 2

     data_array = zeropad(data_array, [data_array.shape[0], *shape])
     error_array = zeropad(error_array, [error_array.shape[0], *shape])
     data_mask = zeropad(data_mask, shape)
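The maps are zero-padded before rotation so no data can be clipped: a square rotated by 45 degrees needs sqrt(2) more room per axis, and the code allows sqrt(2.5) for margin (assumption: that is the intent of the factor). A sketch of that padding step in plain numpy (the real zeropad lives in lib.reduction):

import numpy as np

def pad_for_rotation(img, factor=np.sqrt(2.5)):
    shape = np.fix(np.array(img.shape) * factor).astype(int)
    out = np.zeros(shape, dtype=img.dtype)
    off = (shape - np.array(img.shape)) // 2
    out[off[0]:off[0] + img.shape[0], off[1]:off[1] + img.shape[1]] = img
    return out  # original image centered in the enlarged frame

print(pad_for_rotation(np.ones((10, 10))).shape)  # (15, 15)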

+    # Rotate original images using scipy.ndimage.rotate
+    new_headers = []
     new_data_array = []
     new_error_array = []
-    for i in range(data_array.shape[0]):
-        new_data_array.append(sc_rotate(data_array[i], ang, order=1, reshape=False, cval=0.))
-        new_error_array.append(sc_rotate(error_array[i], ang, order=1, reshape=False, cval=0.))
-    new_data_array = np.array(new_data_array)
-    new_error_array = np.array(new_error_array)
-    new_data_mask = sc_rotate(data_mask*10., ang, order=1, reshape=False, cval=0.)
-    new_data_mask[new_data_mask < 2] = 0.
-    new_data_mask = new_data_mask.astype(bool)
+    new_data_mask = []
+    for i, header in zip(range(data_array.shape[0]), headers):
+        ang = -float(header["ORIENTAT"])
+        alpha = ang * np.pi / 180.0

-    for i in range(new_data_array.shape[0]):
-        new_data_array[i][new_data_array[i] < 0.] = 0.
+        new_data_array.append(sc_rotate(data_array[i], ang, order=1, reshape=False, cval=0.0))
+        new_error_array.append(sc_rotate(error_array[i], ang, order=1, reshape=False, cval=0.0))
+        new_data_mask.append(sc_rotate(data_mask * 10.0, ang, order=1, reshape=False, cval=0.0))

-    # Update headers to new angle
-    new_headers = []
-    mrot = np.array([[np.cos(-alpha), -np.sin(-alpha)], [np.sin(-alpha), np.cos(-alpha)]])
-    for header in headers:
+        # Update headers to new angle
+        mrot = np.array([[np.cos(-alpha), -np.sin(-alpha)], [np.sin(-alpha), np.cos(-alpha)]])
         new_header = deepcopy(header)
-        new_header['orientat'] = header['orientat'] + ang
         new_wcs = WCS(header).celestial.deepcopy()
-
         new_wcs.wcs.pc[:2, :2] = np.dot(mrot, new_wcs.wcs.pc[:2, :2])
         new_wcs.wcs.crpix[:2] = np.dot(mrot, new_wcs.wcs.crpix[:2] - old_center[::-1]) + new_center[::-1]
         new_wcs.wcs.set()
         for key, val in new_wcs.to_header().items():
             new_header[key] = val
-
+        new_header["ORIENTAT"] = np.arccos(new_wcs.celestial.wcs.pc[0, 0]) * 180.0 / np.pi
+        new_header["ROTATE"] = ang
         new_headers.append(new_header)
-    globals()['theta'] = globals()["theta"] - alpha
+
+    new_data_array = np.array(new_data_array)
+    new_error_array = np.array(new_error_array)
+    new_data_mask = np.array(new_data_mask).sum(axis=0)
+    new_data_mask[new_data_mask < 1.0] = 0.0
+    new_data_mask = new_data_mask.astype(bool)
+
+    for i in range(new_data_array.shape[0]):
+        new_data_array[i][new_data_array[i] < 0.0] = 0.0

     return new_data_array, new_error_array, new_data_mask, new_headers
\ No newline at end of file
diff --git a/package/lib/utils.py b/package/lib/utils.py
index 51a4568..bf4128c 100755
--- a/package/lib/utils.py
+++ b/package/lib/utils.py
@@ -1,10 +1,11 @@
 import numpy as np

+
 def rot2D(ang):
     """
     Return the 2D rotation matrix of given angle in degrees
     """
-    alpha = np.pi*ang/180
+    alpha = np.pi * ang / 180
     return np.array([[np.cos(alpha), np.sin(alpha)], [-np.sin(alpha), np.cos(alpha)]])


@@ -17,10 +18,10 @@ def princ_angle(ang):
         A = np.array([ang])
     else:
         A = np.array(ang)
-    while np.any(A < 0.):
-        A[A < 0.] = A[A < 0.]+360.
-    while np.any(A >= 180.):
-        A[A >= 180.] = A[A >= 180.]-180.
+    while np.any(A < 0.0):
+        A[A < 0.0] = A[A < 0.0] + 360.0
+    while np.any(A >= 180.0):
+        A[A >= 180.0] = A[A >= 180.0] - 180.0
     if type(ang) is type(A):
         return A
     else:
@@ -31,16 +32,31 @@ def sci_not(v, err, rnd=1, out=str):
     """
     Return the scientific error notation as a string.
     """
-    power = - int(('%E' % v)[-3:])+1
-    output = [r"({0}".format(round(v*10**power, rnd)), round(v*10**power, rnd)]
+    power = -int(("%E" % v)[-3:]) + 1
+    output = [r"({0}".format(round(v * 10**power, rnd)), round(v * 10**power, rnd)]
     if isinstance(err, list):
         for error in err:
-            output[0] += r" $\pm$ {0}".format(round(error*10**power, rnd))
-            output.append(round(error*10**power, rnd))
+            output[0] += r" $\pm$ {0}".format(round(error * 10**power, rnd))
+            output.append(round(error * 10**power, rnd))
     else:
-        output[0] += r" $\pm$ {0}".format(round(err*10**power, rnd))
-        output.append(round(err*10**power, rnd))
-    if out == str:
-        return output[0]+r")e{0}".format(-power)
+        output[0] += r" $\pm$ {0}".format(round(err * 10**power, rnd))
+        output.append(round(err * 10**power, rnd))
+    if out is str:
+        return output[0] + r")e{0}".format(-power)
     else:
         return *output[1:], -power
+
+
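princ_angle above folds any angle into [0, 180), the natural range for polarization position angles (PA and PA + 180 describe the same orientation); the two while-loops are equivalent to a modulo. A quick demonstration:

import numpy as np

def princ_angle_demo(ang):
    # same result as the add-360 / subtract-180 loops in princ_angle
    return float(np.mod(ang, 180.0))

print(princ_angle_demo(-30.0))  # 150.0
print(princ_angle_demo(270.0))  # 90.0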
+ """ + if (abs(PC21) > abs(PC22)) and (PC21 >= 0): + orient = -np.arccos(PC22) * 180.0 / np.pi + elif (abs(PC21) > abs(PC22)) and (PC21 < 0): + orient = np.arccos(PC22) * 180.0 / np.pi + elif (abs(PC21) < abs(PC22)) and (PC22 >= 0): + orient = np.arccos(PC22) * 180.0 / np.pi + elif (abs(PC21) < abs(PC22)) and (PC22 < 0): + orient = -np.arccos(PC22) * 180.0 / np.pi + return orient diff --git a/package/overplot_IC5063.py b/package/overplot_IC5063.py index 3cb55c3..6a4fb1e 100755 --- a/package/overplot_IC5063.py +++ b/package/overplot_IC5063.py @@ -1,7 +1,7 @@ #!/usr/bin/python3 -from astropy.io import fits import numpy as np -from lib.plots import overplot_radio, overplot_pol +from astropy.io import fits +from lib.plots import overplot_pol, overplot_radio from matplotlib.colors import LogNorm Stokes_UV = fits.open("./data/IC5063/5918/IC5063_FOC_b0.10arcsec_c0.20arcsec.fits") @@ -14,31 +14,37 @@ Stokes_357GHz = fits.open("./data/IC5063/radio/IC5063_357GHz.fits") Stokes_IR = fits.open("./data/IC5063/IR/u2e65g01t_c0f_rot.fits") # levelsMorganti = np.array([1.,2.,3.,8.,16.,32.,64.,128.]) -levelsMorganti = np.logspace(-0.1249, 1.97, 7)/100. +levelsMorganti = np.logspace(-0.1249, 1.97, 7) / 100.0 -levels18GHz = levelsMorganti*Stokes_18GHz[0].data.max() +levels18GHz = levelsMorganti * Stokes_18GHz[0].data.max() A = overplot_radio(Stokes_UV, Stokes_18GHz) -A.plot(levels=levels18GHz, SNRp_cut=2.0, SNRi_cut=10.0, savename='./plots/IC5063/18GHz_overplot.pdf', vec_scale=None) +A.plot(levels=levels18GHz, SNRp_cut=2.0, SNRi_cut=10.0, savename="./plots/IC5063/18GHz_overplot.pdf", vec_scale=None) -levels24GHz = levelsMorganti*Stokes_24GHz[0].data.max() +levels24GHz = levelsMorganti * Stokes_24GHz[0].data.max() B = overplot_radio(Stokes_UV, Stokes_24GHz) -B.plot(levels=levels24GHz, SNRp_cut=2.0, SNRi_cut=10.0, savename='./plots/IC5063/24GHz_overplot.pdf', vec_scale=None) +B.plot(levels=levels24GHz, SNRp_cut=2.0, SNRi_cut=10.0, savename="./plots/IC5063/24GHz_overplot.pdf", vec_scale=None) -levels103GHz = levelsMorganti*Stokes_103GHz[0].data.max() +levels103GHz = levelsMorganti * Stokes_103GHz[0].data.max() C = overplot_radio(Stokes_UV, Stokes_103GHz) -C.plot(levels=levels103GHz, SNRp_cut=2.0, SNRi_cut=10.0, savename='./plots/IC5063/103GHz_overplot.pdf', vec_scale=None) +C.plot(levels=levels103GHz, SNRp_cut=2.0, SNRi_cut=10.0, savename="./plots/IC5063/103GHz_overplot.pdf", vec_scale=None) -levels229GHz = levelsMorganti*Stokes_229GHz[0].data.max() +levels229GHz = levelsMorganti * Stokes_229GHz[0].data.max() D = overplot_radio(Stokes_UV, Stokes_229GHz) -D.plot(levels=levels229GHz, SNRp_cut=2.0, SNRi_cut=10.0, savename='./plots/IC5063/229GHz_overplot.pdf', vec_scale=None) +D.plot(levels=levels229GHz, SNRp_cut=2.0, SNRi_cut=10.0, savename="./plots/IC5063/229GHz_overplot.pdf", vec_scale=None) -levels357GHz = levelsMorganti*Stokes_357GHz[0].data.max() +levels357GHz = levelsMorganti * Stokes_357GHz[0].data.max() E = overplot_radio(Stokes_UV, Stokes_357GHz) -E.plot(levels=levels357GHz, SNRp_cut=2.0, SNRi_cut=10.0, savename='./plots/IC5063/357GHz_overplot.pdf', vec_scale=None) +E.plot(levels=levels357GHz, SNRp_cut=2.0, SNRi_cut=10.0, savename="./plots/IC5063/357GHz_overplot.pdf", vec_scale=None) # F = overplot_pol(Stokes_UV, Stokes_S2) # F.plot(SNRp_cut=3.0, SNRi_cut=80.0, savename='./plots/IC5063/S2_overplot.pdf', norm=LogNorm(vmin=5e-20,vmax=5e-18)) -G = overplot_pol(Stokes_UV, Stokes_IR, cmap='inferno') -G.plot(SNRp_cut=2.0, SNRi_cut=10.0, savename='./plots/IC5063/IR_overplot.pdf', vec_scale=None, - 

 # F = overplot_pol(Stokes_UV, Stokes_S2)
 # F.plot(SNRp_cut=3.0, SNRi_cut=80.0, savename='./plots/IC5063/S2_overplot.pdf', norm=LogNorm(vmin=5e-20,vmax=5e-18))
-G = overplot_pol(Stokes_UV, Stokes_IR, cmap='inferno')
-G.plot(SNRp_cut=2.0, SNRi_cut=10.0, savename='./plots/IC5063/IR_overplot.pdf', vec_scale=None,
-       norm=LogNorm(Stokes_IR[0].data.max()*Stokes_IR[0].header['photflam']/1e3, Stokes_IR[0].data.max()*Stokes_IR[0].header['photflam']), cmap='inferno_r')
+G = overplot_pol(Stokes_UV, Stokes_IR, cmap="inferno")
+G.plot(
+    SNRp_cut=2.0,
+    SNRi_cut=10.0,
+    savename="./plots/IC5063/IR_overplot.pdf",
+    vec_scale=None,
+    norm=LogNorm(Stokes_IR[0].data.max() * Stokes_IR[0].header["photflam"] / 1e3, Stokes_IR[0].data.max() * Stokes_IR[0].header["photflam"]),
+    cmap="inferno_r",
+)
diff --git a/package/overplot_MRK463E.py b/package/overplot_MRK463E.py
index fed7e2f..5c3411d 100755
--- a/package/overplot_MRK463E.py
+++ b/package/overplot_MRK463E.py
@@ -1,6 +1,6 @@
 #!/usr/bin/python3
-from astropy.io import fits
 import numpy as np
+from astropy.io import fits
 from lib.plots import overplot_chandra, overplot_pol
 from matplotlib.colors import LogNorm

@@ -8,13 +8,13 @@ Stokes_UV = fits.open("./data/MRK463E/5960/MRK463E_FOC_b0.05arcsec_c0.10arcsec.f
 Stokes_IR = fits.open("./data/MRK463E/WFPC2/IR_rot_crop.fits")
 Stokes_Xr = fits.open("./data/MRK463E/Chandra/X_ray_crop.fits")

-levels = np.geomspace(1., 99., 7)
+levels = np.geomspace(1.0, 99.0, 7)
 A = overplot_chandra(Stokes_UV, Stokes_Xr, norm=LogNorm())
-A.plot(levels=levels, SNRp_cut=3.0, SNRi_cut=3.0, vec_scale=5, zoom=1, savename='./plots/MRK463E/Chandra_overplot.pdf')
+A.plot(levels=levels, SNRp_cut=3.0, SNRi_cut=3.0, vec_scale=5, zoom=1, savename="./plots/MRK463E/Chandra_overplot.pdf")
 A.write_to(path1="./data/MRK463E/FOC_data_Chandra.fits", path2="./data/MRK463E/Chandra_data.fits", suffix="aligned")

-levels = np.array([0.8, 2, 5, 10, 20, 50])/100.*Stokes_UV[0].header['photflam']
+levels = np.array([0.8, 2, 5, 10, 20, 50]) / 100.0 * Stokes_UV[0].header["photflam"]
 B = overplot_pol(Stokes_UV, Stokes_IR, norm=LogNorm())
-B.plot(levels=levels, SNRp_cut=3.0, SNRi_cut=3.0, vec_scale=5, norm=LogNorm(8.5e-18, 2.5e-15), savename='./plots/MRK463E/IR_overplot.pdf')
+B.plot(levels=levels, SNRp_cut=3.0, SNRi_cut=3.0, vec_scale=5, norm=LogNorm(8.5e-18, 2.5e-15), savename="./plots/MRK463E/IR_overplot.pdf")
 B.write_to(path1="./data/MRK463E/FOC_data_WFPC.fits", path2="./data/MRK463E/WFPC_data.fits", suffix="aligned")
diff --git a/package/src/analysis.py b/package/src/analysis.py
index 1cb3bc9..815eaa3 100755
--- a/package/src/analysis.py
+++ b/package/src/analysis.py
@@ -1,5 +1,6 @@
 #!/usr/bin/python
-from getopt import getopt, error as get_error
+from getopt import error as get_error
+from getopt import getopt
 from sys import argv

 arglist = argv[1:]
@@ -24,7 +25,7 @@ try:
         elif curr_arg in ("-i", "--snri"):
             SNRi_cut = int(curr_val)
         elif curr_arg in ("-l", "--lim"):
-            flux_lim = list("".join(curr_val).split(','))
+            flux_lim = list("".join(curr_val).split(","))

 except get_error as err:
     print(str(err))
diff --git a/package/src/get_cdelt.py b/package/src/get_cdelt.py
index 45e526b..b7054c6 100755
--- a/package/src/get_cdelt.py
+++ b/package/src/get_cdelt.py
@@ -1,19 +1,21 @@
 #!/usr/bin/python

+
 def main(infiles=None):
     """
     Retrieve native spatial resolution from given observation.
""" from os.path import join as path_join from warnings import catch_warnings, filterwarnings + from astropy.io.fits import getheader from astropy.wcs import WCS, FITSFixedWarning from numpy.linalg import eig if infiles is None: - print("Usage: \"python get_cdelt.py -f infiles\"") + print('Usage: "python get_cdelt.py -f infiles"') return 1 - prod = [["/".join(filepath.split('/')[:-1]), filepath.split('/')[-1]] for filepath in infiles] + prod = [["/".join(filepath.split("/")[:-1]), filepath.split("/")[-1]] for filepath in infiles] data_folder = prod[0][0] infiles = [p[1] for p in prod] @@ -21,14 +23,14 @@ def main(infiles=None): size = {} for currfile in infiles: with catch_warnings(): - filterwarnings('ignore', message="'datfix' made the change", category=FITSFixedWarning) + filterwarnings("ignore", message="'datfix' made the change", category=FITSFixedWarning) wcs = WCS(getheader(path_join(data_folder, currfile))).celestial key = currfile[:-5] size[key] = wcs.array_shape if wcs.wcs.has_cd(): - cdelt[key] = eig(wcs.wcs.cd)[0]*3600. + cdelt[key] = eig(wcs.wcs.cd)[0] * 3600.0 else: - cdelt[key] = wcs.wcs.cdelt*3600. + cdelt[key] = wcs.wcs.cdelt * 3600.0 print("Image name, native resolution in arcsec and shape") for currfile in infiles: @@ -41,7 +43,7 @@ def main(infiles=None): if __name__ == "__main__": import argparse - parser = argparse.ArgumentParser(description='Query MAST for target products') - parser.add_argument('-f', '--files', metavar='path', required=False, nargs='*', help='the full or relative path to the data products', default=None) + parser = argparse.ArgumentParser(description="Query MAST for target products") + parser.add_argument("-f", "--files", metavar="path", required=False, nargs="*", help="the full or relative path to the data products", default=None) args = parser.parse_args() exitcode = main(infiles=args.files)