Merge branch 'testing' of git.tibeuleu.xyz:tibeuleu/FOC_Reduction into testing

fix merge on personal machine

2024-03-27 10:50:53 +01:00
5 changed files with 151 additions and 177 deletions

View File

@@ -11,6 +11,7 @@ import lib.fits as proj_fits # Functions to handle fits files
 import lib.reduction as proj_red  # Functions used in reduction pipeline
 import lib.plots as proj_plots  # Functions for plotting data
 from lib.query import retrieve_products, path_exists, system
+from lib.utils import sci_not, princ_angle
 from matplotlib.colors import LogNorm
@@ -58,7 +59,7 @@ def main(target=None, proposal_id=None, infiles=None, output_dir="./data", crop=
     # Polarization map output
     SNRp_cut = 3.  # P measurements with SNR > 3
-    SNRi_cut = 30.  # I measurements with SNR > 30, which implies an uncertainty in P of 4.7%.
+    SNRi_cut = 3.  # I measurements with SNR > 30, which implies an uncertainty in P of 4.7%.
     flux_lim = None  # lowest and highest flux displayed on plot, defaults to bkg and maximum in cut if None
     vec_scale = 3
     step_vec = 1  # plot all vectors in the array; if step_vec = 2, every other vector is plotted; if step_vec = 0, all vectors are displayed at full length
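
Side note on this hunk: the new line keeps the old comment, which still quotes the SNR > 30 cut. The quoted 4.7% follows from the usual first-order estimate sigma_P ~ sqrt(2)/SNR_I. A minimal sketch of that relation (hypothetical helper, not part of the pipeline):

    import numpy as np

    def implied_sigma_p(snr_i):
        # First-order propagation with sigma_Q ~ sigma_U ~ sigma_I gives
        # sigma_P ~ sqrt(2)/SNR_I, so SNR_I > 30 bounds sigma_P below ~4.7%.
        return np.sqrt(2.) / snr_i

    print(implied_sigma_p(30.))  # 0.047... -> the 4.7% quoted in the comment
    print(implied_sigma_p(3.))   # 0.47    -> the relaxed cut only guarantees ~47%
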
@@ -89,6 +90,7 @@ def main(target=None, proposal_id=None, infiles=None, output_dir="./data", crop=
     data_array, headers = proj_fits.get_obs_data(infiles, data_folder=data_folder, compute_flux=True)
     figname = "_".join([target, "FOC"])
+    figtype = ""
     if rebin:
         if px_scale not in ['full']:
             figtype = "".join(["b", "{0:.2f}".format(pxsize), px_scale])  # additional information
@@ -120,8 +122,8 @@ def main(target=None, proposal_id=None, infiles=None, output_dir="./data", crop=
         data_array, headers, error_array=error_array, background=background, upsample_factor=10, ref_center=align_center, return_shifts=False)
     if display_align:
-        proj_plots.plot_obs(data_array, headers, vmin=data_array[data_array > 0.].min(
-        )*headers[0]['photflam'], vmax=data_array[data_array > 0.].max()*headers[0]['photflam'], savename="_".join([figname, str(align_center)]), plots_folder=plots_folder)
+        proj_plots.plot_obs(data_array, headers, savename="_".join([figname, str(align_center)]), plots_folder=plots_folder, norm=LogNorm(
+            vmin=data_array[data_array > 0.].min()*headers[0]['photflam'], vmax=data_array[data_array > 0.].max()*headers[0]['photflam']))
     # Rebin data to desired pixel size.
     if rebin:
@@ -136,8 +138,8 @@ def main(target=None, proposal_id=None, infiles=None, output_dir="./data", crop=
     # Plot array for checking output
     if display_data and px_scale.lower() not in ['full', 'integrate']:
-        proj_plots.plot_obs(data_array, headers, vmin=data_array[data_array > 0.].min(
-        )*headers[0]['photflam'], vmax=data_array[data_array > 0.].max()*headers[0]['photflam'], savename="_".join([figname, "rebin"]), plots_folder=plots_folder)
+        proj_plots.plot_obs(data_array, headers, savename="_".join([figname, "rebin"]), plots_folder=plots_folder, norm=LogNorm(
+            vmin=data_array[data_array > 0.].min()*headers[0]['photflam'], vmax=data_array[data_array > 0.].max()*headers[0]['photflam']))
     background = np.array([np.array(bkg).reshape(1, 1) for bkg in background])
     background_error = np.array([np.array(np.sqrt((bkg-background[np.array([h['filtnam1'] == head['filtnam1'] for h in headers], dtype=bool)].mean())
@@ -167,51 +169,52 @@ def main(target=None, proposal_id=None, infiles=None, output_dir="./data", crop=
     # Step 4:
     # Save image to FITS.
+    figname = "_".join([figname, figtype]) if figtype != "" else figname
     Stokes_test = proj_fits.save_Stokes(I_stokes, Q_stokes, U_stokes, Stokes_cov, P, debiased_P, s_P, s_P_P, PA, s_PA, s_PA_P,
-                                        headers, data_mask, "_".join([figname, figtype]), data_folder=data_folder, return_hdul=True)
+                                        headers, data_mask, figname, data_folder=data_folder, return_hdul=True)
     data_mask = Stokes_test[-1].data.astype(bool)
     # Step 5:
     # crop to desired region of interest (roi)
     if crop:
-        figtype += "_crop"
+        figname += "_crop"
         stokescrop = proj_plots.crop_Stokes(deepcopy(Stokes_test), norm=LogNorm())
         stokescrop.crop()
-        stokescrop.write_to("/".join([data_folder, "_".join([figname, figtype+".fits"])]))
+        stokescrop.write_to("/".join([data_folder, figname+".fits"]))
         Stokes_test, data_mask, headers = stokescrop.hdul_crop, stokescrop.data_mask, [dataset.header for dataset in stokescrop.hdul_crop]
-    print("F_int({0:.0f} Angs) = ({1} ± {2})e{3} ergs.cm^-2.s^-1.Angs^-1".format(headers[0]['photplam'], *proj_plots.sci_not(
+    print("F_int({0:.0f} Angs) = ({1} ± {2})e{3} ergs.cm^-2.s^-1.Angs^-1".format(headers[0]['photplam'], *sci_not(
         Stokes_test[0].data[data_mask].sum()*headers[0]['photflam'], np.sqrt(Stokes_test[3].data[0, 0][data_mask].sum())*headers[0]['photflam'], 2, out=int)))
     print("P_int = {0:.1f} ± {1:.1f} %".format(headers[0]['p_int']*100., np.ceil(headers[0]['p_int_err']*1000.)/10.))
-    print("PA_int = {0:.1f} ± {1:.1f} °".format(headers[0]['pa_int'], np.ceil(headers[0]['pa_int_err']*10.)/10.))
+    print("PA_int = {0:.1f} ± {1:.1f} °".format(princ_angle(headers[0]['pa_int']), princ_angle(np.ceil(headers[0]['pa_int_err']*10.)/10.)))
     # Background values
-    print("F_bkg({0:.0f} Angs) = ({1} ± {2})e{3} ergs.cm^-2.s^-1.Angs^-1".format(headers[0]['photplam'], *proj_plots.sci_not(
+    print("F_bkg({0:.0f} Angs) = ({1} ± {2})e{3} ergs.cm^-2.s^-1.Angs^-1".format(headers[0]['photplam'], *sci_not(
         I_bkg[0, 0]*headers[0]['photflam'], np.sqrt(S_cov_bkg[0, 0][0, 0])*headers[0]['photflam'], 2, out=int)))
     print("P_bkg = {0:.1f} ± {1:.1f} %".format(debiased_P_bkg[0, 0]*100., np.ceil(s_P_bkg[0, 0]*1000.)/10.))
-    print("PA_bkg = {0:.1f} ± {1:.1f} °".format(PA_bkg[0, 0], np.ceil(s_PA_bkg[0, 0]*10.)/10.))
+    print("PA_bkg = {0:.1f} ± {1:.1f} °".format(princ_angle(PA_bkg[0, 0]), princ_angle(np.ceil(s_PA_bkg[0, 0]*10.)/10.)))
     # Plot polarisation map (Background is either total Flux, Polarization degree or Polarization degree error).
     if px_scale.lower() not in ['full', 'integrate'] and not interactive:
         proj_plots.polarisation_map(deepcopy(Stokes_test), data_mask, SNRp_cut=SNRp_cut, SNRi_cut=SNRi_cut, flux_lim=flux_lim,
-                                    step_vec=step_vec, vec_scale=vec_scale, savename="_".join([figname, figtype]), plots_folder=plots_folder)
+                                    step_vec=step_vec, vec_scale=vec_scale, savename="_".join([figname]), plots_folder=plots_folder)
         proj_plots.polarisation_map(deepcopy(Stokes_test), data_mask, SNRp_cut=SNRp_cut, SNRi_cut=SNRi_cut, flux_lim=flux_lim, step_vec=step_vec,
-                                    vec_scale=vec_scale, savename="_".join([figname, figtype, "I"]), plots_folder=plots_folder, display='Intensity')
+                                    vec_scale=vec_scale, savename="_".join([figname, "I"]), plots_folder=plots_folder, display='Intensity')
         proj_plots.polarisation_map(deepcopy(Stokes_test), data_mask, SNRp_cut=SNRp_cut, SNRi_cut=SNRi_cut, flux_lim=flux_lim, step_vec=step_vec,
-                                    vec_scale=vec_scale, savename="_".join([figname, figtype, "P_flux"]), plots_folder=plots_folder, display='Pol_Flux')
+                                    vec_scale=vec_scale, savename="_".join([figname, "P_flux"]), plots_folder=plots_folder, display='Pol_Flux')
         proj_plots.polarisation_map(deepcopy(Stokes_test), data_mask, SNRp_cut=SNRp_cut, SNRi_cut=SNRi_cut, flux_lim=flux_lim, step_vec=step_vec,
-                                    vec_scale=vec_scale, savename="_".join([figname, figtype, "P"]), plots_folder=plots_folder, display='Pol_deg')
+                                    vec_scale=vec_scale, savename="_".join([figname, "P"]), plots_folder=plots_folder, display='Pol_deg')
         proj_plots.polarisation_map(deepcopy(Stokes_test), data_mask, SNRp_cut=SNRp_cut, SNRi_cut=SNRi_cut, flux_lim=flux_lim, step_vec=step_vec,
-                                    vec_scale=vec_scale, savename="_".join([figname, figtype, "PA"]), plots_folder=plots_folder, display='Pol_ang')
+                                    vec_scale=vec_scale, savename="_".join([figname, "PA"]), plots_folder=plots_folder, display='Pol_ang')
         proj_plots.polarisation_map(deepcopy(Stokes_test), data_mask, SNRp_cut=SNRp_cut, SNRi_cut=SNRi_cut, flux_lim=flux_lim, step_vec=step_vec,
-                                    vec_scale=vec_scale, savename="_".join([figname, figtype, "I_err"]), plots_folder=plots_folder, display='I_err')
+                                    vec_scale=vec_scale, savename="_".join([figname, "I_err"]), plots_folder=plots_folder, display='I_err')
         proj_plots.polarisation_map(deepcopy(Stokes_test), data_mask, SNRp_cut=SNRp_cut, SNRi_cut=SNRi_cut, flux_lim=flux_lim, step_vec=step_vec,
-                                    vec_scale=vec_scale, savename="_".join([figname, figtype, "P_err"]), plots_folder=plots_folder, display='Pol_deg_err')
+                                    vec_scale=vec_scale, savename="_".join([figname, "P_err"]), plots_folder=plots_folder, display='Pol_deg_err')
         proj_plots.polarisation_map(deepcopy(Stokes_test), data_mask, SNRp_cut=SNRp_cut, SNRi_cut=SNRi_cut, flux_lim=flux_lim, step_vec=step_vec,
-                                    vec_scale=vec_scale, savename="_".join([figname, figtype, "SNRi"]), plots_folder=plots_folder, display='SNRi')
+                                    vec_scale=vec_scale, savename="_".join([figname, "SNRi"]), plots_folder=plots_folder, display='SNRi')
         proj_plots.polarisation_map(deepcopy(Stokes_test), data_mask, SNRp_cut=SNRp_cut, SNRi_cut=SNRi_cut, flux_lim=flux_lim, step_vec=step_vec,
-                                    vec_scale=vec_scale, savename="_".join([figname, figtype, "SNRp"]), plots_folder=plots_folder, display='SNRp')
+                                    vec_scale=vec_scale, savename="_".join([figname, "SNRp"]), plots_folder=plots_folder, display='SNRp')
     elif not interactive:
         proj_plots.polarisation_map(deepcopy(Stokes_test), data_mask, SNRp_cut=SNRp_cut, SNRi_cut=SNRi_cut,
-                                    savename="_".join([figname, figtype]), plots_folder=plots_folder, display='integrate')
+                                    savename=figname, plots_folder=plots_folder, display='integrate')
     elif px_scale.lower() not in ['full', 'integrate']:
         proj_plots.pol_map(Stokes_test, SNRp_cut=SNRp_cut, SNRi_cut=SNRi_cut, flux_lim=flux_lim)
@@ -227,8 +230,8 @@ if __name__ == "__main__":
     parser.add_argument('-f', '--files', metavar='path', required=False, nargs='*', help='the full or relative path to the data products', default=None)
     parser.add_argument('-o', '--output_dir', metavar='directory_path', required=False,
                         help='output directory path for the data products', type=str, default="./data")
-    parser.add_argument('-c', '--crop', required=False, help='whether to crop the analysis region', default=False, action='store_true')
-    parser.add_argument('-i', '--interactive', required=False, help='whether to output to the interactive analysis tool', default=False, action='store_true')
+    parser.add_argument('-c', '--crop', action='store_true', required=False, help='whether to crop the analysis region')
+    parser.add_argument('-i', '--interactive', action='store_true', required=False, help='whether to output to the interactive analysis tool')
     args = parser.parse_args()
     exitcode = main(target=args.target, proposal_id=args.proposal_id, infiles=args.files,
                     output_dir=args.output_dir, crop=args.crop, interactive=args.interactive)
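
The argparse change above is behavior-preserving: `action='store_true'` already implies `default=False`, so the dropped explicit default was redundant. A quick self-contained check:

    import argparse

    parser = argparse.ArgumentParser()
    # store_true sets default=False on its own.
    parser.add_argument('-c', '--crop', action='store_true', help='whether to crop the analysis region')

    print(parser.parse_args([]).crop)      # False
    print(parser.parse_args(['-c']).crop)  # True
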

View File

@@ -54,12 +54,10 @@ from astropy.wcs import WCS
 from astropy.io import fits
 from astropy.coordinates import SkyCoord
 from scipy.ndimage import zoom as sc_zoom
-from lib.fits import save_Stokes
 from lib.utils import rot2D, princ_angle, sci_not


-def plot_obs(data_array, headers, shape=None, vmin=None, vmax=None, rectangle=None,
-             savename=None, plots_folder=""):
+def plot_obs(data_array, headers, rectangle=None, savename=None, plots_folder="", **kwargs):
     """
     Plots raw observation imagery with some information on the instrument and
     filters.
@@ -70,10 +68,6 @@ def plot_obs(data_array, headers, shape=None, vmin=None, vmax=None, rectangle=No
         single observation with multiple polarizers of an instrument
     headers : header list
         List of headers corresponding to the images in data_array
-    shape : array-like of length 2, optional
-        Shape of the display, with shape = [#row, #columns]. If None, defaults
-        to the optimal square.
-        Defaults to None.
     vmin : float, optional
         Min pixel value that should be displayed.
         Defaults to 0.
@@ -94,39 +88,47 @@ def plot_obs(data_array, headers, shape=None, vmin=None, vmax=None, rectangle=No
         Defaults to current folder.
     """
     plt.rcParams.update({'font.size': 10})
-    if shape is None:
-        shape = np.array([np.ceil(np.sqrt(data_array.shape[0])).astype(int), ]*2)
+    nb_obs = np.max([np.sum([head['filtnam1'] == curr_pol for head in headers]) for curr_pol in ['POL0', 'POL60', 'POL120']])
+    shape = np.array((3, nb_obs))
     fig, ax = plt.subplots(shape[0], shape[1], figsize=(10, 10), dpi=200,
                            sharex=True, sharey=True)
-    for i, (axe, data, head) in enumerate(zip(ax.flatten(), data_array, headers)):
+    r_pol = dict(pol0=0, pol60=1, pol120=2)
+    c_pol = dict(pol0=0, pol60=0, pol120=0)
+    for i, (data, head) in enumerate(zip(data_array, headers)):
         instr = head['instrume']
         rootname = head['rootname']
         exptime = head['exptime']
         filt = head['filtnam1']
         convert = head['photflam']
+        r_ax, c_ax = r_pol[filt.lower()], c_pol[filt.lower()]
+        c_pol[filt.lower()] += 1
         # plots
-        if vmin is None or vmax is None:
-            vmin, vmax = convert*data[data > 0.].min()/10., convert*data[data > 0.].max()
-        # im = axe.imshow(convert*data, vmin=vmin, vmax=vmax, origin='lower', cmap='gray')
+        vmin, vmax = convert*data[data > 0.].min()/10., convert*data[data > 0.].max()
+        for key, value in [["cmap", [["cmap", "gray"]]], ["norm", [["norm", LogNorm(vmin, vmax)]]]]:
+            try:
+                _ = kwargs[key]
+            except KeyError:
+                for key_i, val_i in value:
+                    kwargs[key_i] = val_i
+        # im = ax[r_ax][c_ax].imshow(convert*data, origin='lower', **kwargs)
         data[data*convert < vmin*10.] = vmin*10./convert
-        im = axe.imshow(convert*data, norm=LogNorm(vmin, vmax), origin='lower', cmap='gray')
+        im = ax[r_ax][c_ax].imshow(convert*data, origin='lower', **kwargs)
         if rectangle is not None:
             x, y, width, height, angle, color = rectangle[i]
-            axe.add_patch(Rectangle((x, y), width, height, angle=angle,
-                          edgecolor=color, fill=False))
+            ax[r_ax][c_ax].add_patch(Rectangle((x, y), width, height, angle=angle,
+                                     edgecolor=color, fill=False))
         # position of centroid
-        axe.plot([data.shape[1]/2, data.shape[1]/2], [0, data.shape[0]-1], '--', lw=1,
-                 color='grey', alpha=0.5)
-        axe.plot([0, data.shape[1]-1], [data.shape[1]/2, data.shape[1]/2], '--', lw=1,
-                 color='grey', alpha=0.5)
-        axe.annotate(instr+":"+rootname, color='white', fontsize=5, xy=(0.01, 1.00),
-                     xycoords='axes fraction', verticalalignment='top',
-                     horizontalalignment='left')
-        axe.annotate(filt, color='white', fontsize=10, xy=(0.01, 0.01),
-                     xycoords='axes fraction', verticalalignment='bottom',
-                     horizontalalignment='left')
-        axe.annotate(exptime, color='white', fontsize=5, xy=(1.00, 0.01),
-                     xycoords='axes fraction', verticalalignment='bottom',
-                     horizontalalignment='right')
+        ax[r_ax][c_ax].plot([data.shape[1]/2, data.shape[1]/2], [0, data.shape[0]-1], '--', lw=1,
+                            color='grey', alpha=0.5)
+        ax[r_ax][c_ax].plot([0, data.shape[1]-1], [data.shape[1]/2, data.shape[1]/2], '--', lw=1,
+                            color='grey', alpha=0.5)
+        ax[r_ax][c_ax].annotate(instr+":"+rootname, color='white', fontsize=5, xy=(0.01, 1.00),
+                                xycoords='axes fraction', verticalalignment='top',
+                                horizontalalignment='left')
+        ax[r_ax][c_ax].annotate(filt, color='white', fontsize=10, xy=(0.01, 0.01),
+                                xycoords='axes fraction', verticalalignment='bottom',
+                                horizontalalignment='left')
+        ax[r_ax][c_ax].annotate(exptime, color='white', fontsize=5, xy=(1.00, 0.01),
+                                xycoords='axes fraction', verticalalignment='bottom',
+                                horizontalalignment='right')
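
The try/except loop in the hunk above fills in 'cmap' and 'norm' only when the caller did not pass them through **kwargs. dict.setdefault expresses the same default-filling more directly; a sketch of an equivalent (hypothetical helper name):

    from matplotlib.colors import LogNorm

    def apply_display_defaults(kwargs, vmin, vmax):
        # Same effect as the nested try/except loop: keep caller-supplied
        # values, otherwise fall back to a grayscale, log-stretched display.
        kwargs.setdefault("cmap", "gray")
        kwargs.setdefault("norm", LogNorm(vmin, vmax))
        return kwargs
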
@@ -306,7 +308,7 @@ def polarisation_map(Stokes, data_mask=None, rectangle=None, SNRp_cut=3., SNRi_c
             vmin, vmax = flux_lim
         im = ax.imshow(stkI.data*convert_flux, norm=LogNorm(vmin, vmax), aspect='equal', cmap='inferno', alpha=1.)
         fig.colorbar(im, ax=ax, aspect=50, shrink=0.75, pad=0.025, label=r"$F_{\lambda}$ [$ergs \cdot cm^{-2} \cdot s^{-1} \cdot \AA^{-1}$]")
-        levelsI = np.array([2., 5., 10., 20., 50., 90.])/100.*vmax
+        levelsI = np.array([0.8, 2., 5., 10., 20., 50.])/100.*vmax
         print("Total flux contour levels : ", levelsI)
         ax.contour(stkI.data*convert_flux, levels=levelsI, colors='grey', linewidths=0.5)
     elif display.lower() in ['pol_flux']:
@@ -391,9 +393,8 @@ def polarisation_map(Stokes, data_mask=None, rectangle=None, SNRp_cut=3., SNRi_c
         fig.colorbar(im, ax=ax, aspect=50, shrink=0.75, pad=0.025, label=r"$F_{\lambda}$ [$ergs \cdot cm^{-2} \cdot s^{-1} \cdot \AA$]")

     # Get integrated values from header
-    n_pix = stkI.data[data_mask].size
     I_diluted = stkI.data[data_mask].sum()
-    I_diluted_err = np.sqrt(n_pix)*np.sqrt(np.sum(stk_cov.data[0, 0][data_mask]))
+    I_diluted_err = np.sqrt(np.sum(stk_cov.data[0, 0][data_mask]))
     P_diluted = Stokes[0].header['P_int']
     P_diluted_err = Stokes[0].header['P_int_err']
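
Dropping the sqrt(n_pix) factor above is the statistically sound move: for a sum of independent pixel fluxes the variances add, Var(sum I_k) = sum Var(I_k), so the error of the integrated flux is the square root of the summed variances with no extra factor. A quick Monte Carlo check (toy data, not pipeline code):

    import numpy as np

    rng = np.random.default_rng(0)
    var = rng.uniform(0.5, 2.0, size=1000)                    # per-pixel variances
    flux = rng.normal(0.0, np.sqrt(var), size=(20000, 1000))  # simulated pixel fluxes
    total = flux.sum(axis=1)

    print(total.std())                           # empirical error of the summed flux
    print(np.sqrt(var.sum()))                    # sqrt of summed variances: matches
    print(np.sqrt(var.size)*np.sqrt(var.sum()))  # old sqrt(n_pix) formula: ~32x too large
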
@@ -414,7 +415,7 @@ def polarisation_map(Stokes, data_mask=None, rectangle=None, SNRp_cut=3., SNRi_c
     X, Y = np.meshgrid(np.arange(stkI.data.shape[1]), np.arange(stkI.data.shape[0]))
     U, V = poldata*np.cos(np.pi/2.+pangdata*np.pi/180.), poldata*np.sin(np.pi/2.+pangdata*np.pi/180.)
     ax.quiver(X[::step_vec, ::step_vec], Y[::step_vec, ::step_vec], U[::step_vec, ::step_vec], V[::step_vec, ::step_vec], units='xy', angles='uv',
-              scale=1./vec_scale, scale_units='xy', pivot='mid', headwidth=0., headlength=0., headaxislength=0., width=0.1, linewidth=0.5, color='w', edgecolor='k')
+              scale=1./vec_scale, scale_units='xy', pivot='mid', headwidth=0., headlength=0., headaxislength=0., width=0.2, linewidth=0.3, color='w', edgecolor='k')
     pol_sc = AnchoredSizeBar(ax.transData, vec_scale, r"$P$= 100 %", 4, pad=0.5, sep=5, borderpad=0.5, frameon=False, size_vertical=0.005, color='w')
     ax.add_artist(pol_sc)
@@ -446,7 +447,7 @@ def polarisation_map(Stokes, data_mask=None, rectangle=None, SNRp_cut=3., SNRi_c
     if savename is not None:
         if savename[-4:] not in ['.png', '.jpg', '.pdf']:
             savename += '.pdf'
-        fig.savefig(path_join(plots_folder, savename), bbox_inches='tight', dpi=300)
+        fig.savefig(path_join(plots_folder, savename), bbox_inches='tight', dpi=200)
     plt.show()
     return fig, ax
@@ -730,12 +731,10 @@ class overplot_radio(align_maps):
         # Display other map as contours
         if levels is None:
-            levels = np.logspace(np.log(3)/np.log(10), 2., 5)/100.*other_data[other_data > 0.].max()
+            levels = np.logspace(0., 1.9, 5)/100.*other_data[other_data > 0.].max()
         other_cont = self.ax_overplot.contour(
             other_data*self.other_convert, transform=self.ax_overplot.get_transform(self.other_wcs.celestial), levels=levels*self.other_convert, colors='grey')
         self.ax_overplot.clabel(other_cont, inline=True, fontsize=5)
-        other_proxy = Rectangle((0, 0), 1, 1, fc='w', ec=other_cont.collections[0].get_edgecolor()[0], label=r"{0:s} contour".format(self.other_observer))
-        self.ax_overplot.add_patch(other_proxy)
         self.ax_overplot.set_xlabel(label="Right Ascension (J2000)")
         self.ax_overplot.set_ylabel(label="Declination (J2000)", labelpad=-1)
@@ -761,6 +760,8 @@
         handles, labels = self.ax_overplot.get_legend_handles_labels()
         handles[np.argmax([li == "{0:s} polarisation map".format(self.map_observer) for li in labels])
                 ] = FancyArrowPatch((0, 0), (0, 1), arrowstyle='-', fc='w', ec='k', lw=2)
+        labels.append("{0:s} contour".format(self.other_observer))
+        handles.append(Rectangle((0, 0), 1, 1, fill=False, lw=2, ec=other_cont.collections[0].get_edgecolor()[0]))
         self.legend = self.ax_overplot.legend(handles=handles, labels=labels, bbox_to_anchor=(
             0., 1.02, 1., .102), loc='lower left', mode="expand", borderaxespad=0.)
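
This and the two matching hunks below replace the old trick of drawing an invisible Rectangle on the axes so that it shows up in the legend: a proxy handle appended to the handles/labels lists is legend-only and never touches the plot. A minimal sketch of the pattern (toy data):

    import matplotlib.pyplot as plt
    from matplotlib.patches import Rectangle

    fig, ax = plt.subplots()
    ax.contour([[0., 1.], [1., 0.]], colors='grey')  # contour sets carry no legend entry

    handles, labels = ax.get_legend_handles_labels()
    # Proxy artist: built only for the legend, never added to the axes.
    handles.append(Rectangle((0, 0), 1, 1, fill=False, lw=2, ec='grey'))
    labels.append("radio contour")
    ax.legend(handles=handles, labels=labels)
    plt.show()
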
@@ -854,9 +855,6 @@ class overplot_chandra(align_maps):
         levels *= other_data.max()/self.other_data.max()
         other_cont = self.ax_overplot.contour(other_data*self.other_convert, transform=self.ax_overplot.get_transform(other_wcs), levels=levels, colors='grey')
         self.ax_overplot.clabel(other_cont, inline=True, fontsize=8)
-        other_proxy = Rectangle((0, 0), 1., 1., fc='w', ec=other_cont.collections[0].get_edgecolor()[
-            0], lw=2, label=r"{0:s} contour in counts".format(self.other_observer))
-        self.ax_overplot.add_patch(other_proxy)
         self.ax_overplot.set_xlabel(label="Right Ascension (J2000)")
         self.ax_overplot.set_ylabel(label="Declination (J2000)", labelpad=-1)
@@ -881,6 +879,8 @@
         handles, labels = self.ax_overplot.get_legend_handles_labels()
         handles[np.argmax([li == "{0:s} polarisation map".format(self.map_observer) for li in labels])
                 ] = FancyArrowPatch((0, 0), (0, 1), arrowstyle='-', fc='w', ec='k', lw=2)
+        labels.append("{0:s} contour in counts".format(self.other_observer))
+        handles.append(Rectangle((0, 0), 1, 1, fill=False, lw=2, ec=other_cont.collections[0].get_edgecolor()[0]))
         self.legend = self.ax_overplot.legend(handles=handles, labels=labels, bbox_to_anchor=(
             0., 1.02, 1., .102), loc='lower left', mode="expand", borderaxespad=0.)
@@ -972,8 +972,6 @@ class overplot_pol(align_maps):
         cont_stkI = self.ax_overplot.contour(stkI*self.map_convert, levels=levels, colors='grey', alpha=0.75,
                                              transform=self.ax_overplot.get_transform(self.wcs_UV))
         # self.ax_overplot.clabel(cont_stkI, inline=True, fontsize=5)
-        cont_proxy = Rectangle((0, 0), 1, 1, fc='w', ec=cont_stkI.collections[0].get_edgecolor()[0], label="{0:s} Stokes I contour".format(self.map_observer))
-        self.ax_overplot.add_patch(cont_proxy)

         # Display pixel scale and North direction
         fontprops = fm.FontProperties(size=16)
@@ -1001,6 +999,8 @@
         handles, labels = self.ax_overplot.get_legend_handles_labels()
         handles[np.argmax([li == "{0:s} polarisation map".format(self.map_observer) for li in labels])
                 ] = FancyArrowPatch((0, 0), (0, 1), arrowstyle='-', fc='w', ec='k', lw=2)
+        labels.append("{0:s} Stokes I contour".format(self.map_observer))
+        handles.append(Rectangle((0, 0), 1, 1, fill=False, ec=cont_stkI.collections[0].get_edgecolor()[0]))
         self.legend = self.ax_overplot.legend(handles=handles, labels=labels, bbox_to_anchor=(
             0., 1.02, 1., .102), loc='lower left', mode="expand", borderaxespad=0.)
@@ -2351,10 +2351,9 @@ class pol_map(object):
     def pol_int(self, fig=None, ax=None):
         if self.region is None:
-            n_pix = self.I.size
            s_I = np.sqrt(self.IQU_cov[0, 0])
             I_reg = self.I.sum()
-            I_reg_err = np.sqrt(n_pix)*np.sqrt(np.sum(s_I**2))
+            I_reg_err = np.sqrt(np.sum(s_I**2))
             P_reg = self.Stokes[0].header['P_int']
             P_reg_err = self.Stokes[0].header['P_int_err']
             PA_reg = self.Stokes[0].header['PA_int']
@@ -2386,7 +2385,6 @@
                 Q_cut_err**2 + Q_cut**2*U_cut_err**2 - 2.*Q_cut*U_cut*QU_cut_err)))
         else:
-            n_pix = self.I[self.region].size
             s_I = np.sqrt(self.IQU_cov[0, 0])
             s_Q = np.sqrt(self.IQU_cov[1, 1])
             s_U = np.sqrt(self.IQU_cov[2, 2])
@@ -2442,15 +2440,19 @@
                 ax = self.ax
             if hasattr(self, 'an_int'):
                 self.an_int.remove()
-            self.an_int = ax.annotate(r"$F_{{\lambda}}^{{int}}$({0:.0f} $\AA$) = {1} $ergs \cdot cm^{{-2}} \cdot s^{{-1}} \cdot \AA^{{-1}}$".format(self.pivot_wav, sci_not(I_reg*self.map_convert, I_reg_err*self.map_convert, 2))+"\n"+r"$P^{{int}}$ = {0:.1f} $\pm$ {1:.1f} %".format(P_reg*100., np.ceil(
-                P_reg_err*1000.)/10.)+"\n"+r"$\theta_{{P}}^{{int}}$ = {0:.1f} $\pm$ {1:.1f} °".format(PA_reg, np.ceil(PA_reg_err*10.)/10.), color='white', fontsize=12, xy=(0.01, 1.00), xycoords='axes fraction', path_effects=[pe.withStroke(linewidth=0.5, foreground='k')], verticalalignment='top', horizontalalignment='left')
+            self.str_int = r"$F_{{\lambda}}^{{int}}$({0:.0f} $\AA$) = {1} $ergs \cdot cm^{{-2}} \cdot s^{{-1}} \cdot \AA^{{-1}}$".format(self.pivot_wav, sci_not(I_reg*self.map_convert, I_reg_err*self.map_convert, 2))+"\n"+r"$P^{{int}}$ = {0:.1f} $\pm$ {1:.1f} %".format(P_reg*100., np.ceil(P_reg_err*1000.)/10.)+"\n"+r"$\theta_{{P}}^{{int}}$ = {0:.1f} $\pm$ {1:.1f} °".format(PA_reg, np.ceil(PA_reg_err*10.)/10.)
+            self.str_cut = ""
+            # self.str_cut = "\n"+r"$F_{{\lambda}}^{{cut}}$({0:.0f} $\AA$) = {1} $ergs \cdot cm^{{-2}} \cdot s^{{-1}} \cdot \AA^{{-1}}$".format(self.pivot_wav, sci_not(I_cut*self.map_convert, I_cut_err*self.map_convert, 2))+"\n"+r"$P^{{cut}}$ = {0:.1f} $\pm$ {1:.1f} %".format(P_cut*100., np.ceil(P_cut_err*1000.)/10.)+"\n"+r"$\theta_{{P}}^{{cut}}$ = {0:.1f} $\pm$ {1:.1f} °".format(PA_cut, np.ceil(PA_cut_err*10.)/10.)
+            self.an_int = ax.annotate(self.str_int+self.str_cut, color='white', fontsize=12, xy=(0.01, 1.00), xycoords='axes fraction', path_effects=[pe.withStroke(linewidth=0.5, foreground='k')], verticalalignment='top', horizontalalignment='left')
             if self.region is not None:
                 self.cont = ax.contour(self.region.astype(float), levels=[0.5], colors='white', linewidths=0.8)
             fig.canvas.draw_idle()
             return self.an_int
         else:
-            ax.annotate(r"$F_{{\lambda}}^{{int}}$({0:.0f} $\AA$) = {1} $ergs \cdot cm^{{-2}} \cdot s^{{-1}} \cdot \AA^{{-1}}$".format(self.pivot_wav, sci_not(I_reg*self.map_convert, I_reg_err*self.map_convert, 2))+"\n"+r"$P^{{int}}$ = {0:.1f} $\pm$ {1:.1f} %".format(P_reg*100., np.ceil(P_reg_err*1000.)/10.) +
-                        "\n"+r"$\theta_{{P}}^{{int}}$ = {0:.1f} $\pm$ {1:.1f} °".format(PA_reg, np.ceil(PA_reg_err*10.)/10.), color='white', fontsize=12, xy=(0.01, 1.00), xycoords='axes fraction', path_effects=[pe.withStroke(linewidth=0.5, foreground='k')], verticalalignment='top', horizontalalignment='left')
+            str_int = r"$F_{{\lambda}}^{{int}}$({0:.0f} $\AA$) = {1} $ergs \cdot cm^{{-2}} \cdot s^{{-1}} \cdot \AA^{{-1}}$".format(self.pivot_wav, sci_not(I_reg*self.map_convert, I_reg_err*self.map_convert, 2))+"\n"+r"$P^{{int}}$ = {0:.1f} $\pm$ {1:.1f} %".format(P_reg*100., np.ceil(P_reg_err*1000.)/10.)+"\n"+r"$\theta_{{P}}^{{int}}$ = {0:.1f} $\pm$ {1:.1f} °".format(PA_reg, np.ceil(PA_reg_err*10.)/10.)
+            str_cut = ""
+            # str_cut = "\n"+r"$F_{{\lambda}}^{{cut}}$({0:.0f} $\AA$) = {1} $ergs \cdot cm^{{-2}} \cdot s^{{-1}} \cdot \AA^{{-1}}$".format(self.pivot_wav, sci_not(I_cut*self.map_convert, I_cut_err*self.map_convert, 2))+"\n"+r"$P^{{cut}}$ = {0:.1f} $\pm$ {1:.1f} %".format(P_cut*100., np.ceil(P_cut_err*1000.)/10.)+"\n"+r"$\theta_{{P}}^{{cut}}$ = {0:.1f} $\pm$ {1:.1f} °".format(PA_cut, np.ceil(PA_cut_err*10.)/10.)
+            ax.annotate(str_int+str_cut, color='white', fontsize=12, xy=(0.01, 1.00), xycoords='axes fraction', path_effects=[pe.withStroke(linewidth=0.5, foreground='k')], verticalalignment='top', horizontalalignment='left')
             if self.region is not None:
                 ax.contour(self.region.astype(float), levels=[0.5], colors='white', linewidths=0.8)
             fig.canvas.draw_idle()

View File

@@ -12,7 +12,7 @@ prototypes :
     Homogeneously deconvolve a data array using a chosen deconvolution algorithm.
 - get_error(data_array, headers, error_array, data_mask, sub_type, display, savename, plots_folder, return_background) -> data_array, error_array, headers (, background)
-    Compute the error (noise) on each image of the input array.
+    Compute the uncertainties from the different error sources on each image of the input array.
 - rebin_array(data_array, error_array, headers, pxsize, scale, operation, data_mask) -> rebinned_data, rebinned_error, rebinned_headers, Dxy (, data_mask)
     Homogeneously rebin a data array given a target pixel size in scale units.
@@ -729,8 +729,8 @@ def align_data(data_array, headers, error_array=None, background=None, upsample_
         if do_shift:
             shift, error, _ = phase_cross_correlation(ref_data/ref_data.max(), image/image.max(), upsample_factor=upsample_factor)
         else:
-            shift = pol_shift[headers[i]['filtnam1'].lower()]
-            error = sigma_shift[headers[i]['filtnam1'].lower()]
+            shift = globals()["pol_shift"][headers[i]['filtnam1'].lower()]
+            error = globals()["sigma_shift"][headers[i]['filtnam1'].lower()]
         # Rescale image to requested output
         rescaled_image[i, res_shift[0]:res_shift[0]+shape[1], res_shift[1]:res_shift[1]+shape[2]] = deepcopy(image)
         rescaled_error[i, res_shift[0]:res_shift[0]+shape[1], res_shift[1]:res_shift[1]+shape[2]] = deepcopy(error_array[i])
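
One caution in this hunk: as rendered in the web view, the added lines read `globals["pol_shift"]` without the call parentheses. The builtin `globals` is a function, so subscripting it raises TypeError; the working form, used by every other lookup introduced in this commit and assumed in the hunk above, is `globals()["pol_shift"]`. A self-contained illustration (the `pol_shift` dict here is a hypothetical stand-in):

    pol_shift = {"pol0": (0.0, 0.0)}  # hypothetical module-level calibration dict

    def lookup(filt):
        # globals() returns the module namespace dict, which is subscriptable;
        # bare globals[...] raises:
        # TypeError: 'builtin_function_or_method' object is not subscriptable
        return globals()["pol_shift"][filt.lower()]

    print(lookup("POL0"))  # (0.0, 0.0)
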
@@ -1101,16 +1101,16 @@ def compute_Stokes(data_array, error_array, data_mask, headers, FWHM=None, scale
     same_filt3 = np.array([filt3 == header['filtnam3'] for header in headers]).all()
     same_filt4 = np.array([filt4 == header['filtnam4'] for header in headers]).all()
     if (same_filt2 and same_filt3 and same_filt4):
-        transmit2, transmit3, transmit4 = trans2[filt2.lower()], trans3[filt3.lower()], trans4[filt4.lower()]
+        transmit2, transmit3, transmit4 = globals()["trans2"][filt2.lower()], globals()["trans3"][filt3.lower()], globals()["trans4"][filt4.lower()]
     else:
         print("WARNING : All images in data_array are not from the same \
band filter, the limiting transmittance will be taken.")
-        transmit2 = np.min([trans2[header['filtnam2'].lower()] for header in headers])
-        transmit3 = np.min([trans3[header['filtnam3'].lower()] for header in headers])
-        transmit4 = np.min([trans4[header['filtnam4'].lower()] for header in headers])
+        transmit2 = np.min([globals()["trans2"][header['filtnam2'].lower()] for header in headers])
+        transmit3 = np.min([globals()["trans3"][header['filtnam3'].lower()] for header in headers])
+        transmit4 = np.min([globals()["trans4"][header['filtnam4'].lower()] for header in headers])
     if transmitcorr:
         transmit *= transmit2*transmit3*transmit4
-    pol_eff = np.array([pol_efficiency['pol0'], pol_efficiency['pol60'], pol_efficiency['pol120']])
+    pol_eff = np.array([globals()["pol_efficiency"]['pol0'], globals()["pol_efficiency"]['pol60'], globals()["pol_efficiency"]['pol120']])
     # Calculating correction factor
     corr = np.array([1.0*h['photflam']/h['exptime'] for h in pol_headers])*pol_headers[0]['exptime']/pol_headers[0]['photflam']
@@ -1122,9 +1122,11 @@ def compute_Stokes(data_array, error_array, data_mask, headers, FWHM=None, scale
     coeff_stokes = np.zeros((3, 3))
     # Coefficients linking each polarizer flux to each Stokes parameter
     for i in range(3):
-        coeff_stokes[0, i] = pol_eff[(i+1) % 3]*pol_eff[(i+2) % 3]*np.sin(-2.*theta[(i+1) % 3]+2.*theta[(i+2) % 3])*2./transmit[i]
-        coeff_stokes[1, i] = (-pol_eff[(i+1) % 3]*np.sin(2.*theta[(i+1) % 3]) + pol_eff[(i+2) % 3]*np.sin(2.*theta[(i+2) % 3]))*2./transmit[i]
-        coeff_stokes[2, i] = (pol_eff[(i+1) % 3]*np.cos(2.*theta[(i+1) % 3]) - pol_eff[(i+2) % 3]*np.cos(2.*theta[(i+2) % 3]))*2./transmit[i]
+        coeff_stokes[0, i] = pol_eff[(i+1) % 3]*pol_eff[(i+2) % 3]*np.sin(-2.*globals()["theta"][(i+1) % 3]+2.*globals()["theta"][(i+2) % 3])*2./transmit[i]
+        coeff_stokes[1, i] = (-pol_eff[(i+1) % 3]*np.sin(2.*globals()["theta"][(i+1) % 3]) +
+                              pol_eff[(i+2) % 3]*np.sin(2.*globals()["theta"][(i+2) % 3]))*2./transmit[i]
+        coeff_stokes[2, i] = (pol_eff[(i+1) % 3]*np.cos(2.*globals()["theta"][(i+1) % 3]) -
+                              pol_eff[(i+2) % 3]*np.cos(2.*globals()["theta"][(i+2) % 3]))*2./transmit[i]
     # Normalization parameter for Stokes parameters computation
     A = (coeff_stokes[0, :]*transmit/2.).sum()
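
For context on coeff_stokes: an ideal polarizer at angle theta_i passes f_i = (I + Q·cos 2theta_i + U·sin 2theta_i)/2, so the three POL0/POL60/POL120 fluxes form a linear system whose inverse recovers I, Q, U; coeff_stokes is that inverse generalized with per-polarizer efficiencies and transmittances. A sketch in the ideal case (illustrative only, not pipeline code):

    import numpy as np

    theta = np.deg2rad([0., 60., 120.])  # POL0/POL60/POL120 axis angles (ideal)
    I, Q, U = 1.0, 0.1, -0.05            # test Stokes parameters

    # Ideal polarizer response: f_i = (I + Q*cos(2*theta_i) + U*sin(2*theta_i)) / 2
    M = 0.5 * np.column_stack([np.ones(3), np.cos(2*theta), np.sin(2*theta)])
    flux = M @ np.array([I, Q, U])

    # Inverting the system recovers the Stokes parameters.
    print(np.linalg.solve(M, flux))  # -> [ 1.    0.1  -0.05]
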
@@ -1173,34 +1175,34 @@ def compute_Stokes(data_array, error_array, data_mask, headers, FWHM=None, scale
     s_U2_stat = np.sum([coeff_stokes[2, i]**2*sigma_flux[i]**2 for i in range(len(sigma_flux))], axis=0)

     # Compute the derivative of each Stokes parameter with respect to the polarizer orientation
-    dI_dtheta1 = 2.*pol_eff[0]/A*(pol_eff[2]*np.cos(-2.*theta[2]+2.*theta[0])*(pol_flux[1]-I_stokes) -
-                                  pol_eff[1]*np.cos(-2.*theta[0]+2.*theta[1])*(pol_flux[2]-I_stokes))
-    dI_dtheta2 = 2.*pol_eff[1]/A*(pol_eff[0]*np.cos(-2.*theta[0]+2.*theta[1])*(pol_flux[2]-I_stokes) -
-                                  pol_eff[2]*np.cos(-2.*theta[1]+2.*theta[2])*(pol_flux[0]-I_stokes))
-    dI_dtheta3 = 2.*pol_eff[2]/A*(pol_eff[1]*np.cos(-2.*theta[1]+2.*theta[2])*(pol_flux[0]-I_stokes) -
-                                  pol_eff[0]*np.cos(-2.*theta[2]+2.*theta[0])*(pol_flux[1]-I_stokes))
+    dI_dtheta1 = 2.*pol_eff[0]/A*(pol_eff[2]*np.cos(-2.*globals()["theta"][2]+2.*globals()["theta"][0])*(pol_flux[1]-I_stokes) -
+                                  pol_eff[1]*np.cos(-2.*globals()["theta"][0]+2.*globals()["theta"][1])*(pol_flux[2]-I_stokes))
+    dI_dtheta2 = 2.*pol_eff[1]/A*(pol_eff[0]*np.cos(-2.*globals()["theta"][0]+2.*globals()["theta"][1])*(pol_flux[2]-I_stokes) -
+                                  pol_eff[2]*np.cos(-2.*globals()["theta"][1]+2.*globals()["theta"][2])*(pol_flux[0]-I_stokes))
+    dI_dtheta3 = 2.*pol_eff[2]/A*(pol_eff[1]*np.cos(-2.*globals()["theta"][1]+2.*globals()["theta"][2])*(pol_flux[0]-I_stokes) -
+                                  pol_eff[0]*np.cos(-2.*globals()["theta"][2]+2.*globals()["theta"][0])*(pol_flux[1]-I_stokes))
     dI_dtheta = np.array([dI_dtheta1, dI_dtheta2, dI_dtheta3])

-    dQ_dtheta1 = 2.*pol_eff[0]/A*(np.cos(2.*theta[0])*(pol_flux[1]-pol_flux[2]) - (pol_eff[2]*np.cos(-2. *
-                                  theta[2]+2.*theta[0]) - pol_eff[1]*np.cos(-2.*theta[0]+2.*theta[1]))*Q_stokes)
-    dQ_dtheta2 = 2.*pol_eff[1]/A*(np.cos(2.*theta[1])*(pol_flux[2]-pol_flux[0]) - (pol_eff[0]*np.cos(-2. *
-                                  theta[0]+2.*theta[1]) - pol_eff[2]*np.cos(-2.*theta[1]+2.*theta[2]))*Q_stokes)
-    dQ_dtheta3 = 2.*pol_eff[2]/A*(np.cos(2.*theta[2])*(pol_flux[0]-pol_flux[1]) - (pol_eff[1]*np.cos(-2. *
-                                  theta[1]+2.*theta[2]) - pol_eff[0]*np.cos(-2.*theta[2]+2.*theta[0]))*Q_stokes)
+    dQ_dtheta1 = 2.*pol_eff[0]/A*(np.cos(2.*globals()["theta"][0])*(pol_flux[1]-pol_flux[2]) - (pol_eff[2]*np.cos(-2.*globals()["theta"][2]+2.*globals()["theta"][0]) - pol_eff[1]*np.cos(-2.*globals()["theta"][0]+2.*globals()["theta"][1]))*Q_stokes)
+    dQ_dtheta2 = 2.*pol_eff[1]/A*(np.cos(2.*globals()["theta"][1])*(pol_flux[2]-pol_flux[0]) - (pol_eff[0]*np.cos(-2.*globals()["theta"][0]+2.*globals()["theta"][1]) - pol_eff[2]*np.cos(-2.*globals()["theta"][1]+2.*globals()["theta"][2]))*Q_stokes)
+    dQ_dtheta3 = 2.*pol_eff[2]/A*(np.cos(2.*globals()["theta"][2])*(pol_flux[0]-pol_flux[1]) - (pol_eff[1]*np.cos(-2.*globals()["theta"][1]+2.*globals()["theta"][2]) - pol_eff[0]*np.cos(-2.*globals()["theta"][2]+2.*globals()["theta"][0]))*Q_stokes)
     dQ_dtheta = np.array([dQ_dtheta1, dQ_dtheta2, dQ_dtheta3])

-    dU_dtheta1 = 2.*pol_eff[0]/A*(np.sin(2.*theta[0])*(pol_flux[1]-pol_flux[2]) - (pol_eff[2]*np.cos(-2. *
-                                  theta[2]+2.*theta[0]) - pol_eff[1]*np.cos(-2.*theta[0]+2.*theta[1]))*U_stokes)
-    dU_dtheta2 = 2.*pol_eff[1]/A*(np.sin(2.*theta[1])*(pol_flux[2]-pol_flux[0]) - (pol_eff[0]*np.cos(-2. *
-                                  theta[0]+2.*theta[1]) - pol_eff[2]*np.cos(-2.*theta[1]+2.*theta[2]))*U_stokes)
-    dU_dtheta3 = 2.*pol_eff[2]/A*(np.sin(2.*theta[2])*(pol_flux[0]-pol_flux[1]) - (pol_eff[1]*np.cos(-2. *
-                                  theta[1]+2.*theta[2]) - pol_eff[0]*np.cos(-2.*theta[2]+2.*theta[0]))*U_stokes)
+    dU_dtheta1 = 2.*pol_eff[0]/A*(np.sin(2.*globals()["theta"][0])*(pol_flux[1]-pol_flux[2]) - (pol_eff[2]*np.cos(-2.*globals()["theta"][2]+2.*globals()["theta"][0]) - pol_eff[1]*np.cos(-2.*globals()["theta"][0]+2.*globals()["theta"][1]))*U_stokes)
+    dU_dtheta2 = 2.*pol_eff[1]/A*(np.sin(2.*globals()["theta"][1])*(pol_flux[2]-pol_flux[0]) - (pol_eff[0]*np.cos(-2.*globals()["theta"][0]+2.*globals()["theta"][1]) - pol_eff[2]*np.cos(-2.*globals()["theta"][1]+2.*globals()["theta"][2]))*U_stokes)
+    dU_dtheta3 = 2.*pol_eff[2]/A*(np.sin(2.*globals()["theta"][2])*(pol_flux[0]-pol_flux[1]) - (pol_eff[1]*np.cos(-2.*globals()["theta"][1]+2.*globals()["theta"][2]) - pol_eff[0]*np.cos(-2.*globals()["theta"][2]+2.*globals()["theta"][0]))*U_stokes)
     dU_dtheta = np.array([dU_dtheta1, dU_dtheta2, dU_dtheta3])

     # Compute the uncertainty associated with the polarizers' orientation (see Kishimoto 1999)
-    s_I2_axis = np.sum([dI_dtheta[i]**2 * sigma_theta[i]**2 for i in range(len(sigma_theta))], axis=0)
-    s_Q2_axis = np.sum([dQ_dtheta[i]**2 * sigma_theta[i]**2 for i in range(len(sigma_theta))], axis=0)
-    s_U2_axis = np.sum([dU_dtheta[i]**2 * sigma_theta[i]**2 for i in range(len(sigma_theta))], axis=0)
+    s_I2_axis = np.sum([dI_dtheta[i]**2 * globals()["sigma_theta"][i]**2 for i in range(len(globals()["sigma_theta"]))], axis=0)
+    s_Q2_axis = np.sum([dQ_dtheta[i]**2 * globals()["sigma_theta"][i]**2 for i in range(len(globals()["sigma_theta"]))], axis=0)
+    s_U2_axis = np.sum([dU_dtheta[i]**2 * globals()["sigma_theta"][i]**2 for i in range(len(globals()["sigma_theta"]))], axis=0)
     # np.savetxt("output/sI_dir.txt", np.sqrt(s_I2_axis))
     # np.savetxt("output/sQ_dir.txt", np.sqrt(s_Q2_axis))
     # np.savetxt("output/sU_dir.txt", np.sqrt(s_U2_axis))
@@ -1212,7 +1214,6 @@ def compute_Stokes(data_array, error_array, data_mask, headers, FWHM=None, scale
     # Compute integrated values for P, PA before any rotation
     mask = np.logical_and(data_mask.astype(bool), (I_stokes > 0.))
-    n_pix = I_stokes[mask].size
     I_diluted = I_stokes[mask].sum()
     Q_diluted = Q_stokes[mask].sum()
     U_diluted = U_stokes[mask].sum()
@@ -1224,12 +1225,10 @@ def compute_Stokes(data_array, error_array, data_mask, headers, FWHM=None, scale
     QU_diluted_err = np.sqrt(np.sum(Stokes_cov[1, 2][mask]**2))

     P_diluted = np.sqrt(Q_diluted**2+U_diluted**2)/I_diluted
-    P_diluted_err = (1./I_diluted)*np.sqrt((Q_diluted**2*Q_diluted_err**2 + U_diluted**2*U_diluted_err**2 + 2.*Q_diluted*U_diluted*QU_diluted_err)/(Q_diluted**2 + U_diluted **
-                                           2) + ((Q_diluted/I_diluted)**2 + (U_diluted/I_diluted)**2)*I_diluted_err**2 - 2.*(Q_diluted/I_diluted)*IQ_diluted_err - 2.*(U_diluted/I_diluted)*IU_diluted_err)
+    P_diluted_err = (1./I_diluted)*np.sqrt((Q_diluted**2*Q_diluted_err**2 + U_diluted**2*U_diluted_err**2 + 2.*Q_diluted*U_diluted*QU_diluted_err)/(Q_diluted**2 + U_diluted**2) + ((Q_diluted/I_diluted)**2 + (U_diluted/I_diluted)**2)*I_diluted_err**2 - 2.*(Q_diluted/I_diluted)*IQ_diluted_err - 2.*(U_diluted/I_diluted)*IU_diluted_err)

     PA_diluted = princ_angle((90./np.pi)*np.arctan2(U_diluted, Q_diluted))
-    PA_diluted_err = (90./(np.pi*(Q_diluted**2 + U_diluted**2)))*np.sqrt(U_diluted**2*Q_diluted_err **
-                                                                         2 + Q_diluted**2*U_diluted_err**2 - 2.*Q_diluted*U_diluted*QU_diluted_err)
+    PA_diluted_err = (90./(np.pi*(Q_diluted**2 + U_diluted**2)))*np.sqrt(U_diluted**2*Q_diluted_err**2 + Q_diluted**2*U_diluted_err**2 - 2.*Q_diluted*U_diluted*QU_diluted_err)

     for header in headers:
         header['P_int'] = (P_diluted, 'Integrated polarisation degree')
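
The reflowed single-line expressions above are standard first-order propagation for P = sqrt(Q²+U²)/I and PA = (90/pi)·atan2(U, Q). Written out as a helper, with hypothetical names, taking variances (sX2) and covariance terms (sXY) as the pipeline's `*_err` quantities enter the formulas:

    import numpy as np

    def pol_deg_angle_err(I, Q, U, sI2, sQ2, sU2, sIQ, sIU, sQU):
        """P, PA and their propagated errors, mirroring the expressions above."""
        P = np.sqrt(Q**2 + U**2) / I
        sP = (1./I) * np.sqrt(
            (Q**2*sQ2 + U**2*sU2 + 2.*Q*U*sQU) / (Q**2 + U**2)
            + ((Q/I)**2 + (U/I)**2) * sI2
            - 2.*(Q/I)*sIQ - 2.*(U/I)*sIU)
        PA = (90./np.pi) * np.arctan2(U, Q)
        sPA = (90./(np.pi*(Q**2 + U**2))) * np.sqrt(U**2*sQ2 + Q**2*sU2 - 2.*Q*U*sQU)
        return P, sP, PA, sPA
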
@@ -1307,7 +1306,7 @@ def compute_pol(I_stokes, Q_stokes, U_stokes, Stokes_cov, headers):
     s_PA[np.isnan(s_PA)] = fmax
     # Catch expected "OverflowWarning" as wrong pixels have an overflowing error
-    with warnings.catch_warnings(record=True) as w:
+    with warnings.catch_warnings(record=True) as _:
         mask2 = P**2 >= s_P**2
         debiased_P = np.zeros(I_stokes.shape)
         debiased_P[mask2] = np.sqrt(P[mask2]**2 - s_P[mask2]**2)
@@ -1474,7 +1473,6 @@ def rotate_Stokes(I_stokes, Q_stokes, U_stokes, Stokes_cov, data_mask, headers,
     # Compute updated integrated values for P, PA
     mask = deepcopy(new_data_mask).astype(bool)
-    n_pix = new_I_stokes[mask].size
     I_diluted = new_I_stokes[mask].sum()
     Q_diluted = new_Q_stokes[mask].sum()
     U_diluted = new_U_stokes[mask].sum()
@@ -1486,12 +1484,10 @@ def rotate_Stokes(I_stokes, Q_stokes, U_stokes, Stokes_cov, data_mask, headers,
     QU_diluted_err = np.sqrt(np.sum(new_Stokes_cov[1, 2][mask]**2))

     P_diluted = np.sqrt(Q_diluted**2+U_diluted**2)/I_diluted
-    P_diluted_err = (1./I_diluted)*np.sqrt((Q_diluted**2*Q_diluted_err**2 + U_diluted**2*U_diluted_err**2 + 2.*Q_diluted*U_diluted*QU_diluted_err)/(Q_diluted**2 + U_diluted **
-                                           2) + ((Q_diluted/I_diluted)**2 + (U_diluted/I_diluted)**2)*I_diluted_err**2 - 2.*(Q_diluted/I_diluted)*IQ_diluted_err - 2.*(U_diluted/I_diluted)*IU_diluted_err)
+    P_diluted_err = (1./I_diluted)*np.sqrt((Q_diluted**2*Q_diluted_err**2 + U_diluted**2*U_diluted_err**2 + 2.*Q_diluted*U_diluted*QU_diluted_err)/(Q_diluted**2 + U_diluted**2) + ((Q_diluted/I_diluted)**2 + (U_diluted/I_diluted)**2)*I_diluted_err**2 - 2.*(Q_diluted/I_diluted)*IQ_diluted_err - 2.*(U_diluted/I_diluted)*IU_diluted_err)

     PA_diluted = princ_angle((90./np.pi)*np.arctan2(U_diluted, Q_diluted))
-    PA_diluted_err = (90./(np.pi*(Q_diluted**2 + U_diluted**2)))*np.sqrt(U_diluted**2*Q_diluted_err **
-                                                                         2 + Q_diluted**2*U_diluted_err**2 - 2.*Q_diluted*U_diluted*QU_diluted_err)
+    PA_diluted_err = (90./(np.pi*(Q_diluted**2 + U_diluted**2)))*np.sqrt(U_diluted**2*Q_diluted_err**2 + Q_diluted**2*U_diluted_err**2 - 2.*Q_diluted*U_diluted*QU_diluted_err)

     for header in new_headers:
         header['P_int'] = (P_diluted, 'Integrated polarisation degree')
@@ -1573,6 +1569,6 @@ def rotate_data(data_array, error_array, data_mask, headers, ang):
             new_header[key] = val
         new_headers.append(new_header)

-    globals()['theta'] = theta - alpha
+    globals()['theta'] = globals()["theta"] - alpha

     return new_data_array, new_error_array, new_data_mask, new_headers

src/lib/utils.py (Normal file → Executable file)
View File

@@ -10,7 +10,8 @@ def rot2D(ang):
 def princ_angle(ang):
     """
-    Return the principal angle in the 0° to 360° quadrant.
+    Return the principal angle in the 0° to 180° quadrant, as PA is always
+    defined up to ±180°.
     """
     if not isinstance(ang, np.ndarray):
         A = np.array([ang])
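
The new docstring matches the physics: a polarization angle is only defined modulo 180°, so the principal value lives in [0°, 180°). A minimal sketch of the documented behaviour (not necessarily the library's exact implementation):

    import numpy as np

    def princ_angle_sketch(ang):
        # Fold any angle into [0., 180.): PA and PA + 180 deg describe
        # the same polarization orientation.
        return np.asarray(ang, dtype=float) % 180.

    print(princ_angle_sketch(-30.))  # 150.0
    print(princ_angle_sketch(270.))  # 90.0
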

View File

@@ -1,72 +1,44 @@
 #!/usr/bin/python3
-from os import system as command
 from astropy.io import fits
 import numpy as np
-from copy import deepcopy
-from lib.plots import overplot_radio, overplot_pol, align_pol
+from lib.plots import overplot_radio, overplot_pol
 from matplotlib.colors import LogNorm

 Stokes_UV = fits.open("./data/IC5063/5918/IC5063_FOC_b0.10arcsec_c0.20arcsec.fits")
-# Stokes_18GHz = fits.open("./data/IC5063/radio/IC5063_18GHz.fits")
-# Stokes_24GHz = fits.open("./data/IC5063/radio/IC5063_24GHz.fits")
-# Stokes_103GHz = fits.open("./data/IC5063/radio/IC5063_103GHz.fits")
-# Stokes_229GHz = fits.open("./data/IC5063/radio/IC5063_229GHz.fits")
-# Stokes_357GHz = fits.open("./data/IC5063/radio/IC5063_357GHz.fits")
+Stokes_18GHz = fits.open("./data/IC5063/radio/IC5063_18GHz.fits")
+Stokes_24GHz = fits.open("./data/IC5063/radio/IC5063_24GHz.fits")
+Stokes_103GHz = fits.open("./data/IC5063/radio/IC5063_103GHz.fits")
+Stokes_229GHz = fits.open("./data/IC5063/radio/IC5063_229GHz.fits")
+Stokes_357GHz = fits.open("./data/IC5063/radio/IC5063_357GHz.fits")
 # Stokes_S2 = fits.open("./data/IC5063/POLARIZATION_COMPARISON/S2_rot_crop.fits")
 Stokes_IR = fits.open("./data/IC5063/IR/u2e65g01t_c0f_rot.fits")

 # levelsMorganti = np.array([1.,2.,3.,8.,16.,32.,64.,128.])
-# levelsMorganti = np.logspace(0.,1.97,5)/100.
-#
-# levels18GHz = levelsMorganti*Stokes_18GHz[0].data.max()
-# A = overplot_radio(Stokes_UV, Stokes_18GHz)
-# A.plot(levels=levels18GHz, SNRp_cut=2.0, SNRi_cut=10.0, savename='./plots/IC5063/18GHz_overplot_forced.pdf',vec_scale=None)
-##
-# levels24GHz = levelsMorganti*Stokes_24GHz[0].data.max()
-# B = overplot_radio(Stokes_UV, Stokes_24GHz)
-# B.plot(levels=levels24GHz, SNRp_cut=2.0, SNRi_cut=10.0, savename='./plots/IC5063/24GHz_overplot_forced.pdf',vec_scale=None)
-##
-# levels103GHz = levelsMorganti*Stokes_103GHz[0].data.max()
-# C = overplot_radio(Stokes_UV, Stokes_103GHz)
-# C.plot(levels=levels103GHz, SNRp_cut=2.0, SNRi_cut=10.0, savename='./plots/IC5063/103GHz_overplot_forced.pdf',vec_scale=None)
-##
-# levels229GHz = levelsMorganti*Stokes_229GHz[0].data.max()
-# D = overplot_radio(Stokes_UV, Stokes_229GHz)
-# D.plot(levels=levels229GHz, SNRp_cut=2.0, SNRi_cut=10.0, savename='./plots/IC5063/229GHz_overplot_forced.pdf',vec_scale=None)
-##
-# levels357GHz = levelsMorganti*Stokes_357GHz[0].data.max()
-# E = overplot_radio(Stokes_UV, Stokes_357GHz)
-# E.plot(levels=levels357GHz, SNRp_cut=2.0, SNRi_cut=10.0, savename='./plots/IC5063/357GHz_overplot_forced.pdf',vec_scale=None)
-##
+levelsMorganti = np.logspace(-0.1249, 1.97, 7)/100.
+
+levels18GHz = levelsMorganti*Stokes_18GHz[0].data.max()
+A = overplot_radio(Stokes_UV, Stokes_18GHz)
+A.plot(levels=levels18GHz, SNRp_cut=2.0, SNRi_cut=10.0, savename='./plots/IC5063/18GHz_overplot.pdf', vec_scale=None)
+
+levels24GHz = levelsMorganti*Stokes_24GHz[0].data.max()
+B = overplot_radio(Stokes_UV, Stokes_24GHz)
+B.plot(levels=levels24GHz, SNRp_cut=2.0, SNRi_cut=10.0, savename='./plots/IC5063/24GHz_overplot.pdf', vec_scale=None)
+
+levels103GHz = levelsMorganti*Stokes_103GHz[0].data.max()
+C = overplot_radio(Stokes_UV, Stokes_103GHz)
+C.plot(levels=levels103GHz, SNRp_cut=2.0, SNRi_cut=10.0, savename='./plots/IC5063/103GHz_overplot.pdf', vec_scale=None)
+
+levels229GHz = levelsMorganti*Stokes_229GHz[0].data.max()
+D = overplot_radio(Stokes_UV, Stokes_229GHz)
+D.plot(levels=levels229GHz, SNRp_cut=2.0, SNRi_cut=10.0, savename='./plots/IC5063/229GHz_overplot.pdf', vec_scale=None)
+
+levels357GHz = levelsMorganti*Stokes_357GHz[0].data.max()
+E = overplot_radio(Stokes_UV, Stokes_357GHz)
+E.plot(levels=levels357GHz, SNRp_cut=2.0, SNRi_cut=10.0, savename='./plots/IC5063/357GHz_overplot.pdf', vec_scale=None)

 # F = overplot_pol(Stokes_UV, Stokes_S2)
-# F.plot(SNRp_cut=3.0, SNRi_cut=80.0, savename='./plots/IC5063/S2_overplot_forced.pdf', norm=LogNorm(vmin=5e-20,vmax=5e-18))
+# F.plot(SNRp_cut=3.0, SNRi_cut=80.0, savename='./plots/IC5063/S2_overplot.pdf', norm=LogNorm(vmin=5e-20,vmax=5e-18))

 G = overplot_pol(Stokes_UV, Stokes_IR, cmap='inferno')
-G.plot(SNRp_cut=2.0, SNRi_cut=10.0, savename='./plots/IC5063/IR_overplot_forced.pdf', vec_scale=None,
+G.plot(SNRp_cut=2.0, SNRi_cut=10.0, savename='./plots/IC5063/IR_overplot.pdf', vec_scale=None,
        norm=LogNorm(Stokes_IR[0].data.max()*Stokes_IR[0].header['photflam']/1e3, Stokes_IR[0].data.max()*Stokes_IR[0].header['photflam']), cmap='inferno_r')
-
-# data_folder1 = "./data/M87/POS1/"
-# plots_folder1 = "./plots/M87/POS1/"
-# basename1 = "M87_020_log"
-# M87_1_95 = fits.open(data_folder1+"M87_POS1_1995_FOC_combine_FWHM020.fits")
-# M87_1_96 = fits.open(data_folder1+"M87_POS1_1996_FOC_combine_FWHM020.fits")
-# M87_1_97 = fits.open(data_folder1+"M87_POS1_1997_FOC_combine_FWHM020.fits")
-# M87_1_98 = fits.open(data_folder1+"M87_POS1_1998_FOC_combine_FWHM020.fits")
-# M87_1_99 = fits.open(data_folder1+"M87_POS1_1999_FOC_combine_FWHM020.fits")
-# H = align_pol(np.array([M87_1_95,M87_1_96,M87_1_97,M87_1_98,M87_1_99]), norm=LogNorm())
-# H.plot(SNRp_cut=5.0, SNRi_cut=50.0, savename=plots_folder1+'animated_loop/'+basename1, norm=LogNorm())
-# command("convert -delay 50 -loop 0 {0:s}animated_loop/{1:s}*.pdf {0:s}animated_loop/{1:s}.gif".format(plots_folder1, basename1))
-
-# data_folder3 = "./data/M87/POS3/"
-# plots_folder3 = "./plots/M87/POS3/"
-# basename3 = "M87_020_log"
-# M87_3_95 = fits.open(data_folder3+"M87_POS3_1995_FOC_combine_FWHM020.fits")
-# M87_3_96 = fits.open(data_folder3+"M87_POS3_1996_FOC_combine_FWHM020.fits")
-# M87_3_97 = fits.open(data_folder3+"M87_POS3_1997_FOC_combine_FWHM020.fits")
-# M87_3_98 = fits.open(data_folder3+"M87_POS3_1998_FOC_combine_FWHM020.fits")
-# M87_3_99 = fits.open(data_folder3+"M87_POS3_1999_FOC_combine_FWHM020.fits")
-# I = align_pol(np.array([M87_3_95,M87_3_96,M87_3_97,M87_3_98,M87_3_99]), norm=LogNorm())
-# I.plot(SNRp_cut=5.0, SNRi_cut=50.0, savename=plots_folder3+'animated_loop/'+basename3, norm=LogNorm())
-# command("convert -delay 20 -loop 0 {0:s}animated_loop/{1:s}*.pdf {0:s}animated_loop/{1:s}.gif".format(plots_folder3, basename3))