modify files to comply with pep8 format
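The changes below are the usual mechanical PEP 8 fixes: `is not` comparisons (E714), `isinstance()` instead of `type()` comparisons (E721), one statement per line (E701/E702), named functions instead of assigned lambdas (E731), wrapped long lines (E501), normalized comment markers (E262/E265), two blank lines around top-level definitions (E302), and removal of unused imports and variables (F401/F841). A minimal sketch of the main idioms (x and off are hypothetical names, not from the diff):

    if x is not None:        # E714 — instead of "not x is None"
        pass
    if isinstance(x, int):   # E721 — instead of "type(x) == int"
        pass
    i0 = off[0]              # E702 — instead of "i0 = off[0]; n0 = i0 + 1"
    n0 = i0 + 1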
@@ -15,7 +15,7 @@ from matplotlib.colors import LogNorm
 
 
 def main(target=None, proposal_id=None, infiles=None, output_dir="./data", crop=0, interactive=0):
-    ## Reduction parameters
+    # Reduction parameters
     # Deconvolution
     deconvolve = False
     if deconvolve:
@@ -58,8 +58,8 @@ def main(target=None, proposal_id=None, infiles=None, output_dir="./data", crop=
     rotate_stokes = True
 
     # Final crop
-    # crop = False #Crop to desired ROI
-    # interactive = False #Whether to output to intercative analysis tool
+    # crop = False  # Crop to desired ROI
+    # interactive = False  # Whether to output to interactive analysis tool
 
     # Polarization map output
     SNRp_cut = 3.  # P measurements with SNR>3
@@ -68,10 +68,10 @@ def main(target=None, proposal_id=None, infiles=None, output_dir="./data", crop=
     vec_scale = 3
     step_vec = 1  # plot all vectors in the array; if step_vec = 2, every other vector is plotted; if step_vec = 0, all vectors are displayed at full length
 
-    ##### Pipeline start
-    ## Step 1:
+    # Pipeline start
+    # Step 1:
     # Get data from fits files and translate to flux in erg/cm²/s/Angstrom.
-    if not infiles is None:
+    if infiles is not None:
         prod = np.array([["/".join(filepath.split('/')[:-1]), filepath.split('/')[-1]] for filepath in infiles], dtype=str)
         obs_dir = "/".join(infiles[0].split("/")[:-1])
         if not path_exists(obs_dir):
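The `if not infiles is None:` → `if infiles is not None:` rewrite above is flake8 E714. Both spellings compile to the same identity test, but `is not` reads as a single operator instead of leaving `not` to negate the whole comparison. A minimal illustration (`infiles` is just a stand-in value here):

    infiles = None
    print(not infiles is None)   # False — works, but flagged as E714
    print(infiles is not None)   # False — the PEP 8 spelling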
@@ -100,12 +100,14 @@ def main(target=None, proposal_id=None, infiles=None, output_dir="./data", crop=
     else:
         figtype = "full"
     if smoothing_FWHM is not None:
-        figtype += "_"+"".join(["".join([s[0] for s in smoothing_function.split("_")]), "{0:.2f}".format(smoothing_FWHM), smoothing_scale])  # additionnal informations
+        figtype += "_"+"".join(["".join([s[0] for s in smoothing_function.split("_")]),
+                                "{0:.2f}".format(smoothing_FWHM), smoothing_scale])  # additional information
     if align_center is None:
         figtype += "_not_aligned"
 
     # Crop data to remove outside blank margins.
-    data_array, error_array, headers = proj_red.crop_array(data_array, headers, step=5, null_val=0., inside=True, display=display_crop, savename=figname, plots_folder=plots_folder)
+    data_array, error_array, headers = proj_red.crop_array(data_array, headers, step=5, null_val=0.,
+                                                           inside=True, display=display_crop, savename=figname, plots_folder=plots_folder)
 
     # Deconvolve data using Richardson-Lucy iterative algorithm with a gaussian PSF of given FWHM.
     if deconvolve:
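Both rewrites in this hunk are E501 (line too long) fixes: the argument list is broken after a comma and the continuation is aligned under the opening parenthesis, so Python's implicit line joining inside brackets does the rest with no backslashes. The pattern, with a made-up call:

    result = some_function(first_argument, second_argument,
                           third_argument, fourth_argument)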
@@ -141,7 +143,7 @@ def main(target=None, proposal_id=None, infiles=None, output_dir="./data", crop=
     background = np.array([np.array(bkg).reshape(1, 1) for bkg in background])
     background_error = np.array([np.array(np.sqrt((bkg-background[np.array([h['filtnam1'] == head['filtnam1'] for h in headers], dtype=bool)].mean()) ** 2/np.sum([h['filtnam1'] == head['filtnam1'] for h in headers]))).reshape(1, 1) for bkg, head in zip(background, headers)])
 
-    ## Step 2:
+    # Step 2:
     # Compute Stokes I, Q, U with smoothed polarized images
     # SMOOTHING DISCUSSION :
     # FWHM of FOC has been estimated at about 0.03" across the 1500-5000 Angstrom band, which is about 2 detector pixels wide
@@ -150,7 +152,7 @@ def main(target=None, proposal_id=None, infiles=None, output_dir="./data", crop=
     I_stokes, Q_stokes, U_stokes, Stokes_cov = proj_red.compute_Stokes(data_array, error_array, data_mask, headers, FWHM=smoothing_FWHM, scale=smoothing_scale, smoothing=smoothing_function, transmitcorr=False)
     I_bkg, Q_bkg, U_bkg, S_cov_bkg = proj_red.compute_Stokes(background, background_error, np.array(True).reshape(1, 1), headers, FWHM=None, scale=smoothing_scale, smoothing=smoothing_function, transmitcorr=False)
 
-    ## Step 3:
+    # Step 3:
     # Rotate images to have North up
     if rotate_stokes:
         I_stokes, Q_stokes, U_stokes, Stokes_cov, data_mask, headers = proj_red.rotate_Stokes(I_stokes, Q_stokes, U_stokes, Stokes_cov, data_mask, headers, SNRi_cut=None)
@@ -160,12 +162,12 @@ def main(target=None, proposal_id=None, infiles=None, output_dir="./data", crop=
     P, debiased_P, s_P, s_P_P, PA, s_PA, s_PA_P = proj_red.compute_pol(I_stokes, Q_stokes, U_stokes, Stokes_cov, headers)
     P_bkg, debiased_P_bkg, s_P_bkg, s_P_P_bkg, PA_bkg, s_PA_bkg, s_PA_P_bkg = proj_red.compute_pol(I_bkg, Q_bkg, U_bkg, S_cov_bkg, headers)
 
-    ## Step 4:
+    # Step 4:
     # Save image to FITS.
     Stokes_test = proj_fits.save_Stokes(I_stokes, Q_stokes, U_stokes, Stokes_cov, P, debiased_P, s_P, s_P_P, PA, s_PA, s_PA_P, headers, data_mask, "_".join([figname, figtype]), data_folder=data_folder, return_hdul=True)
     data_mask = Stokes_test[-1].data.astype(bool)
 
-    ## Step 5:
+    # Step 5:
     # crop to desired region of interest (roi)
     if crop:
         figtype += "_crop"
@@ -183,19 +185,29 @@ def main(target=None, proposal_id=None, infiles=None, output_dir="./data", crop=
     print("PA_bkg = {0:.1f} ± {1:.1f} °".format(PA_bkg[0, 0], np.ceil(s_PA_bkg[0, 0]*10.)/10.))
     # Plot polarisation map (Background is either total Flux, Polarization degree or Polarization degree error).
     if px_scale.lower() not in ['full', 'integrate'] and not interactive:
-        proj_plots.polarisation_map(deepcopy(Stokes_test), data_mask, SNRp_cut=SNRp_cut, SNRi_cut=SNRi_cut, flux_lim=flux_lim, step_vec=step_vec, vec_scale=vec_scale, savename="_".join([figname, figtype]), plots_folder=plots_folder)
-        proj_plots.polarisation_map(deepcopy(Stokes_test), data_mask, SNRp_cut=SNRp_cut, SNRi_cut=SNRi_cut, flux_lim=flux_lim, step_vec=step_vec, vec_scale=vec_scale, savename="_".join([figname, figtype, "I"]), plots_folder=plots_folder, display='Intensity')
-        proj_plots.polarisation_map(deepcopy(Stokes_test), data_mask, SNRp_cut=SNRp_cut, SNRi_cut=SNRi_cut, flux_lim=flux_lim, step_vec=step_vec, vec_scale=vec_scale, savename="_".join([figname, figtype, "P_flux"]), plots_folder=plots_folder, display='Pol_Flux')
-        proj_plots.polarisation_map(deepcopy(Stokes_test), data_mask, SNRp_cut=SNRp_cut, SNRi_cut=SNRi_cut, flux_lim=flux_lim, step_vec=step_vec, vec_scale=vec_scale, savename="_".join([figname, figtype, "P"]), plots_folder=plots_folder, display='Pol_deg')
-        proj_plots.polarisation_map(deepcopy(Stokes_test), data_mask, SNRp_cut=SNRp_cut, SNRi_cut=SNRi_cut, flux_lim=flux_lim, step_vec=step_vec, vec_scale=vec_scale, savename="_".join([figname, figtype, "PA"]), plots_folder=plots_folder, display='Pol_ang')
-        proj_plots.polarisation_map(deepcopy(Stokes_test), data_mask, SNRp_cut=SNRp_cut, SNRi_cut=SNRi_cut, flux_lim=flux_lim, step_vec=step_vec, vec_scale=vec_scale, savename="_".join([figname, figtype, "I_err"]), plots_folder=plots_folder, display='I_err')
-        proj_plots.polarisation_map(deepcopy(Stokes_test), data_mask, SNRp_cut=SNRp_cut, SNRi_cut=SNRi_cut, flux_lim=flux_lim, step_vec=step_vec, vec_scale=vec_scale, savename="_".join([figname, figtype, "P_err"]), plots_folder=plots_folder, display='Pol_deg_err')
-        proj_plots.polarisation_map(deepcopy(Stokes_test), data_mask, SNRp_cut=SNRp_cut, SNRi_cut=SNRi_cut, flux_lim=flux_lim, step_vec=step_vec, vec_scale=vec_scale, savename="_".join([figname, figtype, "SNRi"]), plots_folder=plots_folder, display='SNRi')
-        proj_plots.polarisation_map(deepcopy(Stokes_test), data_mask, SNRp_cut=SNRp_cut, SNRi_cut=SNRi_cut, flux_lim=flux_lim, step_vec=step_vec, vec_scale=vec_scale, savename="_".join([figname, figtype, "SNRp"]), plots_folder=plots_folder, display='SNRp')
+        proj_plots.polarisation_map(deepcopy(Stokes_test), data_mask, SNRp_cut=SNRp_cut, SNRi_cut=SNRi_cut, flux_lim=flux_lim,
+                                    step_vec=step_vec, vec_scale=vec_scale, savename="_".join([figname, figtype]), plots_folder=plots_folder)
+        proj_plots.polarisation_map(deepcopy(Stokes_test), data_mask, SNRp_cut=SNRp_cut, SNRi_cut=SNRi_cut, flux_lim=flux_lim, step_vec=step_vec,
+                                    vec_scale=vec_scale, savename="_".join([figname, figtype, "I"]), plots_folder=plots_folder, display='Intensity')
+        proj_plots.polarisation_map(deepcopy(Stokes_test), data_mask, SNRp_cut=SNRp_cut, SNRi_cut=SNRi_cut, flux_lim=flux_lim, step_vec=step_vec,
+                                    vec_scale=vec_scale, savename="_".join([figname, figtype, "P_flux"]), plots_folder=plots_folder, display='Pol_Flux')
+        proj_plots.polarisation_map(deepcopy(Stokes_test), data_mask, SNRp_cut=SNRp_cut, SNRi_cut=SNRi_cut, flux_lim=flux_lim, step_vec=step_vec,
+                                    vec_scale=vec_scale, savename="_".join([figname, figtype, "P"]), plots_folder=plots_folder, display='Pol_deg')
+        proj_plots.polarisation_map(deepcopy(Stokes_test), data_mask, SNRp_cut=SNRp_cut, SNRi_cut=SNRi_cut, flux_lim=flux_lim, step_vec=step_vec,
+                                    vec_scale=vec_scale, savename="_".join([figname, figtype, "PA"]), plots_folder=plots_folder, display='Pol_ang')
+        proj_plots.polarisation_map(deepcopy(Stokes_test), data_mask, SNRp_cut=SNRp_cut, SNRi_cut=SNRi_cut, flux_lim=flux_lim, step_vec=step_vec,
+                                    vec_scale=vec_scale, savename="_".join([figname, figtype, "I_err"]), plots_folder=plots_folder, display='I_err')
+        proj_plots.polarisation_map(deepcopy(Stokes_test), data_mask, SNRp_cut=SNRp_cut, SNRi_cut=SNRi_cut, flux_lim=flux_lim, step_vec=step_vec,
+                                    vec_scale=vec_scale, savename="_".join([figname, figtype, "P_err"]), plots_folder=plots_folder, display='Pol_deg_err')
+        proj_plots.polarisation_map(deepcopy(Stokes_test), data_mask, SNRp_cut=SNRp_cut, SNRi_cut=SNRi_cut, flux_lim=flux_lim, step_vec=step_vec,
+                                    vec_scale=vec_scale, savename="_".join([figname, figtype, "SNRi"]), plots_folder=plots_folder, display='SNRi')
+        proj_plots.polarisation_map(deepcopy(Stokes_test), data_mask, SNRp_cut=SNRp_cut, SNRi_cut=SNRi_cut, flux_lim=flux_lim, step_vec=step_vec,
+                                    vec_scale=vec_scale, savename="_".join([figname, figtype, "SNRp"]), plots_folder=plots_folder, display='SNRp')
     elif not interactive:
-        proj_plots.polarisation_map(deepcopy(Stokes_test), data_mask, SNRp_cut=SNRp_cut, SNRi_cut=SNRi_cut, savename="_".join([figname, figtype]), plots_folder=plots_folder, display='integrate')
+        proj_plots.polarisation_map(deepcopy(Stokes_test), data_mask, SNRp_cut=SNRp_cut, SNRi_cut=SNRi_cut,
+                                    savename="_".join([figname, figtype]), plots_folder=plots_folder, display='integrate')
     elif px_scale.lower() not in ['full', 'integrate']:
-        pol_map = proj_plots.pol_map(Stokes_test, SNRp_cut=SNRp_cut, SNRi_cut=SNRi_cut, flux_lim=flux_lim)
+        proj_plots.pol_map(Stokes_test, SNRp_cut=SNRp_cut, SNRi_cut=SNRi_cut, flux_lim=flux_lim)
 
     return 0
 
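The nine near-identical polarisation_map calls above differ only in the savename suffix and the display mode; wrapping them keeps each line under the length limit, but a data-driven loop would remove the duplication entirely. A possible sketch, not what the commit does (the first suffix-less call would stay separate):

    for suffix, disp in [("I", "Intensity"), ("P_flux", "Pol_Flux"), ("P", "Pol_deg"),
                         ("PA", "Pol_ang"), ("I_err", "I_err"), ("P_err", "Pol_deg_err"),
                         ("SNRi", "SNRi"), ("SNRp", "SNRp")]:
        proj_plots.polarisation_map(deepcopy(Stokes_test), data_mask, SNRp_cut=SNRp_cut, SNRi_cut=SNRi_cut,
                                    flux_lim=flux_lim, step_vec=step_vec, vec_scale=vec_scale,
                                    savename="_".join([figname, figtype, suffix]),
                                    plots_folder=plots_folder, display=disp)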
@@ -204,18 +216,15 @@ if __name__ == "__main__":
     import argparse
 
     parser = argparse.ArgumentParser(description='Query MAST for target products')
-    parser.add_argument('-t', '--target', metavar='targetname', required=False,
-                        help='the name of the target', type=str, default=None)
-    parser.add_argument('-p', '--proposal_id', metavar='proposal_id', required=False,
-                        help='the proposal id of the data products', type=int, default=None)
-    parser.add_argument('-f', '--files', metavar='path', required=False, nargs='*',
-                        help='the full or relative path to the data products', default=None)
+    parser.add_argument('-t', '--target', metavar='targetname', required=False, help='the name of the target', type=str, default=None)
+    parser.add_argument('-p', '--proposal_id', metavar='proposal_id', required=False, help='the proposal id of the data products', type=int, default=None)
+    parser.add_argument('-f', '--files', metavar='path', required=False, nargs='*', help='the full or relative path to the data products', default=None)
     parser.add_argument('-o', '--output_dir', metavar='directory_path', required=False,
                         help='output directory path for the data products', type=str, default="./data")
-    parser.add_argument('-c', '--crop', metavar='crop_boolean', required=False,
-                        help='whether to crop the analysis region', type=int, default=0)
+    parser.add_argument('-c', '--crop', metavar='crop_boolean', required=False, help='whether to crop the analysis region', type=int, default=0)
     parser.add_argument('-i', '--interactive', metavar='interactive_boolean', required=False,
                         help='whether to output to the interactive analysis tool', type=int, default=0)
     args = parser.parse_args()
-    exitcode = main(target=args.target, proposal_id=args.proposal_id, infiles=args.files, output_dir=args.output_dir, crop=args.crop, interactive=args.interactive)
+    exitcode = main(target=args.target, proposal_id=args.proposal_id, infiles=args.files,
+                    output_dir=args.output_dir, crop=args.crop, interactive=args.interactive)
     print("Finished with ExitCode: ", exitcode)
@@ -28,7 +28,7 @@ try:
 except get_error as err:
     print(str(err))
 
-if not fits_path is None:
+if fits_path is not None:
     from astropy.io import fits
     from lib.plots import pol_map
 
@@ -9,7 +9,6 @@ prototypes :
 - bkg_mini(data, error, mask, headers, sub_shape, display, savename, plots_folder) -> n_data_array, n_error_array, headers, background
     Compute the error (noise) of the input array by looking at the sub-region of minimal flux in every image and of shape sub_shape.
 """
-import sys
 from os.path import join as path_join
 from copy import deepcopy
 import numpy as np
@@ -21,17 +20,21 @@ from datetime import datetime
 from lib.plots import plot_obs
 from scipy.optimize import curve_fit
 
+
 def gauss(x, *p):
     N, mu, sigma = p
     return N*np.exp(-(x-mu)**2/(2.*sigma**2))
 
+
 def gausspol(x, *p):
     N, mu, sigma, a, b, c, d = p
     return N*np.exp(-(x-mu)**2/(2.*sigma**2)) + a*np.log(x) + b/x + c*x + d
 
+
 def bin_centers(edges):
     return (edges[1:]+edges[:-1])/2.
 
+
 def display_bkg(data, background, std_bkg, headers, histograms=None, binning=None, coeff=None, rectangle=None, savename=None, plots_folder="./"):
     plt.rcParams.update({'font.size': 15})
     convert_flux = np.array([head['photflam'] for head in headers])
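For context, `gauss` and `gausspol` are the model functions fed to `scipy.optimize.curve_fit` when fitting the background histogram (see `bkg_estimate` further down, which appends the fitted `popt` to `coeff`). A minimal, self-contained usage sketch with synthetic data:

    import numpy as np
    from scipy.optimize import curve_fit

    def gauss(x, *p):
        N, mu, sigma = p
        return N*np.exp(-(x-mu)**2/(2.*sigma**2))

    x = np.linspace(-5., 5., 200)
    y = gauss(x, 1., 0., 1.) + 0.01*np.random.randn(x.size)  # noisy Gaussian
    popt, pcov = curve_fit(gauss, x, y, p0=[1., 0., 1.])     # p0 seeds N, mu, sigma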
@@ -73,7 +76,8 @@ def display_bkg(data, background, std_bkg, headers, histograms=None, binning=Non
     fig_h, ax_h = plt.subplots(figsize=(10, 6), constrained_layout=True)
     for i, (hist, bins) in enumerate(zip(histograms, binning)):
         filt_obs[headers[i]['filtnam1']] += 1
-        ax_h.plot(bins*convert_flux[i],hist,'+',color="C{0:d}".format(i),alpha=0.8,label=headers[i]['filtnam1']+' (Obs '+str(filt_obs[headers[i]['filtnam1']])+')')
+        ax_h.plot(bins*convert_flux[i], hist, '+', color="C{0:d}".format(i), alpha=0.8,
+                  label=headers[i]['filtnam1']+' (Obs '+str(filt_obs[headers[i]['filtnam1']])+')')
         ax_h.plot([background[i]*convert_flux[i], background[i]*convert_flux[i]], [hist.min(), hist.max()], 'x--', color="C{0:d}".format(i), alpha=0.8)
         if not (coeff is None):
             ax_h.plot(bins*convert_flux[i], gausspol(bins, *coeff[i]), '--', color="C{0:d}".format(i), alpha=0.8)
@@ -101,13 +105,14 @@ def display_bkg(data, background, std_bkg, headers, histograms=None, binning=Non
     filt = headers[0]['filtnam1']
     # plots
     im2 = ax2.imshow(data0, norm=LogNorm(data0[data0 > 0.].mean()/10., data0.max()), origin='lower', cmap='gray')
-    bkg_im = ax2.imshow(bkg_data0, origin='lower', cmap='Reds', alpha=0.5)
+    ax2.imshow(bkg_data0, origin='lower', cmap='Reds', alpha=0.5)
     if not (rectangle is None):
         x, y, width, height, angle, color = rectangle[0]
         ax2.add_patch(Rectangle((x, y), width, height, edgecolor=color, fill=False, lw=2))
     ax2.annotate(instr+":"+rootname, color='white', fontsize=10, xy=(0.01, 1.00), xycoords='axes fraction', verticalalignment='top', horizontalalignment='left')
     ax2.annotate(filt, color='white', fontsize=14, xy=(0.01, 0.01), xycoords='axes fraction', verticalalignment='bottom', horizontalalignment='left')
-    ax2.annotate(str(exptime)+" s", color='white', fontsize=10, xy=(1.00, 0.01), xycoords='axes fraction', verticalalignment='bottom', horizontalalignment='right')
+    ax2.annotate(str(exptime)+" s", color='white', fontsize=10, xy=(1.00, 0.01),
+                 xycoords='axes fraction', verticalalignment='bottom', horizontalalignment='right')
     ax2.set(xlabel='pixel offset', ylabel='pixel offset', aspect='equal')
 
     fig2.subplots_adjust(hspace=0, wspace=0, right=1.0)
@@ -128,6 +133,7 @@ def display_bkg(data, background, std_bkg, headers, histograms=None, binning=Non
 
     plt.show()
 
+
 def sky_part(img):
     rand_ind = np.unique((np.random.rand(np.floor(img.size/4).astype(int))*2*img.size).astype(int) % img.size)
     rand_pix = img.flatten()[rand_ind]
@@ -139,6 +145,7 @@ def sky_part(img):
     sky = img[np.logical_and(img >= sky_range[0], img <= sky_range[1])]
     return sky, sky_range
 
+
 def bkg_estimate(img, bins=None, chi2=None, coeff=None):
     if bins is None or chi2 is None or coeff is None:
         bins, chi2, coeff = [8], [], []
@@ -161,6 +168,7 @@ def bkg_estimate(img, bins=None, chi2=None, coeff=None):
         coeff.append(popt)
     return bins, chi2, coeff
 
+
 def bkg_fit(data, error, mask, headers, subtract_error=True, display=False, savename=None, plots_folder=""):
     """
     ----------
@@ -298,7 +306,7 @@ def bkg_hist(data, error, mask, headers, sub_type=None, subtract_error=True, dis
         # Compute the Count-rate histogram for the image
         n_mask = np.logical_and(mask, image > 0.)
         if not (sub_type is None):
-            if type(sub_type) == int:
+            if isinstance(sub_type, int):
                 n_bins = sub_type
             elif sub_type.lower() in ['sqrt']:
                 n_bins = np.fix(np.sqrt(image[n_mask].size)).astype(int)  # Square-root
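`isinstance(sub_type, int)` replaces `type(sub_type) == int` (flake8 E721): isinstance respects inheritance and is the idiomatic runtime type test. A small illustration:

    class MyInt(int):
        pass

    x = MyInt(5)
    print(type(x) == int)       # False — exact-type comparison ignores subclasses
    print(isinstance(x, int))   # True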
@@ -309,9 +317,11 @@ def bkg_hist(data, error, mask, headers, sub_type=None, subtract_error=True, dis
             elif sub_type.lower() in ['scott']:
                 n_bins = np.fix((image[n_mask].max()-image[n_mask].min())/(3.5*image[n_mask].std()/np.power(image[n_mask].size, 1/3))).astype(int)  # Scott
             else:
-                n_bins = np.fix((image[n_mask].max()-image[n_mask].min())/(2*np.subtract(*np.percentile(image[n_mask], [75, 25]))/np.power(image[n_mask].size,1/3))).astype(int)  # Freedman-Diaconis
+                n_bins = np.fix((image[n_mask].max()-image[n_mask].min())/(2*np.subtract(*np.percentile(image[n_mask], [75, 25])) /
+                                np.power(image[n_mask].size, 1/3))).astype(int)  # Freedman-Diaconis
         else:
-            n_bins = np.fix((image[n_mask].max()-image[n_mask].min())/(2*np.subtract(*np.percentile(image[n_mask], [75, 25]))/np.power(image[n_mask].size,1/3))).astype(int)  # Freedman-Diaconis
+            n_bins = np.fix((image[n_mask].max()-image[n_mask].min())/(2*np.subtract(*np.percentile(image[n_mask], [75, 25])) /
+                            np.power(image[n_mask].size, 1/3))).astype(int)  # Freedman-Diaconis
 
         hist, bin_edges = np.histogram(np.log(image[n_mask]), bins=n_bins)
         histograms.append(hist)
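The sqrt, Scott, and Freedman-Diaconis rules hand-coded in this function correspond to numpy's named bin estimators; when only the binning is needed, `np.histogram_bin_edges` computes the same rules directly. A sketch with synthetic data, not the module's code:

    import numpy as np
    data = np.random.rand(1000)
    edges = np.histogram_bin_edges(data, bins='fd')   # 'sqrt' and 'scott' also work
    hist, bin_edges = np.histogram(data, bins=edges)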
@@ -441,4 +451,3 @@ def bkg_mini(data, error, mask, headers, sub_shape=(15,15), subtract_error=True,
     if display:
         display_bkg(data, background, std_bkg, headers, rectangle=rectangle, savename=savename, plots_folder=plots_folder)
     return n_data_array, n_error_array, headers, background
-
@@ -1,6 +1,5 @@
 """
-Library functions for graham algorithm implementation (find the convex hull
-of a given list of points).
+Library functions for Graham algorithm implementation (find the convex hull of a given list of points).
 """
 
 from copy import deepcopy
@@ -8,6 +7,9 @@ import numpy as np
 
 
 def clean_ROI(image):
+    """
+    Remove instrument borders from an observation.
+    """
     H, J = [], []
 
     shape = np.array(image.shape)
@@ -116,7 +118,8 @@ def min_lexico(s):
     """
     m = s[0]
     for x in s:
-        if lexico(x, m): m = x
+        if lexico(x, m):
+            m = x
     return m
 
 
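Splitting `if lexico(x, m): m = x` over two lines fixes flake8 E701 (statement on the same line as `if`); the compiled code is identical, only the layout changes:

    if lexico(x, m): m = x   # flagged: E701
    if lexico(x, m):         # PEP 8 layout
        m = x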
@@ -145,16 +148,16 @@ def comp(Omega, A, B):
 
 
 # Implement quicksort
-def partition(s, l, r, order):
+def partition(s, left, right, order):
     """
-    Take a random element of a list 's' between indexes 'l', 'r' and place it
+    Take a random element of a list 's' between indexes 'left', 'right' and place it
     at its right spot using relation order 'order'. Return the index at which
     it was placed.
     ----------
     Inputs:
     s : list
         List of elements to be ordered.
-    l, r : int
+    left, right : int
         Index of the first and last elements to be considered.
     order : func: A, B -> bool
         Relation order between 2 elements A, B that returns True if A<=B,
@@ -164,30 +167,29 @@ def partition(s, l, r, order):
     index : int
         Index at which the element chosen by the function has been placed.
     """
-    i = l - 1
-    for j in range(l, r):
-        if order(s[j], s[r]):
+    i = left - 1
+    for j in range(left, right):
+        if order(s[j], s[right]):
             i = i + 1
             temp = deepcopy(s[i])
             s[i] = deepcopy(s[j])
             s[j] = deepcopy(temp)
     temp = deepcopy(s[i+1])
-    s[i+1] = deepcopy(s[r])
-    s[r] = deepcopy(temp)
+    s[i+1] = deepcopy(s[right])
+    s[right] = deepcopy(temp)
     return i + 1
 
 
-def sort_aux(s, l, r, order):
+def sort_aux(s, left, right, order):
     """
-    Sort a list 's' between indexes 'l', 'r' using relation order 'order' by
+    Sort a list 's' between indexes 'left', 'right' using relation order 'order' by
     dividing it in 2 sub-lists and sorting these.
     """
-    if l <= r:
-        # Call partition function that gives an index on which the list will be
-        #divided
-        q = partition(s, l, r, order)
-        sort_aux(s, l, q - 1, order)
-        sort_aux(s, q + 1, r, order)
+    if left <= right:
+        # Call partition function that gives an index on which the list will be divided
+        q = partition(s, left, right, order)
+        sort_aux(s, left, q - 1, order)
+        sort_aux(s, q + 1, right, order)
 
 
 def quicksort(s, order):
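`quicksort(s, order)` sorts `s` in place, given an `order(A, B)` predicate that returns True when A should sort before B (A<=B). A hypothetical use with plain integers (an inline lambda is fine here — E731 only flags binding a lambda to a name):

    points = [3, 1, 2]
    quicksort(points, lambda a, b: a <= b)
    print(points)  # [1, 2, 3]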
@@ -204,7 +206,7 @@ def sort_angles_distances(Omega, s):
     Sort the list of points 's' for the composition order given reference point
     Omega.
     """
-    order = lambda A, B: comp(Omega, A, B)
+    def order(A, B): return comp(Omega, A, B)
     quicksort(s, order)
 
 
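Replacing `order = lambda ...` with a `def` is flake8 E731: a lambda bound to a name gains nothing from being a lambda, and a def carries a real `__name__` for tracebacks and profiling. The resulting one-liner technically triggers E704 (statement on the same line as def), but pycodestyle ignores E704 by default, which is why autoformatters emit this form. Schematically (comp and Omega come from this module):

    order = lambda A, B: comp(Omega, A, B)        # before — E731
    def order(A, B): return comp(Omega, A, B)     # after — a named function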
@@ -1,7 +1,7 @@
 """
 Library functions for phase cross-correlation computation.
 """
-##Prefer FFTs via the new scipy.fft module when available (SciPy 1.4+)
+# Prefer FFTs via the new scipy.fft module when available (SciPy 1.4+)
 # Otherwise fall back to numpy.fft.
 # Like numpy 1.15+, scipy 1.3+ is also using pocketfft, but a newer
 # C++/pybind11 version called pypocketfft
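The header comments describe a conditional FFT backend: use `scipy.fft` (the pypocketfft-based module added in SciPy 1.4) when it imports, otherwise fall back to `numpy.fft`, whose core API (fft2, ifft2, fftn, ...) matches. A minimal sketch of that pattern, with `fft_module` as an assumed alias:

    try:
        import scipy.fft as fft_module   # SciPy >= 1.4
    except ImportError:
        import numpy.fft as fft_module   # same core API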
@@ -56,37 +56,58 @@ def zeropad(arr, shape):
     offset = diff//2
     z = np.zeros(shape, dtype=arr.dtype)
     if rank == 1:
-        i0 = offset[0]; n0 = i0 + arr.shape[0]
+        i0 = offset[0]
+        n0 = i0 + arr.shape[0]
         z[i0:n0] = arr
     elif rank == 2:
-        i0 = offset[0]; n0 = i0 + arr.shape[0]
-        i1 = offset[1]; n1 = i1 + arr.shape[1]
+        i0 = offset[0]
+        n0 = i0 + arr.shape[0]
+        i1 = offset[1]
+        n1 = i1 + arr.shape[1]
         z[i0:n0, i1:n1] = arr
     elif rank == 3:
-        i0 = offset[0]; n0 = i0 + arr.shape[0]
-        i1 = offset[1]; n1 = i1 + arr.shape[1]
-        i2 = offset[2]; n2 = i2 + arr.shape[2]
+        i0 = offset[0]
+        n0 = i0 + arr.shape[0]
+        i1 = offset[1]
+        n1 = i1 + arr.shape[1]
+        i2 = offset[2]
+        n2 = i2 + arr.shape[2]
         z[i0:n0, i1:n1, i2:n2] = arr
     elif rank == 4:
-        i0 = offset[0]; n0 = i0 + arr.shape[0]
-        i1 = offset[1]; n1 = i1 + arr.shape[1]
-        i2 = offset[2]; n2 = i2 + arr.shape[2]
-        i3 = offset[3]; n3 = i3 + arr.shape[3]
+        i0 = offset[0]
+        n0 = i0 + arr.shape[0]
+        i1 = offset[1]
+        n1 = i1 + arr.shape[1]
+        i2 = offset[2]
+        n2 = i2 + arr.shape[2]
+        i3 = offset[3]
+        n3 = i3 + arr.shape[3]
         z[i0:n0, i1:n1, i2:n2, i3:n3] = arr
     elif rank == 5:
-        i0 = offset[0]; n0 = i0 + arr.shape[0]
-        i1 = offset[1]; n1 = i1 + arr.shape[1]
-        i2 = offset[2]; n2 = i2 + arr.shape[2]
-        i3 = offset[3]; n3 = i3 + arr.shape[3]
-        i4 = offset[4]; n4 = i4 + arr.shape[4]
+        i0 = offset[0]
+        n0 = i0 + arr.shape[0]
+        i1 = offset[1]
+        n1 = i1 + arr.shape[1]
+        i2 = offset[2]
+        n2 = i2 + arr.shape[2]
+        i3 = offset[3]
+        n3 = i3 + arr.shape[3]
+        i4 = offset[4]
+        n4 = i4 + arr.shape[4]
         z[i0:n0, i1:n1, i2:n2, i3:n3, i4:n4] = arr
     elif rank == 6:
-        i0 = offset[0]; n0 = i0 + arr.shape[0]
-        i1 = offset[1]; n1 = i1 + arr.shape[1]
-        i2 = offset[2]; n2 = i2 + arr.shape[2]
-        i3 = offset[3]; n3 = i3 + arr.shape[3]
-        i4 = offset[4]; n4 = i4 + arr.shape[4]
-        i5 = offset[5]; n5 = i5 + arr.shape[5]
+        i0 = offset[0]
+        n0 = i0 + arr.shape[0]
+        i1 = offset[1]
+        n1 = i1 + arr.shape[1]
+        i2 = offset[2]
+        n2 = i2 + arr.shape[2]
+        i3 = offset[3]
+        n3 = i3 + arr.shape[3]
+        i4 = offset[4]
+        n4 = i4 + arr.shape[4]
+        i5 = offset[5]
+        n5 = i5 + arr.shape[5]
         z[i0:n0, i1:n1, i2:n2, i3:n3, i4:n4, i5:n5] = arr
     else:
         raise ValueError("too many dimensions")
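The rank-by-rank unrolling above (now one statement per line, E702) could also be collapsed with a tuple of slices, which works for any rank. A hedged alternative sketch, not the library's code:

    import numpy as np

    def zeropad_general(arr, shape):
        # Center arr inside a zero array of the given shape (assumes shape >= arr.shape).
        offset = (np.array(shape) - np.array(arr.shape))//2
        z = np.zeros(shape, dtype=arr.dtype)
        z[tuple(slice(o, o + s) for o, s in zip(offset, arr.shape))] = arr
        return z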
@@ -140,7 +161,7 @@ def from_file_psf(filename):
     """
     with fits.open(filename) as f:
         psf = f[0].data
-    if (type(psf) != np.ndarray) or len(psf) != 2:
+    if not isinstance(psf, np.ndarray) or len(psf) != 2:
         raise ValueError("Invalid PSF image in PrimaryHDU at {0:s}".format(filename))
     # Return the normalized Point Spread Function
     kernel = psf/psf.max()
@@ -387,32 +408,38 @@ def conjgrad(image, psf, alpha=0.1, error=None, iterations=20):
         dims = x.shape
         r = np.zeros(dims, dtype=x.dtype)  # to store the result
         rank = x.ndim  # number of dimensions
-        if rank == 0: return r
+        if rank == 0:
+            return r
         if dims[0] >= 2:
             dx = x[1:-1, ...] - x[0:-2, ...]
             r[1:-1, ...] += dx
             r[0:-2, ...] -= dx
-        if rank == 1: return r
+        if rank == 1:
+            return r
         if dims[1] >= 2:
             dx = x[:, 1:-1, ...] - x[:, 0:-2, ...]
             r[:, 1:-1, ...] += dx
             r[:, 0:-2, ...] -= dx
-        if rank == 2: return r
+        if rank == 2:
+            return r
         if dims[2] >= 2:
             dx = x[:, :, 1:-1, ...] - x[:, :, 0:-2, ...]
             r[:, :, 1:-1, ...] += dx
             r[:, :, 0:-2, ...] -= dx
-        if rank == 3: return r
+        if rank == 3:
+            return r
         if dims[3] >= 2:
             dx = x[:, :, :, 1:-1, ...] - x[:, :, :, 0:-2, ...]
             r[:, :, :, 1:-1, ...] += dx
             r[:, :, :, 0:-2, ...] -= dx
-        if rank == 4: return r
+        if rank == 4:
+            return r
         if dims[4] >= 2:
             dx = x[:, :, :, :, 1:-1, ...] - x[:, :, :, :, 0:-2, ...]
             r[:, :, :, :, 1:-1, ...] += dx
             r[:, :, :, :, 0:-2, ...] -= dx
-        if rank == 5: return r
+        if rank == 5:
+            return r
         raise ValueError("too many dimensions")
 
     def A(x):
@@ -15,9 +15,8 @@ import numpy as np
 from os.path import join as path_join
 from astropy.io import fits
 from astropy.wcs import WCS
-from lib.convex_hull import image_hull, clean_ROI
+from lib.convex_hull import clean_ROI
 from lib.plots import princ_angle
-import matplotlib.pyplot as plt
 
 
 def get_obs_data(infiles, data_folder="", compute_flux=False):
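Dropping `image_hull` from the import and removing `import matplotlib.pyplot as plt` clears flake8 F401 ("imported but unused"). A hypothetical check — path and line number are illustrative only:

    $ flake8 --select=F401 lib/reduction.py
    lib/reduction.py:19:1: F401 'matplotlib.pyplot as plt' imported but unused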
src/lib/plots.py
@@ -60,7 +60,7 @@ def princ_angle(ang):
     """
     Return the principal angle in the 0° to 360° quadrant.
     """
-    if type(ang) != np.ndarray:
+    if not isinstance(ang, np.ndarray):
        A = np.array([ang])
     else:
         A = np.array(ang)
@@ -68,7 +68,7 @@ def princ_angle(ang):
         A[A < 0.] = A[A < 0.]+360.
     while np.any(A >= 180.):
         A[A >= 180.] = A[A >= 180.]-180.
-    if type(ang) == type(A):
+    if type(ang) is type(A):
         return A
     else:
         return A[0]
@@ -80,7 +80,7 @@ def sci_not(v,err,rnd=1,out=str):
     """
     power = - int(('%E' % v)[-3:])+1
     output = [r"({0}".format(round(v*10**power, rnd)), round(v*10**power, rnd)]
-    if type(err) == list:
+    if isinstance(err, list):
         for error in err:
             output[0] += r" $\pm$ {0}".format(round(error*10**power, rnd))
             output.append(round(error*10**power, rnd))
@@ -283,8 +283,6 @@ def polarisation_map(Stokes, data_mask=None, rectangle=None, SNRp_cut=3., SNRi_c
     """
     # Get data
     stkI = Stokes[np.argmax([Stokes[i].header['datatype'] == 'I_stokes' for i in range(len(Stokes))])]
-    stkQ = Stokes[np.argmax([Stokes[i].header['datatype']=='Q_stokes' for i in range(len(Stokes))])]
-    stkU = Stokes[np.argmax([Stokes[i].header['datatype']=='U_stokes' for i in range(len(Stokes))])]
     stk_cov = Stokes[np.argmax([Stokes[i].header['datatype'] == 'IQU_cov_matrix' for i in range(len(Stokes))])]
     pol = Stokes[np.argmax([Stokes[i].header['datatype'] == 'Pol_deg_debiased' for i in range(len(Stokes))])]
     pol_err = Stokes[np.argmax([Stokes[i].header['datatype'] == 'Pol_deg_err' for i in range(len(Stokes))])]
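Removing the unused `stkQ`/`stkU` bindings here (and the `cbar = ...`/`cont = ...` assignments in the hunks below) addresses flake8 F841, "local variable is assigned to but never used". When only the call's side effect matters, drop the name (fig, ax, im, data, levels are stand-ins):

    fig.colorbar(im, ax=ax)           # keep the side effect, no dead binding
    ax.contour(data, levels=levels)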
@@ -342,15 +340,13 @@ def polarisation_map(Stokes, data_mask=None, rectangle=None, SNRp_cut=3., SNRi_c
         else:
             vmin, vmax = flux_lim
         im = ax.imshow(stkI.data*convert_flux, norm=LogNorm(vmin, vmax), aspect='equal', cmap='inferno', alpha=1.)
-        cbar = fig.colorbar(im, ax=ax, aspect=50, shrink=0.75, pad=0.025, label=r"$F_{\lambda}$ [$ergs \cdot cm^{-2} \cdot s^{-1} \cdot \AA^{-1}$]")
+        fig.colorbar(im, ax=ax, aspect=50, shrink=0.75, pad=0.025, label=r"$F_{\lambda}$ [$ergs \cdot cm^{-2} \cdot s^{-1} \cdot \AA^{-1}$]")
         levelsI = np.logspace(0.31, 1.955, 6)/100.*vmax
         print("Total flux contour levels : ", levelsI)
-        cont = ax.contour(stkI.data*convert_flux, levels=levelsI, colors='grey', linewidths=0.5)
-        #ax.clabel(cont,inline=True,fontsize=6)
+        ax.contour(stkI.data*convert_flux, levels=levelsI, colors='grey', linewidths=0.5)
     elif display.lower() in ['pol_flux']:
         # Display polarisation flux
         display = 'pf'
-        pf_mask = (stkI.data > 0.) * (pol.data > 0.)
         if flux_lim is None:
             if mask.sum() > 0.:
                 vmin, vmax = 1./2.*np.median(np.sqrt(stk_cov.data[0, 0][mask])*convert_flux), np.max(stkI.data[stkI.data > 0.]*convert_flux)
@@ -359,23 +355,22 @@ def polarisation_map(Stokes, data_mask=None, rectangle=None, SNRp_cut=3., SNRi_c
         else:
             vmin, vmax = flux_lim
         im = ax.imshow(stkI.data*convert_flux*pol.data, norm=LogNorm(vmin, vmax), aspect='equal', cmap='inferno', alpha=1.)
-        cbar = fig.colorbar(im, ax=ax, aspect=50, shrink=0.75, pad=0.025, label=r"$F_{\lambda} \cdot P$ [$ergs \cdot cm^{-2} \cdot s^{-1} \cdot \AA^{-1}$]")
+        fig.colorbar(im, ax=ax, aspect=50, shrink=0.75, pad=0.025, label=r"$F_{\lambda} \cdot P$ [$ergs \cdot cm^{-2} \cdot s^{-1} \cdot \AA^{-1}$]")
         levelsPf = np.linspace(vmax*0.01, vmax*0.99, 10)
         print("Polarized flux contour levels : ", levelsPf)
-        cont = ax.contour(stkI.data*convert_flux*pol.data, levels=levelsPf, colors='grey', linewidths=0.5)
-        #ax.clabel(cont,inline=True,fontsize=6)
+        ax.contour(stkI.data*convert_flux*pol.data, levels=levelsPf, colors='grey', linewidths=0.5)
     elif display.lower() in ['p', 'pol', 'pol_deg']:
         # Display polarisation degree map
         display = 'p'
         vmin, vmax = 0., 100.
         im = ax.imshow(pol.data*100., vmin=vmin, vmax=vmax, aspect='equal', cmap='inferno', alpha=1.)
-        cbar = fig.colorbar(im, ax=ax, aspect=50, shrink=0.75, pad=0.025, label=r"$P$ [%]")
+        fig.colorbar(im, ax=ax, aspect=50, shrink=0.75, pad=0.025, label=r"$P$ [%]")
     elif display.lower() in ['pa', 'pang', 'pol_ang']:
         # Display polarisation angle map
         display = 'pa'
         vmin, vmax = 0., 180.
         im = ax.imshow(princ_angle(pang.data), vmin=vmin, vmax=vmax, aspect='equal', cmap='inferno', alpha=1.)
-        cbar = fig.colorbar(im, ax=ax, aspect=50, shrink=0.75, pad=0.025, label=r"$\theta_P$ [°]")
+        fig.colorbar(im, ax=ax, aspect=50, shrink=0.75, pad=0.025, label=r"$\theta_P$ [°]")
     elif display.lower() in ['s_p', 'pol_err', 'pol_deg_err']:
         # Display polarisation degree error map
         display = 's_p'
@@ -386,16 +381,17 @@ def polarisation_map(Stokes, data_mask=None, rectangle=None, SNRp_cut=3., SNRi_c
             im = ax.imshow(p_err*100., vmin=vmin, vmax=vmax, aspect='equal', cmap='inferno', alpha=1.)
         else:
             im = ax.imshow(pol_err.data*100., aspect='equal', cmap='inferno', alpha=1.)
-        cbar = fig.colorbar(im, ax=ax, aspect=50, shrink=0.75, pad=0.025, label=r"$\sigma_P$ [%]")
+        fig.colorbar(im, ax=ax, aspect=50, shrink=0.75, pad=0.025, label=r"$\sigma_P$ [%]")
     elif display.lower() in ['s_i', 'i_err']:
         # Display intensity error map
         display = 's_i'
         if (SNRi > SNRi_cut).any():
-            vmin, vmax = np.min(np.sqrt(stk_cov.data[0,0][stk_cov.data[0,0] > 0.])*convert_flux), np.max(np.sqrt(stk_cov.data[0,0][stk_cov.data[0,0] > 0.])*convert_flux)
-            im = ax.imshow(np.sqrt(stk_cov.data[0,0])*convert_flux, vmin=vmin, vmax=vmax, aspect='equal', cmap='inferno', alpha=1.)
+            vmin, vmax = 1./2.*np.median(np.sqrt(stk_cov.data[0, 0][stk_cov.data[0, 0] > 0.]) *
+                                         convert_flux), np.max(np.sqrt(stk_cov.data[0, 0][stk_cov.data[0, 0] > 0.])*convert_flux)
+            im = ax.imshow(np.sqrt(stk_cov.data[0, 0])*convert_flux, norm=LogNorm(vmin, vmax), aspect='equal', cmap='inferno', alpha=1.)
         else:
             im = ax.imshow(np.sqrt(stk_cov.data[0, 0])*convert_flux, aspect='equal', cmap='inferno', alpha=1.)
-        cbar = fig.colorbar(im, ax=ax, aspect=50, shrink=0.75, pad=0.025 , label=r"$\sigma_I$ [$ergs \cdot cm^{-2} \cdot s^{-1} \cdot \AA^{-1}$]")
+        fig.colorbar(im, ax=ax, aspect=50, shrink=0.75, pad=0.025, label=r"$\sigma_I$ [$ergs \cdot cm^{-2} \cdot s^{-1} \cdot \AA^{-1}$]")
     elif display.lower() in ['snr', 'snri']:
         # Display I_stokes signal-to-noise map
         display = 'snri'
@@ -404,11 +400,10 @@ def polarisation_map(Stokes, data_mask=None, rectangle=None, SNRp_cut=3., SNRi_c
             im = ax.imshow(SNRi, vmin=vmin, vmax=vmax, aspect='equal', cmap='inferno', alpha=1.)
             levelsSNRi = np.linspace(SNRi_cut, vmax*0.99, 5)
             print("SNRi contour levels : ", levelsSNRi)
-            cont = ax.contour(SNRi, levels=levelsSNRi, colors='grey', linewidths=0.5)
-            #ax.clabel(cont,inline=True,fontsize=6)
+            ax.contour(SNRi, levels=levelsSNRi, colors='grey', linewidths=0.5)
         else:
             im = ax.imshow(SNRi, aspect='equal', cmap='inferno', alpha=1.)
-        cbar = fig.colorbar(im, ax=ax, aspect=50, shrink=0.75, pad=0.025 , label=r"$I_{Stokes}/\sigma_{I}$")
+        fig.colorbar(im, ax=ax, aspect=50, shrink=0.75, pad=0.025, label=r"$I_{Stokes}/\sigma_{I}$")
     elif display.lower() in ['snrp']:
         # Display polarisation degree signal-to-noise map
         display = 'snrp'
@@ -417,11 +412,10 @@ def polarisation_map(Stokes, data_mask=None, rectangle=None, SNRp_cut=3., SNRi_c
             im = ax.imshow(SNRp, vmin=vmin, vmax=vmax, aspect='equal', cmap='inferno', alpha=1.)
             levelsSNRp = np.linspace(SNRp_cut, vmax*0.99, 5)
             print("SNRp contour levels : ", levelsSNRp)
-            cont = ax.contour(SNRp, levels=levelsSNRp, colors='grey', linewidths=0.5)
-            #ax.clabel(cont,inline=True,fontsize=6)
+            ax.contour(SNRp, levels=levelsSNRp, colors='grey', linewidths=0.5)
         else:
             im = ax.imshow(SNRp, aspect='equal', cmap='inferno', alpha=1.)
-        cbar = fig.colorbar(im, ax=ax, aspect=50, shrink=0.75, pad=0.025 , label=r"$P/\sigma_{P}$")
+        fig.colorbar(im, ax=ax, aspect=50, shrink=0.75, pad=0.025, label=r"$P/\sigma_{P}$")
     else:
         # Defaults to intensity map
         if mask.sum() > 0.:
@@ -429,7 +423,7 @@ def polarisation_map(Stokes, data_mask=None, rectangle=None, SNRp_cut=3., SNRi_c
         else:
             vmin, vmax = 1.*np.mean(np.sqrt(stk_cov.data[0, 0][stkI.data > 0.])*convert_flux), np.max(stkI.data[stkI.data > 0.]*convert_flux)
         im = ax.imshow(stkI.data*convert_flux, norm=LogNorm(vmin, vmax), aspect='equal', cmap='inferno', alpha=1.)
-        cbar = fig.colorbar(im, ax=ax, aspect=50, shrink=0.75, pad=0.025 , label=r"$F_{\lambda}$ [$ergs \cdot cm^{-2} \cdot s^{-1} \cdot \AA$]")
+        fig.colorbar(im, ax=ax, aspect=50, shrink=0.75, pad=0.025, label=r"$F_{\lambda}$ [$ergs \cdot cm^{-2} \cdot s^{-1} \cdot \AA^{-1}$]")
 
     # Get integrated values from header
     n_pix = stkI.data[data_mask].size
@@ -443,7 +437,8 @@ def polarisation_map(Stokes, data_mask=None, rectangle=None, SNRp_cut=3., SNRi_c
 
     px_size = wcs.wcs.get_cdelt()[0]*3600.
     px_sc = AnchoredSizeBar(ax.transData, 1./px_size, '1 arcsec', 3, pad=0.5, sep=5, borderpad=0.5, frameon=False, size_vertical=0.005, color='w')
-    north_dir = AnchoredDirectionArrows(ax.transAxes, "E", "N", length=-0.08, fontsize=0.025, loc=1, aspect_ratio=-1, sep_y=0.01, sep_x=0.01, back_length=0., head_length=10., head_width=10., angle=-Stokes[0].header['orientat'], color='white', text_props={'ec': 'k', 'fc': 'w', 'alpha': 1, 'lw': 0.4}, arrow_props={'ec': 'k','fc':'w','alpha': 1,'lw': 1})
+    north_dir = AnchoredDirectionArrows(ax.transAxes, "E", "N", length=-0.08, fontsize=0.025, loc=1, aspect_ratio=-1, sep_y=0.01, sep_x=0.01, back_length=0., head_length=10., head_width=10.,
+                                        angle=-Stokes[0].header['orientat'], color='white', text_props={'ec': 'k', 'fc': 'w', 'alpha': 1, 'lw': 0.4}, arrow_props={'ec': 'k', 'fc': 'w', 'alpha': 1, 'lw': 1})
 
     if display.lower() in ['i', 's_i', 'snri', 'pf', 'p', 'pa', 's_p', 'snrp']:
         if step_vec == 0:
@@ -452,19 +447,22 @@ def polarisation_map(Stokes, data_mask=None, rectangle=None, SNRp_cut=3., SNRi_c
|
|||||||
vec_scale = 2.
|
vec_scale = 2.
|
||||||
X, Y = np.meshgrid(np.arange(stkI.data.shape[1]), np.arange(stkI.data.shape[0]))
|
X, Y = np.meshgrid(np.arange(stkI.data.shape[1]), np.arange(stkI.data.shape[0]))
|
||||||
U, V = poldata*np.cos(np.pi/2.+pangdata*np.pi/180.), poldata*np.sin(np.pi/2.+pangdata*np.pi/180.)
|
U, V = poldata*np.cos(np.pi/2.+pangdata*np.pi/180.), poldata*np.sin(np.pi/2.+pangdata*np.pi/180.)
|
||||||
Q = ax.quiver(X[::step_vec,::step_vec],Y[::step_vec,::step_vec],U[::step_vec,::step_vec],V[::step_vec,::step_vec],units='xy',angles='uv',scale=1./vec_scale,scale_units='xy',pivot='mid',headwidth=0.,headlength=0.,headaxislength=0.,width=0.1,linewidth=0.5,color='w',edgecolor='k')
|
ax.quiver(X[::step_vec, ::step_vec], Y[::step_vec, ::step_vec], U[::step_vec, ::step_vec], V[::step_vec, ::step_vec], units='xy', angles='uv',
|
||||||
|
scale=1./vec_scale, scale_units='xy', pivot='mid', headwidth=0., headlength=0., headaxislength=0., width=0.1, linewidth=0.5, color='w', edgecolor='k')
|
||||||
pol_sc = AnchoredSizeBar(ax.transData, vec_scale, r"$P$= 100 %", 4, pad=0.5, sep=5, borderpad=0.5, frameon=False, size_vertical=0.005, color='w')
|
pol_sc = AnchoredSizeBar(ax.transData, vec_scale, r"$P$= 100 %", 4, pad=0.5, sep=5, borderpad=0.5, frameon=False, size_vertical=0.005, color='w')
|
||||||
|
|
||||||
ax.add_artist(pol_sc)
|
ax.add_artist(pol_sc)
|
||||||
ax.add_artist(px_sc)
|
ax.add_artist(px_sc)
|
||||||
ax.add_artist(north_dir)
|
ax.add_artist(north_dir)
|
||||||
|
|
||||||
ax.annotate(r"$F_{{\lambda}}^{{int}}$({0:.0f} $\AA$) = {1} $ergs \cdot cm^{{-2}} \cdot s^{{-1}} \cdot \AA^{{-1}}$".format(pivot_wav,sci_not(I_diluted*convert_flux,I_diluted_err*convert_flux,2))+"\n"+r"$P^{{int}}$ = {0:.1f} $\pm$ {1:.1f} %".format(P_diluted*100.,P_diluted_err*100.)+"\n"+r"$\theta_{{P}}^{{int}}$ = {0:.1f} $\pm$ {1:.1f} °".format(PA_diluted,PA_diluted_err), color='white', xy=(0.01, 1.00), xycoords='axes fraction',path_effects=[pe.withStroke(linewidth=0.5,foreground='k')],verticalalignment='top', horizontalalignment='left')
|
ax.annotate(r"$F_{{\lambda}}^{{int}}$({0:.0f} $\AA$) = {1} $ergs \cdot cm^{{-2}} \cdot s^{{-1}} \cdot \AA^{{-1}}$".format(pivot_wav, sci_not(I_diluted*convert_flux, I_diluted_err*convert_flux, 2))+"\n"+r"$P^{{int}}$ = {0:.1f} $\pm$ {1:.1f} %".format(P_diluted*100., P_diluted_err *
|
||||||
|
100.)+"\n"+r"$\theta_{{P}}^{{int}}$ = {0:.1f} $\pm$ {1:.1f} °".format(PA_diluted, PA_diluted_err), color='white', xy=(0.01, 1.00), xycoords='axes fraction', path_effects=[pe.withStroke(linewidth=0.5, foreground='k')], verticalalignment='top', horizontalalignment='left')
|
||||||
else:
|
else:
|
||||||
if display.lower() == 'default':
|
if display.lower() == 'default':
|
||||||
ax.add_artist(px_sc)
|
ax.add_artist(px_sc)
|
||||||
ax.add_artist(north_dir)
|
ax.add_artist(north_dir)
|
||||||
ax.annotate(r"$F_{{\lambda}}^{{int}}$({0:.0f} $\AA$) = {1} $ergs \cdot cm^{{-2}} \cdot s^{{-1}} \cdot \AA^{{-1}}$".format(pivot_wav,sci_not(I_diluted*convert_flux,I_diluted_err*convert_flux,2)), color='white', xy=(0.01, 1.00), xycoords='axes fraction',path_effects=[pe.withStroke(linewidth=0.5,foreground='k')],verticalalignment='top', horizontalalignment='left')
|
ax.annotate(r"$F_{{\lambda}}^{{int}}$({0:.0f} $\AA$) = {1} $ergs \cdot cm^{{-2}} \cdot s^{{-1}} \cdot \AA^{{-1}}$".format(pivot_wav, sci_not(I_diluted*convert_flux, I_diluted_err*convert_flux, 2)),
|
||||||
|
color='white', xy=(0.01, 1.00), xycoords='axes fraction', path_effects=[pe.withStroke(linewidth=0.5, foreground='k')], verticalalignment='top', horizontalalignment='left')
|
||||||
|
|
||||||
# Display instrument FOV
|
# Display instrument FOV
|
||||||
if not (rectangle is None):
|
if not (rectangle is None):
|
||||||
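The quiver call above draws flat-headed polarisation vectors whose components come from the degree and angle of polarisation. A minimal standalone sketch of that conversion, with hypothetical pol and pang arrays standing in for the pipeline's poldata and pangdata:

import numpy as np

pol = np.array([[0.10, 0.30]])    # hypothetical degree of polarisation
pang = np.array([[20.0, 110.0]])  # hypothetical polarisation angle [deg]

# Polarisation angles are counted from north (the +y axis), hence the
# pi/2 offset when building the x/y components that quiver expects.
theta = np.pi/2. + pang*np.pi/180.
U, V = pol*np.cos(theta), pol*np.sin(theta)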
@@ -473,13 +471,14 @@ def polarisation_map(Stokes, data_mask=None, rectangle=None, SNRp_cut=3., SNRi_c
 ax.add_patch(Rectangle((x, y), width, height, angle=angle,
 edgecolor=color, fill=False))
 # ax.coords.grid(True, color='white', ls='dotted', alpha=0.5)
-ax.set_xlabel('Right Ascension (J2000)')
+ax.coords[0].set_axislabel('Right Ascension (J2000)')
+ax.coords[0].set_axislabel_position('t')
+ax.coords[0].set_ticklabel_position('t')
 ax.set_ylabel('Declination (J2000)', labelpad=-1)
-if not savename is None:
+if savename is not None:
-if not savename[-4:] in ['.png', '.jpg', '.pdf']:
+if savename[-4:] not in ['.png', '.jpg', '.pdf']:
 savename += '.pdf'
 fig.savefig(path_join(plots_folder, savename), bbox_inches='tight', dpi=300)
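The two savename changes are the readability fixes flake8 reports as E714 (use "is not") and E713 (use "not in"); both spellings are equivalent at runtime. A minimal check of that equivalence:

savename = None
assert (not savename is None) == (savename is not None)
ext = '.txt'
assert (not ext in ['.png', '.jpg', '.pdf']) == (ext not in ['.png', '.jpg', '.pdf'])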
@@ -491,6 +490,7 @@ class align_maps(object):
 """
 Class to interactively align maps with different WCS.
 """
 def __init__(self, map, other_map, **kwargs):
 self.aligned = False
@@ -516,10 +516,14 @@ class align_maps(object):
 elif len(self.other_data.shape) == 3:
 self.other_data = self.other_data[0]
-self.map_convert, self.map_unit = (float(self.map_header['photflam']), r"$ergs \cdot cm^{-2} \cdot s^{-1} \cdot \AA^{-1}$") if "PHOTFLAM" in list(self.map_header.keys()) else (1., self.map_header['bunit'] if 'BUNIT' in list(self.map_header.keys()) else "Arbitray Units")
+self.map_convert, self.map_unit = (float(self.map_header['photflam']), r"$ergs \cdot cm^{-2} \cdot s^{-1} \cdot \AA^{-1}$") if "PHOTFLAM" in list(
+    self.map_header.keys()) else (1., self.map_header['bunit'] if 'BUNIT' in list(self.map_header.keys()) else "Arbitray Units")
-self.other_convert, self.other_unit = (float(self.other_map[0].header['photflam']), r"$ergs \cdot cm^{-2} \cdot s^{-1} \cdot \AA^{-1}$") if "PHOTFLAM" in list(self.other_header.keys()) else (1., self.other_header['bunit'] if 'BUNIT' in list(self.other_header.keys()) else "Arbitray Units")
+self.other_convert, self.other_unit = (float(self.other_map[0].header['photflam']), r"$ergs \cdot cm^{-2} \cdot s^{-1} \cdot \AA^{-1}$") if "PHOTFLAM" in list(
+    self.other_header.keys()) else (1., self.other_header['bunit'] if 'BUNIT' in list(self.other_header.keys()) else "Arbitray Units")
-self.map_observer = "/".join([self.map_header['telescop'],self.map_header['instrume']]) if "INSTRUME" in list(self.map_header.keys()) else self.map_header['telescop']
+self.map_observer = "/".join([self.map_header['telescop'], self.map_header['instrume']]
+                             ) if "INSTRUME" in list(self.map_header.keys()) else self.map_header['telescop']
-self.other_observer = "/".join([self.other_header['telescop'],self.other_header['instrume']]) if "INSTRUME" in list(self.other_header.keys()) else self.other_header['telescop']
+self.other_observer = "/".join([self.other_header['telescop'], self.other_header['instrume']]
+                               ) if "INSTRUME" in list(self.other_header.keys()) else self.other_header['telescop']
 plt.rcParams.update({'font.size': 10})
 fontprops = fm.FontProperties(size=16)
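The wrapped conditional expressions now satisfy the line-length limit but remain hard to scan. One possible refactor (not part of this commit) would factor the header lookup into a helper; a sketch, assuming the headers behave like astropy fits.Header mappings:

def flux_conversion(header):
    """Return (scale factor, unit label) for a FITS image header.

    PHOTFLAM scales HST counts/s to erg/cm2/s/Angstrom; otherwise fall
    back to BUNIT, or to an arbitrary-units label when neither is set.
    """
    if 'PHOTFLAM' in header:
        return float(header['PHOTFLAM']), r"$ergs \cdot cm^{-2} \cdot s^{-1} \cdot \AA^{-1}$"
    return 1., header.get('BUNIT', 'Arbitrary Units')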
@@ -532,11 +536,11 @@ class align_maps(object):
 vmin, vmax = self.map_data[self.map_data > 0.].max()/1e3*self.map_convert, self.map_data[self.map_data > 0.].max()*self.map_convert
 for key, value in [["cmap", [["cmap", "inferno"]]], ["norm", [["norm", LogNorm(vmin, vmax)]]]]:
 try:
-test = kwargs[key]
+_ = kwargs[key]
 except KeyError:
 for key_i, val_i in value:
 kwargs[key_i] = val_i
-im1 = self.map_ax.imshow(self.map_data*self.map_convert, aspect='equal', **kwargs)
+self.map_ax.imshow(self.map_data*self.map_convert, aspect='equal', **kwargs)
 if kwargs['cmap'] in ['inferno', 'magma', 'Greys_r', 'binary_r', 'gist_yarg_r', 'gist_gray', 'gray', 'bone', 'pink', 'hot', 'afmhot', 'gist_heat', 'copper', 'gist_earth', 'gist_stern', 'gnuplot', 'gnuplot2', 'CMRmap', 'cubehelix', 'nipy_spectral', 'gist_ncar', 'viridis']:
 self.map_ax.set_facecolor('black')
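Renaming the throwaway variable test to _ silences the unused-variable warning (F841) without changing behaviour: the lookup exists only to raise KeyError when the caller did not pass the option. The same default-filling logic can be written without the exception; a sketch:

from matplotlib.colors import LogNorm

vmin, vmax = 1e-3, 1.0  # hypothetical display range
kwargs = {}             # stands in for the plotting call's **kwargs
for key, val in [("cmap", "inferno"), ("norm", LogNorm(vmin, vmax))]:
    kwargs.setdefault(key, val)  # keep caller-supplied values, fill the rest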
@@ -547,13 +551,16 @@ class align_maps(object):
 self.other_ax.set_facecolor('white')
 font_color = "black"
 px_size1 = self.map_wcs.wcs.get_cdelt()[0]*3600.
-px_sc1 = AnchoredSizeBar(self.map_ax.transData, 1./px_size1, '1 arcsec', 3, pad=0.5, sep=5, borderpad=0.5, frameon=False, size_vertical=0.005, color=font_color, fontproperties=fontprops)
+px_sc1 = AnchoredSizeBar(self.map_ax.transData, 1./px_size1, '1 arcsec', 3, pad=0.5, sep=5, borderpad=0.5,
+                         frameon=False, size_vertical=0.005, color=font_color, fontproperties=fontprops)
 self.map_ax.add_artist(px_sc1)
 if 'PHOTPLAM' in list(self.map_header.keys()):
-annote1 = self.map_ax.annotate(r"$\lambda$ = {0:.0f} $\AA$".format(self.map_header['photplam']), color=font_color, fontsize=12, xy=(0.01, 0.93), xycoords='axes fraction',path_effects=[pe.withStroke(linewidth=0.5,foreground='k')])
+self.map_ax.annotate(r"$\lambda$ = {0:.0f} $\AA$".format(self.map_header['photplam']), color=font_color, fontsize=12, xy=(
+    0.01, 0.93), xycoords='axes fraction', path_effects=[pe.withStroke(linewidth=0.5, foreground='k')])
 if 'ORIENTAT' in list(self.map_header.keys()):
-north_dir1 = AnchoredDirectionArrows(self.map_ax.transAxes, "E", "N", length=-0.08, fontsize=0.03, loc=1, aspect_ratio=-1, sep_y=0.01, sep_x=0.01, angle=-self.map_header['orientat'], color=font_color, arrow_props={'ec': 'k', 'fc': 'w', 'alpha': 1,'lw': 0.5})
+north_dir1 = AnchoredDirectionArrows(self.map_ax.transAxes, "E", "N", length=-0.08, fontsize=0.03, loc=1, aspect_ratio=-1, sep_y=0.01,
+                                     sep_x=0.01, angle=-self.map_header['orientat'], color=font_color, arrow_props={'ec': 'k', 'fc': 'w', 'alpha': 1, 'lw': 0.5})
 self.map_ax.add_artist(north_dir1)
 self.cr_map, = self.map_ax.plot(*(self.map_wcs.wcs.crpix-(1., 1.)), 'r+')
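For context on the 1./px_size1 bar length: CDELT is the pixel size in degrees, so cdelt*3600 is arcseconds per pixel and its inverse is the number of pixels spanning one arcsecond, which AnchoredSizeBar needs in data (pixel) coordinates. A standalone sketch on a plain pixel-indexed axes:

import matplotlib.pyplot as plt
from mpl_toolkits.axes_grid1.anchored_artists import AnchoredSizeBar

px_size = 0.05  # hypothetical plate scale [arcsec / pixel]
fig, ax = plt.subplots()
# A bar of 1/px_size pixels spans exactly one arcsecond on sky.
bar = AnchoredSizeBar(ax.transData, 1./px_size, '1 arcsec', 3, pad=0.5, sep=5,
                      borderpad=0.5, frameon=False, size_vertical=0.005, color='k')
ax.add_artist(bar)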
@@ -566,20 +573,23 @@ class align_maps(object):
 vmin, vmax = self.other_data[self.other_data > 0.].max()/1e3*self.other_convert, self.other_data[self.other_data > 0.].max()*self.other_convert
 for key, value in [["cmap", [["cmap", "inferno"]]], ["norm", [["norm", LogNorm(vmin, vmax)]]]]:
 try:
-test = other_kwargs[key]
+_ = other_kwargs[key]
 except KeyError:
 for key_i, val_i in value:
 other_kwargs[key_i] = val_i
-im2 = self.other_ax.imshow(self.other_data*self.other_convert, aspect='equal', **other_kwargs)
+self.other_ax.imshow(self.other_data*self.other_convert, aspect='equal', **other_kwargs)
 px_size2 = self.other_wcs.wcs.get_cdelt()[0]*3600.
-px_sc2 = AnchoredSizeBar(self.other_ax.transData, 1./px_size2, '1 arcsec', 3, pad=0.5, sep=5, borderpad=0.5, frameon=False, size_vertical=0.005, color=font_color, fontproperties=fontprops)
+px_sc2 = AnchoredSizeBar(self.other_ax.transData, 1./px_size2, '1 arcsec', 3, pad=0.5, sep=5, borderpad=0.5,
+                         frameon=False, size_vertical=0.005, color=font_color, fontproperties=fontprops)
 self.other_ax.add_artist(px_sc2)
 if 'PHOTPLAM' in list(self.other_header.keys()):
-annote2 = self.other_ax.annotate(r"$\lambda$ = {0:.0f} $\AA$".format(self.other_header['photplam']), color='white', fontsize=12, xy=(0.01, 0.93), xycoords='axes fraction',path_effects=[pe.withStroke(linewidth=0.5,foreground='k')])
+self.other_ax.annotate(r"$\lambda$ = {0:.0f} $\AA$".format(self.other_header['photplam']), color='white', fontsize=12, xy=(
+    0.01, 0.93), xycoords='axes fraction', path_effects=[pe.withStroke(linewidth=0.5, foreground='k')])
 if 'ORIENTAT' in list(self.other_header.keys()):
-north_dir2 = AnchoredDirectionArrows(self.map_ax.transAxes, "E", "N", length=-0.08, fontsize=0.03, loc=1, aspect_ratio=-1, sep_y=0.01, sep_x=0.01, angle=-self.other_header['orientat'], color=font_color, arrow_props={'ec': 'k', 'fc': 'w', 'alpha': 1,'lw': 0.5})
+north_dir2 = AnchoredDirectionArrows(self.map_ax.transAxes, "E", "N", length=-0.08, fontsize=0.03, loc=1, aspect_ratio=-1, sep_y=0.01,
+                                     sep_x=0.01, angle=-self.other_header['orientat'], color=font_color, arrow_props={'ec': 'k', 'fc': 'w', 'alpha': 1, 'lw': 0.5})
 self.other_ax.add_artist(north_dir2)
 self.cr_other, = self.other_ax.plot(*(self.other_wcs.wcs.crpix-(1., 1.)), 'r+')
@@ -681,11 +691,13 @@ class align_maps(object):
 self.write_other_to(path=path2, suffix=suffix, data_dir=data_dir)
 return 0
 class overplot_radio(align_maps):
 """
 Class to overplot maps from different observations.
 Inherit from class align_maps in order to get the same WCS on both maps.
 """
 def overplot(self, levels=None, SNRp_cut=3., SNRi_cut=30., vec_scale=2, savename=None, **kwargs):
 self.Stokes_UV = self.map
 self.wcs_UV = self.map_wcs
@@ -723,7 +735,7 @@ class overplot_radio(align_maps):
 vmin, vmax = stkI[np.isfinite(stkI)].max()/1e3*self.map_convert, stkI[np.isfinite(stkI)].max()*self.map_convert
 for key, value in [["cmap", [["cmap", "inferno"]]], ["norm", [["norm", LogNorm(vmin, vmax)]]]]:
 try:
-test = kwargs[key]
+_ = kwargs[key]
 except KeyError:
 for key_i, val_i in value:
 kwargs[key_i] = val_i
@@ -734,7 +746,8 @@ class overplot_radio(align_maps):
 self.ax_overplot.set_facecolor('white')
 font_color = "black"
 self.im = self.ax_overplot.imshow(stkI*self.map_convert, aspect='equal', label="{0:s} observation".format(self.map_observer), **kwargs)
-self.cbar = self.fig_overplot.colorbar(self.im, ax=self.ax_overplot, aspect=50, shrink=0.75, pad=0.025, label=r"$F_{{\lambda}}$ [{0:s}]".format(self.map_unit))
+self.cbar = self.fig_overplot.colorbar(self.im, ax=self.ax_overplot, aspect=50, shrink=0.75, pad=0.025,
+                                       label=r"$F_{{\lambda}}$ [{0:s}]".format(self.map_unit))
 # Display full size polarisation vectors
 if vec_scale is None:
@@ -743,40 +756,47 @@ class overplot_radio(align_maps):
 else:
 self.vec_scale = vec_scale
 step_vec = 1
-px_scale = self.other_wcs.wcs.get_cdelt()[0]/self.wcs_UV.wcs.get_cdelt()[0]
 self.X, self.Y = np.meshgrid(np.arange(stkI.shape[1]), np.arange(stkI.shape[0]))
 self.U, self.V = pol*np.cos(np.pi/2.+pang*np.pi/180.), pol*np.sin(np.pi/2.+pang*np.pi/180.)
-self.Q = self.ax_overplot.quiver(self.X[::step_vec,::step_vec],self.Y[::step_vec,::step_vec],self.U[::step_vec,::step_vec],self.V[::step_vec,::step_vec],units='xy',angles='uv',scale=1./self.vec_scale,scale_units='xy',pivot='mid',headwidth=0.,headlength=0.,headaxislength=0.,width=0.1,linewidth=0.5,color='white',edgecolor='black',label="{0:s} polarisation map".format(self.map_observer))
+self.Q = self.ax_overplot.quiver(self.X[::step_vec, ::step_vec], self.Y[::step_vec, ::step_vec], self.U[::step_vec, ::step_vec], self.V[::step_vec, ::step_vec], units='xy', angles='uv', scale=1./self.vec_scale,
+                                 scale_units='xy', pivot='mid', headwidth=0., headlength=0., headaxislength=0., width=0.1, linewidth=0.5, color='white', edgecolor='black', label="{0:s} polarisation map".format(self.map_observer))
 self.ax_overplot.autoscale(False)
 # Display other map as contours
 if levels is None:
 levels = np.logspace(np.log(3)/np.log(10), 2., 5)/100.*other_data[other_data > 0.].max()
-other_cont = self.ax_overplot.contour(other_data*self.other_convert, transform=self.ax_overplot.get_transform(self.other_wcs.celestial), levels=levels*self.other_convert, colors='grey')
+other_cont = self.ax_overplot.contour(
+    other_data*self.other_convert, transform=self.ax_overplot.get_transform(self.other_wcs.celestial), levels=levels*self.other_convert, colors='grey')
 self.ax_overplot.clabel(other_cont, inline=True, fontsize=5)
 other_proxy = Rectangle((0, 0), 1, 1, fc='w', ec=other_cont.collections[0].get_edgecolor()[0], label=r"{0:s} contour".format(self.other_observer))
 self.ax_overplot.add_patch(other_proxy)
 self.ax_overplot.set_xlabel(label="Right Ascension (J2000)")
 self.ax_overplot.set_ylabel(label="Declination (J2000)", labelpad=-1)
-self.fig_overplot.suptitle("{0:s} polarisation map of {1:s} overplotted with {2:s} {3:.2f}GHz map in {4:s}.".format(self.map_observer, obj, self.other_observer, other_freq*1e-9, self.other_unit),wrap=True)
+self.fig_overplot.suptitle("{0:s} polarisation map of {1:s} overplotted with {2:s} {3:.2f}GHz map in {4:s}.".format(
+    self.map_observer, obj, self.other_observer, other_freq*1e-9, self.other_unit), wrap=True)
 # Display pixel scale and North direction
 fontprops = fm.FontProperties(size=16)
 px_size = self.wcs_UV.wcs.get_cdelt()[0]*3600.
-px_sc = AnchoredSizeBar(self.ax_overplot.transData, 1./px_size, '1 arcsec', 3, pad=0.5, sep=5, borderpad=0.5, frameon=False, size_vertical=0.005, color=font_color, fontproperties=fontprops)
+px_sc = AnchoredSizeBar(self.ax_overplot.transData, 1./px_size, '1 arcsec', 3, pad=0.5, sep=5, borderpad=0.5,
+                        frameon=False, size_vertical=0.005, color=font_color, fontproperties=fontprops)
 self.ax_overplot.add_artist(px_sc)
-north_dir = AnchoredDirectionArrows(self.ax_overplot.transAxes, "E", "N", length=-0.08, fontsize=0.03, loc=1, aspect_ratio=-1, sep_y=0.01, sep_x=0.01, angle=-self.Stokes_UV[0].header['orientat'], color=font_color, arrow_props={'ec': 'k', 'fc': 'w', 'alpha': 1,'lw': 0.5})
+north_dir = AnchoredDirectionArrows(self.ax_overplot.transAxes, "E", "N", length=-0.08, fontsize=0.03, loc=1, aspect_ratio=-1, sep_y=0.01,
+                                    sep_x=0.01, angle=-self.Stokes_UV[0].header['orientat'], color=font_color, arrow_props={'ec': 'k', 'fc': 'w', 'alpha': 1, 'lw': 0.5})
 self.ax_overplot.add_artist(north_dir)
-pol_sc = AnchoredSizeBar(self.ax_overplot.transData, self.vec_scale, r"$P$= 100%", 4, pad=0.5, sep=5, borderpad=0.5, frameon=False, size_vertical=0.005, color=font_color, fontproperties=fontprops)
+pol_sc = AnchoredSizeBar(self.ax_overplot.transData, self.vec_scale, r"$P$= 100%", 4, pad=0.5, sep=5, borderpad=0.5,
+                         frameon=False, size_vertical=0.005, color=font_color, fontproperties=fontprops)
 self.ax_overplot.add_artist(pol_sc)
 self.cr_map, = self.ax_overplot.plot(*(self.map_wcs.celestial.wcs.crpix-(1., 1.)), 'r+')
 self.cr_other, = self.ax_overplot.plot(*(self.other_wcs.celestial.wcs.crpix-(1., 1.)), 'g+', transform=self.ax_overplot.get_transform(self.other_wcs))
-h,l = self.ax_overplot.get_legend_handles_labels()
+handles, labels = self.ax_overplot.get_legend_handles_labels()
-h[np.argmax([li=="{0:s} polarisation map".format(self.map_observer) for li in l])] = FancyArrowPatch((0,0),(0,1),arrowstyle='-',fc='w',ec='k',lw=2)
+handles[np.argmax([li == "{0:s} polarisation map".format(self.map_observer) for li in labels])
+        ] = FancyArrowPatch((0, 0), (0, 1), arrowstyle='-', fc='w', ec='k', lw=2)
-self.legend = self.ax_overplot.legend(handles=h,labels=l,bbox_to_anchor=(0., 1.02, 1., .102), loc='lower left', mode="expand", borderaxespad=0.)
+self.legend = self.ax_overplot.legend(handles=handles, labels=labels, bbox_to_anchor=(
+    0., 1.02, 1., .102), loc='lower left', mode="expand", borderaxespad=0.)
 if not (savename is None):
 if not savename[-4:] in ['.png', '.jpg', '.pdf']:
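Renaming h, l to handles, labels fixes the ambiguous single-letter name l (flake8 E741). The surrounding logic swaps the quiver's legend handle for a plain segment, since headless quiver keys render poorly; a minimal sketch of that proxy-artist trick with a hypothetical label:

import matplotlib.pyplot as plt
from matplotlib.patches import FancyArrowPatch

fig, ax = plt.subplots()
ax.quiver([0.], [0.], [1.], [1.], label="polarisation map")  # hypothetical data
handles, labels = ax.get_legend_handles_labels()
# Replace the quiver handle with a simple segment that reads better in a key.
handles[labels.index("polarisation map")] = FancyArrowPatch(
    (0, 0), (0, 1), arrowstyle='-', fc='w', ec='k', lw=2)
ax.legend(handles=handles, labels=labels)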
@@ -785,18 +805,19 @@ class overplot_radio(align_maps):
 self.fig_overplot.canvas.draw()
 def plot(self, levels=None, SNRp_cut=3., SNRi_cut=30., savename=None, **kwargs) -> None:
 while not self.aligned:
 self.align()
 self.overplot(levels=levels, SNRp_cut=SNRp_cut, SNRi_cut=SNRi_cut, savename=savename, **kwargs)
 plt.show(block=True)
 class overplot_chandra(align_maps):
 """
 Class to overplot maps from different observations.
 Inherit from class align_maps in order to get the same WCS on both maps.
 """
 def overplot(self, levels=None, SNRp_cut=3., SNRi_cut=30., vec_scale=2, zoom=1, savename=None, **kwargs):
 self.Stokes_UV = self.map
 self.wcs_UV = self.map_wcs
@@ -814,7 +835,7 @@ class overplot_chandra(align_maps):
 other_data = sc_zoom(other_data, zoom)
 other_wcs.wcs.crpix *= zoom
 other_wcs.wcs.cdelt /= zoom
-other_unit = 'counts'
+self.other_unit = 'counts'
 # Compute SNR and apply cuts
 pol[pol == 0.] = np.nan
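The switch from other_unit to self.other_unit is more than formatting: a bare assignment only creates a local name that vanishes when overplot returns, while the attribute (first set in align_maps.__init__) is what later labelling code reads, so the zoomed Chandra map presumably needs the attribute updated. A minimal illustration of the difference:

class Demo:
    def set_unit(self):
        unit = 'counts'       # local name: discarded when the method returns
        self.unit = 'counts'  # attribute: visible to every other method

d = Demo()
d.set_unit()
print(d.unit)  # -> counts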
@@ -833,7 +854,7 @@ class overplot_chandra(align_maps):
 vmin, vmax = stkI[np.isfinite(stkI)].max()/1e3*self.map_convert, stkI[np.isfinite(stkI)].max()*self.map_convert
 for key, value in [["cmap", [["cmap", "inferno"]]], ["norm", [["norm", LogNorm(vmin, vmax)]]]]:
 try:
-test = kwargs[key]
+_ = kwargs[key]
 except KeyError:
 for key_i, val_i in value:
 kwargs[key_i] = val_i
@@ -844,7 +865,8 @@ class overplot_chandra(align_maps):
 self.ax_overplot.set_facecolor('white')
 font_color = "black"
 self.im = self.ax_overplot.imshow(stkI*self.map_convert, aspect='equal', **kwargs)
-self.cbar = self.fig_overplot.colorbar(self.im, ax=self.ax_overplot, aspect=50, shrink=0.75, pad=0.025, label=r"$F_{{\lambda}}$ [{0:s}]".format(self.map_unit))
+self.cbar = self.fig_overplot.colorbar(self.im, ax=self.ax_overplot, aspect=50, shrink=0.75, pad=0.025,
+                                       label=r"$F_{{\lambda}}$ [{0:s}]".format(self.map_unit))
 # Display full size polarisation vectors
 if vec_scale is None:
@@ -853,11 +875,10 @@ class overplot_chandra(align_maps):
 else:
 self.vec_scale = vec_scale
 step_vec = 1
-px_scale = 1./self.wcs_UV.wcs.get_cdelt()[0]
 self.X, self.Y = np.meshgrid(np.arange(stkI.shape[1]), np.arange(stkI.shape[0]))
 self.U, self.V = pol*np.cos(np.pi/2.+pang*np.pi/180.), pol*np.sin(np.pi/2.+pang*np.pi/180.)
-self.Q = self.ax_overplot.quiver(self.X[::step_vec,::step_vec],self.Y[::step_vec,::step_vec],self.U[::step_vec,::step_vec],self.V[::step_vec,::step_vec],units='xy',angles='uv',scale=1./self.vec_scale,scale_units='xy',pivot='mid',headwidth=0.,headlength=0.,headaxislength=0.,width=0.1,linewidth=0.5,color='white',edgecolor='black',label="{0:s} polarisation map".format(self.map_observer))
-proxy_Q = FancyArrowPatch((0,0),(0,1),arrowstyle='-',fc='w',ec='k',lw=3)
+self.Q = self.ax_overplot.quiver(self.X[::step_vec, ::step_vec], self.Y[::step_vec, ::step_vec], self.U[::step_vec, ::step_vec], self.V[::step_vec, ::step_vec], units='xy', angles='uv', scale=1./self.vec_scale,
+                                 scale_units='xy', pivot='mid', headwidth=0., headlength=0., headaxislength=0., width=0.1, linewidth=0.5, color='white', edgecolor='black', label="{0:s} polarisation map".format(self.map_observer))
 self.ax_overplot.autoscale(False)
 # Display other map as contours
@@ -867,28 +888,35 @@ class overplot_chandra(align_maps):
 levels *= other_data.max()/self.other_data.max()
 other_cont = self.ax_overplot.contour(other_data*self.other_convert, transform=self.ax_overplot.get_transform(other_wcs), levels=levels, colors='grey')
 self.ax_overplot.clabel(other_cont, inline=True, fontsize=8)
-other_proxy = Rectangle((0,0),1.,1.,fc='w',ec=other_cont.collections[0].get_edgecolor()[0], lw=2, label=r"{0:s} contour in counts".format(self.other_observer))
+other_proxy = Rectangle((0, 0), 1., 1., fc='w', ec=other_cont.collections[0].get_edgecolor()[
+    0], lw=2, label=r"{0:s} contour in counts".format(self.other_observer))
 self.ax_overplot.add_patch(other_proxy)
 self.ax_overplot.set_xlabel(label="Right Ascension (J2000)")
 self.ax_overplot.set_ylabel(label="Declination (J2000)", labelpad=-1)
-self.fig_overplot.suptitle("{0:s} polarisation map of {1:s} overplotted\nwith {2:s} contour in counts.".format(self.map_observer,obj,self.other_observer),wrap=True)
+self.fig_overplot.suptitle("{0:s} polarisation map of {1:s} overplotted\nwith {2:s} contour in counts.".format(
+    self.map_observer, obj, self.other_observer), wrap=True)
 # Display pixel scale and North direction
 fontprops = fm.FontProperties(size=16)
 px_size = self.wcs_UV.wcs.get_cdelt()[0]*3600.
-px_sc = AnchoredSizeBar(self.ax_overplot.transData, 1./px_size, '1 arcsec', 3, pad=0.5, sep=5, borderpad=0.5, frameon=False, size_vertical=0.005, color=font_color, fontproperties=fontprops)
+px_sc = AnchoredSizeBar(self.ax_overplot.transData, 1./px_size, '1 arcsec', 3, pad=0.5, sep=5, borderpad=0.5,
+                        frameon=False, size_vertical=0.005, color=font_color, fontproperties=fontprops)
 self.ax_overplot.add_artist(px_sc)
-north_dir = AnchoredDirectionArrows(self.ax_overplot.transAxes, "E", "N", length=-0.08, fontsize=0.03, loc=1, aspect_ratio=-1, sep_y=0.01, sep_x=0.01, angle=-self.Stokes_UV[0].header['orientat'], color=font_color, arrow_props={'ec': 'k', 'fc': 'w', 'alpha': 1,'lw': 0.5})
+north_dir = AnchoredDirectionArrows(self.ax_overplot.transAxes, "E", "N", length=-0.08, fontsize=0.03, loc=1, aspect_ratio=-1, sep_y=0.01,
+                                    sep_x=0.01, angle=-self.Stokes_UV[0].header['orientat'], color=font_color, arrow_props={'ec': 'k', 'fc': 'w', 'alpha': 1, 'lw': 0.5})
 self.ax_overplot.add_artist(north_dir)
-pol_sc = AnchoredSizeBar(self.ax_overplot.transData, self.vec_scale, r"$P$= 100%", 4, pad=0.5, sep=5, borderpad=0.5, frameon=False, size_vertical=0.005, color=font_color, fontproperties=fontprops)
+pol_sc = AnchoredSizeBar(self.ax_overplot.transData, self.vec_scale, r"$P$= 100%", 4, pad=0.5, sep=5, borderpad=0.5,
+                         frameon=False, size_vertical=0.005, color=font_color, fontproperties=fontprops)
 self.ax_overplot.add_artist(pol_sc)
 self.cr_map, = self.ax_overplot.plot(*(self.map_wcs.celestial.wcs.crpix-(1., 1.)), 'r+')
 self.cr_other, = self.ax_overplot.plot(*(other_wcs.celestial.wcs.crpix-(1., 1.)), 'g+', transform=self.ax_overplot.get_transform(other_wcs))
-h,l = self.ax_overplot.get_legend_handles_labels()
+handles, labels = self.ax_overplot.get_legend_handles_labels()
-h[np.argmax([li=="{0:s} polarisation map".format(self.map_observer) for li in l])] = FancyArrowPatch((0,0),(0,1),arrowstyle='-',fc='w',ec='k',lw=2)
+handles[np.argmax([li == "{0:s} polarisation map".format(self.map_observer) for li in labels])
+        ] = FancyArrowPatch((0, 0), (0, 1), arrowstyle='-', fc='w', ec='k', lw=2)
-self.legend = self.ax_overplot.legend(handles=h,labels=l,bbox_to_anchor=(0., 1.02, 1., .102), loc='lower left', mode="expand", borderaxespad=0.)
+self.legend = self.ax_overplot.legend(handles=handles, labels=labels, bbox_to_anchor=(
+    0., 1.02, 1., .102), loc='lower left', mode="expand", borderaxespad=0.)
 if not (savename is None):
 if not savename[-4:] in ['.png', '.jpg', '.pdf']:
@@ -909,6 +937,7 @@ class overplot_pol(align_maps):
 Class to overplot maps from different observations.
 Inherit from class align_maps in order to get the same WCS on both maps.
 """
 def overplot(self, levels=None, SNRp_cut=3., SNRi_cut=30., vec_scale=2., savename=None, **kwargs):
 self.Stokes_UV = self.map
 self.wcs_UV = self.map_wcs
@@ -937,13 +966,14 @@ class overplot_pol(align_maps):
 self.ax_overplot.set_xlabel(label="Right Ascension (J2000)")
 self.ax_overplot.set_ylabel(label="Declination (J2000)", labelpad=-1)
-self.fig_overplot.suptitle("{0:s} observation from {1:s} overplotted with polarisation vectors and Stokes I contours from {2:s}".format(obj,self.other_observer,self.map_observer),wrap=True)
+self.fig_overplot.suptitle("{0:s} observation from {1:s} overplotted with polarisation vectors and Stokes I contours from {2:s}".format(
+    obj, self.other_observer, self.map_observer), wrap=True)
 # Display "other" intensity map
 vmin, vmax = other_data[other_data > 0.].max()/1e3*self.other_convert, other_data[other_data > 0.].max()*self.other_convert
 for key, value in [["cmap", [["cmap", "inferno"]]], ["norm", [["vmin", vmin], ["vmax", vmax]]]]:
 try:
-test = kwargs[key]
+_ = kwargs[key]
 except KeyError:
 for key_i, val_i in value:
 kwargs[key_i] = val_i
@@ -954,7 +984,8 @@ class overplot_pol(align_maps):
 self.ax_overplot.set_facecolor('white')
 font_color = "black"
 self.im = self.ax_overplot.imshow(other_data*self.other_convert, alpha=1., label="{0:s} observation".format(self.other_observer), **kwargs)
-self.cbar = self.fig_overplot.colorbar(self.im, ax=self.ax_overplot, aspect=80, shrink=0.75, pad=0.025, label=r"$F_{{\lambda}}$ [{0:s}]".format(self.other_unit))
+self.cbar = self.fig_overplot.colorbar(self.im, ax=self.ax_overplot, aspect=80, shrink=0.75, pad=0.025,
+                                       label=r"$F_{{\lambda}}$ [{0:s}]".format(self.other_unit))
 # Display full size polarisation vectors
 if vec_scale is None:
@@ -966,12 +997,14 @@ class overplot_pol(align_maps):
 px_scale = self.other_wcs.wcs.get_cdelt()[0]/self.wcs_UV.wcs.get_cdelt()[0]
 self.X, self.Y = np.meshgrid(np.arange(stkI.shape[1]), np.arange(stkI.shape[0]))
 self.U, self.V = pol*np.cos(np.pi/2.+pang*np.pi/180.), pol*np.sin(np.pi/2.+pang*np.pi/180.)
-self.Q = self.ax_overplot.quiver(self.X[::step_vec,::step_vec],self.Y[::step_vec,::step_vec],self.U[::step_vec,::step_vec],self.V[::step_vec,::step_vec],units='xy',angles='uv',scale=px_scale/self.vec_scale,scale_units='xy',pivot='mid',headwidth=0.,headlength=0.,headaxislength=0.,width=0.1/px_scale,linewidth=0.5,color='white',edgecolor='black', transform=self.ax_overplot.get_transform(self.wcs_UV),label="{0:s} polarisation map".format(self.map_observer))
+self.Q = self.ax_overplot.quiver(self.X[::step_vec, ::step_vec], self.Y[::step_vec, ::step_vec], self.U[::step_vec, ::step_vec], self.V[::step_vec, ::step_vec], units='xy', angles='uv', scale=px_scale/self.vec_scale, scale_units='xy', pivot='mid',
+                                 headwidth=0., headlength=0., headaxislength=0., width=0.1/px_scale, linewidth=0.5, color='white', edgecolor='black', transform=self.ax_overplot.get_transform(self.wcs_UV), label="{0:s} polarisation map".format(self.map_observer))
 # Display Stokes I as contours
 if levels is None:
 levels = np.logspace(np.log(3)/np.log(10), 2., 5)/100.*np.max(stkI[stkI > 0.])*self.map_convert
-cont_stkI = self.ax_overplot.contour(stkI*self.map_convert, levels=levels, colors='grey', alpha=0.75, transform=self.ax_overplot.get_transform(self.wcs_UV))
+cont_stkI = self.ax_overplot.contour(stkI*self.map_convert, levels=levels, colors='grey', alpha=0.75,
+                                     transform=self.ax_overplot.get_transform(self.wcs_UV))
 # self.ax_overplot.clabel(cont_stkI, inline=True, fontsize=5)
 cont_proxy = Rectangle((0, 0), 1, 1, fc='w', ec=cont_stkI.collections[0].get_edgecolor()[0], label="{0:s} Stokes I contour".format(self.map_observer))
 self.ax_overplot.add_patch(cont_proxy)
@@ -979,11 +1012,14 @@ class overplot_pol(align_maps):
 # Display pixel scale and North direction
 fontprops = fm.FontProperties(size=16)
 px_size = self.other_wcs.wcs.get_cdelt()[0]*3600.
-px_sc = AnchoredSizeBar(self.ax_overplot.transData, 1./px_size, '1 arcsec', 3, pad=0.5, sep=5, borderpad=0.5, frameon=False, size_vertical=0.005, color=font_color, fontproperties=fontprops)
+px_sc = AnchoredSizeBar(self.ax_overplot.transData, 1./px_size, '1 arcsec', 3, pad=0.5, sep=5, borderpad=0.5,
+                        frameon=False, size_vertical=0.005, color=font_color, fontproperties=fontprops)
 self.ax_overplot.add_artist(px_sc)
-north_dir = AnchoredDirectionArrows(self.ax_overplot.transAxes, "E", "N", length=-0.08, fontsize=0.03, loc=1, aspect_ratio=-1, sep_y=0.01, sep_x=0.01, angle=-self.Stokes_UV[0].header['orientat'], color=font_color, arrow_props={'ec': 'k', 'fc': 'w', 'alpha': 1,'lw': 0.5})
+north_dir = AnchoredDirectionArrows(self.ax_overplot.transAxes, "E", "N", length=-0.08, fontsize=0.03, loc=1, aspect_ratio=-1, sep_y=0.01,
+                                    sep_x=0.01, angle=-self.Stokes_UV[0].header['orientat'], color=font_color, arrow_props={'ec': 'k', 'fc': 'w', 'alpha': 1, 'lw': 0.5})
 self.ax_overplot.add_artist(north_dir)
-pol_sc = AnchoredSizeBar(self.ax_overplot.transData, self.vec_scale/px_scale, r"$P$= 100%", 4, pad=0.5, sep=5, borderpad=0.5, frameon=False, size_vertical=0.005, color=font_color, fontproperties=fontprops)
+pol_sc = AnchoredSizeBar(self.ax_overplot.transData, self.vec_scale/px_scale, r"$P$= 100%", 4, pad=0.5, sep=5,
+                         borderpad=0.5, frameon=False, size_vertical=0.005, color=font_color, fontproperties=fontprops)
 self.ax_overplot.add_artist(pol_sc)
 self.cr_map, = self.ax_overplot.plot(*(self.map_wcs.celestial.wcs.crpix-(1., 1.)), 'r+', transform=self.ax_overplot.get_transform(self.wcs_UV))
@@ -996,9 +1032,11 @@ class overplot_pol(align_maps):
 else:
 self.legend_title = r"{0:s} image".format(self.other_observer)
-h,l = self.ax_overplot.get_legend_handles_labels()
+handles, labels = self.ax_overplot.get_legend_handles_labels()
-h[np.argmax([li=="{0:s} polarisation map".format(self.map_observer) for li in l])] = FancyArrowPatch((0,0),(0,1),arrowstyle='-',fc='w',ec='k',lw=2)
+handles[np.argmax([li == "{0:s} polarisation map".format(self.map_observer) for li in labels])
+        ] = FancyArrowPatch((0, 0), (0, 1), arrowstyle='-', fc='w', ec='k', lw=2)
-self.legend = self.ax_overplot.legend(handles=h,labels=l,bbox_to_anchor=(0., 1.02, 1., .102), loc='lower left', mode="expand", borderaxespad=0.)
+self.legend = self.ax_overplot.legend(handles=handles, labels=labels, bbox_to_anchor=(
+    0., 1.02, 1., .102), loc='lower left', mode="expand", borderaxespad=0.)
 if not (savename is None):
 if not savename[-4:] in ['.png', '.jpg', '.pdf']:
@@ -1016,22 +1054,24 @@ class overplot_pol(align_maps):
 def add_vector(self, position='center', pol_deg=1., pol_ang=0., **kwargs):
 if position == 'center':
 position = np.array(self.X.shape)/2.
-if type(position) == SkyCoord:
+if isinstance(position, SkyCoord):
 position = self.other_wcs.world_to_pixel(position)
 u, v = pol_deg*np.cos(np.radians(pol_ang)+np.pi/2.), pol_deg*np.sin(np.radians(pol_ang)+np.pi/2.)
 for key, value in [["scale", [["scale", self.vec_scale]]], ["width", [["width", 0.1]]], ["color", [["color", 'k']]]]:
 try:
-test = kwargs[key]
+_ = kwargs[key]
 except KeyError:
 for key_i, val_i in value:
 kwargs[key_i] = val_i
-new_vec = self.ax_overplot.quiver(*position,u,v,units='xy',angles='uv',scale_units='xy',pivot='mid',headwidth=0.,headlength=0.,headaxislength=0.,**kwargs)
+new_vec = self.ax_overplot.quiver(*position, u, v, units='xy', angles='uv', scale_units='xy',
+                                  pivot='mid', headwidth=0., headlength=0., headaxislength=0., **kwargs)
 self.legend.remove()
 self.legend = self.ax_overplot.legend(title=self.legend_title, bbox_to_anchor=(0., 1.02, 1., .102), loc='lower left', mode="expand", borderaxespad=0.)
 self.fig_overplot.canvas.draw()
 return new_vec
 class align_pol(object):
 def __init__(self, maps, **kwargs):
 order = np.argsort(np.array([curr[0].header['mjd-obs'] for curr in maps]))
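type(position) == SkyCoord here, and str(type(test)) == "<class 'matplotlib.colors.LogNorm'>" further down, are both replaced with isinstance, which is what flake8's E721 asks for and which also accepts subclasses. A minimal illustration of why the string comparison is fragile:

from matplotlib.colors import LogNorm

norm = LogNorm(1e-3, 1.)
# Fragile: compares the repr of the type and rejects subclasses.
assert str(type(norm)) == "<class 'matplotlib.colors.LogNorm'>"
# Robust spelling of the same test:
assert isinstance(norm, LogNorm)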
@@ -1047,34 +1087,29 @@ class align_pol(object):
 def single_plot(self, curr_map, wcs, v_lim=None, ax_lim=None, SNRp_cut=3., SNRi_cut=30., savename=None, **kwargs):
 # Get data
-stkI = deepcopy(curr_map['I_STOKES'].data)
-stkQ = deepcopy(curr_map['Q_STOKES'].data)
-stkU = deepcopy(curr_map['U_STOKES'].data)
-stk_cov = deepcopy(curr_map['IQU_COV_MATRIX'].data)
+stkI = curr_map['I_STOKES'].data
+stk_cov = curr_map['IQU_COV_MATRIX'].data
 pol = deepcopy(curr_map['POL_DEG_DEBIASED'].data)
-pol_err = deepcopy(curr_map['POL_DEG_ERR'].data)
-pang = deepcopy(curr_map['POL_ANG'].data)
+pol_err = curr_map['POL_DEG_ERR'].data
+pang = curr_map['POL_ANG'].data
 try:
 data_mask = curr_map['DATA_MASK'].data.astype(bool)
 except KeyError:
 data_mask = np.ones(stkI.shape).astype(bool)
-pivot_wav = curr_map[0].header['photplam']
 convert_flux = curr_map[0].header['photflam']
 # Compute SNR and apply cuts
-pol[pol == 0.] = np.nan
-pol_err[pol_err == 0.] = np.nan
-SNRp = pol/pol_err
-SNRp[np.isnan(SNRp)] = 0.
-pol[SNRp < SNRp_cut] = np.nan
+maskpol = np.logical_and(pol_err > 0., data_mask)
+SNRp = np.zeros(pol.shape)
+SNRp[maskpol] = pol[maskpol]/pol_err[maskpol]
-maskI = stk_cov[0,0] > 0
+maskI = np.logical_and(stk_cov[0, 0] > 0, data_mask)
 SNRi = np.zeros(stkI.shape)
 SNRi[maskI] = stkI[maskI]/np.sqrt(stk_cov[0, 0][maskI])
-pol[SNRi < SNRi_cut] = np.nan
-mask = (SNRp > SNRp_cut) * (SNRi > SNRi_cut)
+mask = (SNRp > SNRp_cut) * (SNRi > SNRi_cut) * (pol >= 0.)
+pol[mask] = np.nan
 # Plot the map
 plt.rcParams.update({'font.size': 10})
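The rewritten cut avoids the divide-by-zero and NaN juggling of the old pol/pol_err by dividing only where the error is positive and the data mask holds. Note that, as committed, pol[mask] = np.nan blanks the pixels that pass the cuts; inverting the mask, as in the sketch below, would keep them instead, which appears to be the intent. A standalone sketch of the masked-SNR pattern with hypothetical arrays:

import numpy as np

pol = np.array([[0.10, 0.40], [0.20, 0.05]])    # hypothetical P map
pol_err = np.array([[0.05, 0.05], [0., 0.05]])  # hypothetical sigma_P map
data_mask = np.ones_like(pol, dtype=bool)

# Divide only where sigma_P > 0 and the pixel lies inside the data mask.
ok = np.logical_and(pol_err > 0., data_mask)
SNRp = np.zeros(pol.shape)
SNRp[ok] = pol[ok]/pol_err[ok]

keep = (SNRp > 3.) * (pol >= 0.)
pol[~keep] = np.nan  # blank everything failing the cuts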
@@ -1083,10 +1118,9 @@ class align_pol(object):
 ax = fig.add_subplot(111, projection=wcs)
 ax.set(xlabel="Right Ascension (J2000)", ylabel="Declination (J2000)", facecolor='k',
 title="target {0:s} observed on {1:s}".format(curr_map[0].header['targname'], curr_map[0].header['date-obs']))
-fig.subplots_adjust(hspace=0, wspace=0, right=0.9)
+fig.subplots_adjust(hspace=0, wspace=0, right=0.102)
-cbar_ax = fig.add_axes([0.95, 0.12, 0.01, 0.75])
-if not ax_lim is None:
+if ax_lim is not None:
 lim = np.concatenate([wcs.world_to_pixel(ax_lim[i]) for i in range(len(ax_lim))])
 x_lim, y_lim = lim[0::2], lim[1::2]
 ax.set(xlim=x_lim, ylim=y_lim)
@@ -1099,31 +1133,37 @@ class align_pol(object):
 for key, value in [["cmap", [["cmap", "inferno"]]], ["norm", [["vmin", vmin], ["vmax", vmax]]]]:
 try:
 test = kwargs[key]
-if str(type(test)) == "<class 'matplotlib.colors.LogNorm'>":
+if isinstance(test, LogNorm):
 kwargs[key] = LogNorm(vmin, vmax)
 except KeyError:
 for key_i, val_i in value:
 kwargs[key_i] = val_i
 im = ax.imshow(stkI*convert_flux, aspect='equal', **kwargs)
-cbar = plt.colorbar(im, cax=cbar_ax, label=r"$F_{\lambda}$ [$ergs \cdot cm^{-2} \cdot s^{-1} \cdot \AA^{-1}$]")
+fig.colorbar(im, ax=ax, aspect=50, shrink=0.75, pad=0.025, label=r"$F_{\lambda}$ [$ergs \cdot cm^{-2} \cdot s^{-1} \cdot \AA^{-1}$]")
 px_size = wcs.wcs.get_cdelt()[0]*3600.
 px_sc = AnchoredSizeBar(ax.transData, 1./px_size, '1 arcsec', 3, pad=0.5, sep=5, borderpad=0.5, frameon=False, size_vertical=0.005, color='w')
 ax.add_artist(px_sc)
-north_dir = AnchoredDirectionArrows(ax.transAxes, "E", "N", length=-0.08, fontsize=0.025, loc=1, aspect_ratio=-1, sep_y=0.01, sep_x=0.01, back_length=0., head_length=10., head_width=10., angle=curr_map[0].header['orientat'], color='white', text_props={'ec': None, 'fc': 'w', 'alpha': 1, 'lw': 0.4}, arrow_props={'ec': None,'fc':'w','alpha': 1,'lw': 1})
+north_dir = AnchoredDirectionArrows(ax.transAxes, "E", "N", length=-0.08, fontsize=0.025, loc=1, aspect_ratio=-1, sep_y=0.01, sep_x=0.01, back_length=0., head_length=10., head_width=10.,
+                                    angle=curr_map[0].header['orientat'], color='white', text_props={'ec': None, 'fc': 'w', 'alpha': 1, 'lw': 0.4}, arrow_props={'ec': None, 'fc': 'w', 'alpha': 1, 'lw': 1})
 ax.add_artist(north_dir)
 step_vec = 1
 X, Y = np.meshgrid(np.arange(stkI.shape[1]), np.arange(stkI.shape[0]))
 U, V = pol*np.cos(np.pi/2.+pang*np.pi/180.), pol*np.sin(np.pi/2.+pang*np.pi/180.)
-Q = ax.quiver(X[::step_vec,::step_vec],Y[::step_vec,::step_vec],U[::step_vec,::step_vec],V[::step_vec,::step_vec],units='xy',angles='uv',scale=0.5,scale_units='xy',pivot='mid',headwidth=0.,headlength=0.,headaxislength=0.,width=0.1,color='w')
+ax.quiver(X[::step_vec, ::step_vec], Y[::step_vec, ::step_vec], U[::step_vec, ::step_vec], V[::step_vec, ::step_vec], units='xy',
+          angles='uv', scale=0.5, scale_units='xy', pivot='mid', headwidth=0., headlength=0., headaxislength=0., width=0.1, color='w')
 pol_sc = AnchoredSizeBar(ax.transData, 2., r"$P$= 100 %", 4, pad=0.5, sep=5, borderpad=0.5, frameon=False, size_vertical=0.005, color='w')
 ax.add_artist(pol_sc)
-if not savename is None:
-if not savename[-4:] in ['.png', '.jpg', '.pdf']:
+if 'PHOTPLAM' in list(curr_map[0].header.keys()):
+ax.annotate(r"$\lambda$ = {0:.0f} $\AA$".format(curr_map[0].header['photplam']), color='white', fontsize=12, xy=(
+    0.01, 0.93), xycoords='axes fraction', path_effects=[pe.withStroke(linewidth=0.5, foreground='k')])
+if savename is not None:
+if savename[-4:] not in ['.png', '.jpg', '.pdf']:
 savename += '.pdf'
 fig.savefig(savename, bbox_inches='tight', dpi=300)
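Dropping the manually positioned cbar_ax in favour of fig.colorbar(im, ax=ax, ...) lets matplotlib steal space from the target axes instead of relying on hard-coded figure coordinates, which keeps the bar attached to the image through layout changes. A minimal sketch of the replacement call with a hypothetical image:

import numpy as np
import matplotlib.pyplot as plt

fig, ax = plt.subplots()
im = ax.imshow(np.random.rand(16, 16), cmap='inferno')  # hypothetical data
# Attached to the axes: the bar follows the image when limits change.
fig.colorbar(im, ax=ax, aspect=50, shrink=0.75, pad=0.025, label='flux')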
@@ -1140,10 +1180,14 @@ class align_pol(object):
 while not self.aligned.all():
 self.align()
 eps = 1e-35
-vmin = np.min([np.min(curr_map[0].data[curr_map[0].data > SNRi_cut*np.max([eps*np.ones(curr_map[0].data.shape),np.sqrt(curr_map[3].data[0,0])],axis=0)]) for curr_map in self.other_maps])/2.5
+vmin = np.min([np.min(curr_map[0].data[curr_map[0].data > SNRi_cut*np.max([eps*np.ones(curr_map[0].data.shape),
+              np.sqrt(curr_map[3].data[0, 0])], axis=0)]) for curr_map in self.other_maps])/2.5
-vmax = np.max([np.max(curr_map[0].data[curr_map[0].data > SNRi_cut*np.max([eps*np.ones(curr_map[0].data.shape),np.sqrt(curr_map[3].data[0,0])],axis=0)]) for curr_map in self.other_maps])
+vmax = np.max([np.max(curr_map[0].data[curr_map[0].data > SNRi_cut*np.max([eps*np.ones(curr_map[0].data.shape),
+              np.sqrt(curr_map[3].data[0, 0])], axis=0)]) for curr_map in self.other_maps])
-vmin = np.min([vmin, np.min(self.ref_map[0].data[self.ref_map[0].data > SNRi_cut*np.max([eps*np.ones(self.ref_map[0].data.shape),np.sqrt(self.ref_map[3].data[0,0])],axis=0)])])/2.5
+vmin = np.min([vmin, np.min(self.ref_map[0].data[self.ref_map[0].data > SNRi_cut *
+              np.max([eps*np.ones(self.ref_map[0].data.shape), np.sqrt(self.ref_map[3].data[0, 0])], axis=0)])])/2.5
-vmax = np.max([vmax, np.max(self.ref_map[0].data[self.ref_map[0].data > SNRi_cut*np.max([eps*np.ones(self.ref_map[0].data.shape),np.sqrt(self.ref_map[3].data[0,0])],axis=0)])])
+vmax = np.max([vmax, np.max(self.ref_map[0].data[self.ref_map[0].data > SNRi_cut *
+              np.max([eps*np.ones(self.ref_map[0].data.shape), np.sqrt(self.ref_map[3].data[0, 0])], axis=0)])])
 v_lim = np.array([vmin, vmax])
 fig, ax = self.single_plot(self.ref_map, self.wcs, v_lim=v_lim, SNRp_cut=SNRp_cut, SNRi_cut=SNRi_cut, savename=savename+'_0', **kwargs)
@@ -1151,13 +1195,15 @@ class align_pol(object):
 ax_lim = np.array([self.wcs.pixel_to_world(x_lim[i], y_lim[i]) for i in range(len(x_lim))])
 for i, curr_map in enumerate(self.other_maps):
-self.single_plot(curr_map, self.wcs_other[i], v_lim=v_lim, ax_lim=ax_lim, SNRp_cut=SNRp_cut, SNRi_cut=SNRi_cut, savename=savename+'_'+str(i+1), **kwargs)
+self.single_plot(curr_map, self.wcs_other[i], v_lim=v_lim, ax_lim=ax_lim, SNRp_cut=SNRp_cut,
+                 SNRi_cut=SNRi_cut, savename=savename+'_'+str(i+1), **kwargs)
 class crop_map(object):
 """
 Class to interactively crop a map to desired Region of Interest
 """
 def __init__(self, hdul, fig=None, ax=None, **kwargs):
 # Get data
 self.cropped = False
@@ -1204,7 +1250,6 @@ class crop_map(object):
 self.RSextent = deepcopy(self.extent)
 self.RScenter = deepcopy(self.center)
 def display(self, data=None, wcs=None, convert_flux=None, **kwargs):
 if data is None:
 data = self.data
@@ -1220,7 +1265,7 @@ class crop_map(object):
 vmin, vmax = np.min(data[data > 0.]*convert_flux), np.max(data[data > 0.]*convert_flux)
 for key, value in [["cmap", [["cmap", "inferno"]]], ["origin", [["origin", "lower"]]], ["aspect", [["aspect", "equal"]]], ["alpha", [["alpha", self.mask_alpha]]], ["norm", [["vmin", vmin], ["vmax", vmax]]]]:
 try:
-test = kwargs[key]
+_ = kwargs[key]
 except KeyError:
 for key_i, val_i in value:
 kwargs[key_i] = val_i
@@ -1338,6 +1383,7 @@ class crop_Stokes(crop_map):
 Class to interactively crop a polarisation map to desired Region of Interest.
 Inherit from crop_map.
 """
 def apply_crop(self, event):
 """
 Redefine apply_crop method for the Stokes HDUList.
@@ -1408,10 +1454,12 @@ class crop_Stokes(crop_map):
|
|||||||
QU_diluted_err = np.sqrt(np.sum(self.hdul_crop[3].data[1, 2][mask]**2))
|
QU_diluted_err = np.sqrt(np.sum(self.hdul_crop[3].data[1, 2][mask]**2))
|
||||||
|
|
||||||
P_diluted = np.sqrt(Q_diluted**2+U_diluted**2)/I_diluted
|
P_diluted = np.sqrt(Q_diluted**2+U_diluted**2)/I_diluted
|
||||||
P_diluted_err = (1./I_diluted)*np.sqrt((Q_diluted**2*Q_diluted_err**2 + U_diluted**2*U_diluted_err**2 + 2.*Q_diluted*U_diluted*QU_diluted_err)/(Q_diluted**2 + U_diluted**2) + ((Q_diluted/I_diluted)**2 + (U_diluted/I_diluted)**2)*I_diluted_err**2 - 2.*(Q_diluted/I_diluted)*IQ_diluted_err - 2.*(U_diluted/I_diluted)*IU_diluted_err)
|
P_diluted_err = (1./I_diluted)*np.sqrt((Q_diluted**2*Q_diluted_err**2 + U_diluted**2*U_diluted_err**2 + 2.*Q_diluted*U_diluted*QU_diluted_err)/(Q_diluted**2 + U_diluted **
|
||||||
|
2) + ((Q_diluted/I_diluted)**2 + (U_diluted/I_diluted)**2)*I_diluted_err**2 - 2.*(Q_diluted/I_diluted)*IQ_diluted_err - 2.*(U_diluted/I_diluted)*IU_diluted_err)
|
||||||
|
|
||||||
PA_diluted = princ_angle((90./np.pi)*np.arctan2(U_diluted, Q_diluted))
|
PA_diluted = princ_angle((90./np.pi)*np.arctan2(U_diluted, Q_diluted))
|
||||||
PA_diluted_err = (90./(np.pi*(Q_diluted**2 + U_diluted**2)))*np.sqrt(U_diluted**2*Q_diluted_err**2 + Q_diluted**2*U_diluted_err**2 - 2.*Q_diluted*U_diluted*QU_diluted_err)
|
PA_diluted_err = (90./(np.pi*(Q_diluted**2 + U_diluted**2)))*np.sqrt(U_diluted**2*Q_diluted_err **
|
||||||
|
2 + Q_diluted**2*U_diluted_err**2 - 2.*Q_diluted*U_diluted*QU_diluted_err)
|
||||||
|
|
||||||
for dataset in self.hdul_crop:
|
for dataset in self.hdul_crop:
|
||||||
dataset.header['P_int'] = (P_diluted, 'Integrated polarisation degree')
|
dataset.header['P_int'] = (P_diluted, 'Integrated polarisation degree')
|
||||||
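
For reference, the wrapped one-liner above is the first-order propagation of the summed Stokes fluxes and their covariance terms through the polarisation degree. Written out, with symbols mirroring the *_diluted variables:

    P = \frac{\sqrt{Q^2 + U^2}}{I},
    \qquad
    \sigma_P = \frac{1}{I}\sqrt{\frac{Q^2\sigma_Q^2 + U^2\sigma_U^2 + 2QU\,\sigma_{QU}}{Q^2 + U^2}
        + \left[\left(\frac{Q}{I}\right)^2 + \left(\frac{U}{I}\right)^2\right]\sigma_I^2
        - \frac{2Q}{I}\,\sigma_{IQ} - \frac{2U}{I}\,\sigma_{IU}}
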
@@ -1572,7 +1620,7 @@ class aperture(object):
         if hasattr(self, 'displayed'):
             try:
                 self.displayed.remove()
-            except:
+            except AttributeError:
                 return
         self.displayed = self.ax.imshow(self.img, vmin=self.vmin, vmax=self.vmax, aspect='equal', cmap='inferno', alpha=self.mask_alpha)
         array = self.displayed.get_array().data
@@ -1584,7 +1632,7 @@ class aperture(object):
         for coll in self.cont.collections:
             try:
                 coll.remove()
-            except:
+            except AttributeError:
                 return
         self.cont = self.ax.contour(self.mask.astype(float), levels=[0.5], colors='white', linewidths=1)
         if not self.embedded:
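
The substitutions recurring through these hunks follow common PEP 8 lint rules: never use a bare except, prefer isinstance() over comparing type() results, and test against None with is not. A minimal standalone sketch of the three idioms, with illustrative names only:

    # Illustrative sketch of the lint fixes applied throughout this commit;
    # the names below are examples, not pipeline code.
    obj = None

    # Never use a bare 'except:': catch the exception you expect (E722).
    try:
        obj.remove()  # raises AttributeError since obj is None
    except AttributeError:
        pass

    # Prefer isinstance() over 'type(x) == str' comparisons (E721).
    value = "stokes.fits"
    if isinstance(value, str):
        print("got a filename")

    # Test against None with 'is not', not 'not x is None' (E714).
    if value is not None:
        print("value is set")
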
@@ -1598,9 +1646,10 @@ class pol_map(object):
     """
     Class to interactively study polarisation maps.
     """

     def __init__(self, Stokes, SNRp_cut=3., SNRi_cut=30., flux_lim=None, selection=None):

-        if type(Stokes) == str:
+        if isinstance(Stokes, str):
             Stokes = fits.open(Stokes)
         self.Stokes = deepcopy(Stokes)
         self.SNRp_cut = SNRp_cut
@@ -1672,7 +1721,6 @@ class pol_map(object):
         s_vec_sc.on_changed(update_vecsc)
         b_snr_reset.on_clicked(reset_snr)

-
         # Set axe for Aperture selection
         ax_aper = self.fig.add_axes([0.55, 0.040, 0.05, 0.02])
         ax_aper_reset = self.fig.add_axes([0.605, 0.040, 0.05, 0.02])
@@ -1717,7 +1765,6 @@ class pol_map(object):
             self.select_instance = aperture(self.data, fig=self.fig, ax=self.ax, cdelt=self.wcs.wcs.cdelt, radius=val)
             self.fig.canvas.draw_idle()

-
         def reset_aperture(event):
             self.region = None
             s_aper_radius.reset()
@@ -1885,7 +1932,8 @@ class pol_map(object):
             dump_list = []
             for i in range(shape[0]):
                 for j in range(shape[1]):
-                    dump_list.append([x[i,j], y[i,j], self.I[i,j]*self.map_convert, self.Q[i,j]*self.map_convert, self.U[i,j]*self.map_convert, P[i,j], PA[i,j]])
+                    dump_list.append([x[i, j], y[i, j], self.I[i, j]*self.map_convert, self.Q[i, j] *
+                                      self.map_convert, self.U[i, j]*self.map_convert, P[i, j], PA[i, j]])
             self.data_dump = np.array(dump_list)

         b_dump.on_clicked(dump)
@@ -1960,27 +2008,35 @@ class pol_map(object):
     @property
     def wcs(self):
         return WCS(self.Stokes[0].header).celestial

     @property
     def I(self):
         return self.Stokes['I_STOKES'].data

     @property
     def Q(self):
         return self.Stokes['Q_STOKES'].data

     @property
     def U(self):
         return self.Stokes['U_STOKES'].data

     @property
     def IQU_cov(self):
         return self.Stokes['IQU_COV_MATRIX'].data

     @property
     def P(self):
         return self.Stokes['POL_DEG_DEBIASED'].data

     @property
     def s_P(self):
         return self.Stokes['POL_DEG_ERR'].data

     @property
     def PA(self):
         return self.Stokes['POL_ANG'].data

     @property
     def data_mask(self):
         return self.Stokes['DATA_MASK'].data
@@ -2002,7 +2058,9 @@ class pol_map(object):
         ax.set(aspect='equal', fc='black')

         ax.coords.grid(True, color='white', ls='dotted', alpha=0.5)
-        ax.set_xlabel('Right Ascension (J2000)')
+        ax.coords[0].set_axislabel('Right Ascension (J2000)')
+        ax.coords[0].set_axislabel_position('t')
+        ax.coords[0].set_ticklabel_position('t')
         ax.set_ylabel('Declination (J2000)', labelpad=-1)

         # Display scales and orientation
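
Replacing set_xlabel with the coords accessor is what makes the two added lines work: on an astropy WCSAxes, per-coordinate labels and tick labels are controlled through coords[i], and the plain Matplotlib label methods cannot move them to the top edge. A minimal standalone sketch of the same pattern, where 'my_map.fits' is a placeholder name:

    # Minimal WCSAxes sketch: put the RA axis label and ticks on top.
    # 'my_map.fits' is a placeholder file name, not from the pipeline.
    import matplotlib.pyplot as plt
    from astropy.io import fits
    from astropy.wcs import WCS

    hdu = fits.open('my_map.fits')[0]
    ax = plt.subplot(projection=WCS(hdu.header).celestial)
    ax.imshow(hdu.data, origin='lower', cmap='inferno')
    ax.coords[0].set_axislabel('Right Ascension (J2000)')
    ax.coords[0].set_axislabel_position('t')   # label on top edge
    ax.coords[0].set_ticklabel_position('t')   # tick labels on top edge
    ax.coords[1].set_axislabel('Declination (J2000)')
    plt.show()
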
@@ -2010,15 +2068,18 @@ class pol_map(object):
         px_size = self.wcs.wcs.cdelt[0]*3600.
         if hasattr(self, 'px_sc'):
             self.px_sc.remove()
-        self.px_sc = AnchoredSizeBar(ax.transData, 1./px_size, '1 arcsec', 3, pad=0.5, sep=5, borderpad=0.5, frameon=False, size_vertical=0.005, color='white', fontproperties=fontprops)
+        self.px_sc = AnchoredSizeBar(ax.transData, 1./px_size, '1 arcsec', 3, pad=0.5, sep=5, borderpad=0.5,
+                                     frameon=False, size_vertical=0.005, color='white', fontproperties=fontprops)
         ax.add_artist(self.px_sc)
         if hasattr(self, 'pol_sc'):
             self.pol_sc.remove()
-        self.pol_sc = AnchoredSizeBar(ax.transData, self.vec_scale, r"$P$= 100%", 4, pad=0.5, sep=5, borderpad=0.5, frameon=False, size_vertical=0.005, color='white', fontproperties=fontprops)
+        self.pol_sc = AnchoredSizeBar(ax.transData, self.vec_scale, r"$P$= 100%", 4, pad=0.5, sep=5, borderpad=0.5,
+                                      frameon=False, size_vertical=0.005, color='white', fontproperties=fontprops)
         ax.add_artist(self.pol_sc)
         if hasattr(self, 'north_dir'):
             self.north_dir.remove()
-        self.north_dir = AnchoredDirectionArrows(ax.transAxes, "E", "N", length=-0.08, fontsize=0.025, loc=1, aspect_ratio=-1, sep_y=0.01, sep_x=0.01, back_length=0., head_length=10., head_width=10., angle=-self.Stokes[0].header['orientat'], color='white', text_props={'ec': None, 'fc': 'w', 'alpha': 1, 'lw': 0.4}, arrow_props={'ec': None,'fc':'w','alpha': 1,'lw': 1})
+        self.north_dir = AnchoredDirectionArrows(ax.transAxes, "E", "N", length=-0.08, fontsize=0.025, loc=1, aspect_ratio=-1, sep_y=0.01, sep_x=0.01, back_length=0., head_length=10., head_width=10.,
+                                                 angle=-self.Stokes[0].header['orientat'], color='white', text_props={'ec': None, 'fc': 'w', 'alpha': 1, 'lw': 0.4}, arrow_props={'ec': None, 'fc': 'w', 'alpha': 1, 'lw': 1})
         ax.add_artist(self.north_dir)

     def display(self, fig=None, ax=None, flux_lim=None):
@@ -2073,7 +2134,7 @@ class pol_map(object):
             self.cbar.remove()
         if hasattr(self, 'im'):
             self.im.remove()
-        if not norm is None:
+        if norm is not None:
             self.im = ax.imshow(self.data, norm=norm, aspect='equal', cmap='inferno')
         else:
             self.im = ax.imshow(self.data, vmin=vmin, vmax=vmax, aspect='equal', cmap='inferno')
@@ -2081,7 +2142,7 @@ class pol_map(object):
             fig.canvas.draw_idle()
             return self.im
         else:
-            if not norm is None:
+            if norm is not None:
                 im = ax.imshow(self.data, norm=norm, aspect='equal', cmap='inferno')
             else:
                 im = ax.imshow(self.data, vmin=vmin, vmax=vmax, aspect='equal', cmap='inferno')
@@ -2102,11 +2163,13 @@ class pol_map(object):
             ax = self.ax
             if hasattr(self, 'quiver'):
                 self.quiver.remove()
-            self.quiver = ax.quiver(X, Y, XY_U, XY_V, units='xy', scale=1./self.vec_scale, scale_units='xy', pivot='mid', headwidth=0., headlength=0., headaxislength=0., width=0.15, linewidth=0.5, color='white',edgecolor='black')
+            self.quiver = ax.quiver(X, Y, XY_U, XY_V, units='xy', scale=1./self.vec_scale, scale_units='xy', pivot='mid', headwidth=0.,
+                                    headlength=0., headaxislength=0., width=0.15, linewidth=0.5, color='white', edgecolor='black')
             fig.canvas.draw_idle()
             return self.quiver
         else:
-            ax.quiver(X, Y, XY_U, XY_V, units='xy', scale=1./self.vec_scale, scale_units='xy', pivot='mid', headwidth=0., headlength=0., headaxislength=0., width=0.15, linewidth=0.5, color='white',edgecolor='black')
+            ax.quiver(X, Y, XY_U, XY_V, units='xy', scale=1./self.vec_scale, scale_units='xy', pivot='mid', headwidth=0.,
+                      headlength=0., headaxislength=0., width=0.15, linewidth=0.5, color='white', edgecolor='black')
             fig.canvas.draw_idle()

     def pol_int(self, fig=None, ax=None):
@@ -2138,10 +2201,12 @@ class pol_map(object):
         QU_cut_err = np.sqrt(np.sum(s_QU[self.cut]**2))

         P_cut = np.sqrt(Q_cut**2+U_cut**2)/I_cut
-        P_cut_err = np.sqrt((Q_cut**2*Q_cut_err**2 + U_cut**2*U_cut_err**2 + 2.*Q_cut*U_cut*QU_cut_err)/(Q_cut**2 + U_cut**2) + ((Q_cut/I_cut)**2 + (U_cut/I_cut)**2)*I_cut_err**2 - 2.*(Q_cut/I_cut)*IQ_cut_err - 2.*(U_cut/I_cut)*IU_cut_err)/I_cut
+        P_cut_err = np.sqrt((Q_cut**2*Q_cut_err**2 + U_cut**2*U_cut_err**2 + 2.*Q_cut*U_cut*QU_cut_err)/(Q_cut**2 + U_cut**2) +
+                            ((Q_cut/I_cut)**2 + (U_cut/I_cut)**2)*I_cut_err**2 - 2.*(Q_cut/I_cut)*IQ_cut_err - 2.*(U_cut/I_cut)*IU_cut_err)/I_cut

         PA_cut = princ_angle(np.degrees((1./2.)*np.arctan2(U_cut, Q_cut)))
-        PA_cut_err = princ_angle(np.degrees((1./(2.*(Q_cut**2+U_cut**2)))*np.sqrt(U_cut**2*Q_cut_err**2 + Q_cut**2*U_cut_err**2 - 2.*Q_cut*U_cut*QU_cut_err)))
+        PA_cut_err = princ_angle(np.degrees((1./(2.*(Q_cut**2+U_cut**2)))*np.sqrt(U_cut**2 *
+                                 Q_cut_err**2 + Q_cut**2*U_cut_err**2 - 2.*Q_cut*U_cut*QU_cut_err)))

         else:
             n_pix = self.I[self.region].size
@@ -2163,7 +2228,8 @@ class pol_map(object):
             QU_reg_err = np.sqrt(np.sum(s_QU[self.region]**2))

             P_reg = np.sqrt(Q_reg**2+U_reg**2)/I_reg
-            P_reg_err = np.sqrt((Q_reg**2*Q_reg_err**2 + U_reg**2*U_reg_err**2 + 2.*Q_reg*U_reg*QU_reg_err)/(Q_reg**2 + U_reg**2) + ((Q_reg/I_reg)**2 + (U_reg/I_reg)**2)*I_reg_err**2 - 2.*(Q_reg/I_reg)*IQ_reg_err - 2.*(U_reg/I_reg)*IU_reg_err)/I_reg
+            P_reg_err = np.sqrt((Q_reg**2*Q_reg_err**2 + U_reg**2*U_reg_err**2 + 2.*Q_reg*U_reg*QU_reg_err)/(Q_reg**2 + U_reg**2) +
+                                ((Q_reg/I_reg)**2 + (U_reg/I_reg)**2)*I_reg_err**2 - 2.*(Q_reg/I_reg)*IQ_reg_err - 2.*(U_reg/I_reg)*IU_reg_err)/I_reg

             PA_reg = princ_angle((90./np.pi)*np.arctan2(U_reg, Q_reg))
             PA_reg_err = (90./(np.pi*(Q_reg**2+U_reg**2)))*np.sqrt(U_reg**2*Q_reg_err**2 + Q_reg**2*U_reg_err**2 - 2.*Q_reg*U_reg*QU_reg_err)
@@ -2180,7 +2246,8 @@ class pol_map(object):
             QU_cut_err = np.sqrt(np.sum(s_QU[new_cut]**2))

             P_cut = np.sqrt(Q_cut**2+U_cut**2)/I_cut
-            P_cut_err = np.sqrt((Q_cut**2*Q_cut_err**2 + U_cut**2*U_cut_err**2 + 2.*Q_cut*U_cut*QU_cut_err)/(Q_cut**2 + U_cut**2) + ((Q_cut/I_cut)**2 + (U_cut/I_cut)**2)*I_cut_err**2 - 2.*(Q_cut/I_cut)*IQ_cut_err - 2.*(U_cut/I_cut)*IU_cut_err)/I_cut
+            P_cut_err = np.sqrt((Q_cut**2*Q_cut_err**2 + U_cut**2*U_cut_err**2 + 2.*Q_cut*U_cut*QU_cut_err)/(Q_cut**2 + U_cut**2) +
+                                ((Q_cut/I_cut)**2 + (U_cut/I_cut)**2)*I_cut_err**2 - 2.*(Q_cut/I_cut)*IQ_cut_err - 2.*(U_cut/I_cut)*IU_cut_err)/I_cut

             PA_cut = 360.-princ_angle((90./np.pi)*np.arctan2(U_cut, Q_cut))
             PA_cut_err = (90./(np.pi*(Q_cut**2+U_cut**2)))*np.sqrt(U_cut**2*Q_cut_err**2 + Q_cut**2*U_cut_err**2 - 2.*Q_cut*U_cut*QU_cut_err)
@@ -2189,8 +2256,8 @@ class pol_map(object):
             for coll in self.cont.collections:
                 try:
                     coll.remove()
-                except:
-                    return
+                except AttributeError:
+                    del coll
             del self.cont
         if fig is None:
             fig = self.fig
@@ -2198,13 +2265,15 @@ class pol_map(object):
             ax = self.ax
             if hasattr(self, 'an_int'):
                 self.an_int.remove()
-            self.an_int = ax.annotate(r"$F_{{\lambda}}^{{int}}$({0:.0f} $\AA$) = {1} $ergs \cdot cm^{{-2}} \cdot s^{{-1}} \cdot \AA^{{-1}}$".format(self.pivot_wav,sci_not(I_reg*self.map_convert,I_reg_err*self.map_convert,2))+"\n"+r"$P^{{int}}$ = {0:.1f} $\pm$ {1:.1f} %".format(P_reg*100.,np.ceil(P_reg_err*1000.)/10.)+"\n"+r"$\theta_{{P}}^{{int}}$ = {0:.1f} $\pm$ {1:.1f} °".format(PA_reg,np.ceil(PA_reg_err*10.)/10.), color='white', fontsize=12, xy=(0.01, 1.00), xycoords='axes fraction',path_effects=[pe.withStroke(linewidth=0.5,foreground='k')], verticalalignment='top', horizontalalignment='left')
-            if not self.region is None:
+            self.an_int = ax.annotate(r"$F_{{\lambda}}^{{int}}$({0:.0f} $\AA$) = {1} $ergs \cdot cm^{{-2}} \cdot s^{{-1}} \cdot \AA^{{-1}}$".format(self.pivot_wav, sci_not(I_reg*self.map_convert, I_reg_err*self.map_convert, 2))+"\n"+r"$P^{{int}}$ = {0:.1f} $\pm$ {1:.1f} %".format(P_reg*100., np.ceil(
+                P_reg_err*1000.)/10.)+"\n"+r"$\theta_{{P}}^{{int}}$ = {0:.1f} $\pm$ {1:.1f} °".format(PA_reg, np.ceil(PA_reg_err*10.)/10.), color='white', fontsize=12, xy=(0.01, 1.00), xycoords='axes fraction', path_effects=[pe.withStroke(linewidth=0.5, foreground='k')], verticalalignment='top', horizontalalignment='left')
+            if self.region is not None:
                 self.cont = ax.contour(self.region.astype(float), levels=[0.5], colors='white', linewidths=0.8)
             fig.canvas.draw_idle()
             return self.an_int
         else:
-            ax.annotate(r"$F_{{\lambda}}^{{int}}$({0:.0f} $\AA$) = {1} $ergs \cdot cm^{{-2}} \cdot s^{{-1}} \cdot \AA^{{-1}}$".format(self.pivot_wav,sci_not(I_reg*self.map_convert,I_reg_err*self.map_convert,2))+"\n"+r"$P^{{int}}$ = {0:.1f} $\pm$ {1:.1f} %".format(P_reg*100.,np.ceil(P_reg_err*1000.)/10.)+"\n"+r"$\theta_{{P}}^{{int}}$ = {0:.1f} $\pm$ {1:.1f} °".format(PA_reg,np.ceil(PA_reg_err*10.)/10.), color='white', fontsize=12, xy=(0.01, 1.00), xycoords='axes fraction',path_effects=[pe.withStroke(linewidth=0.5,foreground='k')], verticalalignment='top', horizontalalignment='left')
-            if not self.region is None:
+            ax.annotate(r"$F_{{\lambda}}^{{int}}$({0:.0f} $\AA$) = {1} $ergs \cdot cm^{{-2}} \cdot s^{{-1}} \cdot \AA^{{-1}}$".format(self.pivot_wav, sci_not(I_reg*self.map_convert, I_reg_err*self.map_convert, 2))+"\n"+r"$P^{{int}}$ = {0:.1f} $\pm$ {1:.1f} %".format(P_reg*100., np.ceil(P_reg_err*1000.)/10.) +
+                        "\n"+r"$\theta_{{P}}^{{int}}$ = {0:.1f} $\pm$ {1:.1f} °".format(PA_reg, np.ceil(PA_reg_err*10.)/10.), color='white', fontsize=12, xy=(0.01, 1.00), xycoords='axes fraction', path_effects=[pe.withStroke(linewidth=0.5, foreground='k')], verticalalignment='top', horizontalalignment='left')
+            if self.region is not None:
                 ax.contour(self.region.astype(float), levels=[0.5], colors='white', linewidths=0.8)
             fig.canvas.draw_idle()
@@ -18,16 +18,19 @@ def divide_proposal(products):
     """
     for pid in np.unique(products['Proposal ID']):
         obs = products[products['Proposal ID'] == pid].copy()
-        close_date = np.unique(np.array([TimeDelta(np.abs(Time(obs['Start']).unix-date.unix),format='sec') < 7.*u.d for date in obs['Start']], dtype=bool), axis=0)
+        close_date = np.unique(np.array([TimeDelta(np.abs(Time(obs['Start']).unix-date.unix), format='sec')
+                                         < 7.*u.d for date in obs['Start']], dtype=bool), axis=0)
         if len(close_date) > 1:
             for date in close_date:
-                products['Proposal ID'][np.any([products['Dataset']==dataset for dataset in obs['Dataset'][date]],axis=0)] = "_".join([obs['Proposal ID'][date][0],str(obs['Start'][date][0])[:10]])
+                products['Proposal ID'][np.any([products['Dataset'] == dataset for dataset in obs['Dataset'][date]], axis=0)
+                                        ] = "_".join([obs['Proposal ID'][date][0], str(obs['Start'][date][0])[:10]])
     for pid in np.unique(products['Proposal ID']):
         obs = products[products['Proposal ID'] == pid].copy()
         same_filt = np.unique(np.array(np.sum([obs['Filters'][:, 1:] == filt[1:] for filt in obs['Filters']], axis=2) < 3, dtype=bool), axis=0)
         if len(same_filt) > 1:
             for filt in same_filt:
-                products['Proposal ID'][np.any([products['Dataset']==dataset for dataset in obs['Dataset'][filt]],axis=0)] = "_".join([obs['Proposal ID'][filt][0],"_".join([fi for fi in obs['Filters'][filt][0][1:] if fi[:-1]!="CLEAR"])])
+                products['Proposal ID'][np.any([products['Dataset'] == dataset for dataset in obs['Dataset'][filt]], axis=0)] = "_".join(
+                    [obs['Proposal ID'][filt][0], "_".join([fi for fi in obs['Filters'][filt][0][1:] if fi[:-1] != "CLEAR"])])
     return products
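
The close_date one-liner reconstructed above reads, for every exposure of a proposal, which exposures started within 7 days of it, so that a proposal observed in separate epochs is split into dated sub-proposals. A minimal standalone sketch of the same test; the dates are made up for illustration:

    # Standalone sketch of the 7-day grouping test used in divide_proposal.
    # The example dates are illustrative, not from an actual proposal.
    import numpy as np
    import astropy.units as u
    from astropy.time import Time, TimeDelta

    starts = Time(["1995-07-01", "1995-07-03", "1995-09-20"])
    for date in starts:
        close = TimeDelta(np.abs(starts.unix - date.unix), format='sec') < 7.*u.d
        print(date.iso[:10], close)
    # The first two exposures flag each other as close; the September one
    # stands alone, so this proposal would be split into two groups.
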
@@ -86,13 +89,13 @@ def get_product_list(target=None, proposal_id=None):
     results = divide_proposal(results)
     obs = results.copy()

-    ### Remove single observations for which a FIND filter is used
+    # Remove single observations for which a FIND filter is used
     to_remove = []
     for i in range(len(obs)):
         if "F1ND" in obs[i]['Filters']:
             to_remove.append(i)
     obs.remove_rows(to_remove)
-    ### Remove observations for which a polarization filter is missing
+    # Remove observations for which a polarization filter is missing
     polfilt = {"POL0": 0, "POL60": 1, "POL120": 2}
     for pid in np.unique(obs['Proposal ID']):
         used_pol = np.zeros(3)
@@ -104,18 +107,18 @@ def get_product_list(target=None, proposal_id=None):
     tab = unique(obs, ['Target name', 'Proposal ID'])
     obs["Obs"] = [np.argmax(np.logical_and(tab['Proposal ID'] == data['Proposal ID'], tab['Target name'] == data['Target name']))+1 for data in obs]
     try:
-        n_obs = unique(obs[["Obs", "Filters", "Start", "Central wavelength", "Instrument",
-                            "Size", "Target name", "Proposal ID", "PI last name"]], 'Obs')
+        n_obs = unique(obs[["Obs", "Filters", "Start", "Central wavelength", "Instrument", "Size", "Target name", "Proposal ID", "PI last name"]], 'Obs')
     except IndexError:
         raise ValueError(
             "There is no observation with POL0, POL60 and POL120 for {0:s} in HST/FOC Legacy Archive".format(target))

     b = np.zeros(len(results), dtype=bool)
-    if not proposal_id is None and str(proposal_id) in obs['Proposal ID']:
+    if proposal_id is not None and str(proposal_id) in obs['Proposal ID']:
         b[results['Proposal ID'] == str(proposal_id)] = True
     else:
         n_obs.pprint(len(n_obs)+2)
-        a = [np.array(i.split(":"), dtype=str) for i in input("select observations to be downloaded ('1,3,4,5' or '1,3:5' or 'all','*' default to 1)\n>").split(',')]
+        a = [np.array(i.split(":"), dtype=str)
+             for i in input("select observations to be downloaded ('1,3,4,5' or '1,3:5' or 'all','*' default to 1)\n>").split(',')]
         if a[0][0] == '':
             a = [[1]]
         if a[0][0] in ['a', 'all', '*']:
@@ -157,7 +160,7 @@ def retrieve_products(target=None, proposal_id=None, output_dir='./data'):
     """
     target, products = get_product_list(target=target, proposal_id=proposal_id)
     prodpaths = []
-    data_dir = path_join(output_dir, target)
+    # data_dir = path_join(output_dir, target)
     out = ""
     for obs in unique(products, 'Obs'):
         filepaths = []
@@ -42,27 +42,29 @@ prototypes :
 from copy import deepcopy
 import numpy as np
 import matplotlib.pyplot as plt
-import matplotlib.dates as mdates
 from matplotlib.patches import Rectangle
 from matplotlib.colors import LogNorm
 from scipy.ndimage import rotate as sc_rotate, shift as sc_shift
 from scipy.signal import fftconvolve
 from astropy.wcs import WCS
 from astropy import log
-log.setLevel('ERROR')
 import warnings
 from lib.deconvolve import deconvolve_im, gaussian_psf, gaussian2d, zeropad
 from lib.convex_hull import image_hull, clean_ROI
 from lib.background import bkg_fit, bkg_hist, bkg_mini
-from lib.plots import plot_obs
+from lib.plots import plot_obs, princ_angle
 from lib.cross_correlation import phase_cross_correlation
+log.setLevel('ERROR')


 # Useful tabulated values
 # FOC instrument
-globals()['trans2'] = {'f140w' : 0.21, 'f175w' : 0.24, 'f220w' : 0.39, 'f275w' : 0.40, 'f320w' : 0.89, 'f342w' : 0.81, 'f430w' : 0.74, 'f370lp' : 0.83, 'f486n' : 0.63, 'f501n' : 0.68, 'f480lp' : 0.82, 'clear2' : 1.0}
-globals()['trans3'] = {'f120m' : 0.10, 'f130m' : 0.10, 'f140m' : 0.08, 'f152m' : 0.08, 'f165w' : 0.28, 'f170m' : 0.18, 'f195w' : 0.42, 'f190m' : 0.15, 'f210m' : 0.18, 'f231m' : 0.18, 'clear3' : 1.0}
-globals()['trans4'] = {'f253m' : 0.18, 'f278m' : 0.26, 'f307m' : 0.26, 'f130lp' : 0.92, 'f346m' : 0.58, 'f372m' : 0.73, 'f410m' : 0.58, 'f437m' : 0.71, 'f470m' : 0.79, 'f502m' : 0.82, 'f550m' : 0.77, 'clear4' : 1.0}
+globals()['trans2'] = {'f140w': 0.21, 'f175w': 0.24, 'f220w': 0.39, 'f275w': 0.40, 'f320w': 0.89, 'f342w': 0.81,
+                       'f430w': 0.74, 'f370lp': 0.83, 'f486n': 0.63, 'f501n': 0.68, 'f480lp': 0.82, 'clear2': 1.0}
+globals()['trans3'] = {'f120m': 0.10, 'f130m': 0.10, 'f140m': 0.08, 'f152m': 0.08, 'f165w': 0.28,
                       'f170m': 0.18, 'f195w': 0.42, 'f190m': 0.15, 'f210m': 0.18, 'f231m': 0.18, 'clear3': 1.0}
+globals()['trans4'] = {'f253m': 0.18, 'f278m': 0.26, 'f307m': 0.26, 'f130lp': 0.92, 'f346m': 0.58,
                       'f372m': 0.73, 'f410m': 0.58, 'f437m': 0.71, 'f470m': 0.79, 'f502m': 0.82, 'f550m': 0.77, 'clear4': 1.0}
 globals()['pol_efficiency'] = {'pol0': 0.92, 'pol60': 0.92, 'pol120': 0.91}
 # POL0 = 0deg, POL60 = 60deg, POL120=120deg
 globals()['theta'] = np.array([180.*np.pi/180., 60.*np.pi/180., 120.*np.pi/180.])
@@ -73,25 +75,6 @@ globals()['pol_shift'] = {'pol0' : np.array([0.,0.])*1., 'pol60' : np.array([3.6
 globals()['sigma_shift'] = {'pol0': [0.3, 0.3], 'pol60': [0.3, 0.3], 'pol120': [0.3, 0.3]}


-def princ_angle(ang):
-    """
-    Return the principal angle in the 0° to 180° quadrant.
-    as PA is always defined at p/m 180°.
-    """
-    if type(ang) != np.ndarray:
-        A = np.array([ang])
-    else:
-        A = np.array(ang)
-    while np.any(A < 0.):
-        A[A<0.] = A[A<0.]+360.
-    while np.any(A >= 180.):
-        A[A>=180.] = A[A>=180.]-180.
-    if type(ang) == type(A):
-        return A
-    else:
-        return A[0]
-
-
 def get_row_compressor(old_dimension, new_dimension, operation='sum'):
     """
     Return the matrix that allows to compress an array from an old dimension of
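
princ_angle is deleted here because it now ships in lib.plots, matching the import updated earlier in this commit. For reference, a compact equivalent of the removed helper; this is a sketch with the same behaviour, not necessarily the exact code now living in lib.plots:

    import numpy as np


    def princ_angle(ang):
        """Fold a position angle into the principal [0., 180.) quadrant.

        PA is only defined modulo 180 deg; scalars come back as scalars
        and arrays as arrays, matching the removed implementation above.
        """
        A = np.atleast_1d(np.asarray(ang, dtype=float))
        A = np.mod(A, 180.)  # replaces the two while loops in one step
        return A if isinstance(ang, np.ndarray) else A[0]
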
@@ -203,9 +186,7 @@ def bin_ndarray(ndarray, new_shape, operation='sum'):
     return ndarray


-def crop_array(data_array, headers, error_array=None, data_mask=None, step=5,
-               null_val=None, inside=False, display=False, savename=None,
-               plots_folder=""):
+def crop_array(data_array, headers, error_array=None, data_mask=None, step=5, null_val=None, inside=False, display=False, savename=None, plots_folder=""):
     """
     Homogeneously crop an array: all contained images will have the same shape.
     'inside' parameter will decide how much should be cropped.
@@ -327,7 +308,7 @@ def crop_array(data_array, headers, error_array=None, data_mask=None, step=5,
         cbar_ax = fig.add_axes([0.9, 0.12, 0.02, 0.75])
         fig.colorbar(im, cax=cbar_ax, label=r"Flux [$ergs \cdot cm^{-2} \cdot s^{-1} \cdot \AA^{-1}$]")

-        if not(savename is None):
+        if savename is not None:
             # fig.suptitle(savename+'_'+filt+'_crop_region')
             fig.savefig("/".join([plots_folder, savename+'_'+filt+'_crop_region.png']),
                         bbox_inches='tight')
@@ -336,7 +317,7 @@ def crop_array(data_array, headers, error_array=None, data_mask=None, step=5,
                      savename=savename+'_crop_region', plots_folder=plots_folder)
         plt.show()

-    if not data_mask is None:
+    if data_mask is not None:
         crop_mask = data_mask[v_array[0]:v_array[1], v_array[2]:v_array[3]]
         return crop_array, crop_error_array, crop_mask, crop_headers
     else:
@@ -398,7 +379,7 @@ def deconvolve_array(data_array, headers, psf='gaussian', FWHM=1., scale='px',
     # Define Point-Spread-Function kernel
     if psf.lower() in ['gauss', 'gaussian']:
         kernel = gaussian_psf(FWHM=FWHM, shape=shape)
-    elif (type(psf) == np.ndarray) and (len(psf.shape) == 2):
+    elif isinstance(psf, np.ndarray) and (len(psf.shape) == 2):
         kernel = psf
     else:
         raise ValueError("{} is not a valid value for 'psf'".format(psf))
@@ -406,15 +387,12 @@ def deconvolve_array(data_array, headers, psf='gaussian', FWHM=1., scale='px',
     # Deconvolve images in the array using given PSF
     deconv_array = np.zeros(data_array.shape)
     for i, image in enumerate(data_array):
-        deconv_array[i] = deconvolve_im(image, kernel, iterations=iterations,
-                                        clip=True, filter_epsilon=None, algo='richardson')
+        deconv_array[i] = deconvolve_im(image, kernel, iterations=iterations, clip=True, filter_epsilon=None, algo='richardson')

     return deconv_array


-def get_error(data_array, headers, error_array=None, data_mask=None,
-              sub_type=None, subtract_error=True, display=False, savename=None,
-              plots_folder="", return_background=False):
+def get_error(data_array, headers, error_array=None, data_mask=None, sub_type=None, subtract_error=True, display=False, savename=None, plots_folder="", return_background=False):
     """
     Look for sub-image of shape sub_shape that have the smallest integrated
     flux (no source assumption) and define the background on the image by the
@@ -478,7 +456,7 @@ def get_error(data_array, headers, error_array=None, data_mask=None,
     if error_array is None:
         error_array = np.zeros(data_array.shape)
     data, error = deepcopy(data_array), deepcopy(error_array)
-    if not data_mask is None:
+    if data_mask is not None:
         mask = deepcopy(data_mask)
     else:
         data_c, error_c, _ = crop_array(data, headers, error, step=5, null_val=0., inside=False)
@@ -499,14 +477,18 @@ def get_error(data_array, headers, error_array=None, data_mask=None,
     err_flat = data*0.03

     if (sub_type is None):
-        n_data_array, c_error_bkg, headers, background = bkg_hist(data, error, mask, headers, subtract_error=subtract_error, display=display, savename=savename, plots_folder=plots_folder)
-    elif type(sub_type)==str:
+        n_data_array, c_error_bkg, headers, background = bkg_hist(
+            data, error, mask, headers, subtract_error=subtract_error, display=display, savename=savename, plots_folder=plots_folder)
+    elif isinstance(sub_type, str):
         if sub_type.lower() in ['auto']:
-            n_data_array, c_error_bkg, headers, background = bkg_fit(data, error, mask, headers, subtract_error=subtract_error, display=display, savename=savename, plots_folder=plots_folder)
+            n_data_array, c_error_bkg, headers, background = bkg_fit(
+                data, error, mask, headers, subtract_error=subtract_error, display=display, savename=savename, plots_folder=plots_folder)
         else:
-            n_data_array, c_error_bkg, headers, background = bkg_hist(data, error, mask, headers, sub_type=sub_type, subtract_error=subtract_error, display=display, savename=savename, plots_folder=plots_folder)
-    elif type(sub_type)==tuple:
-        n_data_array, c_error_bkg, headers, background = bkg_mini(data, error, mask, headers, sub_shape=sub_type, subtract_error=subtract_error, display=display, savename=savename, plots_folder=plots_folder)
+            n_data_array, c_error_bkg, headers, background = bkg_hist(
+                data, error, mask, headers, sub_type=sub_type, subtract_error=subtract_error, display=display, savename=savename, plots_folder=plots_folder)
+    elif isinstance(sub_type, tuple):
+        n_data_array, c_error_bkg, headers, background = bkg_mini(
+            data, error, mask, headers, sub_shape=sub_type, subtract_error=subtract_error, display=display, savename=savename, plots_folder=plots_folder)
     else:
         print("Warning: Invalid subtype.")

@@ -519,8 +501,7 @@ def get_error(data_array, headers, error_array=None, data_mask=None,
     return n_data_array, n_error_array, headers


-def rebin_array(data_array, error_array, headers, pxsize, scale,
-                operation='sum', data_mask=None):
+def rebin_array(data_array, error_array, headers, pxsize, scale, operation='sum', data_mask=None):
     """
     Homogeneously rebin a data array to get a new pixel size equal to pxsize
     where pxsize is given in arcsec.
@@ -564,7 +545,7 @@ def rebin_array(data_array, error_array, headers, pxsize, scale,
     if not same_instr:
         raise ValueError("All images in data_array are not from the same\
         instrument, cannot proceed.")
-    if not instr in ['FOC']:
+    if instr not in ['FOC']:
         raise ValueError("Cannot reduce images from {0:s} instrument\
         (yet)".format(instr))
@@ -601,23 +582,18 @@ def rebin_array(data_array, error_array, headers, pxsize, scale,
             raise ValueError("Requested pixel size is below resolution.")

         # Rebin data
-        rebin_data = bin_ndarray(image, new_shape=new_shape,
-                                 operation=operation)
+        rebin_data = bin_ndarray(image, new_shape=new_shape, operation=operation)
         rebinned_data.append(rebin_data)

         # Propagate error
-        rms_image = np.sqrt(bin_ndarray(image**2, new_shape=new_shape,
-                                        operation='average'))
-        sum_image = bin_ndarray(image, new_shape=new_shape,
-                                operation='sum')
+        rms_image = np.sqrt(bin_ndarray(image**2, new_shape=new_shape, operation='average'))
+        sum_image = bin_ndarray(image, new_shape=new_shape, operation='sum')
         mask = sum_image > 0.
         new_error = np.zeros(rms_image.shape)
         if operation.lower() in ["mean", "average", "avg"]:
-            new_error = np.sqrt(bin_ndarray(error**2,
-                                            new_shape=new_shape, operation='average'))
+            new_error = np.sqrt(bin_ndarray(error**2, new_shape=new_shape, operation='average'))
         else:
-            new_error = np.sqrt(bin_ndarray(error**2,
-                                            new_shape=new_shape, operation='sum'))
+            new_error = np.sqrt(bin_ndarray(error**2, new_shape=new_shape, operation='sum'))
         rebinned_error.append(np.sqrt(rms_image**2 + new_error**2))

         # Update header
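
The error propagation in the rebinning loop just reconstructed is quadrature addition. For a bin collecting N input pixels with uncertainties \sigma_i, bin_ndarray with operation='sum' and operation='average' respectively yield

    \sigma_{\mathrm{bin}}^{\mathrm{sum}} = \sqrt{\sum_{i=1}^{N} \sigma_i^2},
    \qquad
    \sigma_{\mathrm{bin}}^{\mathrm{avg}} = \sqrt{\frac{1}{N}\sum_{i=1}^{N} \sigma_i^2},

and the appended uncertainty folds in the RMS of the binned signal, \sqrt{\mathrm{RMS}^2 + \sigma_{\mathrm{bin}}^2}.
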
@@ -629,7 +605,7 @@ def rebin_array(data_array, error_array, headers, pxsize, scale,
         for key, val in nw.to_header().items():
             new_header.set(key, val)
         rebinned_headers.append(new_header)
-    if not data_mask is None:
+    if data_mask is not None:
         data_mask = bin_ndarray(data_mask, new_shape=new_shape, operation='average') > 0.80

     rebinned_data = np.array(rebinned_data)
@@ -641,8 +617,7 @@ def rebin_array(data_array, error_array, headers, pxsize, scale,
     return rebinned_data, rebinned_error, rebinned_headers, Dxy, data_mask


-def align_data(data_array, headers, error_array=None, background=None,
-               upsample_factor=1., ref_data=None, ref_center=None, return_shifts=False):
+def align_data(data_array, headers, error_array=None, background=None, upsample_factor=1., ref_data=None, ref_center=None, return_shifts=False):
     """
     Align images in data_array using cross correlation, and rescale them to
     wider images able to contain any rotation of the reference image.
@@ -716,8 +691,7 @@ def align_data(data_array, headers, error_array=None, background=None,
         full_headers.append(headers[0])
         err_array = np.concatenate((error_array, [np.zeros(ref_data.shape)]), axis=0)

-        full_array, err_array, full_headers = crop_array(full_array, full_headers,
-                                                         err_array, step=5, inside=False, null_val=0.)
+        full_array, err_array, full_headers = crop_array(full_array, full_headers, err_array, step=5, inside=False, null_val=0.)

         data_array, ref_data, headers = full_array[:-1], full_array[-1], full_headers[:-1]
         error_array = err_array[:-1]
@@ -752,16 +726,13 @@ def align_data(data_array, headers, error_array=None, background=None,
             rescaled_error[i] *= 0.01*background[i]
         # Get shifts and error by cross-correlation to ref_data
         if do_shift:
-            shift, error, _ = phase_cross_correlation(ref_data/ref_data.max(), image/image.max(),
-                                                      upsample_factor=upsample_factor)
+            shift, error, _ = phase_cross_correlation(ref_data/ref_data.max(), image/image.max(), upsample_factor=upsample_factor)
         else:
             shift = pol_shift[headers[i]['filtnam1'].lower()]
             error = sigma_shift[headers[i]['filtnam1'].lower()]
         # Rescale image to requested output
-        rescaled_image[i,res_shift[0]:res_shift[0]+shape[1],
-                       res_shift[1]:res_shift[1]+shape[2]] = deepcopy(image)
-        rescaled_error[i,res_shift[0]:res_shift[0]+shape[1],
-                       res_shift[1]:res_shift[1]+shape[2]] = deepcopy(error_array[i])
+        rescaled_image[i, res_shift[0]:res_shift[0]+shape[1], res_shift[1]:res_shift[1]+shape[2]] = deepcopy(image)
+        rescaled_error[i, res_shift[0]:res_shift[0]+shape[1], res_shift[1]:res_shift[1]+shape[2]] = deepcopy(error_array[i])
         # Shift images to align
         rescaled_image[i] = sc_shift(rescaled_image[i], shift, order=1, cval=0.)
         rescaled_error[i] = sc_shift(rescaled_error[i], shift, order=1, cval=background[i])
@@ -802,8 +773,7 @@ def align_data(data_array, headers, error_array=None, background=None,
     return data_array, error_array, headers, data_mask


-def smooth_data(data_array, error_array, data_mask, headers, FWHM=1.,
-                scale='pixel', smoothing='gaussian'):
+def smooth_data(data_array, error_array, data_mask, headers, FWHM=1., scale='pixel', smoothing='gaussian'):
     """
     Smooth a data_array using selected function.
     ----------
@@ -873,7 +843,8 @@ def smooth_data(data_array, error_array, data_mask, headers, FWHM=1.,
             g_rc = np.array([np.exp(-0.5*(dist_rc/stdev)**2)/(2.*np.pi*stdev**2),]*data_array.shape[0])
             # Apply weighted combination
             smoothed[r, c] = np.where(data_mask[r, c], np.sum(data_array*weight*g_rc)/np.sum(weight*g_rc), data_array.mean(axis=0)[r, c])
-            error[r,c] = np.where(data_mask[r,c], np.sqrt(np.sum(weight*g_rc**2))/np.sum(weight*g_rc), (np.sqrt(np.sum(error_array**2,axis=0)/error_array.shape[0]))[r,c])
+            error[r, c] = np.where(data_mask[r, c], np.sqrt(np.sum(weight*g_rc**2))/np.sum(weight*g_rc),
+                                   (np.sqrt(np.sum(error_array**2, axis=0)/error_array.shape[0]))[r, c])

     # Nan handling
     error[np.logical_or(np.isnan(smoothed*error), 1-data_mask)] = 0.
@@ -906,8 +877,7 @@ def smooth_data(data_array, error_array, data_mask, headers, FWHM=1.,
     return smoothed, error


-def polarizer_avg(data_array, error_array, data_mask, headers, FWHM=None,
-                  scale='pixel', smoothing='gaussian'):
+def polarizer_avg(data_array, error_array, data_mask, headers, FWHM=None, scale='pixel', smoothing='gaussian'):
     """
     Make the average image from a single polarizer for a given instrument.
     -----------
@@ -950,7 +920,7 @@ def polarizer_avg(data_array, error_array, data_mask, headers, FWHM=None,
     if not same_instr:
         raise ValueError("All images in data_array are not from the same\
         instrument, cannot proceed.")
-    if not instr in ['FOC']:
+    if instr not in ['FOC']:
         raise ValueError("Cannot reduce images from {0:s} instrument\
         (yet)".format(instr))
@@ -982,14 +952,11 @@ def polarizer_avg(data_array, error_array, data_mask, headers, FWHM=None,
     err60_array = error_array[is_pol60]
     err120_array = error_array[is_pol120]

-    if not(FWHM is None) and (smoothing.lower() in ['combine','combining']):
+    if (FWHM is not None) and (smoothing.lower() in ['combine', 'combining']):
         # Smooth by combining each polarizer images
-        pol0, err0 = smooth_data(pol0_array, err0_array, data_mask, headers0,
-                                 FWHM=FWHM, scale=scale, smoothing=smoothing)
-        pol60, err60 = smooth_data(pol60_array, err60_array, data_mask, headers60,
-                                   FWHM=FWHM, scale=scale, smoothing=smoothing)
-        pol120, err120 = smooth_data(pol120_array, err120_array, data_mask, headers120,
-                                     FWHM=FWHM, scale=scale, smoothing=smoothing)
+        pol0, err0 = smooth_data(pol0_array, err0_array, data_mask, headers0, FWHM=FWHM, scale=scale, smoothing=smoothing)
+        pol60, err60 = smooth_data(pol60_array, err60_array, data_mask, headers60, FWHM=FWHM, scale=scale, smoothing=smoothing)
+        pol120, err120 = smooth_data(pol120_array, err120_array, data_mask, headers120, FWHM=FWHM, scale=scale, smoothing=smoothing)
     else:
         # Sum on each polarisation filter.
@@ -1021,8 +988,7 @@ def polarizer_avg(data_array, error_array, data_mask, headers, FWHM=None,

     if not (FWHM is None) and (smoothing.lower() in ['gaussian', 'gauss', 'weighted_gaussian', 'weight_gauss']):
         # Smooth by convoluting with a gaussian each polX image.
-        pol_array, polerr_array = smooth_data(pol_array, polerr_array,
-                                              data_mask, pol_headers, FWHM=FWHM, scale=scale, smoothing=smoothing)
+        pol_array, polerr_array = smooth_data(pol_array, polerr_array, data_mask, pol_headers, FWHM=FWHM, scale=scale, smoothing=smoothing)
         pol0, pol60, pol120 = pol_array
         err0, err60, err120 = polerr_array

@@ -1056,8 +1022,7 @@ def polarizer_avg(data_array, error_array, data_mask, headers, FWHM=None,
     return polarizer_array, polarizer_cov, pol_headers


-def compute_Stokes(data_array, error_array, data_mask, headers,
-                   FWHM=None, scale='pixel', smoothing='combine', transmitcorr=False):
+def compute_Stokes(data_array, error_array, data_mask, headers, FWHM=None, scale='pixel', smoothing='combine', transmitcorr=False):
     """
     Compute the Stokes parameters I, Q and U for a given data_set
     ----------
@@ -1114,15 +1079,14 @@ def compute_Stokes(data_array, error_array, data_mask, headers,
|
|||||||
if not same_instr:
|
if not same_instr:
|
||||||
raise ValueError("All images in data_array are not from the same\
|
raise ValueError("All images in data_array are not from the same\
|
||||||
instrument, cannot proceed.")
|
instrument, cannot proceed.")
|
||||||
if not instr in ['FOC']:
|
if instr not in ['FOC']:
|
||||||
raise ValueError("Cannot reduce images from {0:s} instrument\
|
raise ValueError("Cannot reduce images from {0:s} instrument\
|
||||||
(yet)".format(instr))
|
(yet)".format(instr))
|
||||||
|
|
||||||
# Routine for the FOC instrument
|
# Routine for the FOC instrument
|
||||||
if instr == 'FOC':
|
if instr == 'FOC':
|
||||||
# Get image from each polarizer and covariance matrix
|
# Get image from each polarizer and covariance matrix
|
||||||
pol_array, pol_cov, pol_headers = polarizer_avg(data_array, error_array, data_mask,
|
pol_array, pol_cov, pol_headers = polarizer_avg(data_array, error_array, data_mask, headers, FWHM=FWHM, scale=scale, smoothing=smoothing)
|
||||||
headers, FWHM=FWHM, scale=scale, smoothing=smoothing)
|
|
||||||
pol0, pol60, pol120 = pol_array
|
pol0, pol60, pol120 = pol_array
|
||||||
|
|
||||||
if (pol0 < 0.).any() or (pol60 < 0.).any() or (pol120 < 0.).any():
|
if (pol0 < 0.).any() or (pol60 < 0.).any() or (pol120 < 0.).any():
|
||||||
@@ -1180,8 +1144,7 @@ def compute_Stokes(data_array, error_array, data_mask, headers,
|
|||||||
Stokes_error = np.array([np.sqrt(Stokes_cov[i, i]) for i in range(3)])
|
Stokes_error = np.array([np.sqrt(Stokes_cov[i, i]) for i in range(3)])
|
||||||
Stokes_headers = headers[0:3]
|
Stokes_headers = headers[0:3]
|
||||||
|
|
||||||
Stokes_array, Stokes_error = smooth_data(Stokes_array, Stokes_error, data_mask,
|
Stokes_array, Stokes_error = smooth_data(Stokes_array, Stokes_error, data_mask, headers=Stokes_headers, FWHM=FWHM, scale=scale, smoothing=smoothing)
|
||||||
headers=Stokes_headers, FWHM=FWHM, scale=scale, smoothing=smoothing)
|
|
||||||
|
|
||||||
I_stokes, Q_stokes, U_stokes = Stokes_array
|
I_stokes, Q_stokes, U_stokes = Stokes_array
|
||||||
Stokes_cov[0, 0], Stokes_cov[1, 1], Stokes_cov[2, 2] = deepcopy(Stokes_error**2)
|
Stokes_cov[0, 0], Stokes_cov[1, 1], Stokes_cov[2, 2] = deepcopy(Stokes_error**2)
|
||||||
@@ -1209,19 +1172,28 @@ def compute_Stokes(data_array, error_array, data_mask, headers,
|
|||||||
s_U2_stat = np.sum([coeff_stokes[2, i]**2*sigma_flux[i]**2 for i in range(len(sigma_flux))], axis=0)
|
s_U2_stat = np.sum([coeff_stokes[2, i]**2*sigma_flux[i]**2 for i in range(len(sigma_flux))], axis=0)

# Compute the derivative of each Stokes parameter with respect to the polarizer orientation
dI_dtheta1 = 2.*pol_eff[0]/A*(pol_eff[2]*np.cos(-2.*theta[2]+2.*theta[0])*(pol_flux[1]-I_stokes) - pol_eff[1]*np.cos(-2.*theta[0]+2.*theta[1])*(pol_flux[2]-I_stokes))
dI_dtheta1 = 2.*pol_eff[0]/A*(pol_eff[2]*np.cos(-2.*theta[2]+2.*theta[0])*(pol_flux[1]-I_stokes) -
pol_eff[1]*np.cos(-2.*theta[0]+2.*theta[1])*(pol_flux[2]-I_stokes))
dI_dtheta2 = 2.*pol_eff[1]/A*(pol_eff[0]*np.cos(-2.*theta[0]+2.*theta[1])*(pol_flux[2]-I_stokes) - pol_eff[2]*np.cos(-2.*theta[1]+2.*theta[2])*(pol_flux[0]-I_stokes))
dI_dtheta2 = 2.*pol_eff[1]/A*(pol_eff[0]*np.cos(-2.*theta[0]+2.*theta[1])*(pol_flux[2]-I_stokes) -
pol_eff[2]*np.cos(-2.*theta[1]+2.*theta[2])*(pol_flux[0]-I_stokes))
dI_dtheta3 = 2.*pol_eff[2]/A*(pol_eff[1]*np.cos(-2.*theta[1]+2.*theta[2])*(pol_flux[0]-I_stokes) - pol_eff[0]*np.cos(-2.*theta[2]+2.*theta[0])*(pol_flux[1]-I_stokes))
dI_dtheta3 = 2.*pol_eff[2]/A*(pol_eff[1]*np.cos(-2.*theta[1]+2.*theta[2])*(pol_flux[0]-I_stokes) -
pol_eff[0]*np.cos(-2.*theta[2]+2.*theta[0])*(pol_flux[1]-I_stokes))
dI_dtheta = np.array([dI_dtheta1, dI_dtheta2, dI_dtheta3])

dQ_dtheta1 = 2.*pol_eff[0]/A*(np.cos(2.*theta[0])*(pol_flux[1]-pol_flux[2]) - (pol_eff[2]*np.cos(-2.*theta[2]+2.*theta[0]) - pol_eff[1]*np.cos(-2.*theta[0]+2.*theta[1]))*Q_stokes)
dQ_dtheta1 = 2.*pol_eff[0]/A*(np.cos(2.*theta[0])*(pol_flux[1]-pol_flux[2]) - (pol_eff[2]*np.cos(-2. *
theta[2]+2.*theta[0]) - pol_eff[1]*np.cos(-2.*theta[0]+2.*theta[1]))*Q_stokes)
dQ_dtheta2 = 2.*pol_eff[1]/A*(np.cos(2.*theta[1])*(pol_flux[2]-pol_flux[0]) - (pol_eff[0]*np.cos(-2.*theta[0]+2.*theta[1]) - pol_eff[2]*np.cos(-2.*theta[1]+2.*theta[2]))*Q_stokes)
dQ_dtheta2 = 2.*pol_eff[1]/A*(np.cos(2.*theta[1])*(pol_flux[2]-pol_flux[0]) - (pol_eff[0]*np.cos(-2. *
theta[0]+2.*theta[1]) - pol_eff[2]*np.cos(-2.*theta[1]+2.*theta[2]))*Q_stokes)
dQ_dtheta3 = 2.*pol_eff[2]/A*(np.cos(2.*theta[2])*(pol_flux[0]-pol_flux[1]) - (pol_eff[1]*np.cos(-2.*theta[1]+2.*theta[2]) - pol_eff[0]*np.cos(-2.*theta[2]+2.*theta[0]))*Q_stokes)
dQ_dtheta3 = 2.*pol_eff[2]/A*(np.cos(2.*theta[2])*(pol_flux[0]-pol_flux[1]) - (pol_eff[1]*np.cos(-2. *
theta[1]+2.*theta[2]) - pol_eff[0]*np.cos(-2.*theta[2]+2.*theta[0]))*Q_stokes)
dQ_dtheta = np.array([dQ_dtheta1, dQ_dtheta2, dQ_dtheta3])

dU_dtheta1 = 2.*pol_eff[0]/A*(np.sin(2.*theta[0])*(pol_flux[1]-pol_flux[2]) - (pol_eff[2]*np.cos(-2.*theta[2]+2.*theta[0]) - pol_eff[1]*np.cos(-2.*theta[0]+2.*theta[1]))*U_stokes)
dU_dtheta1 = 2.*pol_eff[0]/A*(np.sin(2.*theta[0])*(pol_flux[1]-pol_flux[2]) - (pol_eff[2]*np.cos(-2. *
theta[2]+2.*theta[0]) - pol_eff[1]*np.cos(-2.*theta[0]+2.*theta[1]))*U_stokes)
dU_dtheta2 = 2.*pol_eff[1]/A*(np.sin(2.*theta[1])*(pol_flux[2]-pol_flux[0]) - (pol_eff[0]*np.cos(-2.*theta[0]+2.*theta[1]) - pol_eff[2]*np.cos(-2.*theta[1]+2.*theta[2]))*U_stokes)
dU_dtheta2 = 2.*pol_eff[1]/A*(np.sin(2.*theta[1])*(pol_flux[2]-pol_flux[0]) - (pol_eff[0]*np.cos(-2. *
theta[0]+2.*theta[1]) - pol_eff[2]*np.cos(-2.*theta[1]+2.*theta[2]))*U_stokes)
dU_dtheta3 = 2.*pol_eff[2]/A*(np.sin(2.*theta[2])*(pol_flux[0]-pol_flux[1]) - (pol_eff[1]*np.cos(-2.*theta[1]+2.*theta[2]) - pol_eff[0]*np.cos(-2.*theta[2]+2.*theta[0]))*U_stokes)
dU_dtheta3 = 2.*pol_eff[2]/A*(np.sin(2.*theta[2])*(pol_flux[0]-pol_flux[1]) - (pol_eff[1]*np.cos(-2. *
theta[1]+2.*theta[2]) - pol_eff[0]*np.cos(-2.*theta[2]+2.*theta[0]))*U_stokes)
dU_dtheta = np.array([dU_dtheta1, dU_dtheta2, dU_dtheta3])

# Compute the uncertainty associated with the polarizers' orientation (see Kishimoto 1999)
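These derivatives feed the Kishimoto (1999) orientation-error term: each polarizer's orientation uncertainty is propagated through them in quadrature. A minimal sketch of that step, assuming per-polarizer uncertainties sigma_theta (in radians) and mirroring the s_U2_stat pattern above; the names below are illustrative, not the commit's own:

import numpy as np

def orientation_variance(dX_dtheta, sigma_theta):
    # Quadrature sum over the three polarizers: (dX/dtheta_i)^2 * sigma_theta_i^2
    return np.sum([dX_dtheta[i]**2*sigma_theta[i]**2 for i in range(len(sigma_theta))], axis=0)

s_I2_theta = orientation_variance(dI_dtheta, sigma_theta)  # hypothetical variable names
s_Q2_theta = orientation_variance(dQ_dtheta, sigma_theta)
s_U2_theta = orientation_variance(dU_dtheta, sigma_theta)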
@@ -1251,10 +1223,12 @@ def compute_Stokes(data_array, error_array, data_mask, headers,
QU_diluted_err = np.sqrt(np.sum(Stokes_cov[1, 2][mask]**2))

P_diluted = np.sqrt(Q_diluted**2+U_diluted**2)/I_diluted
P_diluted_err = (1./I_diluted)*np.sqrt((Q_diluted**2*Q_diluted_err**2 + U_diluted**2*U_diluted_err**2 + 2.*Q_diluted*U_diluted*QU_diluted_err)/(Q_diluted**2 + U_diluted**2) + ((Q_diluted/I_diluted)**2 + (U_diluted/I_diluted)**2)*I_diluted_err**2 - 2.*(Q_diluted/I_diluted)*IQ_diluted_err - 2.*(U_diluted/I_diluted)*IU_diluted_err)
P_diluted_err = (1./I_diluted)*np.sqrt((Q_diluted**2*Q_diluted_err**2 + U_diluted**2*U_diluted_err**2 + 2.*Q_diluted*U_diluted*QU_diluted_err)/(Q_diluted**2 + U_diluted **
2) + ((Q_diluted/I_diluted)**2 + (U_diluted/I_diluted)**2)*I_diluted_err**2 - 2.*(Q_diluted/I_diluted)*IQ_diluted_err - 2.*(U_diluted/I_diluted)*IU_diluted_err)

PA_diluted = princ_angle((90./np.pi)*np.arctan2(U_diluted, Q_diluted))
PA_diluted_err = (90./(np.pi*(Q_diluted**2 + U_diluted**2)))*np.sqrt(U_diluted**2*Q_diluted_err**2 + Q_diluted**2*U_diluted_err**2 - 2.*Q_diluted*U_diluted*QU_diluted_err)
PA_diluted_err = (90./(np.pi*(Q_diluted**2 + U_diluted**2)))*np.sqrt(U_diluted**2*Q_diluted_err **
2 + Q_diluted**2*U_diluted_err**2 - 2.*Q_diluted*U_diluted*QU_diluted_err)

for header in headers:
header['P_int'] = (P_diluted, 'Integrated polarisation degree')
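Written out, with sigma_Q denoting Q_diluted_err, sigma_QU the Q-U covariance term QU_diluted_err, and so on (an assumed mapping, consistent with how the terms enter above), the two propagation formulas read:

\sigma_P = \frac{1}{I}\sqrt{\frac{Q^2\sigma_Q^2 + U^2\sigma_U^2 + 2QU\sigma_{QU}}{Q^2 + U^2} + \frac{Q^2 + U^2}{I^2}\sigma_I^2 - \frac{2Q}{I}\sigma_{IQ} - \frac{2U}{I}\sigma_{IU}}

\sigma_{PA} = \frac{90}{\pi\,(Q^2 + U^2)}\sqrt{U^2\sigma_Q^2 + Q^2\sigma_U^2 - 2QU\sigma_{QU}} \quad \text{(degrees)}

with P = \sqrt{Q^2 + U^2}/I and PA = (90/\pi)\,\mathrm{arctan2}(U, Q).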
@@ -1324,8 +1298,10 @@ def compute_pol(I_stokes, Q_stokes, U_stokes, Stokes_cov, headers):
s_PA = np.ones(I_stokes.shape)*fmax

# Propagate previously computed errors
s_P[mask] = (1/I_stokes[mask])*np.sqrt((Q_stokes[mask]**2*Stokes_cov[1,1][mask] + U_stokes[mask]**2*Stokes_cov[2,2][mask] + 2.*Q_stokes[mask]*U_stokes[mask]*Stokes_cov[1,2][mask])/(Q_stokes[mask]**2 + U_stokes[mask]**2) + ((Q_stokes[mask]/I_stokes[mask])**2 + (U_stokes[mask]/I_stokes[mask])**2)*Stokes_cov[0,0][mask] - 2.*(Q_stokes[mask]/I_stokes[mask])*Stokes_cov[0,1][mask] - 2.*(U_stokes[mask]/I_stokes[mask])*Stokes_cov[0,2][mask])
s_P[mask] = (1/I_stokes[mask])*np.sqrt((Q_stokes[mask]**2*Stokes_cov[1, 1][mask] + U_stokes[mask]**2*Stokes_cov[2, 2][mask] + 2.*Q_stokes[mask]*U_stokes[mask]*Stokes_cov[1, 2][mask])/(Q_stokes[mask]**2 + U_stokes[mask]**2) +
((Q_stokes[mask]/I_stokes[mask])**2 + (U_stokes[mask]/I_stokes[mask])**2)*Stokes_cov[0, 0][mask] - 2.*(Q_stokes[mask]/I_stokes[mask])*Stokes_cov[0, 1][mask] - 2.*(U_stokes[mask]/I_stokes[mask])*Stokes_cov[0, 2][mask])
s_PA[mask] = (90./(np.pi*(Q_stokes[mask]**2 + U_stokes[mask]**2)))*np.sqrt(U_stokes[mask]**2*Stokes_cov[1,1][mask] + Q_stokes[mask]**2*Stokes_cov[2,2][mask] - 2.*Q_stokes[mask]*U_stokes[mask]*Stokes_cov[1,2][mask])
s_PA[mask] = (90./(np.pi*(Q_stokes[mask]**2 + U_stokes[mask]**2)))*np.sqrt(U_stokes[mask]**2*Stokes_cov[1, 1][mask] +
Q_stokes[mask]**2*Stokes_cov[2, 2][mask] - 2.*Q_stokes[mask]*U_stokes[mask]*Stokes_cov[1, 2][mask])
s_P[np.isnan(s_P)] = fmax
s_PA[np.isnan(s_PA)] = fmax

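compute_pol also returns a debiased_P (see the return statement below); a minimal sketch of a standard debiasing step, assuming a Wardle & Kronberg (1974)-style estimator, which may differ from the pipeline's actual choice:

# Hedged sketch, not the commit's code: subtract the positive noise bias in quadrature
debiased_P = np.zeros_like(P)
good = P > s_P
debiased_P[good] = np.sqrt(P[good]**2 - s_P[good]**2)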
@@ -1361,8 +1337,7 @@ def compute_pol(I_stokes, Q_stokes, U_stokes, Stokes_cov, headers):
return P, debiased_P, s_P, s_P_P, PA, s_PA, s_PA_P


def rotate_Stokes(I_stokes, Q_stokes, U_stokes, Stokes_cov, data_mask, headers,
ang=None, SNRi_cut=None):
def rotate_Stokes(I_stokes, Q_stokes, U_stokes, Stokes_cov, data_mask, headers, ang=None, SNRi_cut=None):
"""
Use scipy.ndimage.rotate to rotate I_stokes to an angle, and a rotation
matrix to rotate Q, U of a given angle in degrees and update header
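For reference, the Q/U rotation the docstring describes uses twice the image rotation angle; a minimal sketch, with the sign convention assumed rather than taken from this hunk:

alpha = np.deg2rad(ang)  # ang in degrees, as in the docstring
new_Q = Q_stokes*np.cos(2.*alpha) + U_stokes*np.sin(2.*alpha)
new_U = -Q_stokes*np.sin(2.*alpha) + U_stokes*np.cos(2.*alpha)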
@@ -1412,7 +1387,7 @@ def rotate_Stokes(I_stokes, Q_stokes, U_stokes, Stokes_cov, data_mask, headers,
Updated 2D boolean array delimiting the data to work on.
"""
# Apply cuts
if not(SNRi_cut is None):
if SNRi_cut is not None:
SNRi = I_stokes/np.sqrt(Stokes_cov[0, 0])
mask = SNRi < SNRi_cut
eps = 1e-5
@@ -1510,10 +1485,12 @@ def rotate_Stokes(I_stokes, Q_stokes, U_stokes, Stokes_cov, data_mask, headers,
QU_diluted_err = np.sqrt(np.sum(new_Stokes_cov[1, 2][mask]**2))

P_diluted = np.sqrt(Q_diluted**2+U_diluted**2)/I_diluted
P_diluted_err = (1./I_diluted)*np.sqrt((Q_diluted**2*Q_diluted_err**2 + U_diluted**2*U_diluted_err**2 + 2.*Q_diluted*U_diluted*QU_diluted_err)/(Q_diluted**2 + U_diluted**2) + ((Q_diluted/I_diluted)**2 + (U_diluted/I_diluted)**2)*I_diluted_err**2 - 2.*(Q_diluted/I_diluted)*IQ_diluted_err - 2.*(U_diluted/I_diluted)*IU_diluted_err)
P_diluted_err = (1./I_diluted)*np.sqrt((Q_diluted**2*Q_diluted_err**2 + U_diluted**2*U_diluted_err**2 + 2.*Q_diluted*U_diluted*QU_diluted_err)/(Q_diluted**2 + U_diluted **
2) + ((Q_diluted/I_diluted)**2 + (U_diluted/I_diluted)**2)*I_diluted_err**2 - 2.*(Q_diluted/I_diluted)*IQ_diluted_err - 2.*(U_diluted/I_diluted)*IU_diluted_err)

PA_diluted = princ_angle((90./np.pi)*np.arctan2(U_diluted, Q_diluted))
PA_diluted_err = (90./(np.pi*(Q_diluted**2 + U_diluted**2)))*np.sqrt(U_diluted**2*Q_diluted_err**2 + Q_diluted**2*U_diluted_err**2 - 2.*Q_diluted*U_diluted*QU_diluted_err)
PA_diluted_err = (90./(np.pi*(Q_diluted**2 + U_diluted**2)))*np.sqrt(U_diluted**2*Q_diluted_err **
2 + Q_diluted**2*U_diluted_err**2 - 2.*Q_diluted*U_diluted*QU_diluted_err)

for header in new_headers:
header['P_int'] = (P_diluted, 'Integrated polarisation degree')
@@ -1521,7 +1498,6 @@ def rotate_Stokes(I_stokes, Q_stokes, U_stokes, Stokes_cov, data_mask, headers,
header['PA_int'] = (PA_diluted, 'Integrated polarisation angle')
header['PA_int_err'] = (np.ceil(PA_diluted_err*10.)/10., 'Integrated polarisation angle error')

return new_I_stokes, new_Q_stokes, new_U_stokes, new_Stokes_cov, new_data_mask, new_headers

@@ -1569,10 +1545,8 @@ def rotate_data(data_array, error_array, data_mask, headers, ang):
new_data_array = []
new_error_array = []
for i in range(data_array.shape[0]):
new_data_array.append(sc_rotate(data_array[i], ang, order=1, reshape=False,
cval=0.))
new_data_array.append(sc_rotate(data_array[i], ang, order=1, reshape=False, cval=0.))
new_error_array.append(sc_rotate(error_array[i], ang, order=1, reshape=False,
cval=0.))
new_error_array.append(sc_rotate(error_array[i], ang, order=1, reshape=False, cval=0.))
new_data_array = np.array(new_data_array)
new_error_array = np.array(new_error_array)
new_data_mask = sc_rotate(data_mask*10., ang, order=1, reshape=False, cval=0.)
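The factor 10 lets the boolean mask survive the linear interpolation in sc_rotate: rotated edge pixels take fractional values, which a later threshold can cut on. A hedged sketch of that follow-up step (the actual cut-off is outside this hunk):

new_data_mask = new_data_mask > 5.  # hypothetical threshold: keep pixels that stayed mostly inside the original mask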
@@ -1584,8 +1558,7 @@ def rotate_data(data_array, error_array, data_mask, headers, ang):

# Update headers to new angle
new_headers = []
mrot = np.array([[np.cos(-alpha), -np.sin(-alpha)],
[np.sin(-alpha), np.cos(-alpha)]])
mrot = np.array([[np.cos(-alpha), -np.sin(-alpha)], [np.sin(-alpha), np.cos(-alpha)]])
for header in headers:
new_header = deepcopy(header)
new_header['orientat'] = header['orientat'] + ang
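mrot is the standard 2D rotation matrix for -alpha; presumably it updates the header's WCS alongside the 'orientat' keyword. A hypothetical sketch of such an update (the commit's actual keyword handling is outside this hunk):

cd = np.array([[new_header['CD1_1'], new_header['CD1_2']], [new_header['CD2_1'], new_header['CD2_2']]])
new_cd = np.dot(mrot, cd)  # rotate the pixel-to-sky transform along with the image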
@@ -15,7 +15,7 @@ Stokes_UV = fits.open("./data/IC5063/5918/IC5063_FOC_b0.10arcsec_c0.20arcsec.fit
# Stokes_S2 = fits.open("./data/IC5063/POLARIZATION_COMPARISON/S2_rot_crop.fits")
Stokes_IR = fits.open("./data/IC5063/IR/u2e65g01t_c0f_rot.fits")

##levelsMorganti = np.array([1.,2.,3.,8.,16.,32.,64.,128.])
# levelsMorganti = np.array([1.,2.,3.,8.,16.,32.,64.,128.])
# levelsMorganti = np.logspace(0.,1.97,5)/100.
#
# levels18GHz = levelsMorganti*Stokes_18GHz[0].data.max()
@@ -42,7 +42,8 @@ Stokes_IR = fits.open("./data/IC5063/IR/u2e65g01t_c0f_rot.fits")
# F.plot(SNRp_cut=3.0, SNRi_cut=80.0, savename='./plots/IC5063/S2_overplot_forced.pdf', norm=LogNorm(vmin=5e-20,vmax=5e-18))

G = overplot_pol(Stokes_UV, Stokes_IR, cmap='inferno')
G.plot(SNRp_cut=2.0, SNRi_cut=10.0, savename='./plots/IC5063/IR_overplot_forced.pdf',vec_scale=None,norm=LogNorm(Stokes_IR[0].data.max()*Stokes_IR[0].header['photflam']/1e3,Stokes_IR[0].data.max()*Stokes_IR[0].header['photflam']),cmap='inferno_r')
G.plot(SNRp_cut=2.0, SNRi_cut=10.0, savename='./plots/IC5063/IR_overplot_forced.pdf', vec_scale=None,
norm=LogNorm(Stokes_IR[0].data.max()*Stokes_IR[0].header['photflam']/1e3, Stokes_IR[0].data.max()*Stokes_IR[0].header['photflam']), cmap='inferno_r')

# data_folder1 = "./data/M87/POS1/"
# plots_folder1 = "./plots/M87/POS1/"
@@ -1,7 +1,7 @@
#!/usr/bin/python3
from astropy.io import fits
import numpy as np
from lib.plots import overplot_chandra, overplot_pol, align_pol
from lib.plots import overplot_chandra, overplot_pol
from matplotlib.colors import LogNorm

Stokes_UV = fits.open("./data/MRK463E/5960/MRK463E_FOC_b0.05arcsec_c0.10arcsec.fits")
@@ -13,8 +13,8 @@ levels = np.geomspace(1.,99.,10)
# A = overplot_chandra(Stokes_UV, Stokes_Xr)
# A.plot(levels=levels, SNRp_cut=3.0, SNRi_cut=20.0, zoom=1, savename='./plots/MRK463E/Chandra_overplot.pdf')

#B = overplot_chandra(Stokes_UV, Stokes_Xr, norm=LogNorm())
B = overplot_chandra(Stokes_UV, Stokes_Xr, norm=LogNorm())
#B.plot(levels=levels, SNRp_cut=3.0, SNRi_cut=20.0, zoom=1, savename='./plots/MRK463E/Chandra_overplot_forced.pdf')
B.plot(levels=levels, SNRp_cut=3.0, SNRi_cut=20.0, zoom=1, savename='./plots/MRK463E/Chandra_overplot_forced.pdf')

# C = overplot_pol(Stokes_UV, Stokes_IR)
# C.plot(SNRp_cut=3.0, SNRi_cut=20.0, savename='./plots/MRK463E/IR_overplot.pdf')