merge bkg display change to directory change

This commit is contained in:
2024-06-03 12:32:34 +02:00
18 changed files with 262 additions and 156 deletions

View File

@@ -1,4 +1,4 @@
#!/usr/bin/python3
#!/usr/bin/python
# -*- coding:utf-8 -*-
"""
Main script where are progressively added the steps for the FOC pipeline reduction.
@@ -7,10 +7,11 @@ Main script where are progressively added the steps for the FOC pipeline reducti
# Project libraries
import numpy as np
from copy import deepcopy
from os import system
from os.path import exists as path_exists
import lib.fits as proj_fits # Functions to handle fits files
import lib.reduction as proj_red # Functions used in reduction pipeline
import lib.plots as proj_plots # Functions for plotting data
from lib.query import retrieve_products, path_exists, system
from lib.utils import sci_not, princ_angle
from matplotlib.colors import LogNorm
@@ -18,7 +19,7 @@ from matplotlib.colors import LogNorm
def main(target=None, proposal_id=None, infiles=None, output_dir="./data", crop=False, interactive=False):
# Reduction parameters
# Deconvolution
deconvolve = True
deconvolve = False
if deconvolve:
# from lib.deconvolve import from_file_psf
psf = 'gaussian' # Can be user-defined as well
@@ -26,15 +27,15 @@ def main(target=None, proposal_id=None, infiles=None, output_dir="./data", crop=
psf_FWHM = 3.1
psf_scale = 'px'
psf_shape = None # (151, 151)
iterations = 3
algo = "richardson"
iterations = 1
algo = "conjgrad"
# Initial crop
display_crop = False
# Background estimation
error_sub_type = 'sturges' # sqrt, sturges, rice, scott, freedman-diaconis (default) or shape (example (51, 51))
subtract_error = 0.25
error_sub_type = 'freedman-diaconis' # sqrt, sturges, rice, scott, freedman-diaconis (default) or shape (example (51, 51))
subtract_error = 0.50
display_bkg = True
# Data binning
@@ -45,12 +46,12 @@ def main(target=None, proposal_id=None, infiles=None, output_dir="./data", crop=
# Alignement
align_center = 'center' # If None will not align the images
display_align = True
display_align = False
display_data = False
# Smoothing
smoothing_function = 'combine' # gaussian_after, weighted_gaussian_after, gaussian, weighted_gaussian or combine
smoothing_FWHM = 0.10 # If None, no smoothing is done
smoothing_FWHM = 0.150 # If None, no smoothing is done
smoothing_scale = 'arcsec' # pixel or arcsec
# Rotation
@@ -61,7 +62,7 @@ def main(target=None, proposal_id=None, infiles=None, output_dir="./data", crop=
SNRp_cut = 3. # P measurments with SNR>3
SNRi_cut = 3. # I measurments with SNR>30, which implies an uncertainty in P of 4.7%.
flux_lim = None # lowest and highest flux displayed on plot, defaults to bkg and maximum in cut if None
vec_scale = 3
vec_scale = 5
step_vec = 1 # plot all vectors in the array. if step_vec = 2, then every other vector will be plotted if step_vec = 0 then all vectors are displayed at full length
# Pipeline start
@@ -75,10 +76,11 @@ def main(target=None, proposal_id=None, infiles=None, output_dir="./data", crop=
if target is None:
target = input("Target name:\n>")
else:
from lib.query import retrieve_products
target, products = retrieve_products(target, proposal_id, output_dir=output_dir)
prod = products.pop()
for prods in products:
main(target=target, infiles=["/".join(pr) for pr in prods], output_dir=output_dir)
main(target=target, infiles=["/".join(pr) for pr in prods], output_dir=output_dir, crop=crop, interactive=interactive)
data_folder = prod[0][0]
try:
plots_folder = data_folder.replace("data", "plots")
@@ -107,6 +109,7 @@ def main(target=None, proposal_id=None, infiles=None, output_dir="./data", crop=
# Crop data to remove outside blank margins.
data_array, error_array, headers = proj_red.crop_array(data_array, headers, step=5, null_val=0.,
inside=True, display=display_crop, savename=figname, plots_folder=plots_folder)
data_mask = np.ones(data_array[0].shape, dtype=bool)
# Deconvolve data using Richardson-Lucy iterative algorithm with a gaussian PSF of given FWHM.
if deconvolve:
@@ -114,8 +117,7 @@ def main(target=None, proposal_id=None, infiles=None, output_dir="./data", crop=
# Estimate error from data background, estimated from sub-image of desired sub_shape.
background = None
data_array, error_array, headers, background = proj_red.get_error(data_array, headers, error_array, sub_type=error_sub_type, subtract_error=subtract_error, display=display_bkg, savename="_".join([
figname, "errors"]), plots_folder=plots_folder, return_background=True)
data_array, error_array, headers, background = proj_red.get_error(data_array, headers, error_array, data_mask=data_mask, sub_type=error_sub_type, subtract_error=subtract_error, display=display_bkg, savename="_".join([figname, "errors"]), plots_folder=plots_folder, return_background=True)
# Align and rescale images with oversampling.
data_array, error_array, headers, data_mask = proj_red.align_data(

2
package/__init__.py Normal file
View File

@@ -0,0 +1,2 @@
from . import lib
from . import src

9
package/lib/__init__.py Normal file
View File

@@ -0,0 +1,9 @@
from . import background
from . import convex_hull
from . import cross_correlation
from . import deconvolve
from . import fits
from . import plots
from . import query
from . import reduction
from . import utils

View File

@@ -80,7 +80,8 @@ def display_bkg(data, background, std_bkg, headers, histograms=None, binning=Non
label=headers[i]['filtnam1']+' (Obs '+str(filt_obs[headers[i]['filtnam1']])+')')
ax_h.plot([background[i]*convert_flux[i], background[i]*convert_flux[i]], [hist.min(), hist.max()], 'x--', color="C{0:d}".format(i), alpha=0.8)
if not (coeff is None):
ax_h.plot(bins*convert_flux[i], gausspol(bins, *coeff[i]), '--', color="C{0:d}".format(i), alpha=0.8)
# ax_h.plot(bins*convert_flux[i], gausspol(bins, *coeff[i]), '--', color="C{0:d}".format(i), alpha=0.8)
ax_h.plot(bins*convert_flux[i], gauss(bins, *coeff[i]), '--', color="C{0:d}".format(i), alpha=0.8)
ax_h.set_xscale('log')
ax_h.set_ylim([0., np.max([hist.max() for hist in histograms])])
ax_h.set_xlim([np.min(background*convert_flux)*1e-2, np.max(background*convert_flux)*1e2])
@@ -159,12 +160,15 @@ def bkg_estimate(img, bins=None, chi2=None, coeff=None):
peak = binning[np.argmax(hist)]
bins_stdev = binning[hist > hist.max()/2.]
stdev = bins_stdev[-1]-bins_stdev[0]
p0 = [hist.max(), peak, stdev, 1e-3, 1e-3, 1e-3, 1e-3]
# p0 = [hist.max(), peak, stdev, 1e-3, 1e-3, 1e-3, 1e-3]
p0 = [hist.max(), peak, stdev]
try:
popt, pcov = curve_fit(gausspol, binning, hist, p0=p0)
# popt, pcov = curve_fit(gausspol, binning, hist, p0=p0)
popt, pcov = curve_fit(gauss, binning, hist, p0=p0)
except RuntimeError:
popt = p0
chi2.append(np.sum((hist - gausspol(binning, *popt))**2)/hist.size)
# chi2.append(np.sum((hist - gausspol(binning, *popt))**2)/hist.size)
chi2.append(np.sum((hist - gauss(binning, *popt))**2)/hist.size)
coeff.append(popt)
return bins, chi2, coeff
@@ -330,8 +334,10 @@ def bkg_hist(data, error, mask, headers, sub_type=None, subtract_error=True, dis
# Fit a gaussian to the log-intensity histogram
bins_stdev = binning[-1][hist > hist.max()/2.]
stdev = bins_stdev[-1]-bins_stdev[0]
p0 = [hist.max(), binning[-1][np.argmax(hist)], stdev, 1e-3, 1e-3, 1e-3, 1e-3]
popt, pcov = curve_fit(gausspol, binning[-1], hist, p0=p0)
# p0 = [hist.max(), binning[-1][np.argmax(hist)], stdev, 1e-3, 1e-3, 1e-3, 1e-3]
p0 = [hist.max(), binning[-1][np.argmax(hist)], stdev]
# popt, pcov = curve_fit(gausspol, binning[-1], hist, p0=p0)
popt, pcov = curve_fit(gauss, binning[-1], hist, p0=p0)
coeff.append(popt)
bkg = popt[1]+np.abs(popt[2])*subtract_error

View File

@@ -270,7 +270,7 @@ def del_aligned(s, Omega):
# Implement Graham algorithm
def convex_hull(A):
def convex_hull(H):
"""
Implement Graham algorithm to find the convex hull of a given list of
points, handling aligned points.
@@ -283,17 +283,20 @@ def convex_hull(A):
List of points defining the convex hull of the input list A.
"""
S = empty_stack()
Omega = min_lexico(A)
sort_angles_distances(Omega, A)
A = del_aligned(A, Omega)
stack(S, A[0])
stack(S, A[1])
stack(S, A[2])
for i in range(3, len(A)):
while pseudo_angle(stack_sub_top(S), stack_top(S), A[i]) <= 0:
unstack(S)
stack(S, A[i])
return S
Omega = min_lexico(H)
sort_angles_distances(Omega, H)
A = del_aligned(H, Omega)
if len(A) < 3:
return H
else:
stack(S, A[0])
stack(S, A[1])
stack(S, A[2])
for i in range(3, len(A)):
while pseudo_angle(stack_sub_top(S), stack_top(S), A[i]) <= 0:
unstack(S)
stack(S, A[i])
return S
def image_hull(image, step=5, null_val=0., inside=True):
@@ -328,25 +331,24 @@ def image_hull(image, step=5, null_val=0., inside=True):
H = []
shape = np.array(image.shape)
row, col = np.indices(shape)
for i in range(0, shape[0], step):
r = row[i, :][image[i, :] > null_val]
c = col[i, :][image[i, :] > null_val]
if len(r) > 1 and len(c) > 1:
H.append((r[0], c[0]))
H.append((r[-1], c[-1]))
for j in range(0, shape[1], step):
r = row[:, j][image[:, j] > null_val]
c = col[:, j][image[:, j] > null_val]
if len(r) > 1 and len(c) > 1:
if not ((r[0], c[0]) in H):
H.append((r[0], c[0]))
if not ((r[-1], c[-1]) in H):
H.append((r[-1], c[-1]))
for i in range(0, int(min(shape)/2), step):
r1, r2 = row[i, :][image[i, :] > null_val], row[-i, :][image[-i, :] > null_val]
c1, c2 = col[i, :][image[i, :] > null_val], col[-i, :][image[-i, :] > null_val]
if r1.shape[0] > 1:
H.append((r1[0], c1[0]))
H.append((r1[-1], c1[-1]))
if r2.shape[0] > 1:
H.append((r2[0], c2[0]))
H.append((r2[-1], c2[-1]))
S = np.array(convex_hull(H))
x_min, y_min = S[:, 0] < S[:, 0].mean(), S[:, 1] < S[:, 1].mean()
x_max, y_max = S[:, 0] > S[:, 0].mean(), S[:, 1] > S[:, 1].mean()
# Get the 4 extrema
# S0 = S[x_min*y_min][np.argmin(S[x_min*y_min][:, 0])]
# S1 = S[x_min*y_max][np.argmax(S[x_min*y_max][:, 1])]
# S2 = S[x_max*y_min][np.argmin(S[x_max*y_min][:, 1])]
# S3 = S[x_max*y_max][np.argmax(S[x_max*y_max][:, 0])]
S0 = S[x_min*y_min][np.abs(0-S[x_min*y_min].sum(axis=1)).min() == np.abs(0-S[x_min*y_min].sum(axis=1))][0]
S1 = S[x_min*y_max][np.abs(shape[1]-S[x_min*y_max].sum(axis=1)).min() == np.abs(shape[1]-S[x_min*y_max].sum(axis=1))][0]
S2 = S[x_max*y_min][np.abs(shape[0]-S[x_max*y_min].sum(axis=1)).min() == np.abs(shape[0]-S[x_max*y_min].sum(axis=1))][0]

View File

@@ -13,8 +13,7 @@ import numpy as np
from os.path import join as path_join
from astropy.io import fits
from astropy.wcs import WCS
from lib.convex_hull import clean_ROI
from lib.utils import princ_angle
from .convex_hull import clean_ROI
def get_obs_data(infiles, data_folder="", compute_flux=False):
@@ -67,7 +66,7 @@ def get_obs_data(infiles, data_folder="", compute_flux=False):
new_wcs.wcs.cdelt = new_cdelt
for key, val in new_wcs.to_header().items():
header[key] = val
header['orientat'] = princ_angle(float(header['orientat']))
# header['orientat'] = princ_angle(float(header['orientat']))
# force WCS for POL60 to have same pixel size as POL0 and POL120
is_pol60 = np.array([head['filtnam1'].lower() == 'pol60' for head in headers], dtype=bool)

View File

@@ -1,3 +1,4 @@
#!/usr/bin/python
"""
Library functions for displaying informations using matplotlib
@@ -54,7 +55,10 @@ from astropy.wcs import WCS
from astropy.io import fits
from astropy.coordinates import SkyCoord
from scipy.ndimage import zoom as sc_zoom
from lib.utils import rot2D, princ_angle, sci_not
try:
from .utils import rot2D, princ_angle, sci_not
except ImportError:
from utils import rot2D, princ_angle, sci_not
def plot_obs(data_array, headers, rectangle=None, savename=None, plots_folder="", **kwargs):
@@ -90,7 +94,7 @@ def plot_obs(data_array, headers, rectangle=None, savename=None, plots_folder=""
plt.rcParams.update({'font.size': 10})
nb_obs = np.max([np.sum([head['filtnam1'] == curr_pol for head in headers]) for curr_pol in ['POL0', 'POL60', 'POL120']])
shape = np.array((3, nb_obs))
fig, ax = plt.subplots(shape[0], shape[1], figsize=(10, 10), dpi=200,
fig, ax = plt.subplots(shape[0], shape[1], figsize=(3*shape[1], 3*shape[0]), dpi=200, layout='constrained',
sharex=True, sharey=True)
r_pol = dict(pol0=0, pol60=1, pol120=2)
c_pol = dict(pol0=0, pol60=0, pol120=0)
@@ -107,7 +111,11 @@ def plot_obs(data_array, headers, rectangle=None, savename=None, plots_folder=""
else:
ax_curr = ax[r_ax]
# plots
vmin, vmax = convert*data[data > 0.].min()/10., convert*data[data > 0.].max()
if ('vmin' in kwargs.keys() or 'vmax' in kwargs.keys()):
vmin, vmax = kwargs['vmin'], kwargs['vmax']
del kwargs['vmin'], kwargs['vmax']
else:
vmin, vmax = convert*data[data > 0.].min()/10., convert*data[data > 0.].max()
for key, value in [["cmap", [["cmap", "gray"]]], ["norm", [["norm", LogNorm(vmin, vmax)]]]]:
try:
_ = kwargs[key]
@@ -127,7 +135,7 @@ def plot_obs(data_array, headers, rectangle=None, savename=None, plots_folder=""
ax_curr.annotate(filt, color='white', fontsize=10, xy=(0.01, 0.01), xycoords='axes fraction', verticalalignment='bottom', horizontalalignment='left')
ax_curr.annotate(exptime, color='white', fontsize=5, xy=(1.00, 0.01), xycoords='axes fraction', verticalalignment='bottom', horizontalalignment='right')
fig.subplots_adjust(hspace=0.01, wspace=0.01, right=1.02)
# fig.subplots_adjust(hspace=0.01, wspace=0.01, right=1.02)
fig.colorbar(im, ax=ax, location='right', shrink=0.75, aspect=50, pad=0.025, label=r"Flux [$ergs \cdot cm^{-2} \cdot s^{-1} \cdot \AA^{-1}$]")
if not (savename is None):
@@ -160,6 +168,10 @@ def plot_Stokes(Stokes, savename=None, plots_folder=""):
stkI = Stokes['I_stokes'].data.copy()
stkQ = Stokes['Q_stokes'].data.copy()
stkU = Stokes['U_stokes'].data.copy()
data_mask = Stokes['Data_mask'].data.astype(bool)
for dataset in [stkI, stkQ, stkU]:
dataset[np.logical_not(data_mask)] = np.nan
wcs = WCS(Stokes[0]).deepcopy()
@@ -192,7 +204,7 @@ def plot_Stokes(Stokes, savename=None, plots_folder=""):
return 0
def polarisation_map(Stokes, data_mask=None, rectangle=None, SNRp_cut=3., SNRi_cut=30.,
def polarisation_map(Stokes, data_mask=None, rectangle=None, SNRp_cut=3., SNRi_cut=3.,
flux_lim=None, step_vec=1, vec_scale=2., savename=None, plots_folder="", display="default"):
"""
Plots polarisation map from Stokes HDUList.
@@ -244,17 +256,23 @@ def polarisation_map(Stokes, data_mask=None, rectangle=None, SNRp_cut=3., SNRi_c
The figure and ax created for interactive contour maps.
"""
# Get data
stkI = Stokes[np.argmax([Stokes[i].header['datatype'] == 'I_stokes' for i in range(len(Stokes))])]
stk_cov = Stokes[np.argmax([Stokes[i].header['datatype'] == 'IQU_cov_matrix' for i in range(len(Stokes))])]
pol = Stokes[np.argmax([Stokes[i].header['datatype'] == 'Pol_deg_debiased' for i in range(len(Stokes))])]
pol_err = Stokes[np.argmax([Stokes[i].header['datatype'] == 'Pol_deg_err' for i in range(len(Stokes))])]
pang = Stokes[np.argmax([Stokes[i].header['datatype'] == 'Pol_ang' for i in range(len(Stokes))])]
stkI = Stokes['I_stokes'].data.copy()
stk_cov = Stokes['IQU_cov_matrix'].data.copy()
pol = Stokes['Pol_deg_debiased'].data.copy()
pol_err = Stokes['Pol_deg_err'].data.copy()
pang = Stokes['Pol_ang'].data.copy()
try:
if data_mask is None:
data_mask = Stokes[np.argmax([Stokes[i].header['datatype'] == 'Data_mask' for i in range(len(Stokes))])].data.astype(bool)
data_mask = Stokes['Data_mask'].data.astype(bool).copy()
except KeyError:
data_mask = np.ones(stkI.shape).astype(bool)
for dataset in [stkI, pol, pol_err, pang]:
dataset[np.logical_not(data_mask)] = np.nan
for i in range(3):
for j in range(3):
stk_cov[i][j][np.logical_not(data_mask)] = np.nan
pivot_wav = Stokes[0].header['photplam']
convert_flux = Stokes[0].header['photflam']
wcs = WCS(Stokes[0]).deepcopy()
@@ -264,95 +282,94 @@ def polarisation_map(Stokes, data_mask=None, rectangle=None, SNRp_cut=3., SNRi_c
plot_Stokes(Stokes, savename=savename, plots_folder=plots_folder)
# Compute SNR and apply cuts
poldata, pangdata = pol.data.copy(), pang.data.copy()
maskP = pol_err.data > 0
SNRp = np.zeros(pol.data.shape)
SNRp[maskP] = pol.data[maskP]/pol_err.data[maskP]
poldata, pangdata = pol.copy(), pang.copy()
maskP = pol_err > 0
SNRp = np.ones(pol.shape)*np.nan
SNRp[maskP] = pol[maskP]/pol_err[maskP]
maskI = stk_cov.data[0, 0] > 0
SNRi = np.zeros(stkI.data.shape)
SNRi[maskI] = stkI.data[maskI]/np.sqrt(stk_cov.data[0, 0][maskI])
maskI = stk_cov[0, 0] > 0
SNRi = np.ones(stkI.shape)*np.nan
SNRi[maskI] = stkI[maskI]/np.sqrt(stk_cov[0, 0][maskI])
mask = (SNRp > SNRp_cut) * (SNRi > SNRi_cut)
poldata[np.logical_not(mask)] = np.nan
pangdata[np.logical_not(mask)] = np.nan
# Look for pixel of max polarisation
if np.isfinite(pol.data).any():
p_max = np.max(pol.data[np.isfinite(pol.data)])
x_max, y_max = np.unravel_index(np.argmax(pol.data == p_max), pol.data.shape)
if np.isfinite(pol).any():
p_max = np.max(pol[np.isfinite(pol)])
x_max, y_max = np.unravel_index(np.argmax(pol == p_max), pol.shape)
else:
print("No pixel with polarisation information above requested SNR.")
# Plot the map
plt.rcParams.update({'font.size': 10})
plt.rcdefaults()
fig, ax = plt.subplots(figsize=(10, 10), subplot_kw=dict(projection=wcs))
fig, ax = plt.subplots(figsize=(10, 10), layout='constrained', subplot_kw=dict(projection=wcs))
ax.set(aspect='equal', fc='k')
fig.subplots_adjust(hspace=0, wspace=0, left=0.102, right=1.02)
# fig.subplots_adjust(hspace=0, wspace=0, left=0.102, right=1.02)
if display.lower() in ['intensity']:
# If no display selected, show intensity map
display = 'i'
if flux_lim is None:
if mask.sum() > 0.:
vmin, vmax = 1./2.*np.median(np.sqrt(stk_cov.data[0, 0][mask])*convert_flux), np.max(stkI.data[stkI.data > 0.]*convert_flux)
vmin, vmax = 1./2.*np.median(np.sqrt(stk_cov[0, 0][mask])*convert_flux), np.max(stkI[stkI > 0.]*convert_flux)
else:
vmin, vmax = 1./2.*np.median(np.sqrt(stk_cov.data[0, 0][stkI.data > 0.])*convert_flux), np.max(stkI.data[stkI.data > 0.]*convert_flux)
vmin, vmax = 1./2.*np.median(np.sqrt(stk_cov[0, 0][stkI > 0.])*convert_flux), np.max(stkI[stkI > 0.]*convert_flux)
else:
vmin, vmax = flux_lim
im = ax.imshow(stkI.data*convert_flux, norm=LogNorm(vmin, vmax), aspect='equal', cmap='inferno', alpha=1.)
im = ax.imshow(stkI*convert_flux, norm=LogNorm(vmin, vmax), aspect='equal', cmap='inferno', alpha=1.)
fig.colorbar(im, ax=ax, aspect=50, shrink=0.75, pad=0.025, label=r"$F_{\lambda}$ [$ergs \cdot cm^{-2} \cdot s^{-1} \cdot \AA^{-1}$]")
levelsI = np.array([0.8, 2., 5., 10., 20., 50.])/100.*vmax
print("Total flux contour levels : ", levelsI)
ax.contour(stkI.data*convert_flux, levels=levelsI, colors='grey', linewidths=0.5)
ax.contour(stkI*convert_flux, levels=levelsI, colors='grey', linewidths=0.5)
elif display.lower() in ['pol_flux']:
# Display polarisation flux
display = 'pf'
if flux_lim is None:
if mask.sum() > 0.:
vmin, vmax = 1./2.*np.median(np.sqrt(stk_cov.data[0, 0][mask])*convert_flux), np.max(stkI.data[stkI.data > 0.]*convert_flux)
vmin, vmax = 1./2.*np.median(np.sqrt(stk_cov[0, 0][mask])*convert_flux), np.max(stkI[stkI > 0.]*convert_flux)
else:
vmin, vmax = 1./2.*np.median(np.sqrt(stk_cov.data[0, 0][stkI.data > 0.])*convert_flux), np.max(stkI.data[stkI.data > 0.]*convert_flux)
vmin, vmax = 1./2.*np.median(np.sqrt(stk_cov[0, 0][stkI > 0.])*convert_flux), np.max(stkI[stkI > 0.]*convert_flux)
else:
vmin, vmax = flux_lim
im = ax.imshow(stkI.data*convert_flux*pol.data, norm=LogNorm(vmin, vmax), aspect='equal', cmap='inferno', alpha=1.)
im = ax.imshow(stkI*convert_flux*pol, norm=LogNorm(vmin, vmax), aspect='equal', cmap='inferno', alpha=1.)
fig.colorbar(im, ax=ax, aspect=50, shrink=0.75, pad=0.025, label=r"$F_{\lambda} \cdot P$ [$ergs \cdot cm^{-2} \cdot s^{-1} \cdot \AA^{-1}$]")
levelsPf = np.linspace(vmax*0.01, vmax*0.99, 10)
print("Polarized flux contour levels : ", levelsPf)
ax.contour(stkI.data*convert_flux*pol.data, levels=levelsPf, colors='grey', linewidths=0.5)
ax.contour(stkI*convert_flux*pol, levels=levelsPf, colors='grey', linewidths=0.5)
elif display.lower() in ['p', 'pol', 'pol_deg']:
# Display polarisation degree map
display = 'p'
vmin, vmax = 0., 100.
im = ax.imshow(pol.data*100., vmin=vmin, vmax=vmax, aspect='equal', cmap='inferno', alpha=1.)
im = ax.imshow(pol*100., vmin=vmin, vmax=vmax, aspect='equal', cmap='inferno', alpha=1.)
fig.colorbar(im, ax=ax, aspect=50, shrink=0.75, pad=0.025, label=r"$P$ [%]")
elif display.lower() in ['pa', 'pang', 'pol_ang']:
# Display polarisation degree map
display = 'pa'
vmin, vmax = 0., 180.
im = ax.imshow(princ_angle(pang.data), vmin=vmin, vmax=vmax, aspect='equal', cmap='inferno', alpha=1.)
im = ax.imshow(princ_angle(pang), vmin=vmin, vmax=vmax, aspect='equal', cmap='inferno', alpha=1.)
fig.colorbar(im, ax=ax, aspect=50, shrink=0.75, pad=0.025, label=r"$\theta_P$ [°]")
elif display.lower() in ['s_p', 'pol_err', 'pol_deg_err']:
# Display polarisation degree error map
display = 's_p'
if (SNRp > SNRp_cut).any():
vmin, vmax = 0., np.max(pol_err.data[SNRp > SNRp_cut])*100.
p_err = deepcopy(pol_err.data)
p_err[p_err > vmax/100.] = np.nan
im = ax.imshow(p_err*100., vmin=vmin, vmax=vmax, aspect='equal', cmap='inferno', alpha=1.)
vmin, vmax = 0., np.max([pol_err[SNRp > SNRp_cut].max(), 1.])*100.
im = ax.imshow(pol_err*100., vmin=vmin, vmax=vmax, aspect='equal', cmap='inferno_r', alpha=1.)
else:
im = ax.imshow(pol_err.data*100., aspect='equal', cmap='inferno', alpha=1.)
vmin, vmax = 0., 100.
im = ax.imshow(pol_err*100., vmin=vmin, vmax=vmax, aspect='equal', cmap='inferno_r', alpha=1.)
fig.colorbar(im, ax=ax, aspect=50, shrink=0.75, pad=0.025, label=r"$\sigma_P$ [%]")
elif display.lower() in ['s_i', 'i_err']:
# Display intensity error map
display = 's_i'
if (SNRi > SNRi_cut).any():
vmin, vmax = 1./2.*np.median(np.sqrt(stk_cov.data[0, 0][stk_cov.data[0, 0] > 0.]) *
convert_flux), np.max(np.sqrt(stk_cov.data[0, 0][stk_cov.data[0, 0] > 0.])*convert_flux)
im = ax.imshow(np.sqrt(stk_cov.data[0, 0])*convert_flux, norm=LogNorm(vmin, vmax), aspect='equal', cmap='inferno', alpha=1.)
vmin, vmax = 1./2.*np.median(np.sqrt(stk_cov[0, 0][stk_cov[0, 0] > 0.]) *
convert_flux), np.max(np.sqrt(stk_cov[0, 0][stk_cov[0, 0] > 0.])*convert_flux)
im = ax.imshow(np.sqrt(stk_cov[0, 0])*convert_flux, norm=LogNorm(vmin, vmax), aspect='equal', cmap='inferno_r', alpha=1.)
else:
im = ax.imshow(np.sqrt(stk_cov.data[0, 0])*convert_flux, aspect='equal', cmap='inferno', alpha=1.)
im = ax.imshow(np.sqrt(stk_cov[0, 0])*convert_flux, aspect='equal', cmap='inferno', alpha=1.)
fig.colorbar(im, ax=ax, aspect=50, shrink=0.75, pad=0.025, label=r"$\sigma_I$ [$ergs \cdot cm^{-2} \cdot s^{-1} \cdot \AA^{-1}$]")
elif display.lower() in ['snr', 'snri']:
# Display I_stokes signal-to-noise map
@@ -381,15 +398,15 @@ def polarisation_map(Stokes, data_mask=None, rectangle=None, SNRp_cut=3., SNRi_c
else:
# Defaults to intensity map
if mask.sum() > 0.:
vmin, vmax = 1.*np.mean(np.sqrt(stk_cov.data[0, 0][mask])*convert_flux), np.max(stkI.data[stkI.data > 0.]*convert_flux)
vmin, vmax = 1.*np.mean(np.sqrt(stk_cov[0, 0][mask])*convert_flux), np.max(stkI[stkI > 0.]*convert_flux)
else:
vmin, vmax = 1.*np.mean(np.sqrt(stk_cov.data[0, 0][stkI.data > 0.])*convert_flux), np.max(stkI.data[stkI.data > 0.]*convert_flux)
im = ax.imshow(stkI.data*convert_flux, norm=LogNorm(vmin, vmax), aspect='equal', cmap='inferno', alpha=1.)
vmin, vmax = 1.*np.mean(np.sqrt(stk_cov[0, 0][stkI > 0.])*convert_flux), np.max(stkI[stkI > 0.]*convert_flux)
im = ax.imshow(stkI*convert_flux, norm=LogNorm(vmin, vmax), aspect='equal', cmap='inferno', alpha=1.)
fig.colorbar(im, ax=ax, aspect=50, shrink=0.75, pad=0.025, label=r"$F_{\lambda}$ [$ergs \cdot cm^{-2} \cdot s^{-1} \cdot \AA$]")
# Get integrated values from header
I_diluted = stkI.data[data_mask].sum()
I_diluted_err = np.sqrt(np.sum(stk_cov.data[0, 0][data_mask]))
I_diluted = stkI[data_mask].sum()
I_diluted_err = np.sqrt(np.sum(stk_cov[0, 0][data_mask]))
P_diluted = Stokes[0].header['P_int']
P_diluted_err = Stokes[0].header['P_int_err']
@@ -407,10 +424,10 @@ def polarisation_map(Stokes, data_mask=None, rectangle=None, SNRp_cut=3., SNRi_c
poldata[np.isfinite(poldata)] = 1./2.
step_vec = 1
vec_scale = 2.
X, Y = np.meshgrid(np.arange(stkI.data.shape[1]), np.arange(stkI.data.shape[0]))
X, Y = np.meshgrid(np.arange(stkI.shape[1]), np.arange(stkI.shape[0]))
U, V = poldata*np.cos(np.pi/2.+pangdata*np.pi/180.), poldata*np.sin(np.pi/2.+pangdata*np.pi/180.)
ax.quiver(X[::step_vec, ::step_vec], Y[::step_vec, ::step_vec], U[::step_vec, ::step_vec], V[::step_vec, ::step_vec], units='xy', angles='uv',
scale=1./vec_scale, scale_units='xy', pivot='mid', headwidth=0., headlength=0., headaxislength=0., width=0.2, linewidth=0.3, color='w', edgecolor='k')
scale=1./vec_scale, scale_units='xy', pivot='mid', headwidth=0., headlength=0., headaxislength=0., width=0.5, linewidth=0.8, color='w', edgecolor='k')
pol_sc = AnchoredSizeBar(ax.transData, vec_scale, r"$P$= 100 %", 4, pad=0.5, sep=5, borderpad=0.5, frameon=False, size_vertical=0.005, color='w')
ax.add_artist(pol_sc)
@@ -429,7 +446,7 @@ def polarisation_map(Stokes, data_mask=None, rectangle=None, SNRp_cut=3., SNRi_c
# Display instrument FOV
if not (rectangle is None):
x, y, width, height, angle, color = rectangle
x, y = np.array([x, y]) - np.array(stkI.data.shape)/2.
x, y = np.array([x, y]) - np.array(stkI.shape)/2.
ax.add_patch(Rectangle((x, y), width, height, angle=angle,
edgecolor=color, fill=False))
@@ -660,7 +677,7 @@ class overplot_radio(align_maps):
Inherit from class align_maps in order to get the same WCS on both maps.
"""
def overplot(self, levels=None, SNRp_cut=3., SNRi_cut=30., vec_scale=2, savename=None, **kwargs):
def overplot(self, levels=None, SNRp_cut=3., SNRi_cut=3., vec_scale=2, savename=None, **kwargs):
self.Stokes_UV = self.map
self.wcs_UV = self.map_wcs
# Get Data
@@ -721,7 +738,7 @@ class overplot_radio(align_maps):
self.X, self.Y = np.meshgrid(np.arange(stkI.shape[1]), np.arange(stkI.shape[0]))
self.U, self.V = pol*np.cos(np.pi/2.+pang*np.pi/180.), pol*np.sin(np.pi/2.+pang*np.pi/180.)
self.Q = self.ax_overplot.quiver(self.X[::step_vec, ::step_vec], self.Y[::step_vec, ::step_vec], self.U[::step_vec, ::step_vec], self.V[::step_vec, ::step_vec], units='xy', angles='uv', scale=1./self.vec_scale,
scale_units='xy', pivot='mid', headwidth=0., headlength=0., headaxislength=0., width=0.1, linewidth=0.5, color='white', edgecolor='black', label="{0:s} polarisation map".format(self.map_observer))
scale_units='xy', pivot='mid', headwidth=0., headlength=0., headaxislength=0., width=0.5, linewidth=0.8, color='white', edgecolor='black', label="{0:s} polarisation map".format(self.map_observer))
self.ax_overplot.autoscale(False)
# Display other map as contours
@@ -767,7 +784,7 @@ class overplot_radio(align_maps):
self.fig_overplot.canvas.draw()
def plot(self, levels=None, SNRp_cut=3., SNRi_cut=30., savename=None, **kwargs) -> None:
def plot(self, levels=None, SNRp_cut=3., SNRi_cut=3., savename=None, **kwargs) -> None:
while not self.aligned:
self.align()
self.overplot(levels=levels, SNRp_cut=SNRp_cut, SNRi_cut=SNRi_cut, savename=savename, **kwargs)
@@ -780,7 +797,7 @@ class overplot_chandra(align_maps):
Inherit from class align_maps in order to get the same WCS on both maps.
"""
def overplot(self, levels=None, SNRp_cut=3., SNRi_cut=30., vec_scale=2, zoom=1, savename=None, **kwargs):
def overplot(self, levels=None, SNRp_cut=3., SNRi_cut=3., vec_scale=2, zoom=1, savename=None, **kwargs):
self.Stokes_UV = self.map
self.wcs_UV = self.map_wcs
# Get Data
@@ -840,7 +857,7 @@ class overplot_chandra(align_maps):
self.X, self.Y = np.meshgrid(np.arange(stkI.shape[1]), np.arange(stkI.shape[0]))
self.U, self.V = pol*np.cos(np.pi/2.+pang*np.pi/180.), pol*np.sin(np.pi/2.+pang*np.pi/180.)
self.Q = self.ax_overplot.quiver(self.X[::step_vec, ::step_vec], self.Y[::step_vec, ::step_vec], self.U[::step_vec, ::step_vec], self.V[::step_vec, ::step_vec], units='xy', angles='uv', scale=1./self.vec_scale,
scale_units='xy', pivot='mid', headwidth=0., headlength=0., headaxislength=0., width=0.1, linewidth=0.5, color='white', edgecolor='black', label="{0:s} polarisation map".format(self.map_observer))
scale_units='xy', pivot='mid', headwidth=0., headlength=0., headaxislength=0., width=0.5, linewidth=0.8, color='white', edgecolor='black', label="{0:s} polarisation map".format(self.map_observer))
self.ax_overplot.autoscale(False)
# Display other map as contours
@@ -886,7 +903,7 @@ class overplot_chandra(align_maps):
self.fig_overplot.canvas.draw()
def plot(self, levels=None, SNRp_cut=3., SNRi_cut=30., zoom=1, savename=None, **kwargs) -> None:
def plot(self, levels=None, SNRp_cut=3., SNRi_cut=3., zoom=1, savename=None, **kwargs) -> None:
while not self.aligned:
self.align()
self.overplot(levels=levels, SNRp_cut=SNRp_cut, SNRi_cut=SNRi_cut, zoom=zoom, savename=savename, **kwargs)
@@ -899,7 +916,7 @@ class overplot_pol(align_maps):
Inherit from class align_maps in order to get the same WCS on both maps.
"""
def overplot(self, levels=None, SNRp_cut=3., SNRi_cut=30., vec_scale=2., savename=None, **kwargs):
def overplot(self, levels=None, SNRp_cut=3., SNRi_cut=3., vec_scale=2., savename=None, **kwargs):
self.Stokes_UV = self.map
self.wcs_UV = self.map_wcs
# Get Data
@@ -959,7 +976,7 @@ class overplot_pol(align_maps):
self.X, self.Y = np.meshgrid(np.arange(stkI.shape[1]), np.arange(stkI.shape[0]))
self.U, self.V = pol*np.cos(np.pi/2.+pang*np.pi/180.), pol*np.sin(np.pi/2.+pang*np.pi/180.)
self.Q = self.ax_overplot.quiver(self.X[::step_vec, ::step_vec], self.Y[::step_vec, ::step_vec], self.U[::step_vec, ::step_vec], self.V[::step_vec, ::step_vec], units='xy', angles='uv', scale=px_scale/self.vec_scale, scale_units='xy', pivot='mid',
headwidth=0., headlength=0., headaxislength=0., width=0.1/px_scale, linewidth=0.5, color='white', edgecolor='black', transform=self.ax_overplot.get_transform(self.wcs_UV), label="{0:s} polarisation map".format(self.map_observer))
headwidth=0., headlength=0., headaxislength=0., width=2.0, linewidth=1.0, color='white', edgecolor='black', transform=self.ax_overplot.get_transform(self.wcs_UV), label="{0:s} polarisation map".format(self.map_observer))
# Display Stokes I as contours
if levels is None:
@@ -1006,7 +1023,7 @@ class overplot_pol(align_maps):
self.fig_overplot.canvas.draw()
def plot(self, levels=None, SNRp_cut=3., SNRi_cut=30., vec_scale=2., savename=None, **kwargs) -> None:
def plot(self, levels=None, SNRp_cut=3., SNRi_cut=3., vec_scale=2., savename=None, **kwargs) -> None:
while not self.aligned:
self.align()
self.overplot(levels=levels, SNRp_cut=SNRp_cut, SNRi_cut=SNRi_cut, vec_scale=vec_scale, savename=savename, **kwargs)
@@ -1046,7 +1063,7 @@ class align_pol(object):
self.kwargs = kwargs
def single_plot(self, curr_map, wcs, v_lim=None, ax_lim=None, SNRp_cut=3., SNRi_cut=30., savename=None, **kwargs):
def single_plot(self, curr_map, wcs, v_lim=None, ax_lim=None, SNRp_cut=3., SNRi_cut=3., savename=None, **kwargs):
# Get data
stkI = curr_map['I_STOKES'].data
stk_cov = curr_map['IQU_COV_MATRIX'].data
@@ -1115,7 +1132,7 @@ class align_pol(object):
X, Y = np.meshgrid(np.arange(stkI.shape[1]), np.arange(stkI.shape[0]))
U, V = pol*np.cos(np.pi/2.+pang*np.pi/180.), pol*np.sin(np.pi/2.+pang*np.pi/180.)
ax.quiver(X[::step_vec, ::step_vec], Y[::step_vec, ::step_vec], U[::step_vec, ::step_vec], V[::step_vec, ::step_vec], units='xy',
angles='uv', scale=0.5, scale_units='xy', pivot='mid', headwidth=0., headlength=0., headaxislength=0., width=0.1, color='w')
angles='uv', scale=0.5, scale_units='xy', pivot='mid', headwidth=0., headlength=0., headaxislength=0., width=0.5, linewidth=0.8, color='w')
pol_sc = AnchoredSizeBar(ax.transData, 2., r"$P$= 100 %", 4, pad=0.5, sep=5, borderpad=0.5, frameon=False, size_vertical=0.005, color='w')
ax.add_artist(pol_sc)
@@ -1137,7 +1154,7 @@ class align_pol(object):
self.wcs, self.wcs_other[i] = curr_align.align()
self.aligned[i] = curr_align.aligned
def plot(self, SNRp_cut=3., SNRi_cut=30., savename=None, **kwargs):
def plot(self, SNRp_cut=3., SNRi_cut=3., savename=None, **kwargs):
while not self.aligned.all():
self.align()
eps = 1e-35
@@ -1724,7 +1741,7 @@ class pol_map(object):
Class to interactively study polarisation maps.
"""
def __init__(self, Stokes, SNRp_cut=3., SNRi_cut=30., flux_lim=None, selection=None):
def __init__(self, Stokes, SNRp_cut=3., SNRi_cut=3., flux_lim=None, selection=None):
if isinstance(Stokes, str):
Stokes = fits.open(Stokes)
@@ -2053,7 +2070,7 @@ class pol_map(object):
def submit_save(expression):
ax_text_save.set(visible=False)
if expression != '':
save_fig, save_ax = plt.subplots(figsize=(12, 10), layout='tight', subplot_kw=dict(projection=self.wcs))
save_fig, save_ax = plt.subplots(figsize=(12, 10), layout='constrained', subplot_kw=dict(projection=self.wcs))
self.ax_cosmetics(ax=save_ax)
self.display(fig=save_fig, ax=save_ax)
self.pol_vector(fig=save_fig, ax=save_ax)
@@ -2336,12 +2353,12 @@ class pol_map(object):
if hasattr(self, 'quiver'):
self.quiver.remove()
self.quiver = ax.quiver(X, Y, XY_U, XY_V, units='xy', scale=1./self.vec_scale, scale_units='xy', pivot='mid', headwidth=0.,
headlength=0., headaxislength=0., width=0.2, linewidth=0.3, color='white', edgecolor='black')
headlength=0., headaxislength=0., width=0.5, linewidth=0.8, color='white', edgecolor='black')
fig.canvas.draw_idle()
return self.quiver
else:
ax.quiver(X, Y, XY_U, XY_V, units='xy', scale=1./self.vec_scale, scale_units='xy', pivot='mid', headwidth=0.,
headlength=0., headaxislength=0., width=0.2, linewidth=0.3, color='white', edgecolor='black')
headlength=0., headaxislength=0., width=0.5, linewidth=0.8, color='white', edgecolor='black')
fig.canvas.draw_idle()
def pol_int(self, fig=None, ax=None):
@@ -2450,3 +2467,21 @@ class pol_map(object):
if self.region is not None:
ax.contour(self.region.astype(float), levels=[0.5], colors='white', linewidths=0.8)
fig.canvas.draw_idle()
if __name__ == "__main__":
    import argparse

    # Command-line entry point: open a reduced FITS product and inspect it
    # interactively through the pol_map viewer defined above.
    cli = argparse.ArgumentParser(description='Interactively plot the pipeline products')
    cli.add_argument('-f', '--file', metavar='path', required=False, help='the full or relative path to the data product', type=str, default=None)
    cli.add_argument('-p', '--snrp', metavar='snrp_cut', required=False, help='the cut in signal-to-noise for the polarisation degree', type=float, default=3.)
    cli.add_argument('-i', '--snri', metavar='snri_cut', required=False, help='the cut in signal-to-noise for the intensity', type=float, default=3.)
    cli.add_argument('-l', '--lim', metavar='flux_lim', nargs=2, required=False, help='limits for the intensity map', default=None)
    options = cli.parse_args()
    if options.file is None:
        # No product supplied: remind the user of the expected invocation.
        print("python3 plots.py -f <path_to_reduced_fits> -p <SNRp_cut> -i <SNRi_cut> -l <flux_lim>")
    else:
        Stokes_UV = fits.open(options.file, mode='readonly')
        p = pol_map(Stokes_UV, SNRp_cut=options.snrp, SNRi_cut=options.snri, flux_lim=options.lim)

View File

@@ -16,14 +16,6 @@ def divide_proposal(products):
"""
Divide observation in proposals by time or filter
"""
for pid in np.unique(products['Proposal ID']):
obs = products[products['Proposal ID'] == pid].copy()
close_date = np.unique(np.array([TimeDelta(np.abs(obs['Start'][0].unix-date.unix), format='sec')
< 7.*u.d for date in obs['Start']], dtype=bool), axis=0)
if len(close_date) > 1:
for date in close_date:
products['Proposal ID'][np.any([products['Dataset'] == dataset for dataset in obs['Dataset'][date]], axis=0)
] = "_".join([obs['Proposal ID'][date][0], str(obs['Start'][date][0])[:10]])
for pid in np.unique(products['Proposal ID']):
obs = products[products['Proposal ID'] == pid].copy()
same_filt = np.unique(np.array(np.sum([obs['Filters'][:, 1:] == filt[1:] for filt in obs['Filters']], axis=2) < 3, dtype=bool), axis=0)
@@ -31,6 +23,14 @@ def divide_proposal(products):
for filt in same_filt:
products['Proposal ID'][np.any([products['Dataset'] == dataset for dataset in obs['Dataset'][filt]], axis=0)] = "_".join(
[obs['Proposal ID'][filt][0], "_".join([fi for fi in obs['Filters'][filt][0][1:] if fi[:-1] != "CLEAR"])])
for pid in np.unique(products['Proposal ID']):
obs = products[products['Proposal ID'] == pid].copy()
close_date = np.unique([[np.abs(TimeDelta(obs['Start'][i].unix-date.unix, format='sec'))
< 7.*u.d for i in range(len(obs))] for date in obs['Start']], axis=0)
if len(close_date) > 1:
for date in close_date:
products['Proposal ID'][np.any([products['Dataset'] == dataset for dataset in obs['Dataset'][date]], axis=0)
] = "_".join([obs['Proposal ID'][date][0], str(obs['Start'][date][0])[:10]])
return products
@@ -147,8 +147,6 @@ def get_product_list(target=None, proposal_id=None):
for prod in products:
prod['target_name'] = observations['target_name'][observations['obsid'] == prod['obsID']][0]
tab = unique(products, ['target_name', 'proposal_id'])
if len(tab) > 1 and np.all(tab['target_name'] == tab['target_name'][0]):
target = tab['target_name'][0]
products["Obs"] = [np.argmax(np.logical_and(tab['proposal_id'] == data['proposal_id'], tab['target_name'] == data['target_name']))+1 for data in products]
return target, products

View File

@@ -49,12 +49,12 @@ from scipy.signal import fftconvolve
from astropy.wcs import WCS
from astropy import log
import warnings
from lib.deconvolve import deconvolve_im, gaussian_psf, gaussian2d, zeropad
from lib.convex_hull import image_hull, clean_ROI
from lib.background import bkg_fit, bkg_hist, bkg_mini
from lib.plots import plot_obs
from lib.utils import princ_angle
from lib.cross_correlation import phase_cross_correlation
from .deconvolve import deconvolve_im, gaussian_psf, gaussian2d, zeropad
from .convex_hull import image_hull, clean_ROI
from .background import bkg_fit, bkg_hist, bkg_mini
from .plots import plot_obs
from .utils import princ_angle
from .cross_correlation import phase_cross_correlation
log.setLevel('ERROR')
@@ -276,7 +276,7 @@ def crop_array(data_array, headers, error_array=None, data_mask=None, step=5, nu
if display:
plt.rcParams.update({'font.size': 15})
fig, ax = plt.subplots(figsize=(10, 10))
fig, ax = plt.subplots(figsize=(10, 10), layout='constrained')
convert_flux = headers[0]['photflam']
data = deepcopy(data_array[0]*convert_flux)
data[data <= data[data > 0.].min()] = data[data > 0.].min()
@@ -301,18 +301,15 @@ def crop_array(data_array, headers, error_array=None, data_mask=None, step=5, nu
xycoords='axes fraction')
ax.annotate(str(exptime)+" s", color='white', fontsize=10, xy=(0.80, 0.02),
xycoords='axes fraction')
ax.set( # title="Location of cropped image.",
xlabel='pixel offset',
ylabel='pixel offset')
ax.set(title="Location of cropped image.", xlabel='pixel offset', ylabel='pixel offset')
fig.subplots_adjust(hspace=0, wspace=0, right=0.85)
cbar_ax = fig.add_axes([0.9, 0.12, 0.02, 0.75])
fig.colorbar(im, cax=cbar_ax, label=r"Flux [$ergs \cdot cm^{-2} \cdot s^{-1} \cdot \AA^{-1}$]")
# fig.subplots_adjust(hspace=0, wspace=0, right=0.85)
# cbar_ax = fig.add_axes([0.9, 0.12, 0.02, 0.75])
fig.colorbar(im, ax=ax, label=r"Flux [$ergs \cdot cm^{-2} \cdot s^{-1} \cdot \AA^{-1}$]")
if savename is not None:
# fig.suptitle(savename+'_'+filt+'_crop_region')
fig.savefig("/".join([plots_folder, savename+'_'+filt+'_crop_region.png']),
bbox_inches='tight')
fig.savefig("/".join([plots_folder, savename+'_'+filt+'_crop_region.pdf']),
bbox_inches='tight', dpi=200)
plot_obs(data_array, headers, vmin=convert_flux*data_array[data_array > 0.].mean()/5.,
vmax=convert_flux*data_array[data_array > 0.].max(), rectangle=[rectangle,]*len(headers),
savename=savename+'_crop_region', plots_folder=plots_folder)
@@ -445,11 +442,11 @@ def get_error(data_array, headers, error_array=None, data_mask=None, sub_type=No
Returns:
data_array : numpy.ndarray
Array containing the data to study minus the background.
headers : header list
Updated headers associated with the images in data_array.
error_array : numpy.ndarray
Array containing the background values associated to the images in
data_array.
headers : header list
Updated headers associated with the images in data_array.
background : numpy.ndarray
Array containing the pixel background value for each image in
data_array.
@@ -955,6 +952,10 @@ def polarizer_avg(data_array, error_array, data_mask, headers, FWHM=None, scale=
err60_array = error_array[is_pol60]
err120_array = error_array[is_pol120]
# For a single observation, combination amount to a weighted gaussian
if np.max([is_pol0.sum(), is_pol60.sum(), is_pol120.sum()]) == 1 and smoothing.lower() in ['combine', 'combining']:
smoothing = 'weighted_gaussian'
if (FWHM is not None) and (smoothing.lower() in ['combine', 'combining']):
# Smooth by combining each polarizer images
pol0, err0 = smooth_data(pol0_array, err0_array, data_mask, headers0, FWHM=FWHM, scale=scale, smoothing=smoothing)
@@ -1227,10 +1228,12 @@ def compute_Stokes(data_array, error_array, data_mask, headers, FWHM=None, scale
QU_diluted_err = np.sqrt(np.sum(Stokes_cov[1, 2][mask]**2))
P_diluted = np.sqrt(Q_diluted**2+U_diluted**2)/I_diluted
P_diluted_err = (1./I_diluted)*np.sqrt((Q_diluted**2*Q_diluted_err**2 + U_diluted**2*U_diluted_err**2 + 2.*Q_diluted*U_diluted*QU_diluted_err)/(Q_diluted**2 + U_diluted**2) + ((Q_diluted/I_diluted)**2 + (U_diluted/I_diluted)**2)*I_diluted_err**2 - 2.*(Q_diluted/I_diluted)*IQ_diluted_err - 2.*(U_diluted/I_diluted)*IU_diluted_err)
P_diluted_err = (1./I_diluted)*np.sqrt((Q_diluted**2*Q_diluted_err**2 + U_diluted**2*U_diluted_err**2 + 2.*Q_diluted*U_diluted*QU_diluted_err)/(Q_diluted**2 + U_diluted **
2) + ((Q_diluted/I_diluted)**2 + (U_diluted/I_diluted)**2)*I_diluted_err**2 - 2.*(Q_diluted/I_diluted)*IQ_diluted_err - 2.*(U_diluted/I_diluted)*IU_diluted_err)
PA_diluted = princ_angle((90./np.pi)*np.arctan2(U_diluted, Q_diluted))
PA_diluted_err = (90./(np.pi*(Q_diluted**2 + U_diluted**2)))*np.sqrt(U_diluted**2*Q_diluted_err**2 + Q_diluted**2*U_diluted_err**2 - 2.*Q_diluted*U_diluted*QU_diluted_err)
PA_diluted_err = (90./(np.pi*(Q_diluted**2 + U_diluted**2)))*np.sqrt(U_diluted**2*Q_diluted_err **
2 + Q_diluted**2*U_diluted_err**2 - 2.*Q_diluted*U_diluted*QU_diluted_err)
for header in headers:
header['P_int'] = (P_diluted, 'Integrated polarisation degree')
@@ -1406,7 +1409,7 @@ def rotate_Stokes(I_stokes, Q_stokes, U_stokes, Stokes_cov, data_mask, headers,
for i, head in enumerate(headers):
ang[i] = -head['orientat']
ang = ang.mean()
alpha = np.radians(ang)
alpha = np.pi/180.*ang
mrot = np.array([[1., 0., 0.],
[0., np.cos(2.*alpha), np.sin(2.*alpha)],
[0, -np.sin(2.*alpha), np.cos(2.*alpha)]])
@@ -1486,10 +1489,12 @@ def rotate_Stokes(I_stokes, Q_stokes, U_stokes, Stokes_cov, data_mask, headers,
QU_diluted_err = np.sqrt(np.sum(new_Stokes_cov[1, 2][mask]**2))
P_diluted = np.sqrt(Q_diluted**2+U_diluted**2)/I_diluted
P_diluted_err = (1./I_diluted)*np.sqrt((Q_diluted**2*Q_diluted_err**2 + U_diluted**2*U_diluted_err**2 + 2.*Q_diluted*U_diluted*QU_diluted_err)/(Q_diluted**2 + U_diluted**2) + ((Q_diluted/I_diluted)**2 + (U_diluted/I_diluted)**2)*I_diluted_err**2 - 2.*(Q_diluted/I_diluted)*IQ_diluted_err - 2.*(U_diluted/I_diluted)*IU_diluted_err)
P_diluted_err = (1./I_diluted)*np.sqrt((Q_diluted**2*Q_diluted_err**2 + U_diluted**2*U_diluted_err**2 + 2.*Q_diluted*U_diluted*QU_diluted_err)/(Q_diluted**2 + U_diluted **
2) + ((Q_diluted/I_diluted)**2 + (U_diluted/I_diluted)**2)*I_diluted_err**2 - 2.*(Q_diluted/I_diluted)*IQ_diluted_err - 2.*(U_diluted/I_diluted)*IU_diluted_err)
PA_diluted = princ_angle((90./np.pi)*np.arctan2(U_diluted, Q_diluted))
PA_diluted_err = (90./(np.pi*(Q_diluted**2 + U_diluted**2)))*np.sqrt(U_diluted**2*Q_diluted_err**2 + Q_diluted**2*U_diluted_err**2 - 2.*Q_diluted*U_diluted*QU_diluted_err)
PA_diluted_err = (90./(np.pi*(Q_diluted**2 + U_diluted**2)))*np.sqrt(U_diluted**2*Q_diluted_err **
2 + Q_diluted**2*U_diluted_err**2 - 2.*Q_diluted*U_diluted*QU_diluted_err)
for header in new_headers:
header['P_int'] = (P_diluted, 'Integrated polarisation degree')

View File

@@ -6,7 +6,7 @@ from matplotlib.colors import LogNorm
Stokes_UV = fits.open("./data/MRK463E/5960/MRK463E_FOC_b0.05arcsec_c0.10arcsec.fits")
Stokes_IR = fits.open("./data/MRK463E/WFPC2/IR_rot_crop.fits")
Stokes_Xr = fits.open("./data/MRK463E/Chandra/4913/primary/acisf04913N004_cntr_img2.fits")
Stokes_Xr = fits.open("./data/MRK463E/Chandra/X_ray_crop.fits")
levels = np.geomspace(1., 99., 7)
@@ -14,12 +14,13 @@ levels = np.geomspace(1., 99., 7)
# A.plot(levels=levels, SNRp_cut=3.0, SNRi_cut=20.0, zoom=1, savename='./plots/MRK463E/Chandra_overplot.pdf')
B = overplot_chandra(Stokes_UV, Stokes_Xr, norm=LogNorm())
B.plot(levels=levels, SNRp_cut=3.0, SNRi_cut=30.0, vec_scale=3, zoom=1, savename='./plots/MRK463E/Chandra_overplot_forced.pdf')
B.plot(levels=levels, SNRp_cut=3.0, SNRi_cut=3.0, vec_scale=5, zoom=1, savename='./plots/MRK463E/Chandra_overplot_forced.pdf')
B.write_to(path1="./data/MRK463E/FOC_data_Chandra.fits", path2="./data/MRK463E/Chandra_data.fits", suffix="aligned")
# C = overplot_pol(Stokes_UV, Stokes_IR)
# C.plot(SNRp_cut=3.0, SNRi_cut=20.0, savename='./plots/MRK463E/IR_overplot.pdf')
levels = np.array([0.8, 2, 5, 10, 20, 50])/100.*Stokes_UV[0].header['photflam']
D = overplot_pol(Stokes_UV, Stokes_IR, norm=LogNorm())
D.plot(SNRp_cut=3.0, SNRi_cut=30.0, vec_scale=3, norm=LogNorm(1e-18, 1e-15), savename='./plots/MRK463E/IR_overplot_forced.pdf')
D.plot(levels=levels, SNRp_cut=3.0, SNRi_cut=3.0, vec_scale=5, norm=LogNorm(8.5e-18, 2.5e-15), savename='./plots/MRK463E/IR_overplot_forced.pdf')
D.write_to(path1="./data/MRK463E/FOC_data_WFPC.fits", path2="./data/MRK463E/WFPC_data.fits", suffix="aligned")

0
package/src/__init__.py Normal file
View File

View File

@@ -1,4 +1,4 @@
#!/usr/bin/python3
#!/usr/bin/python
from getopt import getopt, error as get_error
from sys import argv

View File

@@ -1,8 +1,8 @@
# !/usr/bin/env python
from lib.background import gauss, bin_centers
from lib.deconvolve import zeropad
from lib.reduction import align_data
from lib.plots import princ_angle
#!/usr/bin/python
from src.lib.background import gauss, bin_centers
from src.lib.deconvolve import zeropad
from src.lib.reduction import align_data
from src.lib.plots import princ_angle
from matplotlib.colors import LogNorm
from os.path import join as path_join
from astropy.io import fits

47
package/src/get_cdelt.py Executable file
View File

@@ -0,0 +1,47 @@
#!/usr/bin/python
def main(infiles=None):
    """
    Retrieve the native spatial resolution of each given observation.

    Parameters
    ----------
    infiles : list of str or None
        Full or relative paths to FITS files. If None or empty, a usage
        message is printed and the function returns 1.

    Returns
    -------
    int
        0 on success, 1 when no input files were provided.
    """
    from os.path import basename, splitext
    from warnings import catch_warnings, filterwarnings
    from astropy.io.fits import getheader
    from astropy.wcs import WCS, FITSFixedWarning
    from numpy.linalg import eig

    # Treat both None and an empty list as "no input" (the old `is None`
    # check let an empty list crash on prod[0]).
    if not infiles:
        print("Usage: \"python get_cdelt.py -f infiles\"")
        return 1
    cdelt = {}
    size = {}
    keys = []
    for filepath in infiles:
        with catch_warnings():
            # 'datfix' warnings are routine header fix-ups; silence them.
            filterwarnings('ignore', message="'datfix' made the change", category=FITSFixedWarning)
            wcs = WCS(getheader(filepath)).celestial
        # Key on the file name without its extension (previously a
        # hard-coded [:-5] slice that assumed a ".fits" suffix).
        key = splitext(basename(filepath))[0]
        keys.append(key)
        size[key] = wcs.array_shape
        if wcs.wcs.has_cd():
            # CD matrix present: its eigenvalues give the pixel scales.
            cdelt[key] = eig(wcs.wcs.cd)[0]*3600.
        else:
            cdelt[key] = wcs.wcs.cdelt*3600.
    print("Image name, native resolution in arcsec and shape")
    for key in keys:
        print(key, cdelt[key], size[key])
    return 0
# Script entry point: parse the list of FITS products from the command line
# and report each one's native spatial resolution via main().
if __name__ == "__main__":
    import argparse
    parser = argparse.ArgumentParser(description='Query MAST for target products')
    # nargs='*' accepts any number of paths; default None triggers the usage
    # message inside main().
    parser.add_argument('-f', '--files', metavar='path', required=False, nargs='*', help='the full or relative path to the data products', default=None)
    args = parser.parse_args()
    # NOTE(review): exitcode is captured but not visibly propagated to the
    # shell here — confirm a trailing exit(exitcode) exists past this view.
    exitcode = main(infiles=args.files)