11 Commits (main...czfork)

Author    SHA1        Message                                                          Date
sugar_jo  d505ef6f8a  merge CZ fork to testing, prepare pipeline for clenup and fix   2024-07-17 16:44:06 +02:00
sugar_jo  2d0d8105be  fix the fits header handling                                     2024-07-17 12:03:43 +08:00
sugar_jo  c41526c0a6  disable cropping in align_data when optimal_binning              2024-07-16 22:13:39 +08:00
sugar_jo  a72b799713  Update background.py                                             2024-07-16 22:06:46 +08:00
sugar_jo  fa4dce398f  change some variable names                                       2024-07-16 21:59:22 +08:00
sugar_jo  3c8ca6ac1a  Merge branch 'test' into main                                    2024-07-16 21:39:25 +08:00
sugar_jo  62aef1b1c4  add optimal_binning to plotting                                  2024-07-15 19:39:21 +08:00
sugar_jo  8e5f439259  add subtract_bkg funcition (Allow subtracting the bkg simpler)   2024-07-14 15:46:22 +08:00
sugar_jo  a4e8f51c50  Update reduction.py                                              2024-06-30 11:06:26 +08:00
sugar_jo  b176e7a56e  Update plots.py                                                  2024-06-28 17:33:05 +08:00
          69b3937e9c  Update reduction.py                                              2024-06-27 20:46:42 +08:00
26 changed files with 1829 additions and 2746 deletions

View File

@@ -1,6 +1,2 @@
# FOC_Reduction
FOC reduction pipeline
TODO:
- Add all polarimetry-capable instruments from HST (starting with FOS and ACS)
- Build science case for future UV polarimeters (AGN outflow geometry, dust scattering, torus outline ?)

Binary file not shown.

View File

@@ -1,10 +0,0 @@
MIT License
Copyright (c) 2024 Thibault Barnouin
Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.

View File

@@ -1,9 +1,6 @@
#!/usr/bin/env python3
#!/usr/bin/python
# -*- coding:utf-8 -*-
from pathlib import Path
from sys import path as syspath
syspath.append(str(Path(__file__).parent.parent))
# Project libraries
import numpy as np
@@ -173,7 +170,7 @@ def main(infiles, target=None, output_dir="./data/"):
# Reduction parameters
kwargs = {}
# Polarization map output
kwargs["P_cut"] = 0.99
kwargs["SNRp_cut"] = 3.0
kwargs["SNRi_cut"] = 1.0
kwargs["flux_lim"] = 1e-19, 3e-17
kwargs["scale_vec"] = 5
@@ -186,7 +183,9 @@ def main(infiles, target=None, output_dir="./data/"):
new_infiles = []
for i, group in enumerate(grouped_infiles):
new_infiles.append(FOC_reduction(target=target + "-" + str(i + 1), infiles=["/".join([data_folder, file]) for file in group], interactive=True)[0])
new_infiles.append(
FOC_reduction(target=target + "-" + str(i + 1), infiles=["/".join([data_folder, file]) for file in group], interactive=True)[0]
)
infiles = new_infiles

View File

@@ -1,14 +1,9 @@
#!/usr/bin/env python3
#!/usr/bin/python
# -*- coding:utf-8 -*-
"""
Main script where the steps of the FOC pipeline reduction are progressively added.
"""
from pathlib import Path
from sys import path as syspath
syspath.append(str(Path(__file__).parent.parent))
# Project libraries
from copy import deepcopy
from os import system
@@ -30,7 +25,7 @@ def main(target=None, proposal_id=None, infiles=None, output_dir="./data", crop=
# from lib.deconvolve import from_file_psf
psf = "gaussian" # Can be user-defined as well
# psf = from_file_psf(data_folder+psf_file)
psf_FWHM = 1.55
psf_FWHM = 3.1
psf_scale = "px"
psf_shape = None # (151, 151)
iterations = 1
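# A minimal sketch of a normalised Gaussian PSF kernel of given FWHM in pixels,
# illustrating what a "gaussian" psf with psf_FWHM = 3.1 amounts to; the name
# gaussian_psf_sketch and the default shape here are illustrative stand-ins,
# not the pipeline's gaussian_psf.
import numpy as np

def gaussian_psf_sketch(FWHM=3.1, shape=(31, 31)):
    sigma = FWHM / (2.0 * np.sqrt(2.0 * np.log(2.0)))  # FWHM -> standard deviation
    yy, xx = np.indices(shape)
    yy, xx = yy - (shape[0] - 1) / 2.0, xx - (shape[1] - 1) / 2.0  # centre the grid
    kernel = np.exp(-(xx**2 + yy**2) / (2.0 * sigma**2))
    return kernel / kernel.sum()  # normalise so flux is conserved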
@@ -41,8 +36,8 @@ def main(target=None, proposal_id=None, infiles=None, output_dir="./data", crop=
# Background estimation
error_sub_type = "freedman-diaconis" # sqrt, sturges, rice, scott, freedman-diaconis (default) or shape (example (51, 51))
subtract_error = 1.33
display_bkg = True
subtract_error = 1.0
display_bkg = False
# Data binning
pxsize = 0.05
@@ -59,19 +54,29 @@ def main(target=None, proposal_id=None, infiles=None, output_dir="./data", crop=
# Smoothing
smoothing_function = "combine" # gaussian_after, weighted_gaussian_after, gaussian, weighted_gaussian or combine
smoothing_FWHM = 0.075 # If None, no smoothing is done
smoothing_FWHM = 0.10 # If None, no smoothing is done
smoothing_scale = "arcsec" # pixel or arcsec
# Rotation
rotate_North = True
# Polarization map output
P_cut = 3  # if >= 1.0, cut on the signal-to-noise ratio; otherwise cut on the confidence level in Q, U
SNRp_cut = 3.0  # P measurements with SNR > 3
SNRi_cut = 1.0  # I measurements with SNR > 30, which implies an uncertainty in P of 4.7%.
flux_lim = None # lowest and highest flux displayed on plot, defaults to bkg and maximum in cut if None
scale_vec = 5
step_vec = 1  # plot all vectors in the array; if step_vec = 2, every other vector is plotted; if step_vec = 0, all vectors are displayed at full length
# Adaptive binning
# In order to perform optimal binning, there are several steps to follow (a toy sketch follows below):
# 1. Load the data again and preserve the full images
# 2. Skip the cropping step but use the same error and background estimation
# 3. Use the same alignment as the routine
# 4. Skip the rebinning step
# 5. Calculate the Stokes parameters without smoothing
optimal_binning = False
optimize = False
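# The five steps above, condensed into a toy flow on synthetic frames (numpy
# only; every pipeline call is replaced by a trivial stand-in, so this is an
# illustrative sketch of the control flow, not the pipeline's code):
import numpy as np

frames = np.random.rand(4, 64, 64) + 1.0  # 1. reloaded, full (uncropped) frames
bkg = np.array([f[f < np.percentile(f, 10)].mean() for f in frames])  # 2. same bkg estimate, no crop
frames = frames - bkg[:, None, None]  # background subtracted, as in subtract_bkg
# 3. alignment would happen here with cropping disabled; 4. rebinning is skipped
I_sketch = frames.sum(axis=0)  # 5. Stokes-like combination without smoothing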
# Pipeline start
# Step 1:
@@ -116,221 +121,592 @@ def main(target=None, proposal_id=None, infiles=None, output_dir="./data", crop=
if align_center is None:
figtype = "_".join([figtype, "not_aligned"] if figtype != "" else ["not_aligned"])
# Crop data to remove outside blank margins.
data_array, error_array, data_mask, headers = proj_red.crop_array(
data_array, headers, step=5, null_val=0.0, crop=True, inside=True, display=display_crop, savename=figname, plots_folder=plots_folder
)
if optimal_binning:
from lib.background import subtract_bkg
# Deconvolve data using Richardson-Lucy iterative algorithm with a gaussian PSF of given FWHM.
if deconvolve:
data_array = proj_red.deconvolve_array(data_array, headers, psf=psf, FWHM=psf_FWHM, scale=psf_scale, shape=psf_shape, iterations=iterations, algo=algo)
options = {"optimize": optimize, "optimal_binning": True}
# Estimate error from data background, estimated from sub-image of desired sub_shape.
background = None
data_array, error_array, headers, background = proj_red.get_error(
data_array,
headers,
error_array,
data_mask=data_mask,
sub_type=error_sub_type,
subtract_error=subtract_error,
display=display_bkg,
savename="_".join([figname, "errors"]),
plots_folder=plots_folder,
return_background=True,
)
# Step 1: Load the data again and preserve the full images
_data_array, _headers = deepcopy(data_array), deepcopy(headers) # Preserve full images
_data_mask = np.ones(_data_array[0].shape, dtype=bool)
# Rotate data to have same orientation
rotate_data = np.unique([np.round(float(head["ORIENTAT"]), 3) for head in headers]).size != 1
if rotate_data:
ang = np.mean([head["ORIENTAT"] for head in headers])
for head in headers:
head["ORIENTAT"] -= ang
data_array, error_array, data_mask, headers = proj_red.rotate_data(data_array, error_array, data_mask, headers)
if display_data:
# Step 2: Skip the cropping step but use the same error and background estimation (I don't understand why this is wrong)
data_array, error_array, headers = proj_red.crop_array(
data_array, headers, step=5, null_val=0.0, inside=True, display=display_crop, savename=figname, plots_folder=plots_folder
)
data_mask = np.ones(data_array[0].shape, dtype=bool)
background = None
_, _, _, background, error_bkg = proj_red.get_error(
data_array,
headers,
error_array,
data_mask=data_mask,
sub_type=error_sub_type,
subtract_error=subtract_error,
display=display_bkg,
savename="_".join([figname, "errors"]),
plots_folder=plots_folder,
return_background=True,
)
# _background is the same as background, but for the optimal binning
_background = None
_data_array, _error_array, _ = proj_red.get_error(
_data_array,
_headers,
error_array=None,
data_mask=_data_mask,
sub_type=error_sub_type,
subtract_error=False,
display=display_bkg,
savename="_".join([figname, "errors"]),
plots_folder=plots_folder,
return_background=False,
)
_error_bkg = np.ones_like(_data_array) * error_bkg[:, 0, 0, np.newaxis, np.newaxis]
_data_array, _error_array, _background, _ = subtract_bkg(_data_array, _error_array, _data_mask, background, _error_bkg)
# Step 3: Align and rescale images with oversampling. (cropping has to be disabled in the align_data function)
_data_array, _error_array, _headers, _, shifts, error_shifts = proj_red.align_data(
_data_array,
_headers,
error_array=_error_array,
background=_background,
upsample_factor=10,
ref_center=align_center,
return_shifts=True,
optimal_binning=True,
)
print("Image shifts: {} \nShifts uncertainty: {}".format(shifts, error_shifts))
_data_mask = np.ones(_data_array[0].shape, dtype=bool)
# Step 4: Compute Stokes I, Q, U
_background = np.array([np.array(bkg).reshape(1, 1) for bkg in _background])
_background_error = np.array(
[
np.array(
np.sqrt(
(bkg - _background[np.array([h["filtnam1"] == head["filtnam1"] for h in _headers], dtype=bool)].mean()) ** 2
/ np.sum([h["filtnam1"] == head["filtnam1"] for h in _headers])
)
).reshape(1, 1)
for bkg, head in zip(_background, _headers)
]
)
_I_stokes, _Q_stokes, _U_stokes, _Stokes_cov, _header_stokes = proj_red.compute_Stokes(
_data_array, _error_array, _data_mask, _headers, FWHM=None, scale=smoothing_scale, smoothing=smoothing_function, transmitcorr=transmitcorr
)
_I_bkg, _Q_bkg, _U_bkg, _S_cov_bkg, _header_bkg = proj_red.compute_Stokes(
_background,
_background_error,
np.array(True).reshape(1, 1),
_headers,
FWHM=None,
scale=smoothing_scale,
smoothing=smoothing_function,
transmitcorr=False,
)
# Step 5: Compute polarimetric parameters (polarization degree and angle).
_P, _debiased_P, _s_P, _s_P_P, _PA, _s_PA, _s_PA_P = proj_red.compute_pol(_I_stokes, _Q_stokes, _U_stokes, _Stokes_cov, _header_stokes)
_P_bkg, _debiased_P_bkg, _s_P_bkg, _s_P_P_bkg, _PA_bkg, _s_PA_bkg, _s_PA_P_bkg = proj_red.compute_pol(_I_bkg, _Q_bkg, _U_bkg, _S_cov_bkg, _header_bkg)
# Step 6: Save image to FITS.
figname = "_".join([figname, figtype]) if figtype != "" else figname
_Stokes_hdul = proj_fits.save_Stokes(
_I_stokes,
_Q_stokes,
_U_stokes,
_Stokes_cov,
_P,
_debiased_P,
_s_P,
_s_P_P,
_PA,
_s_PA,
_s_PA_P,
_header_stokes,
_data_mask,
figname,
data_folder=data_folder,
return_hdul=True,
)
# Step 6:
_data_mask = _Stokes_hdul["data_mask"].data.astype(bool)
print(
"F_int({0:.0f} Angs) = ({1} ± {2})e{3} ergs.cm^-2.s^-1.Angs^-1".format(
_header_stokes["PHOTPLAM"],
*sci_not(
_Stokes_hdul[0].data[_data_mask].sum() * _header_stokes["PHOTFLAM"],
np.sqrt(_Stokes_hdul[3].data[0, 0][_data_mask].sum()) * _header_stokes["PHOTFLAM"],
2,
out=int,
),
)
)
print("P_int = {0:.1f} ± {1:.1f} %".format(_header_stokes["p_int"] * 100.0, np.ceil(_header_stokes["sP_int"] * 1000.0) / 10.0))
print("PA_int = {0:.1f} ± {1:.1f} °".format(princ_angle(_header_stokes["pa_int"]), princ_angle(np.ceil(_header_stokes["sPA_int"] * 10.0) / 10.0)))
# Background values
print(
"F_bkg({0:.0f} Angs) = ({1} ± {2})e{3} ergs.cm^-2.s^-1.Angs^-1".format(
_header_stokes["PHOTFLAM"],
*sci_not(_I_bkg[0, 0] * _header_stokes["PHOTFLAM"], np.sqrt(_S_cov_bkg[0, 0][0, 0]) * _header_stokes["PHOTFLAM"], 2, out=int),
)
)
print("P_bkg = {0:.1f} ± {1:.1f} %".format(_debiased_P_bkg[0, 0] * 100.0, np.ceil(_s_P_bkg[0, 0] * 1000.0) / 10.0))
print("PA_bkg = {0:.1f} ± {1:.1f} °".format(princ_angle(_PA_bkg[0, 0]), princ_angle(np.ceil(_s_PA_bkg[0, 0] * 10.0) / 10.0)))
# Plot polarization map (Background is either total Flux, Polarization degree or Polarization degree error).
if pxscale.lower() not in ["full", "integrate"] and not interactive:
proj_plots.polarization_map(
deepcopy(_Stokes_hdul),
_data_mask,
SNRp_cut=SNRp_cut,
SNRi_cut=SNRi_cut,
flux_lim=flux_lim,
step_vec=step_vec,
vec_scale=scale_vec,
savename="_".join([figname]),
plots_folder=plots_folder,
**options,
)
proj_plots.polarization_map(
deepcopy(_Stokes_hdul),
_data_mask,
SNRp_cut=SNRp_cut,
SNRi_cut=SNRi_cut,
flux_lim=flux_lim,
step_vec=step_vec,
vec_scale=scale_vec,
savename="_".join([figname, "I"]),
plots_folder=plots_folder,
display="Intensity",
**options,
)
proj_plots.polarization_map(
deepcopy(_Stokes_hdul),
_data_mask,
SNRp_cut=SNRp_cut,
SNRi_cut=SNRi_cut,
flux_lim=flux_lim,
step_vec=step_vec,
vec_scale=scale_vec,
savename="_".join([figname, "P_flux"]),
plots_folder=plots_folder,
display="Pol_Flux",
**options,
)
proj_plots.polarization_map(
deepcopy(_Stokes_hdul),
_data_mask,
SNRp_cut=SNRp_cut,
SNRi_cut=SNRi_cut,
flux_lim=flux_lim,
step_vec=step_vec,
vec_scale=scale_vec,
savename="_".join([figname, "P"]),
plots_folder=plots_folder,
display="Pol_deg",
**options,
)
proj_plots.polarization_map(
deepcopy(_Stokes_hdul),
_data_mask,
SNRp_cut=SNRp_cut,
SNRi_cut=SNRi_cut,
flux_lim=flux_lim,
step_vec=step_vec,
vec_scale=scale_vec,
savename="_".join([figname, "PA"]),
plots_folder=plots_folder,
display="Pol_ang",
**options,
)
proj_plots.polarization_map(
deepcopy(_Stokes_hdul),
_data_mask,
SNRp_cut=SNRp_cut,
SNRi_cut=SNRi_cut,
flux_lim=flux_lim,
step_vec=step_vec,
vec_scale=scale_vec,
savename="_".join([figname, "I_err"]),
plots_folder=plots_folder,
display="I_err",
**options,
)
proj_plots.polarization_map(
deepcopy(_Stokes_hdul),
_data_mask,
SNRp_cut=SNRp_cut,
SNRi_cut=SNRi_cut,
flux_lim=flux_lim,
step_vec=step_vec,
vec_scale=scale_vec,
savename="_".join([figname, "P_err"]),
plots_folder=plots_folder,
display="Pol_deg_err",
**options,
)
proj_plots.polarization_map(
deepcopy(_Stokes_hdul),
_data_mask,
SNRp_cut=SNRp_cut,
SNRi_cut=SNRi_cut,
flux_lim=flux_lim,
step_vec=step_vec,
vec_scale=scale_vec,
savename="_".join([figname, "SNRi"]),
plots_folder=plots_folder,
display="SNRi",
**options,
)
proj_plots.polarization_map(
deepcopy(_Stokes_hdul),
_data_mask,
SNRp_cut=SNRp_cut,
SNRi_cut=SNRi_cut,
flux_lim=flux_lim,
step_vec=step_vec,
vec_scale=scale_vec,
savename="_".join([figname, "SNRp"]),
plots_folder=plots_folder,
display="SNRp",
**options,
)
elif not interactive:
proj_plots.polarization_map(
deepcopy(_Stokes_hdul),
_data_mask,
SNRp_cut=SNRp_cut,
SNRi_cut=SNRi_cut,
savename=figname,
plots_folder=plots_folder,
display="integrate",
**options,
)
elif pxscale.lower() not in ["full", "integrate"]:
proj_plots.pol_map(_Stokes_hdul, SNRp_cut=SNRp_cut, SNRi_cut=SNRi_cut, flux_lim=flux_lim)
else:
options = {"optimize": optimize, "optimal_binning": False}
# Crop data to remove outside blank margins.
data_array, error_array, headers = proj_red.crop_array(
data_array, headers, step=5, null_val=0.0, inside=True, display=display_crop, savename=figname, plots_folder=plots_folder
)
data_mask = np.ones(data_array[0].shape, dtype=bool)
# Deconvolve data using Richardson-Lucy iterative algorithm with a gaussian PSF of given FWHM.
if deconvolve:
data_array = proj_red.deconvolve_array(
data_array, headers, psf=psf, FWHM=psf_FWHM, scale=psf_scale, shape=psf_shape, iterations=iterations, algo=algo
)
# Estimate error from data background, estimated from sub-image of desired sub_shape.
background = None
data_array, error_array, headers, background, error_bkg = proj_red.get_error(
data_array,
headers,
error_array,
data_mask=data_mask,
sub_type=error_sub_type,
subtract_error=subtract_error,
display=display_bkg,
savename="_".join([figname, "errors"]),
plots_folder=plots_folder,
return_background=True,
)
# Align and rescale images with oversampling.
data_array, error_array, headers, data_mask, shifts, error_shifts = proj_red.align_data(
data_array, headers, error_array=error_array, background=background, upsample_factor=10, ref_center=align_center, return_shifts=True
)
if display_align:
print("Image shifts: {} \nShifts uncertainty: {}".format(shifts, error_shifts))
proj_plots.plot_obs(
data_array,
headers,
savename="_".join([figname, "rotate_data"]),
savename="_".join([figname, str(align_center)]),
plots_folder=plots_folder,
norm=LogNorm(
vmin=data_array[data_array > 0.0].min() * headers[0]["photflam"], vmax=data_array[data_array > 0.0].max() * headers[0]["photflam"]
),
)
# Align and rescale images with oversampling.
data_array, error_array, headers, data_mask, shifts, error_shifts = proj_red.align_data(
data_array,
headers,
error_array=error_array,
data_mask=data_mask,
background=background,
upsample_factor=10,
ref_center=align_center,
return_shifts=True,
)
# Rebin data to desired pixel size.
if (pxsize is not None) and not (pxsize == 1 and pxscale.lower() in ["px", "pixel", "pixels"]):
data_array, error_array, headers, Dxy, data_mask = proj_red.rebin_array(
data_array, error_array, headers, pxsize=pxsize, scale=pxscale, operation=rebin_operation, data_mask=data_mask
)
if display_align:
print("Image shifts: {} \nShifts uncertainty: {}".format(shifts, error_shifts))
proj_plots.plot_obs(
data_array,
headers,
shifts=shifts,
savename="_".join([figname, str(align_center)]),
plots_folder=plots_folder,
norm=LogNorm(vmin=data_array[data_array > 0.0].min() * headers[0]["photflam"], vmax=data_array[data_array > 0.0].max() * headers[0]["photflam"]),
)
# Rebin data to desired pixel size.
if (pxsize is not None) and not (pxsize == 1 and pxscale.lower() in ["px", "pixel", "pixels"]):
data_array, error_array, headers, Dxy, data_mask = proj_red.rebin_array(
data_array, error_array, headers, pxsize=pxsize, scale=pxscale, operation=rebin_operation, data_mask=data_mask
)
# Plot array for checking output
if display_data and pxscale.lower() not in ["full", "integrate"]:
proj_plots.plot_obs(
data_array,
headers,
savename="_".join([figname, "rebin"]),
plots_folder=plots_folder,
norm=LogNorm(vmin=data_array[data_array > 0.0].min() * headers[0]["photflam"], vmax=data_array[data_array > 0.0].max() * headers[0]["photflam"]),
)
# Rotate data to have same orientation
rotate_data = np.unique([np.round(float(head["ORIENTAT"]), 3) for head in headers]).size != 1
if rotate_data:
ang = np.mean([head["ORIENTAT"] for head in headers])
for head in headers:
head["ORIENTAT"] -= ang
data_array, error_array, data_mask, headers = proj_red.rotate_data(data_array, error_array, data_mask, headers)
if display_data:
proj_plots.plot_obs(
data_array,
headers,
savename="_".join([figname, "rotate_data"]),
plots_folder=plots_folder,
norm=LogNorm(
vmin=data_array[data_array > 0.0].min() * headers[0]["photflam"], vmax=data_array[data_array > 0.0].max() * headers[0]["photflam"]
),
)
background = np.array([np.array(bkg).reshape(1, 1) for bkg in background])
background_error = np.array(
[
np.array(
np.sqrt(
(bkg - background[np.array([h["filtnam1"] == head["filtnam1"] for h in headers], dtype=bool)].mean()) ** 2
/ np.sum([h["filtnam1"] == head["filtnam1"] for h in headers])
)
).reshape(1, 1)
for bkg, head in zip(background, headers)
]
)
# Step 2:
# Compute Stokes I, Q, U with smoothed polarized images
# SMOOTHING DISCUSSION :
# FWHM of FOC have been estimated at about 0.03" across 1500-5000 Angstrom band, which is about 2 detector pixels wide
# see Jedrzejewski, R.; Nota, A.; Hack, W. J., A Comparison Between FOC and WFPC2
# Bibcode : 1995chst.conf...10J
I_stokes, Q_stokes, U_stokes, Stokes_cov, Stokes_stat_cov, header_stokes = proj_red.compute_Stokes(
data_array, error_array, data_mask, headers, FWHM=smoothing_FWHM, scale=smoothing_scale, smoothing=smoothing_function, transmitcorr=transmitcorr
)
I_bkg, Q_bkg, U_bkg, S_cov_bkg, S_stat_cov_bkg, header_bkg = proj_red.compute_Stokes(
background, background_error, np.array(True).reshape(1, 1), headers, FWHM=None, scale=smoothing_scale, smoothing=smoothing_function, transmitcorr=False
)
# Plot array for checking output
if display_data and pxscale.lower() not in ["full", "integrate"]:
proj_plots.plot_obs(
data_array,
headers,
savename="_".join([figname, "rebin"]),
plots_folder=plots_folder,
norm=LogNorm(
vmin=data_array[data_array > 0.0].min() * headers[0]["photflam"], vmax=data_array[data_array > 0.0].max() * headers[0]["photflam"]
),
)
# Step 3:
# Rotate images to have North up
if rotate_North:
I_stokes, Q_stokes, U_stokes, Stokes_cov, Stokes_stat_cov, data_mask, header_stokes = proj_red.rotate_Stokes(
I_stokes, Q_stokes, U_stokes, Stokes_cov, Stokes_stat_cov, data_mask, header_stokes, SNRi_cut=None
)
I_bkg, Q_bkg, U_bkg, S_cov_bkg, S_stat_cov_bkg, data_mask_bkg, header_bkg = proj_red.rotate_Stokes(
I_bkg, Q_bkg, U_bkg, S_cov_bkg, S_stat_cov_bkg, np.array(True).reshape(1, 1), header_bkg, SNRi_cut=None
)
background = np.array([np.array(bkg).reshape(1, 1) for bkg in background])
background_error = np.array(
[
np.array(
np.sqrt(
(bkg - background[np.array([h["filtnam1"] == head["filtnam1"] for h in headers], dtype=bool)].mean()) ** 2
/ np.sum([h["filtnam1"] == head["filtnam1"] for h in headers])
)
).reshape(1, 1)
for bkg, head in zip(background, headers)
]
)
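# Hedged restatement of the background_error estimate above, using plain lists
# in place of FITS headers (the values and filter names are hypothetical):
# each exposure gets the deviation of its background from the mean background
# of exposures sharing its polariser filter, scaled by the number of such exposures.
import numpy as np

bkgs = np.array([1.2e-3, 1.5e-3, 9.0e-4])  # hypothetical per-exposure background levels
filts = np.array(["POL0", "POL60", "POL0"])  # hypothetical FILTNAM1 values
bkg_err = np.array(
    [np.sqrt((b - bkgs[filts == f].mean()) ** 2 / (filts == f).sum()) for b, f in zip(bkgs, filts)]
)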
# Compute polarimetric parameters (polarization degree and angle).
P, debiased_P, s_P, s_P_P, PA, s_PA, s_PA_P = proj_red.compute_pol(I_stokes, Q_stokes, U_stokes, Stokes_cov, Stokes_stat_cov, header_stokes)
P_bkg, debiased_P_bkg, s_P_bkg, s_P_P_bkg, PA_bkg, s_PA_bkg, s_PA_P_bkg = proj_red.compute_pol(I_bkg, Q_bkg, U_bkg, S_cov_bkg, S_stat_cov_bkg, header_bkg)
# Step 2:
# Compute Stokes I, Q, U with smoothed polarized images
# SMOOTHING DISCUSSION :
# FWHM of FOC have been estimated at about 0.03" across 1500-5000 Angstrom band, which is about 2 detector pixels wide
# see Jedrzejewski, R.; Nota, A.; Hack, W. J., A Comparison Between FOC and WFPC2
# Bibcode : 1995chst.conf...10J
I_stokes, Q_stokes, U_stokes, Stokes_cov, header_stokes = proj_red.compute_Stokes(
data_array, error_array, data_mask, headers, FWHM=smoothing_FWHM, scale=smoothing_scale, smoothing=smoothing_function, transmitcorr=transmitcorr
)
I_bkg, Q_bkg, U_bkg, S_cov_bkg, header_bkg = proj_red.compute_Stokes(
background,
background_error,
np.array(True).reshape(1, 1),
headers,
FWHM=None,
scale=smoothing_scale,
smoothing=smoothing_function,
transmitcorr=False,
)
# Step 4:
# Save image to FITS.
figname = "_".join([figname, figtype]) if figtype != "" else figname
Stokes_hdul = proj_fits.save_Stokes(
I_stokes,
Q_stokes,
U_stokes,
Stokes_cov,
Stokes_stat_cov,
P,
debiased_P,
s_P,
s_P_P,
PA,
s_PA,
s_PA_P,
header_stokes,
data_mask,
figname,
data_folder=data_folder,
return_hdul=True,
)
outfiles.append("/".join([data_folder, Stokes_hdul[0].header["FILENAME"] + ".fits"]))
# Step 3:
# Rotate images to have North up
if rotate_North:
I_stokes, Q_stokes, U_stokes, Stokes_cov, data_mask, header_stokes = proj_red.rotate_Stokes(
I_stokes, Q_stokes, U_stokes, Stokes_cov, data_mask, header_stokes, SNRi_cut=None
)
I_bkg, Q_bkg, U_bkg, S_cov_bkg, data_mask_bkg, header_bkg = proj_red.rotate_Stokes(
I_bkg, Q_bkg, U_bkg, S_cov_bkg, np.array(True).reshape(1, 1), header_bkg, SNRi_cut=None
)
# Step 5:
# crop to desired region of interest (roi)
if crop:
figname += "_crop"
stokescrop = proj_plots.crop_Stokes(deepcopy(Stokes_hdul), norm=LogNorm())
stokescrop.crop()
stokescrop.write_to("/".join([data_folder, figname + ".fits"]))
Stokes_hdul, header_stokes = stokescrop.hdul_crop, stokescrop.hdul_crop[0].header
# Compute polarimetric parameters (polarization degree and angle).
P, debiased_P, s_P, s_P_P, PA, s_PA, s_PA_P = proj_red.compute_pol(I_stokes, Q_stokes, U_stokes, Stokes_cov, header_stokes)
P_bkg, debiased_P_bkg, s_P_bkg, s_P_P_bkg, PA_bkg, s_PA_bkg, s_PA_P_bkg = proj_red.compute_pol(I_bkg, Q_bkg, U_bkg, S_cov_bkg, header_bkg)
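# compute_pol itself is not shown in this diff; as a reminder, the standard
# definitions it implements (before debiasing and error propagation) are:
import numpy as np

def pol_sketch(I, Q, U):
    P = np.sqrt(Q**2 + U**2) / I  # polarization degree
    PA = 0.5 * np.degrees(np.arctan2(U, Q))  # polarization angle
    return P, PA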
# Step 4:
# Save image to FITS.
figname = "_".join([figname, figtype]) if figtype != "" else figname
Stokes_hdul = proj_fits.save_Stokes(
I_stokes,
Q_stokes,
U_stokes,
Stokes_cov,
P,
debiased_P,
s_P,
s_P_P,
PA,
s_PA,
s_PA_P,
header_stokes,
data_mask,
figname,
data_folder=data_folder,
return_hdul=True,
)
outfiles.append("/".join([data_folder, Stokes_hdul[0].header["FILENAME"] + ".fits"]))
data_mask = Stokes_hdul["data_mask"].data.astype(bool)
print(
"F_int({0:.0f} Angs) = ({1} ± {2})e{3} ergs.cm^-2.s^-1.Angs^-1".format(
header_stokes["PHOTPLAM"],
*sci_not(
Stokes_hdul[0].data[data_mask].sum() * header_stokes["PHOTFLAM"],
np.sqrt(Stokes_hdul[3].data[0, 0][data_mask].sum()) * header_stokes["PHOTFLAM"],
2,
out=int,
),
)
)
# Step 5:
# crop to desired region of interest (roi)
if crop:
figname += "_crop"
stokescrop = proj_plots.crop_Stokes(deepcopy(Stokes_hdul), norm=LogNorm())
stokescrop.crop()
stokescrop.write_to("/".join([data_folder, figname + ".fits"]))
Stokes_hdul, header_stokes = stokescrop.hdul_crop, [dataset.header for dataset in stokescrop.hdul_crop]
outfiles.append("/".join([data_folder, Stokes_hdul[0].header["FILENAME"] + ".fits"]))
data_mask = Stokes_hdul["data_mask"].data.astype(bool)
print(
"F_int({0:.0f} Angs) = ({1} ± {2})e{3} ergs.cm^-2.s^-1.Angs^-1".format(
header_stokes["PHOTPLAM"],
*sci_not(
Stokes_hdul[0].data[data_mask].sum() * header_stokes["PHOTFLAM"],
np.sqrt(Stokes_hdul[3].data[0, 0][data_mask].sum()) * header_stokes["PHOTFLAM"],
2,
out=int,
),
)
)
print("P_int = {0:.1f} ± {1:.1f} %".format(header_stokes["p_int"] * 100.0, np.ceil(header_stokes["sP_int"] * 1000.0) / 10.0))
print("PA_int = {0:.1f} ± {1:.1f} °".format(princ_angle(header_stokes["pa_int"]), princ_angle(np.ceil(header_stokes["sPA_int"] * 10.0) / 10.0)))
# Background values
print(
"F_bkg({0:.0f} Angs) = ({1} ± {2})e{3} ergs.cm^-2.s^-1.Angs^-1".format(
header_stokes["photplam"], *sci_not(I_bkg[0, 0] * header_stokes["photflam"], np.sqrt(S_cov_bkg[0, 0][0, 0]) * header_stokes["photflam"], 2, out=int)
print("P_int = {0:.1f} ± {1:.1f} %".format(header_stokes["p_int"] * 100.0, np.ceil(header_stokes["sP_int"] * 1000.0) / 10.0))
print("PA_int = {0:.1f} ± {1:.1f} °".format(princ_angle(header_stokes["pa_int"]), princ_angle(np.ceil(header_stokes["sPA_int"] * 10.0) / 10.0)))
# Background values
print(
"F_bkg({0:.0f} Angs) = ({1} ± {2})e{3} ergs.cm^-2.s^-1.Angs^-1".format(
header_stokes["PHOTPLAM"],
*sci_not(I_bkg[0, 0] * header_stokes["PHOTPLAM"], np.sqrt(S_cov_bkg[0, 0][0, 0]) * header_stokes["PHOTPLAM"], 2, out=int),
)
)
)
print("P_bkg = {0:.1f} ± {1:.1f} %".format(debiased_P_bkg[0, 0] * 100.0, np.ceil(s_P_bkg[0, 0] * 1000.0) / 10.0))
print("PA_bkg = {0:.1f} ± {1:.1f} °".format(princ_angle(PA_bkg[0, 0]), princ_angle(np.ceil(s_PA_bkg[0, 0] * 10.0) / 10.0)))
# Plot polarization map (Background is either total Flux, Polarization degree or Polarization degree error).
if pxscale.lower() not in ["full", "integrate"] and not interactive:
proj_plots.polarization_map(
deepcopy(Stokes_hdul),
data_mask,
P_cut=P_cut,
SNRi_cut=SNRi_cut,
flux_lim=flux_lim,
step_vec=step_vec,
scale_vec=scale_vec,
savename="_".join([figname]),
plots_folder=plots_folder,
)
for figtype, figsuffix in zip(
["Intensity", "Pol_flux", "Pol_deg", "Pol_ang", "I_err", "P_err", "SNRi", "SNRp", "confp"],
["I", "P_flux", "P", "PA", "I_err", "P_err", "SNRi", "SNRp", "confP"],
):
try:
proj_plots.polarization_map(
deepcopy(Stokes_hdul),
data_mask,
P_cut=P_cut,
SNRi_cut=SNRi_cut,
flux_lim=flux_lim,
step_vec=step_vec,
scale_vec=scale_vec,
savename="_".join([figname, figsuffix]),
plots_folder=plots_folder,
display=figtype,
)
except ValueError:
pass
elif not interactive:
proj_plots.polarization_map(
deepcopy(Stokes_hdul), data_mask, P_cut=P_cut, SNRi_cut=SNRi_cut, savename=figname, plots_folder=plots_folder, display="integrate"
)
elif pxscale.lower() not in ["full", "integrate"]:
proj_plots.pol_map(Stokes_hdul, P_cut=P_cut, SNRi_cut=SNRi_cut, step_vec=step_vec, scale_vec=scale_vec, flux_lim=flux_lim)
print("P_bkg = {0:.1f} ± {1:.1f} %".format(debiased_P_bkg[0, 0] * 100.0, np.ceil(s_P_bkg[0, 0] * 1000.0) / 10.0))
print("PA_bkg = {0:.1f} ± {1:.1f} °".format(princ_angle(PA_bkg[0, 0]), princ_angle(np.ceil(s_PA_bkg[0, 0] * 10.0) / 10.0)))
# Plot polarization map (Background is either total Flux, Polarization degree or Polarization degree error).
if pxscale.lower() not in ["full", "integrate"] and not interactive:
proj_plots.polarization_map(
deepcopy(Stokes_hdul),
data_mask,
SNRp_cut=SNRp_cut,
SNRi_cut=SNRi_cut,
flux_lim=flux_lim,
step_vec=step_vec,
scale_vec=scale_vec,
savename="_".join([figname]),
plots_folder=plots_folder,
**options,
)
proj_plots.polarization_map(
deepcopy(Stokes_hdul),
data_mask,
SNRp_cut=SNRp_cut,
SNRi_cut=SNRi_cut,
flux_lim=flux_lim,
step_vec=step_vec,
scale_vec=scale_vec,
savename="_".join([figname, "I"]),
plots_folder=plots_folder,
display="Intensity",
**options,
)
proj_plots.polarization_map(
deepcopy(Stokes_hdul),
data_mask,
SNRp_cut=SNRp_cut,
SNRi_cut=SNRi_cut,
flux_lim=flux_lim,
step_vec=step_vec,
scale_vec=scale_vec,
savename="_".join([figname, "P_flux"]),
plots_folder=plots_folder,
display="Pol_Flux",
**options,
)
proj_plots.polarization_map(
deepcopy(Stokes_hdul),
data_mask,
SNRp_cut=SNRp_cut,
SNRi_cut=SNRi_cut,
flux_lim=flux_lim,
step_vec=step_vec,
scale_vec=scale_vec,
savename="_".join([figname, "P"]),
plots_folder=plots_folder,
display="Pol_deg",
**options,
)
proj_plots.polarization_map(
deepcopy(Stokes_hdul),
data_mask,
SNRp_cut=SNRp_cut,
SNRi_cut=SNRi_cut,
flux_lim=flux_lim,
step_vec=step_vec,
scale_vec=scale_vec,
savename="_".join([figname, "PA"]),
plots_folder=plots_folder,
display="Pol_ang",
**options,
)
proj_plots.polarization_map(
deepcopy(Stokes_hdul),
data_mask,
SNRp_cut=SNRp_cut,
SNRi_cut=SNRi_cut,
flux_lim=flux_lim,
step_vec=step_vec,
scale_vec=scale_vec,
savename="_".join([figname, "I_err"]),
plots_folder=plots_folder,
display="I_err",
**options,
)
proj_plots.polarization_map(
deepcopy(Stokes_hdul),
data_mask,
SNRp_cut=SNRp_cut,
SNRi_cut=SNRi_cut,
flux_lim=flux_lim,
step_vec=step_vec,
scale_vec=scale_vec,
savename="_".join([figname, "P_err"]),
plots_folder=plots_folder,
display="Pol_deg_err",
**options,
)
proj_plots.polarization_map(
deepcopy(Stokes_hdul),
data_mask,
SNRp_cut=SNRp_cut,
SNRi_cut=SNRi_cut,
flux_lim=flux_lim,
step_vec=step_vec,
scale_vec=scale_vec,
savename="_".join([figname, "SNRi"]),
plots_folder=plots_folder,
display="SNRi",
**options,
)
proj_plots.polarization_map(
deepcopy(Stokes_hdul),
data_mask,
SNRp_cut=SNRp_cut,
SNRi_cut=SNRi_cut,
flux_lim=flux_lim,
step_vec=step_vec,
scale_vec=scale_vec,
savename="_".join([figname, "SNRp"]),
plots_folder=plots_folder,
display="SNRp",
**options,
)
elif not interactive:
proj_plots.polarization_map(
deepcopy(Stokes_hdul),
data_mask,
SNRp_cut=SNRp_cut,
SNRi_cut=SNRi_cut,
savename=figname,
plots_folder=plots_folder,
display="integrate",
**options,
)
elif pxscale.lower() not in ["full", "integrate"]:
proj_plots.pol_map(Stokes_hdul, SNRp_cut=SNRp_cut, SNRi_cut=SNRi_cut, flux_lim=flux_lim)
return outfiles

View File

@@ -1,2 +1,3 @@
from .lib import *
from . import lib
from . import src
from . import FOC_reduction

View File

@@ -85,20 +85,12 @@ def display_bkg(data, background, std_bkg, headers, histograms=None, binning=Non
label=headers[i]["filtnam1"] + " (Obs " + str(filt_obs[headers[i]["filtnam1"]]) + ")",
)
ax_h.plot([background[i] * convert_flux[i], background[i] * convert_flux[i]], [hist.min(), hist.max()], "x--", color="C{0:d}".format(i), alpha=0.8)
if i == 0:
xmin, xmax = np.min(np.array(bins)[np.array(hist) > 5e1]) * convert_flux[0], np.max(np.array(bins)[np.array(hist) > 5e1]) * convert_flux[0]
else:
xmin, xmax = (
min(xmin, np.min(np.array(bins)[np.array(hist) > 5e1]) * convert_flux[0]),
max(xmax, np.max(np.array(bins)[np.array(hist) > 5e1]) * convert_flux[0]),
)
if coeff is not None:
# ax_h.plot(bins*convert_flux[i], gausspol(bins, *coeff[i]), '--', color="C{0:d}".format(i), alpha=0.8)
ax_h.plot(bins * convert_flux[i], gauss(bins, *coeff[i]), "--", color="C{0:d}".format(i), alpha=0.8)
ax_h.set_xscale("log")
ax_h.set_yscale("log")
ax_h.set_ylim([5e1, np.max([hist.max() for hist in histograms])])
ax_h.set_xlim([max(xmin, np.min(background * convert_flux) * 1e-2), min(xmax, np.max(background * convert_flux) * 1e2)])
ax_h.set_ylim([0.0, np.max([hist.max() for hist in histograms])])
ax_h.set_xlim([np.min(background * convert_flux) * 1e-2, np.max(background * convert_flux) * 1e2])
ax_h.set_xlabel(r"Flux [$ergs \cdot cm^{-2} \cdot s^{-1} \cdot \AA^{-1}$]")
ax_h.set_ylabel(r"Number of pixels in bin")
ax_h.set_title("Histogram for each observation")
@@ -134,7 +126,7 @@ def display_bkg(data, background, std_bkg, headers, histograms=None, binning=Non
ax2.set(xlabel="pixel offset", ylabel="pixel offset", aspect="equal")
fig2.subplots_adjust(hspace=0, wspace=0, right=1.0)
fig2.colorbar(im2, ax=ax2, location="right", shrink=0.60, aspect=50, pad=0.025, label=r"Flux [$ergs \cdot cm^{-2} \cdot s^{-1} \cdot \AA^{-1}$]")
fig2.colorbar(im2, ax=ax2, location="right", aspect=50, pad=0.025, label=r"Flux [$ergs \cdot cm^{-2} \cdot s^{-1} \cdot \AA^{-1}$]")
if savename is not None:
this_savename = deepcopy(savename)
@@ -259,23 +251,18 @@ def bkg_fit(data, error, mask, headers, subtract_error=True, display=False, save
weights = 1 / chi2**2
weights /= weights.sum()
bkg = np.sum(weights * (coeff[:, 1] + np.abs(coeff[:, 2]) * subtract_error))
bkg = np.sum(weights*(coeff[:, 1]+np.abs(coeff[:, 2]) * subtract_error))
error_bkg[i] *= bkg
n_error_array[i] = np.sqrt(n_error_array[i] ** 2 + error_bkg[i] ** 2)
# Subtract background
if np.abs(subtract_error) > 0:
n_data_array[i][mask] = n_data_array[i][mask] - bkg
n_data_array[i][np.logical_and(mask, n_data_array[i] <= 1e-3 * bkg)] = 1e-3 * bkg
std_bkg[i] = image[np.abs(image - bkg) / bkg < 1.0].std()
background[i] = bkg
if np.abs(subtract_error) > 0:
n_data_array, n_error_array, background, error_bkg = subtract_bkg(n_data_array, n_error_array, mask, background, error_bkg)
if display:
display_bkg(data, background, std_bkg, headers, histograms=histograms, binning=binning, coeff=coeff, savename=savename, plots_folder=plots_folder)
return n_data_array, n_error_array, headers, background
return n_data_array, n_error_array, headers, background, error_bkg
def bkg_hist(data, error, mask, headers, sub_type=None, subtract_error=True, display=False, savename=None, plots_folder=""):
@@ -294,7 +281,7 @@ def bkg_hist(data, error, mask, headers, sub_type=None, subtract_error=True, dis
sub_type : str or int, optional
If str, statistic rule to be used for the number of bins in counts/s.
If int, number of bins for the counts/s histogram.
Defaults to "Scott".
Defaults to "Freedman-Diaconis".
subtract_error : float or bool, optional
If float, factor by which the estimated background is multiplied.
If False, the background is not subtracted.
@@ -336,25 +323,25 @@ def bkg_hist(data, error, mask, headers, sub_type=None, subtract_error=True, dis
if sub_type is not None:
if isinstance(sub_type, int):
n_bins = sub_type
elif sub_type.lower() in ["square-root", "squareroot", "sqrt"]:
elif sub_type.lower() in ["sqrt"]:
n_bins = np.fix(np.sqrt(image[n_mask].size)).astype(int) # Square-root
elif sub_type.lower() in ["sturges"]:
n_bins = np.ceil(np.log2(image[n_mask].size)).astype(int) + 1 # Sturges
elif sub_type.lower() in ["rice"]:
n_bins = 2 * np.fix(np.power(image[n_mask].size, 1 / 3)).astype(int) # Rice
elif sub_type.lower() in ["freedman-diaconis", "freedmandiaconis", "freedman", "diaconis"]:
elif sub_type.lower() in ["scott"]:
n_bins = np.fix((image[n_mask].max() - image[n_mask].min()) / (3.5 * image[n_mask].std() / np.power(image[n_mask].size, 1 / 3))).astype(
int
) # Scott
else:
n_bins = np.fix(
(image[n_mask].max() - image[n_mask].min())
/ (2 * np.subtract(*np.percentile(image[n_mask], [75, 25])) / np.power(image[n_mask].size, 1 / 3))
).astype(int) # Freedman-Diaconis
else: # Fallback
n_bins = np.fix((image[n_mask].max() - image[n_mask].min()) / (3.5 * image[n_mask].std() / np.power(image[n_mask].size, 1 / 3))).astype(
int
) # Scott
else: # Default statistic
n_bins = np.fix((image[n_mask].max() - image[n_mask].min()) / (3.5 * image[n_mask].std() / np.power(image[n_mask].size, 1 / 3))).astype(
int
) # Scott
else:
n_bins = np.fix(
(image[n_mask].max() - image[n_mask].min()) / (2 * np.subtract(*np.percentile(image[n_mask], [75, 25])) / np.power(image[n_mask].size, 1 / 3))
).astype(int) # Freedman-Diaconis
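# The binning rules above, condensed into one helper (a sketch mirroring the
# formulas shown here, not the pipeline's exact code; x is the masked image):
import numpy as np

def n_bins_sketch(x, rule="freedman-diaconis"):
    n = x.size
    if rule == "sqrt":
        return int(np.sqrt(n))  # Square-root
    if rule == "sturges":
        return int(np.ceil(np.log2(n))) + 1  # Sturges
    if rule == "rice":
        return 2 * int(np.power(n, 1 / 3))  # Rice
    if rule == "scott":
        h = 3.5 * x.std() / np.power(n, 1 / 3)  # Scott bin width
    else:
        h = 2 * np.subtract(*np.percentile(x, [75, 25])) / np.power(n, 1 / 3)  # Freedman-Diaconis
    return int((x.max() - x.min()) / h)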
hist, bin_edges = np.histogram(np.log(image[n_mask]), bins=n_bins)
histograms.append(hist)
@@ -368,23 +355,19 @@ def bkg_hist(data, error, mask, headers, sub_type=None, subtract_error=True, dis
# popt, pcov = curve_fit(gausspol, binning[-1], hist, p0=p0)
popt, pcov = curve_fit(gauss, binning[-1], hist, p0=p0)
coeff.append(popt)
bkg = popt[1] + np.abs(popt[2]) * subtract_error
error_bkg[i] *= bkg
n_error_array[i] = np.sqrt(n_error_array[i] ** 2 + error_bkg[i] ** 2)
# Subtract background
if np.abs(subtract_error) > 0:
n_data_array[i][mask] = n_data_array[i][mask] - bkg
n_data_array[i][np.logical_and(mask, n_data_array[i] <= 1e-3 * bkg)] = 1e-3 * bkg
std_bkg[i] = image[np.abs(image - bkg) / bkg < 1.0].std()
background[i] = bkg
if np.abs(subtract_error) > 0:
n_data_array, n_error_array, background, error_bkg = subtract_bkg(n_data_array, n_error_array, mask, background, error_bkg)
if display:
display_bkg(data, background, std_bkg, headers, histograms=histograms, binning=binning, coeff=coeff, savename=savename, plots_folder=plots_folder)
return n_data_array, n_error_array, headers, background
return n_data_array, n_error_array, headers, background, error_bkg
def bkg_mini(data, error, mask, headers, sub_shape=(15, 15), subtract_error=True, display=False, savename=None, plots_folder=""):
@@ -466,19 +449,28 @@ def bkg_mini(data, error, mask, headers, sub_shape=(15, 15), subtract_error=True
# Compute error : root mean square of the background
sub_image = image[minima[0] : minima[0] + sub_shape[0], minima[1] : minima[1] + sub_shape[1]]
# bkg = np.std(sub_image) # Previously computed using standard deviation over the background
bkg = np.sqrt(np.sum(sub_image**2) / sub_image.size) * subtract_error if subtract_error > 0 else np.sqrt(np.sum(sub_image**2) / sub_image.size)
bkg = np.sqrt(np.sum(sub_image**2)/sub_image.size)*subtract_error if subtract_error > 0 else np.sqrt(np.sum(sub_image**2)/sub_image.size)
error_bkg[i] *= bkg
n_error_array[i] = np.sqrt(n_error_array[i] ** 2 + error_bkg[i] ** 2)
# Subtract background
if np.abs(subtract_error) > 0:
n_data_array[i][mask] = n_data_array[i][mask] - bkg
n_data_array[i][np.logical_and(mask, n_data_array[i] <= 1e-3 * bkg)] = 1e-3 * bkg
std_bkg[i] = image[np.abs(image - bkg) / bkg < 1.0].std()
background[i] = bkg
if np.abs(subtract_error) > 0:
n_data_array, n_error_array, background, error_bkg = subtract_bkg(n_data_array, n_error_array, mask, background, error_bkg)
if display:
display_bkg(data, background, std_bkg, headers, rectangle=rectangle, savename=savename, plots_folder=plots_folder)
return n_data_array, n_error_array, headers, background
return n_data_array, n_error_array, headers, background, error_bkg
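# Condensed sketch of bkg_mini's estimate above: slide a sub_shape window over
# the image, keep the one with the smallest integrated flux (no source
# assumption) and return its RMS as the background level (illustrative only):
import numpy as np

def bkg_mini_sketch(image, sub_shape=(15, 15)):
    h, w = sub_shape
    best, minima = np.inf, (0, 0)
    for y in range(image.shape[0] - h + 1):
        for x in range(image.shape[1] - w + 1):
            flux = image[y:y + h, x:x + w].sum()
            if flux < best:
                best, minima = flux, (y, x)
    sub_image = image[minima[0]:minima[0] + h, minima[1]:minima[1] + w]
    return np.sqrt(np.sum(sub_image**2) / sub_image.size)  # RMS of the darkest window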
def subtract_bkg(data, error, mask, background, error_bkg):
assert data.ndim == 3, "Input data must be a 3D stack of images."
n_data_array, n_error_array = deepcopy(data), deepcopy(error)
for i in range(data.shape[0]):
n_data_array[i][mask] = n_data_array[i][mask] - background[i]
n_data_array[i][np.logical_and(mask, n_data_array[i] <= 1e-3 * background[i])] = 1e-3 * background[i]
n_error_array[i] = np.sqrt(n_error_array[i]**2 + error_bkg[i]**2)
return n_data_array, n_error_array, background, error_bkg
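# Illustrative call of subtract_bkg with toy arrays (shapes and values are
# hypothetical; assumes the function above is in scope):
from copy import deepcopy  # required by subtract_bkg above
import numpy as np

data = np.full((2, 8, 8), 5e-3)
error = np.full_like(data, 1e-4)
mask = np.ones((8, 8), dtype=bool)
background = np.array([1e-3, 2e-3])
error_bkg = np.full_like(data, 5e-5)
data_sub, error_sub, background, error_bkg = subtract_bkg(data, error, mask, background, error_bkg)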

View File

@@ -48,16 +48,20 @@ def _upsampled_dft(data, upsampled_region_size, upsample_factor=1, axis_offsets=
"""
# if people pass in an integer, expand it to a list of equal-sized sections
if not hasattr(upsampled_region_size, "__iter__"):
upsampled_region_size = [upsampled_region_size] * data.ndim
upsampled_region_size = [
upsampled_region_size,
] * data.ndim
else:
if len(upsampled_region_size) != data.ndim:
raise ValueError("shape of upsampled region sizes must be equal to input data's number of dimensions.")
raise ValueError("shape of upsampled region sizes must be equal " "to input data's number of dimensions.")
if axis_offsets is None:
axis_offsets = [0] * data.ndim
axis_offsets = [
0,
] * data.ndim
else:
if len(axis_offsets) != data.ndim:
raise ValueError("number of axis offsets must be equal to input data's number of dimensions.")
raise ValueError("number of axis offsets must be equal to input " "data's number of dimensions.")
im2pi = 1j * 2 * np.pi

View File

@@ -286,13 +286,11 @@ def richardson_lucy(image, psf, iterations=20, clip=True, filter_epsilon=None):
image = image.astype(float_type, copy=False)
psf = psf.astype(float_type, copy=False)
psf /= psf.sum()
im_deconv = np.full(image.shape, 0.5, dtype=float_type)
im_deconv = image.copy()
psf_mirror = np.flip(psf)
eps = 1e-20
for _ in range(iterations):
conv = convolve(im_deconv, psf, mode="same") + eps
conv = convolve(im_deconv, psf, mode="same")
if filter_epsilon:
relative_blur = np.where(conv < filter_epsilon, 0, image / conv)
else:
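
For orientation, a minimal self-contained Richardson-Lucy iteration combining the two variants shown above (initialising from the image itself and guarding the division with a small eps) might look like the following sketch; it is not the pipeline's richardson_lucy:

import numpy as np
from scipy.signal import convolve

def richardson_lucy_sketch(image, psf, iterations=20, eps=1e-20):
    psf = psf / psf.sum()
    psf_mirror = np.flip(psf)
    im_deconv = image.astype(float).copy()  # start from the image, not a flat 0.5 field
    for _ in range(iterations):
        conv = convolve(im_deconv, psf, mode="same") + eps  # eps guards against division by zero
        relative_blur = image / conv
        im_deconv *= convolve(relative_blur, psf_mirror, mode="same")
    return im_deconv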

View File

@@ -16,7 +16,7 @@ from astropy.io import fits
from astropy.wcs import WCS
from .convex_hull import clean_ROI
from .utils import wcs_CD_to_PC, wcs_PA
from .utils import wcs_PA
def get_obs_data(infiles, data_folder="", compute_flux=False):
@@ -46,46 +46,32 @@ def get_obs_data(infiles, data_folder="", compute_flux=False):
data_array.append(f[0].data)
wcs_array.append(WCS(header=f[0].header, fobj=f).celestial)
f.flush()
# Save pixel area for flux density computation
if headers[i]["PXFORMT"] == "NORMAL":
headers[i]["PXAREA"] = 1.96e-4 # 14x14 milliarcsec squared pixel area in arcsec^2
elif headers[i]["PXFORMT"] == "ZOOM":
headers[i]["PXAREA"] = 4.06e-4 # 29x14 milliarcsec squared pixel area in arcsec^2
else:
headers[i]["PXAREA"] = 1.0 # unknown default to 1 arcsec^2
# Convert PHOTFLAM value from 1arcsec aperture to the pixel area
# headers[i]["PHOTFLAM"] *= np.pi / headers[i]["PXAREA"]
data_array = np.array(data_array, dtype=np.double)
# Prevent negative count value in imported data
for i in range(len(data_array)):
data_array[i][data_array[i] < 0.0] = 0.0
# Compute CDELT, ORIENTAT from header
# force WCS to convention PCi_ja unitary, cdelt in deg
for wcs, header in zip(wcs_array, headers):
new_wcs = wcs.deepcopy()
if new_wcs.wcs.has_cd():
if new_wcs.wcs.has_cd() or (new_wcs.wcs.cdelt[:2] == np.array([1.0, 1.0])).all():
# Update WCS with relevant information
del new_wcs.wcs.cd
keys = list(new_wcs.to_header().keys()) + ["CD1_1", "CD1_2", "CD1_3", "CD2_1", "CD2_2", "CD2_3", "CD3_1", "CD3_2", "CD3_3"]
for key in keys:
header.remove(key, ignore_missing=True)
new_cdelt = np.linalg.eigvals(wcs.wcs.cd)
if new_wcs.wcs.has_cd():
del new_wcs.wcs.cd
keys = list(new_wcs.to_header().keys()) + ["CD1_1", "CD1_2", "CD1_3", "CD2_1", "CD2_2", "CD2_3", "CD3_1", "CD3_2", "CD3_3"]
for key in keys:
header.remove(key, ignore_missing=True)
new_cdelt = np.linalg.eigvals(wcs.wcs.cd)
new_cdelt.sort()
new_wcs.wcs.pc = wcs.wcs.cd.dot(np.diag(1.0 / new_cdelt))
new_wcs.wcs.cdelt = new_cdelt
try:
header["ORIENTAT"] = float(header["ORIENTAT"])
except KeyError:
header["ORIENTAT"] = -wcs_CD_to_PC(new_wcs.wcs.cd)[1]
elif (new_wcs.wcs.cdelt[:2] != np.array([1.0, 1.0])).all():
try:
header["ORIENTAT"] = float(header["ORIENTAT"])
except KeyError:
header["ORIENTAT"] = -wcs_PA(new_wcs.wcs.pc, new_wcs.wcs.cdelt)
else:
print("Couldn't compute ORIENTAT from WCS")
for key, val in new_wcs.to_header().items():
header[key] = val
for key, val in new_wcs.to_header().items():
header[key] = val
try:
_ = header["ORIENTAT"]
except KeyError:
header["ORIENTAT"] = wcs_PA(new_wcs.wcs.pc[1, 0], np.diag(new_wcs.wcs.pc).mean())
# force WCS for POL60 to have same pixel size as POL0 and POL120
is_pol60 = np.array([head["filtnam1"].lower() == "pol60" for head in headers], dtype=bool)
@@ -106,23 +92,7 @@ def get_obs_data(infiles, data_folder="", compute_flux=False):
def save_Stokes(
I_stokes,
Q_stokes,
U_stokes,
Stokes_cov,
Stokes_stat_cov,
P,
debiased_P,
s_P,
s_P_P,
PA,
s_PA,
s_PA_P,
header_stokes,
data_mask,
filename,
data_folder="",
return_hdul=False,
I_stokes, Q_stokes, U_stokes, Stokes_cov, P, debiased_P, s_P, s_P_P, PA, s_PA, s_PA_P, header_stokes, data_mask, filename, data_folder="", return_hdul=False
):
"""
Save computed polarimetry parameters to a single fits file,
@@ -171,9 +141,7 @@ def save_Stokes(
header["TELESCOP"] = (header_stokes["TELESCOP"] if "TELESCOP" in list(header_stokes.keys()) else "HST", "telescope used to acquire data")
header["INSTRUME"] = (header_stokes["INSTRUME"] if "INSTRUME" in list(header_stokes.keys()) else "FOC", "identifier for instrument used to acuire data")
header["PHOTPLAM"] = (header_stokes["PHOTPLAM"], "Pivot Wavelength")
header["PHOTBW"] = (header_stokes["PHOTBW"], "RMS Bandwidth of the Filter and Detector")
header["PHOTFLAM"] = (header_stokes["PHOTFLAM"], "Inverse Sensitivity in DN/sec/cm**2/Angst")
header["PXAREA"] = (header_stokes["PXAREA"], "Pixel area in arcsec**2")
header["EXPTIME"] = (header_stokes["EXPTIME"], "Total exposure time in sec")
header["PROPOSID"] = (header_stokes["PROPOSID"], "PEP proposal identifier for observation")
header["TARGNAME"] = (header_stokes["TARGNAME"], "Target name")
@@ -202,15 +170,11 @@ def save_Stokes(
s_PA_P = s_PA_P[vertex[2] : vertex[3], vertex[0] : vertex[1]]
new_Stokes_cov = np.zeros((*Stokes_cov.shape[:-2], *shape[::-1]))
new_Stokes_stat_cov = np.zeros((*Stokes_stat_cov.shape[:-2], *shape[::-1]))
for i in range(3):
for j in range(3):
Stokes_cov[i, j][(1 - data_mask).astype(bool)] = 0.0
new_Stokes_cov[i, j] = Stokes_cov[i, j][vertex[2] : vertex[3], vertex[0] : vertex[1]]
Stokes_stat_cov[i, j][(1 - data_mask).astype(bool)] = 0.0
new_Stokes_stat_cov[i, j] = Stokes_stat_cov[i, j][vertex[2] : vertex[3], vertex[0] : vertex[1]]
Stokes_cov = new_Stokes_cov
Stokes_stat_cov = new_Stokes_stat_cov
data_mask = data_mask[vertex[2] : vertex[3], vertex[0] : vertex[1]]
data_mask = data_mask.astype(float, copy=False)
@@ -230,19 +194,18 @@ def save_Stokes(
[Q_stokes, "Q_stokes"],
[U_stokes, "U_stokes"],
[Stokes_cov, "IQU_cov_matrix"],
[Stokes_stat_cov, "IQU_stat_cov_matrix"],
[P, "Pol_deg"],
[debiased_P, "Pol_deg_debiased"],
[s_P, "Pol_deg_err"],
[s_P_P, "Pol_deg_stat_err"],
[s_P_P, "Pol_deg_err_Poisson_noise"],
[PA, "Pol_ang"],
[s_PA, "Pol_ang_err"],
[s_PA_P, "Pol_ang_stat_err"],
[s_PA_P, "Pol_ang_err_Poisson_noise"],
[data_mask, "Data_mask"],
]:
hdu_header = header.copy()
hdu_header["datatype"] = name
if not name[-10:] == "cov_matrix":
if not name == "IQU_cov_matrix":
data[(1 - data_mask).astype(bool)] = 0.0
hdu = fits.ImageHDU(data=data, header=hdu_header)
hdu.name = name

File diff suppressed because it is too large

View File

@@ -1,4 +1,4 @@
#!/usr/bin/env python3
#!/usr/bin/python3
# -*- coding:utf-8 -*-
"""
Library function to query and download datasets from the MAST API.
@@ -25,11 +25,11 @@ def divide_proposal(products):
"""
for pid in np.unique(products["Proposal ID"]):
obs = products[products["Proposal ID"] == pid].copy()
same_filt = np.unique(np.array(np.sum([obs["Filters"] == filt for filt in obs["Filters"]], axis=2) >= len(obs["Filters"][0]), dtype=bool), axis=0)
same_filt = np.unique(np.array(np.sum([obs["Filters"][:, 1:] == filt[1:] for filt in obs["Filters"]], axis=2) < 3, dtype=bool), axis=0)
if len(same_filt) > 1:
for filt in same_filt:
products["Proposal ID"][np.any([products["Dataset"] == dataset for dataset in obs["Dataset"][filt]], axis=0)] = "_".join(
[obs["Proposal ID"][filt][0], "_".join([fi for fi in obs["Filters"][filt][0] if fi[:-1] != "CLEAR"])]
[obs["Proposal ID"][filt][0], "_".join([fi for fi in obs["Filters"][filt][0][1:] if fi[:-1] != "CLEAR"])]
)
for pid in np.unique(products["Proposal ID"]):
obs = products[products["Proposal ID"] == pid].copy()
@@ -44,69 +44,38 @@ def divide_proposal(products):
return products
def get_product_list(target=None, proposal_id=None, instrument="foc"):
def get_product_list(target=None, proposal_id=None):
"""
Retrieve the products list for a given target from the MAST archive
"""
mission = MastMissions(mission="hst")
radius = "3"
select_cols = [
"sci_pep_id",
"sci_pi_last_name",
"sci_targname",
"sci_aper_1234",
"sci_spec_1234",
"sci_central_wavelength",
"sci_actual_duration",
"sci_instrume",
"sci_operating_mode",
"sci_data_set_name",
"sci_spec_1234",
"sci_actual_duration",
"sci_start_time",
"sci_stop_time",
"sci_refnum",
"sci_central_wavelength",
"sci_instrume",
"sci_aper_1234",
"sci_targname",
"sci_pep_id",
"sci_pi_last_name",
]
cols = [
"Proposal ID",
"PI last name",
"Target name",
"Aperture",
"Filters",
"Central wavelength",
"Exptime",
"Instrument",
"Operating Mode",
"Dataset",
"Start",
"Stop",
"References",
]
cols = ["Dataset", "Filters", "Exptime", "Start", "Stop", "Central wavelength", "Instrument", "Size", "Target name", "Proposal ID", "PI last name"]
if target is None:
target = input("Target name:\n>")
# Use query_object method to resolve the object name into coordinates
if instrument == "foc":
results = mission.query_object(
target, radius=radius, select_cols=select_cols, sci_spec_1234="POL*", sci_obs_type="image", sci_aec="S", sci_instrume="foc"
)
dataproduct_type = "image"
description = "DADS C0F file - Calibrated exposure WFPC/WFPC2/FOC/FOS/GHRS/HSP"
elif instrument == "fos":
results = mission.query_object(
target, radius=radius, select_cols=select_cols, sci_operating_mode="SPECTROPOLARIMETRY", sci_obs_type="spectrum", sci_aec="S", sci_instrume="fos"
)
dataproduct_type = "spectrum"
description = ["DADS C0F file - Calibrated exposure WFPC/WFPC2/FOC/FOS/GHRS/HSP", "DADS C3F file - Calibrated exposure GHRS/FOS/HSP"]
results = mission.query_object(target, radius=radius, select_cols=select_cols, sci_spec_1234="POL*", sci_obs_type="image", sci_aec="S", sci_instrume="foc")
for c, n_c in zip(select_cols, cols):
results.rename_column(c, n_c)
results["Proposal ID"] = Column(results["Proposal ID"], dtype="U35")
if instrument == "foc":
results["POLFilters"] = Column(np.array([filt.split(";")[0] for filt in results["Filters"]], dtype=str))
results["Filters"] = Column(np.array([filt.split(";")[1:] for filt in results["Filters"]], dtype=str))
else:
results["Filters"] = Column(np.array([filt.split(";") for filt in results["Filters"]], dtype=str))
results["Filters"] = Column(np.array([filt.split(";") for filt in results["Filters"]], dtype=str))
results["Start"] = Column(Time(results["Start"]))
results["Stop"] = Column(Time(results["Stop"]))
@@ -120,34 +89,20 @@ def get_product_list(target=None, proposal_id=None, instrument="foc"):
to_remove.append(i)
obs.remove_rows(to_remove)
# Remove observations for which a polarization filter is missing
if instrument == "foc":
polfilt = {"POL0": 0, "POL60": 1, "POL120": 2}
for pid in np.unique(obs["Proposal ID"]):
used_pol = np.zeros(3)
for dataset in obs[obs["Proposal ID"] == pid]:
used_pol[polfilt[dataset["POLFilters"]]] += 1
if np.any(used_pol < 1):
obs.remove_rows(np.arange(len(obs))[obs["Proposal ID"] == pid])
# Remove observations for which a spectropolarization has not been reduced
if instrument == "fos":
for pid in np.unique(obs["Proposal ID"]):
observations = Observations.query_criteria(proposal_id=pid.split("_")[0])
c3prod = Observations.filter_products(
Observations.get_product_list(observations),
productType=["SCIENCE"],
dataproduct_type=dataproduct_type,
calib_level=[2],
description=description[1],
)
if len(c3prod) < 1:
obs.remove_rows(np.arange(len(obs))[obs["Proposal ID"] == pid])
polfilt = {"POL0": 0, "POL60": 1, "POL120": 2}
for pid in np.unique(obs["Proposal ID"]):
used_pol = np.zeros(3)
for dataset in obs[obs["Proposal ID"] == pid]:
used_pol[polfilt[dataset["Filters"][0]]] += 1
if np.any(used_pol < 1):
obs.remove_rows(np.arange(len(obs))[obs["Proposal ID"] == pid])
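# Sketch of the completeness check above: a proposal is kept only when all
# three polariser filters appear among its datasets (toy values):
filters_in_proposal = ["POL0", "POL60", "POL0"]  # hypothetical FILTNAM1 values
complete = all(p in filters_in_proposal for p in ("POL0", "POL60", "POL120"))  # False here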
tab = unique(obs, ["Target name", "Proposal ID"])
obs["Obs"] = [np.argmax(np.logical_and(tab["Proposal ID"] == data["Proposal ID"], tab["Target name"] == data["Target name"])) + 1 for data in obs]
try:
n_obs = unique(obs[["Obs", "Filters", "Start", "Central wavelength", "Instrument", "Aperture", "Target name", "Proposal ID", "PI last name"]], "Obs")
n_obs = unique(obs[["Obs", "Filters", "Start", "Central wavelength", "Instrument", "Size", "Target name", "Proposal ID", "PI last name"]], "Obs")
except IndexError:
raise ValueError("There is no observation with polarimetry for {0:s} in HST/{1:s} Legacy Archive".format(target, instrument.upper()))
raise ValueError("There is no observation with POL0, POL60 and POL120 for {0:s} in HST/FOC Legacy Archive".format(target))
b = np.zeros(len(results), dtype=bool)
if proposal_id is not None and str(proposal_id) in obs["Proposal ID"]:
@@ -173,25 +128,31 @@ def get_product_list(target=None, proposal_id=None, instrument="foc"):
observations = Observations.query_criteria(obs_id=list(results["Dataset"][b]))
products = Observations.filter_products(
Observations.get_product_list(observations), productType=["SCIENCE"], dataproduct_type=dataproduct_type, calib_level=[2], description=description
Observations.get_product_list(observations),
productType=["SCIENCE"],
dataproduct_type=["image"],
calib_level=[2],
description="DADS C0F file - Calibrated exposure WFPC/WFPC2/FOC/FOS/GHRS/HSP",
)
products["proposal_id"] = Column(products["proposal_id"], dtype="U35")
products["target_name"] = Column(observations["target_name"])
for prod in products:
prod["proposal_id"] = results["Proposal ID"][results["Dataset"] == prod["productFilename"][: len(results["Dataset"][0])].upper()][0]
tab = unique(products, "proposal_id")
for prod in products:
prod["target_name"] = observations["target_name"][observations["obsid"] == prod["obsID"]][0]
tab = unique(products, ["target_name", "proposal_id"])
products["Obs"] = [np.argmax(tab["proposal_id"] == data["proposal_id"]) + 1 for data in products]
products["Obs"] = [np.argmax(np.logical_and(tab["proposal_id"] == data["proposal_id"], tab["target_name"] == data["target_name"])) + 1 for data in products]
return target, products
def retrieve_products(target=None, proposal_id=None, instrument="foc", output_dir="./data"):
def retrieve_products(target=None, proposal_id=None, output_dir="./data"):
"""
Given a target name and a proposal_id, create the local directories and retrieve the fits files from the MAST Archive
"""
target, products = get_product_list(target=target, proposal_id=proposal_id, instrument=instrument)
target, products = get_product_list(target=target, proposal_id=proposal_id)
prodpaths = []
# data_dir = path_join(output_dir, target)
out = ""
@@ -222,11 +183,10 @@ if __name__ == "__main__":
parser = argparse.ArgumentParser(description="Query MAST for target products")
parser.add_argument("-t", "--target", metavar="targetname", required=False, help="the name of the target", type=str, default=None)
parser.add_argument("-p", "--proposal_id", metavar="proposal_id", required=False, help="the proposal id of the data products", type=int, default=None)
parser.add_argument("-i", "--instrum", metavar="instrum", required=False, help="the instrument used for observation", type=str, default="foc")
parser.add_argument(
"-o", "--output_dir", metavar="directory_path", required=False, help="output directory path for the data products", type=str, default="./data"
)
args = parser.parse_args()
print(args.target)
prodpaths = retrieve_products(target=args.target, proposal_id=args.proposal_id, instrument=args.instrum.lower(), output_dir=args.output_dir)
prodpaths = retrieve_products(target=args.target, proposal_id=args.proposal_id)
print(prodpaths)

View File

@@ -191,8 +191,8 @@ def bin_ndarray(ndarray, new_shape, operation="sum"):
Example
-------
>>> m = np.arange(0, 100, 1).reshape((10, 10))
>>> n = bin_ndarray(m, new_shape=(5, 5), operation="sum")
>>> m = np.arange(0,100,1).reshape((10,10))
>>> n = bin_ndarray(m, new_shape=(5,5), operation='sum')
>>> print(n)
[[ 22 30 38 46 54]
@@ -224,9 +224,7 @@ def bin_ndarray(ndarray, new_shape, operation="sum"):
return ndarray
def crop_array(
data_array, headers, error_array=None, data_mask=None, step=5, null_val=None, crop=True, inside=False, display=False, savename=None, plots_folder=""
):
def crop_array(data_array, headers, error_array=None, data_mask=None, step=5, null_val=None, inside=False, display=False, savename=None, plots_folder=""):
"""
Homogeneously crop an array: all contained images will have the same shape.
'inside' parameter will decide how much should be cropped.
@@ -258,10 +256,6 @@ def crop_array(
If None, will be put to 75% of the mean value of the associated error
array.
Defaults to None.
crop : boolean, optional
If True, data_array will be cropped down to contain only relevant data.
If False, this information will be kept in the crop_mask output.
Defaults to True.
inside : boolean, optional
If True, the cropped image will be the maximum rectangle included
inside the image. If False, the cropped image will be the minimum
@@ -285,7 +279,9 @@ def crop_array(
if null_val is None:
null_val = [1.00 * error.mean() for error in error_array]
elif type(null_val) is float:
null_val = [null_val] * error_array.shape[0]
null_val = [
null_val,
] * error_array.shape[0]
vertex = np.zeros((data_array.shape[0], 4), dtype=int)
for i, image in enumerate(data_array): # Get vertex of the rectangular convex hull of each image
@@ -301,9 +297,6 @@ def crop_array(
v_array[1] = np.max(vertex[:, 1]).astype(int)
v_array[2] = np.min(vertex[:, 2]).astype(int)
v_array[3] = np.max(vertex[:, 3]).astype(int)
if data_mask is None:
data_mask = np.zeros(data_array[0].shape).astype(bool)
data_mask[v_array[0] : v_array[1], v_array[2] : v_array[3]] = True
new_shape = np.array([v_array[1] - v_array[0], v_array[3] - v_array[2]])
rectangle = [v_array[2], v_array[0], new_shape[1], new_shape[0], 0.0, "b"]
@@ -355,17 +348,20 @@ def crop_array(
headers,
vmin=convert_flux * data_array[data_array > 0.0].mean() / 5.0,
vmax=convert_flux * data_array[data_array > 0.0].max(),
rectangle=[rectangle] * len(headers),
rectangle=[
rectangle,
]
* len(headers),
savename=savename + "_crop_region",
plots_folder=plots_folder,
)
plt.show()
if crop:
if data_mask is not None:
crop_mask = data_mask[v_array[0] : v_array[1], v_array[2] : v_array[3]]
return crop_array, crop_error_array, crop_mask, crop_headers
else:
return data_array, error_array, data_mask, headers
return crop_array, crop_error_array, crop_headers
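The cropping logic above amounts to taking the rectangular hull of valid pixels across all images (the inside=False behaviour); a minimal sketch with illustrative names, not the pipeline's own implementation:
import numpy as np

def common_crop(images, null_val=0.0):
    # Union of the valid-pixel bounding boxes across all images
    rows, cols = [], []
    for im in images:
        valid = np.argwhere(im > null_val)
        rows += [valid[:, 0].min(), valid[:, 0].max()]
        cols += [valid[:, 1].min(), valid[:, 1].max()]
    r0, r1, c0, c1 = min(rows), max(rows), min(cols), max(cols)
    return [im[r0:r1 + 1, c0:c1 + 1] for im in images]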
def deconvolve_array(data_array, headers, psf="gaussian", FWHM=1.0, scale="px", shape=None, iterations=20, algo="richardson"):
@@ -420,12 +416,12 @@ def deconvolve_array(data_array, headers, psf="gaussian", FWHM=1.0, scale="px",
FWHM /= pxsize[0].min()
# Define Point-Spread-Function kernel
if isinstance(psf, np.ndarray) and (len(psf.shape) == 2):
kernel = psf
elif psf.lower() in ["gauss", "gaussian"]:
if psf.lower() in ["gauss", "gaussian"]:
if shape is None:
shape = np.min(data_array[0].shape) - 2, np.min(data_array[0].shape) - 2
kernel = gaussian_psf(FWHM=FWHM, shape=shape)
elif isinstance(psf, np.ndarray) and (len(psf.shape) == 2):
kernel = psf
else:
raise ValueError("{} is not a valid value for 'psf'".format(psf))
@@ -450,9 +446,9 @@ def get_error(
return_background=False,
):
"""
Estimate background intensity level from either fitting the intensity histogram
or by looking for the sub-image of smallest integrated intensity (no source assumption)
and define the background on the image by the standard deviation on this sub-image.
Look for sub-image of shape sub_shape that have the smallest integrated
flux (no source assumption) and define the background on the image by the
standard deviation on this sub-image.
----------
Inputs:
data_array : numpy.ndarray
@@ -472,7 +468,7 @@ def get_error(
If 'auto', look for optimal binning and fit the intensity histogram with a Gaussian.
If str or None, statistic rule to be used for the number of bins in counts/s.
If int, number of bins for the counts/s histogram.
If tuple, shape of the sub-image of lowest intensity to look for.
If tuple, shape of the sub-image to look for. Must be odd.
Defaults to None.
subtract_error : float or bool, optional
If float, factor to which the estimated background should be multiplied
@@ -532,27 +528,25 @@ def get_error(
# estimated to less than 3%
err_flat = data * 0.03
if sub_type is None:
n_data_array, c_error_bkg, headers, background = bkg_hist(
data, error, mask, headers, subtract_error=subtract_error, display=display, savename=savename, plots_folder=plots_folder
)
if (sub_type is None):
n_data_array, c_error_bkg, headers, background, error_bkg = bkg_hist(
data, error, mask, headers, subtract_error=subtract_error, display=display, savename=savename, plots_folder=plots_folder)
sub_type, subtract_error = "histogram ", str(int(subtract_error > 0.0))
elif isinstance(sub_type, str):
if sub_type.lower() in ["auto"]:
n_data_array, c_error_bkg, headers, background = bkg_fit(
data, error, mask, headers, subtract_error=subtract_error, display=display, savename=savename, plots_folder=plots_folder
)
if sub_type.lower() in ['auto']:
n_data_array, c_error_bkg, headers, background, error_bkg = bkg_fit(
data, error, mask, headers, subtract_error=subtract_error, display=display, savename=savename, plots_folder=plots_folder)
sub_type, subtract_error = "histogram fit ", "mean+%.1fsigma" % subtract_error if subtract_error != 0.0 else 0.0
else:
n_data_array, c_error_bkg, headers, background = bkg_hist(
data, error, mask, headers, sub_type=sub_type, subtract_error=subtract_error, display=display, savename=savename, plots_folder=plots_folder
)
n_data_array, c_error_bkg, headers, background, error_bkg = bkg_hist(
data, error, mask, headers, sub_type=sub_type, subtract_error=subtract_error, display=display, savename=savename, plots_folder=plots_folder)
sub_type, subtract_error = "histogram ", "mean+%.1fsigma" % subtract_error if subtract_error != 0.0 else 0.0
elif isinstance(sub_type, tuple):
n_data_array, c_error_bkg, headers, background = bkg_mini(
data, error, mask, headers, sub_shape=sub_type, subtract_error=subtract_error, display=display, savename=savename, plots_folder=plots_folder
)
n_data_array, c_error_bkg, headers, background, error_bkg = bkg_mini(
data, error, mask, headers, sub_shape=sub_type, subtract_error=subtract_error, display=display, savename=savename, plots_folder=plots_folder)
sub_type, subtract_error = "minimal flux ", "mean+%.1fsigma" % subtract_error if subtract_error != 0.0 else 0.0
else:
print("Warning: Invalid subtype.")
@@ -564,7 +558,7 @@ def get_error(
n_error_array = np.sqrt(err_wav**2 + err_psf**2 + err_flat**2 + c_error_bkg**2)
if return_background:
return n_data_array, n_error_array, headers, background
return n_data_array, n_error_array, headers, background, error_bkg # return background error as well
else:
return n_data_array, n_error_array, headers
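The histogram branch described above boils down to fitting a Gaussian to the low-intensity peak of the counts/s histogram; a hedged sketch (function and variable names are illustrative, not the pipeline's own):
import numpy as np
from scipy.optimize import curve_fit

def gauss(x, amp, mu, sigma):
    return amp * np.exp(-0.5 * ((x - mu) / sigma) ** 2)

def estimate_background(image, bins=100):
    counts, edges = np.histogram(image[image > 0.0], bins=bins)
    centers = 0.5 * (edges[:-1] + edges[1:])
    p0 = [counts.max(), centers[np.argmax(counts)], centers.std()]
    popt, _ = curve_fit(gauss, centers, counts, p0=p0)
    return popt[1], abs(popt[2])  # background level and its spread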
@@ -633,7 +627,12 @@ def rebin_array(data_array, error_array, headers, pxsize=2, scale="px", operatio
# Compute binning ratio
if scale.lower() in ["px", "pixel"]:
Dxy_arr[i] = np.array([pxsize] * 2)
Dxy_arr[i] = np.array(
[
pxsize,
]
* 2
)
scale = "px"
elif scale.lower() in ["arcsec", "arcseconds"]:
Dxy_arr[i] = np.array(pxsize / np.abs(w.wcs.cdelt) / 3600.0)
@@ -675,7 +674,6 @@ def rebin_array(data_array, error_array, headers, pxsize=2, scale="px", operatio
nw.wcs.crpix /= Dxy
nw.array_shape = new_shape
new_header["NAXIS1"], new_header["NAXIS2"] = nw.array_shape
new_header["PXAREA"] *= Dxy[0] * Dxy[1]
for key, val in nw.to_header().items():
new_header.set(key, val)
new_header["SAMPLING"] = (str(pxsize) + scale, "Resampling performed during reduction")
@@ -693,7 +691,7 @@ def rebin_array(data_array, error_array, headers, pxsize=2, scale="px", operatio
def align_data(
data_array, headers, error_array=None, data_mask=None, background=None, upsample_factor=1.0, ref_data=None, ref_center=None, return_shifts=False
data_array, headers, error_array=None, data_mask=None, background=None, upsample_factor=1.0, ref_data=None, ref_center=None, return_shifts=False, optimal_binning=False
):
"""
Align images in data_array using cross correlation, and rescale them to
@@ -772,12 +770,13 @@ def align_data(
full_headers.append(headers[0])
err_array = np.concatenate((error_array, [np.zeros(ref_data.shape)]), axis=0)
if data_mask is None:
full_array, err_array, full_headers = crop_array(full_array, full_headers, err_array, step=5, inside=False, null_val=0.0)
else:
full_array, err_array, data_mask, full_headers = crop_array(
full_array, full_headers, err_array, data_mask=data_mask, step=5, inside=False, null_val=0.0
)
if not optimal_binning:
if data_mask is None:
full_array, err_array, full_headers = crop_array(full_array, full_headers, err_array, step=5, inside=False, null_val=0.0)
else:
full_array, err_array, data_mask, full_headers = crop_array(
full_array, full_headers, err_array, data_mask=data_mask, step=5, inside=False, null_val=0.0
)
data_array, ref_data, headers = full_array[:-1], full_array[-1], full_headers[:-1]
error_array = err_array[:-1]
@@ -856,7 +855,9 @@ def align_data(
headers[i].update(headers_wcs[i].to_header())
data_mask = rescaled_mask.all(axis=0)
data_array, error_array, data_mask, headers = crop_array(rescaled_image, headers, rescaled_error, data_mask, null_val=0.01 * background)
if not optimal_binning:
data_array, error_array, data_mask, headers = crop_array(rescaled_image, headers, rescaled_error, data_mask, null_val=0.01*background)
if return_shifts:
return data_array, error_array, headers, data_mask, shifts, errors
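The alignment itself rests on sub-pixel cross-correlation; a minimal sketch using scikit-image's phase_cross_correlation (an assumption for illustration — the pipeline may implement its own correlator):
import numpy as np
from scipy.ndimage import shift as sc_shift
from skimage.registration import phase_cross_correlation

def align_to_ref(images, ref, upsample_factor=10):
    aligned = []
    for im in images:
        # Shift of 'im' relative to the reference frame, to sub-pixel accuracy
        shift_yx, _, _ = phase_cross_correlation(ref, im, upsample_factor=upsample_factor)
        aligned.append(sc_shift(im, shift_yx, order=1, cval=0.0))
    return np.array(aligned)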
@@ -937,7 +938,12 @@ def smooth_data(data_array, error_array, data_mask, headers, FWHM=1.5, scale="pi
dist_rc = np.where(data_mask, np.sqrt((r - xx) ** 2 + (c - yy) ** 2), fmax)
# Catch expected "OverflowWarning" as we overflow values that are not in the image
with warnings.catch_warnings(record=True) as w:
g_rc = np.array([np.exp(-0.5 * (dist_rc / stdev) ** 2) / (2.0 * np.pi * stdev**2)] * data_array.shape[0])
g_rc = np.array(
[
np.exp(-0.5 * (dist_rc / stdev) ** 2) / (2.0 * np.pi * stdev**2),
]
* data_array.shape[0]
)
# Apply weighted combination
smoothed[r, c] = np.where(data_mask[r, c], np.sum(data_array * weight * g_rc) / np.sum(weight * g_rc), data_array.mean(axis=0)[r, c])
error[r, c] = np.where(
@@ -1252,7 +1258,6 @@ def compute_Stokes(data_array, error_array, data_mask, headers, FWHM=None, scale
# Orientation and error for each polarizer
# fmax = np.finfo(np.float64).max
pol_flux = np.array([corr[0] * pol0, corr[1] * pol60, corr[2] * pol120])
pol_flux_corr = np.array([pf * 2.0 / t for (pf, t) in zip(pol_flux, transmit)])
coeff_stokes = np.zeros((3, 3))
# Coefficients linking each polarizer flux to each Stokes parameter
@@ -1268,7 +1273,6 @@ def compute_Stokes(data_array, error_array, data_mask, headers, FWHM=None, scale
# Normalization parameter for Stokes parameters computation
N = (coeff_stokes[0, :] * transmit / 2.0).sum()
coeff_stokes = coeff_stokes / N
coeff_stokes_corr = np.array([cs * t / 2.0 for (cs, t) in zip(coeff_stokes.T, transmit)]).T
I_stokes = np.zeros(pol_array[0].shape)
Q_stokes = np.zeros(pol_array[0].shape)
U_stokes = np.zeros(pol_array[0].shape)
@@ -1310,81 +1314,121 @@ def compute_Stokes(data_array, error_array, data_mask, headers, FWHM=None, scale
# Statistical error: Poisson noise is assumed
sigma_flux = np.array([np.sqrt(flux / head["exptime"]) for flux, head in zip(pol_flux, pol_headers)])
Stokes_stat_cov = np.zeros(Stokes_cov.shape)
for i in range(Stokes_cov.shape[0]):
Stokes_stat_cov[i, i] = np.sum([coeff_stokes[i, k] ** 2 * sigma_flux[k] ** 2 for k in range(len(sigma_flux))], axis=0)
for j in [k for k in range(3) if k > i]:
Stokes_stat_cov[i, j] = np.sum([abs(coeff_stokes[i, k] * coeff_stokes[j, k]) * sigma_flux[k] ** 2 for k in range(len(sigma_flux))], axis=0)
Stokes_stat_cov[j, i] = np.sum([abs(coeff_stokes[i, k] * coeff_stokes[j, k]) * sigma_flux[k] ** 2 for k in range(len(sigma_flux))], axis=0)
s_I2_stat = np.sum([coeff_stokes[0, i] ** 2 * sigma_flux[i] ** 2 for i in range(len(sigma_flux))], axis=0)
s_Q2_stat = np.sum([coeff_stokes[1, i] ** 2 * sigma_flux[i] ** 2 for i in range(len(sigma_flux))], axis=0)
s_U2_stat = np.sum([coeff_stokes[2, i] ** 2 * sigma_flux[i] ** 2 for i in range(len(sigma_flux))], axis=0)
pol_flux_corr = np.array([pf * 2.0 / t for (pf, t) in zip(pol_flux, transmit)])
coeff_stokes_corr = np.array([cs * t / 2.0 for (cs, t) in zip(coeff_stokes.T, transmit)]).T
# Compute the derivative of each Stokes parameter with respect to the polarizer orientation
dIQU_dtheta = np.zeros(Stokes_cov.shape)
# Derivative of I_stokes wrt theta_1, 2, 3
for j in range(3):
dIQU_dtheta[0, j] = (
2.0
* pol_eff[j]
/ N
* (
pol_eff[(j + 2) % 3] * np.cos(-2.0 * theta[(j + 2) % 3] + 2.0 * theta[j]) * (pol_flux_corr[(j + 1) % 3] - I_stokes)
- pol_eff[(j + 1) % 3] * np.cos(-2.0 * theta[j] + 2.0 * theta[(j + 1) % 3]) * (pol_flux_corr[(j + 2) % 3] - I_stokes)
+ coeff_stokes_corr[0, j] * (np.sin(2.0 * theta[j]) * Q_stokes - np.cos(2 * theta[j]) * U_stokes)
)
dI_dtheta1 = (
2.0
* pol_eff[0]
/ N
* (
pol_eff[2] * np.cos(-2.0 * theta[2] + 2.0 * theta[0]) * (pol_flux_corr[1] - I_stokes)
- pol_eff[1] * np.cos(-2.0 * theta[0] + 2.0 * theta[1]) * (pol_flux_corr[2] - I_stokes)
+ coeff_stokes_corr[0, 0] * (np.sin(2.0 * theta[0]) * Q_stokes - np.cos(2 * theta[0]) * U_stokes)
)
# Derivative of Q_stokes wrt theta_1, 2, 3
for j in range(3):
dIQU_dtheta[1, j] = (
2.0
* pol_eff[j]
/ N
* (
np.cos(2.0 * theta[j]) * (pol_flux_corr[(j + 1) % 3] - pol_flux_corr[(j + 2) % 3])
- (
pol_eff[(j + 2) % 3] * np.cos(-2.0 * theta[(j + 2) % 3] + 2.0 * theta[j])
- pol_eff[(j + 1) % 3] * np.cos(-2.0 * theta[j] + 2.0 * theta[(j + 1) % 3])
)
* Q_stokes
+ coeff_stokes_corr[1, j] * (np.sin(2.0 * theta[j]) * Q_stokes - np.cos(2 * theta[j]) * U_stokes)
)
)
dI_dtheta2 = (
2.0
* pol_eff[1]
/ N
* (
pol_eff[0] * np.cos(-2.0 * theta[0] + 2.0 * theta[1]) * (pol_flux_corr[2] - I_stokes)
- pol_eff[2] * np.cos(-2.0 * theta[1] + 2.0 * theta[2]) * (pol_flux_corr[0] - I_stokes)
+ coeff_stokes_corr[0, 1] * (np.sin(2.0 * theta[1]) * Q_stokes - np.cos(2 * theta[1]) * U_stokes)
)
# Derivative of U_stokes wrt theta_1, 2, 3
for j in range(3):
dIQU_dtheta[2, j] = (
2.0
* pol_eff[j]
/ N
* (
np.sin(2.0 * theta[j]) * (pol_flux_corr[(j + 1) % 3] - pol_flux_corr[(j + 2) % 3])
- (
pol_eff[(j + 2) % 3] * np.cos(-2.0 * theta[(j + 2) % 3] + 2.0 * theta[j])
- pol_eff[(j + 1) % 3] * np.cos(-2.0 * theta[j] + 2.0 * theta[(j + 1) % 3])
)
* U_stokes
+ coeff_stokes_corr[2, j] * (np.sin(2.0 * theta[j]) * Q_stokes - np.cos(2 * theta[j]) * U_stokes)
)
)
dI_dtheta3 = (
2.0
* pol_eff[2]
/ N
* (
pol_eff[1] * np.cos(-2.0 * theta[1] + 2.0 * theta[2]) * (pol_flux_corr[0] - I_stokes)
- pol_eff[0] * np.cos(-2.0 * theta[2] + 2.0 * theta[0]) * (pol_flux_corr[1] - I_stokes)
+ coeff_stokes_corr[0, 2] * (np.sin(2.0 * theta[2]) * Q_stokes - np.cos(2 * theta[2]) * U_stokes)
)
)
dI_dtheta = np.array([dI_dtheta1, dI_dtheta2, dI_dtheta3])
dQ_dtheta1 = (
2.0
* pol_eff[0]
/ N
* (
np.cos(2.0 * theta[0]) * (pol_flux_corr[1] - pol_flux_corr[2])
- (pol_eff[2] * np.cos(-2.0 * theta[2] + 2.0 * theta[0]) - pol_eff[1] * np.cos(-2.0 * theta[0] + 2.0 * theta[1])) * Q_stokes
+ coeff_stokes_corr[1, 0] * (np.sin(2.0 * theta[0]) * Q_stokes - np.cos(2 * theta[0]) * U_stokes)
)
)
dQ_dtheta2 = (
2.0
* pol_eff[1]
/ N
* (
np.cos(2.0 * theta[1]) * (pol_flux_corr[2] - pol_flux_corr[0])
- (pol_eff[0] * np.cos(-2.0 * theta[0] + 2.0 * theta[1]) - pol_eff[2] * np.cos(-2.0 * theta[1] + 2.0 * theta[2])) * Q_stokes
+ coeff_stokes_corr[1, 1] * (np.sin(2.0 * theta[1]) * Q_stokes - np.cos(2 * theta[1]) * U_stokes)
)
)
dQ_dtheta3 = (
2.0
* pol_eff[2]
/ N
* (
np.cos(2.0 * theta[2]) * (pol_flux_corr[0] - pol_flux_corr[1])
- (pol_eff[1] * np.cos(-2.0 * theta[1] + 2.0 * theta[2]) - pol_eff[0] * np.cos(-2.0 * theta[2] + 2.0 * theta[0])) * Q_stokes
+ coeff_stokes_corr[1, 2] * (np.sin(2.0 * theta[2]) * Q_stokes - np.cos(2 * theta[2]) * U_stokes)
)
)
dQ_dtheta = np.array([dQ_dtheta1, dQ_dtheta2, dQ_dtheta3])
dU_dtheta1 = (
2.0
* pol_eff[0]
/ N
* (
np.sin(2.0 * theta[0]) * (pol_flux_corr[1] - pol_flux_corr[2])
- (pol_eff[2] * np.cos(-2.0 * theta[2] + 2.0 * theta[0]) - pol_eff[1] * np.cos(-2.0 * theta[0] + 2.0 * theta[1])) * U_stokes
+ coeff_stokes_corr[2, 0] * (np.sin(2.0 * theta[0]) * Q_stokes - np.cos(2 * theta[0]) * U_stokes)
)
)
dU_dtheta2 = (
2.0
* pol_eff[1]
/ N
* (
np.sin(2.0 * theta[1]) * (pol_flux_corr[2] - pol_flux_corr[0])
- (pol_eff[0] * np.cos(-2.0 * theta[0] + 2.0 * theta[1]) - pol_eff[2] * np.cos(-2.0 * theta[1] + 2.0 * theta[2])) * U_stokes
+ coeff_stokes_corr[2, 1] * (np.sin(2.0 * theta[1]) * Q_stokes - np.cos(2 * theta[1]) * U_stokes)
)
)
dU_dtheta3 = (
2.0
* pol_eff[2]
/ N
* (
np.sin(2.0 * theta[2]) * (pol_flux_corr[0] - pol_flux_corr[1])
- (pol_eff[1] * np.cos(-2.0 * theta[1] + 2.0 * theta[2]) - pol_eff[0] * np.cos(-2.0 * theta[2] + 2.0 * theta[0])) * U_stokes
+ coeff_stokes_corr[2, 2] * (np.sin(2.0 * theta[2]) * Q_stokes - np.cos(2 * theta[2]) * U_stokes)
)
)
dU_dtheta = np.array([dU_dtheta1, dU_dtheta2, dU_dtheta3])
# Compute the uncertainty associated with the polarizers' orientation (see Kishimoto 1999)
Stokes_axis_cov = np.zeros(Stokes_cov.shape)
for i in range(Stokes_cov.shape[0]):
Stokes_axis_cov[i, i] = np.sum([dIQU_dtheta[i, k] ** 2 * globals()["sigma_theta"][k] ** 2 for k in range(len(globals()["sigma_theta"]))], axis=0)
for j in [k for k in range(3) if k > i]:
Stokes_axis_cov[i, j] = np.sum(
[abs(dIQU_dtheta[i, k] * dIQU_dtheta[j, k]) * globals()["sigma_theta"][k] ** 2 for k in range(len(globals()["sigma_theta"]))], axis=0
)
Stokes_axis_cov[j, i] = np.sum(
[abs(dIQU_dtheta[i, k] * dIQU_dtheta[j, k]) * globals()["sigma_theta"][k] ** 2 for k in range(len(globals()["sigma_theta"]))], axis=0
)
s_I2_axis = np.sum([dI_dtheta[i] ** 2 * globals()["sigma_theta"][i] ** 2 for i in range(len(globals()["sigma_theta"]))], axis=0)
s_Q2_axis = np.sum([dQ_dtheta[i] ** 2 * globals()["sigma_theta"][i] ** 2 for i in range(len(globals()["sigma_theta"]))], axis=0)
s_U2_axis = np.sum([dU_dtheta[i] ** 2 * globals()["sigma_theta"][i] ** 2 for i in range(len(globals()["sigma_theta"]))], axis=0)
# np.savetxt("output/sI_dir.txt", np.sqrt(s_I2_axis))
# np.savetxt("output/sQ_dir.txt", np.sqrt(s_Q2_axis))
# np.savetxt("output/sU_dir.txt", np.sqrt(s_U2_axis))
# Add quadratically the uncertainty to the Stokes covariance matrix
for i in range(Stokes_cov.shape[0]):
Stokes_cov[i, i] += Stokes_axis_cov[i, i] + Stokes_stat_cov[i, i]
for j in [k for k in range(Stokes_cov.shape[0]) if k > i]:
Stokes_cov[i, j] = np.sqrt(Stokes_cov[i, j] ** 2 + Stokes_axis_cov[i, j] ** 2 + Stokes_stat_cov[i, j] ** 2)
Stokes_cov[j, i] = np.sqrt(Stokes_cov[j, i] ** 2 + Stokes_axis_cov[j, i] ** 2 + Stokes_stat_cov[j, i] ** 2)
Stokes_cov[0, 0] += s_I2_axis + s_I2_stat
Stokes_cov[1, 1] += s_Q2_axis + s_Q2_stat
Stokes_cov[2, 2] += s_U2_axis + s_U2_stat
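The propagation above follows the usual first-order rule var(S) += sum_k (dS/dtheta_k)^2 * sigma_theta_k^2 (Kishimoto 1999); schematically:
import numpy as np

def axis_variance(dS_dtheta, sigma_theta):
    # dS_dtheta: per-polarizer derivative maps of one Stokes parameter;
    # sigma_theta: per-polarizer orientation uncertainties
    return np.sum([d ** 2 * s ** 2 for d, s in zip(dS_dtheta, sigma_theta)], axis=0)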
# Save values to single header
header_stokes = pol_headers[0]
@@ -1394,7 +1438,9 @@ def compute_Stokes(data_array, error_array, data_mask, headers, FWHM=None, scale
all_Q_stokes = np.zeros((np.unique(rotate).size, data_array.shape[1], data_array.shape[2]))
all_U_stokes = np.zeros((np.unique(rotate).size, data_array.shape[1], data_array.shape[2]))
all_Stokes_cov = np.zeros((np.unique(rotate).size, 3, 3, data_array.shape[1], data_array.shape[2]))
all_header_stokes = [{}] * np.unique(rotate).size
all_header_stokes = [
{},
] * np.unique(rotate).size
for i, rot in enumerate(np.unique(rotate)):
rot_mask = rotate == rot
@@ -1418,8 +1464,8 @@ def compute_Stokes(data_array, error_array, data_mask, headers, FWHM=None, scale
for i in range(3):
Stokes_cov[i, i] = np.sum([exp**2 * cov for exp, cov in zip(all_exp, all_Stokes_cov[:, i, i])], axis=0) / all_exp.sum() ** 2
for j in [x for x in range(3) if x != i]:
Stokes_cov[i, j] = np.sum([exp**2 * cov**2 for exp, cov in zip(all_exp, all_Stokes_cov[:, i, j])], axis=0) / all_exp.sum() ** 2
Stokes_cov[j, i] = np.sum([exp**2 * cov**2 for exp, cov in zip(all_exp, all_Stokes_cov[:, j, i])], axis=0) / all_exp.sum() ** 2
Stokes_cov[i, j] = np.sqrt(np.sum([exp**2 * cov**2 for exp, cov in zip(all_exp, all_Stokes_cov[:, i, j])], axis=0) / all_exp.sum() ** 2)
Stokes_cov[j, i] = np.sqrt(np.sum([exp**2 * cov**2 for exp, cov in zip(all_exp, all_Stokes_cov[:, j, i])], axis=0) / all_exp.sum() ** 2)
# Save values to single header
header_stokes = all_header_stokes[0]
@@ -1434,7 +1480,6 @@ def compute_Stokes(data_array, error_array, data_mask, headers, FWHM=None, scale
Q_stokes[np.isnan(Q_stokes)] = 0.0
U_stokes[np.isnan(U_stokes)] = 0.0
Stokes_cov[np.isnan(Stokes_cov)] = fmax
Stokes_stat_cov[np.isnan(Stokes_cov)] = fmax
if integrate:
# Compute integrated values for P, PA before any rotation
@@ -1448,47 +1493,29 @@ def compute_Stokes(data_array, error_array, data_mask, headers, FWHM=None, scale
IQ_diluted_err = np.sqrt(np.sum(Stokes_cov[0, 1][mask] ** 2))
IU_diluted_err = np.sqrt(np.sum(Stokes_cov[0, 2][mask] ** 2))
QU_diluted_err = np.sqrt(np.sum(Stokes_cov[1, 2][mask] ** 2))
I_diluted_stat_err = np.sqrt(np.sum(Stokes_stat_cov[0, 0][mask]))
Q_diluted_stat_err = np.sqrt(np.sum(Stokes_stat_cov[1, 1][mask]))
U_diluted_stat_err = np.sqrt(np.sum(Stokes_stat_cov[2, 2][mask]))
IQ_diluted_stat_err = np.sqrt(np.sum(Stokes_stat_cov[0, 1][mask] ** 2))
IU_diluted_stat_err = np.sqrt(np.sum(Stokes_stat_cov[0, 2][mask] ** 2))
QU_diluted_stat_err = np.sqrt(np.sum(Stokes_stat_cov[1, 2][mask] ** 2))
P_diluted = np.sqrt(Q_diluted**2 + U_diluted**2) / I_diluted
P_diluted_err = (1.0 / I_diluted) * np.sqrt(
P_diluted_err = np.sqrt(
(Q_diluted**2 * Q_diluted_err**2 + U_diluted**2 * U_diluted_err**2 + 2.0 * Q_diluted * U_diluted * QU_diluted_err) / (Q_diluted**2 + U_diluted**2)
+ ((Q_diluted / I_diluted) ** 2 + (U_diluted / I_diluted) ** 2) * I_diluted_err**2
- 2.0 * (Q_diluted / I_diluted) * IQ_diluted_err
- 2.0 * (U_diluted / I_diluted) * IU_diluted_err
)
P_diluted_stat_err = (
P_diluted
/ I_diluted
* np.sqrt(
I_diluted_stat_err**2
- 2.0 / (I_diluted * P_diluted**2) * (Q_diluted * IQ_diluted_stat_err + U_diluted * IU_diluted_stat_err)
+ 1.0
/ (I_diluted**2 * P_diluted**4)
* (Q_diluted**2 * Q_diluted_stat_err**2 + U_diluted**2 * U_diluted_stat_err**2 + 2.0 * Q_diluted * U_diluted * QU_diluted_stat_err)
)
)
debiased_P_diluted = np.sqrt(P_diluted**2 - P_diluted_stat_err**2) if P_diluted**2 > P_diluted_stat_err**2 else 0.0
PA_diluted = princ_angle((90.0 / np.pi) * np.arctan2(U_diluted, Q_diluted))
PA_diluted_err = (90.0 / (np.pi * (Q_diluted**2 + U_diluted**2))) * np.sqrt(
U_diluted**2 * Q_diluted_err**2 + Q_diluted**2 * U_diluted_err**2 - 2.0 * Q_diluted * U_diluted * QU_diluted_err
)
header_stokes["P_int"] = (debiased_P_diluted, "Integrated polarization degree")
header_stokes["P_int"] = (P_diluted, "Integrated polarization degree")
header_stokes["sP_int"] = (np.ceil(P_diluted_err * 1000.0) / 1000.0, "Integrated polarization degree error")
header_stokes["PA_int"] = (PA_diluted, "Integrated polarization angle")
header_stokes["sPA_int"] = (np.ceil(PA_diluted_err * 10.0) / 10.0, "Integrated polarization angle error")
return I_stokes, Q_stokes, U_stokes, Stokes_cov, Stokes_stat_cov, header_stokes
return I_stokes, Q_stokes, U_stokes, Stokes_cov, header_stokes
def compute_pol(I_stokes, Q_stokes, U_stokes, Stokes_cov, Stokes_stat_cov, header_stokes):
def compute_pol(I_stokes, Q_stokes, U_stokes, Stokes_cov, header_stokes):
"""
Compute the polarization degree (in %) and angle (in deg) and their
respective errors from given Stokes parameters.
@@ -1563,44 +1590,27 @@ def compute_pol(I_stokes, Q_stokes, U_stokes, Stokes_cov, Stokes_stat_cov, heade
s_P[np.isnan(s_P)] = fmax
s_PA[np.isnan(s_PA)] = fmax
# Errors on P, PA supposing Poisson noise
s_P_P = np.ones(I_stokes.shape) * fmax
s_PA_P = np.ones(I_stokes.shape) * fmax
maskP = np.logical_and(mask, P > 0.0)
s_P_P[maskP] = (
P[maskP]
/ I_stokes[maskP]
* np.sqrt(
Stokes_stat_cov[0, 0][maskP]
- 2.0 / (I_stokes[maskP] * P[maskP] ** 2) * (Q_stokes[maskP] * Stokes_stat_cov[0, 1][maskP] + U_stokes[maskP] * Stokes_stat_cov[0, 2][maskP])
+ 1.0
/ (I_stokes[maskP] ** 2 * P[maskP] ** 4)
* (
Q_stokes[maskP] ** 2 * Stokes_stat_cov[1, 1][maskP]
+ U_stokes[maskP] ** 2 * Stokes_stat_cov[2, 2][maskP]
+ 2.0 * Q_stokes[maskP] * U_stokes[maskP] * Stokes_stat_cov[1, 2][maskP]
)
)
)
s_PA_P[maskP] = (
90.0
/ (np.pi * I_stokes[maskP] ** 2 * P[maskP] ** 2)
* np.sqrt(
Q_stokes[maskP] ** 2 * Stokes_stat_cov[2, 2][maskP]
+ U_stokes[maskP] ** 2 * Stokes_stat_cov[1, 1][maskP]
- 2.0 * Q_stokes[maskP] * U_stokes[maskP] * Stokes_stat_cov[1, 2][maskP]
)
)
# Catch expected "OverflowWarning" as wrong pixel have an overflowing error
with warnings.catch_warnings(record=True) as _:
mask2 = P**2 >= s_P_P**2
mask2 = P**2 >= s_P**2
debiased_P = np.zeros(I_stokes.shape)
debiased_P[mask2] = np.sqrt(P[mask2] ** 2 - s_P_P[mask2] ** 2)
debiased_P[mask2] = np.sqrt(P[mask2] ** 2 - s_P[mask2] ** 2)
if (debiased_P > 1.0).any():
print("WARNING : found {0:d} pixels for which debiased_P > 100%".format(debiased_P[debiased_P > 1.0].size))
# Compute the total exposure time so that
# I_stokes*exp_tot = N_tot the total number of events
exp_tot = header_stokes["exptime"]
# print("Total exposure time : {} sec".format(exp_tot))
N_obs = I_stokes * exp_tot
# Errors on P, PA supposing Poisson noise
s_P_P = np.ones(I_stokes.shape) * fmax
s_P_P[mask] = np.sqrt(2.0) / np.sqrt(N_obs[mask]) * 100.0
s_PA_P = np.ones(I_stokes.shape) * fmax
s_PA_P[mask2] = s_P_P[mask2] / (2.0 * P[mask2]) * 180.0 / np.pi
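The debiasing step above subtracts the statistical error in quadrature, clipping at zero where noise dominates; schematically:
import numpy as np

def debias(P, s_P):
    # sqrt(P^2 - s_P^2) where P > s_P, zero otherwise
    return np.where(P ** 2 > s_P ** 2, np.sqrt(np.clip(P ** 2 - s_P ** 2, 0.0, None)), 0.0)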
# Nan handling :
P[np.isnan(P)] = 0.0
s_P[np.isnan(s_P)] = fmax
@@ -1612,7 +1622,7 @@ def compute_pol(I_stokes, Q_stokes, U_stokes, Stokes_cov, Stokes_stat_cov, heade
return P, debiased_P, s_P, s_P_P, PA, s_PA, s_PA_P
def rotate_Stokes(I_stokes, Q_stokes, U_stokes, Stokes_cov, Stokes_stat_cov, data_mask, header_stokes, SNRi_cut=None):
def rotate_Stokes(I_stokes, Q_stokes, U_stokes, Stokes_cov, data_mask, header_stokes, SNRi_cut=None):
"""
Use scipy.ndimage.rotate to rotate I_stokes to an angle, and a rotation
matrix to rotate Q, U of a given angle in degrees and update header
@@ -1629,11 +1639,7 @@ def rotate_Stokes(I_stokes, Q_stokes, U_stokes, Stokes_cov, Stokes_stat_cov, dat
Image (2D floats) containing the Stokes parameters accounting for
+45/-45deg linear polarization intensity
Stokes_cov : numpy.ndarray
Covariance matrix containing all uncertainties of the Stokes
parameters I, Q, U.
Stokes_stat_cov : numpy.ndarray
Covariance matrix containing statistical uncertainty of the Stokes
parameters I, Q, U.
Covariance matrix of the Stokes parameters I, Q, U.
data_mask : numpy.ndarray
2D boolean array delimiting the data to work on.
header_stokes : astropy.fits.header.Header
@@ -1655,8 +1661,6 @@ def rotate_Stokes(I_stokes, Q_stokes, U_stokes, Stokes_cov, Stokes_stat_cov, dat
accounting for +45/-45deg linear polarization intensity.
new_Stokes_cov : numpy.ndarray
Updated covariance matrix of the Stokes parameters I, Q, U.
new_Stokes_stat_cov : numpy.ndarray
Updated statistical covariance matrix of the Stokes parameters I, Q, U.
new_header_stokes : astropy.fits.header.Header
Updated Header file associated with the Stokes fluxes accounting
for the new orientation angle.
@@ -1688,9 +1692,11 @@ def rotate_Stokes(I_stokes, Q_stokes, U_stokes, Stokes_cov, Stokes_stat_cov, dat
Q_stokes = zeropad(Q_stokes, shape)
U_stokes = zeropad(U_stokes, shape)
data_mask = zeropad(data_mask, shape)
Stokes_cov = zeropad(Stokes_cov, [*Stokes_cov.shape[:-2], *shape])
new_I_stokes = np.zeros(shape)
new_Q_stokes = np.zeros(shape)
new_U_stokes = np.zeros(shape)
new_Stokes_cov = np.zeros((*Stokes_cov.shape[:-2], *shape))
# Rotate original images using scipy.ndimage.rotate
new_I_stokes = sc_rotate(I_stokes, ang, order=1, reshape=False, cval=0.0)
@@ -1699,10 +1705,6 @@ def rotate_Stokes(I_stokes, Q_stokes, U_stokes, Stokes_cov, Stokes_stat_cov, dat
new_data_mask = sc_rotate(data_mask.astype(float) * 10.0, ang, order=1, reshape=False, cval=0.0)
new_data_mask[new_data_mask < 1.0] = 0.0
new_data_mask = new_data_mask.astype(bool)
# Rotate covariance matrix
Stokes_cov = zeropad(Stokes_cov, [*Stokes_cov.shape[:-2], *shape])
new_Stokes_cov = np.zeros((*Stokes_cov.shape[:-2], *shape))
for i in range(3):
for j in range(3):
new_Stokes_cov[i, j] = sc_rotate(Stokes_cov[i, j], ang, order=1, reshape=False, cval=0.0)
@@ -1713,17 +1715,6 @@ def rotate_Stokes(I_stokes, Q_stokes, U_stokes, Stokes_cov, Stokes_stat_cov, dat
new_I_stokes[i, j], new_Q_stokes[i, j], new_U_stokes[i, j] = np.dot(mrot, np.array([new_I_stokes[i, j], new_Q_stokes[i, j], new_U_stokes[i, j]])).T
new_Stokes_cov[:, :, i, j] = np.dot(mrot, np.dot(new_Stokes_cov[:, :, i, j], mrot.T))
# Rotate statistical covariance matrix
Stokes_stat_cov = zeropad(Stokes_stat_cov, [*Stokes_stat_cov.shape[:-2], *shape])
new_Stokes_stat_cov = np.zeros((*Stokes_stat_cov.shape[:-2], *shape))
for i in range(3):
for j in range(3):
new_Stokes_stat_cov[i, j] = sc_rotate(Stokes_stat_cov[i, j], ang, order=1, reshape=False, cval=0.0)
new_Stokes_stat_cov[i, i] = np.abs(new_Stokes_stat_cov[i, i])
for i in range(shape[0]):
for j in range(shape[1]):
new_Stokes_stat_cov[:, :, i, j] = np.dot(mrot, np.dot(new_Stokes_stat_cov[:, :, i, j], mrot.T))
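Under a frame rotation by an angle alpha, I is unchanged while (Q, U) rotate by 2*alpha, which is what the mrot products implement; a compact sketch (with the sign convention assumed here):
import numpy as np

def rotate_QU(Q, U, ang_deg):
    a = 2.0 * np.radians(ang_deg)
    return Q * np.cos(a) + U * np.sin(a), -Q * np.sin(a) + U * np.cos(a)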
# Update headers to new angle
mrot = np.array([[np.cos(-alpha), -np.sin(-alpha)], [np.sin(-alpha), np.cos(-alpha)]])
@@ -1735,6 +1726,10 @@ def rotate_Stokes(I_stokes, Q_stokes, U_stokes, Stokes_cov, Stokes_stat_cov, dat
new_wcs.wcs.set()
for key, val in new_wcs.to_header().items():
new_header_stokes.set(key, val)
if new_wcs.wcs.pc[0, 0] == 1.0:
new_header_stokes.set("PC1_1", 1.0)
if new_wcs.wcs.pc[1, 1] == 1.0:
new_header_stokes.set("PC2_2", 1.0)
new_header_stokes["ORIENTAT"] += ang
# Nan handling :
@@ -1752,18 +1747,12 @@ def rotate_Stokes(I_stokes, Q_stokes, U_stokes, Stokes_cov, Stokes_stat_cov, dat
I_diluted = new_I_stokes[mask].sum()
Q_diluted = new_Q_stokes[mask].sum()
U_diluted = new_U_stokes[mask].sum()
I_diluted_err = np.sqrt(np.sum(Stokes_cov[0, 0][mask]))
Q_diluted_err = np.sqrt(np.sum(Stokes_cov[1, 1][mask]))
U_diluted_err = np.sqrt(np.sum(Stokes_cov[2, 2][mask]))
IQ_diluted_err = np.sqrt(np.sum(Stokes_cov[0, 1][mask] ** 2))
IU_diluted_err = np.sqrt(np.sum(Stokes_cov[0, 2][mask] ** 2))
QU_diluted_err = np.sqrt(np.sum(Stokes_cov[1, 2][mask] ** 2))
I_diluted_stat_err = np.sqrt(np.sum(Stokes_stat_cov[0, 0][mask]))
Q_diluted_stat_err = np.sqrt(np.sum(Stokes_stat_cov[1, 1][mask]))
U_diluted_stat_err = np.sqrt(np.sum(Stokes_stat_cov[2, 2][mask]))
IQ_diluted_stat_err = np.sqrt(np.sum(Stokes_stat_cov[0, 1][mask] ** 2))
IU_diluted_stat_err = np.sqrt(np.sum(Stokes_stat_cov[0, 2][mask] ** 2))
QU_diluted_stat_err = np.sqrt(np.sum(Stokes_stat_cov[1, 2][mask] ** 2))
I_diluted_err = np.sqrt(np.sum(new_Stokes_cov[0, 0][mask]))
Q_diluted_err = np.sqrt(np.sum(new_Stokes_cov[1, 1][mask]))
U_diluted_err = np.sqrt(np.sum(new_Stokes_cov[2, 2][mask]))
IQ_diluted_err = np.sqrt(np.sum(new_Stokes_cov[0, 1][mask] ** 2))
IU_diluted_err = np.sqrt(np.sum(new_Stokes_cov[0, 2][mask] ** 2))
QU_diluted_err = np.sqrt(np.sum(new_Stokes_cov[1, 2][mask] ** 2))
P_diluted = np.sqrt(Q_diluted**2 + U_diluted**2) / I_diluted
P_diluted_err = (1.0 / I_diluted) * np.sqrt(
@@ -1772,30 +1761,18 @@ def rotate_Stokes(I_stokes, Q_stokes, U_stokes, Stokes_cov, Stokes_stat_cov, dat
- 2.0 * (Q_diluted / I_diluted) * IQ_diluted_err
- 2.0 * (U_diluted / I_diluted) * IU_diluted_err
)
P_diluted_stat_err = (
P_diluted
/ I_diluted
* np.sqrt(
I_diluted_stat_err**2
- 2.0 / (I_diluted * P_diluted**2) * (Q_diluted * IQ_diluted_stat_err + U_diluted * IU_diluted_stat_err)
+ 1.0
/ (I_diluted**2 * P_diluted**4)
* (Q_diluted**2 * Q_diluted_stat_err**2 + U_diluted**2 * U_diluted_stat_err**2 + 2.0 * Q_diluted * U_diluted * QU_diluted_stat_err)
)
)
debiased_P_diluted = np.sqrt(P_diluted**2 - P_diluted_stat_err**2) if P_diluted**2 > P_diluted_stat_err**2 else 0.0
PA_diluted = princ_angle((90.0 / np.pi) * np.arctan2(U_diluted, Q_diluted))
PA_diluted_err = (90.0 / (np.pi * (Q_diluted**2 + U_diluted**2))) * np.sqrt(
U_diluted**2 * Q_diluted_err**2 + Q_diluted**2 * U_diluted_err**2 - 2.0 * Q_diluted * U_diluted * QU_diluted_err
)
new_header_stokes["P_int"] = (debiased_P_diluted, "Integrated polarization degree")
new_header_stokes["P_int"] = (P_diluted, "Integrated polarization degree")
new_header_stokes["sP_int"] = (np.ceil(P_diluted_err * 1000.0) / 1000.0, "Integrated polarization degree error")
new_header_stokes["PA_int"] = (PA_diluted, "Integrated polarization angle")
new_header_stokes["sPA_int"] = (np.ceil(PA_diluted_err * 10.0) / 10.0, "Integrated polarization angle error")
return new_I_stokes, new_Q_stokes, new_U_stokes, new_Stokes_cov, new_Stokes_stat_cov, new_data_mask, new_header_stokes
return new_I_stokes, new_Q_stokes, new_U_stokes, new_Stokes_cov, new_data_mask, new_header_stokes
def rotate_data(data_array, error_array, data_mask, headers):
@@ -1825,6 +1802,8 @@ def rotate_data(data_array, error_array, data_mask, headers):
Updated list of headers corresponding to the reduced images accounting
for the new orientation angle.
"""
# Rotate I_stokes, Q_stokes, U_stokes using rotation matrix
old_center = np.array(data_array[0].shape) / 2
shape = np.fix(np.array(data_array[0].shape) * np.sqrt(2.5)).astype(int)
new_center = np.array(shape) / 2
@@ -1869,4 +1848,4 @@ def rotate_data(data_array, error_array, data_mask, headers):
for i in range(new_data_array.shape[0]):
new_data_array[i][new_data_array[i] < 0.0] = 0.0
return new_data_array, new_error_array, new_data_mask, new_headers
return new_data_array, new_error_array, new_data_mask, new_headers


@@ -4,14 +4,6 @@ import numpy as np
def rot2D(ang):
"""
Return the 2D rotation matrix of given angle in degrees
----------
Inputs:
ang : float
Angle in degrees
----------
Returns:
rot_mat : numpy.ndarray
2D matrix of shape (2,2) to perform vector rotation at angle "ang".
"""
alpha = np.pi * ang / 180
return np.array([[np.cos(alpha), np.sin(alpha)], [-np.sin(alpha), np.cos(alpha)]])
@@ -21,14 +13,6 @@ def princ_angle(ang):
"""
Return the principal angle in the 0° to 180° quadrant, as PA is always
defined modulo 180°.
----------
Inputs:
ang : float, numpy.ndarray
Angle in degrees. Can be an array of angles.
----------
Returns:
princ_ang : float, numpy.ndarray
Principal angle in the 0°-180° quadrant in the same shape as input.
"""
if not isinstance(ang, np.ndarray):
A = np.array([ang])
@@ -44,100 +28,9 @@ def princ_angle(ang):
return A[0]
def PCconf(QN, UN, QN_ERR, UN_ERR):
"""
Compute the two-parameter confidence level on the polarisation degree and
polarisation angle from the PCUBE analysis.
----------
Inputs:
QN : float, numpy.ndarray
Normalized Q Stokes flux.
UN : float, numpy.ndarray
Normalized U Stokes flux.
QN_ERR : float, numpy.ndarray
Normalized error on Q Stokes flux.
UN_ERR : float, numpy.ndarray
Normalized error on U Stokes flux.
----------
Returns:
conf : numpy.ndarray
2D matrix of same shape as input containing the confidence on the polarization
computation between 0 and 1 for 2 parameters of interest (Q and U Stokes fluxes).
"""
mask = np.logical_and(QN_ERR > 0.0, UN_ERR > 0.0)
conf = np.full(QN.shape, -1.0)
chi2 = QN**2 / QN_ERR**2 + UN**2 / UN_ERR**2
conf[mask] = 1.0 - np.exp(-0.5 * chi2[mask])
return conf
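An illustrative use of PCconf on the normalized Stokes maps (array names as in the surrounding code):
conf = PCconf(QN, UN, QN_ERR, UN_ERR)
mask_99 = conf > 0.99  # pixels polarized at more than 99% confidence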
def CenterConf(mask, PA, sPA):
"""
Compute the confidence map for the position of the center of emission.
----------
Inputs:
mask : bool, numpy.ndarray
Mask of the polarization vectors from which the center of emission should be drawn.
PA : float, numpy.ndarray
2D matrix containing the computed polarization angle.
sPA : float, numpy.ndarray
2D matrix containing the total uncertainties on the polarization angle.
----------
Returns:
conf : numpy.ndarray
2D matrix of same shape as input containing the confidence on the polarization
computation between 0 and 1 for 2 parameters of interest (Q and U Stokes fluxes).
"""
chi2 = np.full(PA.shape, np.nan, dtype=np.float64)
conf = np.full(PA.shape, -1.0, dtype=np.float64)
yy, xx = np.indices(PA.shape)
Nobs = np.sum(mask)
def ideal(c):
itheta = np.full(PA.shape, np.nan)
itheta[(xx + 0.5) != c[0]] = np.degrees(np.arctan((yy[(xx + 0.5) != c[0]] + 0.5 - c[1]) / (xx[(xx + 0.5) != c[0]] + 0.5 - c[0])))
itheta[(xx + 0.5) == c[0]] = PA[(xx + 0.5) == c[0]]
return princ_angle(itheta)
def chisq(c):
return np.sum((princ_angle(PA[mask]) - ideal((c[0], c[1]))[mask]) ** 2 / sPA[mask] ** 2) / (Nobs - 2)
for x, y in zip(xx[np.isfinite(PA)], yy[np.isfinite(PA)]):
chi2[y, x] = chisq((x, y))
from scipy.optimize import minimize
conf[np.isfinite(PA)] = 0.5 * np.exp(-0.5 * chi2[np.isfinite(PA)])
c0 = np.unravel_index(np.argmax(conf), conf.shape)[::-1]
result = minimize(chisq, c0, bounds=[(0, PA.shape[1]), (0.0, PA.shape[0])], method="trust-constr")
if result.success:
print("Center of emission found: reduced chi_squared {0:.2f}/{1:d}={2:.2f}".format(chisq(result.x) * (Nobs - 2), Nobs - 2, chisq(result.x)))
else:
print("Center of emission not found", result)
return conf, result.x
def sci_not(v, err, rnd=1, out=str):
"""
Return the scientific error notation as a string.
----------
Inputs:
v : float
Value to be transformed into scientific notation.
err : float
Error on the value to be transformed into scientific notation.
rnd : int
Number of significant numbers for the scientific notation.
Default to 1.
out : str or other
Format in which the notation should be returned. "str" means the notation
is returned as a single string, "other" means it is returned as a list of "str".
Default to str.
----------
Returns:
notation : str or tuple
Value and error formatted in scientific notation, returned as a
single string or as separate components depending on 'out'.
Return the scientific error notation as a string.
"""
power = -int(("%E" % v)[-3:]) + 1
output = [r"({0}".format(round(v * 10**power, rnd)), round(v * 10**power, rnd)]
@@ -153,47 +46,17 @@ def sci_not(v, err, rnd=1, out=str):
else:
return *output[1:], -power
def wcs_CD_to_PC(CD):
def wcs_PA(PC21, PC22):
"""
Return the position angle in degrees to the North direction of a wcs
from the values of coefficient of its transformation matrix.
----------
Inputs:
CD : np.ndarray
Value of the WCS matrix CD
----------
Returns:
cdelt : (float, float)
Scaling factor in arcsec between pixel in X and Y directions.
orient : float
Angle in degrees between the North direction and the Up direction of the WCS.
"""
det = CD[0, 0] * CD[1, 1] - CD[0, 1] * CD[1, 0]
sgn = -1.0 if det < 0 else 1.0
cdelt = np.array([sgn, 1.0]) * np.sqrt(np.sum(CD**2, axis=1))
rot = np.arctan2(-CD[1, 0], sgn * CD[0, 0])
rot2 = np.arctan2(sgn * CD[0, 1], CD[1, 1])
orient = 0.5 * (rot + rot2) * 180.0 / np.pi
return cdelt, orient
def wcs_PA(PC, cdelt):
"""
Return the position angle in degrees to the North direction of a wcs
from the values of coefficient of its transformation matrix.
----------
Inputs:
PC : np.ndarray
Value of the WCS matrix PC
cdelt : (float, float)
Scaling factor in arcsec between pixel in X and Y directions.
----------
Returns:
orient : float
Angle in degrees between the North direction and the Up direction of the WCS.
"""
rot = np.pi / 2.0 - np.arctan2(-cdelt[1] * PC[1, 0], abs(cdelt[0]) * PC[0, 0])
rot2 = np.pi / 2.0 - np.arctan2(abs(cdelt[0]) * PC[0, 1], cdelt[1] * PC[1, 1])
orient = 0.5 * (rot + rot2) * 180.0 / np.pi
if (abs(PC21) > abs(PC22)) and (PC21 >= 0):
orient = -np.arccos(PC22) * 180.0 / np.pi
elif (abs(PC21) > abs(PC22)) and (PC21 < 0):
orient = np.arccos(PC22) * 180.0 / np.pi
elif (abs(PC21) < abs(PC22)) and (PC22 >= 0):
orient = np.arccos(PC22) * 180.0 / np.pi
elif (abs(PC21) < abs(PC22)) and (PC22 < 0):
orient = -np.arccos(PC22) * 180.0 / np.pi
return orient
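An illustrative call, assuming the header carries a PC matrix readable through astropy (the file path is hypothetical):
from astropy.io import fits
from astropy.wcs import WCS

w = WCS(fits.getheader("reduced_stokes.fits"))
orient = wcs_PA(w.wcs.pc[1, 0], w.wcs.pc[1, 1])  # North-to-up angle in degrees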

package/overplot_IC5063.py Executable file

@@ -0,0 +1,50 @@
#!/usr/bin/python3
import numpy as np
from astropy.io import fits
from lib.plots import overplot_pol, overplot_radio
from matplotlib.colors import LogNorm
Stokes_UV = fits.open("./data/IC5063/5918/IC5063_FOC_b0.10arcsec_c0.20arcsec.fits")
Stokes_18GHz = fits.open("./data/IC5063/radio/IC5063_18GHz.fits")
Stokes_24GHz = fits.open("./data/IC5063/radio/IC5063_24GHz.fits")
Stokes_103GHz = fits.open("./data/IC5063/radio/IC5063_103GHz.fits")
Stokes_229GHz = fits.open("./data/IC5063/radio/IC5063_229GHz.fits")
Stokes_357GHz = fits.open("./data/IC5063/radio/IC5063_357GHz.fits")
# Stokes_S2 = fits.open("./data/IC5063/POLARIZATION_COMPARISON/S2_rot_crop.fits")
Stokes_IR = fits.open("./data/IC5063/IR/u2e65g01t_c0f_rot.fits")
# levelsMorganti = np.array([1.,2.,3.,8.,16.,32.,64.,128.])
levelsMorganti = np.logspace(-0.1249, 1.97, 7) / 100.0
levels18GHz = levelsMorganti * Stokes_18GHz[0].data.max()
A = overplot_radio(Stokes_UV, Stokes_18GHz)
A.plot(levels=levels18GHz, SNRp_cut=2.0, SNRi_cut=10.0, savename="./plots/IC5063/18GHz_overplot.pdf", vec_scale=None)
levels24GHz = levelsMorganti * Stokes_24GHz[0].data.max()
B = overplot_radio(Stokes_UV, Stokes_24GHz)
B.plot(levels=levels24GHz, SNRp_cut=2.0, SNRi_cut=10.0, savename="./plots/IC5063/24GHz_overplot.pdf", vec_scale=None)
levels103GHz = levelsMorganti * Stokes_103GHz[0].data.max()
C = overplot_radio(Stokes_UV, Stokes_103GHz)
C.plot(levels=levels103GHz, SNRp_cut=2.0, SNRi_cut=10.0, savename="./plots/IC5063/103GHz_overplot.pdf", vec_scale=None)
levels229GHz = levelsMorganti * Stokes_229GHz[0].data.max()
D = overplot_radio(Stokes_UV, Stokes_229GHz)
D.plot(levels=levels229GHz, SNRp_cut=2.0, SNRi_cut=10.0, savename="./plots/IC5063/229GHz_overplot.pdf", vec_scale=None)
levels357GHz = levelsMorganti * Stokes_357GHz[0].data.max()
E = overplot_radio(Stokes_UV, Stokes_357GHz)
E.plot(levels=levels357GHz, SNRp_cut=2.0, SNRi_cut=10.0, savename="./plots/IC5063/357GHz_overplot.pdf", vec_scale=None)
# F = overplot_pol(Stokes_UV, Stokes_S2)
# F.plot(SNRp_cut=3.0, SNRi_cut=80.0, savename='./plots/IC5063/S2_overplot.pdf', norm=LogNorm(vmin=5e-20,vmax=5e-18))
G = overplot_pol(Stokes_UV, Stokes_IR, cmap="inferno")
G.plot(
SNRp_cut=2.0,
SNRi_cut=10.0,
savename="./plots/IC5063/IR_overplot.pdf",
vec_scale=None,
norm=LogNorm(Stokes_IR[0].data.max() * Stokes_IR[0].header["photflam"] / 1e3, Stokes_IR[0].data.max() * Stokes_IR[0].header["photflam"]),
cmap="inferno_r",
)

package/overplot_MRK463E.py Executable file

@@ -0,0 +1,20 @@
#!/usr/bin/python3
import numpy as np
from astropy.io import fits
from lib.plots import overplot_chandra, overplot_pol
from matplotlib.colors import LogNorm
Stokes_UV = fits.open("./data/MRK463E/5960/MRK463E_FOC_b0.05arcsec_c0.10arcsec.fits")
Stokes_IR = fits.open("./data/MRK463E/WFPC2/IR_rot_crop.fits")
Stokes_Xr = fits.open("./data/MRK463E/Chandra/X_ray_crop.fits")
levels = np.geomspace(1.0, 99.0, 7)
A = overplot_chandra(Stokes_UV, Stokes_Xr, norm=LogNorm())
A.plot(levels=levels, SNRp_cut=3.0, SNRi_cut=3.0, vec_scale=5, zoom=1, savename="./plots/MRK463E/Chandra_overplot.pdf")
A.write_to(path1="./data/MRK463E/FOC_data_Chandra.fits", path2="./data/MRK463E/Chandra_data.fits", suffix="aligned")
levels = np.array([0.8, 2, 5, 10, 20, 50]) / 100.0 * Stokes_UV[0].header["photflam"]
B = overplot_pol(Stokes_UV, Stokes_IR, norm=LogNorm())
B.plot(levels=levels, SNRp_cut=3.0, SNRi_cut=3.0, vec_scale=5, norm=LogNorm(8.5e-18, 2.5e-15), savename="./plots/MRK463E/IR_overplot.pdf")
B.write_to(path1="./data/MRK463E/FOC_data_WFPC.fits", path2="./data/MRK463E/WFPC_data.fits", suffix="aligned")

package/src/__init__.py Normal file

package/src/analysis.py Executable file

@@ -0,0 +1,40 @@
#!/usr/bin/python
from getopt import error as get_error
from getopt import getopt
from sys import argv
arglist = argv[1:]
options = "hf:p:i:l:"
long_options = ["help", "fits=", "snrp=", "snri=", "lim="]
fits_path = None
SNRp_cut, SNRi_cut = 3, 3
flux_lim = None
out_txt = None
try:
arg, val = getopt(arglist, options, long_options)
for curr_arg, curr_val in arg:
if curr_arg in ("-h", "--help"):
print("python3 analysis.py -f <path_to_reduced_fits> -p <SNRp_cut> -i <SNRi_cut> -l <flux_lim>")
elif curr_arg in ("-f", "--fits"):
fits_path = str(curr_val)
elif curr_arg in ("-p", "--snrp"):
SNRp_cut = int(curr_val)
elif curr_arg in ("-i", "--snri"):
SNRi_cut = int(curr_val)
elif curr_arg in ("-l", "--lim"):
flux_lim = list("".join(curr_val).split(","))
except get_error as err:
print(str(err))
if fits_path is not None:
from astropy.io import fits
from lib.plots import pol_map
Stokes_UV = fits.open(fits_path)
p = pol_map(Stokes_UV, SNRp_cut=SNRp_cut, SNRi_cut=SNRi_cut, flux_lim=flux_lim)
else:
print("python3 analysis.py -f <path_to_reduced_fits> -p <SNRp_cut> -i <SNRi_cut> -l <flux_lim>")


@@ -0,0 +1,214 @@
#!/usr/bin/python
from src.lib.background import gauss, bin_centers
from src.lib.deconvolve import zeropad
from src.lib.reduction import align_data
from src.lib.plots import princ_angle
from matplotlib.colors import LogNorm
from os.path import join as path_join
from astropy.io import fits
from astropy.wcs import WCS
from scipy.ndimage import shift
from scipy.optimize import curve_fit
import numpy as np
import matplotlib.pyplot as plt
root_dir = path_join('/home/t.barnouin/Documents/Thesis/HST')
root_dir_K = path_join(root_dir, 'Kishimoto', 'output')
root_dir_S = path_join(root_dir, 'FOC_Reduction', 'output')
root_dir_data_S = path_join(root_dir, 'FOC_Reduction', 'data', 'NGC1068', '5144')
root_dir_plot_S = path_join(root_dir, 'FOC_Reduction', 'plots', 'NGC1068', '5144', 'notaligned')
filename_S = "NGC1068_FOC_b10.00pixel_not_aligned.fits"
plt.rcParams.update({'font.size': 15})
SNRi_cut = 30.
SNRp_cut = 3.
data_K = {}
data_S = {}
for d, i in zip(['I', 'Q', 'U', 'P', 'PA', 'sI', 'sQ', 'sU', 'sP', 'sPA'], [0, 1, 2, 5, 8, (3, 0, 0), (3, 1, 1), (3, 2, 2), 6, 9]):
data_K[d] = np.loadtxt(path_join(root_dir_K, d+'.txt'))
with fits.open(path_join(root_dir_data_S, filename_S)) as f:
if not type(i) is int:
data_S[d] = np.sqrt(f[i[0]].data[i[1], i[2]])
else:
data_S[d] = f[i].data
if i == 0:
header = f[i].header
wcs = WCS(header)
convert_flux = header['photflam']
bkg_S = np.median(data_S['I'])/3
bkg_K = np.median(data_K['I'])/3
# zeropad data so all arrays have the same shape
shape = data_S['I'].shape
for d in data_K:
data_K[d] = zeropad(data_K[d], shape)
# shift arrays so the same information lands in the same pixel
data_arr, error_ar, heads, data_msk, shifts, shifts_err = align_data(np.array([data_S['I'], data_K['I']]), [header, header], error_array=np.array(
[data_S['sI'], data_K['sI']]), background=np.array([bkg_S, bkg_K]), upsample_factor=10., ref_center='center', return_shifts=True)
for d in data_K:
data_K[d] = shift(data_K[d], shifts[1], order=1, cval=0.)
# compute pol components from shifted array
for d in [data_S, data_K]:
for i in d:
d[i][np.isnan(d[i])] = 0.
d['P'] = np.where(np.logical_and(np.isfinite(d['I']), d['I'] > 0.), np.sqrt(d['Q']**2+d['U']**2)/d['I'], 0.)
d['sP'] = np.where(np.logical_and(np.isfinite(d['I']), d['I'] > 0.), np.sqrt((d['Q']**2*d['sQ']**2+d['U']**2*d['sU']**2) /
(d['Q']**2+d['U']**2)+((d['Q']/d['I'])**2+(d['U']/d['I'])**2)*d['sI']**2)/d['I'], 0.)
d['d_P'] = np.where(np.logical_and(np.isfinite(d['P']), np.isfinite(d['sP'])), np.sqrt(d['P']**2-d['sP']**2), 0.)
d['PA'] = 0.5*np.arctan2(d['U'], d['Q'])+np.pi
d['SNRp'] = np.zeros(d['d_P'].shape)
d['SNRp'][d['sP'] > 0.] = d['d_P'][d['sP'] > 0.]/d['sP'][d['sP'] > 0.]
d['SNRi'] = np.zeros(d['I'].shape)
d['SNRi'][d['sI'] > 0.] = d['I'][d['sI'] > 0.]/d['sI'][d['sI'] > 0.]
d['mask'] = np.logical_and(d['SNRi'] > SNRi_cut, d['SNRp'] > SNRp_cut)
data_S['mask'], data_K['mask'] = np.logical_and(data_S['mask'], data_K['mask']), np.logical_and(data_S['mask'], data_K['mask'])
#
# Compute the histogram of measured polarization degree in the cut
#
bins = int(data_S['mask'].sum()/5)
bin_size = 1./bins
mod_p = np.linspace(0., 1., 300)
for d in [data_S, data_K]:
d['hist'], d['bin_edges'] = np.histogram(d['d_P'][d['mask']], bins=bins, range=(0., 1.))
d['binning'] = bin_centers(d['bin_edges'])
peak, bins_fwhm = d['binning'][np.argmax(d['hist'])], d['binning'][d['hist'] > d['hist'].max()/2.]
fwhm = bins_fwhm[1]-bins_fwhm[0]
p0 = [d['hist'].max(), peak, fwhm]
try:
popt, pcov = curve_fit(gauss, d['binning'], d['hist'], p0=p0)
except RuntimeError:
popt = p0
d['hist_chi2'] = np.sum((d['hist']-gauss(d['binning'], *popt))**2)/d['hist'].size
d['hist_popt'] = popt
fig_p, ax_p = plt.subplots(num="Polarization degree histogram", figsize=(10, 6), constrained_layout=True)
ax_p.errorbar(data_S['binning'], data_S['hist'], xerr=bin_size/2., fmt='b.', ecolor='b', label='P through this pipeline')
ax_p.plot(mod_p, gauss(mod_p, *data_S['hist_popt']), 'b--', label='mean = {1:.2f}, stdev = {2:.2f}'.format(*data_S['hist_popt']))
ax_p.errorbar(data_K['binning'], data_K['hist'], xerr=bin_size/2., fmt='r.', ecolor='r', label="P through Kishimoto's pipeline")
ax_p.plot(mod_p, gauss(mod_p, *data_K['hist_popt']), 'r--', label='mean = {1:.2f}, stdev = {2:.2f}'.format(*data_K['hist_popt']))
ax_p.set(xlabel="Polarization degree", ylabel="Counts", title="Histogram of polarization degree computed in the cut for both pipelines.")
ax_p.legend()
fig_p.savefig(path_join(root_dir_plot_S, "NGC1068_K_pol_deg.png"), bbox_inches="tight", dpi=300)
#
# Compute the angular difference between the maps in the cut
#
dtheta = np.where(data_S['mask'], 0.5*np.arctan((np.sin(2*data_S['PA'])*np.cos(2*data_K['PA'])-np.cos(2*data_S['PA']) *
np.sin(2*data_K['PA']))/(np.cos(2*data_S['PA'])*np.cos(2*data_K['PA'])+np.sin(2*data_S['PA'])*np.sin(2*data_K['PA']))), np.nan)
fig_pa = plt.figure(num="Polarization angle alignment")
ax_pa = fig_pa.add_subplot(111, projection=wcs)
cbar_ax_pa = fig_pa.add_axes([0.88, 0.12, 0.01, 0.75])
ax_pa.set_title(r"Degree of alignment $\zeta$ of the polarization angles from the 2 pipelines in the cut")
im_pa = ax_pa.imshow(np.cos(2*dtheta), vmin=-1., vmax=1., origin='lower', cmap='bwr', label=r"$\zeta$ between this pipeline and Kishimoto's")
cbar_pa = plt.colorbar(im_pa, cax=cbar_ax_pa, label=r"$\zeta = \cos\left( 2 \cdot \delta\theta_P \right)$")
ax_pa.coords[0].set_axislabel('Right Ascension (J2000)')
ax_pa.coords[1].set_axislabel('Declination (J2000)')
fig_pa.savefig(path_join(root_dir_plot_S, "NGC1068_K_pol_ang.png"), bbox_inches="tight", dpi=300)
#
# Compute the polarization degree difference ratio between the maps in the cut
#
eta = np.where(data_S['mask'], np.abs(data_K['d_P']-data_S['d_P'])/np.sqrt(data_S['sP']**2+data_K['sP']**2)/2., np.nan)
fig_dif_p = plt.figure(num="Polarization degree difference ratio")
ax_dif_p = fig_dif_p.add_subplot(111, projection=wcs)
cbar_ax_dif_p = fig_dif_p.add_axes([0.88, 0.12, 0.01, 0.75])
ax_dif_p.set_title(r"Degree of difference $\eta$ of the polarization degree from the 2 pipelines in the cut")
im_dif_p = ax_dif_p.imshow(eta, vmin=0., vmax=2., origin='lower', cmap='bwr_r', label=r"$\eta$ between this pipeline and Kishimoto's")
cbar_dif_p = plt.colorbar(im_dif_p, cax=cbar_ax_dif_p, label=r"$\eta = \frac{2 \left|P^K-P^S\right|}{\sqrt{{\sigma^K_P}^2+{\sigma^S_P}^2}}$")
ax_dif_p.coords[0].set_axislabel('Right Ascension (J2000)')
ax_dif_p.coords[1].set_axislabel('Declination (J2000)')
fig_dif_p.savefig(path_join(root_dir_plot_S, "NGC1068_K_pol_diff.png"), bbox_inches="tight", dpi=300)
#
# Compute the polarization angle difference ratio between the maps in the cut
#
eta = np.where(data_S['mask'], np.abs(data_K['PA']-data_S['PA'])/np.sqrt(data_S['sPA']**2+data_K['sPA']**2)/2., np.nan)
fig_dif_pa = plt.figure(num="Polarization angle difference ratio")
ax_dif_pa = fig_dif_pa.add_subplot(111, projection=wcs)
cbar_ax_dif_pa = fig_dif_pa.add_axes([0.88, 0.12, 0.01, 0.75])
ax_dif_pa.set_title(r"Degree of difference $\eta$ of the polarization angle from the 2 pipelines in the cut")
im_dif_pa = ax_dif_pa.imshow(eta, vmin=0., vmax=2., origin='lower', cmap='bwr_r', label=r"$\eta$ between this pipeline and Kishimoto's")
cbar_dif_pa = plt.colorbar(im_dif_pa, cax=cbar_ax_dif_pa,
label=r"$\eta = \frac{2 \left|\theta_P^K-\theta_P^S\right|}{\sqrt{{\sigma^K_{\theta_P}}^2+{\sigma^S_{\theta_P}}^2}}$")
ax_dif_pa.coords[0].set_axislabel('Right Ascension (J2000)')
ax_dif_pa.coords[1].set_axislabel('Declination (J2000)')
fig_dif_pa.savefig(path_join(root_dir_plot_S, "NGC1068_K_polang_diff.png"), bbox_inches="tight", dpi=300)
# display both polarization maps to check consistency
# plt.rcParams.update({'font.size': 15})
fig = plt.figure(num="Polarization maps comparison", figsize=(10, 10))
ax = fig.add_subplot(111, projection=wcs)
fig.subplots_adjust(right=0.85)
cbar_ax = fig.add_axes([0.88, 0.12, 0.01, 0.75])
for d in [data_S, data_K]:
d['X'], d['Y'] = np.meshgrid(np.arange(d['I'].shape[1]), np.arange(d['I'].shape[0]))
d['xy_U'], d['xy_V'] = np.where(d['mask'], d['d_P']*np.cos(np.pi/2.+d['PA']), np.nan), np.where(d['mask'], d['d_P']*np.sin(np.pi/2.+d['PA']), np.nan)
im0 = ax.imshow(data_S['I']*convert_flux, norm=LogNorm(data_S['I'][data_S['I'] > 0].min()*convert_flux, data_S['I']
[data_S['I'] > 0].max()*convert_flux), origin='lower', cmap='gray', label=r"$I_{STOKES}$ through this pipeline")
quiv0 = ax.quiver(data_S['X'], data_S['Y'], data_S['xy_U'], data_S['xy_V'], units='xy', angles='uv', scale=0.5, scale_units='xy',
pivot='mid', headwidth=0., headlength=0., headaxislength=0., width=0.2, color='b', alpha=0.75, label="PA through this pipeline")
quiv1 = ax.quiver(data_K['X'], data_K['Y'], data_K['xy_U'], data_K['xy_V'], units='xy', angles='uv', scale=0.5, scale_units='xy',
pivot='mid', headwidth=0., headlength=0., headaxislength=0., width=0.1, color='r', alpha=0.75, label="PA through Kishimoto's pipeline")
ax.set_title(r"$SNR_P \geq$ "+str(SNRi_cut)+r"$\; & \; SNR_I \geq $"+str(SNRp_cut))
# ax.coords.grid(True, color='white', ls='dotted', alpha=0.5)
ax.coords[0].set_axislabel('Right Ascension (J2000)')
ax.coords[0].set_axislabel_position('b')
ax.coords[0].set_ticklabel_position('b')
ax.coords[1].set_axislabel('Declination (J2000)')
ax.coords[1].set_axislabel_position('l')
ax.coords[1].set_ticklabel_position('l')
# ax.axis('equal')
cbar = plt.colorbar(im0, cax=cbar_ax, label=r"$F_{\lambda}$ [$ergs \cdot cm^{-2} \cdot s^{-1} \cdot \AA^{-1}$]")
ax.legend(loc='upper right')
fig.savefig(path_join(root_dir_plot_S, "NGC1068_K_comparison.png"), bbox_inches="tight", dpi=300)
# compute integrated polarization parameters on a specific cut
for d in [data_S, data_K]:
d['I_dil'] = np.sum(d['I'][d['mask']])
d['sI_dil'] = np.sqrt(np.sum(d['sI'][d['mask']]**2))
d['Q_dil'] = np.sum(d['Q'][d['mask']])
d['sQ_dil'] = np.sqrt(np.sum(d['sQ'][d['mask']]**2))
d['U_dil'] = np.sum(d['U'][d['mask']])
d['sU_dil'] = np.sqrt(np.sum(d['sU'][d['mask']]**2))
d['P_dil'] = np.sqrt(d['Q_dil']**2+d['U_dil']**2)/d['I_dil']
d['sP_dil'] = np.sqrt((d['Q_dil']**2*d['sQ_dil']**2+d['U_dil']**2*d['sU_dil']**2)/(d['Q_dil']**2+d['U_dil']**2) +
((d['Q_dil']/d['I_dil'])**2+(d['U_dil']/d['I_dil'])**2)*d['sI_dil']**2)/d['I_dil']
d['d_P_dil'] = np.sqrt(d['P_dil']**2-d['sP_dil']**2)
d['PA_dil'] = princ_angle((90./np.pi)*np.arctan2(d['U_dil'], d['Q_dil']))
d['sPA_dil'] = princ_angle((90./(np.pi*(d['Q_dil']**2+d['U_dil']**2)))*np.sqrt(d['U_dil']**2*d['sQ_dil']**2+d['Q_dil']**2*d['sU_dil']**2))
print('From this pipeline :\n', "P = {0:.2f} ± {1:.2f} %\n".format(
data_S['d_P_dil']*100., data_S['sP_dil']*100.), "PA = {0:.2f} ± {1:.2f} °".format(data_S['PA_dil'], data_S['sPA_dil']))
print("From Kishimoto's pipeline :\n", "P = {0:.2f} ± {1:.2f} %\n".format(
data_K['d_P_dil']*100., data_K['sP_dil']*100.), "PA = {0:.2f} ± {1:.2f} °".format(data_K['PA_dil'], data_K['sPA_dil']))
# compare different types of error
print("This pipeline : average sI/I={0:.2f} ; sQ/Q={1:.2f} ; sU/U={2:.2f} ; sP/P={3:.2f}".format(np.mean(data_S['sI'][data_S['mask']]/data_S['I'][data_S['mask']]), np.mean(
data_S['sQ'][data_S['mask']]/data_S['Q'][data_S['mask']]), np.mean(data_S['sU'][data_S['mask']]/data_S['U'][data_S['mask']]), np.mean(data_S['sP'][data_S['mask']]/data_S['P'][data_S['mask']])))
print("Kishimoto's pipeline : average sI/I={0:.2f} ; sQ/Q={1:.2f} ; sU/U={2:.2f} ; sP/P={3:.2f}".format(np.mean(data_K['sI'][data_S['mask']]/data_K['I'][data_S['mask']]), np.mean(
data_K['sQ'][data_S['mask']]/data_K['Q'][data_S['mask']]), np.mean(data_K['sU'][data_S['mask']]/data_K['U'][data_S['mask']]), np.mean(data_K['sP'][data_S['mask']]/data_K['P'][data_S['mask']])))
for d, i in zip(['I', 'Q', 'U', 'P', 'PA', 'sI', 'sQ', 'sU', 'sP', 'sPA'], [0, 1, 2, 5, 8, (3, 0, 0), (3, 1, 1), (3, 2, 2), 6, 9]):
data_K[d] = np.loadtxt(path_join(root_dir_K, d+'.txt'))
with fits.open(path_join(root_dir_data_S, filename_S)) as f:
if not type(i) is int:
data_S[d] = np.sqrt(f[i[0]].data[i[1], i[2]])
else:
data_S[d] = f[i].data
if i == 0:
header = f[i].header
# from Kishimoto's pipeline : IQU_dir, IQU_shift, IQU_stat, IQU_trans
# from my pipeline : raw_bg, raw_flat, raw_psf, raw_shift, raw_wav, IQU_dir
# but errors from my pipeline are propagated all along, how to compare then ?
plt.show()


@@ -1,88 +0,0 @@
#!/usr/bin/env python3
# -*- coding:utf-8 -*-
from pathlib import Path
from sys import path as syspath
syspath.append(str(Path(__file__).parent.parent))
def main(infile, P_cut=0.99, target=None, display="pf", output_dir=None):
from os.path import join as pathjoin
import numpy as np
from astropy.io.fits import open as fits_open
from astropy.wcs import WCS
from lib.plots import polarization_map
from lib.utils import CenterConf, PCconf
from matplotlib.patches import Rectangle
from matplotlib.pyplot import figure, show
output = []
Stokes = fits_open(infile)
stkI = Stokes["I_STOKES"].data
QN, UN, QN_ERR, UN_ERR = np.full((4, stkI.shape[0], stkI.shape[1]), np.nan)
for sflux, nflux in zip(
[Stokes["Q_STOKES"].data, Stokes["U_STOKES"].data, np.sqrt(Stokes["IQU_COV_MATRIX"].data[1, 1]), np.sqrt(Stokes["IQU_COV_MATRIX"].data[2, 2])],
[QN, UN, QN_ERR, UN_ERR],
):
nflux[stkI > 0.0] = sflux[stkI > 0.0] / stkI[stkI > 0.0]
Stokesconf = PCconf(QN, UN, QN_ERR, UN_ERR)
Stokesmask = Stokes["DATA_MASK"].data.astype(bool)
Stokessnr = np.zeros(Stokesmask.shape)
Stokessnr[Stokes["POL_DEG_ERR"].data > 0.0] = (
Stokes["POL_DEG_DEBIASED"].data[Stokes["POL_DEG_ERR"].data > 0.0] / Stokes["POL_DEG_ERR"].data[Stokes["POL_DEG_ERR"].data > 0.0]
)
if P_cut < 1.0:
Stokescentconf, Stokescenter = CenterConf(Stokesconf > P_cut, Stokes["POL_ANG"].data, Stokes["POL_ANG_ERR"].data)
else:
Stokescentconf, Stokescenter = CenterConf(Stokessnr > P_cut, Stokes["POL_ANG"].data, Stokes["POL_ANG_ERR"].data)
Stokespos = WCS(Stokes[0].header).pixel_to_world(*Stokescenter)
if target is None:
target = Stokes[0].header["TARGNAME"]
ratiox = max(int(stkI.shape[1] / (stkI.shape[0])), 1)
ratioy = max(int((stkI.shape[0]) / stkI.shape[1]), 1)
fig = figure(figsize=(8 * ratiox, 8 * ratioy), layout="constrained")
fig, ax = polarization_map(Stokes, P_cut=P_cut, step_vec=1, scale_vec=5, display=display, fig=fig, width=0.33, linewidth=0.5)
ax.plot(*Stokescenter, marker="+", color="k", lw=3)
ax.plot(*Stokescenter, marker="+", color="red", lw=1.5, label="Best confidence for center: {0}".format(Stokespos.to_string("hmsdms")))
ax.contour(Stokescentconf, [0.01], colors="k", linewidths=3)
confcentcont = ax.contour(Stokescentconf, [0.01], colors="red")
# confcont = ax.contour(Stokesconf, [0.9905], colors="r")
# snr3cont = ax.contour(Stokessnr, [3.0], colors="b", linestyles="dashed")
# snr4cont = ax.contour(Stokessnr, [4.0], colors="b")
handles, labels = ax.get_legend_handles_labels()
labels.append(r"Center $Conf_{99\%}$ contour")
handles.append(Rectangle((0, 0), 1, 1, fill=False, ec=confcentcont.get_edgecolor()[0]))
# labels.append(r"Polarization $Conf_{99\%}$ contour")
# handles.append(Rectangle((0, 0), 1, 1, fill=False, ec=confcont.get_edgecolor()[0]))
# labels.append(r"$SNR_P \geq$ 3 contour")
# handles.append(Rectangle((0, 0), 1, 1, fill=False, ls="--", ec=snr3cont.get_edgecolor()[0]))
# labels.append(r"$SNR_P \geq$ 4 contour")
# handles.append(Rectangle((0, 0), 1, 1, fill=False, ec=snr4cont.get_edgecolor()[0]))
ax.legend(handles=handles, labels=labels, bbox_to_anchor=(-0.05, -0.02, 1.10, 0.01), loc="upper left", mode="expand", borderaxespad=0.0)
if output_dir is not None:
filename = pathjoin(output_dir, "%s_center.pdf" % target)
fig.savefig(filename, bbox_inches="tight", dpi=300, facecolor="None")
output.append(filename)
show()
return output
if __name__ == "__main__":
import argparse
parser = argparse.ArgumentParser(description="Look for the center of emission for a given reduced observation")
parser.add_argument("-t", "--target", metavar="targetname", required=False, help="the name of the target", type=str, default=None)
parser.add_argument("-f", "--file", metavar="path", required=False, help="The full or relative path to the data product", type=str, default=None)
parser.add_argument("-c", "--pcut", metavar="pcut", required=False, help="The polarization cut for the data mask", type=float, default=0.99)
parser.add_argument("-d", "--display", metavar="display", required=False, help="The map on which to display info", type=str, default="pf")
parser.add_argument("-o", "--output_dir", metavar="directory_path", required=False, help="output directory path for the plots", type=str, default="./data")
args = parser.parse_args()
exitcode = main(infile=args.file, P_cut=args.pcut, target=args.target, display=args.display, output_dir=args.output_dir)
print("Written to: ", exitcode)

View File

@@ -1,9 +1,4 @@
#!/usr/bin/env python3
# -*- coding:utf-8 -*-
from pathlib import Path
from sys import path as syspath
syspath.append(str(Path(__file__).parent.parent))
#!/usr/bin/python
def main(infiles=None):
@@ -18,7 +13,7 @@ def main(infiles=None):
from numpy.linalg import eig
if infiles is None:
print('Usage: "env python3 get_cdelt.py -f infiles"')
print('Usage: "python get_cdelt.py -f infiles"')
return 1
prod = [["/".join(filepath.split("/")[:-1]), filepath.split("/")[-1]] for filepath in infiles]
data_folder = prod[0][0]

View File

@@ -1,56 +0,0 @@
#!/usr/bin/env python3
# -*- coding:utf-8 -*-
from pathlib import Path
from sys import path as syspath
syspath.append(str(Path(__file__).parent.parent))
import numpy as np
from astropy.io import fits
from lib.plots import overplot_pol, overplot_radio
from matplotlib.colors import LogNorm
Stokes_UV = fits.open("./data/IC5063/5918/IC5063_FOC_b0.10arcsec_c0.20arcsec.fits")
# Stokes_18GHz = fits.open("./data/IC5063/radio/IC5063_18GHz.fits")
# Stokes_24GHz = fits.open("./data/IC5063/radio/IC5063_24GHz.fits")
# Stokes_103GHz = fits.open("./data/IC5063/radio/IC5063_103GHz.fits")
# Stokes_229GHz = fits.open("./data/IC5063/radio/IC5063_229GHz.fits")
# Stokes_357GHz = fits.open("./data/IC5063/radio/IC5063_357GHz.fits")
# Stokes_S2 = fits.open("./data/IC5063/POLARIZATION_COMPARISON/S2_rot_crop.fits")
Stokes_IR = fits.open("./data/IC5063/IR/u2e65g01t_c0f_rot.fits")
# levelsMorganti = np.array([1.,2.,3.,8.,16.,32.,64.,128.])
# levelsMorganti = np.logspace(-0.1249, 1.97, 7) / 100.0
# levels18GHz = levelsMorganti * Stokes_18GHz[0].data.max()
# A = overplot_radio(Stokes_UV, Stokes_18GHz)
# A.plot(levels=levels18GHz, P_cut=2.0, SNRi_cut=10.0, savename="./plots/IC5063/18GHz_overplot.pdf", scale_vec=None)
# levels24GHz = levelsMorganti * Stokes_24GHz[0].data.max()
# B = overplot_radio(Stokes_UV, Stokes_24GHz)
# B.plot(levels=levels24GHz, P_cut=2.0, SNRi_cut=10.0, savename="./plots/IC5063/24GHz_overplot.pdf", scale_vec=None)
# levels103GHz = levelsMorganti * Stokes_103GHz[0].data.max()
# C = overplot_radio(Stokes_UV, Stokes_103GHz)
# C.plot(levels=levels103GHz, P_cut=2.0, SNRi_cut=10.0, savename="./plots/IC5063/103GHz_overplot.pdf", scale_vec=None)
# levels229GHz = levelsMorganti * Stokes_229GHz[0].data.max()
# D = overplot_radio(Stokes_UV, Stokes_229GHz)
# D.plot(levels=levels229GHz, P_cut=2.0, SNRi_cut=10.0, savename="./plots/IC5063/229GHz_overplot.pdf", scale_vec=None)
# levels357GHz = levelsMorganti * Stokes_357GHz[0].data.max()
# E = overplot_radio(Stokes_UV, Stokes_357GHz)
# E.plot(levels=levels357GHz, P_cut=2.0, SNRi_cut=10.0, savename="./plots/IC5063/357GHz_overplot.pdf", scale_vec=None)
# F = overplot_pol(Stokes_UV, Stokes_S2)
# F.plot(P_cut=3.0, SNRi_cut=80.0, savename='./plots/IC5063/S2_overplot.pdf', norm=LogNorm(vmin=5e-20,vmax=5e-18))
G = overplot_pol(Stokes_UV, Stokes_IR, cmap="inferno")
G.plot(
P_cut=0.99,
SNRi_cut=1.0,
savename="./plots/IC5063/IR_overplot.pdf",
scale_vec=None,
norm=LogNorm(Stokes_IR[0].data.max() * Stokes_IR[0].header["photflam"] / 1e3, Stokes_IR[0].data.max() * Stokes_IR[0].header["photflam"]),
cmap="inferno",
)

View File

@@ -1,46 +0,0 @@
#!/usr/bin/env python3
# -*- coding:utf-8 -*-
from pathlib import Path
from sys import path as syspath
syspath.append(str(Path(__file__).parent.parent))
import numpy as np
from astropy.io import fits
from lib.plots import overplot_chandra, overplot_pol
from matplotlib.colors import LogNorm
Stokes_UV = fits.open("./data/MRK463E/5960/MRK463E_FOC_b0.05arcsec_c0.10arcsec.fits")
# Stokes_IR = fits.open("./data/MRK463E/WFPC2/IR_rot_crop.fits")
# Stokes_Xr = fits.open("./data/MRK463E/Chandra/X_ray_crop.fits")
Radio = fits.open("./data/MRK463E/EMERLIN/Voorwerpjes_1356+1822_1356+1822_uniform-image.fits")
# levels = np.geomspace(1.0, 99.0, 7)
# A = overplot_chandra(Stokes_UV, Stokes_Xr, norm=LogNorm())
# A.plot(levels=levels, P_cut=0.99, SNRi_cut=1.0, scale_vec=5, zoom=1, savename="./plots/MRK463E/Chandra_overplot.pdf")
# A.write_to(path1="./data/MRK463E/FOC_data_Chandra.fits", path2="./data/MRK463E/Chandra_data.fits", suffix="aligned")
# levels = np.array([0.8, 2, 5, 10, 20, 50]) / 100.0 * Stokes_UV[0].header["photflam"]
# B = overplot_pol(Stokes_UV, Stokes_IR, norm=LogNorm())
# B.plot(levels=levels, P_cut=0.99, SNRi_cut=1.0, scale_vec=5, norm=LogNorm(8.5e-18, 2.5e-15), savename="./plots/MRK463E/IR_overplot.pdf")
# B.write_to(path1="./data/MRK463E/FOC_data_WFPC.fits", path2="./data/MRK463E/WFPC_data.fits", suffix="aligned")
# levels = np.array([0.8, 2, 5, 10, 20, 50]) / 100.0 * Stokes_UV[0].header["photflam"]
levels = np.array([5, 10, 20, 50])
C = overplot_pol(Stokes_UV, Radio, norm=LogNorm())
C.other_im.set(norm=LogNorm(1e-4, 2e-2))
C.plot(
levels=levels,
P_cut=0.99,
SNRi_cut=1.0,
step_vec=0,
scale_vec=3,
norm=LogNorm(1e-4, 2e-2),
cmap="inferno_r",
width=0.5,
linewidth=0.5,
disptype="snri",
savename="./plots/MRK463E/EMERLIN_snri_overplot.pdf",
)
C.write_to(path1="./data/MRK463E/FOC_data_EMERLIN.fits", path2="./data/MRK463E/EMERLIN_data.fits", suffix="aligned")

View File

@@ -1,32 +0,0 @@
#!/usr/bin/env python3
# -*- coding:utf-8 -*-
from pathlib import Path
from sys import path as syspath
syspath.append(str(Path(__file__).parent.parent))
import numpy as np
from astropy.io import fits
from lib.plots import overplot_pol, plt
from matplotlib.colors import LogNorm
Stokes_UV = fits.open("./data/NGC1068/5144/NGC1068_FOC_b0.05arcsec_c0.07arcsec_crop.fits")
Radio = fits.open("./data/NGC1068/MERLIN-VLA/Combined_crop.fits")
levels = np.logspace(-0.5, 1.99, 7) / 100.0 * Stokes_UV[0].data.max() * Stokes_UV[0].header["photflam"]
A = overplot_pol(Stokes_UV, Radio, norm=LogNorm())
A.plot(levels=levels, P_cut=0.99, SNRi_cut=1.0, scale_vec=3, step_vec=1, norm=LogNorm(5e-5, 1e-1), cmap="inferno_r", width=0.8, linewidth=1.2)
A.add_vector(
A.other_wcs.celestial.wcs.crpix - (1.0, 1.0),
pol_deg=0.124,
pol_ang=100.7,
width=2.0,
linewidth=1.0,
scale=1.0 / (A.px_scale * 6.0),
edgecolor="w",
color="b",
label=r"IXPE torus: P = $12.4 (\pm 3.6)$%, PA = $100.7 (\pm 8.3)$°",
)
A.fig_overplot.savefig("./plots/NGC1068/NGC1068_radio_overplot_full.pdf", dpi=300, bbox_inches="tight")
plt.show()
A.write_to(path1="./data/NGC1068/FOC_Radio.fits", path2="./data/NGC1068/Radio_FOC.fits", suffix="aligned")

View File

@@ -1,850 +0,0 @@
#!/usr/bin/env python3
import matplotlib.pyplot as plt
import numpy as np
from astropy.io.fits import getheader, getdata, hdu
from os.path import join as join_path, exists as path_exists
from os import system
from copy import deepcopy
# consecutive spectra are made up of the sum of all previous ACCUMs, so the S/N increases along the sequence
# _c0f.fits - calibrated vacuum wavelength
# _c1f.fits - calibrated fluxes (ergs sec^-1 cm^-2 Angs^-1)
# _c2f.fits - statistical errors (no sky, bkg subtraction, flatfield or sensitivity error)
# _c3f.fits - for SPECTROPOLARIMETRY mode contains I, Q, U, V, linear and circular polarization and polarization position angle
# _c4f.fits - object+sky count rate spectrum (corrected for overscanning, noise rejection, lost signal)
# _c5f.fits - flat-fielded object count rate spectrum (corrected for paired pulses, detector background, flatfield structure, GIM effects)
# _c6f.fits - flat-fielded sky count rate spectrum (corrected for paired pulses, detector background, flatfield structure, GIM effects)
# _c7f.fits - background count rate spectrum (scaled background subtracted from c4 products)
# _c8f.fits - flat-fielded and sky-subtracted object count rate spectrum
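# A minimal loading sketch for the products above, using the getdata/getheader
# imports at the top of this file (the rootname "y0tr0203t" is hypothetical):
#   wav = getdata("y0tr0203t_c0f.fits")     # calibrated vacuum wavelengths
#   stokes = getdata("y0tr0203t_c3f.fits")  # I, Q, U, V and polarization rows
#   hd = getheader("y0tr0203t_c3f.fits")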
def princ_angle(ang):
"""
Return the principal angle in the 0° to 180° quadrant, as PA is always
defined modulo 180°.
----------
Inputs:
ang : float, numpy.ndarray
Angle in degrees. Can be an array of angles.
----------
Returns:
princ_ang : float, numpy.ndarray
Principal angle in the 0°-180° quadrant in the same shape as input.
"""
if not isinstance(ang, np.ndarray):
A = np.array([ang])
else:
A = np.array(ang)
while np.any(A < 0.0):
A[A < 0.0] = A[A < 0.0] + 360.0
while np.any(A >= 180.0):
A[A >= 180.0] = A[A >= 180.0] - 180.0
if isinstance(ang, np.ndarray):
return A
else:
return A[0]
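# Usage sketch: princ_angle wraps any angle into the [0°, 180°) quadrant, e.g.
#   princ_angle(-30.0)                     # -> 150.0
#   princ_angle(np.array([190.0, 365.0]))  # -> array([10., 5.])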
class specpol(object):
"""
Class object for studying spectropolarimetry.
"""
def __init__(self, other=None):
if isinstance(other, __class__):
# Copy constructor
self.hd = deepcopy(other.hd)
self.bin_edges = deepcopy(other.bin_edges)
self.wav = deepcopy(other.wav)
self.wav_err = deepcopy(other.wav_err)
self.I = deepcopy(other.I)
self.Q = deepcopy(other.Q)
self.U = deepcopy(other.U)
self.V = deepcopy(other.V)
self.IQUV_cov = deepcopy(other.IQUV_cov)
if hasattr(other, "I_r"):
self.I_r = deepcopy(other.I_r)
self.I_r_err = deepcopy(other.I_r_err)
self.wav_r = deepcopy(other.wav_r)
self.wav_r_err = deepcopy(other.wav_r_err)
elif isinstance(other, str):
self.from_txt(other)
else:
# Initialise to zero
if isinstance(other, int):
self.zero(other)
else:
self.zero()
def zero(self, n=1):
"""
Set all values to zero.
"""
self.hd = dict([])
self.hd["TARGNAME"], self.hd["PROPOSID"], self.hd["ROOTNAME"], self.hd["APER_ID"] = "", 0, "", ""
self.hd["DENSITY"] = False
self.hd["XUNIT"], self.hd["YUNIT"] = r"Wavelength [m]", r"{0:s}F [$10^{{{1:d}}}$ count s$^{{-1}}$]"
self.bin_edges = np.zeros(n + 1)
self.wav = np.zeros(n)
self.wav_err = np.zeros((n, 2))
self.I = np.zeros(n)
self.Q = np.zeros(n)
self.U = np.zeros(n)
self.V = np.zeros(n)
self.IQUV_cov = np.zeros((4, 4, n))
def rest(self, wav=None, z=None):
if z is None and self.hd["TARGNAME"] == "":
z = 0
elif z is None and "REDSHIFT" not in self.hd.keys():
from astroquery.ipac.ned import Ned
z = Ned.query_object(self.hd["TARGNAME"])["Redshift"][0]
self.hd["REDSHIFT"] = z
elif z is None:
z = self.hd["REDSHIFT"]
if wav is None:
wav = self.wav
return wav / (z + 1)
def unrest(self, wav=None, z=None):
if z is None and self.hd["TARGNAME"] == "":
z = 0
elif z is None and "REDSHIFT" not in self.hd.keys():
from astroquery.ipac.ned import Ned
z = Ned.query_object(self.hd["TARGNAME"])["Redshift"][0]
self.hd["REDSHIFT"] = z
elif z is None:
z = self.hd["REDSHIFT"]
if wav is None:
wav = self.wav
return wav * (z + 1)
@property
def I_err(self):
return np.sqrt(self.IQUV_cov[0][0])
@property
def Q_err(self):
return np.sqrt(self.IQUV_cov[1][1])
@property
def U_err(self):
return np.sqrt(self.IQUV_cov[2][2])
@property
def V_err(self):
return np.sqrt(self.IQUV_cov[3][3])
@property
def QN(self):
np.seterr(divide="ignore", invalid="ignore")
return self.Q / np.where(self.I > 0, self.I, np.nan)
@property
def QN_err(self):
np.seterr(divide="ignore", invalid="ignore")
return self.Q_err / np.where(self.I > 0, self.I, np.nan)
@property
def UN(self):
np.seterr(divide="ignore", invalid="ignore")
return self.U / np.where(self.I > 0, self.I, np.nan)
@property
def UN_err(self):
np.seterr(divide="ignore", invalid="ignore")
return self.U_err / np.where(self.I > 0, self.I, np.nan)
@property
def VN(self):
np.seterr(divide="ignore", invalid="ignore")
return self.V / np.where(self.I > 0, self.I, np.nan)
@property
def VN_err(self):
np.seterr(divide="ignore", invalid="ignore")
return self.V_err / np.where(self.I > 0, self.I, np.nan)
@property
def PF(self):
np.seterr(divide="ignore", invalid="ignore")
return np.sqrt(self.Q**2 + self.U**2 + self.V**2)
@property
def PF_err(self):
np.seterr(divide="ignore", invalid="ignore")
return np.sqrt(self.Q**2 * self.Q_err**2 + self.U**2 * self.U_err**2 + self.V**2 * self.V_err**2) / np.where(self.PF > 0, self.PF, np.nan)
@property
def P(self):
np.seterr(divide="ignore", invalid="ignore")
return self.PF / np.where(self.I > 0, self.I, np.nan)
@property
def P_err(self):
np.seterr(divide="ignore", invalid="ignore")
return np.where(self.I > 0, np.sqrt(self.PF_err**2 + (self.PF / self.I) ** 2 * self.I_err**2) / self.I, np.nan)
@property
def PA(self):
return princ_angle((90.0 / np.pi) * np.arctan2(self.U, self.Q))
@property
def PA_err(self):
return princ_angle((90.0 / np.pi) * np.sqrt(self.U**2 * self.Q_err**2 + self.Q**2 * self.U_err**2) / np.where(self.PF > 0, self.PF**2, np.nan))
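# The properties above implement the usual polarimetric algebra: the polarized flux
# PF = sqrt(Q^2 + U^2 + V^2), the degree P = PF / I, the angle PA = 0.5*arctan2(U, Q)
# in degrees, each with first-order error propagation from the IQUV_cov diagonal.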
def rotate(self, angle):
alpha = np.pi / 180.0 * angle
mrot = np.array(
[
[1.0, 0.0, 0.0, 0.0],
[0.0, np.cos(2.0 * alpha), np.sin(2.0 * alpha), 0.0],
[0.0, -np.sin(2.0 * alpha), np.cos(2.0 * alpha), 0.0],
[0.0, 0.0, 0.0, 1.0],
]
)
self.I, self.Q, self.U, self.V = np.dot(mrot, np.array([self.I, self.Q, self.U, self.V]))
self.IQUV_cov = np.dot(mrot, np.dot(self.IQUV_cov.T, mrot.T).T)
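# rotate applies a Mueller rotation by `angle` degrees: Q and U mix through
# cos(2*alpha) and sin(2*alpha) while I and V are left invariant, and the
# covariance transforms consistently as M C M^T.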
def bin(self, bin_edges):
"""
Rebin spectra to given list of bin edges.
"""
# Get new binning distribution and define new empty spectra
in_bin = np.digitize(self.wav, bin_edges) - 1
out = specpol(bin_edges.shape[0] - 1)
if hasattr(self, "I_r"):
# Propagate "raw" flux spectra to new bin
out.I_r = deepcopy(self.I_r)
out.I_r_err = deepcopy(self.I_r_err)
out.wav_r = deepcopy(self.wav_r)
out.wav_r_err = deepcopy(self.wav_r_err)
else:
# Create "raw" flux spectra from previously unbinned spectra
out.I_r = deepcopy(self.I[self.I > 0.0])
out.I_r_err = deepcopy(self.I_err[self.I > 0.0])
out.wav_r = deepcopy(self.wav[self.I > 0.0])
out.wav_r_err = deepcopy(self.wav_err[self.I > 0.0])
for i in range(bin_edges.shape[0] - 1):
# Set the wavelength as the mean wavelength of acquisitions in bin, default to the bin center
out.wav[i] = np.mean(self.wav[in_bin == i]) if np.any(in_bin == i) else 0.5 * (bin_edges[i] + bin_edges[i + 1])
out.wav_err[i] = (out.wav[i] - bin_edges[i], bin_edges[i + 1] - out.wav[i])
if self.hd["DENSITY"] and np.any(in_bin == i):
# If flux density, convert to flux before converting back to the new density
wav1 = np.abs(self.wav_err[in_bin == i]).sum(axis=1)
wav2 = np.abs(out.wav_err[i]).sum()
else:
wav1, wav2 = 1.0, 1.0
out.I[i] = np.sum(self.I[in_bin == i] * wav1) / wav2 if np.any(in_bin == i) else 0.0
out.Q[i] = np.sum(self.Q[in_bin == i] * wav1) / wav2 if np.any(in_bin == i) else 0.0
out.U[i] = np.sum(self.U[in_bin == i] * wav1) / wav2 if np.any(in_bin == i) else 0.0
out.V[i] = np.sum(self.V[in_bin == i] * wav1) / wav2 if np.any(in_bin == i) else 0.0
for m in range(4):
# Quadratically sum the uncertainties
out.IQUV_cov[m][m][i] = np.sum(self.IQUV_cov[m][m][in_bin == i] * wav1**2) / wav2**2 if np.any(in_bin == i) else 0.0
for n in [k for k in range(4) if k != m]:
out.IQUV_cov[m][n][i] = np.sqrt(np.sum((self.IQUV_cov[m][n][in_bin == i] * wav1) ** 2)) / wav2 if np.any(in_bin == i) else 0.0
# Update bin edges and header
out.bin_edges = bin_edges
out.hd = deepcopy(self.hd)
out.hd["NAXIS1"] = bin_edges.shape[0] - 1
out.hd["DATAMIN"], out.hd["DATAMAX"] = out.I.min(), out.I.max()
out.hd["MINWAV"], out.hd["MAXWAV"] = out.wav.min(), out.wav.max()
out.hd["STEPWAV"] = np.max(bin_edges[1:] - bin_edges[:-1])
return out
def bin_size(self, size):
"""
Rebin spectra to selected bin size in Angstrom.
"""
bin_edges = np.arange(self.bin_edges.min(), self.bin_edges.max() + size, size, dtype=np.float32)
return self.bin(bin_edges)
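# Usage sketch (hypothetical spectrum): rebinning to 2 Angstrom wide bins.
#   binned = spec.bin_size(2.0)
# When the spectrum is a flux density, bin() integrates over the old bin widths
# before renormalising by the new one, so total flux is conserved.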
def from_txt(self, filename, data_dir=""):
"""
Fill current spectra from a text file.
"""
data_dump = np.loadtxt(join_path(data_dir, filename), skiprows=1).T
self.zero(data_dump.shape[1])
(self.wav, self.wav_err[:, 0], self.I, self.IQUV_cov[0, 0], self.Q, self.IQUV_cov[1, 1], self.U, self.IQUV_cov[2, 2], self.V, self.IQUV_cov[3, 3]) = (
data_dump[:10]
)
self.wav_err[:, 1] = deepcopy(self.wav_err[:, 0])
self.bin_edges[:-1], self.bin_edges[-1] = deepcopy(self.wav - self.wav_err[:, 0]), deepcopy(self.wav[-1] + self.wav_err[-1, 1])
for i in range(4):
self.IQUV_cov[i][i] = deepcopy(self.IQUV_cov[i][i]) ** 2
with open(join_path(data_dir, filename)) as f:
self.hd["TARGNAME"], self.hd["PROPOSID"], self.hd["ROOTNAME"], self.hd["APER_ID"], self.hd["XUNIT"], self.hd["YUNIT"] = f.readline()[2:].split(";")
def dump_txt(self, filename, output_dir=""):
"""
Dump current spectra to a text file.
"""
header = ";".join([self.hd["TARGNAME"], str(self.hd["PROPOSID"]), self.hd["ROOTNAME"], self.hd["APER_ID"], self.hd["XUNIT"], self.hd["YUNIT"]])
header += "\nwav\t wav_err\t I\t I_err\t Q\t Q_err\t U\t U_err\t V\t V_err\t P\t P_err\t PA\t PA_err"
data_dump = np.array(
[
self.wav,
self.wav_err.mean(axis=1),
self.I,
self.I_err,
self.Q,
self.Q_err,
self.U,
self.U_err,
self.V,
self.V_err,
self.P,
self.P_err,
self.PA,
self.PA_err,
]
).T
np.savetxt(join_path(output_dir, filename + ".txt"), data_dump, header=header)
return join_path(output_dir, filename)
def plot(self, fig=None, ax=None, savename=None, plots_folder=""):
"""
Display current spectra.
"""
if fig is None:
plt.rcParams.update({"font.size": 15})
if ax is None:
self.fig, self.ax = plt.subplots(3, 1, sharex=True, figsize=(20, 15))
self.fig.subplots_adjust(hspace=0)
self.fig.suptitle("_".join([self.hd["TARGNAME"], str(self.hd["PROPOSID"]), self.hd["ROOTNAME"], self.hd["APER_ID"]]))
else:
self.ax = ax
else:
if ax is None:
self.fig = fig
self.ax = self.fig.add_subplot(111)
else:
self.fig = fig
self.ax = ax
if isinstance(self.ax, np.ndarray):
if self.ax.shape[0] == 2:
ax1, ax2 = self.ax[:2]
ax22 = ax2.twinx()
ax2.set_xlabel(self.hd["XUNIT"])
secax1 = ax1.secondary_xaxis("top", functions=(self.rest, self.unrest))
secax1.set_xlabel(r"Rest " + self.hd["XUNIT"])
else:
ax1, ax2, ax22 = self.ax[::-1]
else:
ax1 = self.ax
# Display flux and polarized flux on first ax
if hasattr(self, "I_r"):
# If available, display "raw" total flux
yoffset = np.floor(np.log10(self.I_r[self.I_r > 0.0].min())).astype(int)
yoff = 10.0**yoffset
ymin, ymax = (
np.min((self.I_r - 1.5 * self.I_r_err)[self.I_r > 1.5 * self.I_r_err]) / yoff,
np.max((self.I_r + self.I_r_err * 1.5)[self.I_r > 1.5 * self.I_r_err]) / yoff,
)
xmin, xmax = np.min(self.wav_r - self.wav_r_err[:, 0]), np.max(self.wav_r + self.wav_r_err[:, 1])
ax1.errorbar(self.wav_r, self.I_r / yoff, xerr=self.wav_r_err.T, yerr=self.I_r_err / yoff, color="k", fmt=".", label="I")
else:
yoffset = np.floor(np.log10(self.I[self.I > 0.0].min())).astype(int)
yoff = 10.0**yoffset
ymin, ymax = (
np.min((self.I - 1.5 * self.I_err)[self.I > 1.5 * self.I_err]) / yoff,
np.max((self.I + self.I_err * 1.5)[self.I > 1.5 * self.I_err]) / yoff,
)
xmin, xmax = np.min(self.wav - self.wav_err[:, 0]), np.max(self.wav + self.wav_err[:, 1])
ax1.errorbar(self.wav, self.I / yoff, xerr=self.wav_err.T, yerr=self.I_err / yoff, color="k", fmt=".", label="I")
ax1.set_xlim([np.min([xmin, self.bin_edges.min()]), np.max([xmax, self.bin_edges.max()])])
ax1.set_xlabel(self.hd["XUNIT"])
ax1.set_ylim([ymin, ymax])
ax1.set_ylabel(self.hd["YUNIT"].format("", yoffset))
# ax11 = ax1.twinx()
# pfoffset = np.floor(np.log10(self.PF[self.PF > 0.0].min())).astype(int)
# pfoff = 10.0**pfoffset
# ax11.errorbar(self.wav, self.PF / pfoff, xerr=self.wav_err.T, yerr=self.PF_err / pfoff, color="b", fmt=".", label="PF")
# ax11.set_ylim(
# [
# ymin * yoff * self.P[np.logical_and(self.P > 0.0, np.isfinite(self.P))].min() / pfoff,
# ymax * yoff * self.P[np.logical_and(self.P > 0.0, np.isfinite(self.P))].max() / pfoff,
# ]
# )
# ax11.set_ylabel(self.hd["YUNIT"].format("Px", pfoffset), color="b")
# ax11.tick_params(axis="y", color="b", labelcolor="b")
# ax1.legend(ncols=2, loc=1)
if isinstance(self.ax, np.ndarray):
# When given 2 axes, display P and PA on second
ax2.errorbar(self.wav, self.P * 100.0, xerr=self.wav_err.T, yerr=self.P_err * 100.0, color="b", fmt=".", label="P")
pmin, pmax = (
np.min(self.P[self.I > 0.0] - 1.5 * self.P_err[self.I > 0.0]) * 100.0,
np.max(self.P[self.I > 0.0] + 1.5 * self.P_err[self.I > 0.0]) * 100.0,
)
ax2.set_ylim([pmin if pmin > 0.0 else 0.0, pmax if pmax < 100.0 else 100.0])
ax2.set_ylabel(r"P [%]", color="b")
ax2.tick_params(axis="y", color="b", labelcolor="b")
ax22.errorbar(self.wav, self.PA, xerr=self.wav_err.T, yerr=self.PA_err, color="r", fmt=".", label="PA [°]")
pamin, pamax = np.min(self.PA[self.I > 0.0] - 1.5 * self.PA_err[self.I > 0.0]), np.max(self.PA[self.I > 0.0] + 1.5 * self.PA_err[self.I > 0.0])
ax22.set_ylim([pamin if pamin > 0.0 else 0.0, pamax if pamax < 180.0 else 180.0])
ax22.set_ylabel(r"PA [°]", color="r")
ax22.tick_params(axis="y", color="r", labelcolor="r")
secax22 = ax22.secondary_xaxis("top", functions=(self.rest, self.unrest))
secax22.set_xlabel(r"Rest " + self.hd["XUNIT"])
h2, l2 = ax2.get_legend_handles_labels()
h22, l22 = ax22.get_legend_handles_labels()
if self.ax.shape[0] == 2:
ax2.legend(h2 + h22, l2 + l22, ncols=2, loc=1)
if hasattr(self, "fig") and savename is not None:
self.fig.savefig(join_path(plots_folder, savename + ".pdf"), dpi=300, bbox_inches="tight")
return self.fig, self.ax, join_path(plots_folder, savename + ".pdf")
elif hasattr(self, "fig"):
return self.fig, self.ax
else:
return self.ax
def __add__(self, other):
"""
Spectra addition, if not same binning concatenate both spectra binning.
"""
if (self.bin_edges.shape == other.bin_edges.shape) and np.all(self.bin_edges == other.bin_edges):
bin_edges = deepcopy(self.bin_edges)
else:
# If different binning, concatenate binnings
if self.bin_edges[0] <= other.bin_edges[0]:
bin_edges = deepcopy(self.bin_edges)
else:
bin_edges = deepcopy(other.bin_edges)
if other.bin_edges[-1] > bin_edges[-1]:
bin_edges = np.concat((bin_edges, deepcopy(other.bin_edges[other.bin_edges > bin_edges[-1]])), axis=0)
elif self.bin_edges[-1] > bin_edges[-1]:
bin_edges = np.concat((bin_edges, deepcopy(self.bin_edges[self.bin_edges > bin_edges[-1]])), axis=0)
# Rebin spectra to be added to ensure same binning
spec_a = specpol(specpol(self).bin(bin_edges=bin_edges))
spec_b = specpol(specpol(other).bin(bin_edges=bin_edges))
# Create sum spectra
spec = specpol(bin_edges.shape[0] - 1)
spec.hd = deepcopy(self.hd)
spec.bin_edges = bin_edges
spec.wav = np.mean([spec_a.wav, spec_b.wav], axis=0)
spec.wav_err = np.array([spec.wav - spec.bin_edges[:-1], spec.bin_edges[1:] - spec.wav]).T
# Propagate "raw" flux spectra to sum
if hasattr(self, "I_r") and hasattr(other, "I_r"):
# Deal with the concatenation of the "raw" total flux spectra
if self.wav_r[0] <= other.wav_r[0]:
inter = other.wav_r[0], self.wav_r[-1]
spec.wav_r = deepcopy(np.concat((self.wav_r, other.wav_r[other.wav_r > self.wav_r[-1]])))
spec.wav_r_err = deepcopy(np.concat((self.wav_r_err, other.wav_r_err[other.wav_r > self.wav_r[-1]]), axis=0))
spec.I_r = deepcopy(np.concat((self.I_r, other.I_r[other.wav_r > self.wav_r[-1]])))
spec.I_r_err = deepcopy(np.concat((self.I_r_err, other.I_r_err[other.wav_r > self.wav_r[-1]]), axis=0))
else:
inter = self.wav_r[0], other.wav_r[-1]
spec.wav_r = deepcopy(np.concat((other.wav_r, self.wav_r[self.wav_r > other.wav_r[-1]])))
spec.wav_r_err = deepcopy(np.concat((other.wav_r_err, self.wav_r_err[self.wav_r > other.wav_r[-1]]), axis=0))
spec.I_r = deepcopy(np.concat((other.I_r, self.I_r[self.wav_r > other.wav_r[-1]])))
spec.I_r_err = deepcopy(np.concat((other.I_r_err, self.I_r_err[self.wav_r > other.wav_r[-1]]), axis=0))
# When both spectra intersect, compute intersection as the mean
edges = np.concat((spec.wav_r - spec.wav_r_err[:, 0], [spec.wav_r[-1] + spec.wav_r_err[-1, 1]]))
edges.sort()
bin, bino = np.digitize(self.wav_r, edges) - 1, np.digitize(other.wav_r, edges) - 1
for w in np.arange(spec.wav_r.shape[0])[np.logical_and(spec.wav_r >= inter[0], spec.wav_r <= inter[1])]:
if self.hd["DENSITY"] and np.any(bin == w):
# If flux density, convert to flux before converting back to the new density
wav, wavo = (
np.abs(self.wav_r_err[bin == w]).sum(axis=1) * (self.I_r[bin == w] > self.I_r_err[bin == w]),
np.abs(other.wav_r_err[bino == w]).sum(axis=1) * (other.I_r[bino == w] > other.I_r_err[bino == w]),
)
wavs = np.abs(spec.wav_r_err[w]).sum()
else:
wav, wavo, wavs = 1.0, 1.0, 1.0
n = np.sum(self.I_r[bin == w] > self.I_r_err[bin == w]) + np.sum(other.I_r[bino == w] > other.I_r_err[bino == w])
spec.I_r[w] = np.sum(np.concat([self.I_r[bin == w] * wav, other.I_r[bino == w] * wavo])) / wavs / n
spec.I_r_err[w] = np.sqrt(np.sum(np.concat([self.I_r_err[bin == w] ** 2 * wav**2, other.I_r_err[bino == w] ** 2 * wavo**2]))) / wavs / n
# Sum stokes fluxes
spec.I = deepcopy(spec_a.I + spec_b.I)
spec.Q = deepcopy(spec_a.Q + spec_b.Q)
spec.U = deepcopy(spec_a.U + spec_b.U)
spec.V = deepcopy(spec_a.V + spec_b.V)
# Quadratically sum uncertainties
for i in range(4):
spec.IQUV_cov[i][i] = deepcopy(spec_a.IQUV_cov[i][i] + spec_b.IQUV_cov[i][i])
for j in [k for k in range(4) if k != i]:
spec.IQUV_cov[i][j] = deepcopy(np.sqrt(spec_a.IQUV_cov[i][j] ** 2 + spec_b.IQUV_cov[i][j] ** 2))
# Update header to reflect sum
spec.hd["DATAMIN"], spec.hd["DATAMAX"] = spec.I.min(), spec.I.max()
spec.hd["MINWAV"], spec.hd["MAXWAV"] = spec.wav.min(), spec.wav.max()
spec.hd["EXPTIME"] = spec_a.hd["EXPTIME"] + spec_b.hd["EXPTIME"]
rootnames = [spec_a.hd["ROOTNAME"], spec_b.hd["ROOTNAME"]]
spec.hd["ROOTNAME"] = "".join(p for p, *r in zip(*rootnames) if all(p == c for c in r)) + "_SUM"
return spec
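# Usage sketch: `spec_sum = spec_a + spec_b` rebins both operands onto the union of
# their bin edges, sums the Stokes fluxes, adds the diagonal variances directly and
# combines the off-diagonal covariance terms in quadrature.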
def __deepcopy__(self, memo={}):
spec = specpol(self)
spec.__dict__.update(self.__dict__)
spec.hd = deepcopy(self.hd, memo)
spec.bin_edges = deepcopy(spec.bin_edges, memo)
spec.wav = deepcopy(self.wav, memo)
spec.wav_err = deepcopy(self.wav_err, memo)
spec.I = deepcopy(self.I, memo)
spec.Q = deepcopy(self.Q, memo)
spec.U = deepcopy(self.U, memo)
spec.V = deepcopy(self.V, memo)
spec.IQUV_cov = deepcopy(self.IQUV_cov, memo)
return spec
class FOSspecpol(specpol):
"""
Class object for studying FOS SPECTROPOLARIMETRY mode spectra.
"""
def __init__(self, stokes, data_folder=""):
"""
Initialise object from fits filename, fits hdulist or copy.
"""
if isinstance(stokes, __class__):
# Copy constructor
self.rootname = deepcopy(stokes.rootname)
self.hd = deepcopy(stokes.hd)
self.bin_edges = deepcopy(stokes.bin_edges)
self.wav = deepcopy(stokes.wav)
self.wav_err = deepcopy(stokes.wav_err)
self.I = deepcopy(stokes.I)
self.Q = deepcopy(stokes.Q)
self.U = deepcopy(stokes.U)
self.V = deepcopy(stokes.V)
self.IQUV_cov = deepcopy(stokes.IQUV_cov)
self.P_fos = deepcopy(stokes.P_fos)
self.P_fos_err = deepcopy(stokes.P_fos_err)
self.PC_fos = deepcopy(stokes.PC_fos)
self.PC_fos_err = deepcopy(stokes.PC_fos_err)
self.PA_fos = deepcopy(stokes.PA_fos)
self.PA_fos_err = deepcopy(stokes.PA_fos_err)
self.subspec = {}
for name in ["PASS1", "PASS2", "PASS12", "PASS12corr"]:
spec = deepcopy(stokes.subspec[name])
self.subspec[name] = spec
elif stokes is None or isinstance(stokes, int):
self.zero(n=stokes if stokes is not None else 1)
else:
self.from_file(stokes, data_folder=data_folder)
def zero(self, n=1):
"""
Set all values to zero.
"""
self.rootname = ""
self.hd = dict([])
self.hd["DENSITY"] = True
self.hd["TARGNAME"] = "Undefined"
self.hd["XUNIT"], self.hd["YUNIT"] = r"Wavelength [$\AA$]", r"{0:s}F$_\lambda$ [$10^{{{1:d}}}$ erg s$^{{-1}}$ cm$^{{-2}} \AA^{{-1}}$]"
self.bin_edges = np.zeros((4, n + 1))
self.wav = np.zeros((4, n))
self.wav_err = np.zeros((4, n, 2))
self.I = np.zeros((4, n))
self.Q = np.zeros((4, n))
self.U = np.zeros((4, n))
self.V = np.zeros((4, n))
self.IQUV_cov = np.zeros((4, 4, 4, n))
self.subspec = {}
for i, name in enumerate(["PASS1", "PASS2", "PASS12", "PASS12corr"]):
spec = specpol(n)
spec.hd, spec.wav, spec.wav_err, spec.I, spec.Q, spec.U, spec.V = self.hd, self.wav[i], self.wav_err[i], self.I[i], self.Q[i], self.U[i], self.V[i]
spec.IQUV_cov = self.IQUV_cov[:, :, i, :]
self.subspec[name] = spec
self.P_fos = np.zeros(self.I.shape)
self.P_fos_err = np.zeros(self.I.shape)
self.PC_fos = np.zeros(self.I.shape)
self.PC_fos_err = np.zeros(self.I.shape)
self.PA_fos = np.zeros(self.I.shape)
self.PA_fos_err = np.zeros(self.I.shape)
def from_file(self, stokes, data_folder=""):
"""
Initialise object from fits file or hdulist.
"""
if isinstance(stokes, str):
self.rootname = stokes.split("_")[0]
self.hd = dict(getheader(join_path(data_folder, self.rootname + "_c3f.fits")))
wav = getdata(join_path(data_folder, self.rootname + "_c0f.fits"))
stokes = getdata(join_path(data_folder, self.rootname + "_c3f.fits"))
elif isinstance(stokes, hdu.hdulist.HDUList):
self.hd = dict(stokes.header)
self.rootname = self.hd["FILENAME"].split("_")[0]
wav = getdata(join_path(data_folder, self.rootname + "_c0f"))
stokes = stokes.data
else:
raise ValueError("Input must be a path to a fits file or an HDUlist")
# FOS spectra are given in flux density with respect to angstrom wavelength
self.hd["DENSITY"] = True
self.hd["XUNIT"], self.hd["YUNIT"] = r"Wavelength [$\AA$]", r"{0:s}F$_\lambda$ [$10^{{{1:d}}}$ erg s$^{{-1}}$ cm$^{{-2}} \AA^{{-1}}$]"
# We set the error to be half the distance to the next measurement
self.wav = np.concat((wav[0:2, :], wav[0].reshape(1, wav.shape[1]), wav[0].reshape(1, wav.shape[1])), axis=0)
self.wav_err = np.zeros((self.wav.shape[0], self.wav.shape[1], 2))
for i in range(1, self.wav.shape[1] - 1):
self.wav_err[:, i] = np.abs(
np.array([((self.wav[j][i] - self.wav[j][i - 1]) / 2.0, (self.wav[j][i + 1] - self.wav[j][i - 1]) / 2.0) for j in range(self.wav.shape[0])])
)
self.wav_err[:, 0] = np.array([self.wav_err[:, 1, 0], self.wav_err[:, 1, 0]]).T
self.wav_err[:, -1] = np.array([self.wav_err[:, -2, 1], self.wav_err[:, -2, 1]]).T
self.hd["MINWAV"], self.hd["MAXWAV"] = self.wav.min(), self.wav.max()
self.hd["STEPWAV"] = np.mean(self.wav_err) * 2.0
self.bin_edges = np.array(
[np.concat((self.wav[i] - self.wav_err[i, :, 0], [self.wav[i, -1] + self.wav_err[i, -1, -1]]), axis=0) for i in range(self.wav.shape[0])]
)
self.IQUV_cov = np.zeros((4, 4, self.wav.shape[0], self.wav.shape[1]))
# Special way of reading FOS spectropolarimetry fits files
self.I = stokes[0::14]
self.IQUV_cov[0, 0] = stokes[4::14] ** 2
self.Q = stokes[1::14]
self.IQUV_cov[1, 1] = stokes[5::14] ** 2
self.U = stokes[2::14]
self.IQUV_cov[2, 2] = stokes[6::14] ** 2
self.V = stokes[3::14]
self.IQUV_cov[3, 3] = stokes[7::14] ** 2
self.hd["DATAMIN"], self.hd["DATAMAX"] = self.I.min(), self.I.max()
# Each file contains 4 spectra: pass 1, pass 2, the combination of the two, and the combination corrected for orientation and background
self.subspec = {}
for i, name in enumerate(["PASS1", "PASS2", "PASS12", "PASS12corr"]):
spec = specpol(self.wav[i].shape[0])
spec.hd, spec.wav, spec.wav_err, spec.I, spec.Q, spec.U, spec.V = self.hd, self.wav[i], self.wav_err[i], self.I[i], self.Q[i], self.U[i], self.V[i]
spec.bin_edges = np.concat((spec.wav - spec.wav_err[:, 0], [spec.wav[-1] + spec.wav_err[-1, 1]]), axis=0)
spec.hd["MINWAV"], spec.hd["MAXWAV"] = spec.wav.min(), spec.wav.max()
spec.hd["DATAMIN"], spec.hd["DATAMAX"] = spec.I.min(), spec.I.max()
spec.IQUV_cov = self.IQUV_cov[:, :, i, :]
# Only PASS12corr is corrected for telescope orientation
spec.rotate(-(name[-4:] != "corr") * spec.hd["PA_APER"])
self.subspec[name] = spec
# Following lines contain the polarization components computed by calfos
self.P_fos = stokes[8::14]
self.P_fos_err = stokes[11::14]
self.PC_fos = stokes[9::14]
self.PC_fos_err = stokes[12::14]
self.PA_fos = princ_angle(
np.degrees(stokes[10::14]) + np.concat((np.ones((3, stokes.shape[1])), np.zeros((1, stokes.shape[1]))), axis=0) * self.hd["PA_APER"]
)
self.PA_fos_err = princ_angle(np.degrees(stokes[13::14]))
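# Record layout assumed by the strided reads above: each c3f spectrum interleaves
# 14 rows per pass, rows 0-3 holding I, Q, U, V, rows 4-7 their errors, rows 8-10
# the calfos P, PC and PA, and rows 11-13 the corresponding errors.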
def dump_txt(self, filename, spec_list=None, output_dir=""):
"""
Dump current spectra to a text file.
"""
outfiles = []
if spec_list is None:
spec_list = self.subspec
for i, name in enumerate(["PASS1", "PASS2", "PASS12", "PASS12corr"]):
outfiles.append(spec_list[name].dump_txt(filename="_".join([filename, name]), output_dir=output_dir))
return outfiles
def plot(self, spec_list=None, savename=None, plots_folder="", fos=False):
"""
Display current spectra in single figure.
"""
outfiles = []
if hasattr(self, "ax"):
del self.ax
if hasattr(self, "fig"):
del self.fig
if spec_list is None:
spec_list = self.subspec
self.fig, self.ax = plt.subplots(4, 2, sharex=True, sharey="col", figsize=(20, 10), layout="constrained")
for i, (name, title) in enumerate(
[("PASS1", "Pass Direction 1"), ("PASS2", "Pass Direction 2"), ("PASS12", "Pass Direction 1&2"), ("PASS12corr", "Pass Direction 1&2 corrected")]
):
self.ax[i][0].set_title(title)
if fos:
self.ax[i][0] = spec_list[name].plot(ax=self.ax[i][0])
self.ax[i][1].set_xlabel(r"Wavelength [$\AA$]")
secax1 = self.ax[i][0].secondary_xaxis("top", functions=(self.rest, self.unrest))
secax1.set_xlabel(r"Rest wavelength [$\AA$]")
secax2 = self.ax[i][1].secondary_xaxis("top", functions=(self.rest, self.unrest))
secax2.set_xlabel(r"Rest wavelength [$\AA$]")
self.ax[i][1].errorbar(self.wav[i], self.P_fos[i], xerr=self.wav_err[i].T, yerr=self.P_fos_err[i], color="b", fmt=".", label="P_fos")
self.ax[i][1].set_ylim([0.0, 1.0])
self.ax[i][1].set_ylabel(r"P", color="b")
self.ax[i][1].tick_params(axis="y", color="b", labelcolor="b")
ax22 = self.ax[i][1].twinx()
ax22.errorbar(self.wav[i], self.PA_fos[i], xerr=self.wav_err[i].T, yerr=self.PA_fos_err[i], color="r", fmt=".", label="PA_fos [°]")
ax22.set_ylim([0.0, 180.0])
ax22.set_ylabel(r"PA", color="r")
ax22.tick_params(axis="y", color="r", labelcolor="r")
h2, l2 = self.ax[i][1].get_legend_handles_labels()
h22, l22 = ax22.get_legend_handles_labels()
self.ax[i][1].legend(h2 + h22, l2 + l22, ncols=2, loc=1)
else:
self.ax[i] = spec_list[name].plot(ax=self.ax[i])
# self.ax[0][0].set_ylim(ymin=0.0)
self.fig.suptitle("_".join([self.hd["TARGNAME"], str(self.hd["PROPOSID"]), self.hd["ROOTNAME"], self.hd["APER_ID"]]))
if savename is not None:
self.fig.savefig(join_path(plots_folder, savename + ".pdf"), dpi=300, bbox_inches="tight")
outfiles.append(join_path(plots_folder, savename + ".pdf"))
return outfiles
def bin_size(self, size):
"""
Rebin spectra to selected bin size in Angstrom.
"""
key = "{0:.2f}bin".format(size)
if key not in self.subspec.keys():
self.subspec[key] = dict([])
for name in ["PASS1", "PASS2", "PASS12", "PASS12corr"]:
self.subspec[key][name] = self.subspec[name].bin_size(size)
return self.subspec[key]
def __add__(self, other):
"""
Spectra addition, if not same binning default to self bins.
"""
spec_a = FOSspecpol(self)
if np.all(self.wav == other.wav):
spec_b = other
else:
bin_edges = np.zeros(spec_a.wav.shape[0] + 1)
bin_edges[:-1], bin_edges[-1] = spec_a.wav - spec_a.wav_err[:, 0], spec_a.wav[-1] + spec_a.wav_err[-1, 1]
spec_b = other.bin(bin_edges=bin_edges)
spec_a.I += deepcopy(spec_b.I)
spec_a.Q += deepcopy(spec_b.Q)
spec_a.U += deepcopy(spec_b.U)
spec_a.V += deepcopy(spec_b.V)
spec_a.IQUV_cov += deepcopy(spec_b.IQUV_cov)
for name in ["PASS1", "PASS2", "PASS12", "PASS12corr"]:
spec_a.subspec[name] += deepcopy(spec_b.subspec[name])
spec_a.subspec[name].hd["DATAMIN"], spec_a.subspec[name].hd["DATAMAX"] = spec_a.subspec[name].I.min(), spec_a.subspec[name].I.max()
spec_a.subspec[name].hd["EXPTIME"] += spec_b.subspec[name].hd["EXPTIME"]
spec_a.subspec[name].hd["ROOTNAME"] += "+" + spec_b.subspec[name].hd["ROOTNAME"]
spec_a.hd["DATAMIN"], spec_a.hd["DATAMAX"] = spec_a.I.min(), spec_a.I.max()
spec_a.hd["EXPTIME"] += spec_b.hd["EXPTIME"]
spec_a.hd["ROOTNAME"] += "+" + spec_b.hd["ROOTNAME"]
return spec_a
def __deepcopy__(self, memo):
spec = FOSspecpol(self.wav.shape[0])
spec.__dict__.update(self.__dict__)
for key in self.subspec.keys():
spec.subspec[key] = deepcopy(self.subspec[key])
return spec
def __del__(self):
if hasattr(self, "ax"):
del self.ax
if hasattr(self, "fig"):
del self.fig
def main(infiles, bin_size=None, output_dir=None):
"""
Produce (binned and summed) spectra for a list of given fits files.
"""
outfiles = []
if infiles is not None:
# Divide path in folder + filename
prod = np.array([["/".join(filepath.split("/")[:-1]), filepath.split("/")[-1]] for filepath in infiles], dtype=str)
obs_dir = np.unique(["/".join(file.split("/")[:-1]) for file in infiles])
for dir in obs_dir:
# Create missing data/plot folders for tidiness
if not path_exists(dir):
system("mkdir -p {0:s} {1:s}".format(dir, dir.replace("data", "plots")))
else:
print("Must input files to process.")
return 1
data_folder = np.unique(prod[:, 0])
if output_dir is None:
output_dir = data_folder[0]
plots_folder = output_dir.replace("data", "plots")
if not path_exists(plots_folder):
system("mkdir -p {0:s} ".format(plots_folder))
aper = dict([])
roots = np.unique([p[1].split("_")[0] for p in prod])
# Iteration on each observation in infiles
for rootname in roots:
print(rootname)
if data_folder.shape[0] > 1:
# For multiple folders (multiple filters) match data_folder on file rootname
spec = FOSspecpol(rootname, prod[np.array([p[1].split("_")[0] == rootname for p in prod])][0, 0])
else:
spec = FOSspecpol(rootname, data_folder[0])
filename = "_".join([spec.hd["TARGNAME"], "FOS", str(spec.hd["PROPOSID"]), spec.rootname, spec.hd["APER_ID"]])
if bin_size is not None:
key = "{0:.2f}bin".format(bin_size)
spec.bin_size(bin_size)
# Only output binned spectra
outfiles += spec.dump_txt("_".join([filename, key]), spec_list=spec.subspec[key], output_dir=output_dir)
outfiles += spec.plot(savename="_".join([filename, key]), spec_list=spec.subspec[key], plots_folder=plots_folder)
# Save corrected and combined pass for later summation, only sum on same aperture
if spec.hd["APER_ID"] in aper.keys():
aper[str(spec.hd["APER_ID"])].append(specpol(spec.subspec[key]["PASS12corr"]))
else:
aper[str(spec.hd["APER_ID"])] = [specpol(spec.subspec[key]["PASS12corr"])]
else:
outfiles += spec.dump_txt(filename, output_dir=output_dir)
outfiles += spec.plot(savename=filename, plots_folder=plots_folder)
if spec.hd["APER_ID"] in aper.keys():
aper[str(spec.hd["APER_ID"])].append(specpol(spec.subspec["PASS12corr"]))
else:
aper[str(spec.hd["APER_ID"])] = [specpol(spec.subspec["PASS12corr"])]
plt.close("all")
# Sum spectra acquired through same aperture
for key in aper.keys():
rootnames = [s.hd["ROOTNAME"] for s in aper[key]]
print(*rootnames)
spec = np.sum(aper[key])
spec.hd["ROOTNAME"] = "".join(p for p, *r in zip(*rootnames) if all(p == c for c in r)) + "_SUM"
filename = "_".join([spec.hd["TARGNAME"], "FOS", str(spec.hd["PROPOSID"]), spec.hd["ROOTNAME"]])
if bin_size is not None:
filename += "_{0:.2f}bin".format(bin_size)
# Output summed spectra
outfiles.append(spec.dump_txt("_".join([filename, key]), output_dir=output_dir))
outfiles.append(spec.plot(savename="_".join([filename, key]), plots_folder=plots_folder)[2])
plt.show()
return outfiles
if __name__ == "__main__":
import argparse
parser = argparse.ArgumentParser(description="Display and dump FOS Spectropolarimetry")
parser.add_argument("-f", "--files", metavar="path", required=False, nargs="*", help="the full or relative path to the data products", default=None)
parser.add_argument("-b", "--bin", metavar="bin_size", required=False, help="The bin size to resample spectra", type=float, default=None)
parser.add_argument(
"-o", "--output_dir", metavar="directory_path", required=False, help="output directory path for the data products", type=str, default=None
)
args = parser.parse_args()
exitcode = main(infiles=args.files, bin_size=args.bin, output_dir=args.output_dir)
print("Written to: ", exitcode)

View File

@@ -1,7 +0,0 @@
# Requirements for FOC_Reduction
numpy
scipy
astropy
astroquery # To directly query the MAST archives
matplotlib >= 3.8 # 3.8 introduced deprecations on ContourSet