Merge branch 'testing' into 'main'

Add observation combination

See merge request t.barnouin/FOC_Reduction!1
Thibault Barnouin, 2024-07-04 12:32:57 +00:00
7 changed files with 544 additions and 305 deletions

package/Combine.py (new executable file, 142 lines)
View File

@@ -0,0 +1,142 @@
#!/usr/bin/python
# -*- coding:utf-8 -*-
# Project libraries
import numpy as np
def same_reduction(infiles):
"""
Test whether infiles are pipeline products reduced with the same parameters.
"""
from astropy.io.fits import open as fits_open
params = {"IQU": [], "TARGNAME": [], "BKG_SUB": [], "SAMPLING": [], "SMOOTHING": []}
for file in infiles:
with fits_open(file) as f:
# test for presence of I, Q, U images
datatype = []
for hdu in f:
try:
datatype.append(hdu.header["datatype"])
except KeyError:
pass
test_IQU = True
for look in ["I_stokes", "Q_stokes", "U_stokes", "IQU_cov_matrix"]:
test_IQU *= look in datatype
params["IQU"].append(test_IQU)
# look for information on reduction procedure
for key in ["TARGNAME", "BKG_SUB", "SAMPLING", "SMOOTHING"]:
try:
params[key].append(f[0].header[key])
except KeyError:
params[key].append("null")
result = np.all(params["IQU"])
for key in ["TARGNAME", "BKG_SUB", "SAMPLING", "SMOOTHING"]:
result *= np.unique(params[key]).size == 1
return result
def same_obs(infiles, data_folder):
"""
Group infiles into observations taken under the same proposal ID within 7 days of each other.
"""
import astropy.units as u
from astropy.io.fits import getheader
from astropy.table import Table
from astropy.time import Time, TimeDelta
headers = [getheader("/".join([data_folder,file])) for file in infiles]
files = {}
files["PROPOSID"] = np.array([str(head["PROPOSID"]) for head in headers],dtype=str)
files["ROOTNAME"] = np.array([head["ROOTNAME"].lower()+"_c0f.fits" for head in headers],dtype=str)
files["EXPSTART"] = np.array([Time(head["EXPSTART"],format='mjd') for head in headers])
products = Table(files)
new_infiles = []
for pid in np.unique(products["PROPOSID"]):
obs = products[products["PROPOSID"] == pid].copy()
close_date = np.unique(
[[np.abs(TimeDelta(obs["EXPSTART"][i].unix - date.unix, format="sec")) < 7.0 * u.d for i in range(len(obs))] for date in obs["EXPSTART"]], axis=0
)
if len(close_date) > 1:
for date in close_date:
new_infiles.append(list(products["ROOTNAME"][np.any([products["ROOTNAME"] == dataset for dataset in obs["ROOTNAME"][date]], axis=0)]))
else:
new_infiles.append(list(products["ROOTNAME"][products["PROPOSID"] == pid]))
return new_infiles
def combine_Stokes(infiles):
"""
Combine I, Q, U from different observations of the same object.
"""
print("not implemented yet")
return infiles
def main(infiles, target=None, output_dir="./data/"):
""" """
if target is None:
target = input("Target name:\n>")
if not same_reduction(infiles):
print("NOT SAME REDUC")
from FOC_reduction import main as FOC_reduction
prod = np.array([["/".join(filepath.split("/")[:-1]), filepath.split("/")[-1]] for filepath in infiles], dtype=str)
data_folder = prod[0][0]
infiles = [p[1] for p in prod]
# Reduction parameters
kwargs = {}
# Background estimation
kwargs["error_sub_type"] = "freedman-diaconis" # sqrt, sturges, rice, scott, freedman-diaconis (default) or shape (example (51, 51))
kwargs["subtract_error"] = 0.7
# Data binning
kwargs["pxsize"] = 0.1
kwargs["pxscale"] = "arcsec" # pixel, arcsec or full
# Smoothing
kwargs["smoothing_function"] = "combine" # gaussian_after, weighted_gaussian_after, gaussian, weighted_gaussian or combine
kwargs["smoothing_FWHM"] = 0.2 # If None, no smoothing is done
kwargs["smoothing_scale"] = "arcsec" # pixel or arcsec
# Polarization map output
kwargs["SNRp_cut"] = 3.0 # P measurments with SNR>3
kwargs["SNRi_cut"] = 1.0 # I measurments with SNR>30, which implies an uncertainty in P of 4.7%.
kwargs["flux_lim"] = 1e-19, 3e-17 # lowest and highest flux displayed on plot, defaults to bkg and maximum in cut if None
kwargs["scale_vec"] = 5
kwargs["step_vec"] = (
1 # plot all vectors in the array. if step_vec = 2, then every other vector will be plotted if step_vec = 0 then all vectors are displayed at full length
)
grouped_infiles = same_obs(infiles, data_folder)
print(grouped_infiles)
new_infiles = []
for i,group in enumerate(grouped_infiles):
new_infiles.append(FOC_reduction(target=target+"-"+str(i+1), infiles=["/".join([data_folder,file]) for file in group], interactive=True, **kwargs))
combined_Stokes = combine_Stokes(new_infiles)
else:
print("SAME REDUC")
combined_Stokes = combine_Stokes(infiles)
return combined_Stokes
if __name__ == "__main__":
import argparse
parser = argparse.ArgumentParser(description="Combine different observations of a single object")
parser.add_argument("-t", "--target", metavar="targetname", required=False, help="the name of the target", type=str, default=None)
parser.add_argument("-f", "--files", metavar="path", required=False, nargs="*", help="the full or relative path to the data products", default=None)
parser.add_argument(
"-o", "--output_dir", metavar="directory_path", required=False, help="output directory path for the data products", type=str, default="./data"
)
args = parser.parse_args()
exitcode = main(target=args.target, infiles=args.files, output_dir=args.output_dir)
print("Written to: ", exitcode)

View File

@@ -17,7 +17,7 @@ from lib.utils import princ_angle, sci_not
 from matplotlib.colors import LogNorm
-def main(target=None, proposal_id=None, infiles=None, output_dir="./data", crop=False, interactive=False):
+def main(target=None, proposal_id=None, infiles=None, output_dir="./data", crop=False, interactive=False, **kwargs):
 # Reduction parameters
 # Deconvolution
 deconvolve = False
@@ -36,13 +36,12 @@ def main(target=None, proposal_id=None, infiles=None, output_dir="./data", crop=
 # Background estimation
 error_sub_type = "freedman-diaconis" # sqrt, sturges, rice, scott, freedman-diaconis (default) or shape (example (51, 51))
-subtract_error = 1.0
+subtract_error = 0.7
 display_bkg = False
 # Data binning
-rebin = True
-pxsize = 2
-px_scale = "px" # pixel, arcsec or full
+pxsize = 0.1
+pxscale = "arcsec" # pixel, arcsec or full
 rebin_operation = "sum" # sum or average
 # Alignement
@@ -55,23 +54,45 @@ def main(target=None, proposal_id=None, infiles=None, output_dir="./data", crop=
 # Smoothing
 smoothing_function = "combine" # gaussian_after, weighted_gaussian_after, gaussian, weighted_gaussian or combine
-smoothing_FWHM = 1.5 # If None, no smoothing is done
-smoothing_scale = "px" # pixel or arcsec
+smoothing_FWHM = 0.2 # If None, no smoothing is done
+smoothing_scale = "arcsec" # pixel or arcsec
 # Rotation
-rotate_data = False # rotation to North convention can give erroneous results
-rotate_stokes = True
+rotate_North = True
 # Polarization map output
 SNRp_cut = 3.0 # P measurments with SNR>3
-SNRi_cut = 3.0 # I measurments with SNR>30, which implies an uncertainty in P of 4.7%.
-flux_lim = None # lowest and highest flux displayed on plot, defaults to bkg and maximum in cut if None
-vec_scale = 3
+SNRi_cut = 1.0 # I measurments with SNR>30, which implies an uncertainty in P of 4.7%.
+flux_lim = 1e-19, 3e-17 # lowest and highest flux displayed on plot, defaults to bkg and maximum in cut if None
+scale_vec = 5
 step_vec = 1 # plot all vectors in the array. if step_vec = 2, then every other vector will be plotted if step_vec = 0 then all vectors are displayed at full length
 # Pipeline start
+# Step 0:
+# Get parameters from kwargs
+for key, value in [
+["error_sub_type", error_sub_type],
+["subtract_error", subtract_error],
+["pxsize", pxsize],
+["pxscale", pxscale],
+["smoothing_function", smoothing_function],
+["smoothing_FWHM", smoothing_FWHM],
+["smoothing_scale", smoothing_scale],
+["SNRp_cut", SNRp_cut],
+["SNRi_cut", SNRi_cut],
+["flux_lim", flux_lim],
+["scale_vec", scale_vec],
+["step_vec", step_vec],
+]:
+try:
+value = kwargs[key]
+except KeyError:
+pass
+rebin = True if pxsize is not None else False
 # Step 1:
 # Get data from fits files and translate to flux in erg/cm²/s/Angstrom.
+outfiles = []
 if infiles is not None:
 prod = np.array([["/".join(filepath.split("/")[:-1]), filepath.split("/")[-1]] for filepath in infiles], dtype=str)
 obs_dir = "/".join(infiles[0].split("/")[:-1])
@@ -85,7 +106,7 @@ def main(target=None, proposal_id=None, infiles=None, output_dir="./data", crop=
 target, products = retrieve_products(target, proposal_id, output_dir=output_dir)
 prod = products.pop()
 for prods in products:
-main(target=target, infiles=["/".join(pr) for pr in prods], output_dir=output_dir, crop=crop, interactive=interactive)
+outfiles.append(main(target=target, infiles=["/".join(pr) for pr in prods], output_dir=output_dir, crop=crop, interactive=interactive)[0])
 data_folder = prod[0][0]
 try:
 plots_folder = data_folder.replace("data", "plots")
@@ -99,8 +120,8 @@ def main(target=None, proposal_id=None, infiles=None, output_dir="./data", crop=
figname = "_".join([target, "FOC"]) figname = "_".join([target, "FOC"])
figtype = "" figtype = ""
if rebin: if rebin:
if px_scale not in ["full"]: if pxscale not in ["full"]:
figtype = "".join(["b", "{0:.2f}".format(pxsize), px_scale]) # additionnal informations figtype = "".join(["b", "{0:.2f}".format(pxsize), pxscale]) # additionnal informations
else: else:
figtype = "full" figtype = "full"
if smoothing_FWHM is not None: if smoothing_FWHM is not None:
@@ -137,9 +158,34 @@ def main(target=None, proposal_id=None, infiles=None, output_dir="./data", crop=
 return_background=True,
 )
+# Rotate data to have same orientation
+rotate_data = np.unique([float(head["ORIENTAT"]) for head in headers]).size != 1
+if rotate_data:
+ang = np.mean([head["ORIENTAT"] for head in headers])
+for head in headers:
+head["ORIENTAT"] -= ang
+data_array, error_array, data_mask, headers = proj_red.rotate_data(data_array, error_array, data_mask, headers)
+if display_data:
+proj_plots.plot_obs(
+data_array,
+headers,
+savename="_".join([figname, "rotate_data"]),
+plots_folder=plots_folder,
+norm=LogNorm(
+vmin=data_array[data_array > 0.0].min() * headers[0]["photflam"], vmax=data_array[data_array > 0.0].max() * headers[0]["photflam"]
+),
+)
 # Align and rescale images with oversampling.
 data_array, error_array, headers, data_mask, shifts, error_shifts = proj_red.align_data(
-data_array, headers, error_array=error_array, background=background, upsample_factor=10, ref_center=align_center, return_shifts=True
+data_array,
+headers,
+error_array=error_array,
+data_mask=data_mask,
+background=background,
+upsample_factor=10,
+ref_center=align_center,
+return_shifts=True,
 )
 if display_align:
@@ -155,17 +201,11 @@ def main(target=None, proposal_id=None, infiles=None, output_dir="./data", crop=
 # Rebin data to desired pixel size.
 if rebin:
 data_array, error_array, headers, Dxy, data_mask = proj_red.rebin_array(
-data_array, error_array, headers, pxsize=pxsize, scale=px_scale, operation=rebin_operation, data_mask=data_mask
+data_array, error_array, headers, pxsize=pxsize, scale=pxscale, operation=rebin_operation, data_mask=data_mask
 )
-# Rotate data to have North up
-if rotate_data:
-data_mask = np.ones(data_array.shape[1:]).astype(bool)
-alpha = headers[0]["orientat"]
-data_array, error_array, data_mask, headers = proj_red.rotate_data(data_array, error_array, data_mask, headers, -alpha)
 # Plot array for checking output
-if display_data and px_scale.lower() not in ["full", "integrate"]:
+if display_data and pxscale.lower() not in ["full", "integrate"]:
 proj_plots.plot_obs(
 data_array,
 headers,
@@ -193,29 +233,29 @@ def main(target=None, proposal_id=None, infiles=None, output_dir="./data", crop=
 # FWHM of FOC have been estimated at about 0.03" across 1500-5000 Angstrom band, which is about 2 detector pixels wide
 # see Jedrzejewski, R.; Nota, A.; Hack, W. J., A Comparison Between FOC and WFPC2
 # Bibcode : 1995chst.conf...10J
-I_stokes, Q_stokes, U_stokes, Stokes_cov = proj_red.compute_Stokes(
+I_stokes, Q_stokes, U_stokes, Stokes_cov, header_stokes = proj_red.compute_Stokes(
 data_array, error_array, data_mask, headers, FWHM=smoothing_FWHM, scale=smoothing_scale, smoothing=smoothing_function, transmitcorr=transmitcorr
 )
-I_bkg, Q_bkg, U_bkg, S_cov_bkg = proj_red.compute_Stokes(
+I_bkg, Q_bkg, U_bkg, S_cov_bkg, header_bkg = proj_red.compute_Stokes(
 background, background_error, np.array(True).reshape(1, 1), headers, FWHM=None, scale=smoothing_scale, smoothing=smoothing_function, transmitcorr=False
 )
 # Step 3:
 # Rotate images to have North up
-if rotate_stokes:
-I_stokes, Q_stokes, U_stokes, Stokes_cov, data_mask, headers = proj_red.rotate_Stokes(
-I_stokes, Q_stokes, U_stokes, Stokes_cov, data_mask, headers, SNRi_cut=None
+if rotate_North:
+I_stokes, Q_stokes, U_stokes, Stokes_cov, data_mask, header_stokes = proj_red.rotate_Stokes(
+I_stokes, Q_stokes, U_stokes, Stokes_cov, data_mask, header_stokes, SNRi_cut=None
 )
-I_bkg, Q_bkg, U_bkg, S_cov_bkg, _, _ = proj_red.rotate_Stokes(I_bkg, Q_bkg, U_bkg, S_cov_bkg, np.array(True).reshape(1, 1), headers, SNRi_cut=None)
+I_bkg, Q_bkg, U_bkg, S_cov_bkg, _, _ = proj_red.rotate_Stokes(I_bkg, Q_bkg, U_bkg, S_cov_bkg, np.array(True).reshape(1, 1), header_bkg, SNRi_cut=None)
 # Compute polarimetric parameters (polarization degree and angle).
-P, debiased_P, s_P, s_P_P, PA, s_PA, s_PA_P = proj_red.compute_pol(I_stokes, Q_stokes, U_stokes, Stokes_cov, headers)
-P_bkg, debiased_P_bkg, s_P_bkg, s_P_P_bkg, PA_bkg, s_PA_bkg, s_PA_P_bkg = proj_red.compute_pol(I_bkg, Q_bkg, U_bkg, S_cov_bkg, headers)
+P, debiased_P, s_P, s_P_P, PA, s_PA, s_PA_P = proj_red.compute_pol(I_stokes, Q_stokes, U_stokes, Stokes_cov, header_stokes)
+P_bkg, debiased_P_bkg, s_P_bkg, s_P_P_bkg, PA_bkg, s_PA_bkg, s_PA_P_bkg = proj_red.compute_pol(I_bkg, Q_bkg, U_bkg, S_cov_bkg, header_bkg)
 # Step 4:
 # Save image to FITS.
 figname = "_".join([figname, figtype]) if figtype != "" else figname
-Stokes_test = proj_fits.save_Stokes(
+Stokes_hdul = proj_fits.save_Stokes(
 I_stokes,
 Q_stokes,
 U_stokes,
@@ -227,7 +267,7 @@ def main(target=None, proposal_id=None, infiles=None, output_dir="./data", crop=
 PA,
 s_PA,
 s_PA_P,
-headers,
+header_stokes,
 data_mask,
 figname,
 data_folder=data_folder,
@@ -238,150 +278,152 @@ def main(target=None, proposal_id=None, infiles=None, output_dir="./data", crop=
 # crop to desired region of interest (roi)
 if crop:
 figname += "_crop"
-stokescrop = proj_plots.crop_Stokes(deepcopy(Stokes_test), norm=LogNorm())
+stokescrop = proj_plots.crop_Stokes(deepcopy(Stokes_hdul), norm=LogNorm())
 stokescrop.crop()
 stokescrop.write_to("/".join([data_folder, figname + ".fits"]))
-Stokes_test, headers = stokescrop.hdul_crop, [dataset.header for dataset in stokescrop.hdul_crop]
-data_mask = Stokes_test["data_mask"].data.astype(bool)
+Stokes_hdul, headers = stokescrop.hdul_crop, [dataset.header for dataset in stokescrop.hdul_crop]
+data_mask = Stokes_hdul["data_mask"].data.astype(bool)
 print(
 "F_int({0:.0f} Angs) = ({1} ± {2})e{3} ergs.cm^-2.s^-1.Angs^-1".format(
-headers[0]["photplam"],
+header_stokes["photplam"],
 *sci_not(
-Stokes_test[0].data[data_mask].sum() * headers[0]["photflam"],
-np.sqrt(Stokes_test[3].data[0, 0][data_mask].sum()) * headers[0]["photflam"],
+Stokes_hdul[0].data[data_mask].sum() * header_stokes["photflam"],
+np.sqrt(Stokes_hdul[3].data[0, 0][data_mask].sum()) * header_stokes["photflam"],
 2,
 out=int,
 ),
 )
 )
-print("P_int = {0:.1f} ± {1:.1f} %".format(headers[0]["p_int"] * 100.0, np.ceil(headers[0]["p_int_err"] * 1000.0) / 10.0))
-print("PA_int = {0:.1f} ± {1:.1f} °".format(princ_angle(headers[0]["pa_int"]), princ_angle(np.ceil(headers[0]["pa_int_err"] * 10.0) / 10.0)))
+print("P_int = {0:.1f} ± {1:.1f} %".format(header_stokes["p_int"] * 100.0, np.ceil(header_stokes["sP_int"] * 1000.0) / 10.0))
+print("PA_int = {0:.1f} ± {1:.1f} °".format(princ_angle(header_stokes["pa_int"]), princ_angle(np.ceil(header_stokes["sPA_int"] * 10.0) / 10.0)))
 # Background values
 print(
 "F_bkg({0:.0f} Angs) = ({1} ± {2})e{3} ergs.cm^-2.s^-1.Angs^-1".format(
-headers[0]["photplam"], *sci_not(I_bkg[0, 0] * headers[0]["photflam"], np.sqrt(S_cov_bkg[0, 0][0, 0]) * headers[0]["photflam"], 2, out=int)
+header_stokes["photplam"], *sci_not(I_bkg[0, 0] * header_stokes["photflam"], np.sqrt(S_cov_bkg[0, 0][0, 0]) * header_stokes["photflam"], 2, out=int)
 )
 )
 print("P_bkg = {0:.1f} ± {1:.1f} %".format(debiased_P_bkg[0, 0] * 100.0, np.ceil(s_P_bkg[0, 0] * 1000.0) / 10.0))
 print("PA_bkg = {0:.1f} ± {1:.1f} °".format(princ_angle(PA_bkg[0, 0]), princ_angle(np.ceil(s_PA_bkg[0, 0] * 10.0) / 10.0)))
 # Plot polarization map (Background is either total Flux, Polarization degree or Polarization degree error).
-if px_scale.lower() not in ["full", "integrate"] and not interactive:
+if pxscale.lower() not in ["full", "integrate"] and not interactive:
 proj_plots.polarization_map(
-deepcopy(Stokes_test),
+deepcopy(Stokes_hdul),
 data_mask,
 SNRp_cut=SNRp_cut,
 SNRi_cut=SNRi_cut,
 flux_lim=flux_lim,
 step_vec=step_vec,
-vec_scale=vec_scale,
+scale_vec=scale_vec,
 savename="_".join([figname]),
 plots_folder=plots_folder,
 )
 proj_plots.polarization_map(
-deepcopy(Stokes_test),
+deepcopy(Stokes_hdul),
 data_mask,
 SNRp_cut=SNRp_cut,
 SNRi_cut=SNRi_cut,
 flux_lim=flux_lim,
 step_vec=step_vec,
-vec_scale=vec_scale,
+scale_vec=scale_vec,
 savename="_".join([figname, "I"]),
 plots_folder=plots_folder,
 display="Intensity",
 )
 proj_plots.polarization_map(
-deepcopy(Stokes_test),
+deepcopy(Stokes_hdul),
 data_mask,
 SNRp_cut=SNRp_cut,
 SNRi_cut=SNRi_cut,
 flux_lim=flux_lim,
 step_vec=step_vec,
-vec_scale=vec_scale,
+scale_vec=scale_vec,
 savename="_".join([figname, "P_flux"]),
 plots_folder=plots_folder,
 display="Pol_Flux",
 )
 proj_plots.polarization_map(
-deepcopy(Stokes_test),
+deepcopy(Stokes_hdul),
 data_mask,
 SNRp_cut=SNRp_cut,
 SNRi_cut=SNRi_cut,
 flux_lim=flux_lim,
 step_vec=step_vec,
-vec_scale=vec_scale,
+scale_vec=scale_vec,
 savename="_".join([figname, "P"]),
 plots_folder=plots_folder,
 display="Pol_deg",
 )
 proj_plots.polarization_map(
-deepcopy(Stokes_test),
+deepcopy(Stokes_hdul),
 data_mask,
 SNRp_cut=SNRp_cut,
 SNRi_cut=SNRi_cut,
 flux_lim=flux_lim,
 step_vec=step_vec,
-vec_scale=vec_scale,
+scale_vec=scale_vec,
 savename="_".join([figname, "PA"]),
 plots_folder=plots_folder,
 display="Pol_ang",
 )
 proj_plots.polarization_map(
-deepcopy(Stokes_test),
+deepcopy(Stokes_hdul),
 data_mask,
 SNRp_cut=SNRp_cut,
 SNRi_cut=SNRi_cut,
 flux_lim=flux_lim,
 step_vec=step_vec,
-vec_scale=vec_scale,
+scale_vec=scale_vec,
 savename="_".join([figname, "I_err"]),
 plots_folder=plots_folder,
 display="I_err",
 )
 proj_plots.polarization_map(
-deepcopy(Stokes_test),
+deepcopy(Stokes_hdul),
 data_mask,
 SNRp_cut=SNRp_cut,
 SNRi_cut=SNRi_cut,
 flux_lim=flux_lim,
 step_vec=step_vec,
-vec_scale=vec_scale,
+scale_vec=scale_vec,
 savename="_".join([figname, "P_err"]),
 plots_folder=plots_folder,
 display="Pol_deg_err",
 )
 proj_plots.polarization_map(
-deepcopy(Stokes_test),
+deepcopy(Stokes_hdul),
 data_mask,
 SNRp_cut=SNRp_cut,
 SNRi_cut=SNRi_cut,
 flux_lim=flux_lim,
 step_vec=step_vec,
-vec_scale=vec_scale,
+scale_vec=scale_vec,
 savename="_".join([figname, "SNRi"]),
 plots_folder=plots_folder,
 display="SNRi",
 )
 proj_plots.polarization_map(
-deepcopy(Stokes_test),
+deepcopy(Stokes_hdul),
 data_mask,
 SNRp_cut=SNRp_cut,
 SNRi_cut=SNRi_cut,
 flux_lim=flux_lim,
 step_vec=step_vec,
-vec_scale=vec_scale,
+scale_vec=scale_vec,
 savename="_".join([figname, "SNRp"]),
 plots_folder=plots_folder,
 display="SNRp",
 )
 elif not interactive:
 proj_plots.polarization_map(
-deepcopy(Stokes_test), data_mask, SNRp_cut=SNRp_cut, SNRi_cut=SNRi_cut, savename=figname, plots_folder=plots_folder, display="integrate"
+deepcopy(Stokes_hdul), data_mask, SNRp_cut=SNRp_cut, SNRi_cut=SNRi_cut, savename=figname, plots_folder=plots_folder, display="integrate"
 )
-elif px_scale.lower() not in ["full", "integrate"]:
-proj_plots.pol_map(Stokes_test, SNRp_cut=SNRp_cut, SNRi_cut=SNRi_cut, flux_lim=flux_lim)
-return 0
+elif pxscale.lower() not in ["full", "integrate"]:
+proj_plots.pol_map(Stokes_hdul, SNRp_cut=SNRp_cut, SNRi_cut=SNRi_cut, flux_lim=flux_lim)
+outfiles.append(Stokes_hdul[0].header["FILENAME"])
+return outfiles
 if __name__ == "__main__":
@@ -400,4 +442,4 @@ if __name__ == "__main__":
 exitcode = main(
 target=args.target, proposal_id=args.proposal_id, infiles=args.files, output_dir=args.output_dir, crop=args.crop, interactive=args.interactive
 )
-print("Finished with ExitCode: ", exitcode)
+print("Written to: ", exitcode)

View File

@@ -1,2 +1,3 @@
 from . import lib
 from . import src
+from . import FOC_reduction

View File

@@ -73,7 +73,7 @@ def display_bkg(data, background, std_bkg, headers, histograms=None, binning=Non
 if histograms is not None:
 filt_obs = {"POL0": 0, "POL60": 0, "POL120": 0}
-fig_h, ax_h = plt.subplots(figsize=(10, 6), constrained_layout=True)
+fig_h, ax_h = plt.subplots(figsize=(10, 8), constrained_layout=True)
 for i, (hist, bins) in enumerate(zip(histograms, binning)):
 filt_obs[headers[i]["filtnam1"]] += 1
 ax_h.plot(

View File

@@ -76,7 +76,7 @@ def get_obs_data(infiles, data_folder="", compute_flux=False):
 # force WCS for POL60 to have same pixel size as POL0 and POL120
 is_pol60 = np.array([head["filtnam1"].lower() == "pol60" for head in headers], dtype=bool)
-cdelt = np.round(np.array([WCS(head).wcs.cdelt[:2] for head in headers]), 14)
+cdelt = np.round(np.array([WCS(head).wcs.cdelt[:2] for head in headers]), 10)
 if np.unique(cdelt[np.logical_not(is_pol60)], axis=0).size != 2:
 print(np.unique(cdelt[np.logical_not(is_pol60)], axis=0))
 raise ValueError("Not all images have same pixel size")
@@ -93,7 +93,7 @@ def get_obs_data(infiles, data_folder="", compute_flux=False):
 def save_Stokes(
-I_stokes, Q_stokes, U_stokes, Stokes_cov, P, debiased_P, s_P, s_P_P, PA, s_PA, s_PA_P, headers, data_mask, filename, data_folder="", return_hdul=False
+I_stokes, Q_stokes, U_stokes, Stokes_cov, P, debiased_P, s_P, s_P_P, PA, s_PA, s_PA_P, header_stokes, data_mask, filename, data_folder="", return_hdul=False
 ):
 """
 Save computed polarimetry parameters to a single fits file,
@@ -130,9 +130,8 @@ def save_Stokes(
 Only returned if return_hdul is True.
 """
 # Create new WCS object given the modified images
-ref_header = headers[0]
-exp_tot = np.array([header["exptime"] for header in headers]).sum()
-new_wcs = WCS(ref_header).deepcopy()
+exp_tot = header_stokes['exptime']
+new_wcs = WCS(header_stokes).deepcopy()
 if data_mask.shape != (1, 1):
 vertex = clean_ROI(data_mask)
@@ -141,19 +140,23 @@ def save_Stokes(
 new_wcs.wcs.crpix = np.array(new_wcs.wcs.crpix) - vertex[0::-2]
 header = new_wcs.to_header()
-header["telescop"] = (ref_header["telescop"] if "TELESCOP" in list(ref_header.keys()) else "HST", "telescope used to acquire data")
-header["instrume"] = (ref_header["instrume"] if "INSTRUME" in list(ref_header.keys()) else "FOC", "identifier for instrument used to acuire data")
-header["photplam"] = (ref_header["photplam"], "Pivot Wavelength")
-header["photflam"] = (ref_header["photflam"], "Inverse Sensitivity in DN/sec/cm**2/Angst")
-header["exptot"] = (exp_tot, "Total exposure time in sec")
-header["proposid"] = (ref_header["proposid"], "PEP proposal identifier for observation")
-header["targname"] = (ref_header["targname"], "Target name")
-header["orientat"] = (ref_header["orientat"], "Angle between North and the y-axis of the image")
-header["filename"] = (filename, "Original filename")
-header["P_int"] = (ref_header["P_int"], "Integrated polarization degree")
-header["P_int_err"] = (ref_header["P_int_err"], "Integrated polarization degree error")
-header["PA_int"] = (ref_header["PA_int"], "Integrated polarization angle")
-header["PA_int_err"] = (ref_header["PA_int_err"], "Integrated polarization angle error")
+header["TELESCOP"] = (header_stokes["telescop"] if "TELESCOP" in list(header_stokes.keys()) else "HST", "telescope used to acquire data")
+header["INSTRUME"] = (header_stokes["instrume"] if "INSTRUME" in list(header_stokes.keys()) else "FOC", "identifier for instrument used to acuire data")
+header["PHOTPLAM"] = (header_stokes["photplam"], "Pivot Wavelength")
+header["PHOTFLAM"] = (header_stokes["photflam"], "Inverse Sensitivity in DN/sec/cm**2/Angst")
+header["EXPTOT"] = (exp_tot, "Total exposure time in sec")
+header["PROPOSID"] = (header_stokes["proposid"], "PEP proposal identifier for observation")
+header["TARGNAME"] = (header_stokes["targname"], "Target name")
+header["ORIENTAT"] = (np.arccos(new_wcs.wcs.pc[0, 0]) * 180.0 / np.pi, "Angle between North and the y-axis of the image")
+header["FILENAME"] = (filename, "Original filename")
+header["BKG_TYPE"] = (header_stokes["BKG_TYPE"], "Bkg estimation method used during reduction")
+header["BKG_SUB"] = (header_stokes["BKG_SUB"], "Amount of bkg subtracted from images")
+header["SMOOTH"] = (header_stokes["SMOOTH"], "Smoothing method used during reduction")
+header["SAMPLING"] = (header_stokes["SAMPLING"], "Resampling performed during reduction")
+header["P_INT"] = (header_stokes["P_int"], "Integrated polarization degree")
+header["sP_INT"] = (header_stokes["sP_int"], "Integrated polarization degree error")
+header["PA_INT"] = (header_stokes["PA_int"], "Integrated polarization angle")
+header["sPA_INT"] = (header_stokes["sPA_int"], "Integrated polarization angle error")
 # Crop Data to mask
 if data_mask.shape != (1, 1):

View File

@@ -218,7 +218,7 @@ def polarization_map(
 SNRi_cut=3.0,
 flux_lim=None,
 step_vec=1,
-vec_scale=2.0,
+scale_vec=2.0,
 savename=None,
 plots_folder="",
 display="default",
@@ -250,9 +250,9 @@ def polarization_map(
 Number of steps between each displayed polarization vector.
 If step_vec = 2, every other vector will be displayed.
 Defaults to 1
-vec_scale : float, optional
+scale_vec : float, optional
 Pixel length of displayed 100% polarization vector.
-If vec_scale = 2, a vector of 50% polarization will be 1 pixel wide.
+If scale_vec = 2, a vector of 50% polarization will be 1 pixel wide.
 Defaults to 2.
 savename : str, optional
 Name of the figure the map should be saved to. If None, the map won't
@@ -428,9 +428,9 @@ def polarization_map(
 I_diluted_err = np.sqrt(np.sum(stk_cov[0, 0][data_mask]))
 P_diluted = Stokes[0].header["P_int"]
-P_diluted_err = Stokes[0].header["P_int_err"]
+P_diluted_err = Stokes[0].header["sP_int"]
 PA_diluted = Stokes[0].header["PA_int"]
-PA_diluted_err = Stokes[0].header["PA_int_err"]
+PA_diluted_err = Stokes[0].header["sPA_int"]
 plt.rcParams.update({"font.size": 12})
 px_size = wcs.wcs.get_cdelt()[0] * 3600.0
@@ -457,7 +457,7 @@ def polarization_map(
 if step_vec == 0:
 poldata[np.isfinite(poldata)] = 1.0 / 2.0
 step_vec = 1
-vec_scale = 2.0
+scale_vec = 2.0
 X, Y = np.meshgrid(np.arange(stkI.shape[1]), np.arange(stkI.shape[0]))
 U, V = poldata * np.cos(np.pi / 2.0 + pangdata * np.pi / 180.0), poldata * np.sin(np.pi / 2.0 + pangdata * np.pi / 180.0)
 ax.quiver(
@@ -467,7 +467,7 @@ def polarization_map(
 V[::step_vec, ::step_vec],
 units="xy",
 angles="uv",
-scale=1.0 / vec_scale,
+scale=1.0 / scale_vec,
 scale_units="xy",
 pivot="mid",
 headwidth=0.0,
@@ -478,7 +478,7 @@ def polarization_map(
color="w", color="w",
edgecolor="k", edgecolor="k",
) )
pol_sc = AnchoredSizeBar(ax.transData, vec_scale, r"$P$= 100 %", 4, pad=0.5, sep=5, borderpad=0.5, frameon=False, size_vertical=0.005, color="w") pol_sc = AnchoredSizeBar(ax.transData, scale_vec, r"$P$= 100 %", 4, pad=0.5, sep=5, borderpad=0.5, frameon=False, size_vertical=0.005, color="w")
ax.add_artist(pol_sc) ax.add_artist(pol_sc)
ax.add_artist(px_sc) ax.add_artist(px_sc)
@@ -839,7 +839,7 @@ class overplot_radio(align_maps):
 Inherit from class align_maps in order to get the same WCS on both maps.
 """
-def overplot(self, levels=None, SNRp_cut=3.0, SNRi_cut=3.0, vec_scale=2, savename=None, **kwargs):
+def overplot(self, levels=None, SNRp_cut=3.0, SNRi_cut=3.0, scale_vec=2, savename=None, **kwargs):
 self.Stokes_UV = self.map
 self.wcs_UV = self.map_wcs
 # Get Data
@@ -915,11 +915,11 @@ class overplot_radio(align_maps):
 )
 # Display full size polarization vectors
-if vec_scale is None:
-self.vec_scale = 2.0
+if scale_vec is None:
+self.scale_vec = 2.0
 pol[np.isfinite(pol)] = 1.0 / 2.0
 else:
-self.vec_scale = vec_scale
+self.scale_vec = scale_vec
 step_vec = 1
 self.X, self.Y = np.meshgrid(np.arange(stkI.shape[1]), np.arange(stkI.shape[0]))
 self.U, self.V = pol * np.cos(np.pi / 2.0 + pang * np.pi / 180.0), pol * np.sin(np.pi / 2.0 + pang * np.pi / 180.0)
@@ -930,7 +930,7 @@ class overplot_radio(align_maps):
 self.V[::step_vec, ::step_vec],
 units="xy",
 angles="uv",
-scale=1.0 / self.vec_scale,
+scale=1.0 / self.scale_vec,
 scale_units="xy",
 pivot="mid",
 headwidth=0.0,
@@ -998,7 +998,7 @@ class overplot_radio(align_maps):
 self.ax_overplot.add_artist(north_dir)
 pol_sc = AnchoredSizeBar(
 self.ax_overplot.transData,
-self.vec_scale,
+self.scale_vec,
 r"$P$= 100%",
 4,
 pad=0.5,
@@ -1046,7 +1046,7 @@ class overplot_chandra(align_maps):
 Inherit from class align_maps in order to get the same WCS on both maps.
 """
-def overplot(self, levels=None, SNRp_cut=3.0, SNRi_cut=3.0, vec_scale=2, zoom=1, savename=None, **kwargs):
+def overplot(self, levels=None, SNRp_cut=3.0, SNRi_cut=3.0, scale_vec=2, zoom=1, savename=None, **kwargs):
 self.Stokes_UV = self.map
 self.wcs_UV = self.map_wcs
 # Get Data
@@ -1121,11 +1121,11 @@ class overplot_chandra(align_maps):
 )
 # Display full size polarization vectors
-if vec_scale is None:
-self.vec_scale = 2.0
+if scale_vec is None:
+self.scale_vec = 2.0
 pol[np.isfinite(pol)] = 1.0 / 2.0
 else:
-self.vec_scale = vec_scale
+self.scale_vec = scale_vec
 step_vec = 1
 self.X, self.Y = np.meshgrid(np.arange(stkI.shape[1]), np.arange(stkI.shape[0]))
 self.U, self.V = pol * np.cos(np.pi / 2.0 + pang * np.pi / 180.0), pol * np.sin(np.pi / 2.0 + pang * np.pi / 180.0)
@@ -1136,7 +1136,7 @@ class overplot_chandra(align_maps):
 self.V[::step_vec, ::step_vec],
 units="xy",
 angles="uv",
-scale=1.0 / self.vec_scale,
+scale=1.0 / self.scale_vec,
 scale_units="xy",
 pivot="mid",
 headwidth=0.0,
@@ -1200,7 +1200,7 @@ class overplot_chandra(align_maps):
 self.ax_overplot.add_artist(north_dir)
 pol_sc = AnchoredSizeBar(
 self.ax_overplot.transData,
-self.vec_scale,
+self.scale_vec,
 r"$P$= 100%",
 4,
 pad=0.5,
@@ -1245,7 +1245,7 @@ class overplot_pol(align_maps):
 Inherit from class align_maps in order to get the same WCS on both maps.
 """
-def overplot(self, levels=None, SNRp_cut=3.0, SNRi_cut=3.0, vec_scale=2.0, savename=None, **kwargs):
+def overplot(self, levels=None, SNRp_cut=3.0, SNRi_cut=3.0, scale_vec=2.0, savename=None, **kwargs):
 self.Stokes_UV = self.map
 self.wcs_UV = self.map_wcs
 # Get Data
@@ -1323,11 +1323,11 @@ class overplot_pol(align_maps):
 )
 # Display full size polarization vectors
-if vec_scale is None:
-self.vec_scale = 2.0
+if scale_vec is None:
+self.scale_vec = 2.0
 pol[np.isfinite(pol)] = 1.0 / 2.0
 else:
-self.vec_scale = vec_scale
+self.scale_vec = scale_vec
 step_vec = 1
 px_scale = np.abs(self.wcs_UV.wcs.get_cdelt()[0] / self.other_wcs.wcs.get_cdelt()[0])
 self.X, self.Y = np.meshgrid(np.arange(stkI.shape[1]), np.arange(stkI.shape[0]))
@@ -1339,7 +1339,7 @@ class overplot_pol(align_maps):
 self.V[::step_vec, ::step_vec],
 units="xy",
 angles="uv",
-scale=px_scale / self.vec_scale,
+scale=px_scale / self.scale_vec,
 scale_units="xy",
 pivot="mid",
 headwidth=0.0,
@@ -1395,7 +1395,7 @@ class overplot_pol(align_maps):
 self.ax_overplot.add_artist(north_dir)
 pol_sc = AnchoredSizeBar(
 self.ax_overplot.transData,
-self.vec_scale / px_scale,
+self.scale_vec / px_scale,
 r"$P$= 100%",
 4,
 pad=0.5,
@@ -1435,10 +1435,10 @@ class overplot_pol(align_maps):
 self.fig_overplot.canvas.draw()
-def plot(self, levels=None, SNRp_cut=3.0, SNRi_cut=3.0, vec_scale=2.0, savename=None, **kwargs) -> None:
+def plot(self, levels=None, SNRp_cut=3.0, SNRi_cut=3.0, scale_vec=2.0, savename=None, **kwargs) -> None:
 while not self.aligned:
 self.align()
-self.overplot(levels=levels, SNRp_cut=SNRp_cut, SNRi_cut=SNRi_cut, vec_scale=vec_scale, savename=savename, **kwargs)
+self.overplot(levels=levels, SNRp_cut=SNRp_cut, SNRi_cut=SNRi_cut, scale_vec=scale_vec, savename=savename, **kwargs)
 plt.show(block=True)
 def add_vector(self, position="center", pol_deg=1.0, pol_ang=0.0, **kwargs):
@@ -1448,7 +1448,7 @@ class overplot_pol(align_maps):
 position = self.other_wcs.world_to_pixel(position)
 u, v = pol_deg * np.cos(np.radians(pol_ang) + np.pi / 2.0), pol_deg * np.sin(np.radians(pol_ang) + np.pi / 2.0)
-for key, value in [["scale", [["scale", self.vec_scale]]], ["width", [["width", 0.1]]], ["color", [["color", "k"]]]]:
+for key, value in [["scale", [["scale", self.scale_vec]]], ["width", [["width", 0.1]]], ["color", [["color", "k"]]]]:
 try:
 _ = kwargs[key]
 except KeyError:
@@ -1937,9 +1937,9 @@ class crop_Stokes(crop_map):
 for dataset in self.hdul_crop:
 dataset.header["P_int"] = (P_diluted, "Integrated polarization degree")
-dataset.header["P_int_err"] = (np.ceil(P_diluted_err * 1000.0) / 1000.0, "Integrated polarization degree error")
+dataset.header["sP_int"] = (np.ceil(P_diluted_err * 1000.0) / 1000.0, "Integrated polarization degree error")
 dataset.header["PA_int"] = (PA_diluted, "Integrated polarization angle")
-dataset.header["PA_int_err"] = (np.ceil(PA_diluted_err * 10.0) / 10.0, "Integrated polarization angle error")
+dataset.header["sPA_int"] = (np.ceil(PA_diluted_err * 10.0) / 10.0, "Integrated polarization angle error")
 self.fig.canvas.draw_idle()
 @property
@@ -3043,9 +3043,9 @@ class pol_map(object):
 I_reg = self.I.sum()
 I_reg_err = np.sqrt(np.sum(s_I**2))
 P_reg = self.Stokes[0].header["P_int"]
-P_reg_err = self.Stokes[0].header["P_int_err"]
+P_reg_err = self.Stokes[0].header["sP_int"]
 PA_reg = self.Stokes[0].header["PA_int"]
-PA_reg_err = self.Stokes[0].header["PA_int_err"]
+PA_reg_err = self.Stokes[0].header["sPA_int"]
 s_I = np.sqrt(self.IQU_cov[0, 0])
 s_Q = np.sqrt(self.IQU_cov[1, 1])

View File

@@ -53,7 +53,7 @@ from scipy.ndimage import shift as sc_shift
 from scipy.signal import fftconvolve
 from .background import bkg_fit, bkg_hist, bkg_mini
-from .convex_hull import clean_ROI, image_hull
+from .convex_hull import image_hull
 from .cross_correlation import phase_cross_correlation
 from .deconvolve import deconvolve_im, gaussian2d, gaussian_psf, zeropad
 from .plots import plot_obs
@@ -433,18 +433,7 @@ def deconvolve_array(data_array, headers, psf="gaussian", FWHM=1.0, scale="px",
 return deconv_array
-def get_error(
-data_array,
-headers,
-error_array=None,
-data_mask=None,
-sub_type=None,
-subtract_error=True,
-display=False,
-savename=None,
-plots_folder="",
-return_background=False,
-):
+def get_error(data_array, headers, error_array=None, data_mask=None, sub_type=None, subtract_error=0.5, display=False, savename=None, plots_folder="", return_background=False):
 """
 Look for sub-image of shape sub_shape that have the smallest integrated
 flux (no source assumption) and define the background on the image by the
@@ -532,22 +521,30 @@ def get_error(
 n_data_array, c_error_bkg, headers, background = bkg_hist(
 data, error, mask, headers, subtract_error=subtract_error, display=display, savename=savename, plots_folder=plots_folder
 )
+sub_type, subtract_error = "histogram ", str(int(subtract_error>0.))
 elif isinstance(sub_type, str):
 if sub_type.lower() in ["auto"]:
 n_data_array, c_error_bkg, headers, background = bkg_fit(
 data, error, mask, headers, subtract_error=subtract_error, display=display, savename=savename, plots_folder=plots_folder
 )
+sub_type, subtract_error = "histogram fit ", "mean+%.1fsigma"%subtract_error if subtract_error != 0. else 0.
 else:
 n_data_array, c_error_bkg, headers, background = bkg_hist(
 data, error, mask, headers, sub_type=sub_type, subtract_error=subtract_error, display=display, savename=savename, plots_folder=plots_folder
 )
+sub_type, subtract_error = "histogram ", "mean+%.1fsigma"%subtract_error if subtract_error != 0. else 0.
 elif isinstance(sub_type, tuple):
 n_data_array, c_error_bkg, headers, background = bkg_mini(
 data, error, mask, headers, sub_shape=sub_type, subtract_error=subtract_error, display=display, savename=savename, plots_folder=plots_folder
 )
+sub_type, subtract_error = "minimal flux ", "mean+%.1fsigma"%subtract_error if subtract_error != 0. else 0.
 else:
 print("Warning: Invalid subtype.")
+for header in headers:
+header["BKG_TYPE"] = (sub_type,"Bkg estimation method used during reduction")
+header["BKG_SUB"] = (subtract_error,"Amount of bkg subtracted from images")
 # Quadratically add uncertainties in the "correction factors" (see Kishimoto 1999)
 n_error_array = np.sqrt(err_wav**2 + err_psf**2 + err_flat**2 + c_error_bkg**2)
@@ -557,7 +554,7 @@ def get_error(
 return n_data_array, n_error_array, headers
-def rebin_array(data_array, error_array, headers, pxsize, scale, operation="sum", data_mask=None):
+def rebin_array(data_array, error_array, headers, pxsize=2, scale="px", operation="sum", data_mask=None):
 """
 Homogeneously rebin a data array to get a new pixel size equal to pxsize
 where pxsize is given in arcsec.
@@ -613,8 +610,6 @@ def rebin_array(data_array, error_array, headers, pxsize, scale, operation="sum"
 Dxy = np.array([1.0, 1.0])
 # Routine for the FOC instrument
-if instr == "FOC":
-# HST_aper = 2400.0 # HST aperture in mm
 Dxy_arr = np.ones((data_array.shape[0], 2))
 for i, (image, error, header) in enumerate(list(zip(data_array, error_array, headers))):
 # Get current pixel size
@@ -623,16 +618,14 @@ def rebin_array(data_array, error_array, headers, pxsize, scale, operation="sum"
 # Compute binning ratio
 if scale.lower() in ["px", "pixel"]:
-Dxy_arr[i] = np.array(
-[
-pxsize,
-]
-* 2
-)
+Dxy_arr[i] = np.array([pxsize,] * 2)
+scale = "px"
 elif scale.lower() in ["arcsec", "arcseconds"]:
 Dxy_arr[i] = np.array(pxsize / np.abs(w.wcs.cdelt) / 3600.0)
+scale = "arcsec"
 elif scale.lower() in ["full", "integrate"]:
 Dxy_arr[i] = image.shape
+pxsize, scale = "", "full"
 else:
 raise ValueError("'{0:s}' invalid scale for binning.".format(scale))
 new_shape = np.ceil(min(image.shape / Dxy_arr, key=lambda x: x[0] + x[1])).astype(int)
@@ -669,6 +662,7 @@ def rebin_array(data_array, error_array, headers, pxsize, scale, operation="sum"
new_header["NAXIS1"], new_header["NAXIS2"] = nw.array_shape new_header["NAXIS1"], new_header["NAXIS2"] = nw.array_shape
for key, val in nw.to_header().items(): for key, val in nw.to_header().items():
new_header.set(key, val) new_header.set(key, val)
new_header["SAMPLING"] = (str(pxsize)+scale, "Resampling performed during reduction")
rebinned_headers.append(new_header) rebinned_headers.append(new_header)
if data_mask is not None: if data_mask is not None:
data_mask = bin_ndarray(data_mask, new_shape=new_shape, operation="average") > 0.80 data_mask = bin_ndarray(data_mask, new_shape=new_shape, operation="average") > 0.80
@@ -682,7 +676,7 @@ def rebin_array(data_array, error_array, headers, pxsize, scale, operation="sum"
 return rebinned_data, rebinned_error, rebinned_headers, Dxy, data_mask
-def align_data(data_array, headers, error_array=None, background=None, upsample_factor=1.0, ref_data=None, ref_center=None, return_shifts=False):
+def align_data(data_array, headers, error_array=None, data_mask=None, background=None, upsample_factor=1.0, ref_data=None, ref_center=None, return_shifts=False):
 """
 Align images in data_array using cross correlation, and rescale them to
 wider images able to contain any rotation of the reference image.
@@ -760,10 +754,14 @@ def align_data(data_array, headers, error_array=None, background=None, upsample_
 full_headers.append(headers[0])
 err_array = np.concatenate((error_array, [np.zeros(ref_data.shape)]), axis=0)
+if data_mask is None:
 full_array, err_array, full_headers = crop_array(full_array, full_headers, err_array, step=5, inside=False, null_val=0.0)
+else:
+full_array, err_array, data_mask, full_headers = crop_array(full_array, full_headers, err_array, data_mask=data_mask, step=5, inside=False, null_val=0.0)
 data_array, ref_data, headers = full_array[:-1], full_array[-1], full_headers[:-1]
 error_array = err_array[:-1]
 do_shift = True
 if ref_center is None:
 # Define the center of the reference image to be the center pixel
@@ -788,6 +786,8 @@ def align_data(data_array, headers, error_array=None, background=None, upsample_
 res_shift = res_center - ref_center
 res_mask = np.zeros((res_shape, res_shape), dtype=bool)
 res_mask[res_shift[0] : res_shift[0] + shape[1], res_shift[1] : res_shift[1] + shape[2]] = True
+if data_mask is not None:
+res_mask = np.logical_and(res_mask,zeropad(data_mask, (res_shape, res_shape)).astype(bool))
 shifts, errors = [], []
 for i, image in enumerate(data_array):
@@ -806,9 +806,11 @@ def align_data(data_array, headers, error_array=None, background=None, upsample_
 rescaled_image[i] = sc_shift(rescaled_image[i], shift, order=1, cval=0.0)
 rescaled_error[i] = sc_shift(rescaled_error[i], shift, order=1, cval=background[i])
-curr_mask = sc_shift(res_mask, shift, order=1, cval=False)
-mask_vertex = clean_ROI(curr_mask)
-rescaled_mask[i, mask_vertex[2] : mask_vertex[3], mask_vertex[0] : mask_vertex[1]] = True
+curr_mask = sc_shift(res_mask*10., shift, order=1, cval=0.0)
+curr_mask[curr_mask < curr_mask.max()*2./3.] = 0.0
+rescaled_mask[i] = curr_mask.astype(bool)
+# mask_vertex = clean_ROI(curr_mask)
+# rescaled_mask[i, mask_vertex[2] : mask_vertex[3], mask_vertex[0] : mask_vertex[1]] = True
 rescaled_image[i][rescaled_image[i] < 0.0] = 0.0
 rescaled_image[i][(1 - rescaled_mask[i]).astype(bool)] = 0.0
@@ -842,7 +844,7 @@ def align_data(data_array, headers, error_array=None, background=None, upsample_
 return data_array, error_array, headers, data_mask
-def smooth_data(data_array, error_array, data_mask, headers, FWHM=1.0, scale="pixel", smoothing="gaussian"):
+def smooth_data(data_array, error_array, data_mask, headers, FWHM=1.5, scale="pixel", smoothing="weighted_gaussian"):
 """
 Smooth a data_array using selected function.
 ----------
@@ -886,13 +888,19 @@ def smooth_data(data_array, error_array, data_mask, headers, FWHM=1.0, scale="pi
 pxsize[i] = np.round(w.wcs.cdelt * 3600.0, 4)
 if (pxsize != pxsize[0]).any():
 raise ValueError("Not all images in array have same pixel size")
+FWHM_size = str(FWHM)
+FWHM_scale = "arcsec"
 FWHM /= pxsize[0].min()
+else:
+FWHM_size = str(FWHM)
+FWHM_scale = "px"
 # Define gaussian stdev
 stdev = FWHM / (2.0 * np.sqrt(2.0 * np.log(2)))
 fmax = np.finfo(np.double).max
 if smoothing.lower() in ["combine", "combining"]:
+smoothing = "combine"
 # Smooth using N images combination algorithm
 # Weight array
 weight = 1.0 / error_array**2
@@ -928,6 +936,7 @@ def smooth_data(data_array, error_array, data_mask, headers, FWHM=1.0, scale="pi
 smoothed[np.logical_or(np.isnan(smoothed * error), 1 - data_mask)] = 0.0
 elif smoothing.lower() in ["weight_gauss", "weighted_gaussian", "gauss", "gaussian"]:
+smoothing = "gaussian"
 # Convolution with gaussian function
 smoothed = np.zeros(data_array.shape)
 error = np.zeros(error_array.shape)
@@ -935,6 +944,7 @@ def smooth_data(data_array, error_array, data_mask, headers, FWHM=1.0, scale="pi
 x, y = np.meshgrid(np.arange(-image.shape[1] / 2, image.shape[1] / 2), np.arange(-image.shape[0] / 2, image.shape[0] / 2))
 weights = np.ones(image_error.shape)
 if smoothing.lower()[:6] in ["weight"]:
+smoothing = "weighted gaussian"
 weights = 1.0 / image_error**2
 weights[(1 - np.isfinite(weights)).astype(bool)] = 0.0
 weights[(1 - data_mask).astype(bool)] = 0.0
@@ -953,10 +963,13 @@ def smooth_data(data_array, error_array, data_mask, headers, FWHM=1.0, scale="pi
else: else:
raise ValueError("{} is not a valid smoothing option".format(smoothing)) raise ValueError("{} is not a valid smoothing option".format(smoothing))
for header in headers:
header["SMOOTH"] = (" ".join([smoothing,FWHM_size,FWHM_scale]),"Smoothing method used during reduction")
return smoothed, error return smoothed, error
def polarizer_avg(data_array, error_array, data_mask, headers, FWHM=None, scale="pixel", smoothing="gaussian"): def polarizer_avg(data_array, error_array, data_mask, headers, FWHM=1.5, scale="pixel", smoothing="weighted_gaussian"):
""" """
Make the average image from a single polarizer for a given instrument. Make the average image from a single polarizer for a given instrument.
----------- -----------
@@ -1115,7 +1128,7 @@ def polarizer_avg(data_array, error_array, data_mask, headers, FWHM=None, scale=
     return polarizer_array, polarizer_cov, pol_headers


-def compute_Stokes(data_array, error_array, data_mask, headers, FWHM=None, scale="pixel", smoothing="combine", transmitcorr=True):
+def compute_Stokes(data_array, error_array, data_mask, headers, FWHM=None, scale="pixel", smoothing="combine", transmitcorr=True, integrate=True):
     """
     Compute the Stokes parameters I, Q and U for a given data_set
     ----------
@@ -1179,9 +1192,15 @@ def compute_Stokes(data_array, error_array, data_mask, headers, FWHM=None, scale
            "Cannot reduce images from {0:s} instrument\
            (yet)".format(instr)
        )

-    # Routine for the FOC instrument
-    if instr == "FOC":
+    rotate = np.zeros(len(headers))
+    for i,head in enumerate(headers):
+        try:
+            rotate[i] = head['ROTATE']
+        except KeyError:
+            rotate[i] = 0.
+    if (np.unique(rotate) == rotate[0]).all():
+        theta = globals()["theta"] - rotate[0] * np.pi / 180.0
         # Get image from each polarizer and covariance matrix
         pol_array, pol_cov, pol_headers = polarizer_avg(data_array, error_array, data_mask, headers, FWHM=FWHM, scale=scale, smoothing=smoothing)
         pol0, pol60, pol120 = pol_array
@@ -1210,8 +1229,10 @@ def compute_Stokes(data_array, error_array, data_mask, headers, FWHM=None, scale
         transmit *= transmit2 * transmit3 * transmit4
         pol_eff = np.array([globals()["pol_efficiency"]["pol0"], globals()["pol_efficiency"]["pol60"], globals()["pol_efficiency"]["pol120"]])

-        # Calculating correction factor
+        # Calculating correction factor: allows all pol_filt to share same exptime and inverse sensitivity (taken to be the one from POL0)
         corr = np.array([1.0 * h["photflam"] / h["exptime"] for h in pol_headers]) * pol_headers[0]["exptime"] / pol_headers[0]["photflam"]
+        pol_headers[1]['photflam'], pol_headers[1]['exptime'] = pol_headers[0]['photflam'], pol_headers[1]['exptime']
+        pol_headers[2]['photflam'], pol_headers[2]['exptime'] = pol_headers[0]['photflam'], pol_headers[2]['exptime']

         # Orientation and error for each polarizer
         # fmax = np.finfo(np.float64).max
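The correction factor rescales each polarizer image onto POL0's flux calibration: counts are first converted to flux with their own PHOTFLAM/EXPTIME and then re-expressed with POL0's values, so the three channels can be combined as if they shared one exposure time and inverse sensitivity. A toy illustration with invented header values:

import numpy as np

# Hypothetical (PHOTFLAM, EXPTIME) values for POL0, POL60, POL120.
photflam = np.array([2.5e-18, 2.6e-18, 2.4e-18])
exptime = np.array([980.0, 1020.0, 996.0])

# After this rescaling, counts * photflam[0] / exptime[0] gives each channel's flux.
corr = (photflam / exptime) * exptime[0] / photflam[0]

counts = np.array([1.0e5, 9.6e4, 1.02e5])        # made-up polarizer counts
flux = counts * corr * photflam[0] / exptime[0]  # identical to counts * photflam / exptime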
@@ -1223,17 +1244,17 @@ def compute_Stokes(data_array, error_array, data_mask, headers, FWHM=None, scale
            coeff_stokes[0, i] = (
                pol_eff[(i + 1) % 3]
                * pol_eff[(i + 2) % 3]
-                * np.sin(-2.0 * globals()["theta"][(i + 1) % 3] + 2.0 * globals()["theta"][(i + 2) % 3])
+                * np.sin(-2.0 * theta[(i + 1) % 3] + 2.0 * theta[(i + 2) % 3])
                * 2.0
                / transmit[i]
            )
            coeff_stokes[1, i] = (
-                (-pol_eff[(i + 1) % 3] * np.sin(2.0 * globals()["theta"][(i + 1) % 3]) + pol_eff[(i + 2) % 3] * np.sin(2.0 * globals()["theta"][(i + 2) % 3]))
+                (-pol_eff[(i + 1) % 3] * np.sin(2.0 * theta[(i + 1) % 3]) + pol_eff[(i + 2) % 3] * np.sin(2.0 * theta[(i + 2) % 3]))
                * 2.0
                / transmit[i]
            )
            coeff_stokes[2, i] = (
-                (pol_eff[(i + 1) % 3] * np.cos(2.0 * globals()["theta"][(i + 1) % 3]) - pol_eff[(i + 2) % 3] * np.cos(2.0 * globals()["theta"][(i + 2) % 3]))
+                (pol_eff[(i + 1) % 3] * np.cos(2.0 * theta[(i + 1) % 3]) - pol_eff[(i + 2) % 3] * np.cos(2.0 * theta[(i + 2) % 3]))
                * 2.0
                / transmit[i]
            )
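These coefficients are the analytic inverse of the three-polarizer measurement model, in which each polarizer sees half the intensity modulated by its efficiency and orientation. As a rough, purely illustrative sketch of the same inversion done numerically (idealized model, made-up efficiencies and throughputs, not the pipeline's exact conventions):

import numpy as np

theta = np.deg2rad([0.0, 60.0, 120.0])     # toy polarizer orientations
eff = np.array([0.98, 0.97, 0.96])         # toy polarizer efficiencies
transmit = np.array([0.91, 0.92, 0.90])    # toy filter throughputs

# Idealized model: f_k = 0.5 * t_k * (I + eff_k * (Q*cos(2*theta_k) + U*sin(2*theta_k)))
A = 0.5 * transmit[:, None] * np.column_stack(
    [np.ones(3), eff * np.cos(2.0 * theta), eff * np.sin(2.0 * theta)]
)

I_true, Q_true, U_true = 1.0, 0.05, -0.02
flux = A @ np.array([I_true, Q_true, U_true])

# Inverting A gives coefficients that map the three polarizer fluxes back to I, Q, U.
coeff = np.linalg.inv(A)
I_rec, Q_rec, U_rec = coeff @ flux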
@@ -1294,9 +1315,9 @@ def compute_Stokes(data_array, error_array, data_mask, headers, FWHM=None, scale
            * pol_eff[0]
            / N
            * (
-                pol_eff[2] * np.cos(-2.0 * globals()["theta"][2] + 2.0 * globals()["theta"][0]) * (pol_flux_corr[1] - I_stokes)
-                - pol_eff[1] * np.cos(-2.0 * globals()["theta"][0] + 2.0 * globals()["theta"][1]) * (pol_flux_corr[2] - I_stokes)
-                + coeff_stokes_corr[0, 0] * (np.sin(2.0 * globals()["theta"][0]) * Q_stokes - np.cos(2 * globals()["theta"][0]) * U_stokes)
+                pol_eff[2] * np.cos(-2.0 * theta[2] + 2.0 * theta[0]) * (pol_flux_corr[1] - I_stokes)
+                - pol_eff[1] * np.cos(-2.0 * theta[0] + 2.0 * theta[1]) * (pol_flux_corr[2] - I_stokes)
+                + coeff_stokes_corr[0, 0] * (np.sin(2.0 * theta[0]) * Q_stokes - np.cos(2 * theta[0]) * U_stokes)
            )
        )
        dI_dtheta2 = (
@@ -1304,9 +1325,9 @@ def compute_Stokes(data_array, error_array, data_mask, headers, FWHM=None, scale
            * pol_eff[1]
            / N
            * (
-                pol_eff[0] * np.cos(-2.0 * globals()["theta"][0] + 2.0 * globals()["theta"][1]) * (pol_flux_corr[2] - I_stokes)
-                - pol_eff[2] * np.cos(-2.0 * globals()["theta"][1] + 2.0 * globals()["theta"][2]) * (pol_flux_corr[0] - I_stokes)
-                + coeff_stokes_corr[0, 1] * (np.sin(2.0 * globals()["theta"][1]) * Q_stokes - np.cos(2 * globals()["theta"][1]) * U_stokes)
+                pol_eff[0] * np.cos(-2.0 * theta[0] + 2.0 * theta[1]) * (pol_flux_corr[2] - I_stokes)
+                - pol_eff[2] * np.cos(-2.0 * theta[1] + 2.0 * theta[2]) * (pol_flux_corr[0] - I_stokes)
+                + coeff_stokes_corr[0, 1] * (np.sin(2.0 * theta[1]) * Q_stokes - np.cos(2 * theta[1]) * U_stokes)
            )
        )
        dI_dtheta3 = (
@@ -1314,9 +1335,9 @@ def compute_Stokes(data_array, error_array, data_mask, headers, FWHM=None, scale
            * pol_eff[2]
            / N
            * (
-                pol_eff[1] * np.cos(-2.0 * globals()["theta"][1] + 2.0 * globals()["theta"][2]) * (pol_flux_corr[0] - I_stokes)
-                - pol_eff[0] * np.cos(-2.0 * globals()["theta"][2] + 2.0 * globals()["theta"][0]) * (pol_flux_corr[1] - I_stokes)
-                + coeff_stokes_corr[0, 2] * (np.sin(2.0 * globals()["theta"][2]) * Q_stokes - np.cos(2 * globals()["theta"][2]) * U_stokes)
+                pol_eff[1] * np.cos(-2.0 * theta[1] + 2.0 * theta[2]) * (pol_flux_corr[0] - I_stokes)
+                - pol_eff[0] * np.cos(-2.0 * theta[2] + 2.0 * theta[0]) * (pol_flux_corr[1] - I_stokes)
+                + coeff_stokes_corr[0, 2] * (np.sin(2.0 * theta[2]) * Q_stokes - np.cos(2 * theta[2]) * U_stokes)
            )
        )
        dI_dtheta = np.array([dI_dtheta1, dI_dtheta2, dI_dtheta3])
@@ -1326,13 +1347,13 @@ def compute_Stokes(data_array, error_array, data_mask, headers, FWHM=None, scale
            * pol_eff[0]
            / N
            * (
-                np.cos(2.0 * globals()["theta"][0]) * (pol_flux_corr[1] - pol_flux_corr[2])
+                np.cos(2.0 * theta[0]) * (pol_flux_corr[1] - pol_flux_corr[2])
                - (
-                    pol_eff[2] * np.cos(-2.0 * globals()["theta"][2] + 2.0 * globals()["theta"][0])
-                    - pol_eff[1] * np.cos(-2.0 * globals()["theta"][0] + 2.0 * globals()["theta"][1])
+                    pol_eff[2] * np.cos(-2.0 * theta[2] + 2.0 * theta[0])
+                    - pol_eff[1] * np.cos(-2.0 * theta[0] + 2.0 * theta[1])
                )
                * Q_stokes
-                + coeff_stokes_corr[1, 0] * (np.sin(2.0 * globals()["theta"][0]) * Q_stokes - np.cos(2 * globals()["theta"][0]) * U_stokes)
+                + coeff_stokes_corr[1, 0] * (np.sin(2.0 * theta[0]) * Q_stokes - np.cos(2 * theta[0]) * U_stokes)
            )
        )
        dQ_dtheta2 = (
@@ -1340,13 +1361,13 @@ def compute_Stokes(data_array, error_array, data_mask, headers, FWHM=None, scale
            * pol_eff[1]
            / N
            * (
-                np.cos(2.0 * globals()["theta"][1]) * (pol_flux_corr[2] - pol_flux_corr[0])
+                np.cos(2.0 * theta[1]) * (pol_flux_corr[2] - pol_flux_corr[0])
                - (
-                    pol_eff[0] * np.cos(-2.0 * globals()["theta"][0] + 2.0 * globals()["theta"][1])
-                    - pol_eff[2] * np.cos(-2.0 * globals()["theta"][1] + 2.0 * globals()["theta"][2])
+                    pol_eff[0] * np.cos(-2.0 * theta[0] + 2.0 * theta[1])
+                    - pol_eff[2] * np.cos(-2.0 * theta[1] + 2.0 * theta[2])
                )
                * Q_stokes
-                + coeff_stokes_corr[1, 1] * (np.sin(2.0 * globals()["theta"][1]) * Q_stokes - np.cos(2 * globals()["theta"][1]) * U_stokes)
+                + coeff_stokes_corr[1, 1] * (np.sin(2.0 * theta[1]) * Q_stokes - np.cos(2 * theta[1]) * U_stokes)
            )
        )
        dQ_dtheta3 = (
@@ -1354,13 +1375,13 @@ def compute_Stokes(data_array, error_array, data_mask, headers, FWHM=None, scale
            * pol_eff[2]
            / N
            * (
-                np.cos(2.0 * globals()["theta"][2]) * (pol_flux_corr[0] - pol_flux_corr[1])
+                np.cos(2.0 * theta[2]) * (pol_flux_corr[0] - pol_flux_corr[1])
                - (
-                    pol_eff[1] * np.cos(-2.0 * globals()["theta"][1] + 2.0 * globals()["theta"][2])
-                    - pol_eff[0] * np.cos(-2.0 * globals()["theta"][2] + 2.0 * globals()["theta"][0])
+                    pol_eff[1] * np.cos(-2.0 * theta[1] + 2.0 * theta[2])
+                    - pol_eff[0] * np.cos(-2.0 * theta[2] + 2.0 * theta[0])
                )
                * Q_stokes
-                + coeff_stokes_corr[1, 2] * (np.sin(2.0 * globals()["theta"][2]) * Q_stokes - np.cos(2 * globals()["theta"][2]) * U_stokes)
+                + coeff_stokes_corr[1, 2] * (np.sin(2.0 * theta[2]) * Q_stokes - np.cos(2 * theta[2]) * U_stokes)
            )
        )
        dQ_dtheta = np.array([dQ_dtheta1, dQ_dtheta2, dQ_dtheta3])
@@ -1370,13 +1391,13 @@ def compute_Stokes(data_array, error_array, data_mask, headers, FWHM=None, scale
            * pol_eff[0]
            / N
            * (
-                np.sin(2.0 * globals()["theta"][0]) * (pol_flux_corr[1] - pol_flux_corr[2])
+                np.sin(2.0 * theta[0]) * (pol_flux_corr[1] - pol_flux_corr[2])
                - (
-                    pol_eff[2] * np.cos(-2.0 * globals()["theta"][2] + 2.0 * globals()["theta"][0])
-                    - pol_eff[1] * np.cos(-2.0 * globals()["theta"][0] + 2.0 * globals()["theta"][1])
+                    pol_eff[2] * np.cos(-2.0 * theta[2] + 2.0 * theta[0])
+                    - pol_eff[1] * np.cos(-2.0 * theta[0] + 2.0 * theta[1])
                )
                * U_stokes
-                + coeff_stokes_corr[2, 0] * (np.sin(2.0 * globals()["theta"][0]) * Q_stokes - np.cos(2 * globals()["theta"][0]) * U_stokes)
+                + coeff_stokes_corr[2, 0] * (np.sin(2.0 * theta[0]) * Q_stokes - np.cos(2 * theta[0]) * U_stokes)
            )
        )
        dU_dtheta2 = (
@@ -1384,13 +1405,13 @@ def compute_Stokes(data_array, error_array, data_mask, headers, FWHM=None, scale
            * pol_eff[1]
            / N
            * (
-                np.sin(2.0 * globals()["theta"][1]) * (pol_flux_corr[2] - pol_flux_corr[0])
+                np.sin(2.0 * theta[1]) * (pol_flux_corr[2] - pol_flux_corr[0])
                - (
-                    pol_eff[0] * np.cos(-2.0 * globals()["theta"][0] + 2.0 * globals()["theta"][1])
-                    - pol_eff[2] * np.cos(-2.0 * globals()["theta"][1] + 2.0 * globals()["theta"][2])
+                    pol_eff[0] * np.cos(-2.0 * theta[0] + 2.0 * theta[1])
+                    - pol_eff[2] * np.cos(-2.0 * theta[1] + 2.0 * theta[2])
                )
                * U_stokes
-                + coeff_stokes_corr[2, 1] * (np.sin(2.0 * globals()["theta"][1]) * Q_stokes - np.cos(2 * globals()["theta"][1]) * U_stokes)
+                + coeff_stokes_corr[2, 1] * (np.sin(2.0 * theta[1]) * Q_stokes - np.cos(2 * theta[1]) * U_stokes)
            )
        )
        dU_dtheta3 = (
@@ -1398,13 +1419,13 @@ def compute_Stokes(data_array, error_array, data_mask, headers, FWHM=None, scale
            * pol_eff[2]
            / N
            * (
-                np.sin(2.0 * globals()["theta"][2]) * (pol_flux_corr[0] - pol_flux_corr[1])
+                np.sin(2.0 * theta[2]) * (pol_flux_corr[0] - pol_flux_corr[1])
                - (
-                    pol_eff[1] * np.cos(-2.0 * globals()["theta"][1] + 2.0 * globals()["theta"][2])
-                    - pol_eff[0] * np.cos(-2.0 * globals()["theta"][2] + 2.0 * globals()["theta"][0])
+                    pol_eff[1] * np.cos(-2.0 * theta[1] + 2.0 * theta[2])
+                    - pol_eff[0] * np.cos(-2.0 * theta[2] + 2.0 * theta[0])
                )
                * U_stokes
-                + coeff_stokes_corr[2, 2] * (np.sin(2.0 * globals()["theta"][2]) * Q_stokes - np.cos(2 * globals()["theta"][2]) * U_stokes)
+                + coeff_stokes_corr[2, 2] * (np.sin(2.0 * theta[2]) * Q_stokes - np.cos(2 * theta[2]) * U_stokes)
            )
        )
        dU_dtheta = np.array([dU_dtheta1, dU_dtheta2, dU_dtheta3])
@@ -1422,8 +1443,48 @@ def compute_Stokes(data_array, error_array, data_mask, headers, FWHM=None, scale
        Stokes_cov[1, 1] += s_Q2_axis + s_Q2_stat
        Stokes_cov[2, 2] += s_U2_axis + s_U2_stat

+        # Save values to single header
+        header_stokes = pol_headers[0]
+    else:
+        all_I_stokes = np.zeros((np.unique(rotate).size, data_array.shape[1], data_array.shape[2]))
+        all_Q_stokes = np.zeros((np.unique(rotate).size, data_array.shape[1], data_array.shape[2]))
+        all_U_stokes = np.zeros((np.unique(rotate).size, data_array.shape[1], data_array.shape[2]))
+        all_Stokes_cov = np.zeros((np.unique(rotate).size, 3, 3, data_array.shape[1], data_array.shape[2]))
+        all_header_stokes = [{},]*np.unique(rotate).size
+        for i,rot in enumerate(np.unique(rotate)):
+            rot_mask = (rotate == rot)
+            all_I_stokes[i], all_Q_stokes[i], all_U_stokes[i], all_Stokes_cov[i], all_header_stokes[i] = compute_Stokes(data_array[rot_mask], error_array[rot_mask], data_mask, [headers[i] for i in np.arange(len(headers))[rot_mask]], FWHM=FWHM, scale=scale, smoothing=smoothing, transmitcorr=transmitcorr, integrate=False)
+        all_exp = np.array([float(h['exptime']) for h in all_header_stokes])
+        I_stokes = np.sum([exp*I for exp, I in zip(all_exp, all_I_stokes)],axis=0) / all_exp.sum()
+        Q_stokes = np.sum([exp*Q for exp, Q in zip(all_exp, all_Q_stokes)],axis=0) / all_exp.sum()
+        U_stokes = np.sum([exp*U for exp, U in zip(all_exp, all_U_stokes)],axis=0) / all_exp.sum()
+        Stokes_cov = np.zeros((3, 3, I_stokes.shape[0], I_stokes.shape[1]))
+        for i in range(3):
+            Stokes_cov[i,i] = np.sum([exp**2*cov for exp, cov in zip(all_exp, all_Stokes_cov[:,i,i])], axis=0) / all_exp.sum()**2
+            for j in [x for x in range(3) if x!=i]:
+                Stokes_cov[i,j] = np.sqrt(np.sum([exp**2*cov**2 for exp, cov in zip(all_exp, all_Stokes_cov[:,i,j])], axis=0) / all_exp.sum()**2)
+                Stokes_cov[j,i] = np.sqrt(np.sum([exp**2*cov**2 for exp, cov in zip(all_exp, all_Stokes_cov[:,j,i])], axis=0) / all_exp.sum()**2)
+        # Save values to single header
+        header_stokes = all_header_stokes[0]
+        header_stokes['exptime'] = all_exp.sum()
+
+    # Nan handling :
+    fmax = np.finfo(np.float64).max
+    I_stokes[np.isnan(I_stokes)] = 0.0
+    Q_stokes[I_stokes == 0.0] = 0.0
+    U_stokes[I_stokes == 0.0] = 0.0
+    Q_stokes[np.isnan(Q_stokes)] = 0.0
+    U_stokes[np.isnan(U_stokes)] = 0.0
+    Stokes_cov[np.isnan(Stokes_cov)] = fmax
+
+    if integrate:
        # Compute integrated values for P, PA before any rotation
-        mask = np.logical_and(data_mask.astype(bool), (I_stokes > 0.0))
+        mask = deepcopy(data_mask).astype(bool)
        I_diluted = I_stokes[mask].sum()
        Q_diluted = Q_stokes[mask].sum()
        U_diluted = U_stokes[mask].sum()
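When observations taken at different roll angles are reduced separately, the per-rotation Stokes maps are combined with exposure-time weights, and the variances combine with the squared weights. A stripped-down sketch of that weighting (array shapes and values are invented):

import numpy as np

exptime = np.array([1200.0, 900.0])              # per-rotation exposure times
I_maps = np.random.rand(2, 64, 64)               # per-rotation Stokes I maps
var_maps = np.random.rand(2, 64, 64) * 1e-4      # per-rotation variances on I

# Exposure-weighted mean of the maps.
I_comb = np.sum(exptime[:, None, None] * I_maps, axis=0) / exptime.sum()

# Variances of independent maps add with the squared weights.
var_comb = np.sum(exptime[:, None, None] ** 2 * var_maps, axis=0) / exptime.sum() ** 2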
@@ -1435,7 +1496,7 @@ def compute_Stokes(data_array, error_array, data_mask, headers, FWHM=None, scale
        QU_diluted_err = np.sqrt(np.sum(Stokes_cov[1, 2][mask] ** 2))

        P_diluted = np.sqrt(Q_diluted**2 + U_diluted**2) / I_diluted
-        P_diluted_err = (1.0 / I_diluted) * np.sqrt(
+        P_diluted_err = np.sqrt(
            (Q_diluted**2 * Q_diluted_err**2 + U_diluted**2 * U_diluted_err**2 + 2.0 * Q_diluted * U_diluted * QU_diluted_err) / (Q_diluted**2 + U_diluted**2)
            + ((Q_diluted / I_diluted) ** 2 + (U_diluted / I_diluted) ** 2) * I_diluted_err**2
            - 2.0 * (Q_diluted / I_diluted) * IQ_diluted_err
@@ -1447,16 +1508,15 @@ def compute_Stokes(data_array, error_array, data_mask, headers, FWHM=None, scale
            U_diluted**2 * Q_diluted_err**2 + Q_diluted**2 * U_diluted_err**2 - 2.0 * Q_diluted * U_diluted * QU_diluted_err
        )

-        for header in headers:
-            header["P_int"] = (P_diluted, "Integrated polarization degree")
-            header["P_int_err"] = (np.ceil(P_diluted_err * 1000.0) / 1000.0, "Integrated polarization degree error")
-            header["PA_int"] = (PA_diluted, "Integrated polarization angle")
-            header["PA_int_err"] = (np.ceil(PA_diluted_err * 10.0) / 10.0, "Integrated polarization angle error")
+        header_stokes["P_int"] = (P_diluted, "Integrated polarization degree")
+        header_stokes["sP_int"] = (np.ceil(P_diluted_err * 1000.0) / 1000.0, "Integrated polarization degree error")
+        header_stokes["PA_int"] = (PA_diluted, "Integrated polarization angle")
+        header_stokes["sPA_int"] = (np.ceil(PA_diluted_err * 10.0) / 10.0, "Integrated polarization angle error")

-    return I_stokes, Q_stokes, U_stokes, Stokes_cov
+    return I_stokes, Q_stokes, U_stokes, Stokes_cov, header_stokes


-def compute_pol(I_stokes, Q_stokes, U_stokes, Stokes_cov, headers):
+def compute_pol(I_stokes, Q_stokes, U_stokes, Stokes_cov, header_stokes):
    """
    Compute the polarization degree (in %) and angle (in deg) and their
    respective errors from given Stokes parameters.
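Both the integrated header keywords written above and the maps produced by compute_pol rest on the usual definitions of the linear polarization degree and angle from the Stokes fluxes; as a tiny reminder sketch with made-up numbers:

import numpy as np

I, Q, U = 1.0, 0.031, -0.012   # made-up integrated Stokes fluxes

P = np.sqrt(Q**2 + U**2) / I                 # polarization degree (fraction)
PA = 0.5 * np.degrees(np.arctan2(U, Q))      # polarization angle in degrees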
@@ -1473,8 +1533,8 @@ def compute_pol(I_stokes, Q_stokes, U_stokes, Stokes_cov, headers):
        +45/-45deg linear polarization intensity
    Stokes_cov : numpy.ndarray
        Covariance matrix of the Stokes parameters I, Q, U.
-    headers : header list
-        List of headers corresponding to the images in data_array.
+    header_stokes : astropy.fits.header.Header
+        Header file associated with the Stokes fluxes.
    ----------
    Returns:
    P : numpy.ndarray
@@ -1493,9 +1553,6 @@ def compute_pol(I_stokes, Q_stokes, U_stokes, Stokes_cov, headers):
    s_PA_P : numpy.ndarray
        Image (2D floats) containing the Poisson noise error on the
        polarization angle.
-    new_headers : header list
-        Updated list of headers corresponding to the reduced images accounting
-        for the new orientation angle.
    """
    # Polarization degree and angle computation
    mask = I_stokes > 0.0
@@ -1545,7 +1602,7 @@ def compute_pol(I_stokes, Q_stokes, U_stokes, Stokes_cov, headers):
    # Compute the total exposure time so that
    # I_stokes*exp_tot = N_tot the total number of events
-    exp_tot = np.array([header["exptime"] for header in headers]).sum()
+    exp_tot = header_stokes["exptime"]
    # print("Total exposure time : {} sec".format(exp_tot))
    N_obs = I_stokes * exp_tot
@@ -1566,7 +1623,7 @@ def compute_pol(I_stokes, Q_stokes, U_stokes, Stokes_cov, headers):
    return P, debiased_P, s_P, s_P_P, PA, s_PA, s_PA_P


-def rotate_Stokes(I_stokes, Q_stokes, U_stokes, Stokes_cov, data_mask, headers, ang=None, SNRi_cut=None):
+def rotate_Stokes(I_stokes, Q_stokes, U_stokes, Stokes_cov, data_mask, header_stokes, SNRi_cut=None):
    """
    Use scipy.ndimage.rotate to rotate I_stokes to an angle, and a rotation
    matrix to rotate Q, U of a given angle in degrees and update header
@@ -1586,12 +1643,8 @@ def rotate_Stokes(I_stokes, Q_stokes, U_stokes, Stokes_cov, data_mask, headers,
        Covariance matrix of the Stokes parameters I, Q, U.
    data_mask : numpy.ndarray
        2D boolean array delimiting the data to work on.
-    headers : header list
-        List of headers corresponding to the reduced images.
-    ang : float, optional
-        Rotation angle (in degrees) that should be applied to the Stokes
-        parameters. If None, will rotate to have North up.
-        Defaults to None.
+    header_stokes : astropy.fits.header.Header
+        Header file associated with the Stokes fluxes.
    SNRi_cut : float, optional
        Cut that should be applied to the signal-to-noise ratio on I.
        Any SNR < SNRi_cut won't be displayed. If None, cut won't be applied.
@@ -1609,8 +1662,8 @@ def rotate_Stokes(I_stokes, Q_stokes, U_stokes, Stokes_cov, data_mask, headers,
        accounting for +45/-45deg linear polarization intensity.
    new_Stokes_cov : numpy.ndarray
        Updated covariance matrix of the Stokes parameters I, Q, U.
-    new_headers : header list
-        Updated list of headers corresponding to the reduced images accounting
+    new_header_stokes : astropy.fits.header.Header
+        Updated Header file associated with the Stokes fluxes accounting
        for the new orientation angle.
    new_data_mask : numpy.ndarray
        Updated 2D boolean array delimiting the data to work on.
@@ -1628,11 +1681,12 @@ def rotate_Stokes(I_stokes, Q_stokes, U_stokes, Stokes_cov, data_mask, headers,
                U_stokes[i, j] = eps * np.sqrt(Stokes_cov[2, 2][i, j])

    # Rotate I_stokes, Q_stokes, U_stokes using rotation matrix
-    if ang is None:
-        ang = np.zeros((len(headers),))
-        for i, head in enumerate(headers):
-            ang[i] = -head["orientat"]
-        ang = ang.mean()
+    # ang = np.zeros((len(headers),))
+    # for i, head in enumerate(headers):
+    #     pc = WCS(head).celestial.wcs.pc[0,0]
+    #     ang[i] = -np.arccos(WCS(head).celestial.wcs.pc[0,0]) * 180.0 / np.pi if np.abs(pc) < 1. else 0.
+    pc = WCS(header_stokes).celestial.wcs.pc[0,0]
+    ang = -np.arccos(WCS(header_stokes).celestial.wcs.pc[0,0]) * 180.0 / np.pi if np.abs(pc) < 1. else 0.
    alpha = np.pi / 180.0 * ang
    mrot = np.array([[1.0, 0.0, 0.0], [0.0, np.cos(2.0 * alpha), np.sin(2.0 * alpha)], [0, -np.sin(2.0 * alpha), np.cos(2.0 * alpha)]])
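The rotation angle is now read off the WCS PC matrix of the combined Stokes header rather than passed in, and Q/U are mixed with a 2-alpha rotation matrix, since polarization pseudo-vectors rotate at twice the spatial angle. A self-contained sketch of both steps with an invented header; note it derives the angle with arctan2 of the PC elements, a common alternative to the arccos expression used above:

import numpy as np
from astropy.io import fits
from astropy.wcs import WCS

# Made-up header whose PC matrix encodes a 25 deg roll.
hdr = fits.Header()
hdr["CTYPE1"], hdr["CTYPE2"] = "RA---TAN", "DEC--TAN"
hdr["CRPIX1"] = hdr["CRPIX2"] = 50.0
hdr["CRVAL1"], hdr["CRVAL2"] = 10.0, -30.0
hdr["CDELT1"], hdr["CDELT2"] = -4e-6, 4e-6
roll = np.deg2rad(25.0)
hdr["PC1_1"], hdr["PC1_2"] = np.cos(roll), -np.sin(roll)
hdr["PC2_1"], hdr["PC2_2"] = np.sin(roll), np.cos(roll)

pc = WCS(hdr).celestial.wcs.pc
ang = -np.degrees(np.arctan2(pc[1, 0], pc[0, 0]))   # angle needed to put North up

# Stokes Q and U rotate with twice the spatial angle.
alpha = np.radians(ang)
rot2 = np.array([[np.cos(2 * alpha), np.sin(2 * alpha)],
                 [-np.sin(2 * alpha), np.cos(2 * alpha)]])
Q, U = 0.04, -0.01
Q_new, U_new = rot2 @ np.array([Q, U])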
@@ -1668,24 +1722,22 @@ def rotate_Stokes(I_stokes, Q_stokes, U_stokes, Stokes_cov, data_mask, headers,
                new_Stokes_cov[:, :, i, j] = np.dot(mrot, np.dot(new_Stokes_cov[:, :, i, j], mrot.T))

    # Update headers to new angle
-    new_headers = []
    mrot = np.array([[np.cos(-alpha), -np.sin(-alpha)], [np.sin(-alpha), np.cos(-alpha)]])
-    for header in headers:
-        new_header = deepcopy(header)
-        new_header["orientat"] = header["orientat"] + ang
-        new_wcs = WCS(header).celestial.deepcopy()
-        new_wcs.wcs.pc = np.dot(mrot, new_wcs.wcs.pc)
-        new_wcs.wcs.crpix = np.dot(mrot, new_wcs.wcs.crpix - old_center[::-1]) + new_center[::-1]
-        new_wcs.wcs.set()
-        for key, val in new_wcs.to_header().items():
-            new_header.set(key, val)
-        if new_wcs.wcs.pc[0, 0] == 1.0:
-            new_header.set("PC1_1", 1.0)
-        if new_wcs.wcs.pc[1, 1] == 1.0:
-            new_header.set("PC2_2", 1.0)
-        new_headers.append(new_header)
+    new_header_stokes = deepcopy(header_stokes)
+    new_header_stokes["orientat"] = header_stokes["orientat"] + ang
+    new_wcs = WCS(header_stokes).celestial.deepcopy()
+    new_wcs.wcs.pc = np.dot(mrot, new_wcs.wcs.pc)
+    new_wcs.wcs.crpix = np.dot(mrot, new_wcs.wcs.crpix - old_center[::-1]) + new_center[::-1]
+    new_wcs.wcs.set()
+    for key, val in new_wcs.to_header().items():
+        new_header_stokes.set(key, val)
+    if new_wcs.wcs.pc[0, 0] == 1.0:
+        new_header_stokes.set("PC1_1", 1.0)
+    if new_wcs.wcs.pc[1, 1] == 1.0:
+        new_header_stokes.set("PC2_2", 1.0)
+    new_header_stokes["orientat"] = header_stokes["orientat"] + ang

    # Nan handling :
    fmax = np.finfo(np.float64).max
@@ -1722,16 +1774,15 @@ def rotate_Stokes(I_stokes, Q_stokes, U_stokes, Stokes_cov, data_mask, headers,
            U_diluted**2 * Q_diluted_err**2 + Q_diluted**2 * U_diluted_err**2 - 2.0 * Q_diluted * U_diluted * QU_diluted_err
        )

-    for header in new_headers:
-        header["P_int"] = (P_diluted, "Integrated polarization degree")
-        header["P_int_err"] = (np.ceil(P_diluted_err * 1000.0) / 1000.0, "Integrated polarization degree error")
-        header["PA_int"] = (PA_diluted, "Integrated polarization angle")
-        header["PA_int_err"] = (np.ceil(PA_diluted_err * 10.0) / 10.0, "Integrated polarization angle error")
+    new_header_stokes["P_int"] = (P_diluted, "Integrated polarization degree")
+    new_header_stokes["sP_int"] = (np.ceil(P_diluted_err * 1000.0) / 1000.0, "Integrated polarization degree error")
+    new_header_stokes["PA_int"] = (PA_diluted, "Integrated polarization angle")
+    new_header_stokes["sPA_int"] = (np.ceil(PA_diluted_err * 10.0) / 10.0, "Integrated polarization angle error")

-    return new_I_stokes, new_Q_stokes, new_U_stokes, new_Stokes_cov, new_data_mask, new_headers
+    return new_I_stokes, new_Q_stokes, new_U_stokes, new_Stokes_cov, new_data_mask, new_header_stokes


-def rotate_data(data_array, error_array, data_mask, headers, ang):
+def rotate_data(data_array, error_array, data_mask, headers):
    """
    Use scipy.ndimage.rotate to rotate I_stokes to an angle, and a rotation
    matrix to rotate Q, U of a given angle in degrees and update header
@@ -1746,9 +1797,6 @@ def rotate_data(data_array, error_array, data_mask, headers, ang):
        2D boolean array delimiting the data to work on.
    headers : header list
        List of headers corresponding to the reduced images.
-    ang : float
-        Rotation angle (in degrees) that should be applied to the Stokes
-        parameters
    ----------
    Returns:
    new_data_array : numpy.ndarray
@@ -1762,7 +1810,6 @@ def rotate_data(data_array, error_array, data_mask, headers, ang):
        for the new orientation angle.
    """
    # Rotate I_stokes, Q_stokes, U_stokes using rotation matrix
-    alpha = ang * np.pi / 180.0
    old_center = np.array(data_array[0].shape) / 2
    shape = np.fix(np.array(data_array[0].shape) * np.sqrt(2.5)).astype(int)
@@ -1771,37 +1818,41 @@ def rotate_data(data_array, error_array, data_mask, headers, ang):
    data_array = zeropad(data_array, [data_array.shape[0], *shape])
    error_array = zeropad(error_array, [error_array.shape[0], *shape])
    data_mask = zeropad(data_mask, shape)

    # Rotate original images using scipy.ndimage.rotate
-    new_data_array = []
-    new_error_array = []
-    for i in range(data_array.shape[0]):
-        new_data_array.append(sc_rotate(data_array[i], ang, order=1, reshape=False, cval=0.0))
-        new_error_array.append(sc_rotate(error_array[i], ang, order=1, reshape=False, cval=0.0))
-    new_data_array = np.array(new_data_array)
-    new_error_array = np.array(new_error_array)
-    new_data_mask = sc_rotate(data_mask * 10.0, ang, order=1, reshape=False, cval=0.0)
-    new_data_mask[new_data_mask < 2] = 0.0
-    new_data_mask = new_data_mask.astype(bool)
-    for i in range(new_data_array.shape[0]):
-        new_data_array[i][new_data_array[i] < 0.0] = 0.0
-
-    # Update headers to new angle
-    new_headers = []
-    mrot = np.array([[np.cos(-alpha), -np.sin(-alpha)], [np.sin(-alpha), np.cos(-alpha)]])
-    for header in headers:
-        new_header = deepcopy(header)
-        new_header["orientat"] = header["orientat"] + ang
-        new_wcs = WCS(header).celestial.deepcopy()
-        new_wcs.wcs.pc[:2, :2] = np.dot(mrot, new_wcs.wcs.pc[:2, :2])
-        new_wcs.wcs.crpix[:2] = np.dot(mrot, new_wcs.wcs.crpix[:2] - old_center[::-1]) + new_center[::-1]
-        new_wcs.wcs.set()
-        for key, val in new_wcs.to_header().items():
-            new_header[key] = val
-        new_headers.append(new_header)
+    new_headers = []
+    new_data_array = []
+    new_error_array = []
+    new_data_mask = []
+    for i,header in zip(range(data_array.shape[0]),headers):
+        ang = -float(header["ORIENTAT"])
+        alpha = ang * np.pi / 180.0
+        new_data_array.append(sc_rotate(data_array[i], ang, order=1, reshape=False, cval=0.0))
+        new_error_array.append(sc_rotate(error_array[i], ang, order=1, reshape=False, cval=0.0))
+        new_data_mask.append(sc_rotate(data_mask * 10.0, ang, order=1, reshape=False, cval=0.0))
+        # Update headers to new angle
+        mrot = np.array([[np.cos(-alpha), -np.sin(-alpha)], [np.sin(-alpha), np.cos(-alpha)]])
+        new_header = deepcopy(header)
+        new_wcs = WCS(header).celestial.deepcopy()
+        new_wcs.wcs.pc[:2, :2] = np.dot(mrot, new_wcs.wcs.pc[:2, :2])
+        new_wcs.wcs.crpix[:2] = np.dot(mrot, new_wcs.wcs.crpix[:2] - old_center[::-1]) + new_center[::-1]
+        new_wcs.wcs.set()
+        for key, val in new_wcs.to_header().items():
+            new_header[key] = val
+        new_header["ORIENTAT"] = np.arccos(new_wcs.celestial.wcs.pc[0,0]) * 180.0 / np.pi
+        new_header["ROTATE"] = ang
+        new_headers.append(new_header)
+    globals()["theta"] = globals()["theta"] - alpha
+
+    new_data_array = np.array(new_data_array)
+    new_error_array = np.array(new_error_array)
+    new_data_mask = np.array(new_data_mask).sum(axis=0)
+    new_data_mask[new_data_mask < new_data_mask.max()*2./3.] = 0.0
+    new_data_mask = new_data_mask.astype(bool)
+    for i in range(new_data_array.shape[0]):
+        new_data_array[i][new_data_array[i] < 0.0] = 0.0

    return new_data_array, new_error_array, new_data_mask, new_headers
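Each exposure is now rotated by its own -ORIENTAT, so the common mask is rebuilt by rotating the (scaled) mask alongside every image, summing the stack, and keeping only pixels that stay well covered across all rotations. A small sketch of that mask combination, with arbitrary angles and shapes:

import numpy as np
from scipy.ndimage import rotate as sc_rotate

mask = np.zeros((64, 64))
mask[8:56, 8:56] = 1.0
angles = [-12.0, 23.0, 47.0]   # hypothetical -ORIENTAT values, in degrees

# Rotate the scaled mask once per exposure, with linear interpolation.
rotated = [sc_rotate(mask * 10.0, ang, order=1, reshape=False, cval=0.0) for ang in angles]

# Keep pixels that remain well covered in the stack of rotated masks.
stacked = np.sum(rotated, axis=0)
common_mask = stacked >= stacked.max() * 2.0 / 3.0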