better handling of data rotation, add information about reduction in header

2024-07-03 17:25:34 +02:00
parent 6879a8b551
commit fdcf1cb323
5 changed files with 349 additions and 256 deletions

View File

@@ -17,7 +17,7 @@ from lib.utils import princ_angle, sci_not
from matplotlib.colors import LogNorm
-def main(target=None, proposal_id=None, infiles=None, output_dir="./data", crop=False, interactive=False):
+def main(target=None, proposal_id=None, infiles=None, output_dir="./data", crop=False, interactive=False, **kwargs):
# Reduction parameters
# Deconvolution
deconvolve = False
@@ -36,13 +36,12 @@ def main(target=None, proposal_id=None, infiles=None, output_dir="./data", crop=
# Background estimation
error_sub_type = "freedman-diaconis" # sqrt, sturges, rice, scott, freedman-diaconis (default) or shape (example (51, 51))
-subtract_error = 1.0
+subtract_error = 0.7
display_bkg = False
# Data binning
-rebin = True
-pxsize = 2
-px_scale = "px" # pixel, arcsec or full
+pxsize = 0.1
+pxscale = "arcsec" # pixel, arcsec or full
rebin_operation = "sum" # sum or average
# Alignement
@@ -55,23 +54,45 @@ def main(target=None, proposal_id=None, infiles=None, output_dir="./data", crop=
# Smoothing
smoothing_function = "combine" # gaussian_after, weighted_gaussian_after, gaussian, weighted_gaussian or combine
-smoothing_FWHM = 1.5 # If None, no smoothing is done
+smoothing_FWHM = 0.2 # If None, no smoothing is done
-smoothing_scale = "px" # pixel or arcsec
+smoothing_scale = "arcsec" # pixel or arcsec
# Rotation
-rotate_data = False # rotation to North convention can give erroneous results
-rotate_stokes = True
+rotate_North = True
# Polarization map output
SNRp_cut = 3.0 # P measurments with SNR>3
-SNRi_cut = 3.0 # I measurments with SNR>30, which implies an uncertainty in P of 4.7%.
+SNRi_cut = 1.0 # I measurments with SNR>30, which implies an uncertainty in P of 4.7%.
-flux_lim = None # lowest and highest flux displayed on plot, defaults to bkg and maximum in cut if None
+flux_lim = 1e-19, 3e-17 # lowest and highest flux displayed on plot, defaults to bkg and maximum in cut if None
-vec_scale = 3
+scale_vec = 5
step_vec = 1 # plot all vectors in the array. if step_vec = 2, then every other vector will be plotted if step_vec = 0 then all vectors are displayed at full length
# Pipeline start
+# Step 0:
+# Get parameters from kwargs
+for key, value in [
+["error_sub_type", error_sub_type],
+["subtract_error", subtract_error],
+["pxsize", pxsize],
+["pxscale", pxscale],
+["smoothing_function", smoothing_function],
+["smoothing_FWHM", smoothing_FWHM],
+["smoothing_scale", smoothing_scale],
+["SNRp_cut", SNRp_cut],
+["SNRi_cut", SNRi_cut],
+["flux_lim", flux_lim],
+["scale_vec", scale_vec],
+["step_vec", step_vec],
+]:
+try:
+value = kwargs[key]
+except KeyError:
+pass
+rebin = True if pxsize is not None else False
# Step 1:
# Get data from fits files and translate to flux in erg/cm²/s/Angstrom.
+outfiles = []
if infiles is not None:
prod = np.array([["/".join(filepath.split("/")[:-1]), filepath.split("/")[-1]] for filepath in infiles], dtype=str)
obs_dir = "/".join(infiles[0].split("/")[:-1])
@@ -85,7 +106,7 @@ def main(target=None, proposal_id=None, infiles=None, output_dir="./data", crop=
target, products = retrieve_products(target, proposal_id, output_dir=output_dir)
prod = products.pop()
for prods in products:
-main(target=target, infiles=["/".join(pr) for pr in prods], output_dir=output_dir, crop=crop, interactive=interactive)
+outfiles.append(main(target=target, infiles=["/".join(pr) for pr in prods], output_dir=output_dir, crop=crop, interactive=interactive)[0])
data_folder = prod[0][0]
try:
plots_folder = data_folder.replace("data", "plots")
@@ -99,8 +120,8 @@ def main(target=None, proposal_id=None, infiles=None, output_dir="./data", crop=
figname = "_".join([target, "FOC"]) figname = "_".join([target, "FOC"])
figtype = "" figtype = ""
if rebin: if rebin:
if px_scale not in ["full"]: if pxscale not in ["full"]:
figtype = "".join(["b", "{0:.2f}".format(pxsize), px_scale]) # additionnal informations figtype = "".join(["b", "{0:.2f}".format(pxsize), pxscale]) # additionnal informations
else: else:
figtype = "full" figtype = "full"
if smoothing_FWHM is not None: if smoothing_FWHM is not None:
@@ -137,9 +158,34 @@ def main(target=None, proposal_id=None, infiles=None, output_dir="./data", crop=
return_background=True,
)
+# Rotate data to have same orientation
+rotate_data = np.unique([float(head["ORIENTAT"]) for head in headers]).size != 1
+if rotate_data:
+ang = np.mean([head["ORIENTAT"] for head in headers])
+for head in headers:
+head["ORIENTAT"] -= ang
+data_array, error_array, data_mask, headers = proj_red.rotate_data(data_array, error_array, data_mask, headers)
+if display_data:
+proj_plots.plot_obs(
+data_array,
+headers,
+savename="_".join([figname, "rotate_data"]),
+plots_folder=plots_folder,
+norm=LogNorm(
+vmin=data_array[data_array > 0.0].min() * headers[0]["photflam"], vmax=data_array[data_array > 0.0].max() * headers[0]["photflam"]
+),
+)
# Align and rescale images with oversampling.
data_array, error_array, headers, data_mask, shifts, error_shifts = proj_red.align_data(
-data_array, headers, error_array=error_array, background=background, upsample_factor=10, ref_center=align_center, return_shifts=True
+data_array,
+headers,
+error_array=error_array,
+data_mask=data_mask,
+background=background,
+upsample_factor=10,
+ref_center=align_center,
+return_shifts=True,
)
if display_align:
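A worked example of what the added rotation step computes (the ORIENTAT values below are hypothetical; only the referencing to the mean orientation is taken from the hunk above):

```python
import numpy as np

# Hypothetical ORIENTAT values (degrees) for three exposures of the same target.
orientat = np.array([45.0, 45.0, 47.5])

rotate_data = np.unique(orientat).size != 1  # True: the exposures are not co-oriented
ang = orientat.mean()                        # common reference orientation, ~45.83 deg
offsets = orientat - ang                     # [-0.83, -0.83, 1.67]: residual angle left in
                                             # each header before rotate_data is called
```

The exposures are thus first brought to a common (mean) orientation; putting North up is deferred to the later rotate_North step on the Stokes maps.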
@@ -155,17 +201,11 @@ def main(target=None, proposal_id=None, infiles=None, output_dir="./data", crop=
# Rebin data to desired pixel size.
if rebin:
data_array, error_array, headers, Dxy, data_mask = proj_red.rebin_array(
-data_array, error_array, headers, pxsize=pxsize, scale=px_scale, operation=rebin_operation, data_mask=data_mask
+data_array, error_array, headers, pxsize=pxsize, scale=pxscale, operation=rebin_operation, data_mask=data_mask
)
-# Rotate data to have North up
-if rotate_data:
-data_mask = np.ones(data_array.shape[1:]).astype(bool)
-alpha = headers[0]["orientat"]
-data_array, error_array, data_mask, headers = proj_red.rotate_data(data_array, error_array, data_mask, headers, -alpha)
# Plot array for checking output
-if display_data and px_scale.lower() not in ["full", "integrate"]:
+if display_data and pxscale.lower() not in ["full", "integrate"]:
proj_plots.plot_obs(
data_array,
headers,
@@ -202,7 +242,7 @@ def main(target=None, proposal_id=None, infiles=None, output_dir="./data", crop=
# Step 3:
# Rotate images to have North up
-if rotate_stokes:
+if rotate_North:
I_stokes, Q_stokes, U_stokes, Stokes_cov, data_mask, headers = proj_red.rotate_Stokes(
I_stokes, Q_stokes, U_stokes, Stokes_cov, data_mask, headers, SNRi_cut=None
)
@@ -215,7 +255,7 @@ def main(target=None, proposal_id=None, infiles=None, output_dir="./data", crop=
# Step 4:
# Save image to FITS.
figname = "_".join([figname, figtype]) if figtype != "" else figname
-Stokes_test = proj_fits.save_Stokes(
+Stokes_hdul = proj_fits.save_Stokes(
I_stokes,
Q_stokes,
U_stokes,
@@ -238,25 +278,25 @@ def main(target=None, proposal_id=None, infiles=None, output_dir="./data", crop=
# crop to desired region of interest (roi)
if crop:
figname += "_crop"
-stokescrop = proj_plots.crop_Stokes(deepcopy(Stokes_test), norm=LogNorm())
+stokescrop = proj_plots.crop_Stokes(deepcopy(Stokes_hdul), norm=LogNorm())
stokescrop.crop()
stokescrop.write_to("/".join([data_folder, figname + ".fits"]))
-Stokes_test, headers = stokescrop.hdul_crop, [dataset.header for dataset in stokescrop.hdul_crop]
+Stokes_hdul, headers = stokescrop.hdul_crop, [dataset.header for dataset in stokescrop.hdul_crop]
-data_mask = Stokes_test["data_mask"].data.astype(bool)
+data_mask = Stokes_hdul["data_mask"].data.astype(bool)
print(
"F_int({0:.0f} Angs) = ({1} ± {2})e{3} ergs.cm^-2.s^-1.Angs^-1".format(
headers[0]["photplam"],
*sci_not(
-Stokes_test[0].data[data_mask].sum() * headers[0]["photflam"],
+Stokes_hdul[0].data[data_mask].sum() * headers[0]["photflam"],
-np.sqrt(Stokes_test[3].data[0, 0][data_mask].sum()) * headers[0]["photflam"],
+np.sqrt(Stokes_hdul[3].data[0, 0][data_mask].sum()) * headers[0]["photflam"],
2,
out=int,
),
)
)
-print("P_int = {0:.1f} ± {1:.1f} %".format(headers[0]["p_int"] * 100.0, np.ceil(headers[0]["p_int_err"] * 1000.0) / 10.0))
+print("P_int = {0:.1f} ± {1:.1f} %".format(headers[0]["p_int"] * 100.0, np.ceil(headers[0]["sP_int"] * 1000.0) / 10.0))
-print("PA_int = {0:.1f} ± {1:.1f} °".format(princ_angle(headers[0]["pa_int"]), princ_angle(np.ceil(headers[0]["pa_int_err"] * 10.0) / 10.0)))
+print("PA_int = {0:.1f} ± {1:.1f} °".format(princ_angle(headers[0]["pa_int"]), princ_angle(np.ceil(headers[0]["sPA_int"] * 10.0) / 10.0)))
# Background values
print(
"F_bkg({0:.0f} Angs) = ({1} ± {2})e{3} ergs.cm^-2.s^-1.Angs^-1".format(
@@ -266,122 +306,124 @@ def main(target=None, proposal_id=None, infiles=None, output_dir="./data", crop=
print("P_bkg = {0:.1f} ± {1:.1f} %".format(debiased_P_bkg[0, 0] * 100.0, np.ceil(s_P_bkg[0, 0] * 1000.0) / 10.0)) print("P_bkg = {0:.1f} ± {1:.1f} %".format(debiased_P_bkg[0, 0] * 100.0, np.ceil(s_P_bkg[0, 0] * 1000.0) / 10.0))
print("PA_bkg = {0:.1f} ± {1:.1f} °".format(princ_angle(PA_bkg[0, 0]), princ_angle(np.ceil(s_PA_bkg[0, 0] * 10.0) / 10.0))) print("PA_bkg = {0:.1f} ± {1:.1f} °".format(princ_angle(PA_bkg[0, 0]), princ_angle(np.ceil(s_PA_bkg[0, 0] * 10.0) / 10.0)))
# Plot polarization map (Background is either total Flux, Polarization degree or Polarization degree error). # Plot polarization map (Background is either total Flux, Polarization degree or Polarization degree error).
if px_scale.lower() not in ["full", "integrate"] and not interactive: if pxscale.lower() not in ["full", "integrate"] and not interactive:
proj_plots.polarization_map( proj_plots.polarization_map(
deepcopy(Stokes_test), deepcopy(Stokes_hdul),
data_mask, data_mask,
SNRp_cut=SNRp_cut, SNRp_cut=SNRp_cut,
SNRi_cut=SNRi_cut, SNRi_cut=SNRi_cut,
flux_lim=flux_lim, flux_lim=flux_lim,
step_vec=step_vec, step_vec=step_vec,
vec_scale=vec_scale, scale_vec=scale_vec,
savename="_".join([figname]), savename="_".join([figname]),
plots_folder=plots_folder, plots_folder=plots_folder,
) )
proj_plots.polarization_map(
-deepcopy(Stokes_test),
+deepcopy(Stokes_hdul),
data_mask,
SNRp_cut=SNRp_cut,
SNRi_cut=SNRi_cut,
flux_lim=flux_lim,
step_vec=step_vec,
-vec_scale=vec_scale,
+scale_vec=scale_vec,
savename="_".join([figname, "I"]),
plots_folder=plots_folder,
display="Intensity",
)
proj_plots.polarization_map(
-deepcopy(Stokes_test),
+deepcopy(Stokes_hdul),
data_mask,
SNRp_cut=SNRp_cut,
SNRi_cut=SNRi_cut,
flux_lim=flux_lim,
step_vec=step_vec,
-vec_scale=vec_scale,
+scale_vec=scale_vec,
savename="_".join([figname, "P_flux"]),
plots_folder=plots_folder,
display="Pol_Flux",
)
proj_plots.polarization_map(
-deepcopy(Stokes_test),
+deepcopy(Stokes_hdul),
data_mask,
SNRp_cut=SNRp_cut,
SNRi_cut=SNRi_cut,
flux_lim=flux_lim,
step_vec=step_vec,
-vec_scale=vec_scale,
+scale_vec=scale_vec,
savename="_".join([figname, "P"]),
plots_folder=plots_folder,
display="Pol_deg",
)
proj_plots.polarization_map(
-deepcopy(Stokes_test),
+deepcopy(Stokes_hdul),
data_mask,
SNRp_cut=SNRp_cut,
SNRi_cut=SNRi_cut,
flux_lim=flux_lim,
step_vec=step_vec,
-vec_scale=vec_scale,
+scale_vec=scale_vec,
savename="_".join([figname, "PA"]),
plots_folder=plots_folder,
display="Pol_ang",
)
proj_plots.polarization_map(
-deepcopy(Stokes_test),
+deepcopy(Stokes_hdul),
data_mask,
SNRp_cut=SNRp_cut,
SNRi_cut=SNRi_cut,
flux_lim=flux_lim,
step_vec=step_vec,
-vec_scale=vec_scale,
+scale_vec=scale_vec,
savename="_".join([figname, "I_err"]),
plots_folder=plots_folder,
display="I_err",
)
proj_plots.polarization_map(
-deepcopy(Stokes_test),
+deepcopy(Stokes_hdul),
data_mask,
SNRp_cut=SNRp_cut,
SNRi_cut=SNRi_cut,
flux_lim=flux_lim,
step_vec=step_vec,
-vec_scale=vec_scale,
+scale_vec=scale_vec,
savename="_".join([figname, "P_err"]),
plots_folder=plots_folder,
display="Pol_deg_err",
)
proj_plots.polarization_map(
-deepcopy(Stokes_test),
+deepcopy(Stokes_hdul),
data_mask,
SNRp_cut=SNRp_cut,
SNRi_cut=SNRi_cut,
flux_lim=flux_lim,
step_vec=step_vec,
-vec_scale=vec_scale,
+scale_vec=scale_vec,
savename="_".join([figname, "SNRi"]),
plots_folder=plots_folder,
display="SNRi",
)
proj_plots.polarization_map(
-deepcopy(Stokes_test),
+deepcopy(Stokes_hdul),
data_mask,
SNRp_cut=SNRp_cut,
SNRi_cut=SNRi_cut,
flux_lim=flux_lim,
step_vec=step_vec,
-vec_scale=vec_scale,
+scale_vec=scale_vec,
savename="_".join([figname, "SNRp"]),
plots_folder=plots_folder,
display="SNRp",
)
elif not interactive:
proj_plots.polarization_map(
-deepcopy(Stokes_test), data_mask, SNRp_cut=SNRp_cut, SNRi_cut=SNRi_cut, savename=figname, plots_folder=plots_folder, display="integrate"
+deepcopy(Stokes_hdul), data_mask, SNRp_cut=SNRp_cut, SNRi_cut=SNRi_cut, savename=figname, plots_folder=plots_folder, display="integrate"
)
-elif px_scale.lower() not in ["full", "integrate"]:
+elif pxscale.lower() not in ["full", "integrate"]:
-proj_plots.pol_map(Stokes_test, SNRp_cut=SNRp_cut, SNRi_cut=SNRi_cut, flux_lim=flux_lim)
+proj_plots.pol_map(Stokes_hdul, SNRp_cut=SNRp_cut, SNRi_cut=SNRi_cut, flux_lim=flux_lim)
-return 0
+outfiles.append(Stokes_hdul[0].header["FILENAME"])
+return outfiles
if __name__ == "__main__":
@@ -400,4 +442,4 @@ if __name__ == "__main__":
exitcode = main(
target=args.target, proposal_id=args.proposal_id, infiles=args.files, output_dir=args.output_dir, crop=args.crop, interactive=args.interactive
)
-print("Finished with ExitCode: ", exitcode)
+print("Written to: ", exitcode)

View File

@@ -73,7 +73,7 @@ def display_bkg(data, background, std_bkg, headers, histograms=None, binning=Non
if histograms is not None:
filt_obs = {"POL0": 0, "POL60": 0, "POL120": 0}
-fig_h, ax_h = plt.subplots(figsize=(10, 6), constrained_layout=True)
+fig_h, ax_h = plt.subplots(figsize=(10, 8), constrained_layout=True)
for i, (hist, bins) in enumerate(zip(histograms, binning)):
filt_obs[headers[i]["filtnam1"]] += 1
ax_h.plot(

View File

@@ -76,7 +76,7 @@ def get_obs_data(infiles, data_folder="", compute_flux=False):
# force WCS for POL60 to have same pixel size as POL0 and POL120
is_pol60 = np.array([head["filtnam1"].lower() == "pol60" for head in headers], dtype=bool)
-cdelt = np.round(np.array([WCS(head).wcs.cdelt[:2] for head in headers]), 14)
+cdelt = np.round(np.array([WCS(head).wcs.cdelt[:2] for head in headers]), 10)
if np.unique(cdelt[np.logical_not(is_pol60)], axis=0).size != 2:
print(np.unique(cdelt[np.logical_not(is_pol60)], axis=0))
raise ValueError("Not all images have same pixel size")
@@ -141,19 +141,23 @@ def save_Stokes(
new_wcs.wcs.crpix = np.array(new_wcs.wcs.crpix) - vertex[0::-2]
header = new_wcs.to_header()
-header["telescop"] = (ref_header["telescop"] if "TELESCOP" in list(ref_header.keys()) else "HST", "telescope used to acquire data")
+header["TELESCOP"] = (ref_header["telescop"] if "TELESCOP" in list(ref_header.keys()) else "HST", "telescope used to acquire data")
-header["instrume"] = (ref_header["instrume"] if "INSTRUME" in list(ref_header.keys()) else "FOC", "identifier for instrument used to acuire data")
+header["INSTRUME"] = (ref_header["instrume"] if "INSTRUME" in list(ref_header.keys()) else "FOC", "identifier for instrument used to acuire data")
-header["photplam"] = (ref_header["photplam"], "Pivot Wavelength")
+header["PHOTPLAM"] = (ref_header["photplam"], "Pivot Wavelength")
-header["photflam"] = (ref_header["photflam"], "Inverse Sensitivity in DN/sec/cm**2/Angst")
+header["PHOTFLAM"] = (ref_header["photflam"], "Inverse Sensitivity in DN/sec/cm**2/Angst")
-header["exptot"] = (exp_tot, "Total exposure time in sec")
+header["EXPTOT"] = (exp_tot, "Total exposure time in sec")
-header["proposid"] = (ref_header["proposid"], "PEP proposal identifier for observation")
+header["PROPOSID"] = (ref_header["proposid"], "PEP proposal identifier for observation")
-header["targname"] = (ref_header["targname"], "Target name")
+header["TARGNAME"] = (ref_header["targname"], "Target name")
-header["orientat"] = (ref_header["orientat"], "Angle between North and the y-axis of the image")
+header["ORIENTAT"] = (np.arccos(new_wcs.wcs.pc[0, 0]) * 180.0 / np.pi, "Angle between North and the y-axis of the image")
-header["filename"] = (filename, "Original filename")
+header["FILENAME"] = (filename, "Original filename")
-header["P_int"] = (ref_header["P_int"], "Integrated polarization degree")
-header["P_int_err"] = (ref_header["P_int_err"], "Integrated polarization degree error")
-header["PA_int"] = (ref_header["PA_int"], "Integrated polarization angle")
-header["PA_int_err"] = (ref_header["PA_int_err"], "Integrated polarization angle error")
+header["BKG_TYPE"] = (ref_header["BKG_TYPE"], "Bkg estimation method used during reduction")
+header["BKG_SUB"] = (ref_header["BKG_SUB"], "Amount of bkg subtracted from images")
+header["SMOOTH"] = (ref_header["SMOOTH"], "Smoothing method used during reduction")
+header["SAMPLING"] = (ref_header["SAMPLING"], "Resampling performed during reduction")
+header["P_INT"] = (ref_header["P_int"], "Integrated polarization degree")
+header["sP_INT"] = (ref_header["sP_int"], "Integrated polarization degree error")
+header["PA_INT"] = (ref_header["PA_int"], "Integrated polarization angle")
+header["sPA_INT"] = (ref_header["sPA_int"], "Integrated polarization angle error")
# Crop Data to mask
if data_mask.shape != (1, 1):
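Once save_Stokes has written these cards, the reduction metadata travels with the FITS product and can be read back with astropy (a minimal sketch; the file name is hypothetical, and FITS keyword lookup is case-insensitive, so sP_INT and SP_INT address the same card):

```python
from astropy.io import fits

# Hypothetical product written by the pipeline.
with fits.open("NGC1068_FOC_reduced.fits") as hdul:
    hdr = hdul[0].header
    print(hdr["BKG_TYPE"], hdr["BKG_SUB"])   # background estimation method / amount subtracted
    print(hdr["SMOOTH"], hdr["SAMPLING"])    # smoothing and resampling applied during reduction
    print(hdr["P_INT"], hdr["sP_INT"])       # integrated polarization degree and its error
    print(hdr["PA_INT"], hdr["sPA_INT"])     # integrated polarization angle and its error
```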

View File

@@ -218,7 +218,7 @@ def polarization_map(
SNRi_cut=3.0,
flux_lim=None,
step_vec=1,
-vec_scale=2.0,
+scale_vec=2.0,
savename=None,
plots_folder="",
display="default",
@@ -250,9 +250,9 @@ def polarization_map(
Number of steps between each displayed polarization vector.
If step_vec = 2, every other vector will be displayed.
Defaults to 1
-vec_scale : float, optional
+scale_vec : float, optional
Pixel length of displayed 100% polarization vector.
-If vec_scale = 2, a vector of 50% polarization will be 1 pixel wide.
+If scale_vec = 2, a vector of 50% polarization will be 1 pixel wide.
Defaults to 2.
savename : str, optional
Name of the figure the map should be saved to. If None, the map won't
@@ -428,9 +428,9 @@ def polarization_map(
I_diluted_err = np.sqrt(np.sum(stk_cov[0, 0][data_mask]))
P_diluted = Stokes[0].header["P_int"]
-P_diluted_err = Stokes[0].header["P_int_err"]
+P_diluted_err = Stokes[0].header["sP_int"]
PA_diluted = Stokes[0].header["PA_int"]
-PA_diluted_err = Stokes[0].header["PA_int_err"]
+PA_diluted_err = Stokes[0].header["sPA_int"]
plt.rcParams.update({"font.size": 12})
px_size = wcs.wcs.get_cdelt()[0] * 3600.0
@@ -457,7 +457,7 @@ def polarization_map(
if step_vec == 0:
poldata[np.isfinite(poldata)] = 1.0 / 2.0
step_vec = 1
-vec_scale = 2.0
+scale_vec = 2.0
X, Y = np.meshgrid(np.arange(stkI.shape[1]), np.arange(stkI.shape[0]))
U, V = poldata * np.cos(np.pi / 2.0 + pangdata * np.pi / 180.0), poldata * np.sin(np.pi / 2.0 + pangdata * np.pi / 180.0)
ax.quiver(
@@ -467,7 +467,7 @@ def polarization_map(
V[::step_vec, ::step_vec],
units="xy",
angles="uv",
-scale=1.0 / vec_scale,
+scale=1.0 / scale_vec,
scale_units="xy",
pivot="mid",
headwidth=0.0,
@@ -478,7 +478,7 @@ def polarization_map(
color="w", color="w",
edgecolor="k", edgecolor="k",
) )
pol_sc = AnchoredSizeBar(ax.transData, vec_scale, r"$P$= 100 %", 4, pad=0.5, sep=5, borderpad=0.5, frameon=False, size_vertical=0.005, color="w") pol_sc = AnchoredSizeBar(ax.transData, scale_vec, r"$P$= 100 %", 4, pad=0.5, sep=5, borderpad=0.5, frameon=False, size_vertical=0.005, color="w")
ax.add_artist(pol_sc) ax.add_artist(pol_sc)
ax.add_artist(px_sc) ax.add_artist(px_sc)
@@ -839,7 +839,7 @@ class overplot_radio(align_maps):
Inherit from class align_maps in order to get the same WCS on both maps.
"""
-def overplot(self, levels=None, SNRp_cut=3.0, SNRi_cut=3.0, vec_scale=2, savename=None, **kwargs):
+def overplot(self, levels=None, SNRp_cut=3.0, SNRi_cut=3.0, scale_vec=2, savename=None, **kwargs):
self.Stokes_UV = self.map
self.wcs_UV = self.map_wcs
# Get Data
@@ -915,11 +915,11 @@ class overplot_radio(align_maps):
)
# Display full size polarization vectors
-if vec_scale is None:
+if scale_vec is None:
-self.vec_scale = 2.0
+self.scale_vec = 2.0
pol[np.isfinite(pol)] = 1.0 / 2.0
else:
-self.vec_scale = vec_scale
+self.scale_vec = scale_vec
step_vec = 1
self.X, self.Y = np.meshgrid(np.arange(stkI.shape[1]), np.arange(stkI.shape[0]))
self.U, self.V = pol * np.cos(np.pi / 2.0 + pang * np.pi / 180.0), pol * np.sin(np.pi / 2.0 + pang * np.pi / 180.0)
@@ -930,7 +930,7 @@ class overplot_radio(align_maps):
self.V[::step_vec, ::step_vec],
units="xy",
angles="uv",
-scale=1.0 / self.vec_scale,
+scale=1.0 / self.scale_vec,
scale_units="xy",
pivot="mid",
headwidth=0.0,
@@ -998,7 +998,7 @@ class overplot_radio(align_maps):
self.ax_overplot.add_artist(north_dir)
pol_sc = AnchoredSizeBar(
self.ax_overplot.transData,
-self.vec_scale,
+self.scale_vec,
r"$P$= 100%",
4,
pad=0.5,
@@ -1046,7 +1046,7 @@ class overplot_chandra(align_maps):
Inherit from class align_maps in order to get the same WCS on both maps.
"""
-def overplot(self, levels=None, SNRp_cut=3.0, SNRi_cut=3.0, vec_scale=2, zoom=1, savename=None, **kwargs):
+def overplot(self, levels=None, SNRp_cut=3.0, SNRi_cut=3.0, scale_vec=2, zoom=1, savename=None, **kwargs):
self.Stokes_UV = self.map
self.wcs_UV = self.map_wcs
# Get Data
@@ -1121,11 +1121,11 @@ class overplot_chandra(align_maps):
)
# Display full size polarization vectors
-if vec_scale is None:
+if scale_vec is None:
-self.vec_scale = 2.0
+self.scale_vec = 2.0
pol[np.isfinite(pol)] = 1.0 / 2.0
else:
-self.vec_scale = vec_scale
+self.scale_vec = scale_vec
step_vec = 1
self.X, self.Y = np.meshgrid(np.arange(stkI.shape[1]), np.arange(stkI.shape[0]))
self.U, self.V = pol * np.cos(np.pi / 2.0 + pang * np.pi / 180.0), pol * np.sin(np.pi / 2.0 + pang * np.pi / 180.0)
@@ -1136,7 +1136,7 @@ class overplot_chandra(align_maps):
self.V[::step_vec, ::step_vec],
units="xy",
angles="uv",
-scale=1.0 / self.vec_scale,
+scale=1.0 / self.scale_vec,
scale_units="xy",
pivot="mid",
headwidth=0.0,
@@ -1200,7 +1200,7 @@ class overplot_chandra(align_maps):
self.ax_overplot.add_artist(north_dir)
pol_sc = AnchoredSizeBar(
self.ax_overplot.transData,
-self.vec_scale,
+self.scale_vec,
r"$P$= 100%",
4,
pad=0.5,
@@ -1245,7 +1245,7 @@ class overplot_pol(align_maps):
Inherit from class align_maps in order to get the same WCS on both maps.
"""
-def overplot(self, levels=None, SNRp_cut=3.0, SNRi_cut=3.0, vec_scale=2.0, savename=None, **kwargs):
+def overplot(self, levels=None, SNRp_cut=3.0, SNRi_cut=3.0, scale_vec=2.0, savename=None, **kwargs):
self.Stokes_UV = self.map
self.wcs_UV = self.map_wcs
# Get Data
@@ -1323,11 +1323,11 @@ class overplot_pol(align_maps):
)
# Display full size polarization vectors
-if vec_scale is None:
+if scale_vec is None:
-self.vec_scale = 2.0
+self.scale_vec = 2.0
pol[np.isfinite(pol)] = 1.0 / 2.0
else:
-self.vec_scale = vec_scale
+self.scale_vec = scale_vec
step_vec = 1
px_scale = np.abs(self.wcs_UV.wcs.get_cdelt()[0] / self.other_wcs.wcs.get_cdelt()[0])
self.X, self.Y = np.meshgrid(np.arange(stkI.shape[1]), np.arange(stkI.shape[0]))
@@ -1339,7 +1339,7 @@ class overplot_pol(align_maps):
self.V[::step_vec, ::step_vec],
units="xy",
angles="uv",
-scale=px_scale / self.vec_scale,
+scale=px_scale / self.scale_vec,
scale_units="xy",
pivot="mid",
headwidth=0.0,
@@ -1395,7 +1395,7 @@ class overplot_pol(align_maps):
self.ax_overplot.add_artist(north_dir)
pol_sc = AnchoredSizeBar(
self.ax_overplot.transData,
-self.vec_scale / px_scale,
+self.scale_vec / px_scale,
r"$P$= 100%",
4,
pad=0.5,
@@ -1435,10 +1435,10 @@ class overplot_pol(align_maps):
self.fig_overplot.canvas.draw()
-def plot(self, levels=None, SNRp_cut=3.0, SNRi_cut=3.0, vec_scale=2.0, savename=None, **kwargs) -> None:
+def plot(self, levels=None, SNRp_cut=3.0, SNRi_cut=3.0, scale_vec=2.0, savename=None, **kwargs) -> None:
while not self.aligned:
self.align()
-self.overplot(levels=levels, SNRp_cut=SNRp_cut, SNRi_cut=SNRi_cut, vec_scale=vec_scale, savename=savename, **kwargs)
+self.overplot(levels=levels, SNRp_cut=SNRp_cut, SNRi_cut=SNRi_cut, scale_vec=scale_vec, savename=savename, **kwargs)
plt.show(block=True)
def add_vector(self, position="center", pol_deg=1.0, pol_ang=0.0, **kwargs):
@@ -1448,7 +1448,7 @@ class overplot_pol(align_maps):
position = self.other_wcs.world_to_pixel(position)
u, v = pol_deg * np.cos(np.radians(pol_ang) + np.pi / 2.0), pol_deg * np.sin(np.radians(pol_ang) + np.pi / 2.0)
-for key, value in [["scale", [["scale", self.vec_scale]]], ["width", [["width", 0.1]]], ["color", [["color", "k"]]]]:
+for key, value in [["scale", [["scale", self.scale_vec]]], ["width", [["width", 0.1]]], ["color", [["color", "k"]]]]:
try:
_ = kwargs[key]
except KeyError:
@@ -1937,9 +1937,9 @@ class crop_Stokes(crop_map):
for dataset in self.hdul_crop:
dataset.header["P_int"] = (P_diluted, "Integrated polarization degree")
-dataset.header["P_int_err"] = (np.ceil(P_diluted_err * 1000.0) / 1000.0, "Integrated polarization degree error")
+dataset.header["sP_int"] = (np.ceil(P_diluted_err * 1000.0) / 1000.0, "Integrated polarization degree error")
dataset.header["PA_int"] = (PA_diluted, "Integrated polarization angle")
-dataset.header["PA_int_err"] = (np.ceil(PA_diluted_err * 10.0) / 10.0, "Integrated polarization angle error")
+dataset.header["sPA_int"] = (np.ceil(PA_diluted_err * 10.0) / 10.0, "Integrated polarization angle error")
self.fig.canvas.draw_idle()
@property
@@ -3043,9 +3043,9 @@ class pol_map(object):
I_reg = self.I.sum()
I_reg_err = np.sqrt(np.sum(s_I**2))
P_reg = self.Stokes[0].header["P_int"]
-P_reg_err = self.Stokes[0].header["P_int_err"]
+P_reg_err = self.Stokes[0].header["sP_int"]
PA_reg = self.Stokes[0].header["PA_int"]
-PA_reg_err = self.Stokes[0].header["PA_int_err"]
+PA_reg_err = self.Stokes[0].header["sPA_int"]
s_I = np.sqrt(self.IQU_cov[0, 0])
s_Q = np.sqrt(self.IQU_cov[1, 1])

View File

@@ -53,7 +53,7 @@ from scipy.ndimage import shift as sc_shift
from scipy.signal import fftconvolve
from .background import bkg_fit, bkg_hist, bkg_mini
-from .convex_hull import clean_ROI, image_hull
+from .convex_hull import image_hull
from .cross_correlation import phase_cross_correlation
from .deconvolve import deconvolve_im, gaussian2d, gaussian_psf, zeropad
from .plots import plot_obs
@@ -433,18 +433,7 @@ def deconvolve_array(data_array, headers, psf="gaussian", FWHM=1.0, scale="px",
return deconv_array
-def get_error(
-data_array,
-headers,
-error_array=None,
-data_mask=None,
-sub_type=None,
-subtract_error=True,
-display=False,
-savename=None,
-plots_folder="",
-return_background=False,
-):
+def get_error(data_array, headers, error_array=None, data_mask=None, sub_type=None, subtract_error=0.5, display=False, savename=None, plots_folder="", return_background=False):
"""
Look for sub-image of shape sub_shape that have the smallest integrated
flux (no source assumption) and define the background on the image by the
@@ -532,22 +521,30 @@ def get_error(
n_data_array, c_error_bkg, headers, background = bkg_hist(
data, error, mask, headers, subtract_error=subtract_error, display=display, savename=savename, plots_folder=plots_folder
)
+sub_type, subtract_error = "histogram", str(int(subtract_error>0.))
elif isinstance(sub_type, str):
if sub_type.lower() in ["auto"]:
n_data_array, c_error_bkg, headers, background = bkg_fit(
data, error, mask, headers, subtract_error=subtract_error, display=display, savename=savename, plots_folder=plots_folder
)
+sub_type, subtract_error = "histogram fit", "bkg+%.1fsigma"%subtract_error
else:
n_data_array, c_error_bkg, headers, background = bkg_hist(
data, error, mask, headers, sub_type=sub_type, subtract_error=subtract_error, display=display, savename=savename, plots_folder=plots_folder
)
+sub_type, subtract_error = "histogram", str(int(subtract_error>0.))
elif isinstance(sub_type, tuple):
n_data_array, c_error_bkg, headers, background = bkg_mini(
data, error, mask, headers, sub_shape=sub_type, subtract_error=subtract_error, display=display, savename=savename, plots_folder=plots_folder
)
+sub_type, subtract_error = "minimal flux", str(int(subtract_error>0.))
else:
print("Warning: Invalid subtype.")
+for header in headers:
+header["BKG_TYPE"] = (sub_type,"Bkg estimation method used during reduction")
+header["BKG_SUB"] = (subtract_error,"Amount of bkg subtracted from images")
# Quadratically add uncertainties in the "correction factors" (see Kishimoto 1999)
n_error_array = np.sqrt(err_wav**2 + err_psf**2 + err_flat**2 + c_error_bkg**2)
@@ -557,7 +554,7 @@ def get_error(
return n_data_array, n_error_array, headers
-def rebin_array(data_array, error_array, headers, pxsize, scale, operation="sum", data_mask=None):
+def rebin_array(data_array, error_array, headers, pxsize=2, scale="px", operation="sum", data_mask=None):
"""
Homogeneously rebin a data array to get a new pixel size equal to pxsize
where pxsize is given in arcsec.
@@ -613,65 +610,62 @@ def rebin_array(data_array, error_array, headers, pxsize, scale, operation="sum"
Dxy = np.array([1.0, 1.0])
# Routine for the FOC instrument
-if instr == "FOC":
-# HST_aper = 2400.0 # HST aperture in mm
Dxy_arr = np.ones((data_array.shape[0], 2))
for i, (image, error, header) in enumerate(list(zip(data_array, error_array, headers))):
# Get current pixel size
w = WCS(header).celestial.deepcopy()
new_header = deepcopy(header)
# Compute binning ratio
if scale.lower() in ["px", "pixel"]:
-Dxy_arr[i] = np.array(
-[
-pxsize,
-]
-* 2
-)
+Dxy_arr[i] = np.array([ pxsize, ] * 2)
+scale = "px"
elif scale.lower() in ["arcsec", "arcseconds"]:
Dxy_arr[i] = np.array(pxsize / np.abs(w.wcs.cdelt) / 3600.0)
+scale = "arcsec"
elif scale.lower() in ["full", "integrate"]:
Dxy_arr[i] = image.shape
+pxsize, scale = "", "full"
else:
raise ValueError("'{0:s}' invalid scale for binning.".format(scale))
new_shape = np.ceil(min(image.shape / Dxy_arr, key=lambda x: x[0] + x[1])).astype(int)
for i, (image, error, header) in enumerate(list(zip(data_array, error_array, headers))):
# Get current pixel size
w = WCS(header).celestial.deepcopy()
new_header = deepcopy(header)
Dxy = image.shape / new_shape
if (Dxy < 1.0).any():
raise ValueError("Requested pixel size is below resolution.")
# Rebin data
rebin_data = bin_ndarray(image, new_shape=new_shape, operation=operation)
rebinned_data.append(rebin_data)
# Propagate error
rms_image = np.sqrt(bin_ndarray(image**2, new_shape=new_shape, operation="average"))
# sum_image = bin_ndarray(image, new_shape=new_shape, operation="sum")
# mask = sum_image > 0.0
new_error = np.zeros(rms_image.shape)
if operation.lower() in ["mean", "average", "avg"]:
new_error = np.sqrt(bin_ndarray(error**2, new_shape=new_shape, operation="average"))
else:
new_error = np.sqrt(bin_ndarray(error**2, new_shape=new_shape, operation="sum"))
rebinned_error.append(np.sqrt(rms_image**2 + new_error**2))
# Update header
nw = w.deepcopy()
nw.wcs.cdelt *= Dxy
nw.wcs.crpix /= Dxy
nw.array_shape = new_shape
new_header["NAXIS1"], new_header["NAXIS2"] = nw.array_shape
for key, val in nw.to_header().items():
new_header.set(key, val)
+new_header["SAMPLING"] = (str(pxsize)+scale, "Resampling performed during reduction")
rebinned_headers.append(new_header)
if data_mask is not None:
data_mask = bin_ndarray(data_mask, new_shape=new_shape, operation="average") > 0.80
rebinned_data = np.array(rebinned_data)
rebinned_error = np.array(rebinned_error)
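bin_ndarray itself is not part of this diff; a common reshape-based rebin that the "sum"/"average" operations above correspond to looks roughly like this (hypothetical helper, assuming the new shape divides the old one evenly, which the np.ceil above does not require):

```python
import numpy as np

def rebin_sum(arr, new_shape):
    # Each output pixel is the sum of an (fy, fx) block of input pixels.
    fy, fx = arr.shape[0] // new_shape[0], arr.shape[1] // new_shape[1]
    trimmed = arr[: new_shape[0] * fy, : new_shape[1] * fx]
    return trimmed.reshape(new_shape[0], fy, new_shape[1], fx).sum(axis=(1, 3))

img = np.arange(16.0).reshape(4, 4)
print(rebin_sum(img, (2, 2)))  # -> [[10., 18.], [42., 50.]]
```

The hunk above then adds the binned RMS of the data in quadrature to the propagated error and records the requested sampling in the new SAMPLING card.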
@@ -682,7 +676,7 @@ def rebin_array(data_array, error_array, headers, pxsize, scale, operation="sum"
return rebinned_data, rebinned_error, rebinned_headers, Dxy, data_mask
-def align_data(data_array, headers, error_array=None, background=None, upsample_factor=1.0, ref_data=None, ref_center=None, return_shifts=False):
+def align_data(data_array, headers, error_array=None, data_mask=None, background=None, upsample_factor=1.0, ref_data=None, ref_center=None, return_shifts=False):
"""
Align images in data_array using cross correlation, and rescale them to
wider images able to contain any rotation of the reference image.
@@ -760,10 +754,14 @@ def align_data(data_array, headers, error_array=None, background=None, upsample_
full_headers.append(headers[0])
err_array = np.concatenate((error_array, [np.zeros(ref_data.shape)]), axis=0)
-full_array, err_array, full_headers = crop_array(full_array, full_headers, err_array, step=5, inside=False, null_val=0.0)
+if data_mask is None:
+full_array, err_array, full_headers = crop_array(full_array, full_headers, err_array, step=5, inside=False, null_val=0.0)
+else:
+full_array, err_array, data_mask, full_headers = crop_array(full_array, full_headers, err_array, data_mask=data_mask, step=5, inside=False, null_val=0.0)
data_array, ref_data, headers = full_array[:-1], full_array[-1], full_headers[:-1]
error_array = err_array[:-1]
do_shift = True
if ref_center is None:
# Define the center of the reference image to be the center pixel
@@ -788,6 +786,8 @@ def align_data(data_array, headers, error_array=None, background=None, upsample_
res_shift = res_center - ref_center
res_mask = np.zeros((res_shape, res_shape), dtype=bool)
res_mask[res_shift[0] : res_shift[0] + shape[1], res_shift[1] : res_shift[1] + shape[2]] = True
+if data_mask is not None:
+res_mask = np.logical_and(res_mask,zeropad(data_mask, (res_shape, res_shape)).astype(bool))
shifts, errors = [], []
for i, image in enumerate(data_array):
@@ -806,9 +806,11 @@ def align_data(data_array, headers, error_array=None, background=None, upsample_
rescaled_image[i] = sc_shift(rescaled_image[i], shift, order=1, cval=0.0)
rescaled_error[i] = sc_shift(rescaled_error[i], shift, order=1, cval=background[i])
-curr_mask = sc_shift(res_mask, shift, order=1, cval=False)
-mask_vertex = clean_ROI(curr_mask)
-rescaled_mask[i, mask_vertex[2] : mask_vertex[3], mask_vertex[0] : mask_vertex[1]] = True
+curr_mask = sc_shift(res_mask*10., shift, order=1, cval=0.0)
+curr_mask[curr_mask < curr_mask.max()*2./3.] = 0.0
+rescaled_mask[i] = curr_mask.astype(bool)
+# mask_vertex = clean_ROI(curr_mask)
+# rescaled_mask[i, mask_vertex[2] : mask_vertex[3], mask_vertex[0] : mask_vertex[1]] = True
rescaled_image[i][rescaled_image[i] < 0.0] = 0.0
rescaled_image[i][(1 - rescaled_mask[i]).astype(bool)] = 0.0
@@ -842,7 +844,7 @@ def align_data(data_array, headers, error_array=None, background=None, upsample_
return data_array, error_array, headers, data_mask
-def smooth_data(data_array, error_array, data_mask, headers, FWHM=1.0, scale="pixel", smoothing="gaussian"):
+def smooth_data(data_array, error_array, data_mask, headers, FWHM=1.5, scale="pixel", smoothing="weighted_gaussian"):
"""
Smooth a data_array using selected function.
----------
@@ -886,13 +888,19 @@ def smooth_data(data_array, error_array, data_mask, headers, FWHM=1.0, scale="pi
pxsize[i] = np.round(w.wcs.cdelt * 3600.0, 4)
if (pxsize != pxsize[0]).any():
raise ValueError("Not all images in array have same pixel size")
+FWHM_size = str(FWHM)
+FWHM_scale = "arcsec"
FWHM /= pxsize[0].min()
+else:
+FWHM_size = str(FWHM)
+FWHM_scale = "px"
# Define gaussian stdev
stdev = FWHM / (2.0 * np.sqrt(2.0 * np.log(2)))
fmax = np.finfo(np.double).max
if smoothing.lower() in ["combine", "combining"]:
+smoothing = "combine"
# Smooth using N images combination algorithm
# Weight array
weight = 1.0 / error_array**2
@@ -928,6 +936,7 @@ def smooth_data(data_array, error_array, data_mask, headers, FWHM=1.0, scale="pi
smoothed[np.logical_or(np.isnan(smoothed * error), 1 - data_mask)] = 0.0
elif smoothing.lower() in ["weight_gauss", "weighted_gaussian", "gauss", "gaussian"]:
+smoothing = "gaussian"
# Convolution with gaussian function
smoothed = np.zeros(data_array.shape)
error = np.zeros(error_array.shape)
@@ -935,6 +944,7 @@ def smooth_data(data_array, error_array, data_mask, headers, FWHM=1.0, scale="pi
x, y = np.meshgrid(np.arange(-image.shape[1] / 2, image.shape[1] / 2), np.arange(-image.shape[0] / 2, image.shape[0] / 2))
weights = np.ones(image_error.shape)
if smoothing.lower()[:6] in ["weight"]:
+smoothing = "weighted gaussian"
weights = 1.0 / image_error**2
weights[(1 - np.isfinite(weights)).astype(bool)] = 0.0
weights[(1 - data_mask).astype(bool)] = 0.0
@@ -953,10 +963,13 @@ def smooth_data(data_array, error_array, data_mask, headers, FWHM=1.0, scale="pi
else:
raise ValueError("{} is not a valid smoothing option".format(smoothing))
+for header in headers:
+header["SMOOTH"] = (" ".join([smoothing,FWHM_size,scale]),"Smoothing method used during reduction")
return smoothed, error
-def polarizer_avg(data_array, error_array, data_mask, headers, FWHM=None, scale="pixel", smoothing="gaussian"):
+def polarizer_avg(data_array, error_array, data_mask, headers, FWHM=1.5, scale="pixel", smoothing="weighted_gaussian"):
"""
Make the average image from a single polarizer for a given instrument.
-----------
@@ -1115,7 +1128,7 @@ def polarizer_avg(data_array, error_array, data_mask, headers, FWHM=None, scale=
return polarizer_array, polarizer_cov, pol_headers
-def compute_Stokes(data_array, error_array, data_mask, headers, FWHM=None, scale="pixel", smoothing="combine", transmitcorr=True):
+def compute_Stokes(data_array, error_array, data_mask, headers, FWHM=None, scale="pixel", smoothing="combine", transmitcorr=True, integrate=True):
"""
Compute the Stokes parameters I, Q and U for a given data_set
----------
@@ -1179,9 +1192,15 @@ def compute_Stokes(data_array, error_array, data_mask, headers, FWHM=None, scale
"Cannot reduce images from {0:s} instrument\ "Cannot reduce images from {0:s} instrument\
(yet)".format(instr) (yet)".format(instr)
) )
rotate = np.zeros(len(headers))
for i,head in enumerate(headers):
try:
rotate[i] = head['ROTATE']
except KeyError:
rotate[i] = 0.
# Routine for the FOC instrument if (np.unique(rotate) == rotate[0]).all():
if instr == "FOC": theta = globals()["theta"] - rotate[0] * np.pi / 180.0
# Get image from each polarizer and covariance matrix # Get image from each polarizer and covariance matrix
pol_array, pol_cov, pol_headers = polarizer_avg(data_array, error_array, data_mask, headers, FWHM=FWHM, scale=scale, smoothing=smoothing) pol_array, pol_cov, pol_headers = polarizer_avg(data_array, error_array, data_mask, headers, FWHM=FWHM, scale=scale, smoothing=smoothing)
pol0, pol60, pol120 = pol_array pol0, pol60, pol120 = pol_array
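To make the intent of the new ROTATE handling concrete, here is a hedged sketch (the polarizer axis values and the ROTATE offset are hypothetical; the actual angles live in the module-level theta, and ROTATE is assumed to record the rotation applied to the frames earlier in the pipeline):

```python
import numpy as np

theta_module = np.radians([0.0, 60.0, 120.0])  # illustrative POL0/POL60/POL120 axis angles
rotate = np.array([15.0, 15.0, 15.0])          # hypothetical ROTATE cards, in degrees

# All frames carry the same ROTATE value, so the polarizer axes can be offset once
# instead of rotating Q and U after the fact.
if (np.unique(rotate) == rotate[0]).all():
    theta = theta_module - rotate[0] * np.pi / 180.0
print(np.degrees(theta))  # [-15.  45. 105.]
```

This shifted theta is what the coefficient and error-propagation expressions below use in place of globals()["theta"].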
@@ -1223,17 +1242,17 @@ def compute_Stokes(data_array, error_array, data_mask, headers, FWHM=None, scale
coeff_stokes[0, i] = (
pol_eff[(i + 1) % 3]
* pol_eff[(i + 2) % 3]
-* np.sin(-2.0 * globals()["theta"][(i + 1) % 3] + 2.0 * globals()["theta"][(i + 2) % 3])
+* np.sin(-2.0 * theta[(i + 1) % 3] + 2.0 * theta[(i + 2) % 3])
* 2.0
/ transmit[i]
)
coeff_stokes[1, i] = (
-(-pol_eff[(i + 1) % 3] * np.sin(2.0 * globals()["theta"][(i + 1) % 3]) + pol_eff[(i + 2) % 3] * np.sin(2.0 * globals()["theta"][(i + 2) % 3]))
+(-pol_eff[(i + 1) % 3] * np.sin(2.0 * theta[(i + 1) % 3]) + pol_eff[(i + 2) % 3] * np.sin(2.0 * theta[(i + 2) % 3]))
* 2.0
/ transmit[i]
)
coeff_stokes[2, i] = (
-(pol_eff[(i + 1) % 3] * np.cos(2.0 * globals()["theta"][(i + 1) % 3]) - pol_eff[(i + 2) % 3] * np.cos(2.0 * globals()["theta"][(i + 2) % 3]))
+(pol_eff[(i + 1) % 3] * np.cos(2.0 * theta[(i + 1) % 3]) - pol_eff[(i + 2) % 3] * np.cos(2.0 * theta[(i + 2) % 3]))
* 2.0
/ transmit[i]
)
@@ -1294,9 +1313,9 @@ def compute_Stokes(data_array, error_array, data_mask, headers, FWHM=None, scale
* pol_eff[0]
/ N
* (
-pol_eff[2] * np.cos(-2.0 * globals()["theta"][2] + 2.0 * globals()["theta"][0]) * (pol_flux_corr[1] - I_stokes)
-- pol_eff[1] * np.cos(-2.0 * globals()["theta"][0] + 2.0 * globals()["theta"][1]) * (pol_flux_corr[2] - I_stokes)
-+ coeff_stokes_corr[0, 0] * (np.sin(2.0 * globals()["theta"][0]) * Q_stokes - np.cos(2 * globals()["theta"][0]) * U_stokes)
+pol_eff[2] * np.cos(-2.0 * theta[2] + 2.0 * theta[0]) * (pol_flux_corr[1] - I_stokes)
+- pol_eff[1] * np.cos(-2.0 * theta[0] + 2.0 * theta[1]) * (pol_flux_corr[2] - I_stokes)
++ coeff_stokes_corr[0, 0] * (np.sin(2.0 * theta[0]) * Q_stokes - np.cos(2 * theta[0]) * U_stokes)
)
)
dI_dtheta2 = (
@@ -1304,9 +1323,9 @@ def compute_Stokes(data_array, error_array, data_mask, headers, FWHM=None, scale
* pol_eff[1]
/ N
* (
-pol_eff[0] * np.cos(-2.0 * globals()["theta"][0] + 2.0 * globals()["theta"][1]) * (pol_flux_corr[2] - I_stokes)
-- pol_eff[2] * np.cos(-2.0 * globals()["theta"][1] + 2.0 * globals()["theta"][2]) * (pol_flux_corr[0] - I_stokes)
-+ coeff_stokes_corr[0, 1] * (np.sin(2.0 * globals()["theta"][1]) * Q_stokes - np.cos(2 * globals()["theta"][1]) * U_stokes)
+pol_eff[0] * np.cos(-2.0 * theta[0] + 2.0 * theta[1]) * (pol_flux_corr[2] - I_stokes)
+- pol_eff[2] * np.cos(-2.0 * theta[1] + 2.0 * theta[2]) * (pol_flux_corr[0] - I_stokes)
++ coeff_stokes_corr[0, 1] * (np.sin(2.0 * theta[1]) * Q_stokes - np.cos(2 * theta[1]) * U_stokes)
)
)
dI_dtheta3 = (
@@ -1314,9 +1333,9 @@ def compute_Stokes(data_array, error_array, data_mask, headers, FWHM=None, scale
* pol_eff[2]
/ N
* (
-pol_eff[1] * np.cos(-2.0 * globals()["theta"][1] + 2.0 * globals()["theta"][2]) * (pol_flux_corr[0] - I_stokes)
-- pol_eff[0] * np.cos(-2.0 * globals()["theta"][2] + 2.0 * globals()["theta"][0]) * (pol_flux_corr[1] - I_stokes)
-+ coeff_stokes_corr[0, 2] * (np.sin(2.0 * globals()["theta"][2]) * Q_stokes - np.cos(2 * globals()["theta"][2]) * U_stokes)
+pol_eff[1] * np.cos(-2.0 * theta[1] + 2.0 * theta[2]) * (pol_flux_corr[0] - I_stokes)
+- pol_eff[0] * np.cos(-2.0 * theta[2] + 2.0 * theta[0]) * (pol_flux_corr[1] - I_stokes)
++ coeff_stokes_corr[0, 2] * (np.sin(2.0 * theta[2]) * Q_stokes - np.cos(2 * theta[2]) * U_stokes)
)
)
dI_dtheta = np.array([dI_dtheta1, dI_dtheta2, dI_dtheta3])
@@ -1326,13 +1345,13 @@ def compute_Stokes(data_array, error_array, data_mask, headers, FWHM=None, scale
* pol_eff[0]
/ N
* (
-np.cos(2.0 * globals()["theta"][0]) * (pol_flux_corr[1] - pol_flux_corr[2])
+np.cos(2.0 * theta[0]) * (pol_flux_corr[1] - pol_flux_corr[2])
- (
-pol_eff[2] * np.cos(-2.0 * globals()["theta"][2] + 2.0 * globals()["theta"][0])
-- pol_eff[1] * np.cos(-2.0 * globals()["theta"][0] + 2.0 * globals()["theta"][1])
+pol_eff[2] * np.cos(-2.0 * theta[2] + 2.0 * theta[0])
+- pol_eff[1] * np.cos(-2.0 * theta[0] + 2.0 * theta[1])
)
* Q_stokes
-+ coeff_stokes_corr[1, 0] * (np.sin(2.0 * globals()["theta"][0]) * Q_stokes - np.cos(2 * globals()["theta"][0]) * U_stokes)
++ coeff_stokes_corr[1, 0] * (np.sin(2.0 * theta[0]) * Q_stokes - np.cos(2 * theta[0]) * U_stokes)
)
)
dQ_dtheta2 = (
@@ -1340,13 +1359,13 @@ def compute_Stokes(data_array, error_array, data_mask, headers, FWHM=None, scale
* pol_eff[1] * pol_eff[1]
/ N / N
* ( * (
np.cos(2.0 * globals()["theta"][1]) * (pol_flux_corr[2] - pol_flux_corr[0]) np.cos(2.0 * theta[1]) * (pol_flux_corr[2] - pol_flux_corr[0])
- ( - (
pol_eff[0] * np.cos(-2.0 * globals()["theta"][0] + 2.0 * globals()["theta"][1]) pol_eff[0] * np.cos(-2.0 * theta[0] + 2.0 * theta[1])
- pol_eff[2] * np.cos(-2.0 * globals()["theta"][1] + 2.0 * globals()["theta"][2]) - pol_eff[2] * np.cos(-2.0 * theta[1] + 2.0 * theta[2])
) )
* Q_stokes * Q_stokes
+ coeff_stokes_corr[1, 1] * (np.sin(2.0 * globals()["theta"][1]) * Q_stokes - np.cos(2 * globals()["theta"][1]) * U_stokes) + coeff_stokes_corr[1, 1] * (np.sin(2.0 * theta[1]) * Q_stokes - np.cos(2 * theta[1]) * U_stokes)
) )
) )
dQ_dtheta3 = ( dQ_dtheta3 = (
@@ -1354,13 +1373,13 @@ def compute_Stokes(data_array, error_array, data_mask, headers, FWHM=None, scale
* pol_eff[2] * pol_eff[2]
/ N / N
* ( * (
np.cos(2.0 * globals()["theta"][2]) * (pol_flux_corr[0] - pol_flux_corr[1]) np.cos(2.0 * theta[2]) * (pol_flux_corr[0] - pol_flux_corr[1])
- ( - (
pol_eff[1] * np.cos(-2.0 * globals()["theta"][1] + 2.0 * globals()["theta"][2]) pol_eff[1] * np.cos(-2.0 * theta[1] + 2.0 * theta[2])
- pol_eff[0] * np.cos(-2.0 * globals()["theta"][2] + 2.0 * globals()["theta"][0]) - pol_eff[0] * np.cos(-2.0 * theta[2] + 2.0 * theta[0])
) )
* Q_stokes * Q_stokes
+ coeff_stokes_corr[1, 2] * (np.sin(2.0 * globals()["theta"][2]) * Q_stokes - np.cos(2 * globals()["theta"][2]) * U_stokes) + coeff_stokes_corr[1, 2] * (np.sin(2.0 * theta[2]) * Q_stokes - np.cos(2 * theta[2]) * U_stokes)
) )
) )
dQ_dtheta = np.array([dQ_dtheta1, dQ_dtheta2, dQ_dtheta3]) dQ_dtheta = np.array([dQ_dtheta1, dQ_dtheta2, dQ_dtheta3])
@@ -1370,13 +1389,13 @@ def compute_Stokes(data_array, error_array, data_mask, headers, FWHM=None, scale
* pol_eff[0] * pol_eff[0]
/ N / N
* ( * (
np.sin(2.0 * globals()["theta"][0]) * (pol_flux_corr[1] - pol_flux_corr[2]) np.sin(2.0 * theta[0]) * (pol_flux_corr[1] - pol_flux_corr[2])
- ( - (
pol_eff[2] * np.cos(-2.0 * globals()["theta"][2] + 2.0 * globals()["theta"][0]) pol_eff[2] * np.cos(-2.0 * theta[2] + 2.0 * theta[0])
- pol_eff[1] * np.cos(-2.0 * globals()["theta"][0] + 2.0 * globals()["theta"][1]) - pol_eff[1] * np.cos(-2.0 * theta[0] + 2.0 * theta[1])
) )
* U_stokes * U_stokes
+ coeff_stokes_corr[2, 0] * (np.sin(2.0 * globals()["theta"][0]) * Q_stokes - np.cos(2 * globals()["theta"][0]) * U_stokes) + coeff_stokes_corr[2, 0] * (np.sin(2.0 * theta[0]) * Q_stokes - np.cos(2 * theta[0]) * U_stokes)
) )
) )
dU_dtheta2 = ( dU_dtheta2 = (
@@ -1384,13 +1403,13 @@ def compute_Stokes(data_array, error_array, data_mask, headers, FWHM=None, scale
* pol_eff[1] * pol_eff[1]
/ N / N
* ( * (
np.sin(2.0 * globals()["theta"][1]) * (pol_flux_corr[2] - pol_flux_corr[0]) np.sin(2.0 * theta[1]) * (pol_flux_corr[2] - pol_flux_corr[0])
- ( - (
pol_eff[0] * np.cos(-2.0 * globals()["theta"][0] + 2.0 * globals()["theta"][1]) pol_eff[0] * np.cos(-2.0 * theta[0] + 2.0 * theta[1])
- pol_eff[2] * np.cos(-2.0 * globals()["theta"][1] + 2.0 * globals()["theta"][2]) - pol_eff[2] * np.cos(-2.0 * theta[1] + 2.0 * theta[2])
) )
* U_stokes * U_stokes
+ coeff_stokes_corr[2, 1] * (np.sin(2.0 * globals()["theta"][1]) * Q_stokes - np.cos(2 * globals()["theta"][1]) * U_stokes) + coeff_stokes_corr[2, 1] * (np.sin(2.0 * theta[1]) * Q_stokes - np.cos(2 * theta[1]) * U_stokes)
) )
) )
dU_dtheta3 = ( dU_dtheta3 = (
@@ -1398,13 +1417,13 @@ def compute_Stokes(data_array, error_array, data_mask, headers, FWHM=None, scale
* pol_eff[2] * pol_eff[2]
/ N / N
* ( * (
np.sin(2.0 * globals()["theta"][2]) * (pol_flux_corr[0] - pol_flux_corr[1]) np.sin(2.0 * theta[2]) * (pol_flux_corr[0] - pol_flux_corr[1])
- ( - (
pol_eff[1] * np.cos(-2.0 * globals()["theta"][1] + 2.0 * globals()["theta"][2]) pol_eff[1] * np.cos(-2.0 * theta[1] + 2.0 * theta[2])
- pol_eff[0] * np.cos(-2.0 * globals()["theta"][2] + 2.0 * globals()["theta"][0]) - pol_eff[0] * np.cos(-2.0 * theta[2] + 2.0 * theta[0])
) )
* U_stokes * U_stokes
+ coeff_stokes_corr[2, 2] * (np.sin(2.0 * globals()["theta"][2]) * Q_stokes - np.cos(2 * globals()["theta"][2]) * U_stokes) + coeff_stokes_corr[2, 2] * (np.sin(2.0 * theta[2]) * Q_stokes - np.cos(2 * theta[2]) * U_stokes)
) )
) )
dU_dtheta = np.array([dU_dtheta1, dU_dtheta2, dU_dtheta3]) dU_dtheta = np.array([dU_dtheta1, dU_dtheta2, dU_dtheta3])
@@ -1422,8 +1441,39 @@ def compute_Stokes(data_array, error_array, data_mask, headers, FWHM=None, scale
Stokes_cov[1, 1] += s_Q2_axis + s_Q2_stat Stokes_cov[1, 1] += s_Q2_axis + s_Q2_stat
Stokes_cov[2, 2] += s_U2_axis + s_U2_stat Stokes_cov[2, 2] += s_U2_axis + s_U2_stat
else:
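        # Several distinct roll angles are present: compute the Stokes maps for each
        # rotation group separately, then average the per-group results.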
all_I_stokes = np.zeros((np.unique(rotate).size, data_array.shape[1], data_array.shape[2]))
all_Q_stokes = np.zeros((np.unique(rotate).size, data_array.shape[1], data_array.shape[2]))
all_U_stokes = np.zeros((np.unique(rotate).size, data_array.shape[1], data_array.shape[2]))
all_Stokes_cov = np.zeros((np.unique(rotate).size, 3, 3, data_array.shape[1], data_array.shape[2]))
        for i, rot in enumerate(np.unique(rotate)):
rot_mask = (rotate == rot)
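            # Reduce only the exposures of this rotation group; integration is deferred to the combined maps (integrate=False).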
            all_I_stokes[i], all_Q_stokes[i], all_U_stokes[i], all_Stokes_cov[i] = compute_Stokes(data_array[rot_mask], error_array[rot_mask], data_mask, [headers[k] for k in np.arange(len(headers))[rot_mask]], FWHM=FWHM, scale=scale, smoothing=smoothing, transmitcorr=transmitcorr, integrate=False)
        I_stokes = all_I_stokes.sum(axis=0) / np.unique(rotate).size
        Q_stokes = all_Q_stokes.sum(axis=0) / np.unique(rotate).size
        U_stokes = all_U_stokes.sum(axis=0) / np.unique(rotate).size
Stokes_cov = np.zeros((3, 3, I_stokes.shape[0], I_stokes.shape[1]))
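        # Combine the per-group covariances: diagonal terms are averaged, off-diagonal terms are combined as a root mean square.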
for i in range(3):
            Stokes_cov[i, i] = np.sum(all_Stokes_cov[:, i, i], axis=0) / np.unique(rotate).size
            for j in [x for x in range(3) if x != i]:
                Stokes_cov[i, j] = np.sqrt(np.sum(all_Stokes_cov[:, i, j] ** 2, axis=0) / np.unique(rotate).size)
                Stokes_cov[j, i] = np.sqrt(np.sum(all_Stokes_cov[:, j, i] ** 2, axis=0) / np.unique(rotate).size)
    # NaN handling:
fmax = np.finfo(np.float64).max
I_stokes[np.isnan(I_stokes)] = 0.0
Q_stokes[I_stokes == 0.0] = 0.0
U_stokes[I_stokes == 0.0] = 0.0
Q_stokes[np.isnan(Q_stokes)] = 0.0
U_stokes[np.isnan(U_stokes)] = 0.0
Stokes_cov[np.isnan(Stokes_cov)] = fmax
if integrate:
# Compute integrated values for P, PA before any rotation # Compute integrated values for P, PA before any rotation
mask = np.logical_and(data_mask.astype(bool), (I_stokes > 0.0)) mask = deepcopy(data_mask).astype(bool)
I_diluted = I_stokes[mask].sum() I_diluted = I_stokes[mask].sum()
Q_diluted = Q_stokes[mask].sum() Q_diluted = Q_stokes[mask].sum()
U_diluted = U_stokes[mask].sum() U_diluted = U_stokes[mask].sum()
@@ -1435,7 +1485,7 @@ def compute_Stokes(data_array, error_array, data_mask, headers, FWHM=None, scale
QU_diluted_err = np.sqrt(np.sum(Stokes_cov[1, 2][mask] ** 2)) QU_diluted_err = np.sqrt(np.sum(Stokes_cov[1, 2][mask] ** 2))
P_diluted = np.sqrt(Q_diluted**2 + U_diluted**2) / I_diluted P_diluted = np.sqrt(Q_diluted**2 + U_diluted**2) / I_diluted
P_diluted_err = (1.0 / I_diluted) * np.sqrt( P_diluted_err = np.sqrt(
(Q_diluted**2 * Q_diluted_err**2 + U_diluted**2 * U_diluted_err**2 + 2.0 * Q_diluted * U_diluted * QU_diluted_err) / (Q_diluted**2 + U_diluted**2) (Q_diluted**2 * Q_diluted_err**2 + U_diluted**2 * U_diluted_err**2 + 2.0 * Q_diluted * U_diluted * QU_diluted_err) / (Q_diluted**2 + U_diluted**2)
+ ((Q_diluted / I_diluted) ** 2 + (U_diluted / I_diluted) ** 2) * I_diluted_err**2 + ((Q_diluted / I_diluted) ** 2 + (U_diluted / I_diluted) ** 2) * I_diluted_err**2
- 2.0 * (Q_diluted / I_diluted) * IQ_diluted_err - 2.0 * (Q_diluted / I_diluted) * IQ_diluted_err
@@ -1449,9 +1499,9 @@ def compute_Stokes(data_array, error_array, data_mask, headers, FWHM=None, scale
for header in headers: for header in headers:
header["P_int"] = (P_diluted, "Integrated polarization degree") header["P_int"] = (P_diluted, "Integrated polarization degree")
header["P_int_err"] = (np.ceil(P_diluted_err * 1000.0) / 1000.0, "Integrated polarization degree error") header["sP_int"] = (np.ceil(P_diluted_err * 1000.0) / 1000.0, "Integrated polarization degree error")
header["PA_int"] = (PA_diluted, "Integrated polarization angle") header["PA_int"] = (PA_diluted, "Integrated polarization angle")
header["PA_int_err"] = (np.ceil(PA_diluted_err * 10.0) / 10.0, "Integrated polarization angle error") header["sPA_int"] = (np.ceil(PA_diluted_err * 10.0) / 10.0, "Integrated polarization angle error")
return I_stokes, Q_stokes, U_stokes, Stokes_cov return I_stokes, Q_stokes, U_stokes, Stokes_cov
@@ -1566,7 +1616,7 @@ def compute_pol(I_stokes, Q_stokes, U_stokes, Stokes_cov, headers):
return P, debiased_P, s_P, s_P_P, PA, s_PA, s_PA_P return P, debiased_P, s_P, s_P_P, PA, s_PA, s_PA_P
def rotate_Stokes(I_stokes, Q_stokes, U_stokes, Stokes_cov, data_mask, headers, ang=None, SNRi_cut=None): def rotate_Stokes(I_stokes, Q_stokes, U_stokes, Stokes_cov, data_mask, headers, SNRi_cut=None):
""" """
    Use scipy.ndimage.rotate to rotate I_stokes by a given angle, and a rotation     Use scipy.ndimage.rotate to rotate I_stokes by a given angle, and a rotation
    matrix to rotate Q, U by the same angle (in degrees), updating the headers.     matrix to rotate Q, U by the same angle (in degrees), updating the headers.
@@ -1588,10 +1638,6 @@ def rotate_Stokes(I_stokes, Q_stokes, U_stokes, Stokes_cov, data_mask, headers,
2D boolean array delimiting the data to work on. 2D boolean array delimiting the data to work on.
headers : header list headers : header list
List of headers corresponding to the reduced images. List of headers corresponding to the reduced images.
ang : float, optional
Rotation angle (in degrees) that should be applied to the Stokes
parameters. If None, will rotate to have North up.
Defaults to None.
SNRi_cut : float, optional SNRi_cut : float, optional
Cut that should be applied to the signal-to-noise ratio on I. Cut that should be applied to the signal-to-noise ratio on I.
Any SNR < SNRi_cut won't be displayed. If None, cut won't be applied. Any SNR < SNRi_cut won't be displayed. If None, cut won't be applied.
@@ -1628,11 +1674,11 @@ def rotate_Stokes(I_stokes, Q_stokes, U_stokes, Stokes_cov, data_mask, headers,
U_stokes[i, j] = eps * np.sqrt(Stokes_cov[2, 2][i, j]) U_stokes[i, j] = eps * np.sqrt(Stokes_cov[2, 2][i, j])
# Rotate I_stokes, Q_stokes, U_stokes using rotation matrix # Rotate I_stokes, Q_stokes, U_stokes using rotation matrix
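    # The rotation angle is derived from the celestial WCS PC matrix of each header and averaged over all exposures.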
if ang is None: ang = np.zeros((len(headers),))
ang = np.zeros((len(headers),)) for i, head in enumerate(headers):
for i, head in enumerate(headers): pc = WCS(head).celestial.wcs.pc[0,0]
ang[i] = -head["orientat"] ang[i] = -np.arccos(WCS(head).celestial.wcs.pc[0,0]) * 180.0 / np.pi if np.abs(pc) < 1. else 0.
ang = ang.mean() ang = ang.mean()
alpha = np.pi / 180.0 * ang alpha = np.pi / 180.0 * ang
mrot = np.array([[1.0, 0.0, 0.0], [0.0, np.cos(2.0 * alpha), np.sin(2.0 * alpha)], [0, -np.sin(2.0 * alpha), np.cos(2.0 * alpha)]]) mrot = np.array([[1.0, 0.0, 0.0], [0.0, np.cos(2.0 * alpha), np.sin(2.0 * alpha)], [0, -np.sin(2.0 * alpha), np.cos(2.0 * alpha)]])
@@ -1684,6 +1730,7 @@ def rotate_Stokes(I_stokes, Q_stokes, U_stokes, Stokes_cov, data_mask, headers,
new_header.set("PC1_1", 1.0) new_header.set("PC1_1", 1.0)
if new_wcs.wcs.pc[1, 1] == 1.0: if new_wcs.wcs.pc[1, 1] == 1.0:
new_header.set("PC2_2", 1.0) new_header.set("PC2_2", 1.0)
new_header["orientat"] = header["orientat"] + ang
new_headers.append(new_header) new_headers.append(new_header)
@@ -1724,14 +1771,14 @@ def rotate_Stokes(I_stokes, Q_stokes, U_stokes, Stokes_cov, data_mask, headers,
for header in new_headers: for header in new_headers:
header["P_int"] = (P_diluted, "Integrated polarization degree") header["P_int"] = (P_diluted, "Integrated polarization degree")
header["P_int_err"] = (np.ceil(P_diluted_err * 1000.0) / 1000.0, "Integrated polarization degree error") header["sP_int"] = (np.ceil(P_diluted_err * 1000.0) / 1000.0, "Integrated polarization degree error")
header["PA_int"] = (PA_diluted, "Integrated polarization angle") header["PA_int"] = (PA_diluted, "Integrated polarization angle")
header["PA_int_err"] = (np.ceil(PA_diluted_err * 10.0) / 10.0, "Integrated polarization angle error") header["sPA_int"] = (np.ceil(PA_diluted_err * 10.0) / 10.0, "Integrated polarization angle error")
return new_I_stokes, new_Q_stokes, new_U_stokes, new_Stokes_cov, new_data_mask, new_headers return new_I_stokes, new_Q_stokes, new_U_stokes, new_Stokes_cov, new_data_mask, new_headers
def rotate_data(data_array, error_array, data_mask, headers, ang): def rotate_data(data_array, error_array, data_mask, headers):
""" """
    Use scipy.ndimage.rotate to rotate the images in data_array and error_array,     Use scipy.ndimage.rotate to rotate the images in data_array and error_array,
    and update the headers for the new orientation angle.     and update the headers for the new orientation angle.
@@ -1746,9 +1793,6 @@ def rotate_data(data_array, error_array, data_mask, headers, ang):
2D boolean array delimiting the data to work on. 2D boolean array delimiting the data to work on.
headers : header list headers : header list
List of headers corresponding to the reduced images. List of headers corresponding to the reduced images.
ang : float
Rotation angle (in degrees) that should be applied to the Stokes
parameters
---------- ----------
Returns: Returns:
new_data_array : numpy.ndarray new_data_array : numpy.ndarray
@@ -1762,7 +1806,6 @@ def rotate_data(data_array, error_array, data_mask, headers, ang):
for the new orientation angle. for the new orientation angle.
""" """
# Rotate I_stokes, Q_stokes, U_stokes using rotation matrix # Rotate I_stokes, Q_stokes, U_stokes using rotation matrix
alpha = ang * np.pi / 180.0
old_center = np.array(data_array[0].shape) / 2 old_center = np.array(data_array[0].shape) / 2
shape = np.fix(np.array(data_array[0].shape) * np.sqrt(2.5)).astype(int) shape = np.fix(np.array(data_array[0].shape) * np.sqrt(2.5)).astype(int)
@@ -1771,37 +1814,41 @@ def rotate_data(data_array, error_array, data_mask, headers, ang):
data_array = zeropad(data_array, [data_array.shape[0], *shape]) data_array = zeropad(data_array, [data_array.shape[0], *shape])
error_array = zeropad(error_array, [error_array.shape[0], *shape]) error_array = zeropad(error_array, [error_array.shape[0], *shape])
data_mask = zeropad(data_mask, shape) data_mask = zeropad(data_mask, shape)
# Rotate original images using scipy.ndimage.rotate # Rotate original images using scipy.ndimage.rotate
new_headers = []
new_data_array = [] new_data_array = []
new_error_array = [] new_error_array = []
for i in range(data_array.shape[0]): new_data_mask = []
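    # Rotate each exposure by minus the ORIENTAT angle read from its own header.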
    for i, header in zip(range(data_array.shape[0]), headers):
ang = -float(header["ORIENTAT"])
alpha = ang * np.pi / 180.0
new_data_array.append(sc_rotate(data_array[i], ang, order=1, reshape=False, cval=0.0)) new_data_array.append(sc_rotate(data_array[i], ang, order=1, reshape=False, cval=0.0))
new_error_array.append(sc_rotate(error_array[i], ang, order=1, reshape=False, cval=0.0)) new_error_array.append(sc_rotate(error_array[i], ang, order=1, reshape=False, cval=0.0))
new_data_array = np.array(new_data_array) new_data_mask.append(sc_rotate(data_mask * 10.0, ang, order=1, reshape=False, cval=0.0))
new_error_array = np.array(new_error_array)
new_data_mask = sc_rotate(data_mask * 10.0, ang, order=1, reshape=False, cval=0.0)
new_data_mask[new_data_mask < 2] = 0.0
new_data_mask = new_data_mask.astype(bool)
for i in range(new_data_array.shape[0]): # Update headers to new angle
new_data_array[i][new_data_array[i] < 0.0] = 0.0 mrot = np.array([[np.cos(-alpha), -np.sin(-alpha)], [np.sin(-alpha), np.cos(-alpha)]])
# Update headers to new angle
new_headers = []
mrot = np.array([[np.cos(-alpha), -np.sin(-alpha)], [np.sin(-alpha), np.cos(-alpha)]])
for header in headers:
new_header = deepcopy(header) new_header = deepcopy(header)
new_header["orientat"] = header["orientat"] + ang
new_wcs = WCS(header).celestial.deepcopy() new_wcs = WCS(header).celestial.deepcopy()
new_wcs.wcs.pc[:2, :2] = np.dot(mrot, new_wcs.wcs.pc[:2, :2]) new_wcs.wcs.pc[:2, :2] = np.dot(mrot, new_wcs.wcs.pc[:2, :2])
new_wcs.wcs.crpix[:2] = np.dot(mrot, new_wcs.wcs.crpix[:2] - old_center[::-1]) + new_center[::-1] new_wcs.wcs.crpix[:2] = np.dot(mrot, new_wcs.wcs.crpix[:2] - old_center[::-1]) + new_center[::-1]
new_wcs.wcs.set() new_wcs.wcs.set()
for key, val in new_wcs.to_header().items(): for key, val in new_wcs.to_header().items():
new_header[key] = val new_header[key] = val
new_header["ORIENTAT"] = np.arccos(new_wcs.celestial.wcs.pc[0,0]) * 180.0 / np.pi
new_header["ROTATE"] = ang
new_headers.append(new_header) new_headers.append(new_header)
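        # Shift the global polarizer angles by the rotation applied to this exposure.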
globals()["theta"] = globals()["theta"] - alpha
new_data_array = np.array(new_data_array)
new_error_array = np.array(new_error_array)
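    # Merge the masks rotated at each exposure angle, keeping only pixels that retain at least two thirds of the maximum coverage.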
new_data_mask = np.array(new_data_mask).sum(axis=0)
    new_data_mask[new_data_mask < new_data_mask.max() * 2.0 / 3.0] = 0.0
new_data_mask = new_data_mask.astype(bool)
for i in range(new_data_array.shape[0]):
new_data_array[i][new_data_array[i] < 0.0] = 0.0
return new_data_array, new_error_array, new_data_mask, new_headers return new_data_array, new_error_array, new_data_mask, new_headers