modify files to comply with pep8 format

2024-02-26 16:30:10 +01:00
parent d2b59cf05a
commit 893cf339c7
12 changed files with 1751 additions and 1659 deletions


@@ -1,4 +1,4 @@
#!/usr/bin/python3
# -*- coding:utf-8 -*-
"""
Main script where are progressively added the steps for the FOC pipeline reduction.
@@ -15,8 +15,8 @@ from matplotlib.colors import LogNorm
def main(target=None, proposal_id=None, infiles=None, output_dir="./data", crop=0, interactive=0):
# Reduction parameters
# Deconvolution
deconvolve = False
if deconvolve:
# from lib.deconvolve import from_file_psf
@@ -28,38 +28,38 @@ def main(target=None, proposal_id=None, infiles=None, output_dir="./data", crop=
iterations = 5
algo = "richardson"
# Initial crop
display_crop = False
# Background estimation
error_sub_type = 'freedman-diaconis' # sqrt, sturges, rice, scott, freedman-diaconis (default) or shape (example (51, 51))
subtract_error = 1.00
display_error = False
# Data binning
rebin = True
pxsize = 0.10
px_scale = 'arcsec' # pixel, arcsec or full
rebin_operation = 'sum' # sum or average
# Alignement
align_center = 'center' # If None will not align the images
display_bkg = False
display_align = False
display_data = False
# Smoothing
smoothing_function = 'combine' # gaussian_after, weighted_gaussian_after, gaussian, weighted_gaussian or combine
smoothing_FWHM = 0.10 # If None, no smoothing is done
smoothing_scale = 'arcsec' # pixel or arcsec
# Rotation
rotate_data = False # rotation to North convention can give erroneous results
rotate_stokes = True
# Final crop
crop = False # Crop to desired ROI
interactive = False # Whether to output to intercative analysis tool
# Polarization map output
SNRp_cut = 3. # P measurments with SNR>3
@@ -68,10 +68,10 @@ def main(target=None, proposal_id=None, infiles=None, output_dir="./data", crop=
vec_scale = 3
step_vec = 1 # plot all vectors in the array. if step_vec = 2, then every other vector will be plotted if step_vec = 0 then all vectors are displayed at full length
# Pipeline start
# Step 1:
# Get data from fits files and translate to flux in erg/cm²/s/Angstrom.
if infiles is not None:
prod = np.array([["/".join(filepath.split('/')[:-1]), filepath.split('/')[-1]] for filepath in infiles], dtype=str)
obs_dir = "/".join(infiles[0].split("/")[:-1])
if not path_exists(obs_dir):
@@ -100,12 +100,14 @@ def main(target=None, proposal_id=None, infiles=None, output_dir="./data", crop=
else:
figtype = "full"
if smoothing_FWHM is not None:
figtype += "_"+"".join(["".join([s[0] for s in smoothing_function.split("_")]),
"{0:.2f}".format(smoothing_FWHM), smoothing_scale]) # additionnal informations
if align_center is None:
figtype += "_not_aligned"
# Crop data to remove outside blank margins.
data_array, error_array, headers = proj_red.crop_array(data_array, headers, step=5, null_val=0.,
inside=True, display=display_crop, savename=figname, plots_folder=plots_folder)
# Deconvolve data using Richardson-Lucy iterative algorithm with a gaussian PSF of given FWHM.
if deconvolve:
@@ -119,16 +121,16 @@ def main(target=None, proposal_id=None, infiles=None, output_dir="./data", crop=
proj_plots.plot_obs(data_array, headers, vmin=data_array[data_array > 0.].min()*headers[0]['photflam'], vmax=data_array[data_array > 0.].max()*headers[0]['photflam'], savename="_".join([figname, "bkg"]), plots_folder=plots_folder)
# Align and rescale images with oversampling.
data_array, error_array, headers, data_mask = proj_red.align_data(
data_array, headers, error_array=error_array, background=background, upsample_factor=10, ref_center=align_center, return_shifts=False)
if display_align:
proj_plots.plot_obs(data_array, headers, vmin=data_array[data_array > 0.].min()*headers[0]['photflam'], vmax=data_array[data_array > 0.].max()*headers[0]['photflam'], savename="_".join([figname, str(align_center)]), plots_folder=plots_folder)
# Rebin data to desired pixel size.
if rebin:
data_array, error_array, headers, Dxy, data_mask = proj_red.rebin_array(
data_array, error_array, headers, pxsize=pxsize, scale=px_scale, operation=rebin_operation, data_mask=data_mask)
# Rotate data to have North up
if rotate_data:
data_mask = np.ones(data_array.shape[1:]).astype(bool)
alpha = headers[0]['orientat']
@@ -139,34 +141,34 @@ def main(target=None, proposal_id=None, infiles=None, output_dir="./data", crop=
proj_plots.plot_obs(data_array, headers, vmin=data_array[data_array > 0.].min()*headers[0]['photflam'], vmax=data_array[data_array > 0.].max()*headers[0]['photflam'], savename="_".join([figname, "rebin"]), plots_folder=plots_folder)
background = np.array([np.array(bkg).reshape(1, 1) for bkg in background])
background_error = np.array([np.array(np.sqrt((bkg-background[np.array([h['filtnam1'] == head['filtnam1'] for h in headers], dtype=bool)].mean()) ** 2/np.sum([h['filtnam1'] == head['filtnam1'] for h in headers]))).reshape(1, 1) for bkg, head in zip(background, headers)])
# Step 2:
# Compute Stokes I, Q, U with smoothed polarized images
# SMOOTHING DISCUSSION :
# FWHM of FOC have been estimated at about 0.03" across 1500-5000 Angstrom band, which is about 2 detector pixels wide
# see Jedrzejewski, R.; Nota, A.; Hack, W. J., A Comparison Between FOC and WFPC2
# Bibcode : 1995chst.conf...10J
I_stokes, Q_stokes, U_stokes, Stokes_cov = proj_red.compute_Stokes(data_array, error_array, data_mask, headers, FWHM=smoothing_FWHM, scale=smoothing_scale, smoothing=smoothing_function, transmitcorr=False)
I_bkg, Q_bkg, U_bkg, S_cov_bkg = proj_red.compute_Stokes(background, background_error, np.array(True).reshape(1, 1), headers, FWHM=None, scale=smoothing_scale, smoothing=smoothing_function, transmitcorr=False)
# Step 3:
# Rotate images to have North up
if rotate_stokes:
I_stokes, Q_stokes, U_stokes, Stokes_cov, data_mask, headers = proj_red.rotate_Stokes(I_stokes, Q_stokes, U_stokes, Stokes_cov, data_mask, headers, SNRi_cut=None)
I_bkg, Q_bkg, U_bkg, S_cov_bkg, _, _ = proj_red.rotate_Stokes(I_bkg, Q_bkg, U_bkg, S_cov_bkg, np.array(True).reshape(1, 1), headers, SNRi_cut=None)
# Compute polarimetric parameters (polarisation degree and angle).
P, debiased_P, s_P, s_P_P, PA, s_PA, s_PA_P = proj_red.compute_pol(I_stokes, Q_stokes, U_stokes, Stokes_cov, headers)
P_bkg, debiased_P_bkg, s_P_bkg, s_P_P_bkg, PA_bkg, s_PA_bkg, s_PA_P_bkg = proj_red.compute_pol(I_bkg, Q_bkg, U_bkg, S_cov_bkg, headers)
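For reference, compute_pol derives the polarisation degree and angle from the Stokes maps; a minimal sketch of the standard relations involved is given below. It is illustrative only: the pipeline routine also returns debiased values and propagated uncertainties (debiased_P, s_P, s_PA above), which this sketch does not attempt.

    import numpy as np

    def pol_deg_ang(I, Q, U):
        # Standard linear-polarisation relations:
        # P = sqrt(Q**2 + U**2) / I  and  PA = 0.5 * arctan2(U, Q).
        P = np.sqrt(Q**2 + U**2) / I
        PA = 0.5 * np.degrees(np.arctan2(U, Q))  # angle in degrees
        return P, PA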
# Step 4:
# Save image to FITS.
Stokes_test = proj_fits.save_Stokes(I_stokes, Q_stokes, U_stokes, Stokes_cov, P, debiased_P, s_P, s_P_P, PA, s_PA, s_PA_P, headers, data_mask, "_".join([figname, figtype]), data_folder=data_folder, return_hdul=True)
data_mask = Stokes_test[-1].data.astype(bool)
# Step 5:
# crop to desired region of interest (roi)
if crop:
figtype += "_crop"
stokescrop = proj_plots.crop_Stokes(deepcopy(Stokes_test), norm=LogNorm())
@@ -183,19 +185,29 @@ def main(target=None, proposal_id=None, infiles=None, output_dir="./data", crop=
print("PA_bkg = {0:.1f} ± {1:.1f} °".format(PA_bkg[0, 0], np.ceil(s_PA_bkg[0, 0]*10.)/10.))
# Plot polarisation map (Background is either total Flux, Polarization degree or Polarization degree error).
if px_scale.lower() not in ['full', 'integrate'] and not interactive:
proj_plots.polarisation_map(deepcopy(Stokes_test), data_mask, SNRp_cut=SNRp_cut, SNRi_cut=SNRi_cut, flux_lim=flux_lim,
step_vec=step_vec, vec_scale=vec_scale, savename="_".join([figname, figtype]), plots_folder=plots_folder)
proj_plots.polarisation_map(deepcopy(Stokes_test), data_mask, SNRp_cut=SNRp_cut, SNRi_cut=SNRi_cut, flux_lim=flux_lim, step_vec=step_vec,
vec_scale=vec_scale, savename="_".join([figname, figtype, "I"]), plots_folder=plots_folder, display='Intensity')
proj_plots.polarisation_map(deepcopy(Stokes_test), data_mask, SNRp_cut=SNRp_cut, SNRi_cut=SNRi_cut, flux_lim=flux_lim, step_vec=step_vec,
vec_scale=vec_scale, savename="_".join([figname, figtype, "P_flux"]), plots_folder=plots_folder, display='Pol_Flux')
proj_plots.polarisation_map(deepcopy(Stokes_test), data_mask, SNRp_cut=SNRp_cut, SNRi_cut=SNRi_cut, flux_lim=flux_lim, step_vec=step_vec,
vec_scale=vec_scale, savename="_".join([figname, figtype, "P"]), plots_folder=plots_folder, display='Pol_deg')
proj_plots.polarisation_map(deepcopy(Stokes_test), data_mask, SNRp_cut=SNRp_cut, SNRi_cut=SNRi_cut, flux_lim=flux_lim, step_vec=step_vec,
vec_scale=vec_scale, savename="_".join([figname, figtype, "PA"]), plots_folder=plots_folder, display='Pol_ang')
proj_plots.polarisation_map(deepcopy(Stokes_test), data_mask, SNRp_cut=SNRp_cut, SNRi_cut=SNRi_cut, flux_lim=flux_lim, step_vec=step_vec,
vec_scale=vec_scale, savename="_".join([figname, figtype, "I_err"]), plots_folder=plots_folder, display='I_err')
proj_plots.polarisation_map(deepcopy(Stokes_test), data_mask, SNRp_cut=SNRp_cut, SNRi_cut=SNRi_cut, flux_lim=flux_lim, step_vec=step_vec,
vec_scale=vec_scale, savename="_".join([figname, figtype, "P_err"]), plots_folder=plots_folder, display='Pol_deg_err')
proj_plots.polarisation_map(deepcopy(Stokes_test), data_mask, SNRp_cut=SNRp_cut, SNRi_cut=SNRi_cut, flux_lim=flux_lim, step_vec=step_vec,
vec_scale=vec_scale, savename="_".join([figname, figtype, "SNRi"]), plots_folder=plots_folder, display='SNRi')
proj_plots.polarisation_map(deepcopy(Stokes_test), data_mask, SNRp_cut=SNRp_cut, SNRi_cut=SNRi_cut, flux_lim=flux_lim, step_vec=step_vec,
vec_scale=vec_scale, savename="_".join([figname, figtype, "SNRp"]), plots_folder=plots_folder, display='SNRp')
elif not interactive:
proj_plots.polarisation_map(deepcopy(Stokes_test), data_mask, SNRp_cut=SNRp_cut, SNRi_cut=SNRi_cut,
savename="_".join([figname, figtype]), plots_folder=plots_folder, display='integrate')
elif px_scale.lower() not in ['full', 'integrate']:
proj_plots.pol_map(Stokes_test, SNRp_cut=SNRp_cut, SNRi_cut=SNRi_cut, flux_lim=flux_lim)
return 0
@@ -204,18 +216,15 @@ if __name__ == "__main__":
import argparse
parser = argparse.ArgumentParser(description='Query MAST for target products')
parser.add_argument('-t', '--target', metavar='targetname', required=False, help='the name of the target', type=str, default=None)
parser.add_argument('-p', '--proposal_id', metavar='proposal_id', required=False, help='the proposal id of the data products', type=int, default=None)
parser.add_argument('-f', '--files', metavar='path', required=False, nargs='*', help='the full or relative path to the data products', default=None)
parser.add_argument('-o', '--output_dir', metavar='directory_path', required=False,
help='output directory path for the data products', type=str, default="./data")
parser.add_argument('-c', '--crop', metavar='crop_boolean', required=False, help='whether to crop the analysis region', type=int, default=0)
parser.add_argument('-i', '--interactive', metavar='interactive_boolean', required=False,
help='whether to output to the interactive analysis tool', type=int, default=0)
args = parser.parse_args()
exitcode = main(target=args.target, proposal_id=args.proposal_id, infiles=args.files,
output_dir=args.output_dir, crop=args.crop, interactive=args.interactive)
print("Finished with ExitCode: ", exitcode)


@@ -4,7 +4,7 @@ from sys import argv
arglist = argv[1:]
options = "hf:p:i:l:"
long_options = ["help", "fits=", "snrp=", "snri=", "lim="]
fits_path = None
SNRp_cut, SNRi_cut = 3, 30
@@ -28,12 +28,12 @@ try:
except get_error as err:
print(str(err))
if fits_path is not None:
from astropy.io import fits
from lib.plots import pol_map
Stokes_UV = fits.open(fits_path)
p = pol_map(Stokes_UV, SNRp_cut=SNRp_cut, SNRi_cut=SNRi_cut, flux_lim=flux_lim)
else:
print("python3 analysis.py -f <path_to_reduced_fits> -p <SNRp_cut> -i <SNRi_cut> -l <flux_lim>")


@@ -9,7 +9,6 @@ prototypes :
- bkg_mini(data, error, mask, headers, sub_shape, display, savename, plots_folder) -> n_data_array, n_error_array, headers, background)
Compute the error (noise) of the input array by looking at the sub-region of minimal flux in every image and of shape sub_shape.
"""
from os.path import join as path_join
from copy import deepcopy
import numpy as np
@@ -21,36 +20,40 @@ from datetime import datetime
from lib.plots import plot_obs
from scipy.optimize import curve_fit
def gauss(x, *p):
N, mu, sigma = p
return N*np.exp(-(x-mu)**2/(2.*sigma**2))
def gausspol(x, *p):
N, mu, sigma, a, b, c, d = p
return N*np.exp(-(x-mu)**2/(2.*sigma**2)) + a*np.log(x) + b/x + c*x + d
def bin_centers(edges):
return (edges[1:]+edges[:-1])/2.
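gauss and gausspol are the histogram models fitted further down with scipy.optimize.curve_fit. A self-contained sketch of such a fit on synthetic data is given below; all numerical values are chosen for illustration only and do not come from the pipeline.

    import numpy as np
    from scipy.optimize import curve_fit

    def gausspol(x, *p):
        N, mu, sigma, a, b, c, d = p
        return N*np.exp(-(x-mu)**2/(2.*sigma**2)) + a*np.log(x) + b/x + c*x + d

    rng = np.random.default_rng(0)
    x = np.linspace(0.5, 10., 200)
    y = gausspol(x, 50., 4., 0.8, 0.1, 0.2, 0.05, 1.) + rng.normal(0., 0.5, x.size)
    # Initial guess: amplitude, peak position, width, then small polynomial terms.
    p0 = [y.max(), x[np.argmax(y)], 1., 1e-3, 1e-3, 1e-3, 1e-3]
    popt, pcov = curve_fit(gausspol, x, y, p0=p0)
    background_level = popt[1]  # the fitted peak position is what the estimators below use as the background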
def display_bkg(data, background, std_bkg, headers, histograms=None, binning=None, coeff=None, rectangle=None, savename=None, plots_folder="./"):
plt.rcParams.update({'font.size': 15})
convert_flux = np.array([head['photflam'] for head in headers])
date_time = np.array([headers[i]['date-obs']+';'+headers[i]['time-obs']
for i in range(len(headers))])
date_time = np.array([datetime.strptime(d, '%Y-%m-%d;%H:%M:%S')
for d in date_time])
filt = np.array([headers[i]['filtnam1'] for i in range(len(headers))])
dict_filt = {"POL0": 'r', "POL60": 'g', "POL120": 'b'}
c_filt = np.array([dict_filt[f] for f in filt])
fig, ax = plt.subplots(figsize=(10, 6), constrained_layout=True)
for f in np.unique(filt):
mask = [fil == f for fil in filt]
ax.scatter(date_time[mask], background[mask]*convert_flux[mask],
color=dict_filt[f], label="{0:s}".format(f))
ax.errorbar(date_time, background*convert_flux,
yerr=std_bkg*convert_flux, fmt='+k',
markersize=0, ecolor=c_filt)
# Date handling
locator = mdates.AutoDateLocator()
formatter = mdates.ConciseDateFormatter(locator)
@@ -60,85 +63,89 @@ def display_bkg(data, background, std_bkg, headers, histograms=None, binning=Non
ax.set_xlabel("Observation date and time")
ax.set_ylabel(r"Flux [$ergs \cdot cm^{-2} \cdot s^{-1} \cdot \AA^{-1}$]")
plt.legend()
if not (savename is None):
this_savename = deepcopy(savename)
if not savename[-4:] in ['.png', '.jpg', '.pdf']:
this_savename += '_background_flux.pdf'
else:
this_savename = savename[:-4]+"_background_flux"+savename[-4:]
fig.savefig(path_join(plots_folder, this_savename), bbox_inches='tight')
if not (histograms is None):
filt_obs = {"POL0": 0, "POL60": 0, "POL120": 0}
fig_h, ax_h = plt.subplots(figsize=(10, 6), constrained_layout=True)
for i, (hist, bins) in enumerate(zip(histograms, binning)):
filt_obs[headers[i]['filtnam1']] += 1
ax_h.plot(bins*convert_flux[i], hist, '+', color="C{0:d}".format(i), alpha=0.8,
label=headers[i]['filtnam1']+' (Obs '+str(filt_obs[headers[i]['filtnam1']])+')')
ax_h.plot([background[i]*convert_flux[i], background[i]*convert_flux[i]], [hist.min(), hist.max()], 'x--', color="C{0:d}".format(i), alpha=0.8)
if not (coeff is None):
ax_h.plot(bins*convert_flux[i], gausspol(bins, *coeff[i]), '--', color="C{0:d}".format(i), alpha=0.8)
ax_h.set_xscale('log')
ax_h.set_ylim([0., np.max([hist.max() for hist in histograms])])
ax_h.set_xlim([np.min(background*convert_flux)*1e-2, np.max(background*convert_flux)*1e2])
ax_h.set_xlabel(r"Flux [$ergs \cdot cm^{-2} \cdot s^{-1} \cdot \AA^{-1}$]")
ax_h.set_ylabel(r"Number of pixels in bin")
ax_h.set_title("Histogram for each observation")
plt.legend()
if not (savename is None):
this_savename = deepcopy(savename)
if not savename[-4:] in ['.png', '.jpg', '.pdf']:
this_savename += '_histograms.pdf'
else:
this_savename = savename[:-4]+"_histograms"+savename[-4:]
fig_h.savefig(path_join(plots_folder, this_savename), bbox_inches='tight')
fig2, ax2 = plt.subplots(figsize=(10, 10))
data0 = data[0]*convert_flux[0]
bkg_data0 = data0 <= background[0]*convert_flux[0]
instr = headers[0]['instrume']
rootname = headers[0]['rootname']
exptime = headers[0]['exptime']
filt = headers[0]['filtnam1']
# plots
im2 = ax2.imshow(data0, norm=LogNorm(data0[data0 > 0.].mean()/10., data0.max()), origin='lower', cmap='gray')
ax2.imshow(bkg_data0, origin='lower', cmap='Reds', alpha=0.5)
if not (rectangle is None):
x, y, width, height, angle, color = rectangle[0]
ax2.add_patch(Rectangle((x, y), width, height, edgecolor=color, fill=False, lw=2))
ax2.annotate(instr+":"+rootname, color='white', fontsize=10, xy=(0.01, 1.00), xycoords='axes fraction', verticalalignment='top', horizontalalignment='left')
ax2.annotate(filt, color='white', fontsize=14, xy=(0.01, 0.01), xycoords='axes fraction', verticalalignment='bottom', horizontalalignment='left')
ax2.annotate(str(exptime)+" s", color='white', fontsize=10, xy=(1.00, 0.01),
xycoords='axes fraction', verticalalignment='bottom', horizontalalignment='right')
ax2.set(xlabel='pixel offset', ylabel='pixel offset', aspect='equal')
fig2.subplots_adjust(hspace=0, wspace=0, right=1.0)
fig2.colorbar(im2, ax=ax2, location='right', aspect=50, pad=0.025, label=r"Flux [$ergs \cdot cm^{-2} \cdot s^{-1} \cdot \AA^{-1}$]")
if not (savename is None):
this_savename = deepcopy(savename)
if not savename[-4:] in ['.png', '.jpg', '.pdf']:
this_savename += '_'+filt+'_background_location.pdf'
else:
this_savename = savename[:-4]+'_'+filt+'_background_location'+savename[-4:]
fig2.savefig(path_join(plots_folder, this_savename), bbox_inches='tight')
if not (rectangle is None):
plot_obs(data, headers, vmin=data[data > 0.].min()*convert_flux.mean(), vmax=data[data > 0.].max()*convert_flux.mean(), rectangle=rectangle,
savename=savename+"_background_location", plots_folder=plots_folder)
elif not (rectangle is None):
plot_obs(data, headers, vmin=data[data > 0.].min(), vmax=data[data > 0.].max(), rectangle=rectangle)
plt.show()
def sky_part(img):
rand_ind = np.unique((np.random.rand(np.floor(img.size/4).astype(int))*2*img.size).astype(int) % img.size)
rand_pix = img.flatten()[rand_ind]
# Intensity range
sky_med = np.median(rand_pix)
sig = np.min([img[img < sky_med].std(), img[img > sky_med].std()])
sky_range = [sky_med-2.*sig, np.max([sky_med+sig, 7e-4])] # Detector background average FOC Data Handbook Sec. 7.6
sky = img[np.logical_and(img >= sky_range[0], img <= sky_range[1])]
return sky, sky_range
def bkg_estimate(img, bins=None, chi2=None, coeff=None):
if bins is None or chi2 is None or coeff is None:
bins, chi2, coeff = [8], [], []
@@ -147,20 +154,21 @@ def bkg_estimate(img, bins=None, chi2=None, coeff=None):
bins.append(int(3./2.*bins[-1]))
except IndexError:
bins, chi2, coeff = [8], [], []
hist, bin_edges = np.histogram(img[img > 0], bins=bins[-1])
binning = bin_centers(bin_edges)
peak = binning[np.argmax(hist)]
bins_fwhm = binning[hist > hist.max()/2.]
fwhm = bins_fwhm[-1]-bins_fwhm[0]
p0 = [hist.max(), peak, fwhm, 1e-3, 1e-3, 1e-3, 1e-3]
try:
popt, pcov = curve_fit(gausspol, binning, hist, p0=p0)
except RuntimeError:
popt = p0
chi2.append(np.sum((hist - gausspol(binning, *popt))**2)/hist.size)
coeff.append(popt)
return bins, chi2, coeff
def bkg_fit(data, error, mask, headers, subtract_error=True, display=False, savename=None, plots_folder=""):
"""
----------
@@ -210,11 +218,11 @@ def bkg_fit(data, error, mask, headers, subtract_error=True, display=False, save
histograms, binning = [], []
for i, image in enumerate(data):
# Compute the Count-rate histogram for the image
sky, sky_range = sky_part(image[image > 0.])
bins, chi2, coeff = bkg_estimate(sky)
while bins[-1] < 256:
bins, chi2, coeff = bkg_estimate(sky, bins, chi2, coeff)
hist, bin_edges = np.histogram(sky, bins=bins[-1])
histograms.append(hist)
@@ -223,18 +231,18 @@ def bkg_fit(data, error, mask, headers, subtract_error=True, display=False, save
weights = 1/chi2**2
weights /= weights.sum()
bkg = np.sum(weights*coeff[:, 1])*subtract_error if subtract_error > 0 else np.sum(weights*coeff[:, 1])
error_bkg[i] *= bkg
n_error_array[i] = np.sqrt(n_error_array[i]**2 + error_bkg[i]**2)
# Substract background
if subtract_error > 0:
n_data_array[i][mask] = n_data_array[i][mask] - bkg
n_data_array[i][np.logical_and(mask, n_data_array[i] <= 0.01*bkg)] = 0.01*bkg
std_bkg[i] = image[np.abs(image-bkg)/bkg < 1.].std()
background[i] = bkg
if display:
@@ -295,50 +303,52 @@ def bkg_hist(data, error, mask, headers, sub_type=None, subtract_error=True, dis
histograms, binning, coeff = [], [], []
for i, image in enumerate(data):
# Compute the Count-rate histogram for the image
n_mask = np.logical_and(mask, image > 0.)
if not (sub_type is None):
if isinstance(sub_type, int):
n_bins = sub_type
elif sub_type.lower() in ['sqrt']:
n_bins = np.fix(np.sqrt(image[n_mask].size)).astype(int) # Square-root
elif sub_type.lower() in ['sturges']:
n_bins = np.ceil(np.log2(image[n_mask].size)).astype(int)+1 # Sturges
elif sub_type.lower() in ['rice']:
n_bins = 2*np.fix(np.power(image[n_mask].size, 1/3)).astype(int) # Rice
elif sub_type.lower() in ['scott']:
n_bins = np.fix((image[n_mask].max()-image[n_mask].min())/(3.5*image[n_mask].std()/np.power(image[n_mask].size, 1/3))).astype(int) # Scott
else:
n_bins = np.fix((image[n_mask].max()-image[n_mask].min())/(2*np.subtract(*np.percentile(image[n_mask], [75, 25])) /
np.power(image[n_mask].size, 1/3))).astype(int) # Freedman-Diaconis
else:
n_bins = np.fix((image[n_mask].max()-image[n_mask].min())/(2*np.subtract(*np.percentile(image[n_mask], [75, 25])) /
np.power(image[n_mask].size, 1/3))).astype(int) # Freedman-Diaconis
hist, bin_edges = np.histogram(np.log(image[n_mask]), bins=n_bins)
histograms.append(hist)
binning.append(np.exp(bin_centers(bin_edges)))
# Take the background as the count-rate with the maximum number of pixels
# hist_max = binning[-1][np.argmax(hist)]
# bkg = np.sqrt(np.sum(image[np.abs(image-hist_max)/hist_max<0.5]**2)/image[np.abs(image-hist_max)/hist_max<0.5].size)
# Fit a gaussian to the log-intensity histogram
bins_fwhm = binning[-1][hist > hist.max()/2.]
fwhm = bins_fwhm[-1]-bins_fwhm[0]
p0 = [hist.max(), binning[-1][np.argmax(hist)], fwhm, 1e-3, 1e-3, 1e-3, 1e-3]
popt, pcov = curve_fit(gausspol, binning[-1], hist, p0=p0)
coeff.append(popt)
bkg = popt[1]*subtract_error if subtract_error > 0 else popt[1]
error_bkg[i] *= bkg
n_error_array[i] = np.sqrt(n_error_array[i]**2 + error_bkg[i]**2)
# Substract background
if subtract_error > 0:
n_data_array[i][mask] = n_data_array[i][mask] - bkg
n_data_array[i][np.logical_and(mask, n_data_array[i] < 0.)] = 0.
std_bkg[i] = image[np.abs(image-bkg)/bkg < 1.].std()
background[i] = bkg
if display:
@@ -346,7 +356,7 @@ def bkg_hist(data, error, mask, headers, sub_type=None, subtract_error=True, dis
return n_data_array, n_error_array, headers, background
def bkg_mini(data, error, mask, headers, sub_shape=(15, 15), subtract_error=True, display=False, savename=None, plots_folder=""):
"""
Look for sub-image of shape sub_shape that have the smallest integrated
flux (no source assumption) and define the background on the image by the
@@ -396,11 +406,11 @@ def bkg_mini(data, error, mask, headers, sub_shape=(15,15), subtract_error=True,
"""
sub_shape = np.array(sub_shape)
# Make sub_shape of odd values
if not (np.all(sub_shape % 2)):
sub_shape += 1-sub_shape % 2
shape = np.array(data.shape)
diff = (sub_shape-1).astype(int)
temp = np.zeros((shape[0], shape[1]-diff[0], shape[2]-diff[1]))
n_data_array, n_error_array = deepcopy(data), deepcopy(error)
error_bkg = np.ones(n_data_array.shape)
@@ -408,37 +418,36 @@ def bkg_mini(data, error, mask, headers, sub_shape=(15,15), subtract_error=True,
background = np.zeros((data.shape[0]))
rectangle = []
for i, image in enumerate(data):
# Find the sub-image of smallest integrated flux (suppose no source)
# sub-image dominated by background
fmax = np.finfo(np.double).max
img = deepcopy(image)
img[1-mask] = fmax/(diff[0]*diff[1])
for r in range(temp.shape[1]):
for c in range(temp.shape[2]):
temp[i][r, c] = np.where(mask[r, c], img[r:r+diff[0], c:c+diff[1]].sum(), fmax/(diff[0]*diff[1]))
minima = np.unravel_index(np.argmin(temp.sum(axis=0)), temp.shape[1:])
for i, image in enumerate(data):
rectangle.append([minima[1], minima[0], sub_shape[1], sub_shape[0], 0., 'r'])
# Compute error : root mean square of the background
sub_image = image[minima[0]:minima[0]+sub_shape[0], minima[1]:minima[1]+sub_shape[1]]
# bkg = np.std(sub_image) # Previously computed using standard deviation over the background
bkg = np.sqrt(np.sum(sub_image**2)/sub_image.size)*subtract_error if subtract_error > 0 else np.sqrt(np.sum(sub_image**2)/sub_image.size)
error_bkg[i] *= bkg
n_error_array[i] = np.sqrt(n_error_array[i]**2 + error_bkg[i]**2)
# Substract background
if subtract_error > 0.:
n_data_array[i][mask] = n_data_array[i][mask] - bkg
n_data_array[i][np.logical_and(mask, n_data_array[i] <= 0.01*bkg)] = 0.01*bkg
std_bkg[i] = image[np.abs(image-bkg)/bkg < 1.].std()
background[i] = bkg
if display:
display_bkg(data, background, std_bkg, headers, rectangle=rectangle, savename=savename, plots_folder=plots_folder)
return n_data_array, n_error_array, headers, background


@@ -1,6 +1,5 @@
"""
Library functions for graham algorithm implementation (find the convex hull of a given list of points).
"""
from copy import deepcopy
@@ -8,30 +7,33 @@ import numpy as np
def clean_ROI(image):
"""
Remove instruments borders from an observation.
"""
H, J = [], []
shape = np.array(image.shape)
row, col = np.indices(shape)
for i in range(0, shape[0]):
r = row[i, :][image[i, :] > 0.]
c = col[i, :][image[i, :] > 0.]
if len(r) > 1 and len(c) > 1:
H.append((r[0], c[0]))
H.append((r[-1], c[-1]))
H = np.array(H)
for j in range(0, shape[1]):
r = row[:, j][image[:, j] > 0.]
c = col[:, j][image[:, j] > 0.]
if len(r) > 1 and len(c) > 1:
J.append((r[0], c[0]))
J.append((r[-1], c[-1]))
J = np.array(J)
xmin = np.min([H[:, 1].min(), J[:, 1].min()])
xmax = np.max([H[:, 1].max(), J[:, 1].max()])+1
ymin = np.min([H[:, 0].min(), J[:, 0].min()])
ymax = np.max([H[:, 0].max(), J[:, 0].max()])+1
return np.array([xmin, xmax, ymin, ymax])
# Define angle and vectors operations
@@ -116,7 +118,8 @@ def min_lexico(s):
"""
m = s[0]
for x in s:
if lexico(x, m):
m = x
return m
@@ -145,16 +148,16 @@ def comp(Omega, A, B):
# Implement quicksort
def partition(s, left, right, order):
"""
Take a random element of a list 's' between indexes 'left', 'right' and place it
at its right spot using relation order 'order'. Return the index at which
it was placed.
----------
Inputs:
s : list
List of elements to be ordered.
left, right : int
Index of the first and last elements to be considered.
order : func: A, B -> bool
Relation order between 2 elements A, B that returns True if A<=B,
@@ -164,30 +167,29 @@ def partition(s, l, r, order):
index : int
Index at which have been placed the element chosen by the function.
"""
i = left - 1
for j in range(left, right):
if order(s[j], s[right]):
i = i + 1
temp = deepcopy(s[i])
s[i] = deepcopy(s[j])
s[j] = deepcopy(temp)
temp = deepcopy(s[i+1])
s[i+1] = deepcopy(s[right])
s[right] = deepcopy(temp)
return i + 1
def sort_aux(s, left, right, order):
"""
Sort a list 's' between indexes 'left', 'right' using relation order 'order' by
dividing it in 2 sub-lists and sorting these.
"""
if left <= right:
# Call partition function that gives an index on which the list will be divided
q = partition(s, left, right, order)
sort_aux(s, left, q - 1, order)
sort_aux(s, q + 1, right, order)
def quicksort(s, order):
@@ -204,7 +206,7 @@ def sort_angles_distances(Omega, s):
Sort the list of points 's' for the composition order given reference point
Omega.
"""
def order(A, B): return comp(Omega, A, B)
quicksort(s, order)
@@ -326,24 +328,24 @@ def image_hull(image, step=5, null_val=0., inside=True):
H = []
shape = np.array(image.shape)
row, col = np.indices(shape)
for i in range(0, shape[0], step):
r = row[i, :][image[i, :] > null_val]
c = col[i, :][image[i, :] > null_val]
if len(r) > 1 and len(c) > 1:
H.append((r[0], c[0]))
H.append((r[-1], c[-1]))
for j in range(0, shape[1], step):
r = row[:, j][image[:, j] > null_val]
c = col[:, j][image[:, j] > null_val]
if len(r) > 1 and len(c) > 1:
if not ((r[0], c[0]) in H):
H.append((r[0], c[0]))
if not ((r[-1], c[-1]) in H):
H.append((r[-1], c[-1]))
S = np.array(convex_hull(H))
x_min, y_min = S[:, 0] < S[:, 0].mean(), S[:, 1] < S[:, 1].mean()
x_max, y_max = S[:, 0] > S[:, 0].mean(), S[:, 1] > S[:, 1].mean()
# Get the 4 extrema
S0 = S[x_min*y_min][np.abs(0-S[x_min*y_min].sum(axis=1)).min() == np.abs(0-S[x_min*y_min].sum(axis=1))][0]
S1 = S[x_min*y_max][np.abs(shape[1]-S[x_min*y_max].sum(axis=1)).min() == np.abs(shape[1]-S[x_min*y_max].sum(axis=1))][0]
@@ -351,14 +353,14 @@ def image_hull(image, step=5, null_val=0., inside=True):
S3 = S[x_max*y_max][np.abs(shape.sum()-S[x_max*y_max].sum(axis=1)).min() == np.abs(shape.sum()-S[x_max*y_max].sum(axis=1))][0]
# Get the vertex of the biggest included rectangle
if inside:
f0 = np.max([S0[0], S1[0]])
f1 = np.min([S2[0], S3[0]])
f2 = np.max([S0[1], S2[1]])
f3 = np.min([S1[1], S3[1]])
else:
f0 = np.min([S0[0], S1[0]])
f1 = np.max([S2[0], S3[0]])
f2 = np.min([S0[1], S2[1]])
f3 = np.max([S1[1], S3[1]])
return np.array([f0, f1, f2, f3]).astype(int)
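Both clean_ROI and image_hull reduce to finding the bounds of the illuminated (non-null) region of an observation. Below is a toy example of computing and applying such [xmin, xmax, ymin, ymax] bounds with plain NumPy; it is an illustrative equivalent, not the library's convex-hull implementation.

    import numpy as np

    image = np.zeros((8, 8))
    image[2:6, 3:7] = 1.                               # illuminated region
    rows = np.any(image > 0., axis=1)
    cols = np.any(image > 0., axis=0)
    ymin, ymax = np.where(rows)[0][[0, -1]] + np.array([0, 1])
    xmin, xmax = np.where(cols)[0][[0, -1]] + np.array([0, 1])
    cropped = image[ymin:ymax, xmin:xmax]              # shape (4, 4), blank margins removed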


@@ -1,10 +1,10 @@
"""
Library functions for phase cross-correlation computation.
"""
# Prefer FFTs via the new scipy.fft module when available (SciPy 1.4+)
# Otherwise fall back to numpy.fft.
# Like numpy 1.15+ scipy 1.3+ is also using pocketfft, but a newer
# C++/pybind11 version called pypocketfft
try:
import scipy.fft as fft
except ImportError:
@@ -14,7 +14,7 @@ import numpy as np
def _upsampled_dft(data, upsampled_region_size, upsample_factor=1,
axis_offsets=None):
"""
Upsampled DFT by matrix multiplication.
This code is intended to provide the same result as if the following
@@ -243,7 +243,7 @@ def phase_cross_correlation(reference_image, moving_image, *,
raise ValueError(
"NaN values found, please remove NaNs from your input data")
return shifts, _compute_error(CCmax, src_amp, target_amp), \
_compute_phasediff(CCmax)
else:
return shifts
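This vendored module mirrors scikit-image's phase cross-correlation registration routine. A usage sketch with the upstream function (assuming scikit-image is installed) shows the shift/error/phase-difference triple returned above:

    import numpy as np
    from skimage.registration import phase_cross_correlation

    reference = np.zeros((64, 64))
    reference[24:40, 24:40] = 1.
    moving = np.roll(reference, (3, -2), axis=(0, 1))  # known integer offset
    shift, error, phasediff = phase_cross_correlation(reference, moving, upsample_factor=10)
    # `shift` recovers the (row, column) offset between the frames,
    # with the sign convention documented by scikit-image.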


@@ -43,494 +43,521 @@ def abs2(x):
def zeropad(arr, shape): def zeropad(arr, shape):
""" """
Zero-pad array ARR to given shape. Zero-pad array ARR to given shape.
The contents of ARR is approximately centered in the result. The contents of ARR is approximately centered in the result.
""" """
rank = arr.ndim rank = arr.ndim
if len(shape) != rank: if len(shape) != rank:
raise ValueError("bad number of dimensions") raise ValueError("bad number of dimensions")
diff = np.asarray(shape) - np.asarray(arr.shape) diff = np.asarray(shape) - np.asarray(arr.shape)
if diff.min() < 0: if diff.min() < 0:
raise ValueError("output dimensions must be larger or equal input dimensions") raise ValueError("output dimensions must be larger or equal input dimensions")
offset = diff//2 offset = diff//2
z = np.zeros(shape, dtype=arr.dtype) z = np.zeros(shape, dtype=arr.dtype)
if rank == 1: if rank == 1:
i0 = offset[0]; n0 = i0 + arr.shape[0] i0 = offset[0]
z[i0:n0] = arr n0 = i0 + arr.shape[0]
elif rank == 2: z[i0:n0] = arr
i0 = offset[0]; n0 = i0 + arr.shape[0] elif rank == 2:
i1 = offset[1]; n1 = i1 + arr.shape[1] i0 = offset[0]
z[i0:n0,i1:n1] = arr n0 = i0 + arr.shape[0]
elif rank == 3: i1 = offset[1]
i0 = offset[0]; n0 = i0 + arr.shape[0] n1 = i1 + arr.shape[1]
i1 = offset[1]; n1 = i1 + arr.shape[1] z[i0:n0, i1:n1] = arr
i2 = offset[2]; n2 = i2 + arr.shape[2] elif rank == 3:
z[i0:n0,i1:n1,i2:n2] = arr i0 = offset[0]
elif rank == 4: n0 = i0 + arr.shape[0]
i0 = offset[0]; n0 = i0 + arr.shape[0] i1 = offset[1]
i1 = offset[1]; n1 = i1 + arr.shape[1] n1 = i1 + arr.shape[1]
i2 = offset[2]; n2 = i2 + arr.shape[2] i2 = offset[2]
i3 = offset[3]; n3 = i3 + arr.shape[3] n2 = i2 + arr.shape[2]
z[i0:n0,i1:n1,i2:n2,i3:n3] = arr z[i0:n0, i1:n1, i2:n2] = arr
elif rank == 5: elif rank == 4:
i0 = offset[0]; n0 = i0 + arr.shape[0] i0 = offset[0]
i1 = offset[1]; n1 = i1 + arr.shape[1] n0 = i0 + arr.shape[0]
i2 = offset[2]; n2 = i2 + arr.shape[2] i1 = offset[1]
i3 = offset[3]; n3 = i3 + arr.shape[3] n1 = i1 + arr.shape[1]
i4 = offset[4]; n4 = i4 + arr.shape[4] i2 = offset[2]
z[i0:n0,i1:n1,i2:n2,i3:n3,i4:n4] = arr n2 = i2 + arr.shape[2]
elif rank == 6: i3 = offset[3]
i0 = offset[0]; n0 = i0 + arr.shape[0] n3 = i3 + arr.shape[3]
i1 = offset[1]; n1 = i1 + arr.shape[1] z[i0:n0, i1:n1, i2:n2, i3:n3] = arr
i2 = offset[2]; n2 = i2 + arr.shape[2] elif rank == 5:
i3 = offset[3]; n3 = i3 + arr.shape[3] i0 = offset[0]
i4 = offset[4]; n4 = i4 + arr.shape[4] n0 = i0 + arr.shape[0]
i5 = offset[5]; n5 = i5 + arr.shape[5] i1 = offset[1]
z[i0:n0,i1:n1,i2:n2,i3:n3,i4:n4,i5:n5] = arr n1 = i1 + arr.shape[1]
else: i2 = offset[2]
raise ValueError("too many dimensions") n2 = i2 + arr.shape[2]
return z i3 = offset[3]
n3 = i3 + arr.shape[3]
i4 = offset[4]
n4 = i4 + arr.shape[4]
z[i0:n0, i1:n1, i2:n2, i3:n3, i4:n4] = arr
elif rank == 6:
i0 = offset[0]
n0 = i0 + arr.shape[0]
i1 = offset[1]
n1 = i1 + arr.shape[1]
i2 = offset[2]
n2 = i2 + arr.shape[2]
i3 = offset[3]
n3 = i3 + arr.shape[3]
i4 = offset[4]
n4 = i4 + arr.shape[4]
i5 = offset[5]
n5 = i5 + arr.shape[5]
z[i0:n0, i1:n1, i2:n2, i3:n3, i4:n4, i5:n5] = arr
else:
raise ValueError("too many dimensions")
return z
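A quick sketch of the centring behaviour, assuming zeropad from this module is in scope:

import numpy as np
kern = np.ones((3, 3))
padded = zeropad(kern, (7, 7))     # diff = (4, 4), offset = (2, 2)
assert padded.shape == (7, 7) and padded[2:5, 2:5].all() and padded.sum() == kern.sum()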
def gaussian2d(x, y, sigma): def gaussian2d(x, y, sigma):
return np.exp(-(x**2+y**2)/(2*sigma**2))/(2*np.pi*sigma**2) return np.exp(-(x**2+y**2)/(2*sigma**2))/(2*np.pi*sigma**2)
def gaussian_psf(FWHM=1., shape=(5,5)): def gaussian_psf(FWHM=1., shape=(5, 5)):
""" """
Define the gaussian Point-Spread-Function of chosen shape and FWHM. Define the gaussian Point-Spread-Function of chosen shape and FWHM.
---------- ----------
Inputs: Inputs:
FWHM : float, optional FWHM : float, optional
The Full Width at Half Maximum of the desired gaussian function for the The Full Width at Half Maximum of the desired gaussian function for the
PSF in pixel increments. PSF in pixel increments.
Defaults to 1. Defaults to 1.
shape : tuple, optional shape : tuple, optional
The shape of the PSF kernel. Must be of dimension 2. The shape of the PSF kernel. Must be of dimension 2.
Defaults to (5,5). Defaults to (5,5).
---------- ----------
Returns: Returns:
kernel : numpy.ndarray kernel : numpy.ndarray
Kernel containing the weights of the desired gaussian PSF. Kernel containing the weights of the desired gaussian PSF.
""" """
# Compute standard deviation from FWHM # Compute standard deviation from FWHM
stdev = FWHM/(2.*np.sqrt(2.*np.log(2.))) stdev = FWHM/(2.*np.sqrt(2.*np.log(2.)))
# Create kernel of desired shape # Create kernel of desired shape
x, y = np.meshgrid(np.arange(-shape[0]/2,shape[0]/2),np.arange(-shape[1]/2,shape[1]/2)) x, y = np.meshgrid(np.arange(-shape[0]/2, shape[0]/2), np.arange(-shape[1]/2, shape[1]/2))
kernel = gaussian2d(x, y, stdev) kernel = gaussian2d(x, y, stdev)
return kernel/kernel.sum() return kernel/kernel.sum()
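The conversion used above is stdev = FWHM / (2*sqrt(2*ln 2)), i.e. roughly FWHM/2.355; a short sketch, assuming gaussian_psf is in scope:

import numpy as np
kernel = gaussian_psf(FWHM=2., shape=(7, 7))
assert np.isclose(kernel.sum(), 1.)                    # kernel is normalised to unit sum
print(2. / (2. * np.sqrt(2. * np.log(2.))))            # ~0.849 px standard deviation for FWHM=2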
def from_file_psf(filename): def from_file_psf(filename):
""" """
Get the Point-Spread-Function from an external FITS file. Get the Point-Spread-Function from an external FITS file.
Such PSF can be generated using the TinyTim standalone program by STSCI. Such PSF can be generated using the TinyTim standalone program by STSCI.
See: See:
[1] https://www.stsci.edu/hst/instrumentation/focus-and-pointing/focus/tiny-tim-hst-psf-modeling [1] https://www.stsci.edu/hst/instrumentation/focus-and-pointing/focus/tiny-tim-hst-psf-modeling
[2] https://doi.org/10.1117/12.892762 [2] https://doi.org/10.1117/12.892762
---------- ----------
Inputs: Inputs:
filename : str filename : str
---------- ----------
    Returns: Returns:
    kernel : numpy.ndarray kernel : numpy.ndarray
Kernel containing the weights of the desired gaussian PSF. Kernel containing the weights of the desired gaussian PSF.
""" """
with fits.open(filename) as f: with fits.open(filename) as f:
psf = f[0].data psf = f[0].data
    if (type(psf) != np.ndarray) or len(psf) != 2: if not isinstance(psf, np.ndarray) or len(psf) != 2:
raise ValueError("Invalid PSF image in PrimaryHDU at {0:s}".format(filename)) raise ValueError("Invalid PSF image in PrimaryHDU at {0:s}".format(filename))
#Return the normalized Point Spread Function # Return the normalized Point Spread Function
kernel = psf/psf.max() kernel = psf/psf.max()
return kernel return kernel
def wiener(image, psf, alpha=0.1, clip=True): def wiener(image, psf, alpha=0.1, clip=True):
""" """
Implement the simplified Wiener filtering. Implement the simplified Wiener filtering.
---------- ----------
Inputs: Inputs:
image : numpy.ndarray image : numpy.ndarray
Input degraded image (can be N dimensional) of floats. Input degraded image (can be N dimensional) of floats.
psf : numpy.ndarray psf : numpy.ndarray
        The kernel of the point spread function. Must have shape less than or equal to The kernel of the point spread function. Must have shape less than or equal to
the image shape. If less, it will be zeropadded. the image shape. If less, it will be zeropadded.
alpha : float, optional alpha : float, optional
A parameter value for numerous deconvolution algorithms. A parameter value for numerous deconvolution algorithms.
Defaults to 0.1 Defaults to 0.1
clip : boolean, optional clip : boolean, optional
If true, pixel value of the result above 1 or under -1 are thresholded If true, pixel value of the result above 1 or under -1 are thresholded
for skimage pipeline compatibility. for skimage pipeline compatibility.
Defaults to True. Defaults to True.
---------- ----------
Returns: Returns:
im_deconv : ndarray im_deconv : ndarray
The deconvolved image. The deconvolved image.
""" """
float_type = np.promote_types(image.dtype, np.float32) float_type = np.promote_types(image.dtype, np.float32)
image = image.astype(float_type, copy=False) image = image.astype(float_type, copy=False)
psf = zeropad(psf.astype(float_type, copy=False), image.shape) psf = zeropad(psf.astype(float_type, copy=False), image.shape)
psf /= psf.sum() psf /= psf.sum()
im_deconv = image.copy() im_deconv = image.copy()
ft_y = np.fft.fftn(im_deconv) ft_y = np.fft.fftn(im_deconv)
ft_h = np.fft.fftn(np.fft.ifftshift(psf)) ft_h = np.fft.fftn(np.fft.ifftshift(psf))
ft_x = ft_h.conj()*ft_y / (abs2(ft_h) + alpha) ft_x = ft_h.conj()*ft_y / (abs2(ft_h) + alpha)
im_deconv = np.fft.ifftn(ft_x).real im_deconv = np.fft.ifftn(ft_x).real
if clip: if clip:
im_deconv[im_deconv > 1] = 1 im_deconv[im_deconv > 1] = 1
im_deconv[im_deconv < -1] = -1 im_deconv[im_deconv < -1] = -1
return im_deconv/im_deconv.max() return im_deconv/im_deconv.max()
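The core operation above is the regularised inverse filter X = conj(H)*Y / (|H|^2 + alpha) in Fourier space; a toy check on a blurred point source, assuming wiener and gaussian_psf are in scope:

import numpy as np
from scipy.signal import fftconvolve
truth = np.zeros((32, 32)); truth[16, 16] = 1.                # a single point source
psf = gaussian_psf(FWHM=2., shape=(9, 9))
blurred = fftconvolve(truth, psf, mode='same')
restored = wiener(blurred, psf, alpha=1e-3)                   # output is normalised to its maximum
print(np.unravel_index(restored.argmax(), restored.shape))    # expected near (16, 16)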
def van_cittert(image, psf, alpha=0.1, iterations=20, clip=True, filter_epsilon=None): def van_cittert(image, psf, alpha=0.1, iterations=20, clip=True, filter_epsilon=None):
""" """
    Van-Cittert deconvolution algorithm. Van-Cittert deconvolution algorithm.
---------- ----------
Inputs: Inputs:
    image : numpy.ndarray image : numpy.ndarray
Input degraded image (can be N dimensional) of floats between 0 and 1. Input degraded image (can be N dimensional) of floats between 0 and 1.
    psf : numpy.ndarray psf : numpy.ndarray
The point spread function. The point spread function.
alpha : float, optional alpha : float, optional
A weight parameter for the deconvolution step. A weight parameter for the deconvolution step.
iterations : int, optional iterations : int, optional
Number of iterations. This parameter plays the role of Number of iterations. This parameter plays the role of
regularisation. regularisation.
clip : boolean, optional clip : boolean, optional
True by default. If true, pixel value of the result above 1 or True by default. If true, pixel value of the result above 1 or
under -1 are thresholded for skimage pipeline compatibility. under -1 are thresholded for skimage pipeline compatibility.
filter_epsilon: float, optional filter_epsilon: float, optional
Value below which intermediate results become 0 to avoid division Value below which intermediate results become 0 to avoid division
by small numbers. by small numbers.
---------- ----------
Returns: Returns:
im_deconv : ndarray im_deconv : ndarray
The deconvolved image. The deconvolved image.
""" """
float_type = np.promote_types(image.dtype, np.float32) float_type = np.promote_types(image.dtype, np.float32)
image = image.astype(float_type, copy=False) image = image.astype(float_type, copy=False)
psf = psf.astype(float_type, copy=False) psf = psf.astype(float_type, copy=False)
psf /= psf.sum() psf /= psf.sum()
im_deconv = image.copy() im_deconv = image.copy()
for _ in range(iterations): for _ in range(iterations):
conv = convolve(im_deconv, psf, mode='same') conv = convolve(im_deconv, psf, mode='same')
if filter_epsilon: if filter_epsilon:
relative_blur = np.where(conv < filter_epsilon, 0, image - conv) relative_blur = np.where(conv < filter_epsilon, 0, image - conv)
else: else:
relative_blur = image - conv relative_blur = image - conv
im_deconv += alpha*relative_blur im_deconv += alpha*relative_blur
if clip: if clip:
im_deconv[im_deconv > 1] = 1 im_deconv[im_deconv > 1] = 1
im_deconv[im_deconv < -1] = -1 im_deconv[im_deconv < -1] = -1
return im_deconv return im_deconv
def richardson_lucy(image, psf, iterations=20, clip=True, filter_epsilon=None): def richardson_lucy(image, psf, iterations=20, clip=True, filter_epsilon=None):
""" """
Richardson-Lucy deconvolution algorithm. Richardson-Lucy deconvolution algorithm.
---------- ----------
Inputs: Inputs:
    image : numpy.ndarray image : numpy.ndarray
Input degraded image (can be N dimensional) of floats between 0 and 1. Input degraded image (can be N dimensional) of floats between 0 and 1.
    psf : numpy.ndarray psf : numpy.ndarray
The point spread function. The point spread function.
iterations : int, optional iterations : int, optional
Number of iterations. This parameter plays the role of Number of iterations. This parameter plays the role of
regularisation. regularisation.
clip : boolean, optional clip : boolean, optional
True by default. If true, pixel value of the result above 1 or True by default. If true, pixel value of the result above 1 or
under -1 are thresholded for skimage pipeline compatibility. under -1 are thresholded for skimage pipeline compatibility.
filter_epsilon: float, optional filter_epsilon: float, optional
Value below which intermediate results become 0 to avoid division Value below which intermediate results become 0 to avoid division
by small numbers. by small numbers.
---------- ----------
Returns: Returns:
im_deconv : ndarray im_deconv : ndarray
The deconvolved image. The deconvolved image.
---------- ----------
References References
[1] https://doi.org/10.1364/JOSA.62.000055 [1] https://doi.org/10.1364/JOSA.62.000055
[2] https://en.wikipedia.org/wiki/Richardson%E2%80%93Lucy_deconvolution [2] https://en.wikipedia.org/wiki/Richardson%E2%80%93Lucy_deconvolution
""" """
float_type = np.promote_types(image.dtype, np.float32) float_type = np.promote_types(image.dtype, np.float32)
image = image.astype(float_type, copy=False) image = image.astype(float_type, copy=False)
psf = psf.astype(float_type, copy=False) psf = psf.astype(float_type, copy=False)
psf /= psf.sum() psf /= psf.sum()
im_deconv = image.copy() im_deconv = image.copy()
psf_mirror = np.flip(psf) psf_mirror = np.flip(psf)
for _ in range(iterations): for _ in range(iterations):
conv = convolve(im_deconv, psf, mode='same') conv = convolve(im_deconv, psf, mode='same')
if filter_epsilon: if filter_epsilon:
relative_blur = np.where(conv < filter_epsilon, 0, image / conv) relative_blur = np.where(conv < filter_epsilon, 0, image / conv)
else: else:
relative_blur = image / conv relative_blur = image / conv
im_deconv *= convolve(relative_blur, psf_mirror, mode='same') im_deconv *= convolve(relative_blur, psf_mirror, mode='same')
if clip: if clip:
im_deconv[im_deconv > 1] = 1 im_deconv[im_deconv > 1] = 1
im_deconv[im_deconv < -1] = -1 im_deconv[im_deconv < -1] = -1
return im_deconv return im_deconv
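A minimal sanity check of the multiplicative Richardson-Lucy update, assuming richardson_lucy and gaussian_psf are in scope:

import numpy as np
from scipy.signal import fftconvolve
truth = np.zeros((32, 32)); truth[10, 20] = 1.
psf = gaussian_psf(FWHM=2., shape=(9, 9))
blurred = fftconvolve(truth, psf, mode='same')
restored = richardson_lucy(blurred, psf, iterations=30, filter_epsilon=1e-12)
print(np.unravel_index(restored.argmax(), restored.shape))    # the peak should sharpen back towards (10, 20)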
def one_step_gradient(image, psf, iterations=20, clip=True, filter_epsilon=None): def one_step_gradient(image, psf, iterations=20, clip=True, filter_epsilon=None):
""" """
One-step gradient deconvolution algorithm. One-step gradient deconvolution algorithm.
---------- ----------
Inputs: Inputs:
    image : numpy.ndarray image : numpy.ndarray
Input degraded image (can be N dimensional) of floats between 0 and 1. Input degraded image (can be N dimensional) of floats between 0 and 1.
    psf : numpy.ndarray psf : numpy.ndarray
The point spread function. The point spread function.
iterations : int, optional iterations : int, optional
Number of iterations. This parameter plays the role of Number of iterations. This parameter plays the role of
regularisation. regularisation.
clip : boolean, optional clip : boolean, optional
True by default. If true, pixel value of the result above 1 or True by default. If true, pixel value of the result above 1 or
under -1 are thresholded for skimage pipeline compatibility. under -1 are thresholded for skimage pipeline compatibility.
filter_epsilon: float, optional filter_epsilon: float, optional
Value below which intermediate results become 0 to avoid division Value below which intermediate results become 0 to avoid division
by small numbers. by small numbers.
---------- ----------
Returns: Returns:
im_deconv : ndarray im_deconv : ndarray
The deconvolved image. The deconvolved image.
""" """
float_type = np.promote_types(image.dtype, np.float32) float_type = np.promote_types(image.dtype, np.float32)
image = image.astype(float_type, copy=False) image = image.astype(float_type, copy=False)
psf = psf.astype(float_type, copy=False) psf = psf.astype(float_type, copy=False)
psf /= psf.sum() psf /= psf.sum()
im_deconv = image.copy() im_deconv = image.copy()
psf_mirror = np.flip(psf) psf_mirror = np.flip(psf)
for _ in range(iterations): for _ in range(iterations):
conv = convolve(im_deconv, psf, mode='same') conv = convolve(im_deconv, psf, mode='same')
if filter_epsilon: if filter_epsilon:
relative_blur = np.where(conv < filter_epsilon, 0, image - conv) relative_blur = np.where(conv < filter_epsilon, 0, image - conv)
else: else:
relative_blur = image - conv relative_blur = image - conv
im_deconv += convolve(relative_blur, psf_mirror, mode='same') im_deconv += convolve(relative_blur, psf_mirror, mode='same')
if clip: if clip:
im_deconv[im_deconv > 1] = 1 im_deconv[im_deconv > 1] = 1
im_deconv[im_deconv < -1] = -1 im_deconv[im_deconv < -1] = -1
return im_deconv return im_deconv
def conjgrad(image, psf, alpha=0.1, error=None, iterations=20): def conjgrad(image, psf, alpha=0.1, error=None, iterations=20):
""" """
Implement the Conjugate Gradient algorithm. Implement the Conjugate Gradient algorithm.
---------- ----------
Inputs: Inputs:
image : numpy.ndarray image : numpy.ndarray
Input degraded image (can be N dimensional) of floats. Input degraded image (can be N dimensional) of floats.
psf : numpy.ndarray psf : numpy.ndarray
        The kernel of the point spread function. Must have shape less than or equal to The kernel of the point spread function. Must have shape less than or equal to
the image shape. If less, it will be zeropadded. the image shape. If less, it will be zeropadded.
alpha : float, optional alpha : float, optional
A weight parameter for the regularisation matrix. A weight parameter for the regularisation matrix.
Defaults to 0.1 Defaults to 0.1
error : numpy.ndarray, optional error : numpy.ndarray, optional
        Known background noise on the input image. Will be used for weighted Known background noise on the input image. Will be used for weighted
deconvolution. If None, all weights will be set to 1. deconvolution. If None, all weights will be set to 1.
Defaults to None. Defaults to None.
iterations : int, optional iterations : int, optional
Number of iterations. This parameter plays the role of Number of iterations. This parameter plays the role of
regularisation. regularisation.
Defaults to 20. Defaults to 20.
---------- ----------
Returns: Returns:
im_deconv : ndarray im_deconv : ndarray
The deconvolved image. The deconvolved image.
""" """
float_type = np.promote_types(image.dtype, np.float32) float_type = np.promote_types(image.dtype, np.float32)
image = image.astype(float_type, copy=False) image = image.astype(float_type, copy=False)
psf = psf.astype(float_type, copy=False) psf = psf.astype(float_type, copy=False)
psf /= psf.sum() psf /= psf.sum()
    # A.x = b with A = HtWH + aDtD and b = HtWy # A.x = b with A = HtWH + aDtD and b = HtWy
#Define ft_h : the zeropadded and shifted Fourier transform of the PSF # Define ft_h : the zeropadded and shifted Fourier transform of the PSF
ft_h = np.fft.fftn(np.fft.ifftshift(zeropad(psf,image.shape))) ft_h = np.fft.fftn(np.fft.ifftshift(zeropad(psf, image.shape)))
#Define weights as normalized signal to noise ratio # Define weights as normalized signal to noise ratio
if error is None: if error is None:
wgt = np.ones(image.shape) wgt = np.ones(image.shape)
else: else:
wgt = image/error wgt = image/error
wgt /= wgt.max() wgt /= wgt.max()
def W(x): def W(x):
"""Define W operator : apply weights""" """Define W operator : apply weights"""
return wgt*x return wgt*x
def H(x): def H(x):
"""Define H operator : convolution with PSF""" """Define H operator : convolution with PSF"""
return np.fft.ifftn(ft_h*np.fft.fftn(x)).real return np.fft.ifftn(ft_h*np.fft.fftn(x)).real
def Ht(x): def Ht(x):
"""Define Ht operator : transpose of H""" """Define Ht operator : transpose of H"""
return np.fft.ifftn(ft_h.conj()*np.fft.fftn(x)).real return np.fft.ifftn(ft_h.conj()*np.fft.fftn(x)).real
def DtD(x): def DtD(x):
"""Returns the result of D'.D.x where D is a (multi-dimensional) """Returns the result of D'.D.x where D is a (multi-dimensional)
finite difference operator and D' is its transpose.""" finite difference operator and D' is its transpose."""
dims = x.shape dims = x.shape
r = np.zeros(dims, dtype=x.dtype) # to store the result r = np.zeros(dims, dtype=x.dtype) # to store the result
rank = x.ndim # number of dimensions rank = x.ndim # number of dimensions
if rank == 0: return r if rank == 0:
if dims[0] >= 2: return r
dx = x[1:-1,...] - x[0:-2,...] if dims[0] >= 2:
r[1:-1,...] += dx dx = x[1:-1, ...] - x[0:-2, ...]
r[0:-2,...] -= dx r[1:-1, ...] += dx
if rank == 1: return r r[0:-2, ...] -= dx
if dims[1] >= 2: if rank == 1:
dx = x[:,1:-1,...] - x[:,0:-2,...] return r
r[:,1:-1,...] += dx if dims[1] >= 2:
r[:,0:-2,...] -= dx dx = x[:, 1:-1, ...] - x[:, 0:-2, ...]
if rank == 2: return r r[:, 1:-1, ...] += dx
if dims[2] >= 2: r[:, 0:-2, ...] -= dx
dx = x[:,:,1:-1,...] - x[:,:,0:-2,...] if rank == 2:
r[:,:,1:-1,...] += dx return r
r[:,:,0:-2,...] -= dx if dims[2] >= 2:
if rank == 3: return r dx = x[:, :, 1:-1, ...] - x[:, :, 0:-2, ...]
if dims[3] >= 2: r[:, :, 1:-1, ...] += dx
dx = x[:,:,:,1:-1,...] - x[:,:,:,0:-2,...] r[:, :, 0:-2, ...] -= dx
r[:,:,:,1:-1,...] += dx if rank == 3:
r[:,:,:,0:-2,...] -= dx return r
if rank == 4: return r if dims[3] >= 2:
if dims[4] >= 2: dx = x[:, :, :, 1:-1, ...] - x[:, :, :, 0:-2, ...]
dx = x[:,:,:,:,1:-1,...] - x[:,:,:,:,0:-2,...] r[:, :, :, 1:-1, ...] += dx
r[:,:,:,:,1:-1,...] += dx r[:, :, :, 0:-2, ...] -= dx
r[:,:,:,:,0:-2,...] -= dx if rank == 4:
if rank == 5: return r return r
raise ValueError("too many dimensions") if dims[4] >= 2:
dx = x[:, :, :, :, 1:-1, ...] - x[:, :, :, :, 0:-2, ...]
r[:, :, :, :, 1:-1, ...] += dx
r[:, :, :, :, 0:-2, ...] -= dx
if rank == 5:
return r
raise ValueError("too many dimensions")
def A(x): def A(x):
"""Define symetric positive semi definite operator A""" """Define symetric positive semi definite operator A"""
return Ht(W(H(x)))+alpha*DtD(x) return Ht(W(H(x)))+alpha*DtD(x)
#Define obtained vector A.x = b # Define obtained vector A.x = b
b = Ht(W(image)) b = Ht(W(image))
def inner(x,y): def inner(x, y):
"""Compute inner product of X and Y regardless their shapes """Compute inner product of X and Y regardless their shapes
(their number of elements must however match).""" (their number of elements must however match)."""
return np.inner(x.ravel(),y.ravel()) return np.inner(x.ravel(), y.ravel())
# Compute initial residuals. # Compute initial residuals.
r = np.copy(b) r = np.copy(b)
x = np.zeros(b.shape, dtype=b.dtype) x = np.zeros(b.shape, dtype=b.dtype)
rho = inner(r,r) rho = inner(r, r)
epsilon = np.max([0., 1e-5*np.sqrt(rho)]) epsilon = np.max([0., 1e-5*np.sqrt(rho)])
# Conjugate gradient iterations. # Conjugate gradient iterations.
beta = 0.0 beta = 0.0
k = 0 k = 0
while (k <= iterations) and (np.sqrt(rho) > epsilon): while (k <= iterations) and (np.sqrt(rho) > epsilon):
if np.sqrt(rho) <= epsilon: if np.sqrt(rho) <= epsilon:
print("Converged before maximum iteration.") print("Converged before maximum iteration.")
break break
k += 1 k += 1
if k > iterations: if k > iterations:
print("Didn't converge before maximum iteration.") print("Didn't converge before maximum iteration.")
break break
# Next search direction. # Next search direction.
if beta == 0.0: if beta == 0.0:
p = r p = r
else: else:
p = r + beta*p p = r + beta*p
# Make optimal step along search direction. # Make optimal step along search direction.
q = A(p) q = A(p)
gamma = inner(p, q) gamma = inner(p, q)
if gamma <= 0.0: if gamma <= 0.0:
raise ValueError("Operator A is not positive definite") raise ValueError("Operator A is not positive definite")
alpha = rho/gamma alpha = rho/gamma
x += alpha*p x += alpha*p
r -= alpha*q r -= alpha*q
rho_prev, rho = rho, inner(r,r) rho_prev, rho = rho, inner(r, r)
beta = rho/rho_prev beta = rho/rho_prev
#Return normalized solution # Return normalized solution
im_deconv = x/x.max() im_deconv = x/x.max()
return im_deconv return im_deconv
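The solver above addresses the normal equations A.x = b with A = Ht.W.H + alpha*Dt.D and b = Ht.W.y; a short sketch with uniform weights, assuming conjgrad and gaussian_psf are in scope:

import numpy as np
from scipy.signal import fftconvolve
truth = np.zeros((32, 32)); truth[16, 12] = 1.
psf = gaussian_psf(FWHM=2., shape=(9, 9))
blurred = fftconvolve(truth, psf, mode='same')
restored = conjgrad(blurred, psf, alpha=1e-3, iterations=50)   # error=None gives unit weights; result normalised to max 1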
def deconvolve_im(image, psf, alpha=0.1, error=None, iterations=20, clip=True, def deconvolve_im(image, psf, alpha=0.1, error=None, iterations=20, clip=True,
filter_epsilon=None, algo='richardson'): filter_epsilon=None, algo='richardson'):
""" """
Prepare an image for deconvolution using a chosen algorithm and return Prepare an image for deconvolution using a chosen algorithm and return
results. results.
---------- ----------
Inputs: Inputs:
image : numpy.ndarray image : numpy.ndarray
Input degraded image (can be N dimensional) of floats. Input degraded image (can be N dimensional) of floats.
psf : numpy.ndarray psf : numpy.ndarray
        The kernel of the point spread function. Must have shape less than or equal to The kernel of the point spread function. Must have shape less than or equal to
the image shape. If less, it will be zeropadded. the image shape. If less, it will be zeropadded.
alpha : float, optional alpha : float, optional
A parameter value for numerous deconvolution algorithms. A parameter value for numerous deconvolution algorithms.
Defaults to 0.1 Defaults to 0.1
error : numpy.ndarray, optional error : numpy.ndarray, optional
        Known background noise on the input image. Will be used for weighted Known background noise on the input image. Will be used for weighted
deconvolution. If None, all weights will be set to 1. deconvolution. If None, all weights will be set to 1.
Defaults to None. Defaults to None.
iterations : int, optional iterations : int, optional
Number of iterations. This parameter plays the role of Number of iterations. This parameter plays the role of
regularisation. regularisation.
Defaults to 20. Defaults to 20.
clip : boolean, optional clip : boolean, optional
If true, pixel value of the result above 1 or under -1 are thresholded If true, pixel value of the result above 1 or under -1 are thresholded
for skimage pipeline compatibility. for skimage pipeline compatibility.
Defaults to True. Defaults to True.
filter_epsilon: float, optional filter_epsilon: float, optional
Value below which intermediate results become 0 to avoid division Value below which intermediate results become 0 to avoid division
by small numbers. by small numbers.
Defaults to None. Defaults to None.
algo : str, optional algo : str, optional
Name of the deconvolution algorithm that will be used. Implemented Name of the deconvolution algorithm that will be used. Implemented
algorithms are the following : 'Wiener', 'Van-Cittert', 'One Step Gradient', algorithms are the following : 'Wiener', 'Van-Cittert', 'One Step Gradient',
'Conjugate Gradient' and 'Richardson-Lucy'. 'Conjugate Gradient' and 'Richardson-Lucy'.
Defaults to 'Richardson-Lucy'. Defaults to 'Richardson-Lucy'.
---------- ----------
Returns: Returns:
im_deconv : ndarray im_deconv : ndarray
The deconvolved image. The deconvolved image.
""" """
# Normalize image to highest pixel value # Normalize image to highest pixel value
pxmax = image[np.isfinite(image)].max() pxmax = image[np.isfinite(image)].max()
if pxmax == 0.: if pxmax == 0.:
raise ValueError("Invalid image") raise ValueError("Invalid image")
norm_image = image/pxmax norm_image = image/pxmax
# Deconvolve normalized image # Deconvolve normalized image
if algo.lower() in ['wiener','wiener simple']: if algo.lower() in ['wiener', 'wiener simple']:
norm_deconv = wiener(image=norm_image, psf=psf, alpha=alpha, clip=clip) norm_deconv = wiener(image=norm_image, psf=psf, alpha=alpha, clip=clip)
elif algo.lower() in ['van-cittert','vancittert','cittert']: elif algo.lower() in ['van-cittert', 'vancittert', 'cittert']:
norm_deconv = van_cittert(image=norm_image, psf=psf, alpha=alpha, norm_deconv = van_cittert(image=norm_image, psf=psf, alpha=alpha,
iterations=iterations, clip=clip, filter_epsilon=filter_epsilon) iterations=iterations, clip=clip, filter_epsilon=filter_epsilon)
elif algo.lower() in ['1grad','one_step_grad','one step grad']: elif algo.lower() in ['1grad', 'one_step_grad', 'one step grad']:
norm_deconv = one_step_gradient(image=norm_image, psf=psf, norm_deconv = one_step_gradient(image=norm_image, psf=psf,
iterations=iterations, clip=clip, filter_epsilon=filter_epsilon) iterations=iterations, clip=clip, filter_epsilon=filter_epsilon)
elif algo.lower() in ['conjgrad','conj_grad','conjugate gradient']: elif algo.lower() in ['conjgrad', 'conj_grad', 'conjugate gradient']:
        norm_deconv = conjgrad(image=norm_image, psf=psf, alpha=alpha, norm_deconv = conjgrad(image=norm_image, psf=psf, alpha=alpha,
error=error, iterations=iterations) error=error, iterations=iterations)
else: # Defaults to Richardson-Lucy else: # Defaults to Richardson-Lucy
norm_deconv = richardson_lucy(image=norm_image, psf=psf, norm_deconv = richardson_lucy(image=norm_image, psf=psf,
iterations=iterations, clip=clip, filter_epsilon=filter_epsilon) iterations=iterations, clip=clip, filter_epsilon=filter_epsilon)
# Output deconvolved image with original pxmax value # Output deconvolved image with original pxmax value
im_deconv = pxmax*norm_deconv im_deconv = pxmax*norm_deconv
return im_deconv return im_deconv
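A hedged end-to-end usage sketch of the dispatcher above, on stand-in data (assumes deconvolve_im and gaussian_psf are importable from this module):

import numpy as np
image = np.random.default_rng(1).poisson(50., (64, 64)).astype(float)   # stand-in observation
psf = gaussian_psf(FWHM=2., shape=(9, 9))
deconv_rl = deconvolve_im(image, psf, iterations=20, algo='richardson')
deconv_cg = deconvolve_im(image, psf, alpha=0.1, error=np.sqrt(image), iterations=20, algo='conjgrad')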

View File

@@ -15,9 +15,8 @@ import numpy as np
from os.path import join as path_join from os.path import join as path_join
from astropy.io import fits from astropy.io import fits
from astropy.wcs import WCS from astropy.wcs import WCS
from lib.convex_hull import image_hull, clean_ROI from lib.convex_hull import clean_ROI
from lib.plots import princ_angle from lib.plots import princ_angle
import matplotlib.pyplot as plt
def get_obs_data(infiles, data_folder="", compute_flux=False): def get_obs_data(infiles, data_folder="", compute_flux=False):
@@ -42,10 +41,10 @@ def get_obs_data(infiles, data_folder="", compute_flux=False):
""" """
data_array, headers = [], [] data_array, headers = [], []
for i in range(len(infiles)): for i in range(len(infiles)):
with fits.open(path_join(data_folder,infiles[i])) as f: with fits.open(path_join(data_folder, infiles[i])) as f:
headers.append(f[0].header) headers.append(f[0].header)
data_array.append(f[0].data) data_array.append(f[0].data)
data_array = np.array(data_array,dtype=np.double) data_array = np.array(data_array, dtype=np.double)
# Prevent negative count value in imported data # Prevent negative count value in imported data
for i in range(len(data_array)): for i in range(len(data_array)):
@@ -57,14 +56,14 @@ def get_obs_data(infiles, data_folder="", compute_flux=False):
if new_wcs.wcs.has_cd() or (new_wcs.wcs.cdelt[:2] == np.array([1., 1.])).all(): if new_wcs.wcs.has_cd() or (new_wcs.wcs.cdelt[:2] == np.array([1., 1.])).all():
# Update WCS with relevant information # Update WCS with relevant information
if new_wcs.wcs.has_cd(): if new_wcs.wcs.has_cd():
old_cd = new_wcs.wcs.cd[:2,:2] old_cd = new_wcs.wcs.cd[:2, :2]
del new_wcs.wcs.cd del new_wcs.wcs.cd
keys = list(new_wcs.to_header().keys())+['CD1_1','CD1_2','CD2_1','CD2_2'] keys = list(new_wcs.to_header().keys())+['CD1_1', 'CD1_2', 'CD2_1', 'CD2_2']
for key in keys: for key in keys:
header.remove(key, ignore_missing=True) header.remove(key, ignore_missing=True)
new_cdelt = np.linalg.eig(old_cd)[0] new_cdelt = np.linalg.eig(old_cd)[0]
elif (new_wcs.wcs.cdelt == np.array([1., 1.])).all() and \ elif (new_wcs.wcs.cdelt == np.array([1., 1.])).all() and \
(new_wcs.array_shape in [(512, 512),(1024,512),(512,1024),(1024,1024)]): (new_wcs.array_shape in [(512, 512), (1024, 512), (512, 1024), (1024, 1024)]):
old_cd = new_wcs.wcs.pc old_cd = new_wcs.wcs.pc
new_wcs.wcs.pc = np.dot(old_cd, np.diag(1./new_cdelt)) new_wcs.wcs.pc = np.dot(old_cd, np.diag(1./new_cdelt))
new_wcs.wcs.cdelt = new_cdelt new_wcs.wcs.cdelt = new_cdelt
@@ -73,14 +72,14 @@ def get_obs_data(infiles, data_folder="", compute_flux=False):
header['orientat'] = princ_angle(float(header['orientat'])) header['orientat'] = princ_angle(float(header['orientat']))
# force WCS for POL60 to have same pixel size as POL0 and POL120 # force WCS for POL60 to have same pixel size as POL0 and POL120
is_pol60 = np.array([head['filtnam1'].lower()=='pol60' for head in headers],dtype=bool) is_pol60 = np.array([head['filtnam1'].lower() == 'pol60' for head in headers], dtype=bool)
cdelt = np.round(np.array([WCS(head).wcs.cdelt for head in headers]),14) cdelt = np.round(np.array([WCS(head).wcs.cdelt for head in headers]), 14)
if np.unique(cdelt[np.logical_not(is_pol60)],axis=0).size!=2: if np.unique(cdelt[np.logical_not(is_pol60)], axis=0).size != 2:
print(np.unique(cdelt[np.logical_not(is_pol60)],axis=0)) print(np.unique(cdelt[np.logical_not(is_pol60)], axis=0))
raise ValueError("Not all images have same pixel size") raise ValueError("Not all images have same pixel size")
else: else:
for i in np.arange(len(headers))[is_pol60]: for i in np.arange(len(headers))[is_pol60]:
headers[i]['cdelt1'],headers[i]['cdelt2'] = np.unique(cdelt[np.logical_not(is_pol60)],axis=0)[0] headers[i]['cdelt1'], headers[i]['cdelt2'] = np.unique(cdelt[np.logical_not(is_pol60)], axis=0)[0]
if compute_flux: if compute_flux:
for i in range(len(infiles)): for i in range(len(infiles)):
@@ -91,8 +90,8 @@ def get_obs_data(infiles, data_folder="", compute_flux=False):
def save_Stokes(I_stokes, Q_stokes, U_stokes, Stokes_cov, P, debiased_P, s_P, def save_Stokes(I_stokes, Q_stokes, U_stokes, Stokes_cov, P, debiased_P, s_P,
s_P_P, PA, s_PA, s_PA_P, headers, data_mask, filename, data_folder="", s_P_P, PA, s_PA, s_PA_P, headers, data_mask, filename, data_folder="",
return_hdul=False): return_hdul=False):
""" """
Save computed polarimetry parameters to a single fits file, Save computed polarimetry parameters to a single fits file,
updating header accordingly. updating header accordingly.
@@ -127,12 +126,12 @@ def save_Stokes(I_stokes, Q_stokes, U_stokes, Stokes_cov, P, debiased_P, s_P,
        information (WCS, orientation, data_type). information (WCS, orientation, data_type).
Only returned if return_hdul is True. Only returned if return_hdul is True.
""" """
#Create new WCS object given the modified images # Create new WCS object given the modified images
ref_header = headers[0] ref_header = headers[0]
exp_tot = np.array([header['exptime'] for header in headers]).sum() exp_tot = np.array([header['exptime'] for header in headers]).sum()
new_wcs = WCS(ref_header).deepcopy() new_wcs = WCS(ref_header).deepcopy()
if data_mask.shape != (1,1): if data_mask.shape != (1, 1):
vertex = clean_ROI(data_mask) vertex = clean_ROI(data_mask)
shape = vertex[1::2]-vertex[0::2] shape = vertex[1::2]-vertex[0::2]
new_wcs.array_shape = shape new_wcs.array_shape = shape
@@ -153,56 +152,56 @@ def save_Stokes(I_stokes, Q_stokes, U_stokes, Stokes_cov, P, debiased_P, s_P,
header['PA_int'] = (ref_header['PA_int'], 'Integrated polarisation angle') header['PA_int'] = (ref_header['PA_int'], 'Integrated polarisation angle')
header['PA_int_err'] = (ref_header['PA_int_err'], 'Integrated polarisation angle error') header['PA_int_err'] = (ref_header['PA_int_err'], 'Integrated polarisation angle error')
#Crop Data to mask # Crop Data to mask
if data_mask.shape != (1,1): if data_mask.shape != (1, 1):
I_stokes = I_stokes[vertex[2]:vertex[3],vertex[0]:vertex[1]] I_stokes = I_stokes[vertex[2]:vertex[3], vertex[0]:vertex[1]]
Q_stokes = Q_stokes[vertex[2]:vertex[3],vertex[0]:vertex[1]] Q_stokes = Q_stokes[vertex[2]:vertex[3], vertex[0]:vertex[1]]
U_stokes = U_stokes[vertex[2]:vertex[3],vertex[0]:vertex[1]] U_stokes = U_stokes[vertex[2]:vertex[3], vertex[0]:vertex[1]]
P = P[vertex[2]:vertex[3],vertex[0]:vertex[1]] P = P[vertex[2]:vertex[3], vertex[0]:vertex[1]]
debiased_P = debiased_P[vertex[2]:vertex[3],vertex[0]:vertex[1]] debiased_P = debiased_P[vertex[2]:vertex[3], vertex[0]:vertex[1]]
s_P = s_P[vertex[2]:vertex[3],vertex[0]:vertex[1]] s_P = s_P[vertex[2]:vertex[3], vertex[0]:vertex[1]]
s_P_P = s_P_P[vertex[2]:vertex[3],vertex[0]:vertex[1]] s_P_P = s_P_P[vertex[2]:vertex[3], vertex[0]:vertex[1]]
PA = PA[vertex[2]:vertex[3],vertex[0]:vertex[1]] PA = PA[vertex[2]:vertex[3], vertex[0]:vertex[1]]
s_PA = s_PA[vertex[2]:vertex[3],vertex[0]:vertex[1]] s_PA = s_PA[vertex[2]:vertex[3], vertex[0]:vertex[1]]
s_PA_P = s_PA_P[vertex[2]:vertex[3],vertex[0]:vertex[1]] s_PA_P = s_PA_P[vertex[2]:vertex[3], vertex[0]:vertex[1]]
new_Stokes_cov = np.zeros((*Stokes_cov.shape[:-2],*shape[::-1])) new_Stokes_cov = np.zeros((*Stokes_cov.shape[:-2], *shape[::-1]))
for i in range(3): for i in range(3):
for j in range(3): for j in range(3):
Stokes_cov[i,j][(1-data_mask).astype(bool)] = 0. Stokes_cov[i, j][(1-data_mask).astype(bool)] = 0.
new_Stokes_cov[i,j] = Stokes_cov[i,j][vertex[2]:vertex[3],vertex[0]:vertex[1]] new_Stokes_cov[i, j] = Stokes_cov[i, j][vertex[2]:vertex[3], vertex[0]:vertex[1]]
Stokes_cov = new_Stokes_cov Stokes_cov = new_Stokes_cov
data_mask = data_mask[vertex[2]:vertex[3],vertex[0]:vertex[1]] data_mask = data_mask[vertex[2]:vertex[3], vertex[0]:vertex[1]]
data_mask = data_mask.astype(float, copy=False) data_mask = data_mask.astype(float, copy=False)
#Create HDUList object # Create HDUList object
hdul = fits.HDUList([]) hdul = fits.HDUList([])
#Add I_stokes as PrimaryHDU # Add I_stokes as PrimaryHDU
header['datatype'] = ('I_stokes', 'type of data stored in the HDU') header['datatype'] = ('I_stokes', 'type of data stored in the HDU')
I_stokes[(1-data_mask).astype(bool)] = 0. I_stokes[(1-data_mask).astype(bool)] = 0.
primary_hdu = fits.PrimaryHDU(data=I_stokes, header=header) primary_hdu = fits.PrimaryHDU(data=I_stokes, header=header)
primary_hdu.name = 'I_stokes' primary_hdu.name = 'I_stokes'
hdul.append(primary_hdu) hdul.append(primary_hdu)
#Add Q, U, Stokes_cov, P, s_P, PA, s_PA to the HDUList # Add Q, U, Stokes_cov, P, s_P, PA, s_PA to the HDUList
for data, name in [[Q_stokes,'Q_stokes'],[U_stokes,'U_stokes'], for data, name in [[Q_stokes, 'Q_stokes'], [U_stokes, 'U_stokes'],
[Stokes_cov,'IQU_cov_matrix'],[P, 'Pol_deg'], [Stokes_cov, 'IQU_cov_matrix'], [P, 'Pol_deg'],
[debiased_P, 'Pol_deg_debiased'],[s_P, 'Pol_deg_err'], [debiased_P, 'Pol_deg_debiased'], [s_P, 'Pol_deg_err'],
[s_P_P, 'Pol_deg_err_Poisson_noise'],[PA, 'Pol_ang'], [s_P_P, 'Pol_deg_err_Poisson_noise'], [PA, 'Pol_ang'],
[s_PA, 'Pol_ang_err'],[s_PA_P, 'Pol_ang_err_Poisson_noise'], [s_PA, 'Pol_ang_err'], [s_PA_P, 'Pol_ang_err_Poisson_noise'],
[data_mask, 'Data_mask']]: [data_mask, 'Data_mask']]:
hdu_header = header.copy() hdu_header = header.copy()
hdu_header['datatype'] = name hdu_header['datatype'] = name
if not name == 'IQU_cov_matrix': if not name == 'IQU_cov_matrix':
data[(1-data_mask).astype(bool)] = 0. data[(1-data_mask).astype(bool)] = 0.
hdu = fits.ImageHDU(data=data,header=hdu_header) hdu = fits.ImageHDU(data=data, header=hdu_header)
hdu.name = name hdu.name = name
hdul.append(hdu) hdul.append(hdu)
#Save fits file to designated filepath # Save fits file to designated filepath
hdul.writeto(path_join(data_folder,filename+".fits"), overwrite=True) hdul.writeto(path_join(data_folder, filename+".fits"), overwrite=True)
if return_hdul: if return_hdul:
return hdul return hdul
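Read-back sketch using the extension names written above; the example path is the Stokes file opened later in this commit:

from astropy.io import fits
with fits.open("./data/IC5063/5918/IC5063_FOC_b0.10arcsec_c0.20arcsec.fits") as hdul:
    I_stokes = hdul['I_stokes'].data
    P = hdul['Pol_deg_debiased'].data
    s_P = hdul['Pol_deg_err'].data
    PA = hdul['Pol_ang'].data
    cov = hdul['IQU_cov_matrix'].data      # covariance cube with leading (3, 3) axes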

File diff suppressed because it is too large

View File

@@ -17,17 +17,20 @@ def divide_proposal(products):
    Divide observations into proposals by time or filter Divide observations into proposals by time or filter
""" """
for pid in np.unique(products['Proposal ID']): for pid in np.unique(products['Proposal ID']):
obs = products[products['Proposal ID']==pid].copy() obs = products[products['Proposal ID'] == pid].copy()
close_date = np.unique(np.array([TimeDelta(np.abs(Time(obs['Start']).unix-date.unix),format='sec') < 7.*u.d for date in obs['Start']], dtype=bool), axis=0) close_date = np.unique(np.array([TimeDelta(np.abs(Time(obs['Start']).unix-date.unix), format='sec')
if len(close_date)>1: < 7.*u.d for date in obs['Start']], dtype=bool), axis=0)
if len(close_date) > 1:
for date in close_date: for date in close_date:
products['Proposal ID'][np.any([products['Dataset']==dataset for dataset in obs['Dataset'][date]],axis=0)] = "_".join([obs['Proposal ID'][date][0],str(obs['Start'][date][0])[:10]]) products['Proposal ID'][np.any([products['Dataset'] == dataset for dataset in obs['Dataset'][date]], axis=0)
] = "_".join([obs['Proposal ID'][date][0], str(obs['Start'][date][0])[:10]])
for pid in np.unique(products['Proposal ID']): for pid in np.unique(products['Proposal ID']):
obs = products[products['Proposal ID']==pid].copy() obs = products[products['Proposal ID'] == pid].copy()
same_filt = np.unique(np.array(np.sum([obs['Filters'][:,1:]==filt[1:] for filt in obs['Filters']],axis=2)<3,dtype=bool),axis=0) same_filt = np.unique(np.array(np.sum([obs['Filters'][:, 1:] == filt[1:] for filt in obs['Filters']], axis=2) < 3, dtype=bool), axis=0)
if len(same_filt)>1: if len(same_filt) > 1:
for filt in same_filt: for filt in same_filt:
products['Proposal ID'][np.any([products['Dataset']==dataset for dataset in obs['Dataset'][filt]],axis=0)] = "_".join([obs['Proposal ID'][filt][0],"_".join([fi for fi in obs['Filters'][filt][0][1:] if fi[:-1]!="CLEAR"])]) products['Proposal ID'][np.any([products['Dataset'] == dataset for dataset in obs['Dataset'][filt]], axis=0)] = "_".join(
[obs['Proposal ID'][filt][0], "_".join([fi for fi in obs['Filters'][filt][0][1:] if fi[:-1] != "CLEAR"])])
return products return products
@@ -78,22 +81,22 @@ def get_product_list(target=None, proposal_id=None):
for c, n_c in zip(select_cols, cols): for c, n_c in zip(select_cols, cols):
results.rename_column(c, n_c) results.rename_column(c, n_c)
results['Proposal ID'] = Column(results['Proposal ID'],dtype='U35') results['Proposal ID'] = Column(results['Proposal ID'], dtype='U35')
results['Filters'] = Column(np.array([filt.split(";") for filt in results['Filters']],dtype=str)) results['Filters'] = Column(np.array([filt.split(";") for filt in results['Filters']], dtype=str))
results['Start'] = Column(Time(results['Start'])) results['Start'] = Column(Time(results['Start']))
results['Stop'] = Column(Time(results['Stop'])) results['Stop'] = Column(Time(results['Stop']))
results = divide_proposal(results) results = divide_proposal(results)
obs = results.copy() obs = results.copy()
    ### Remove single observations for which a F1ND filter is used # Remove single observations for which a F1ND filter is used
to_remove=[] to_remove = []
for i in range(len(obs)): for i in range(len(obs)):
if "F1ND" in obs[i]['Filters']: if "F1ND" in obs[i]['Filters']:
to_remove.append(i) to_remove.append(i)
obs.remove_rows(to_remove) obs.remove_rows(to_remove)
### Remove observations for which a polarization filter is missing # Remove observations for which a polarization filter is missing
polfilt = {"POL0":0,"POL60":1,"POL120":2} polfilt = {"POL0": 0, "POL60": 1, "POL120": 2}
for pid in np.unique(obs['Proposal ID']): for pid in np.unique(obs['Proposal ID']):
used_pol = np.zeros(3) used_pol = np.zeros(3)
for dataset in obs[obs['Proposal ID'] == pid]: for dataset in obs[obs['Proposal ID'] == pid]:
@@ -102,26 +105,26 @@ def get_product_list(target=None, proposal_id=None):
obs.remove_rows(np.arange(len(obs))[obs['Proposal ID'] == pid]) obs.remove_rows(np.arange(len(obs))[obs['Proposal ID'] == pid])
tab = unique(obs, ['Target name', 'Proposal ID']) tab = unique(obs, ['Target name', 'Proposal ID'])
obs["Obs"] = [np.argmax(np.logical_and(tab['Proposal ID']==data['Proposal ID'],tab['Target name']==data['Target name']))+1 for data in obs] obs["Obs"] = [np.argmax(np.logical_and(tab['Proposal ID'] == data['Proposal ID'], tab['Target name'] == data['Target name']))+1 for data in obs]
try: try:
n_obs = unique(obs[["Obs", "Filters", "Start", "Central wavelength", "Instrument", n_obs = unique(obs[["Obs", "Filters", "Start", "Central wavelength", "Instrument", "Size", "Target name", "Proposal ID", "PI last name"]], 'Obs')
"Size", "Target name", "Proposal ID", "PI last name"]], 'Obs')
except IndexError: except IndexError:
raise ValueError( raise ValueError(
"There is no observation with POL0, POL60 and POL120 for {0:s} in HST/FOC Legacy Archive".format(target)) "There is no observation with POL0, POL60 and POL120 for {0:s} in HST/FOC Legacy Archive".format(target))
b = np.zeros(len(results), dtype=bool) b = np.zeros(len(results), dtype=bool)
if not proposal_id is None and str(proposal_id) in obs['Proposal ID']: if proposal_id is not None and str(proposal_id) in obs['Proposal ID']:
b[results['Proposal ID'] == str(proposal_id)] = True b[results['Proposal ID'] == str(proposal_id)] = True
else: else:
n_obs.pprint(len(n_obs)+2) n_obs.pprint(len(n_obs)+2)
a = [np.array(i.split(":"), dtype=str) for i in input("select observations to be downloaded ('1,3,4,5' or '1,3:5' or 'all','*' default to 1)\n>").split(',')] a = [np.array(i.split(":"), dtype=str)
if a[0][0]=='': for i in input("select observations to be downloaded ('1,3,4,5' or '1,3:5' or 'all','*' default to 1)\n>").split(',')]
if a[0][0] == '':
a = [[1]] a = [[1]]
if a[0][0] in ['a','all','*']: if a[0][0] in ['a', 'all', '*']:
b = np.ones(len(results),dtype=bool) b = np.ones(len(results), dtype=bool)
else: else:
a = [np.array(i,dtype=int) for i in a] a = [np.array(i, dtype=int) for i in a]
for i in a: for i in a:
if len(i) > 1: if len(i) > 1:
for j in range(i[0], i[1]+1): for j in range(i[0], i[1]+1):
@@ -135,19 +138,19 @@ def get_product_list(target=None, proposal_id=None):
dataproduct_type=['image'], dataproduct_type=['image'],
calib_level=[2], calib_level=[2],
description="DADS C0F file - Calibrated exposure WFPC/WFPC2/FOC/FOS/GHRS/HSP") description="DADS C0F file - Calibrated exposure WFPC/WFPC2/FOC/FOS/GHRS/HSP")
products['proposal_id'] = Column(products['proposal_id'],dtype='U35') products['proposal_id'] = Column(products['proposal_id'], dtype='U35')
products['target_name'] = Column(observations['target_name']) products['target_name'] = Column(observations['target_name'])
for prod in products: for prod in products:
prod['proposal_id'] = results['Proposal ID'][results['Dataset']==prod['productFilename'][:len(results['Dataset'][0])].upper()][0] prod['proposal_id'] = results['Proposal ID'][results['Dataset'] == prod['productFilename'][:len(results['Dataset'][0])].upper()][0]
for prod in products: for prod in products:
prod['target_name'] = observations['target_name'][observations['obsid']==prod['obsID']][0] prod['target_name'] = observations['target_name'][observations['obsid'] == prod['obsID']][0]
tab = unique(products, ['target_name', 'proposal_id']) tab = unique(products, ['target_name', 'proposal_id'])
if len(tab)>1 and np.all(tab['target_name']==tab['target_name'][0]): if len(tab) > 1 and np.all(tab['target_name'] == tab['target_name'][0]):
target = tab['target_name'][0] target = tab['target_name'][0]
products["Obs"] = [np.argmax(np.logical_and(tab['proposal_id']==data['proposal_id'],tab['target_name']==data['target_name']))+1 for data in products] products["Obs"] = [np.argmax(np.logical_and(tab['proposal_id'] == data['proposal_id'], tab['target_name'] == data['target_name']))+1 for data in products]
return target, products return target, products
@@ -155,17 +158,17 @@ def retrieve_products(target=None, proposal_id=None, output_dir='./data'):
""" """
Given a target name and a proposal_id, create the local directories and retrieve the fits files from the MAST Archive Given a target name and a proposal_id, create the local directories and retrieve the fits files from the MAST Archive
""" """
target, products = get_product_list(target=target,proposal_id=proposal_id) target, products = get_product_list(target=target, proposal_id=proposal_id)
prodpaths = [] prodpaths = []
data_dir = path_join(output_dir, target) # data_dir = path_join(output_dir, target)
out = "" out = ""
for obs in unique(products,'Obs'): for obs in unique(products, 'Obs'):
filepaths = [] filepaths = []
#obs_dir = path_join(data_dir, obs['prodposal_id']) # obs_dir = path_join(data_dir, obs['prodposal_id'])
#if obs['target_name']!=target: # if obs['target_name']!=target:
obs_dir = path_join(path_join(output_dir, target), obs['proposal_id']) obs_dir = path_join(path_join(output_dir, target), obs['proposal_id'])
if not path_exists(obs_dir): if not path_exists(obs_dir):
system("mkdir -p {0:s} {1:s}".format(obs_dir,obs_dir.replace("data","plots"))) system("mkdir -p {0:s} {1:s}".format(obs_dir, obs_dir.replace("data", "plots")))
for file in products['productFilename'][products['Obs'] == obs['Obs']]: for file in products['productFilename'][products['Obs'] == obs['Obs']]:
fpath = path_join(obs_dir, file) fpath = path_join(obs_dir, file)
if not path_exists(fpath): if not path_exists(fpath):
@@ -173,8 +176,8 @@ def retrieve_products(target=None, proposal_id=None, output_dir='./data'):
products['dataURI'][products['productFilename'] == file][0], local_path=fpath)[0]) products['dataURI'][products['productFilename'] == file][0], local_path=fpath)[0])
else: else:
out += "{0:s} : Exists\n".format(file) out += "{0:s} : Exists\n".format(file)
filepaths.append([obs_dir,file]) filepaths.append([obs_dir, file])
prodpaths.append(np.array(filepaths,dtype=str)) prodpaths.append(np.array(filepaths, dtype=str))
return target, prodpaths return target, prodpaths
@@ -183,11 +186,11 @@ if __name__ == "__main__":
import argparse import argparse
parser = argparse.ArgumentParser(description='Query MAST for target products') parser = argparse.ArgumentParser(description='Query MAST for target products')
parser.add_argument('-t','--target', metavar='targetname', required=False, parser.add_argument('-t', '--target', metavar='targetname', required=False,
help='the name of the target', type=str, default=None) help='the name of the target', type=str, default=None)
parser.add_argument('-p','--proposal_id', metavar='proposal_id', required=False, parser.add_argument('-p', '--proposal_id', metavar='proposal_id', required=False,
help='the proposal id of the data products', type=int, default=None) help='the proposal id of the data products', type=int, default=None)
parser.add_argument('-o','--output_dir', metavar='directory_path', required=False, parser.add_argument('-o', '--output_dir', metavar='directory_path', required=False,
help='output directory path for the data products', type=str, default="./data") help='output directory path for the data products', type=str, default="./data")
args = parser.parse_args() args = parser.parse_args()
    target, prodpaths = retrieve_products(target=args.target, proposal_id=args.proposal_id, output_dir=args.output_dir) target, prodpaths = retrieve_products(target=args.target, proposal_id=args.proposal_id, output_dir=args.output_dir)
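A hedged usage sketch; the module file name is not shown in this diff, so query_mast.py below is hypothetical (the same call is exposed on the command line through the -t/-p/-o arguments parsed above):

from query_mast import retrieve_products                 # hypothetical module name
target, prodpaths = retrieve_products(target="IC5063", proposal_id=5918, output_dir="./data")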

File diff suppressed because it is too large

View File

@@ -7,65 +7,66 @@ from lib.plots import overplot_radio, overplot_pol, align_pol
from matplotlib.colors import LogNorm from matplotlib.colors import LogNorm
Stokes_UV = fits.open("./data/IC5063/5918/IC5063_FOC_b0.10arcsec_c0.20arcsec.fits") Stokes_UV = fits.open("./data/IC5063/5918/IC5063_FOC_b0.10arcsec_c0.20arcsec.fits")
#Stokes_18GHz = fits.open("./data/IC5063/radio/IC5063_18GHz.fits") # Stokes_18GHz = fits.open("./data/IC5063/radio/IC5063_18GHz.fits")
#Stokes_24GHz = fits.open("./data/IC5063/radio/IC5063_24GHz.fits") # Stokes_24GHz = fits.open("./data/IC5063/radio/IC5063_24GHz.fits")
#Stokes_103GHz = fits.open("./data/IC5063/radio/IC5063_103GHz.fits") # Stokes_103GHz = fits.open("./data/IC5063/radio/IC5063_103GHz.fits")
#Stokes_229GHz = fits.open("./data/IC5063/radio/IC5063_229GHz.fits") # Stokes_229GHz = fits.open("./data/IC5063/radio/IC5063_229GHz.fits")
#Stokes_357GHz = fits.open("./data/IC5063/radio/IC5063_357GHz.fits") # Stokes_357GHz = fits.open("./data/IC5063/radio/IC5063_357GHz.fits")
#Stokes_S2 = fits.open("./data/IC5063/POLARIZATION_COMPARISON/S2_rot_crop.fits") # Stokes_S2 = fits.open("./data/IC5063/POLARIZATION_COMPARISON/S2_rot_crop.fits")
Stokes_IR = fits.open("./data/IC5063/IR/u2e65g01t_c0f_rot.fits") Stokes_IR = fits.open("./data/IC5063/IR/u2e65g01t_c0f_rot.fits")
##levelsMorganti = np.array([1.,2.,3.,8.,16.,32.,64.,128.]) # levelsMorganti = np.array([1.,2.,3.,8.,16.,32.,64.,128.])
#levelsMorganti = np.logspace(0.,1.97,5)/100. # levelsMorganti = np.logspace(0.,1.97,5)/100.
# #
#levels18GHz = levelsMorganti*Stokes_18GHz[0].data.max() # levels18GHz = levelsMorganti*Stokes_18GHz[0].data.max()
#A = overplot_radio(Stokes_UV, Stokes_18GHz) # A = overplot_radio(Stokes_UV, Stokes_18GHz)
#A.plot(levels=levels18GHz, SNRp_cut=2.0, SNRi_cut=10.0, savename='./plots/IC5063/18GHz_overplot_forced.pdf',vec_scale=None) # A.plot(levels=levels18GHz, SNRp_cut=2.0, SNRi_cut=10.0, savename='./plots/IC5063/18GHz_overplot_forced.pdf',vec_scale=None)
## ##
#levels24GHz = levelsMorganti*Stokes_24GHz[0].data.max() # levels24GHz = levelsMorganti*Stokes_24GHz[0].data.max()
#B = overplot_radio(Stokes_UV, Stokes_24GHz) # B = overplot_radio(Stokes_UV, Stokes_24GHz)
#B.plot(levels=levels24GHz, SNRp_cut=2.0, SNRi_cut=10.0, savename='./plots/IC5063/24GHz_overplot_forced.pdf',vec_scale=None) # B.plot(levels=levels24GHz, SNRp_cut=2.0, SNRi_cut=10.0, savename='./plots/IC5063/24GHz_overplot_forced.pdf',vec_scale=None)
## ##
#levels103GHz = levelsMorganti*Stokes_103GHz[0].data.max() # levels103GHz = levelsMorganti*Stokes_103GHz[0].data.max()
#C = overplot_radio(Stokes_UV, Stokes_103GHz) # C = overplot_radio(Stokes_UV, Stokes_103GHz)
#C.plot(levels=levels103GHz, SNRp_cut=2.0, SNRi_cut=10.0, savename='./plots/IC5063/103GHz_overplot_forced.pdf',vec_scale=None) # C.plot(levels=levels103GHz, SNRp_cut=2.0, SNRi_cut=10.0, savename='./plots/IC5063/103GHz_overplot_forced.pdf',vec_scale=None)
## ##
#levels229GHz = levelsMorganti*Stokes_229GHz[0].data.max() # levels229GHz = levelsMorganti*Stokes_229GHz[0].data.max()
#D = overplot_radio(Stokes_UV, Stokes_229GHz) # D = overplot_radio(Stokes_UV, Stokes_229GHz)
#D.plot(levels=levels229GHz, SNRp_cut=2.0, SNRi_cut=10.0, savename='./plots/IC5063/229GHz_overplot_forced.pdf',vec_scale=None) # D.plot(levels=levels229GHz, SNRp_cut=2.0, SNRi_cut=10.0, savename='./plots/IC5063/229GHz_overplot_forced.pdf',vec_scale=None)
## ##
#levels357GHz = levelsMorganti*Stokes_357GHz[0].data.max() # levels357GHz = levelsMorganti*Stokes_357GHz[0].data.max()
#E = overplot_radio(Stokes_UV, Stokes_357GHz) # E = overplot_radio(Stokes_UV, Stokes_357GHz)
#E.plot(levels=levels357GHz, SNRp_cut=2.0, SNRi_cut=10.0, savename='./plots/IC5063/357GHz_overplot_forced.pdf',vec_scale=None) # E.plot(levels=levels357GHz, SNRp_cut=2.0, SNRi_cut=10.0, savename='./plots/IC5063/357GHz_overplot_forced.pdf',vec_scale=None)
## ##
#F = overplot_pol(Stokes_UV, Stokes_S2) # F = overplot_pol(Stokes_UV, Stokes_S2)
#F.plot(SNRp_cut=3.0, SNRi_cut=80.0, savename='./plots/IC5063/S2_overplot_forced.pdf', norm=LogNorm(vmin=5e-20,vmax=5e-18)) # F.plot(SNRp_cut=3.0, SNRi_cut=80.0, savename='./plots/IC5063/S2_overplot_forced.pdf', norm=LogNorm(vmin=5e-20,vmax=5e-18))
G = overplot_pol(Stokes_UV, Stokes_IR, cmap='inferno') G = overplot_pol(Stokes_UV, Stokes_IR, cmap='inferno')
G.plot(SNRp_cut=2.0, SNRi_cut=10.0, savename='./plots/IC5063/IR_overplot_forced.pdf',vec_scale=None,norm=LogNorm(Stokes_IR[0].data.max()*Stokes_IR[0].header['photflam']/1e3,Stokes_IR[0].data.max()*Stokes_IR[0].header['photflam']),cmap='inferno_r') G.plot(SNRp_cut=2.0, SNRi_cut=10.0, savename='./plots/IC5063/IR_overplot_forced.pdf', vec_scale=None,
norm=LogNorm(Stokes_IR[0].data.max()*Stokes_IR[0].header['photflam']/1e3, Stokes_IR[0].data.max()*Stokes_IR[0].header['photflam']), cmap='inferno_r')
#data_folder1 = "./data/M87/POS1/" # data_folder1 = "./data/M87/POS1/"
#plots_folder1 = "./plots/M87/POS1/" # plots_folder1 = "./plots/M87/POS1/"
#basename1 = "M87_020_log" # basename1 = "M87_020_log"
#M87_1_95 = fits.open(data_folder1+"M87_POS1_1995_FOC_combine_FWHM020.fits") # M87_1_95 = fits.open(data_folder1+"M87_POS1_1995_FOC_combine_FWHM020.fits")
#M87_1_96 = fits.open(data_folder1+"M87_POS1_1996_FOC_combine_FWHM020.fits") # M87_1_96 = fits.open(data_folder1+"M87_POS1_1996_FOC_combine_FWHM020.fits")
#M87_1_97 = fits.open(data_folder1+"M87_POS1_1997_FOC_combine_FWHM020.fits") # M87_1_97 = fits.open(data_folder1+"M87_POS1_1997_FOC_combine_FWHM020.fits")
#M87_1_98 = fits.open(data_folder1+"M87_POS1_1998_FOC_combine_FWHM020.fits") # M87_1_98 = fits.open(data_folder1+"M87_POS1_1998_FOC_combine_FWHM020.fits")
#M87_1_99 = fits.open(data_folder1+"M87_POS1_1999_FOC_combine_FWHM020.fits") # M87_1_99 = fits.open(data_folder1+"M87_POS1_1999_FOC_combine_FWHM020.fits")
#H = align_pol(np.array([M87_1_95,M87_1_96,M87_1_97,M87_1_98,M87_1_99]), norm=LogNorm()) # H = align_pol(np.array([M87_1_95,M87_1_96,M87_1_97,M87_1_98,M87_1_99]), norm=LogNorm())
#H.plot(SNRp_cut=5.0, SNRi_cut=50.0, savename=plots_folder1+'animated_loop/'+basename1, norm=LogNorm()) # H.plot(SNRp_cut=5.0, SNRi_cut=50.0, savename=plots_folder1+'animated_loop/'+basename1, norm=LogNorm())
#command("convert -delay 50 -loop 0 {0:s}animated_loop/{1:s}*.pdf {0:s}animated_loop/{1:s}.gif".format(plots_folder1, basename1)) # command("convert -delay 50 -loop 0 {0:s}animated_loop/{1:s}*.pdf {0:s}animated_loop/{1:s}.gif".format(plots_folder1, basename1))
#data_folder3 = "./data/M87/POS3/" # data_folder3 = "./data/M87/POS3/"
#plots_folder3 = "./plots/M87/POS3/" # plots_folder3 = "./plots/M87/POS3/"
#basename3 = "M87_020_log" # basename3 = "M87_020_log"
#M87_3_95 = fits.open(data_folder3+"M87_POS3_1995_FOC_combine_FWHM020.fits") # M87_3_95 = fits.open(data_folder3+"M87_POS3_1995_FOC_combine_FWHM020.fits")
#M87_3_96 = fits.open(data_folder3+"M87_POS3_1996_FOC_combine_FWHM020.fits") # M87_3_96 = fits.open(data_folder3+"M87_POS3_1996_FOC_combine_FWHM020.fits")
#M87_3_97 = fits.open(data_folder3+"M87_POS3_1997_FOC_combine_FWHM020.fits") # M87_3_97 = fits.open(data_folder3+"M87_POS3_1997_FOC_combine_FWHM020.fits")
#M87_3_98 = fits.open(data_folder3+"M87_POS3_1998_FOC_combine_FWHM020.fits") # M87_3_98 = fits.open(data_folder3+"M87_POS3_1998_FOC_combine_FWHM020.fits")
#M87_3_99 = fits.open(data_folder3+"M87_POS3_1999_FOC_combine_FWHM020.fits") # M87_3_99 = fits.open(data_folder3+"M87_POS3_1999_FOC_combine_FWHM020.fits")
#I = align_pol(np.array([M87_3_95,M87_3_96,M87_3_97,M87_3_98,M87_3_99]), norm=LogNorm()) # I = align_pol(np.array([M87_3_95,M87_3_96,M87_3_97,M87_3_98,M87_3_99]), norm=LogNorm())
#I.plot(SNRp_cut=5.0, SNRi_cut=50.0, savename=plots_folder3+'animated_loop/'+basename3, norm=LogNorm()) # I.plot(SNRp_cut=5.0, SNRi_cut=50.0, savename=plots_folder3+'animated_loop/'+basename3, norm=LogNorm())
#command("convert -delay 20 -loop 0 {0:s}animated_loop/{1:s}*.pdf {0:s}animated_loop/{1:s}.gif".format(plots_folder3, basename3)) # command("convert -delay 20 -loop 0 {0:s}animated_loop/{1:s}*.pdf {0:s}animated_loop/{1:s}.gif".format(plots_folder3, basename3))

View File

@@ -1,23 +1,23 @@
#!/usr/bin/python3 #!/usr/bin/python3
from astropy.io import fits from astropy.io import fits
import numpy as np import numpy as np
from lib.plots import overplot_chandra, overplot_pol, align_pol from lib.plots import overplot_chandra, overplot_pol
from matplotlib.colors import LogNorm from matplotlib.colors import LogNorm
Stokes_UV = fits.open("./data/MRK463E/5960/MRK463E_FOC_b0.05arcsec_c0.10arcsec.fits") Stokes_UV = fits.open("./data/MRK463E/5960/MRK463E_FOC_b0.05arcsec_c0.10arcsec.fits")
Stokes_IR = fits.open("./data/MRK463E/WFPC2/IR_rot_crop.fits") Stokes_IR = fits.open("./data/MRK463E/WFPC2/IR_rot_crop.fits")
Stokes_Xr = fits.open("./data/MRK463E/Chandra/4913/primary/acisf04913N004_cntr_img2.fits") Stokes_Xr = fits.open("./data/MRK463E/Chandra/4913/primary/acisf04913N004_cntr_img2.fits")
levels = np.geomspace(1.,99.,10) levels = np.geomspace(1., 99., 10)
#A = overplot_chandra(Stokes_UV, Stokes_Xr) # A = overplot_chandra(Stokes_UV, Stokes_Xr)
#A.plot(levels=levels, SNRp_cut=3.0, SNRi_cut=20.0, zoom=1, savename='./plots/MRK463E/Chandra_overplot.pdf') # A.plot(levels=levels, SNRp_cut=3.0, SNRi_cut=20.0, zoom=1, savename='./plots/MRK463E/Chandra_overplot.pdf')
#B = overplot_chandra(Stokes_UV, Stokes_Xr, norm=LogNorm()) B = overplot_chandra(Stokes_UV, Stokes_Xr, norm=LogNorm())
#B.plot(levels=levels, SNRp_cut=3.0, SNRi_cut=20.0, zoom=1, savename='./plots/MRK463E/Chandra_overplot_forced.pdf') B.plot(levels=levels, SNRp_cut=3.0, SNRi_cut=20.0, zoom=1, savename='./plots/MRK463E/Chandra_overplot_forced.pdf')
#C = overplot_pol(Stokes_UV, Stokes_IR) # C = overplot_pol(Stokes_UV, Stokes_IR)
#C.plot(SNRp_cut=3.0, SNRi_cut=20.0, savename='./plots/MRK463E/IR_overplot.pdf') # C.plot(SNRp_cut=3.0, SNRi_cut=20.0, savename='./plots/MRK463E/IR_overplot.pdf')
D = overplot_pol(Stokes_UV, Stokes_IR, norm=LogNorm()) D = overplot_pol(Stokes_UV, Stokes_IR, norm=LogNorm())
D.plot(SNRp_cut=3.0, SNRi_cut=30.0, vec_scale=2, norm=LogNorm(1e-18,1e-15), savename='./plots/MRK463E/IR_overplot_forced.pdf') D.plot(SNRp_cut=3.0, SNRi_cut=30.0, vec_scale=2, norm=LogNorm(1e-18, 1e-15), savename='./plots/MRK463E/IR_overplot_forced.pdf')