bug fix and small improvements

Thibault Barnouin
2023-05-30 18:07:24 +02:00
parent 8e19cc74f1
commit b1fcad9e25
7 changed files with 138 additions and 97 deletions


@@ -46,8 +46,8 @@ def main(target=None, proposal_id=None, infiles=None, output_dir="./data"):
     display_data = False
     # Smoothing
-    smoothing_function = 'combine' #gaussian_after, weighted_gaussian_after, gaussian, weighted_gaussian or combine
-    smoothing_FWHM = 0.20 #If None, no smoothing is done
+    smoothing_function = 'gaussian' #gaussian_after, weighted_gaussian_after, gaussian, weighted_gaussian or combine
+    smoothing_FWHM = None #If None, no smoothing is done
     smoothing_scale = 'arcsec' #pixel or arcsec
     # Rotation
@@ -77,8 +77,8 @@ def main(target=None, proposal_id=None, infiles=None, output_dir="./data"):
         target = input("Target name:\n>")
     else:
         target, products = retrieve_products(target,proposal_id,output_dir=output_dir)
-        prod = products.pop()
-        for prods in products:
+        prod = products[0]
+        for prods in products[1:]:
             main(target=target,infiles=["/".join(pr) for pr in prods],output_dir=output_dir)
         data_folder = prod[0,0]
         try:
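
Context for this fix: retrieve_products now returns a NumPy array (see its hunk further down), and ndarray has no pop(); taking products[0] for the current call and recursing over products[1:] also avoids mutating the sequence while it is being iterated. A minimal sketch with made-up product paths:

    import numpy as np

    products = np.array([[["dir1", "f1.fits"]], [["dir2", "f2.fits"]], [["dir3", "f3.fits"]]])
    # products.pop()  -> AttributeError: 'numpy.ndarray' object has no attribute 'pop'
    prod = products[0]            # first product set is reduced by the current call
    for prods in products[1:]:    # remaining sets go to recursive main() calls
        print(["/".join(pr) for pr in prods])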
@@ -89,7 +89,15 @@ def main(target=None, proposal_id=None, infiles=None, output_dir="./data"):
     data_array, headers = proj_fits.get_obs_data(infiles, data_folder=data_folder, compute_flux=True)
     figname = "_".join([target,"FOC"])
-    figtype = "_".join(["".join([s[0] for s in smoothing_function.split("_")]),"{0:.2f}".format(smoothing_FWHM).replace(".","")]) #additionnal informations
+    if smoothing_FWHM is None:
+        if px_scale in ['px','pixel','pixels']:
+            figtype = "".join(["b_",str(pxsize),'px'])
+        elif px_scale in ['arcsec','arcseconds','arcs']:
+            figtype = "".join(["b_","{0:.2f}".format(pxsize).replace(".",""),'arcsec'])
+        else:
+            figtype = "full"
+    else:
+        figtype = "_".join(["".join([s[0] for s in smoothing_function.split("_")]),"".join(["{0:.2f}".format(smoothing_FWHM).replace(".",""),smoothing_scale])]) #additionnal informations
     # Crop data to remove outside blank margins.
     data_array, error_array, headers = proj_red.crop_array(data_array, headers, step=5, null_val=0., inside=True, display=display_crop, savename=figname, plots_folder=plots_folder)
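
The new branch derives the figure-name suffix from the binning when smoothing is disabled, instead of always encoding the smoothing kernel. A standalone sketch of the naming scheme (px_scale and pxsize are the script's binning settings; the values below are illustrative):

    def make_figtype(smoothing_function, smoothing_FWHM, smoothing_scale, px_scale, pxsize):
        # No smoothing: encode the binning instead.
        if smoothing_FWHM is None:
            if px_scale in ['px', 'pixel', 'pixels']:
                return "b_" + str(pxsize) + "px"
            elif px_scale in ['arcsec', 'arcseconds', 'arcs']:
                return "b_" + "{0:.2f}".format(pxsize).replace(".", "") + "arcsec"
            return "full"
        # Smoothing: initials of the kernel, then FWHM and its scale.
        initials = "".join(s[0] for s in smoothing_function.split("_"))
        return "_".join([initials, "{0:.2f}".format(smoothing_FWHM).replace(".", "") + smoothing_scale])

    print(make_figtype('gaussian', None, 'arcsec', 'px', 10))           # b_10px
    print(make_figtype('weighted_gaussian', 0.20, 'arcsec', 'px', 10))  # wg_020arcsec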
@@ -100,7 +108,7 @@ def main(target=None, proposal_id=None, infiles=None, output_dir="./data"):
     # Estimate error from data background, estimated from sub-image of desired sub_shape.
     background = None
-    data_array, error_array, headers, background = proj_red.get_error(data_array, headers, error_array, sub_type=error_sub_type, subtract_error=subtract_error, display=display_error, savename="_".join([figname,"_errors"]), plots_folder=plots_folder, return_background=True)
+    data_array, error_array, headers, background = proj_red.get_error(data_array, headers, error_array, sub_type=error_sub_type, subtract_error=subtract_error, display=display_error, savename="_".join([figname,"errors"]), plots_folder=plots_folder, return_background=True)
     # Align and rescale images with oversampling.
     data_array, error_array, headers, data_mask = proj_red.align_data(data_array, headers, error_array=error_array, background=background, upsample_factor=10, ref_center=align_center, return_shifts=False)


@@ -16,8 +16,8 @@ import matplotlib.pyplot as plt
 root_dir = path_join('/home/t.barnouin/Documents/Thesis/HST')
 root_dir_K = path_join(root_dir,'Kishimoto','output')
 root_dir_S = path_join(root_dir,'FOC_Reduction','output')
-root_dir_data_S = path_join(root_dir,'FOC_Reduction','data','NGC1068_x274020')
-root_dir_plot_S = path_join(root_dir,'FOC_Reduction','plots','NGC1068_x274020')
+root_dir_data_S = path_join(root_dir,'FOC_Reduction','data','NGC1068','5144')
+root_dir_plot_S = path_join(root_dir,'FOC_Reduction','plots','NGC1068','5144')
 filename_S = "NGC1068_FOC_b_10px.fits"
 plt.rcParams.update({'font.size': 15})

src/lib/background.py (Normal file → Executable file)

@@ -119,7 +119,7 @@ def sky_part(img):
     # Intensity range
     sky_med = np.median(rand_pix)
     sig = np.min([img[img<sky_med].std(),img[img>sky_med].std()])
-    sky_range = [sky_med-2.*sig, sky_med+sig]
+    sky_range = [sky_med-2.*sig, np.max([sky_med+sig,7e-4])] #Detector background average FOC Data Handbook Sec. 7.6
     sky = img[np.logical_and(img>=sky_range[0],img<=sky_range[1])]
     return sky, sky_range
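
The only change here is a floor on the upper bound of the sky intensity range, so the sky sample can never be cut below the average FOC detector background of 7e-4 cited from the FOC Data Handbook Sec. 7.6. With illustrative numbers:

    import numpy as np

    sky_med, sig = 3e-4, 1e-4                  # made-up sky statistics
    old_hi = sky_med + sig                     # 4e-4: would clip genuine background
    new_hi = np.max([sky_med + sig, 7e-4])     # clamped to the detector average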
@@ -170,7 +170,7 @@ def bkg_fit(data, error, mask, headers, subtract_error=True, display=False, save
     savename : str, optional
         Name of the figure the map should be saved to. If None, the map won't
         be saved (only displayed). Only used if display is True.
-        Defaults to None.
+        Defaults to None.CNRS-Unistra Labo ObsAstroS
     plots_folder : str, optional
         Relative (or absolute) filepath to the folder in wich the map will
         be saved. Not used if savename is None.
@@ -212,18 +212,7 @@ def bkg_fit(data, error, mask, headers, subtract_error=True, display=False, save
         error_bkg[i] *= bkg
-        # Quadratically add uncertainties in the "correction factors" (see Kishimoto 1999)
-        #wavelength dependence of the polariser filters
-        #estimated to less than 1%
-        err_wav = data[i]*0.01
-        #difference in PSFs through each polarizers
-        #estimated to less than 3%
-        err_psf = data[i]*0.03
-        #flatfielding uncertainties
-        #estimated to less than 3%
-        err_flat = data[i]*0.03
-        n_error_array[i] = np.sqrt(n_error_array[i]**2 + error_bkg[i]**2 + err_wav**2 + err_psf**2 + err_flat**2)
+        n_error_array[i] = np.sqrt(n_error_array[i]**2 + error_bkg[i]**2)

     #Substract background
     if subtract_error>0:
@@ -327,18 +316,7 @@ def bkg_hist(data, error, mask, headers, sub_type=None, subtract_error=True, dis
         error_bkg[i] *= bkg
-        # Quadratically add uncertainties in the "correction factors" (see Kishimoto 1999)
-        #wavelength dependence of the polariser filters
-        #estimated to less than 1%
-        err_wav = data[i]*0.01
-        #difference in PSFs through each polarizers
-        #estimated to less than 3%
-        err_psf = data[i]*0.03
-        #flatfielding uncertainties
-        #estimated to less than 3%
-        err_flat = data[i]*0.03
-        n_error_array[i] = np.sqrt(n_error_array[i]**2 + error_bkg[i]**2 + err_wav**2 + err_psf**2 + err_flat**2)
+        n_error_array[i] = np.sqrt(n_error_array[i]**2 + error_bkg[i]**2)

     #Substract background
     if subtract_error > 0:
@@ -435,18 +413,7 @@ def bkg_mini(data, error, mask, headers, sub_shape=(15,15), subtract_error=True,
         bkg = np.sqrt(np.sum(sub_image**2)/sub_image.size)*subtract_error if subtract_error>0 else np.sqrt(np.sum(sub_image**2)/sub_image.size)
         error_bkg[i] *= bkg
-        # Quadratically add uncertainties in the "correction factors" (see Kishimoto 1999)
-        #wavelength dependence of the polariser filters
-        #estimated to less than 1%
-        err_wav = data[i]*0.01
-        #difference in PSFs through each polarizers
-        #estimated to less than 3%
-        err_psf = data[i]*0.03
-        #flatfielding uncertainties
-        #estimated to less than 3%
-        err_flat = data[i]*0.03
-        n_error_array[i] = np.sqrt(n_error_array[i]**2 + error_bkg[i]**2 + err_wav**2 + err_psf**2 + err_flat**2)
+        n_error_array[i] = np.sqrt(n_error_array[i]**2 + error_bkg[i]**2)

     #Substract background
     if subtract_error>0.:


@@ -1,3 +1,5 @@
+#!/usr/bin/python3
+#-*- coding:utf-8 -*-
 """
 Library function for simplified fits handling.
@@ -12,7 +14,7 @@ prototypes :
 import numpy as np
 from os.path import join as path_join
 from astropy.io import fits
-from astropy import wcs
+from astropy.wcs import WCS
 from lib.convex_hull import image_hull, clean_ROI
 from lib.plots import princ_angle
 import matplotlib.pyplot as plt
@@ -51,7 +53,7 @@ def get_obs_data(infiles, data_folder="", compute_flux=False):
     # force WCS to convention PCi_ja unitary, cdelt in deg
     for header in headers:
-        new_wcs = wcs.WCS(header).deepcopy()
+        new_wcs = WCS(header).deepcopy()
         if new_wcs.wcs.has_cd() or (new_wcs.wcs.cdelt == np.array([1., 1.])).all():
             # Update WCS with relevant information
             if new_wcs.wcs.has_cd():
@@ -70,6 +72,16 @@ def get_obs_data(infiles, data_folder="", compute_flux=False):
             header[key] = val
         header['orientat'] = princ_angle(float(header['orientat']))

+    # force WCS for POL60 to have same pixel size as POL0 and POL120
+    is_pol60 = np.array([head['filtnam1'].lower()=='pol60' for head in headers],dtype=bool)
+    cdelt = np.array([WCS(head).wcs.cdelt for head in headers])
+    if np.unique(cdelt[np.logical_not(is_pol60)],axis=0).size!=2:
+        print(np.unique(cdelt[np.logical_not(is_pol60)],axis=0).size)
+        raise ValueError("Not all images have same pixel size")
+    else:
+        for head in np.array(headers,dtype=object)[is_pol60]:
+            head['cdelt1'],head['cdelt2'] = np.unique(cdelt[np.logical_not(is_pol60)],axis=0)[0]
+
     if compute_flux:
         for i in range(len(infiles)):
             # Compute the flux in counts/sec
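
The new block forces all three polarizer frames onto one pixel grid: it gathers the pixel scales of the POL0/POL120 frames, requires them to agree, and stamps that common scale onto POL60. The intent, sketched on toy dicts standing in for FITS headers (values illustrative):

    import numpy as np

    # Toy stand-ins for FITS headers: filter name + pixel scale in deg/px.
    headers = [
        {'filtnam1': 'POL0',   'cdelt1': 4e-6, 'cdelt2': 4e-6},
        {'filtnam1': 'POL60',  'cdelt1': 5e-6, 'cdelt2': 5e-6},
        {'filtnam1': 'POL120', 'cdelt1': 4e-6, 'cdelt2': 4e-6},
    ]
    is_pol60 = np.array([h['filtnam1'].lower() == 'pol60' for h in headers])
    cdelt = np.array([[h['cdelt1'], h['cdelt2']] for h in headers])
    ref = np.unique(cdelt[~is_pol60], axis=0)
    if ref.size != 2:                      # more than one distinct (cdelt1, cdelt2) pair
        raise ValueError("Not all images have same pixel size")
    for h in [h for h, p60 in zip(headers, is_pol60) if p60]:
        h['cdelt1'], h['cdelt2'] = ref[0]  # force POL60 onto the common scale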
@@ -118,7 +130,7 @@ def save_Stokes(I_stokes, Q_stokes, U_stokes, Stokes_cov, P, debiased_P, s_P,
     #Create new WCS object given the modified images
     ref_header = headers[0]
     exp_tot = np.array([header['exptime'] for header in headers]).sum()
-    new_wcs = wcs.WCS(ref_header).deepcopy()
+    new_wcs = WCS(ref_header).deepcopy()
     if data_mask.shape != (1,1):
         vertex = clean_ROI(data_mask)


@@ -287,20 +287,18 @@ def polarization_map(Stokes, data_mask=None, rectangle=None, SNRp_cut=3., SNRi_c
         plot_Stokes(Stokes, savename=savename, plots_folder=plots_folder)

     #Compute SNR and apply cuts
-    pol.data[pol.data == 0.] = np.nan
-    pol_err.data[pol_err.data == 0.] = np.nan
-    SNRp = pol.data/pol_err.data
-    SNRp[np.isnan(SNRp)] = 0.
-    pol.data[SNRp < SNRp_cut] = np.nan
-    pang.data[SNRp < SNRp_cut] = np.nan
+    poldata, pangdata = pol.data.copy(), pang.data.copy()
+    maskP = pol_err.data > 0
+    SNRp = np.zeros(pol.data.shape)
+    SNRp[maskP] = pol.data[maskP]/pol_err.data[maskP]

     maskI = stk_cov.data[0,0] > 0
     SNRi = np.zeros(stkI.data.shape)
     SNRi[maskI] = stkI.data[maskI]/np.sqrt(stk_cov.data[0,0][maskI])
-    pol.data[SNRi < SNRi_cut] = np.nan
-    pang.data[SNRi < SNRi_cut] = np.nan

     mask = (SNRp > SNRp_cut) * (SNRi > SNRi_cut)
+    poldata[np.logical_not(mask)] = np.nan
+    pangdata[np.logical_not(mask)] = np.nan

     # Look for pixel of max polarization
     if np.isfinite(pol.data).any():
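
The rewrite fixes two problems at once: the SNR maps are now built with zero-division-safe masks instead of dividing and patching NaNs afterwards, and the cuts blank local copies (poldata, pangdata) rather than the shared HDU arrays, which the old code silently corrupted for later callers. The pattern, condensed (toy arrays, hypothetical helper name):

    import numpy as np

    def snr_cut(val, err, cut):
        """Blank entries below the SNR cut without 0/0 warnings or touching val."""
        ok = err > 0                    # only divide where the error is defined
        snr = np.zeros(val.shape)
        snr[ok] = val[ok] / err[ok]
        out = val.copy()                # leave the caller's array untouched
        out[snr <= cut] = np.nan
        return out

    pol = np.array([0.1, 0.3, 0.0])
    pol_err = np.array([0.01, 0.2, 0.0])
    print(snr_cut(pol, pol_err, 3.0))   # [0.1 nan nan]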
@@ -321,7 +319,10 @@ def polarization_map(Stokes, data_mask=None, rectangle=None, SNRp_cut=3., SNRi_c
     if display.lower() in ['intensity']:
         # If no display selected, show intensity map
         display='i'
-        vmin, vmax = 1/5.0*np.mean(np.sqrt(stk_cov.data[0,0][mask])*convert_flux), np.max(stkI.data[stkI.data > 0.]*convert_flux)
+        if mask.sum() > 0.:
+            vmin, vmax = 1/5.0*np.mean(np.sqrt(stk_cov.data[0,0][mask])*convert_flux), np.max(stkI.data[stkI.data > 0.]*convert_flux)
+        else:
+            vmin, vmax = 1/5.0*np.mean(np.sqrt(stk_cov.data[0,0][stkI.data > 0.])*convert_flux), np.max(stkI.data[stkI.data > 0.]*convert_flux)
         im = ax.imshow(stkI.data*convert_flux, norm=LogNorm(vmin,vmax), aspect='equal', cmap='inferno', alpha=1.)
         cbar = plt.colorbar(im, cax=cbar_ax, label=r"$F_{\lambda}$ [$ergs \cdot cm^{-2} \cdot s^{-1} \cdot \AA^{-1}$]")
         levelsI = np.linspace(vmax*0.01, vmax*0.99, 10)
@@ -332,7 +333,10 @@ def polarization_map(Stokes, data_mask=None, rectangle=None, SNRp_cut=3., SNRi_c
         # Display polarisation flux
         display='pf'
         pf_mask = (stkI.data > 0.) * (pol.data > 0.)
-        vmin, vmax = 1.*np.mean(np.sqrt(stk_cov.data[0,0][mask])*convert_flux), np.max(stkI.data[stkI.data > 0.]*convert_flux)
+        if mask.sum() > 0.:
+            vmin, vmax = 1.*np.mean(np.sqrt(stk_cov.data[0,0][mask])*convert_flux), np.max(stkI.data[stkI.data > 0.]*convert_flux)
+        else:
+            vmin, vmax = 1.*np.mean(np.sqrt(stk_cov.data[0,0][stkI.data > 0.])*convert_flux), np.max(stkI.data[stkI.data > 0.]*convert_flux)
         im = ax.imshow(stkI.data*convert_flux*pol.data, norm=LogNorm(vmin,vmax), aspect='equal', cmap='inferno', alpha=1.)
         cbar = plt.colorbar(im, cax=cbar_ax, label=r"$F_{\lambda} \cdot P$ [$ergs \cdot cm^{-2} \cdot s^{-1} \cdot \AA^{-1}$]")
         levelsPf = np.linspace(vmax*0.01, vmax*0.99, 10)
@@ -354,40 +358,55 @@ def polarization_map(Stokes, data_mask=None, rectangle=None, SNRp_cut=3., SNRi_c
     elif display.lower() in ['s_p','pol_err','pol_deg_err']:
         # Display polarization degree error map
         display='s_p'
-        vmin, vmax = 0., np.max(pol_err.data[SNRp > SNRp_cut])*100.
-        p_err = deepcopy(pol_err.data)
-        p_err[p_err > vmax/100.] = np.nan
-        im = ax.imshow(p_err*100., vmin=vmin, vmax=vmax, aspect='equal', cmap='inferno', alpha=1.)
+        if (SNRp>SNRp_cut).any():
+            vmin, vmax = 0., np.max(pol_err.data[SNRp > SNRp_cut])*100.
+            p_err = deepcopy(pol_err.data)
+            p_err[p_err > vmax/100.] = np.nan
+            im = ax.imshow(p_err*100., vmin=vmin, vmax=vmax, aspect='equal', cmap='inferno', alpha=1.)
+        else:
+            im = ax.imshow(pol_err.data*100., aspect='equal', cmap='inferno', alpha=1.)
         cbar = plt.colorbar(im, cax=cbar_ax, label=r"$\sigma_P$ [%]")
     elif display.lower() in ['s_i','i_err']:
         # Display intensity error map
         display='s_i'
-        vmin, vmax = np.min(np.sqrt(stk_cov.data[0,0][stk_cov.data[0,0] > 0.])*convert_flux), np.max(np.sqrt(stk_cov.data[0,0][stk_cov.data[0,0] > 0.])*convert_flux)
-        im = ax.imshow(np.sqrt(stk_cov.data[0,0])*convert_flux, vmin=vmin, vmax=vmax, aspect='equal', cmap='inferno', alpha=1.)
+        if (SNRi>SNRi_cut).any():
+            vmin, vmax = np.min(np.sqrt(stk_cov.data[0,0][stk_cov.data[0,0] > 0.])*convert_flux), np.max(np.sqrt(stk_cov.data[0,0][stk_cov.data[0,0] > 0.])*convert_flux)
+            im = ax.imshow(np.sqrt(stk_cov.data[0,0])*convert_flux, vmin=vmin, vmax=vmax, aspect='equal', cmap='inferno', alpha=1.)
+        else:
+            im = ax.imshow(np.sqrt(stk_cov.data[0,0])*convert_flux, aspect='equal', cmap='inferno', alpha=1.)
         cbar = plt.colorbar(im, cax=cbar_ax, label=r"$\sigma_I$ [$ergs \cdot cm^{-2} \cdot s^{-1} \cdot \AA^{-1}$]")
     elif display.lower() in ['snr','snri']:
         # Display I_stokes signal-to-noise map
         display='snri'
         vmin, vmax = 0., np.max(SNRi[np.isfinite(SNRi)])
-        im = ax.imshow(SNRi, vmin=vmin, vmax=vmax, aspect='equal', cmap='inferno', alpha=1.)
+        if vmax*0.99 > SNRi_cut:
+            im = ax.imshow(SNRi, vmin=vmin, vmax=vmax, aspect='equal', cmap='inferno', alpha=1.)
+            levelsSNRi = np.linspace(SNRi_cut, vmax*0.99, 10)
+            print("SNRi contour levels : ", levelsSNRi)
+            cont = ax.contour(SNRi, levels=levelsSNRi, colors='grey', linewidths=0.5)
+            #ax.clabel(cont,inline=True,fontsize=6)
+        else:
+            im = ax.imshow(SNRi, aspect='equal', cmap='inferno', alpha=1.)
         cbar = plt.colorbar(im, cax=cbar_ax, label=r"$I_{Stokes}/\sigma_{I}$")
-        levelsSNRi = np.linspace(SNRi_cut, vmax*0.99, 10)
-        print("SNRi contour levels : ", levelsSNRi)
-        cont = ax.contour(SNRi, levels=levelsSNRi, colors='grey', linewidths=0.5)
-        #ax.clabel(cont,inline=True,fontsize=6)
     elif display.lower() in ['snrp']:
         # Display polarization degree signal-to-noise map
         display='snrp'
         vmin, vmax = 0., np.max(SNRp[np.isfinite(SNRp)])
-        im = ax.imshow(SNRp, vmin=vmin, vmax=vmax, aspect='equal', cmap='inferno', alpha=1.)
+        if vmax*0.99 > SNRp_cut:
+            im = ax.imshow(SNRp, vmin=vmin, vmax=vmax, aspect='equal', cmap='inferno', alpha=1.)
+            levelsSNRp = np.linspace(SNRp_cut, vmax*0.99, 10)
+            print("SNRp contour levels : ", levelsSNRp)
+            cont = ax.contour(SNRp, levels=levelsSNRp, colors='grey', linewidths=0.5)
+            #ax.clabel(cont,inline=True,fontsize=6)
+        else:
+            im = ax.imshow(SNRp, aspect='equal', cmap='inferno', alpha=1.)
        cbar = plt.colorbar(im, cax=cbar_ax, label=r"$P/\sigma_{P}$")
-        levelsSNRp = np.linspace(SNRp_cut, vmax*0.99, 10)
-        print("SNRp contour levels : ", levelsSNRp)
-        cont = ax.contour(SNRp, levels=levelsSNRp, colors='grey', linewidths=0.5)
-        #ax.clabel(cont,inline=True,fontsize=6)
     else:
         # Defaults to intensity map
-        vmin, vmax = 1.*np.mean(np.sqrt(stk_cov.data[0,0][mask])*convert_flux), np.max(stkI.data[stkI.data > 0.]*convert_flux)
+        if mask.sum() > 0.:
+            vmin, vmax = 1.*np.mean(np.sqrt(stk_cov.data[0,0][mask])*convert_flux), np.max(stkI.data[stkI.data > 0.]*convert_flux)
+        else:
+            vmin, vmax = 1.*np.mean(np.sqrt(stk_cov.data[0,0][stkI.data > 0.])*convert_flux), np.max(stkI.data[stkI.data > 0.]*convert_flux)
         #im = ax.imshow(stkI.data*convert_flux, vmin=vmin, vmax=vmax, aspect='equal', cmap='inferno', alpha=1.)
         #cbar = plt.colorbar(im, cax=cbar_ax, label=r"$F_{\lambda}$ [$ergs \cdot cm^{-2} \cdot s^{-1} \cdot \AA$]")
         im = ax.imshow(stkI.data*convert_flux, norm=LogNorm(vmin,vmax), aspect='equal', cmap='inferno', alpha=1.)
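
Every display branch now guards its colour stretch against a degenerate selection: when no pixel passes both SNR cuts, the noise floor is estimated over all positive-intensity pixels instead, so LogNorm never receives an empty or NaN range. The recurring pattern, reduced to its core (function name is ours; variables follow the original):

    import numpy as np

    def stretch_limits(stkI, var_I, mask, convert_flux, floor_factor=1.0):
        """vmin from the mean noise level, vmax from the brightest pixel."""
        sel = mask if mask.sum() > 0 else (stkI > 0.)  # fallback when the cut empties the mask
        vmin = floor_factor * np.mean(np.sqrt(var_I[sel]) * convert_flux)
        vmax = np.max(stkI[stkI > 0.] * convert_flux)
        return vmin, vmax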
@@ -409,11 +428,11 @@ def polarization_map(Stokes, data_mask=None, rectangle=None, SNRp_cut=3., SNRi_c
     if display.lower() in ['i','s_i','snri','pf','p','pa','s_p','snrp']:
         if step_vec == 0:
-            pol.data[np.isfinite(pol.data)] = 1./2.
+            poldata[np.isfinite(poldata)] = 1./2.
             step_vec = 1
             vec_scale = 2.
         X, Y = np.meshgrid(np.arange(stkI.data.shape[1]), np.arange(stkI.data.shape[0]))
-        U, V = pol.data*np.cos(np.pi/2.+pang.data*np.pi/180.), pol.data*np.sin(np.pi/2.+pang.data*np.pi/180.)
+        U, V = poldata*np.cos(np.pi/2.+pangdata*np.pi/180.), poldata*np.sin(np.pi/2.+pangdata*np.pi/180.)
         Q = ax.quiver(X[::step_vec,::step_vec],Y[::step_vec,::step_vec],U[::step_vec,::step_vec],V[::step_vec,::step_vec],units='xy',angles='uv',scale=1./vec_scale,scale_units='xy',pivot='mid',headwidth=0.,headlength=0.,headaxislength=0.,width=0.1,linewidth=0.5,color='w',edgecolor='k')
         pol_sc = AnchoredSizeBar(ax.transData, vec_scale, r"$P$= 100 %", 4, pad=0.5, sep=5, borderpad=0.5, frameon=False, size_vertical=0.005, color='w')
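
The quiver components draw a headless pseudo-vector of length P at an angle of π/2 + PA, so that PA = 0 points straight up (north) on the map; only the source arrays changed (poldata/pangdata copies instead of the HDU data). A worked check with hypothetical values:

    import numpy as np

    P, PA = 0.5, 0.0                            # 50 % polarization, PA = 0 deg
    U = P * np.cos(np.pi/2. + PA*np.pi/180.)
    V = P * np.sin(np.pi/2. + PA*np.pi/180.)
    print(round(U, 6), round(V, 6))             # 0.0 0.5 -> vector points straight up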
@@ -1418,7 +1437,7 @@ class pol_map(object):
         b_aper.label.set_fontsize(8)
         b_aper_reset = Button(ax_aper_reset,"Reset")
         b_aper_reset.label.set_fontsize(8)
-        s_aper_radius = Slider(ax_aper_radius, r"$R_{aper}$", 0.5, 3.5, valstep=0.1, valinit=1)
+        s_aper_radius = Slider(ax_aper_radius, r"$R_{aper}$", np.ceil(self.wcs.wcs.cdelt.max()/1.33*3.6e5)/1e2, 3.5, valstep=1e-2, valinit=1.)

         def select_aperture(event):
             if self.data is None:
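
The slider minimum is now tied to the image pixel scale instead of a fixed 0.5. Our reading of the expression (not spelled out in the commit): 3.6e5 combines the deg-to-arcsec factor of 3600 with a factor 100 so that np.ceil works in the slider's 0.01-arcsec steps, and the 1.33 divisor appears to let the radius go slightly below one pixel. Unpacked with an assumed FOC-like pixel scale (illustrative only):

    import numpy as np

    cdelt_max = 3.986e-6                            # deg/px, i.e. ~0.01435 arcsec/px (assumed)
    rmin = np.ceil(cdelt_max / 1.33 * 3.6e5) / 1e2
    print(rmin)                                     # 0.02 arcsec: smallest selectable radius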
@@ -1456,6 +1475,7 @@ class pol_map(object):
         def reset_aperture(event):
             self.region = None
+            s_aper_radius.reset()
             self.pol_int()
             self.fig.canvas.draw_idle()


@@ -132,7 +132,7 @@ def get_product_list(target=None, proposal_id=None):
     products['target_name'] = Column(observations['target_name'])
     for prod in products:
-        products['proposal_id'] = results['Proposal ID'][results['Dataset']==prod['productFilename'][:len(results['Dataset'][0])].upper()]
+        prod['proposal_id'] = results['Proposal ID'][results['Dataset']==prod['productFilename'][:len(results['Dataset'][0])].upper()][0]
     #for prod in products:
     #    prod['target_name'] = observations['target_name'][observation['obsid']==prod['obsID']]
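
This fixes a classic astropy.table pitfall: indexing the Table by column name inside the row loop assigns to the whole column, so every iteration overwrote all rows with the last match; assigning through the row object (plus [0] to take a scalar from the matched column) touches only the current row. A minimal demonstration with a toy table and hypothetical Dataset-to-Proposal-ID mapping:

    from astropy.table import Table

    products = Table({'productFilename': ['x274020at_c0f.fits', 'x3nl0104t_c0f.fits'],
                      'proposal_id': ['none', 'none']})
    lookup = {'X274020A': '5144', 'X3NL0104': '8725'}   # hypothetical values

    for prod in products:
        key = prod['productFilename'][:8].upper()
        prod['proposal_id'] = lookup[key]   # row assignment: only this row changes
    print(list(products['proposal_id']))    # ['5144', '8725']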
@@ -169,7 +169,7 @@ def retrieve_products(target=None, proposal_id=None, output_dir='./data'):
             filepaths.append([obs_dir,file])
         prodpaths.append(np.array(filepaths,dtype=str))
-    return target, prodpaths
+    return target, np.array(prodpaths)

 if __name__ == "__main__":


@@ -485,18 +485,31 @@ def get_error(data_array, headers, error_array=None, data_mask=None,
     mask = zeropad(mask_c, data[0].shape).astype(bool)
     background = np.zeros((data.shape[0]))

+    #wavelength dependence of the polariser filters
+    #estimated to less than 1%
+    err_wav = data*0.01
+    #difference in PSFs through each polarizers
+    #estimated to less than 3%
+    err_psf = data*0.03
+    #flatfielding uncertainties
+    #estimated to less than 3%
+    err_flat = data*0.03
+
     if (sub_type is None):
-        n_data_array, n_error_array, headers, background = bkg_hist(data, error, mask, headers, subtract_error=subtract_error, display=display, savename=savename, plots_folder=plots_folder)
+        n_data_array, c_error_bkg, headers, background = bkg_hist(data, error, mask, headers, subtract_error=subtract_error, display=display, savename=savename, plots_folder=plots_folder)
     elif type(sub_type)==str:
         if sub_type.lower() in ['auto']:
-            n_data_array, n_error_array, headers, background = bkg_fit(data, error, mask, headers, subtract_error=subtract_error, display=display, savename=savename, plots_folder=plots_folder)
+            n_data_array, c_error_bkg, headers, background = bkg_fit(data, error, mask, headers, subtract_error=subtract_error, display=display, savename=savename, plots_folder=plots_folder)
         else:
-            n_data_array, n_error_array, headers, background = bkg_hist(data, error, mask, headers, sub_type=sub_type, subtract_error=subtract_error, display=display, savename=savename, plots_folder=plots_folder)
+            n_data_array, c_error_bkg, headers, background = bkg_hist(data, error, mask, headers, sub_type=sub_type, subtract_error=subtract_error, display=display, savename=savename, plots_folder=plots_folder)
     elif type(sub_type)==tuple:
-        n_data_array, n_error_array, headers, background = bkg_mini(data, error, mask, headers, sub_shape=sub_type, subtract_error=subtract_error, display=display, savename=savename, plots_folder=plots_folder)
+        n_data_array, c_error_bkg, headers, background = bkg_mini(data, error, mask, headers, sub_shape=sub_type, subtract_error=subtract_error, display=display, savename=savename, plots_folder=plots_folder)
     else:
         print("Warning: Invalid subtype.")
+    # Quadratically add uncertainties in the "correction factors" (see Kishimoto 1999)
+    n_error_array = np.sqrt(err_wav**2+err_psf**2+err_flat**2+c_error_bkg**2)

     if return_background:
         return n_data_array, n_error_array, headers, background
     else:
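
With the bkg_* routines now returning only the background-scaled error (c_error_bkg), the Kishimoto (1999) correction-factor terms removed from background.py are applied here, once, over the full stack. Per pixel this is σ² = (0.01·F)² + (0.03·F)² + (0.03·F)² + σ_bkg², i.e. roughly a 4.4 % flux-proportional noise floor on top of the background term. A compact check with arbitrary flux values:

    import numpy as np

    data = np.array([100., 400.])          # arbitrary fluxes
    c_error_bkg = np.array([1., 2.])       # background-scaled errors from bkg_*()
    err_wav, err_psf, err_flat = data*0.01, data*0.03, data*0.03
    n_error = np.sqrt(err_wav**2 + err_psf**2 + err_flat**2 + c_error_bkg**2)
    print(n_error / data)                  # ~[0.045 0.044]: flux-proportional floor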
@@ -553,29 +566,39 @@ def rebin_array(data_array, error_array, headers, pxsize, scale,
         (yet)".format(instr))

     rebinned_data, rebinned_error, rebinned_headers = [], [], []
-    Dxy = np.array([1, 1],dtype=int)
+    Dxy = np.array([1., 1.])

     # Routine for the FOC instrument
     if instr == 'FOC':
         HST_aper = 2400. # HST aperture in mm
+        Dxy_arr = np.ones((data_array.shape[0],2))
         for i, enum in enumerate(list(zip(data_array, error_array, headers))):
             image, error, header = enum
             # Get current pixel size
             w = WCS(header).deepcopy()
-            new_header = deepcopy(header)
             # Compute binning ratio
             if scale.lower() in ['px', 'pixel']:
-                Dxy = np.array([pxsize,]*2)
+                Dxy_arr[i] = np.array([pxsize,]*2)
             elif scale.lower() in ['arcsec','arcseconds']:
-                Dxy = np.floor(pxsize/np.abs(w.wcs.cdelt)/3600.).astype(int)
+                Dxy_arr[i] = np.array(pxsize/np.abs(w.wcs.cdelt)/3600.)
             elif scale.lower() in ['full','integrate']:
-                Dxy = np.floor(image.shape).astype(int)
+                Dxy_arr[i] = image.shape
             else:
                 raise ValueError("'{0:s}' invalid scale for binning.".format(scale))
+        new_shape = np.ceil(min(image.shape/Dxy_arr,key=lambda x:x[0]+x[1])).astype(int)
+        for i, enum in enumerate(list(zip(data_array, error_array, headers))):
+            image, error, header = enum
+            # Get current pixel size
+            w = WCS(header).deepcopy()
+            new_header = deepcopy(header)
+            Dxy = image.shape/new_shape
             if (Dxy < 1.).any():
                 raise ValueError("Requested pixel size is below resolution.")
-            new_shape = (image.shape//Dxy).astype(int)
+            new_shape = np.ceil(image.shape/Dxy).astype(int)
             # Rebin data
             rebin_data = bin_ndarray(image, new_shape=new_shape,
@@ -606,10 +629,15 @@ def rebin_array(data_array, error_array, headers, pxsize, scale,
             rebinned_error.append(np.sqrt(rms_image**2 + new_error**2))

             # Update header
-            w = w.slice((np.s_[::Dxy[0]], np.s_[::Dxy[1]]))
-            header['NAXIS1'],header['NAXIS2'] = w.array_shape
-            header.update(w.to_header())
-            rebinned_headers.append(header)
+            #nw = w.slice((np.s_[::Dxy[0]], np.s_[::Dxy[1]]))
+            nw = w.deepcopy()
+            nw.wcs.cdelt *= Dxy
+            nw.wcs.crpix /= Dxy
+            nw.array_shape = new_shape
+            new_header['NAXIS1'],new_header['NAXIS2'] = nw.array_shape
+            for key, val in nw.to_header().items():
+                new_header.set(key,val)
+            rebinned_headers.append(new_header)

     if not data_mask is None:
         data_mask = bin_ndarray(data_mask,new_shape=new_shape,operation='average') > 0.80
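
Instead of slicing the WCS (which only supports integer strides), the header update now scales it by hand: binning by a factor Dxy multiplies cdelt and divides crpix by the same factor, and writes into a fresh new_header so the original is not mutated across iterations. A sketch of that scaling on a bare astropy WCS, mirroring the commit's approach (all values illustrative):

    import numpy as np
    from astropy.wcs import WCS

    w = WCS(naxis=2)
    w.wcs.cdelt = np.array([-3.986e-6, 3.986e-6])   # deg/px, FOC-like scale (assumed)
    w.wcs.crpix = np.array([256., 256.])
    w.wcs.crval = np.array([40.67, -0.013])         # sky coords of the reference pixel

    Dxy = np.array([10., 10.])                      # 10x10 binning
    nw = w.deepcopy()
    nw.wcs.cdelt *= Dxy                             # bigger pixels on the sky
    nw.wcs.crpix /= Dxy                             # reference point on the new grid
    print(nw.wcs.cdelt, nw.wcs.crpix)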
@@ -1180,6 +1208,12 @@ def compute_Stokes(data_array, error_array, data_mask, headers,
     if mask.any():
         print("WARNING : found {0:d} pixels for which I_pol > I_stokes".format(I_stokes[mask].size))

+    # Statistical error: Poisson noise is assumed
+    sigma_flux = np.array([np.sqrt(flux/head['exptime']) for flux,head in zip(pol_flux,pol_headers)])
+    s_I2_stat = np.sum([coeff_stokes[0,i]**2*sigma_flux[i]**2 for i in range(len(sigma_flux))],axis=0)
+    s_Q2_stat = np.sum([coeff_stokes[1,i]**2*sigma_flux[i]**2 for i in range(len(sigma_flux))],axis=0)
+    s_U2_stat = np.sum([coeff_stokes[2,i]**2*sigma_flux[i]**2 for i in range(len(sigma_flux))],axis=0)
+
     # Compute the derivative of each Stokes parameter with respect to the polarizer orientation
     dI_dtheta1 = 2.*pol_eff[0]/A*(pol_eff[2]*np.cos(-2.*theta[2]+2.*theta[0])*(pol_flux[1]-I_stokes) - pol_eff[1]*np.cos(-2.*theta[0]+2.*theta[1])*(pol_flux[2]-I_stokes))
     dI_dtheta2 = 2.*pol_eff[1]/A*(pol_eff[0]*np.cos(-2.*theta[0]+2.*theta[1])*(pol_flux[2]-I_stokes) - pol_eff[2]*np.cos(-2.*theta[1]+2.*theta[2])*(pol_flux[0]-I_stokes))
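
The assumed Poisson term follows from the flux being stored in counts/s: with N = F·t_exp detected counts, σ_N = √N, so σ_F = √N / t_exp = √(F/t_exp); these per-image variances then propagate through the Stokes combination with the same coeff_stokes weights as the fluxes themselves. A numerical sanity check (illustrative values):

    import numpy as np

    flux, exptime = 50.0, 1200.0           # counts/s and s
    N = flux * exptime                     # 60000 detected counts
    sigma_flux = np.sqrt(N) / exptime      # error on the count rate
    assert np.isclose(sigma_flux, np.sqrt(flux/exptime))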
@@ -1205,9 +1239,9 @@ def compute_Stokes(data_array, error_array, data_mask, headers,
     #np.savetxt("output/sU_dir.txt", np.sqrt(s_U2_axis))

     # Add quadratically the uncertainty to the Stokes covariance matrix
-    Stokes_cov[0,0] += s_I2_axis
-    Stokes_cov[1,1] += s_Q2_axis
-    Stokes_cov[2,2] += s_U2_axis
+    Stokes_cov[0,0] += s_I2_axis + s_I2_stat
+    Stokes_cov[1,1] += s_Q2_axis + s_Q2_stat
+    Stokes_cov[2,2] += s_U2_axis + s_U2_stat

     #Compute integrated values for P, PA before any rotation
     mask = np.logical_and(data_mask.astype(bool), (I_stokes > 0.))