fix smoothing, move background computation to library file

Tibeuleu
2023-01-24 18:03:31 +01:00
parent d7b69a2367
commit 423b11a9e7
4 changed files with 400 additions and 402 deletions


@@ -18,12 +18,12 @@ from astropy.wcs import WCS
##### User inputs
## Input and output locations
-#globals()['data_folder'] = "../data/NGC1068_x274020/"
-#globals()['infiles'] = ['x274020at_c0f.fits','x274020bt_c0f.fits','x274020ct_c0f.fits',
-# 'x274020dt_c0f.fits','x274020et_c0f.fits','x274020ft_c0f.fits',
-# 'x274020gt_c0f.fits','x274020ht_c0f.fits','x274020it_c0f.fits']
-##psf_file = 'NGC1068_f253m00.fits'
-#globals()['plots_folder'] = "../plots/NGC1068_x274020/"
+globals()['data_folder'] = "../data/NGC1068_x274020/"
+globals()['infiles'] = ['x274020at_c0f.fits','x274020bt_c0f.fits','x274020ct_c0f.fits',
+'x274020dt_c0f.fits','x274020et_c0f.fits','x274020ft_c0f.fits',
+'x274020gt_c0f.fits','x274020ht_c0f.fits','x274020it_c0f.fits']
+#psf_file = 'NGC1068_f253m00.fits'
+globals()['plots_folder'] = "../plots/NGC1068_x274020/"
#globals()['data_folder'] = "../data/IC5063_x3nl030/"
#globals()['infiles'] = ['x3nl0301r_c0f.fits','x3nl0302r_c0f.fits','x3nl0303r_c0f.fits']
@@ -80,12 +80,12 @@ from astropy.wcs import WCS
#globals()['infiles'] = ['x3nl0201r_c0f.fits','x3nl0202r_c0f.fits','x3nl0203r_c0f.fits']
#globals()['plots_folder'] = "../plots/MKN78_x3nl020/"
-globals()['data_folder'] = "../data/MRK231_x4qr010/"
-globals()['infiles'] = ['x4qr010ar_c0f.fits', 'x4qr010br_c0f.fits', 'x4qr010dr_c0f.fits',
-'x4qr010er_c0f.fits', 'x4qr010gr_c0f.fits', 'x4qr010hr_c0f.fits',
-'x4qr010jr_c0f.fits', 'x4qr010kr_c0f.fits', 'x4qr0104r_c0f.fits',
-'x4qr0105r_c0f.fits', 'x4qr0107r_c0f.fits', 'x4qr0108r_c0f.fits']
-globals()['plots_folder'] = "../plots/MRK231_x4qr010/"
+#globals()['data_folder'] = "../data/MRK231_x4qr010/"
+#globals()['infiles'] = ['x4qr010ar_c0f.fits', 'x4qr010br_c0f.fits', 'x4qr010dr_c0f.fits',
+# 'x4qr010er_c0f.fits', 'x4qr010gr_c0f.fits', 'x4qr010hr_c0f.fits',
+# 'x4qr010jr_c0f.fits', 'x4qr010kr_c0f.fits', 'x4qr0104r_c0f.fits',
+# 'x4qr0105r_c0f.fits', 'x4qr0107r_c0f.fits', 'x4qr0108r_c0f.fits']
+#globals()['plots_folder'] = "../plots/MRK231_x4qr010/"
#globals()['data_folder'] = "../data/3C273_x0u20/"
#globals()['infiles'] = ['x0u20101t_c0f.fits','x0u20102t_c0f.fits','x0u20103t_c0f.fits',
@@ -130,30 +130,29 @@ def main():
# Initial crop
display_crop = False
# Error estimation
-error_sub_shape = (15,15)
+error_sub_type = 'freedman-diaconis' #sqrt, sturges, rice, scott, freedman-diaconis (default) or shape (example (15,15))
display_error = False
# Data binning
rebin = True
-if rebin:
-pxsize = 0.05
-px_scale = 'arcsec' #pixel, arcsec or full
-rebin_operation = 'sum' #sum or average
+pxsize = 0.10
+px_scale = 'arcsec' #pixel, arcsec or full
+rebin_operation = 'sum' #sum or average
# Alignement
align_center = 'image' #If None will align image to image center
display_data = False
# Smoothing
smoothing_function = 'combine' #gaussian_after, weighted_gaussian_after, gaussian, weighted_gaussian or combine
-smoothing_FWHM = 0.10 #If None, no smoothing is done
+smoothing_FWHM = 0.20 #If None, no smoothing is done
smoothing_scale = 'arcsec' #pixel or arcsec
# Rotation
-rotate_stokes = True #rotation to North convention can give erroneous results
+rotate_stokes = True
rotate_data = False #rotation to North convention can give erroneous results
# Final crop
crop = False #Crop to desired ROI
-final_display = True
+final_display = False
# Polarization map output
-figname = 'MRK231_FOC' #target/intrument name
-figtype = '_combine_FWHM010' #additionnal informations
+figname = 'NGC1068_FOC' #target/intrument name
+figtype = '_c_FWHM020' #additionnal informations
SNRp_cut = 5. #P measurments with SNR>3
SNRi_cut = 50. #I measurments with SNR>30, which implies an uncertainty in P of 4.7%.
step_vec = 1 #plot all vectors in the array. if step_vec = 2, then every other vector will be plotted
@@ -174,8 +173,7 @@ def main():
# Estimate error from data background, estimated from sub-image of desired sub_shape.
background = None
if px_scale.lower() not in ['full','integrate']:
-data_array, error_array, headers, background = proj_red.get_error_hist(data_array, headers, error_array, display=display_error, savename=figname+"_errors", plots_folder=plots_folder, return_background=True)
-# data_array, error_array, headers, background = proj_red.get_error(data_array, headers, error_array, display=display_error, savename=figname+"_errors", plots_folder=plots_folder, return_background=True)
+data_array, error_array, headers, background = proj_red.get_error(data_array, headers, error_array, sub_type=error_sub_type, display=display_error, savename=figname+"_errors", plots_folder=plots_folder, return_background=True)
# Align and rescale images with oversampling.
data_array, error_array, headers, data_mask = proj_red.align_data(data_array, headers, error_array=error_array, background=background, upsample_factor=10, ref_center=align_center, return_shifts=False)
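Usage note (not part of the commit): the new sub_type keyword is what ties the error_sub_type input above to the background library. A hedged sketch of the two call forms, reusing this script's variable names:

# Illustrative only: a string (or None) selects the histogram/mode background
# estimate with that binning rule, an int fixes the number of bins, and a tuple
# falls back to the minimal-flux sub-image search.
error_sub_type = 'freedman-diaconis'   # or 'sqrt', 'sturges', 'rice', 'scott', (15,15), ...
data_array, error_array, headers, background = proj_red.get_error(
    data_array, headers, error_array, sub_type=error_sub_type,
    display=display_error, savename=figname+"_errors",
    plots_folder=plots_folder, return_background=True)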

src/lib/background.py (new file, +302 lines)

@@ -0,0 +1,302 @@
"""
Library function for background estimation steps of the reduction pipeline.
prototypes :
- display_bkg(data, background, std_bkg, headers, histograms, bin_centers, rectangle, savename, plots_folder)
Display and save how the background noise is computed.
- bkg_hist(data, error, mask, headers, sub_type, display, savename, plots_folder) -> n_data_array, n_error_array, headers, background
Compute the error (noise) of the input array by computing the base mode of each image.
- bkg_mini(data, error, mask, headers, sub_shape, display, savename, plots_folder) -> n_data_array, n_error_array, headers, background
Compute the error (noise) of the input array by looking at the sub-region of minimal flux in every image and of shape sub_shape.
"""
import sys
from copy import deepcopy
import numpy as np
import matplotlib.pyplot as plt
import matplotlib.dates as mdates
from matplotlib.colors import LogNorm
from matplotlib.patches import Rectangle
from datetime import datetime
from lib.plots import plot_obs
def display_bkg(data, background, std_bkg, headers, histograms=None, bin_centers=None, rectangle=None, savename=None, plots_folder="./"):
plt.rcParams.update({'font.size': 15})
convert_flux = np.array([head['photflam'] for head in headers])
date_time = np.array([headers[i]['date-obs']+';'+headers[i]['time-obs']
for i in range(len(headers))])
date_time = np.array([datetime.strptime(d,'%Y-%m-%d;%H:%M:%S')
for d in date_time])
filt = np.array([headers[i]['filtnam1'] for i in range(len(headers))])
dict_filt = {"POL0":'r', "POL60":'g', "POL120":'b'}
c_filt = np.array([dict_filt[f] for f in filt])
fig,ax = plt.subplots(figsize=(10,6), constrained_layout=True)
for f in np.unique(filt):
mask = [fil==f for fil in filt]
ax.scatter(date_time[mask], background[mask]*convert_flux[mask],
color=dict_filt[f],label="{0:s}".format(f))
ax.errorbar(date_time, background*convert_flux,
yerr=std_bkg*convert_flux, fmt='+k',
markersize=0, ecolor=c_filt)
# Date handling
locator = mdates.AutoDateLocator()
formatter = mdates.ConciseDateFormatter(locator)
ax.xaxis.set_major_locator(locator)
ax.xaxis.set_major_formatter(formatter)
ax.set_xlabel("Observation date and time")
ax.set_ylabel(r"Flux [$ergs \cdot cm^{-2} \cdot s^{-1} \cdot \AA^{-1}$]")
plt.legend()
if not(savename is None):
fig.savefig(plots_folder+savename+"_background_flux.png", bbox_inches='tight')
if not(histograms is None):
filt_obs = {"POL0":0, "POL60":0, "POL120":0}
fig_h, ax_h = plt.subplots(figsize=(10,6), constrained_layout=True)
for i, (hist, bins) in enumerate(zip(histograms, bin_centers)):
filt_obs[headers[i]['filtnam1']] += 1
ax_h.plot(bins,hist,'+',color="C{0:d}".format(i),alpha=0.8,label=headers[i]['filtnam1']+' (Obs '+str(filt_obs[headers[i]['filtnam1']])+')')
ax_h.plot([background[i],background[i]],[hist.min(), hist.max()],'x--',color="C{0:d}".format(i),alpha=0.8)
ax_h.set_xscale('log')
ax_h.set_xlim([background.mean()*1e-2,background.mean()*1e2])
ax_h.set_xlabel(r"Count rate [$s^{-1}$]")
ax_h.set_ylabel(r"Number of pixels in bin")
ax_h.set_title("Histogram for each observation")
plt.legend()
if not(savename is None):
fig_h.savefig(plots_folder+savename+'_histograms.png', bbox_inches='tight')
fig2, ax2 = plt.subplots(figsize=(10,10))
data0 = data[0]*convert_flux[0]
bkg_data0 = data0 <= background[0]*convert_flux[0]
instr = headers[0]['instrume']
rootname = headers[0]['rootname']
exptime = headers[0]['exptime']
filt = headers[0]['filtnam1']
#plots
im = ax2.imshow(data0, norm=LogNorm(data0[data0>0.].mean()/10.,data0.max()), origin='lower', cmap='gray')
bkg_im = ax2.imshow(bkg_data0, origin='lower', cmap='Reds', alpha=0.7)
if not(rectangle is None):
x, y, width, height, angle, color = rectangle[0]
ax2.add_patch(Rectangle((x, y),width,height,edgecolor=color,fill=False))
ax2.annotate(instr+":"+rootname, color='white', fontsize=10, xy=(0.02, 0.95), xycoords='axes fraction')
ax2.annotate(filt, color='white', fontsize=14, xy=(0.02, 0.02), xycoords='axes fraction')
ax2.annotate(str(exptime)+" s", color='white', fontsize=10, xy=(0.80, 0.02), xycoords='axes fraction')
ax2.set(xlabel='pixel offset', ylabel='pixel offset')
fig2.subplots_adjust(hspace=0, wspace=0, right=0.85)
cbar_ax = fig2.add_axes([0.9, 0.12, 0.02, 0.75])
fig2.colorbar(im, cax=cbar_ax, label=r"Flux [$ergs \cdot cm^{-2} \cdot s^{-1} \cdot \AA^{-1}$]")
if not(savename is None):
fig2.savefig(plots_folder+savename+'_'+filt+'_background_location.png', bbox_inches='tight')
if not(rectangle is None):
plot_obs(data, headers, vmin=data.min(), vmax=data.max(), rectangle=rectangle,
savename=savename+"_background_location",plots_folder=plots_folder)
elif not(rectangle is None):
plot_obs(data, headers, vmin=data.min(), vmax=data.max(), rectangle=rectangle)
plt.show()
def bkg_hist(data, error, mask, headers, sub_type=None, display=False, savename=None, plots_folder=""):
"""
Compute the error (noise) of the input array by computing the base mode of each image.
----------
Inputs:
data : numpy.ndarray
Array containing the data to study (2D float arrays).
error : numpy.ndarray
Array of images (2D floats, aligned and of the same shape) containing
the error in each pixel of the observation images in data_array.
mask : numpy.ndarray
2D boolean array delimiting the data to work on.
headers : header list
Headers associated with the images in data_array.
sub_type : str or int, optional
If str, statistic rule to be used for the number of bins in counts/s.
If int, number of bins for the counts/s histogram.
Defaults to "Freedman-Diaconis".
display : boolean, optional
If True, data_array will be displayed with a rectangle around the
sub-image selected for background computation.
Defaults to False.
savename : str, optional
Name of the figure the map should be saved to. If None, the map won't
be saved (only displayed). Only used if display is True.
Defaults to None.
plots_folder : str, optional
Relative (or absolute) filepath to the folder in which the map will
be saved. Not used if savename is None.
Defaults to current folder.
----------
Returns:
data_array : numpy.ndarray
Array containing the data to study minus the background.
error_array : numpy.ndarray
Array containing the updated errors associated with the images in
data_array.
headers : header list
Updated headers associated with the images in data_array.
background : numpy.ndarray
Array containing the pixel background value for each image in
data_array.
"""
n_data_array, n_error_array = deepcopy(data), deepcopy(error)
error_bkg = np.ones(n_data_array.shape)
std_bkg = np.zeros((data.shape[0]))
background = np.zeros((data.shape[0]))
histograms, bin_centers = [], []
for i, image in enumerate(data):
#Compute the Count-rate histogram for the image
n_mask = np.logical_and(mask,image>0.)
if not (sub_type is None):
if type(sub_type) == int:
n_bins = sub_type
elif sub_type.lower() in ['sqrt']:
n_bins = np.fix(np.sqrt(image[n_mask].size)).astype(int) # Square-root
elif sub_type.lower() in ['sturges']:
n_bins = np.ceil(np.log2(image[n_mask].size)).astype(int)+1 # Sturges
elif sub_type.lower() in ['rice']:
n_bins = 2*np.fix(np.power(image[n_mask].size,1/3)).astype(int) # Rice
elif sub_type.lower() in ['scott']:
n_bins = np.fix((image[n_mask].max()-image[n_mask].min())/(3.5*image[n_mask].std()/np.power(image[n_mask].size,1/3))).astype(int) # Scott
else:
n_bins = np.fix((image[n_mask].max()-image[n_mask].min())/(2*np.subtract(*np.percentile(image[n_mask], [75, 25]))/np.power(image[n_mask].size,1/3))).astype(int) # Freedman-Diaconis
else:
n_bins = np.fix((image[n_mask].max()-image[n_mask].min())/(2*np.subtract(*np.percentile(image[n_mask], [75, 25]))/np.power(image[n_mask].size,1/3))).astype(int) # Freedman-Diaconis
hist, bin_edges = np.histogram(np.log(image[n_mask]),bins=n_bins)
histograms.append(hist)
bin_centers.append(np.exp((bin_edges[:-1]+bin_edges[1:])/2))
#Take the background as the count-rate with the maximum number of pixels
hist_max = bin_centers[-1][np.argmax(hist)]
bkg = np.sqrt(np.sum(image[np.abs(image-hist_max)/hist_max<0.5]**2)/image[np.abs(image-hist_max)/hist_max<0.5].size)
error_bkg[i] *= bkg
# Quadratically add uncertainties in the "correction factors" (see Kishimoto 1999)
#wavelength dependence of the polariser filters
#estimated to less than 1%
err_wav = data[i]*0.01
#difference in PSFs through each polarizers
#estimated to less than 3%
err_psf = data[i]*0.03
#flatfielding uncertainties
#estimated to less than 3%
err_flat = data[i]*0.03
n_error_array[i] = np.sqrt(n_error_array[i]**2 + error_bkg[i]**2 + err_wav**2 + err_psf**2 + err_flat**2)
#Subtract background
n_data_array[i][mask] = n_data_array[i][mask] - bkg
n_data_array[i][np.logical_and(mask,n_data_array[i] <= 0.01*bkg)] = 0.01*bkg
std_bkg[i] = image[np.abs(image-bkg)/bkg<1.].std()
background[i] = bkg
if display:
display_bkg(data, background, std_bkg, headers, histograms=histograms, bin_centers=bin_centers, savename=savename, plots_folder=plots_folder)
return n_data_array, n_error_array, headers, background
def bkg_mini(data, error, mask, headers, sub_shape=(15,15), display=False, savename=None, plots_folder=""):
"""
Look for sub-image of shape sub_shape that have the smallest integrated
flux (no source assumption) and define the background on the image by the
standard deviation on this sub-image.
----------
Inputs:
data : numpy.ndarray
Array containing the data to study (2D float arrays).
error : numpy.ndarray
Array of images (2D floats, aligned and of the same shape) containing
the error in each pixel of the observation images in data_array.
mask : numpy.ndarray
2D boolean array delimiting the data to work on.
headers : header list
Headers associated with the images in data_array.
sub_shape : tuple, optional
Shape of the sub-image to look for. Must be odd.
Defaults to 10% of input array.
display : boolean, optional
If True, data_array will be displayed with a rectangle around the
sub-image selected for background computation.
Defaults to False.
savename : str, optional
Name of the figure the map should be saved to. If None, the map won't
be saved (only displayed). Only used if display is True.
Defaults to None.
plots_folder : str, optional
Relative (or absolute) filepath to the folder in which the map will
be saved. Not used if savename is None.
Defaults to current folder.
----------
Returns:
data_array : numpy.ndarray
Array containing the data to study minus the background.
error_array : numpy.ndarray
Array containing the updated errors associated with the images in
data_array.
headers : header list
Updated headers associated with the images in data_array.
background : numpy.ndarray
Array containing the pixel background value for each image in
data_array.
"""
sub_shape = np.array(sub_shape)
# Make sub_shape of odd values
if not(np.all(sub_shape%2)):
sub_shape += 1-sub_shape%2
shape = np.array(data.shape)
diff = (sub_shape-1).astype(int)
temp = np.zeros((shape[0],shape[1]-diff[0],shape[2]-diff[1]))
n_data_array, n_error_array = deepcopy(data), deepcopy(error)
error_bkg = np.ones(n_data_array.shape)
std_bkg = np.zeros((data.shape[0]))
background = np.zeros((data.shape[0]))
rectangle = []
for i,image in enumerate(data):
# Find the sub-image of smallest integrated flux (suppose no source)
#sub-image dominated by background
fmax = np.finfo(np.double).max
img = deepcopy(image)
img[~mask] = fmax/(diff[0]*diff[1])
for r in range(temp.shape[1]):
for c in range(temp.shape[2]):
temp[i][r,c] = np.where(mask[r,c], img[r:r+diff[0],c:c+diff[1]].sum(), fmax/(diff[0]*diff[1]))
minima = np.unravel_index(np.argmin(temp.sum(axis=0)),temp.shape[1:])
for i, image in enumerate(data):
rectangle.append([minima[1], minima[0], sub_shape[1], sub_shape[0], 0., 'r'])
# Compute error : root mean square of the background
sub_image = image[minima[0]:minima[0]+sub_shape[0],minima[1]:minima[1]+sub_shape[1]]
#bkg = np.std(sub_image) # Previously computed using standard deviation over the background
bkg = np.sqrt(np.sum(sub_image**2)/sub_image.size)
error_bkg[i] *= bkg
# Quadratically add uncertainties in the "correction factors" (see Kishimoto 1999)
#wavelength dependence of the polariser filters
#estimated to less than 1%
err_wav = data[i]*0.01
#difference in PSFs through each polarizers
#estimated to less than 3%
err_psf = data[i]*0.03
#flatfielding uncertainties
#estimated to less than 3%
err_flat = data[i]*0.03
n_error_array[i] = np.sqrt(n_error_array[i]**2 + error_bkg[i]**2 + err_wav**2 + err_psf**2 + err_flat**2)
#Subtract background
n_data_array[i][mask] = n_data_array[i][mask] - bkg
n_data_array[i][np.logical_and(mask,n_data_array[i] <= 0.01*bkg)] = 0.01*bkg
std_bkg[i] = image[np.abs(image-bkg)/bkg<1.].std()
background[i] = bkg
if display:
display_bkg(data, background, std_bkg, headers, rectangle=rectangle, savename=savename, plots_folder=plots_folder)
return n_data_array, n_error_array, headers, background
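For orientation, a compact sketch of the mode-of-histogram estimate that bkg_hist implements above; the standalone function and the single-image assumption are mine, purely for illustration. In the library, the resulting background RMS is then added in quadrature to the input errors together with the ~1% wavelength, ~3% PSF and ~3% flat-field terms (Kishimoto 1999), and subtracted from the data.

import numpy as np

def sketch_hist_background(image, mask):
    """Illustrative only: mode-of-histogram background estimate, as in bkg_hist."""
    pix = image[np.logical_and(mask, image > 0.)]
    # Freedman-Diaconis rule for the number of bins (the default branch above)
    iqr = np.subtract(*np.percentile(pix, [75, 25]))
    n_bins = int(np.fix((pix.max() - pix.min()) / (2. * iqr / pix.size**(1. / 3.))))
    # Histogram of log(count rate); the background sits at the most populated bin
    hist, edges = np.histogram(np.log(pix), bins=n_bins)
    mode = np.exp((edges[:-1] + edges[1:]) / 2.)[np.argmax(hist)]
    # Background taken as the RMS of the pixels within 50% of that mode
    near = image[np.abs(image - mode) / mode < 0.5]
    return np.sqrt(np.sum(near**2) / near.size)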


@@ -1736,7 +1736,11 @@ class pol_map(object):
self.display_selection = "total_flux"
if self.display_selection.lower() in ['total_flux']:
self.data = self.I*self.convert_flux
-vmin, vmax = np.min(self.data[self.cut])/5., np.max(self.data[self.data > 0.])
+try:
+vmin, vmax = np.min(self.data[self.cut])/5., np.max(self.data[self.data > 0.])
+except ValueError:
+vmax = np.max(self.data[self.data > 0.])
+vmin = vmax*1e-3
norm = LogNorm(vmin, vmax)
label = r"$F_{\lambda}$ [$ergs \cdot cm^{-2} \cdot s^{-1} \cdot \AA^{-1}$]"
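The guard added here covers the case where the SNR cut leaves no valid pixels: NumPy reductions on an empty selection raise instead of returning a sentinel, so vmin falls back to vmax*1e-3. A minimal reminder of that behaviour (illustrative only):

import numpy as np
data = np.zeros((4, 4))
cut = data > 1.          # empty selection, e.g. when every pixel fails the SNR cut
np.min(data[cut])        # raises ValueError: zero-size array to reduction operation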


@@ -11,7 +11,7 @@ prototypes :
- deconvolve_array(data_array, headers, psf, FWHM, scale, shape, iterations, algo) -> deconvolved_data_array
Homogeneously deconvolve a data array using a chosen deconvolution algorithm.
-- get_error(data_array, headers, error_array, data_mask, sub_shape, display, savename, plots_folder, return_background) -> data_array, error_array, headers (, background)
+- get_error(data_array, headers, error_array, data_mask, sub_type, display, savename, plots_folder, return_background) -> data_array, error_array, headers (, background)
Compute the error (noise) on each image of the input array.
- rebin_array(data_array, error_array, headers, pxsize, scale, operation, data_mask) -> rebinned_data, rebinned_error, rebinned_headers, Dxy (, data_mask)
@@ -45,7 +45,6 @@ import matplotlib.pyplot as plt
import matplotlib.dates as mdates
from matplotlib.patches import Rectangle
from matplotlib.colors import LogNorm
-from datetime import datetime
from scipy.ndimage import rotate as sc_rotate, shift as sc_shift
from scipy.signal import convolve2d
from astropy.wcs import WCS
@@ -54,6 +53,7 @@ log.setLevel('ERROR')
import warnings
from lib.deconvolve import deconvolve_im, gaussian_psf, gaussian2d, zeropad
from lib.convex_hull import image_hull, clean_ROI
+from lib.background import bkg_hist, bkg_mini
from lib.plots import plot_obs
from lib.cross_correlation import phase_cross_correlation
@@ -408,205 +408,8 @@ def deconvolve_array(data_array, headers, psf='gaussian', FWHM=1., scale='px',
return deconv_array
def get_error_hist(data_array, headers, error_array=None, data_mask=None,
display=False, savename=None, plots_folder="",
return_background=False):
"""
----------
Inputs:
data_array : numpy.ndarray
Array containing the data to study (2D float arrays).
headers : header list
Headers associated with the images in data_array.
error_array : numpy.ndarray, optional
Array of images (2D floats, aligned and of the same shape) containing
the error in each pixel of the observation images in data_array.
If None, will be initialized to zeros.
Defaults to None.
data_mask : numpy.ndarray, optional
2D boolean array delimiting the data to work on.
If None, will be initialized with a full true mask.
Defaults to None.
display : boolean, optional
If True, data_array will be displayed with a rectangle around the
sub-image selected for background computation.
Defaults to False.
savename : str, optional
Name of the figure the map should be saved to. If None, the map won't
be saved (only displayed). Only used if display is True.
Defaults to None.
plots_folder : str, optional
Relative (or absolute) filepath to the folder in wich the map will
be saved. Not used if savename is None.
Defaults to current folder.
return_background : boolean, optional
If True, the pixel background value for each image in data_array is
returned.
Defaults to False.
----------
Returns:
data_array : numpy.ndarray
Array containing the data to study minus the background.
headers : header list
Updated headers associated with the images in data_array.
error_array : numpy.ndarray
Array containing the background values associated to the images in
data_array.
background : numpy.ndarray
Array containing the pixel background value for each image in
data_array.
Only returned if return_background is True.
"""
# Crop out any null edges
if error_array is None:
error_array = np.zeros(data_array.shape)
n_data_array, n_error_array = deepcopy(data_array), deepcopy(error_array)
if not data_mask is None:
data, mask = n_data_array, deepcopy(data_mask)
else:
data, _, _ = crop_array(n_data_array, headers, step=5, null_val=0., inside=False)
data_mask = np.ones(data_array[0].shape, dtype=bool)
mask = np.ones(data[0].shape, dtype=bool)
error_bkg = np.ones(n_data_array.shape)
std_bkg = np.zeros((data.shape[0]))
background = np.zeros((data.shape[0]))
if display:
plt.rcParams.update({'font.size': 15})
filt_obs = {"POL0":0, "POL60":0, "POL120":0}
fig_h, ax_h = plt.subplots(figsize=(10,6), constrained_layout=True)
date_time = np.array([headers[i]['date-obs']+';'+headers[i]['time-obs']
for i in range(len(headers))])
date_time = np.array([datetime.strptime(d,'%Y-%m-%d;%H:%M:%S')
for d in date_time])
for i, image in enumerate(data):
#Compute the Count-rate histogram for the image
n_mask = np.logical_and(mask,image>0.)
#n_bins = np.fix(np.sqrt(image[n_mask].size)).astype(int) # Square-root
#n_bins = np.ceil(np.log2(image[n_mask].size)).astype(int)+1 # Sturges
#n_bins = 2*np.fix(np.power(image[n_mask].size,1/3)).astype(int) # Rice
#n_bins = np.fix((image[n_mask].max()-image[n_mask].min())/(3.5*image[n_mask].std()/np.power(image[n_mask].size,1/3))).astype(int) # Scott
n_bins = np.fix((image[n_mask].max()-image[n_mask].min())/(2*np.subtract(*np.percentile(image[n_mask], [75, 25]))/np.power(image[n_mask].size,1/3))).astype(int) # Freedman-Diaconis
hist, bin_edges = np.histogram(np.log(image[n_mask]),bins=n_bins)
bin_centers = np.exp((bin_edges[:-1]+bin_edges[1:])/2)
#Take the background as the count-rate with the maximum number of pixels
hist_max = bin_centers[np.argmax(hist)]
bkg = np.sqrt(np.sum(image[np.abs(image-hist_max)/hist_max<0.5]**2)/image[np.abs(image-hist_max)/hist_max<0.5].size)
#bkg = np.sum(image[n_mask]*1/np.abs(image[n_mask]-hist_max))/np.sum(1/np.abs(image[n_mask]-hist_max))
#bkg = np.percentile(image[image<hist_max],25.)
#bkg = 0.95*hist_max
if display:
filt_obs[headers[i]['filtnam1']] += 1
ax_h.plot(bin_centers,hist,'+',color="C{0:d}".format(i),alpha=0.8,label=headers[i]['filtnam1']+' (Obs '+str(filt_obs[headers[i]['filtnam1']])+')')
ax_h.plot([bkg,bkg],[hist.min(), hist.max()],'x--',color="C{0:d}".format(i),alpha=0.8)
#print(headers[i]['filtnam1']+' ('+str(date_time[i])+') : n_bins =',n_bins,'; bkg = {0:.2e}'.format(bkg))
error_bkg[i] *= bkg
# Quadratically add uncertainties in the "correction factors" (see Kishimoto 1999)
#wavelength dependence of the polariser filters
#estimated to less than 1%
err_wav = data_array[i]*0.01
#difference in PSFs through each polarizers
#estimated to less than 3%
err_psf = data_array[i]*0.03
#flatfielding uncertainties
#estimated to less than 3%
err_flat = data_array[i]*0.03
n_error_array[i] = np.sqrt(n_error_array[i]**2 + error_bkg[i]**2 + err_wav**2 + err_psf**2 + err_flat**2)
#Substract background
n_data_array[i][data_mask] = n_data_array[i][data_mask] - bkg
n_data_array[i][np.logical_and(data_mask,n_data_array[i] <= 0.01*bkg)] = 0.01*bkg
std_bkg[i] = image[np.abs(image-bkg)/bkg<1.].std()
background[i] = bkg
if (data_array[i] < 0.).any():
print(data_array[i])
#if i==0:
#np.savetxt("output/s_bg.txt",error_bkg[i])
#np.savetxt("output/s_wav.txt",err_wav)
#np.savetxt("output/s_psf.txt",err_psf)
#np.savetxt("output/s_flat.txt",err_flat)
if display:
ax_h.set_xscale('log')
ax_h.set_xlim([background.mean()*1e-2,background.mean()*1e2])
ax_h.set_xlabel(r"Count rate [$s^{-1}$]")
#ax_h.set_yscale('log')
ax_h.set_ylabel(r"Number of pixels in bin")
ax_h.set_title("Histogram for each observation")
plt.legend()
convert_flux = np.array([head['photflam'] for head in headers])
filt = np.array([headers[i]['filtnam1'] for i in range(len(headers))])
dict_filt = {"POL0":'r', "POL60":'g', "POL120":'b'}
c_filt = np.array([dict_filt[f] for f in filt])
fig,ax = plt.subplots(figsize=(10,6), constrained_layout=True)
for f in np.unique(filt):
mask = [fil==f for fil in filt]
ax.scatter(date_time[mask], background[mask]*convert_flux[mask],
color=dict_filt[f],label="{0:s}".format(f))
ax.errorbar(date_time, background*convert_flux,
yerr=std_bkg*convert_flux, fmt='+k',
markersize=0, ecolor=c_filt)
# Date handling
locator = mdates.AutoDateLocator()
formatter = mdates.ConciseDateFormatter(locator)
ax.xaxis.set_major_locator(locator)
ax.xaxis.set_major_formatter(formatter)
ax.set_xlabel("Observation date and time")
ax.set_ylabel(r"Flux [$ergs \cdot cm^{-2} \cdot s^{-1} \cdot \AA^{-1}$]")
#ax.set_title("Background flux and error computed for each image")
plt.legend()
plt.rcParams.update({'font.size': 15})
fig2, ax2 = plt.subplots(figsize=(10,10))
data0 = data[0]*convert_flux[0]
bkg_data0 = data0 <= background[0]*convert_flux[0]
instr = headers[0]['instrume']
rootname = headers[0]['rootname']
exptime = headers[0]['exptime']
filt = headers[0]['filtnam1']
#plots
#im = ax2.imshow(data0, vmin=data0.min(), vmax=data0.max(), origin='lower', cmap='gray')
im = ax2.imshow(data0, norm=LogNorm(data0[data0>0.].mean()/10.,data0.max()), origin='lower', cmap='gray')
bkg_im = ax2.imshow(bkg_data0, origin='lower', cmap='Reds', alpha=0.7)
ax2.annotate(instr+":"+rootname, color='white', fontsize=10,
xy=(0.02, 0.95), xycoords='axes fraction')
ax2.annotate(filt, color='white', fontsize=14, xy=(0.02, 0.02),
xycoords='axes fraction')
ax2.annotate(str(exptime)+" s", color='white', fontsize=10, xy=(0.80, 0.02),
xycoords='axes fraction')
ax2.set(#title="Location of background computation.",
xlabel='pixel offset',
ylabel='pixel offset')
fig2.subplots_adjust(hspace=0, wspace=0, right=0.85)
cbar_ax = fig2.add_axes([0.9, 0.12, 0.02, 0.75])
fig2.colorbar(im, cax=cbar_ax, label=r"Flux [$ergs \cdot cm^{-2} \cdot s^{-1} \cdot \AA^{-1}$]")
if not(savename is None):
fig.savefig(plots_folder+savename+"_background_flux.png",
bbox_inches='tight')
fig2.savefig(plots_folder+savename+'_'+filt+'_background_location.png',
bbox_inches='tight')
fig_h.savefig(plots_folder+savename+'_histograms.png', bbox_inches='tight')
plt.show()
if return_background:
return n_data_array, n_error_array, headers, background
else:
return n_data_array, n_error_array, headers
def get_error(data_array, headers, error_array=None, data_mask=None,
-sub_shape=None, display=False, savename=None, plots_folder="",
+sub_type=None, display=False, savename=None, plots_folder="",
return_background=False):
"""
Look for sub-image of shape sub_shape that have the smallest integrated
@@ -627,9 +430,11 @@ def get_error(data_array, headers, error_array=None, data_mask=None,
2D boolean array delimiting the data to work on.
If None, will be initialized with a full true mask.
Defaults to None.
-sub_shape : tuple, optional
-Shape of the sub-image to look for. Must be odd.
-Defaults to 10% of input array.
+sub_type : str or int or tuple, optional
+If str, statistic rule to be used for the number of bins in counts/s.
+If int, number of bins for the counts/s histogram.
+If tuple, shape of the sub-image to look for. Must be odd.
+Defaults to "Freedman-Diaconis".
display : boolean, optional
If True, data_array will be displayed with a rectangle around the
sub-image selected for background computation.
@@ -667,141 +472,16 @@ def get_error(data_array, headers, error_array=None, data_mask=None,
if not data_mask is None:
data, error, mask = n_data_array, n_error_array, deepcopy(data_mask)
else:
-data, _, _ = crop_array(n_data_array, headers, step=5, null_val=0., inside=False)
+data, error, _ = crop_array(n_data_array, headers, n_error_array, step=5, null_val=0., inside=False)
+data_mask = np.ones(n_data_array[0].shape, dtype=bool)
mask = np.ones(data[0].shape, dtype=bool)
-if sub_shape is None:
-sub_shape = (np.array(data_array.shape[1:])/10).astype(int)
-sub_shape = np.array(sub_shape)
-# Make sub_shape of odd values
-if not(np.all(sub_shape%2)):
-sub_shape += 1-sub_shape%2
-shape = np.array(data.shape)
-diff = (sub_shape-1).astype(int)
-temp = np.zeros((shape[0],shape[1]-diff[0],shape[2]-diff[1]))
-error_bkg = np.ones(data_array.shape)
-std_bkg = np.zeros((data.shape[0]))
-background = np.zeros((shape[0]))
+background = np.zeros((data.shape[0]))
+if (sub_type is None) or (type(sub_type)==str):
+n_data_array, n_error_array, headers, background = bkg_hist(data, error, mask, headers, sub_type=sub_type, display=display, savename=savename, plots_folder=plots_folder)
+elif type(sub_type)==tuple:
+n_data_array, n_error_array, headers, background = bkg_mini(data, error, mask, headers, sub_shape=sub_type, display=display, savename=savename, plots_folder=plots_folder)
+else:
+print("Warning: Invalid subtype.")
rectangle = []
for i,image in enumerate(data):
# Find the sub-image of smallest integrated flux (suppose no source)
#sub-image dominated by background
fmax = np.finfo(np.double).max
img = deepcopy(image)
img[1-mask] = fmax/(diff[0]*diff[1])
for r in range(temp.shape[1]):
for c in range(temp.shape[2]):
temp[i][r,c] = np.where(mask[r,c], img[r:r+diff[0],c:c+diff[1]].sum(), fmax/(diff[0]*diff[1]))
minima = np.unravel_index(np.argmin(temp.sum(axis=0)),temp.shape[1:])
for i, image in enumerate(data):
rectangle.append([minima[1], minima[0], sub_shape[1], sub_shape[0], 0., 'r'])
# Compute error : root mean square of the background
sub_image = image[minima[0]:minima[0]+sub_shape[0],minima[1]:minima[1]+sub_shape[1]]
#bkg = np.std(sub_image) # Previously computed using standard deviation over the background
bkg = np.sqrt(np.sum(sub_image**2)/sub_image.size)
error_bkg[i] *= bkg
# Quadratically add uncertainties in the "correction factors" (see Kishimoto 1999)
#wavelength dependence of the polariser filters
#estimated to less than 1%
err_wav = data_array[i]*0.01
#difference in PSFs through each polarizers
#estimated to less than 3%
err_psf = data_array[i]*0.03
#flatfielding uncertainties
#estimated to less than 3%
err_flat = data_array[i]*0.03
n_error_array[i] = np.sqrt(error_array[i]**2 + error_bkg[i]**2 + err_wav**2 + err_psf**2 + err_flat**2)
#Substract background
n_data_array[i][data_mask] = n_data_array[i][data_mask] - bkg
n_data_array[i][np.logical_and(data_mask,n_data_array[i] <= 0.01*bkg)] = 0.01*bkg
std_bkg[i] = image[np.abs(image-bkg)/bkg<1.].std()
background[i] = bkg
if (data_array[i] < 0.).any():
print(data_array[i])
#if i==0:
#np.savetxt("output/s_bg.txt",error_bkg[i])
#np.savetxt("output/s_wav.txt",err_wav)
#np.savetxt("output/s_psf.txt",err_psf)
#np.savetxt("output/s_flat.txt",err_flat)
if display:
plt.rcParams.update({'font.size': 15})
convert_flux = np.array([head['photflam'] for head in headers])
date_time = np.array([headers[i]['date-obs']+';'+headers[i]['time-obs']
for i in range(len(headers))])
date_time = np.array([datetime.strptime(d,'%Y-%m-%d;%H:%M:%S')
for d in date_time])
filt = np.array([headers[i]['filtnam1'] for i in range(len(headers))])
dict_filt = {"POL0":'r', "POL60":'g', "POL120":'b'}
c_filt = np.array([dict_filt[f] for f in filt])
fig,ax = plt.subplots(figsize=(10,6), constrained_layout=True)
for f in np.unique(filt):
mask = [fil==f for fil in filt]
ax.scatter(date_time[mask], background[mask]*convert_flux[mask],
color=dict_filt[f],label="{0:s}".format(f))
ax.errorbar(date_time, background*convert_flux,
yerr=std_bkg*convert_flux, fmt='+k',
markersize=0, ecolor=c_filt)
# Date handling
locator = mdates.AutoDateLocator()
formatter = mdates.ConciseDateFormatter(locator)
ax.xaxis.set_major_locator(locator)
ax.xaxis.set_major_formatter(formatter)
ax.set_xlabel("Observation date and time")
ax.set_ylabel(r"Flux [$ergs \cdot cm^{-2} \cdot s^{-1} \cdot \AA^{-1}$]")
#ax.set_title("Background flux and error computed for each image")
plt.legend()
plt.rcParams.update({'font.size': 15})
fig2, ax2 = plt.subplots(figsize=(10,10))
data0 = data[0]*convert_flux[0]
instr = headers[0]['instrume']
rootname = headers[0]['rootname']
exptime = headers[0]['exptime']
filt = headers[0]['filtnam1']
#plots
#im = ax2.imshow(data0, vmin=data0.min(), vmax=data0.max(), origin='lower', cmap='gray')
im = ax2.imshow(data0, norm=LogNorm(data0[data0>0.].mean()/10.,data0.max()), origin='lower', cmap='gray')
x, y, width, height, angle, color = rectangle[0]
ax2.add_patch(Rectangle((x, y),width,height,edgecolor=color,fill=False))
ax2.annotate(instr+":"+rootname, color='white', fontsize=10,
xy=(0.02, 0.95), xycoords='axes fraction')
ax2.annotate(filt, color='white', fontsize=14, xy=(0.02, 0.02),
xycoords='axes fraction')
ax2.annotate(str(exptime)+" s", color='white', fontsize=10, xy=(0.80, 0.02),
xycoords='axes fraction')
ax2.set(#title="Location of background computation.",
xlabel='pixel offset',
ylabel='pixel offset')
fig2.subplots_adjust(hspace=0, wspace=0, right=0.85)
cbar_ax = fig2.add_axes([0.9, 0.12, 0.02, 0.75])
fig2.colorbar(im, cax=cbar_ax, label=r"Flux [$ergs \cdot cm^{-2} \cdot s^{-1} \cdot \AA^{-1}$]")
if not(savename is None):
fig.savefig(plots_folder+savename+"_background_flux.png",
bbox_inches='tight')
fig2.savefig(plots_folder+savename+'_'+filt+'_background_location.png',
bbox_inches='tight')
plot_obs(data, headers, vmin=data.min(), vmax=data.max(),
rectangle=rectangle,
savename=savename+"_background_location",
plots_folder=plots_folder)
else:
plot_obs(data, headers, vmin=vmin, vmax=vmax,
rectangle=rectangle)
plt.show()
if return_background:
return n_data_array, n_error_array, headers, background
@@ -879,7 +559,7 @@ def rebin_array(data_array, error_array, headers, pxsize, scale,
else:
raise ValueError("'{0:s}' invalid scale for binning.".format(scale))
-if (Dxy <= 1.).any():
+if (Dxy < 1.).any():
raise ValueError("Requested pixel size is below resolution.")
new_shape = (image.shape//Dxy).astype(int)
@@ -1154,10 +834,10 @@ def smooth_data(data_array, error_array, data_mask, headers, FWHM=1.,
dist_rc = np.where(data_mask, np.sqrt((r-xx)**2+(c-yy)**2), fmax)
# Catch expected "OverflowWarning" as we overflow values that are not in the image
with warnings.catch_warnings(record=True) as w:
-g_rc = np.array([np.exp(-0.5*(dist_rc/stdev)**2),]*data_array.shape[0])
+g_rc = np.array([np.exp(-0.5*(dist_rc/stdev)**2)/(2.*np.pi*stdev**2),]*data_array.shape[0])
# Apply weighted combination
-smoothed[r,c] = np.where(data_mask[r,c], np.sum(data_array*weight*g_rc)/np.sum(weight*g_rc), 0.)
-error[r,c] = np.where(data_mask[r,c], np.sqrt(np.sum(weight*g_rc**2))/np.sum(weight*g_rc), 0.)
+smoothed[r,c] = np.where(data_mask[r,c], np.sum(data_array*weight*g_rc)/np.sum(weight*g_rc), data_array.mean(axis=0)[r,c])
+error[r,c] = np.where(data_mask[r,c], np.sqrt(np.sum(weight*g_rc**2))/np.sum(weight*g_rc), (np.sqrt(np.sum(error_array**2,axis=0)/error_array.shape[0]))[r,c])
# Nan handling
error[np.isnan(smoothed)] = 0.
@@ -1173,11 +853,12 @@ def smooth_data(data_array, error_array, data_mask, headers, FWHM=1.,
weights = np.ones(error_array[i].shape)
if smoothing.lower()[:6] in ['weight']:
weights = 1./error_array[i]**2
-#weights /= weights.max()
+weights[1-data_mask] = 0.
+weights /= weights.sum()
kernel = gaussian2d(x, y, stdev)
kernel /= kernel.sum()
-smoothed[i] = convolve2d(image*weights/image.sum(),kernel,'same')*image.sum()
-error[i] = np.sqrt(convolve2d((error_array[i]/error_array[i].sum())**2*weights**2,kernel**2,'same'))*error_array[i].sum()
+smoothed[i] = np.where(data_mask, convolve2d(image*weights,kernel,'same')/convolve2d(weights,kernel,'same'), image)
+error[i] = np.where(data_mask, np.sqrt(convolve2d(error_array[i]**2*weights**2,kernel**2,'same'))/convolve2d(weights,kernel,'same'), error_array[i])
# Nan handling
error[i][np.isnan(smoothed[i])] = 0.
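In other words, the corrected weighted-Gaussian branch is a normalized weighted convolution: the inverse-variance-weighted image and the weights themselves are each convolved with the Gaussian kernel, and their ratio keeps the flux scale right near masked pixels. A self-contained sketch of that idea; the standalone function and its names are mine, not the library API:

import numpy as np
from scipy.signal import convolve2d

def sketch_weighted_gaussian_smooth(image, error, data_mask, kernel):
    """Illustrative only: normalized inverse-variance-weighted Gaussian smoothing."""
    weights = 1. / error**2
    weights[~data_mask] = 0.                    # masked pixels carry no weight
    norm = convolve2d(weights, kernel, 'same')  # local sum of weights
    smoothed = np.where(data_mask,
                        convolve2d(image * weights, kernel, 'same') / norm, image)
    # Propagate the per-pixel errors through the same weighting
    err = np.where(data_mask,
                   np.sqrt(convolve2d(error**2 * weights**2, kernel**2, 'same')) / norm,
                   error)
    # Where every pixel under the kernel is masked, norm is 0 and the result is NaN;
    # the library zeroes the error at those pixels afterwards.
    return smoothed, err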
@@ -1254,6 +935,10 @@ def polarizer_avg(data_array, error_array, data_mask, headers, FWHM=None,
print("Warning : no image for POL120 of FOC found, averaged data\
will be NAN")
# Put each polarizer images in separate arrays
+headers0 = [header for header in headers if header['filtnam1']=='POL0']
+headers60 = [header for header in headers if header['filtnam1']=='POL60']
+headers120 = [header for header in headers if header['filtnam1']=='POL120']
pol0_array = data_array[is_pol0]
pol60_array = data_array[is_pol60]
pol120_array = data_array[is_pol120]
@@ -1262,10 +947,6 @@ def polarizer_avg(data_array, error_array, data_mask, headers, FWHM=None,
err60_array = error_array[is_pol60]
err120_array = error_array[is_pol120]
-headers0 = [header for header in headers if header['filtnam1']=='POL0']
-headers60 = [header for header in headers if header['filtnam1']=='POL60']
-headers120 = [header for header in headers if header['filtnam1']=='POL120']
if not(FWHM is None) and (smoothing.lower() in ['combine','combining']):
# Smooth by combining each polarizer images
pol0, err0 = smooth_data(pol0_array, err0_array, data_mask, headers0,
@@ -1282,30 +963,31 @@ def polarizer_avg(data_array, error_array, data_mask, headers, FWHM=None,
pol120_t = np.sum([header['exptime'] for header in headers120])
for i in range(pol0_array.shape[0]):
-pol0_array[i] *= headers0[i]['exptime']/pol0_t
-err0_array[i] *= headers0[i]['exptime']/pol0_t
+pol0_array[i] *= headers0[i]['exptime']
+err0_array[i] *= headers0[i]['exptime']
for i in range(pol60_array.shape[0]):
-pol60_array[i] *= headers60[i]['exptime']/pol60_t
-err60_array[i] *= headers60[i]['exptime']/pol60_t
+pol60_array[i] *= headers60[i]['exptime']
+err60_array[i] *= headers60[i]['exptime']
for i in range(pol120_array.shape[0]):
-pol120_array[i] *= headers120[i]['exptime']/pol120_t
-err120_array[i] *= headers120[i]['exptime']/pol120_t
+pol120_array[i] *= headers120[i]['exptime']
+err120_array[i] *= headers120[i]['exptime']
-pol0 = pol0_array.sum(axis=0)
-pol60 = pol60_array.sum(axis=0)
-pol120 = pol120_array.sum(axis=0)
+pol0 = pol0_array.sum(axis=0)/pol0_t
+pol60 = pol60_array.sum(axis=0)/pol60_t
+pol120 = pol120_array.sum(axis=0)/pol120_t
pol_array = np.array([pol0, pol60, pol120])
+pol_headers = [headers0[0], headers60[0], headers120[0]]
# Propagate uncertainties quadratically
-err0 = np.sqrt(np.sum(err0_array**2,axis=0))
-err60 = np.sqrt(np.sum(err60_array**2,axis=0))
-err120 = np.sqrt(np.sum(err120_array**2,axis=0))
+err0 = np.sqrt(np.sum(err0_array**2,axis=0))/pol0_t
+err60 = np.sqrt(np.sum(err60_array**2,axis=0))/pol60_t
+err120 = np.sqrt(np.sum(err120_array**2,axis=0))/pol120_t
polerr_array = np.array([err0, err60, err120])
-if not(FWHM is None) and (smoothing.lower() in ['gaussian','gauss']):
+if not(FWHM is None) and (smoothing.lower() in ['gaussian','gauss','weighted_gaussian','weight_gauss']):
# Smooth by convoluting with a gaussian each polX image.
pol_array, polerr_array = smooth_data(pol_array, polerr_array,
-data_mask, headers, FWHM=FWHM, scale=scale)
+data_mask, pol_headers, FWHM=FWHM, scale=scale, smoothing=smoothing)
pol0, pol60, pol120 = pol_array
err0, err60, err120 = polerr_array
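The block above forms an exposure-time-weighted mean per polarizer: each frame is multiplied by its own exposure time, the stack is summed and divided by the total, and the quadratic error sum is rescaled the same way. A tiny numeric sketch with made-up values, assuming two POL0 frames:

import numpy as np

exptime = np.array([200., 600.])              # seconds per frame (made up)
frames = np.array([np.full((2, 2), 1.0),      # count-rate images (made up)
                   np.full((2, 2), 2.0)])

pol0_t = exptime.sum()
pol0 = (frames * exptime[:, None, None]).sum(axis=0) / pol0_t
# Every pixel becomes (200*1.0 + 600*2.0) / 800 = 1.75, i.e. the longer
# exposure dominates the weighted mean.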
@@ -1340,7 +1022,7 @@ def polarizer_avg(data_array, error_array, data_mask, headers, FWHM=None,
def compute_Stokes(data_array, error_array, data_mask, headers,
-FWHM=None, scale='pixel', smoothing='gaussian_after', transmitcorr=False):
+FWHM=None, scale='pixel', smoothing='combine', transmitcorr=False):
"""
Compute the Stokes parameters I, Q and U for a given data_set
----------
@@ -1371,7 +1053,7 @@ def compute_Stokes(data_array, error_array, data_mask, headers,
deviation stdev = FWHM/(2*sqrt(2*log(2))).
-'gaussian_after' convolve output Stokes I/Q/U with a gaussian of
standard deviation stdev = FWHM/(2*sqrt(2*log(2))).
-Defaults to 'gaussian_after'. Won't be used if FWHM is None.
+Defaults to 'combine'. Won't be used if FWHM is None.
transmitcorr : bool, optional
Weither the images should be transmittance corrected for each filter
along the line of sight. Latest calibrated data products (.c0f) does
@@ -1457,6 +1139,30 @@ def compute_Stokes(data_array, error_array, data_mask, headers,
I_stokes[i,j], Q_stokes[i,j], U_stokes[i,j] = np.dot(coeff_stokes, pol_flux[:,i,j]).T
Stokes_cov[:,:,i,j] = np.dot(coeff_stokes, np.dot(pol_cov[:,:,i,j], coeff_stokes.T))
if not(FWHM is None) and (smoothing.lower() in ['weighted_gaussian_after','weight_gauss_after','gaussian_after','gauss_after']):
smoothing = smoothing.lower()[:-6]
Stokes_array = np.array([I_stokes, Q_stokes, U_stokes])
Stokes_error = np.array([np.sqrt(Stokes_cov[i,i]) for i in range(3)])
Stokes_headers = headers[0:3]
Stokes_array, Stokes_error = smooth_data(Stokes_array, Stokes_error, data_mask,
headers=Stokes_headers, FWHM=FWHM, scale=scale, smoothing=smoothing)
I_stokes, Q_stokes, U_stokes = Stokes_array
Stokes_cov[0,0], Stokes_cov[1,1], Stokes_cov[2,2] = deepcopy(Stokes_error**2)
sStokes_array = np.array([I_stokes*Q_stokes, I_stokes*U_stokes, Q_stokes*U_stokes])
sStokes_error = np.array([Stokes_cov[0,1], Stokes_cov[0,2], Stokes_cov[1,2]])
uStokes_error = np.array([Stokes_cov[1,0], Stokes_cov[2,0], Stokes_cov[2,1]])
sStokes_array, sStokes_error = smooth_data(sStokes_array, sStokes_error, data_mask,
headers=Stokes_headers, FWHM=FWHM, scale=scale, smoothing=smoothing)
uStokes_array, uStokes_error = smooth_data(sStokes_array, uStokes_error, data_mask,
headers=Stokes_headers, FWHM=FWHM, scale=scale, smoothing=smoothing)
Stokes_cov[0,1], Stokes_cov[0,2], Stokes_cov[1,2] = deepcopy(sStokes_error)
Stokes_cov[1,0], Stokes_cov[2,0], Stokes_cov[2,1] = deepcopy(uStokes_error)
mask = (Q_stokes**2 + U_stokes**2) > I_stokes**2
if mask.any():
print("WARNING : found {0:d} pixels for which I_pol > I_stokes".format(I_stokes[mask].size))
@@ -1490,18 +1196,6 @@ def compute_Stokes(data_array, error_array, data_mask, headers,
Stokes_cov[1,1] += s_Q2_axis
Stokes_cov[2,2] += s_U2_axis
if not(FWHM is None) and (smoothing.lower() in ['weighted_gaussian_after','weight_gauss_after','gaussian_after','gauss_after']):
smoothing = smoothing.lower()[:-6]
Stokes_array = np.array([I_stokes, Q_stokes, U_stokes])
Stokes_error = np.array([np.sqrt(Stokes_cov[i,i]) for i in range(3)])
Stokes_headers = headers[0:3]
Stokes_array, Stokes_error = smooth_data(Stokes_array, Stokes_error, data_mask,
headers=Stokes_headers, FWHM=FWHM, scale=scale, smoothing=smoothing)
I_stokes, Q_stokes, U_stokes = Stokes_array
Stokes_cov[0,0], Stokes_cov[1,1], Stokes_cov[2,2] = Stokes_error**2
#Compute integrated values for P, PA before any rotation
mask = np.logical_and(data_mask.astype(bool), (I_stokes > 0.))
n_pix = I_stokes[mask].size