add data rotation (instead of stokes rotation) and add sentinels

This commit is contained in:
Thibault Barnouin
2021-06-10 20:02:53 +02:00
parent f7b6d9dfaf
commit 1dc40f96c4
41 changed files with 178 additions and 43 deletions

View File

@@ -16,20 +16,20 @@ import lib.plots as proj_plots #Functions for plotting data
def main():
##### User inputs
## Input and output locations
globals()['data_folder'] = "../data/NGC1068_x274020/"
infiles = ['x274020at.c0f.fits','x274020bt.c0f.fits','x274020ct.c0f.fits',
'x274020dt.c0f.fits','x274020et.c0f.fits','x274020ft.c0f.fits',
'x274020gt.c0f.fits','x274020ht.c0f.fits','x274020it.c0f.fits']
globals()['plots_folder'] = "../plots/NGC1068_x274020/"
# globals()['data_folder'] = "../data/NGC1068_x274020/"
# infiles = ['x274020at.c0f.fits','x274020bt.c0f.fits','x274020ct.c0f.fits',
# 'x274020dt.c0f.fits','x274020et.c0f.fits','x274020ft.c0f.fits',
# 'x274020gt.c0f.fits','x274020ht.c0f.fits','x274020it.c0f.fits']
# globals()['plots_folder'] = "../plots/NGC1068_x274020/"
# globals()['data_folder'] = "../data/NGC1068_x14w010/"
# infiles = ['x14w0101t_c0f.fits','x14w0102t_c0f.fits','x14w0103t_c0f.fits',
# 'x14w0104t_c0f.fits','x14w0105p_c0f.fits','x14w0106t_c0f.fits']
# globals()['plots_folder'] = "../plots/NGC1068_x14w010/"
# globals()['data_folder'] = "../data/3C405_x136060/"
# infiles = ['x1360601t_c0f.fits','x1360602t_c0f.fits','x1360603t_c0f.fits']
# globals()['plots_folder'] = "../plots/3C405_x136060/"
globals()['data_folder'] = "../data/3C405_x136060/"
infiles = ['x1360601t_c0f.fits','x1360602t_c0f.fits','x1360603t_c0f.fits']
globals()['plots_folder'] = "../plots/3C405_x136060/"
# globals()['data_folder'] = "../data/CygnusA_x43w0/"
# infiles = ['x43w0101r_c0f.fits', 'x43w0102r_c0f.fits', 'x43w0103r_c0f.fits',
@@ -87,25 +87,26 @@ def main():
iterations = 10
# Error estimation
error_sub_shape = (75,75)
display_error = False
display_error = True
# Data binning
rebin = True
if rebin:
pxsize = 0.10
pxsize = 0.50
px_scale = 'arcsec' #pixel or arcsec
rebin_operation = 'sum' #sum or average
# Alignement
align_center = 'image' #If None will align image to image center
display_data = False
display_data = True
# Smoothing
smoothing_function = 'combine' #gaussian_after, gaussian or combine
smoothing_FWHM = 0.20 #If None, no smoothing is done
smoothing_FWHM = None #If None, no smoothing is done
smoothing_scale = 'arcsec' #pixel or arcsec
# Rotation
rotate = True #rotation to North convention can give erroneous results
rotate_stokes = False #rotation to North convention can give erroneous results
rotate_data = False #rotation to North convention can give erroneous results
# Polarization map output
figname = 'NGC1068_FOC' #target/intrument name
figtype = '_combine_FWHM020_rot' #additionnal informations
figname = '3C405_FOC' #target/intrument name
figtype = '' #additionnal informations
SNRp_cut = 3 #P measurments with SNR>3
SNRi_cut = 30 #I measurments with SNR>30, which implies an uncertainty in P of 4.7%.
step_vec = 1 #plot all vectors in the array. if step_vec = 2, then every other vector will be plotted
@@ -114,19 +115,40 @@ def main():
## Step 1:
# Get data from fits files and translate to flux in erg/cm²/s/Angstrom.
data_array, headers = proj_fits.get_obs_data(infiles, data_folder=data_folder, compute_flux=True)
for data in data_array:
if (data < 0.).any():
print("ETAPE 1 : ", data)
# Crop data to remove outside blank margins.
data_array, error_array = proj_red.crop_array(data_array, step=5, null_val=0., inside=True)
for data in data_array:
if (data < 0.).any():
print("ETAPE 2 : ", data)
# Deconvolve data using Richardson-Lucy iterative algorithm with a gaussian PSF of given FWHM.
if deconvolve:
data_array = proj_red.deconvolve_array(data_array, headers, psf=psf, FWHM=psf_FWHM, scale=psf_scale, shape=psf_shape, iterations=iterations)
# Estimate error from data background, estimated from sub-image of desired sub_shape.
data_array, error_array = proj_red.get_error(data_array, sub_shape=error_sub_shape, display=display_error, headers=headers, savename=figname+"_errors", plots_folder=plots_folder)
for data in data_array:
if (data < 0.).any():
print("ETAPE 3 : ", data)
# Rebin data to desired pixel size.
if rebin:
data_array, error_array, headers, Dxy = proj_red.rebin_array(data_array, error_array, headers, pxsize=pxsize, scale=px_scale, operation=rebin_operation)
for data in data_array:
if (data < 0.).any():
print("ETAPE 4 : ", data)
#Align and rescale images with oversampling.
data_array, error_array = proj_red.align_data(data_array, error_array, upsample_factor=int(Dxy.min()), ref_center=align_center, return_shifts=False)
for data in data_array:
if (data < 0.).any():
print("ETAPE 5 : ", data)
# Rotate data to have North up
ref_header = copy.deepcopy(headers[0])
if rotate_data:
data_array, error_array, headers = proj_red.rotate_data(data_array, error_array, headers, -ref_header['orientat'])
for data in data_array:
if (data < 0.).any():
print("ETAPE 6 : ", data)
#Plot array for checking output
if display_data:
proj_plots.plot_obs(data_array, headers, vmin=data_array.min(), vmax=data_array.max(), savename=figname+"_center_"+align_center, plots_folder=plots_folder)
@@ -141,7 +163,7 @@ def main():
## Step 3:
# Rotate images to have North up
if rotate:
if rotate_stokes:
ref_header = copy.deepcopy(headers[0])
I_stokes, Q_stokes, U_stokes, Stokes_cov, headers = proj_red.rotate_Stokes(I_stokes, Q_stokes, U_stokes, Stokes_cov, headers, -ref_header['orientat'])
# Compute polarimetric parameters (polarization degree and angle).

View File

@@ -41,6 +41,10 @@ def get_obs_data(infiles, data_folder="", compute_flux=False):
data_array.append(f[0].data)
data_array = np.array(data_array)
# Prevent negative count value in imported data
for i in range(len(data_array)):
data_array[i][data_array[i] < 0.] = 0.
if compute_flux:
for i in range(len(infiles)):
# Compute the flux in counts/sec

View File

@@ -134,7 +134,7 @@ def polarization_map(Stokes, SNRp_cut=3., SNRi_cut=30., step_vec=1,
pang_err = Stokes[np.argmax([Stokes[i].header['datatype']=='Pol_ang_err' for i in range(len(Stokes))])]
pivot_wav = Stokes[0].header['photplam']
convert_flux = Stokes[0].header['photflam']
convert_flux = 1.#Stokes[0].header['photflam']
wcs = WCS(Stokes[0]).deepcopy()
#Compute SNR and apply cuts

View File

@@ -391,7 +391,10 @@ def get_error(data_array, sub_shape=(15,15), display=False, headers=None,
error = np.sqrt(np.sum(sub_image**2)/sub_image.size)
error_array[i] *= error
background[i] = sub_image.sum()
data_array[i] = np.abs(data_array[i] - sub_image.mean())
data_array[i] = data_array[i] - sub_image.mean()
data_array[i][data_array[i] < 0.] = 0.
if (data_array[i] < 0.).any():
print(data_array[i])
if display:
@@ -651,7 +654,7 @@ def align_data(data_array, error_array=None, upsample_factor=1., ref_data=None,
shifts, errors = [], []
for i,image in enumerate(data_array):
# Initialize rescaled images to background values
rescaled_image[i] *= 0.1*background[i]
rescaled_image[i] *= 0.*background[i]
rescaled_error[i] *= background[i]
# Get shifts and error by cross-correlation to ref_data
shift, error, phase_diff = phase_cross_correlation(ref_data, image,
@@ -664,9 +667,11 @@ def align_data(data_array, error_array=None, upsample_factor=1., ref_data=None,
rescaled_error[i,res_shift[0]:res_shift[0]+shape[1],
res_shift[1]:res_shift[1]+shape[2]] = copy.deepcopy(error_array[i])
# Shift images to align
rescaled_image[i] = sc_shift(rescaled_image[i], shift, cval=0.1*background[i])
rescaled_image[i] = sc_shift(rescaled_image[i], shift, cval=0.)
rescaled_error[i] = sc_shift(rescaled_error[i], shift, cval=background[i])
rescaled_image[i][rescaled_image[i] < 0.] = 0.
shifts.append(shift)
errors.append(error)
@@ -867,15 +872,27 @@ def polarizer_avg(data_array, error_array, headers, FWHM=None, scale='pixel',
else:
# Average on each polarization filter.
pol0 = pol0_array.mean(axis=0)
pol60 = pol60_array.mean(axis=0)
pol120 = pol120_array.mean(axis=0)
#pol0 = pol0_array.mean(axis=0)
#pol60 = pol60_array.mean(axis=0)
#pol120 = pol120_array.mean(axis=0)
# Sum on each polarization filter.
print("Exposure time for polarizer 0°/60°/120° : ",
np.sum([header['exptime'] for header in headers0]),
np.sum([header['exptime'] for header in headers60]),
np.sum([header['exptime'] for header in headers120]))
pol0 = pol0_array.sum(axis=0)
pol60 = pol60_array.sum(axis=0)
pol120 = pol120_array.sum(axis=0)
pol_array = np.array([pol0, pol60, pol120])
# Propagate uncertainties quadratically
err0 = np.mean(err0_array,axis=0)/np.sqrt(err0_array.shape[0])
err60 = np.mean(err60_array,axis=0)/np.sqrt(err60_array.shape[0])
err120 = np.mean(err120_array,axis=0)/np.sqrt(err120_array.shape[0])
#err0 = np.mean(err0_array,axis=0)/np.sqrt(err0_array.shape[0])
#err60 = np.mean(err60_array,axis=0)/np.sqrt(err60_array.shape[0])
#err120 = np.mean(err120_array,axis=0)/np.sqrt(err120_array.shape[0])
err0 = np.sum(err0_array,axis=0)*np.sqrt(err0_array.shape[0])
err60 = np.sum(err60_array,axis=0)*np.sqrt(err60_array.shape[0])
err120 = np.sum(err120_array,axis=0)*np.sqrt(err120_array.shape[0])
polerr_array = np.array([err0, err60, err120])
# Update headers
@@ -886,8 +903,9 @@ def polarizer_avg(data_array, error_array, headers, FWHM=None, scale='pixel',
list_head = headers60
else:
list_head = headers120
header['exptime'] = np.mean([head['exptime'] for head in list_head])/len(list_head)
header['exptime'] = np.sum([head['exptime'] for head in list_head])/len(list_head)
headers_array = [headers0[0], headers60[0], headers120[0]]
if not(FWHM is None) and (smoothing.lower() in ['gaussian','gauss']):
# Smooth by convoluting with a gaussian each polX image.
pol_array, polerr_array = smooth_data(pol_array, polerr_array,
@@ -976,11 +994,34 @@ def compute_Stokes(data_array, error_array, headers, FWHM=None,
FWHM=FWHM, scale=scale, smoothing=smoothing)
pol0, pol60, pol120 = pol_array
if (pol0 < 0.).any() or (pol60 < 0.).any() or (pol120 < 0.).any():
print("WARNING : Negative value in polarizer array.")
#Stokes parameters
I_stokes = (2./3.)*(pol0 + pol60 + pol120)
Q_stokes = (2./3.)*(2*pol0 - pol60 - pol120)
U_stokes = (2./np.sqrt(3.))*(pol60 - pol120)
#Remove nan
I_stokes[np.isnan(I_stokes)]=0.
Q_stokes[np.isnan(Q_stokes)]=0.
Q_stokes[I_stokes == 0.]=0.
U_stokes[np.isnan(U_stokes)]=0.
U_stokes[I_stokes == 0.]=0.
mask = (Q_stokes**2 + U_stokes**2) > I_stokes**2
if mask.any():
print("WARNING : I_pol > I_stokes : ", len(I_stokes[mask]))
plt.imshow(np.sqrt(Q_stokes**2+U_stokes**2)/I_stokes*mask, origin='lower')
plt.colorbar()
plt.title(r"$I_{pol}/I_{tot}$")
plt.show()
#I_stokes[mask]=0.
Q_stokes[mask]=0.
U_stokes[mask]=0.
#Stokes covariance matrix
Stokes_cov = np.zeros((3,3,I_stokes.shape[0],I_stokes.shape[1]))
Stokes_cov[0,0] = (4./9.)*(pol_cov[0,0]+pol_cov[1,1]+pol_cov[2,2]) + (8./9.)*(pol_cov[0,1]+pol_cov[0,2]+pol_cov[1,2])
@@ -990,11 +1031,6 @@ def compute_Stokes(data_array, error_array, headers, FWHM=None,
Stokes_cov[0,2] = Stokes_cov[2,0] = (4./9.)*(2.*pol_cov[0,0]-pol_cov[1,1]-pol_cov[2,2]+pol_cov[0,1]+pol_cov[0,2]-2.*pol_cov[1,2])
Stokes_cov[1,2] = Stokes_cov[2,1] = (4./(3.*np.sqrt(3.)))*(-pol_cov[1,1]+pol_cov[2,2]+2.*pol_cov[0,1]-2.*pol_cov[0,2])
#Remove nan
I_stokes[np.isnan(I_stokes)]=0.
Q_stokes[np.isnan(Q_stokes)]=0.
U_stokes[np.isnan(U_stokes)]=0.
if not(FWHM is None) and (smoothing.lower() in ['gaussian_after','gauss_after']):
Stokes_array = np.array([I_stokes, Q_stokes, U_stokes])
Stokes_error = np.array([np.sqrt(Stokes_cov[i,i]) for i in range(3)])
@@ -1053,10 +1089,11 @@ def compute_pol(I_stokes, Q_stokes, U_stokes, Stokes_cov, headers):
#Polarization degree and angle computation
I_pol = np.sqrt(Q_stokes**2 + U_stokes**2)
P = I_pol/I_stokes*100.
P[I_stokes <= 0.] = 0.
PA = (90./np.pi)*np.arctan2(U_stokes,Q_stokes)+90
if (np.isfinite(P)>100.).any():
print("WARNING : found pixels for which P > 100%")
if (P>100.).any():
print("WARNING : found pixels for which P > 100%", len(P[P>100.]))
#Associated errors
s_P = (100./I_stokes)*np.sqrt((Q_stokes**2*Stokes_cov[1,1] + U_stokes**2*Stokes_cov[2,2] + 2.*Q_stokes*U_stokes*Stokes_cov[1,2])/(Q_stokes**2 + U_stokes**2) + ((Q_stokes/I_stokes)**2 + (U_stokes/I_stokes)**2)*Stokes_cov[0,0] - 2.*(Q_stokes/I_stokes)*Stokes_cov[0,1] - 2.*(U_stokes/I_stokes)*Stokes_cov[0,2])
@@ -1065,18 +1102,90 @@ def compute_pol(I_stokes, Q_stokes, U_stokes, Stokes_cov, headers):
debiased_P = np.sqrt(P**2 - s_P**2)
if (debiased_P>100.).any():
print("WARNING : found pixels for which debiased_P > 100%", len(debiased_P[debiased_P>100.]))
#Compute the total exposure time so that
#I_stokes*exp_tot = N_tot the total number of events
exp_tot = np.array([header['exptime'] for header in headers]).sum()
N_obs = I_stokes/np.array([header['photflam'] for header in headers]).mean()*exp_tot
N_obs = I_stokes*exp_tot
#Errors on P, PA supposing Poisson noise
s_P_P = np.sqrt(2.)*(N_obs)**(-0.5)*100.
s_P_P = np.sqrt(2.)/np.sqrt(N_obs)*100.
s_PA_P = s_P_P/(2.*P/100.)*180./np.pi
return P, debiased_P, s_P, s_P_P, PA, s_PA, s_PA_P
def rotate_data(data_array, error_array, headers, ang):
    """
    Rotate a set of images and their error maps by a given angle using
    scipy.ndimage.rotate, and update each header's orientation keyword and
    WCS to reflect the new field orientation.
    ----------
    Inputs:
    data_array : numpy.ndarray
        Array of images (2D floats) to be rotated by angle ang.
    error_array : numpy.ndarray
        Array of errors associated to images in data_array.
    headers : header list
        List of headers corresponding to the reduced images.
    ang : float
        Rotation angle (in degrees) that should be applied to the images.
    ----------
    Returns:
    new_data_array : numpy.ndarray
        Updated array containing the rotated images; negative values that
        can appear through spline interpolation are clipped to 0.
    new_error_array : numpy.ndarray
        Updated array containing the rotated errors.
    new_headers : header list
        Updated list of headers corresponding to the reduced images
        accounting for the new orientation angle.
    """
    # Rotation angle in radians; only used to rotate the WCS PC matrix below.
    alpha = ang * np.pi / 180.
    # Rotate every image and its error map with scipy.ndimage.rotate.
    # Images are padded with 0.; errors are padded with the mean error so the
    # padded region does not look artificially noiseless.
    new_data_array = []
    new_error_array = []
    for i in range(len(data_array)):
        new_data_array.append(sc_rotate(data_array[i], ang, reshape=False,
                                        cval=0.))
        new_error_array.append(sc_rotate(error_array[i], ang, reshape=False,
                                         cval=error_array.mean()))
    new_data_array = np.array(new_data_array)
    new_error_array = np.array(new_error_array)
    # Spline interpolation can create unphysical negative counts: clip them
    # to zero in a single vectorized pass over the whole image stack.
    new_data_array[new_data_array < 0.] = 0.
    # Update headers to the new orientation angle.
    new_headers = []
    mrot = np.array([[np.cos(-alpha), -np.sin(-alpha)],
                     [np.sin(-alpha), np.cos(-alpha)]])
    for header in headers:
        new_header = copy.deepcopy(header)
        new_header['orientat'] = header['orientat'] + ang
        new_wcs = WCS(header).deepcopy()
        if new_wcs.wcs.has_cd():  # CD matrix
            # NOTE(review): in this branch the CD matrix is discarded and only
            # a CDELT scale (converted via the 3600 factor) is kept; mrot is
            # never applied here, unlike the PC branch -- confirm this is the
            # intended behaviour for CD-based WCS headers.
            del new_wcs.wcs.cd
            keys = ['CD1_1', 'CD1_2', 'CD2_1', 'CD2_2']
            for key in keys:
                new_header.remove(key, ignore_missing=True)
            new_wcs.wcs.cdelt = 3600. * np.sqrt(np.sum(new_wcs.wcs.get_pc()**2, axis=1))
        elif new_wcs.wcs.has_pc():  # PC matrix + CDELT
            newpc = np.dot(mrot, new_wcs.wcs.get_pc())
            new_wcs.wcs.pc = newpc
        new_wcs.wcs.set()
        new_header.update(new_wcs.to_header())
        new_headers.append(new_header)
    return new_data_array, new_error_array, new_headers
def rotate_Stokes(I_stokes, Q_stokes, U_stokes, Stokes_cov, headers, ang):
"""
Use scipy.ndimage.rotate to rotate I_stokes to an angle, and a rotation
@@ -1133,15 +1242,15 @@ def rotate_Stokes(I_stokes, Q_stokes, U_stokes, Stokes_cov, headers, ang):
#Rotate original images using scipy.ndimage.rotate
new_I_stokes = sc_rotate(new_I_stokes, ang, reshape=False,
cval=0.10*np.sqrt(new_Stokes_cov[0,0][0,0]))
cval=0.0*np.sqrt(new_Stokes_cov[0,0][0,0]))
new_Q_stokes = sc_rotate(new_Q_stokes, ang, reshape=False,
cval=0.10*np.sqrt(new_Stokes_cov[1,1][0,0]))
cval=0.0*np.sqrt(new_Stokes_cov[1,1][0,0]))
new_U_stokes = sc_rotate(new_U_stokes, ang, reshape=False,
cval=0.10*np.sqrt(new_Stokes_cov[2,2][0,0]))
cval=0.0*np.sqrt(new_Stokes_cov[2,2][0,0]))
for i in range(3):
for j in range(3):
new_Stokes_cov[i,j] = sc_rotate(new_Stokes_cov[i,j], ang, reshape=False,
cval=0.10*new_Stokes_cov[i,j].mean())
cval=0.0*new_Stokes_cov[i,j].mean())
#Update headers to new angle
new_headers = []
@@ -1153,11 +1262,11 @@ def rotate_Stokes(I_stokes, Q_stokes, U_stokes, Stokes_cov, headers, ang):
new_wcs = WCS(header).deepcopy()
if new_wcs.wcs.has_cd(): # CD matrix
del w.wcs.cd
del new_wcs.wcs.cd
keys = ['CD1_1','CD1_2','CD2_1','CD2_2']
for key in keys:
new_header.remove(key, ignore_missing=True)
w.wcs.cdelt = 3600.*np.sqrt(np.sum(w.wcs.get_pc()**2,axis=1))
new_wcs.wcs.cdelt = 3600.*np.sqrt(np.sum(w.wcs.get_pc()**2,axis=1))
elif new_wcs.wcs.has_pc(): # PC matrix + CDELT
newpc = np.dot(mrot, new_wcs.wcs.get_pc())
new_wcs.wcs.pc = newpc