small improvement and bugfix

This commit is contained in:
Thibault Barnouin
2023-07-03 11:34:06 +02:00
parent 724f2c1f42
commit fb1f075078
4 changed files with 22 additions and 39 deletions

View File

@@ -33,7 +33,7 @@ def main(target=None, proposal_id=None, infiles=None, output_dir="./data"):
# Background estimation # Background estimation
error_sub_type = 'freedman-diaconis' #sqrt, sturges, rice, scott, freedman-diaconis (default) or shape (example (51,51)) error_sub_type = 'freedman-diaconis' #sqrt, sturges, rice, scott, freedman-diaconis (default) or shape (example (51,51))
subtract_error = 1.00 subtract_error = 1.00
display_error = True display_error = False
# Data binning # Data binning
rebin = True rebin = True
@@ -93,15 +93,13 @@ def main(target=None, proposal_id=None, infiles=None, output_dir="./data"):
data_array, headers = proj_fits.get_obs_data(infiles, data_folder=data_folder, compute_flux=True) data_array, headers = proj_fits.get_obs_data(infiles, data_folder=data_folder, compute_flux=True)
figname = "_".join([target,"FOC"]) figname = "_".join([target,"FOC"])
if smoothing_FWHM is None: if rebin:
if px_scale in ['px','pixel','pixels']: if not px_scale in ['full']:
figtype = "".join(["b_",str(pxsize),'px']) figtype = "".join(["b","{0:.2f}".format(pxsize),px_scale]) #additional information
elif px_scale in ['arcsec','arcseconds','arcs']:
figtype = "".join(["b_","{0:.2f}".format(pxsize).replace(".",""),'arcsec'])
else: else:
figtype = "full" figtype = "full"
else: if not smoothing_FWHM is None:
figtype = "_".join(["".join([s[0] for s in smoothing_function.split("_")]),"".join(["{0:.2f}".format(smoothing_FWHM).replace(".",""),smoothing_scale])]) #additional information figtype += "_"+"".join(["".join([s[0] for s in smoothing_function.split("_")]),"{0:.2f}".format(smoothing_FWHM),smoothing_scale]) #additional information
if align_center is None: if align_center is None:
figtype += "_not_aligned" figtype += "_not_aligned"
@@ -123,7 +121,7 @@ def main(target=None, proposal_id=None, infiles=None, output_dir="./data"):
data_array, error_array, headers, data_mask = proj_red.align_data(data_array, headers, error_array=error_array, background=background, upsample_factor=10, ref_center=align_center, return_shifts=False) data_array, error_array, headers, data_mask = proj_red.align_data(data_array, headers, error_array=error_array, background=background, upsample_factor=10, ref_center=align_center, return_shifts=False)
if display_align: if display_align:
proj_plots.plot_obs(data_array, headers, vmin=data_array[data_array>0.].min()*headers[0]['photflam'], vmax=data_array[data_array>0.].max()*headers[0]['photflam'], savename="_".join([figname,"center",str(align_center)]), plots_folder=plots_folder) proj_plots.plot_obs(data_array, headers, vmin=data_array[data_array>0.].min()*headers[0]['photflam'], vmax=data_array[data_array>0.].max()*headers[0]['photflam'], savename="_".join([figname,str(align_center)]), plots_folder=plots_folder)
# Rebin data to desired pixel size. # Rebin data to desired pixel size.
if rebin: if rebin:

View File

@@ -17,8 +17,8 @@ root_dir = path_join('/home/t.barnouin/Documents/Thesis/HST')
root_dir_K = path_join(root_dir,'Kishimoto','output') root_dir_K = path_join(root_dir,'Kishimoto','output')
root_dir_S = path_join(root_dir,'FOC_Reduction','output') root_dir_S = path_join(root_dir,'FOC_Reduction','output')
root_dir_data_S = path_join(root_dir,'FOC_Reduction','data','NGC1068','5144') root_dir_data_S = path_join(root_dir,'FOC_Reduction','data','NGC1068','5144')
root_dir_plot_S = path_join(root_dir,'FOC_Reduction','plots','NGC1068','5144') root_dir_plot_S = path_join(root_dir,'FOC_Reduction','plots','NGC1068','5144','notaligned')
filename_S = "NGC1068_FOC_b_10px.fits" filename_S = "NGC1068_FOC_b10.00pixel_not_aligned.fits"
plt.rcParams.update({'font.size': 15}) plt.rcParams.update({'font.size': 15})
SNRi_cut = 30. SNRi_cut = 30.
@@ -140,7 +140,8 @@ fig_dif_pa.savefig(path_join(root_dir_plot_S,"NGC1068_K_polang_diff.png"),bbox_i
##### #####
###display both polarization maps to check consistency ###display both polarization maps to check consistency
##### #####
fig = plt.figure(num="Polarization maps comparison") #plt.rcParams.update({'font.size': 15})
fig = plt.figure(num="Polarization maps comparison",figsize=(10,10))
ax = fig.add_subplot(111, projection=wcs) ax = fig.add_subplot(111, projection=wcs)
fig.subplots_adjust(right=0.85) fig.subplots_adjust(right=0.85)
cbar_ax = fig.add_axes([0.88, 0.12, 0.01, 0.75]) cbar_ax = fig.add_axes([0.88, 0.12, 0.01, 0.75])
@@ -164,7 +165,6 @@ ax.coords[1].set_ticklabel_position('l')
#ax.axis('equal') #ax.axis('equal')
cbar = plt.colorbar(im0, cax=cbar_ax, label=r"$F_{\lambda}$ [$ergs \cdot cm^{-2} \cdot s^{-1} \cdot \AA^{-1}$]") cbar = plt.colorbar(im0, cax=cbar_ax, label=r"$F_{\lambda}$ [$ergs \cdot cm^{-2} \cdot s^{-1} \cdot \AA^{-1}$]")
#plt.rcParams.update({'font.size': 8})
ax.legend(loc='upper right') ax.legend(loc='upper right')
fig.savefig(path_join(root_dir_plot_S,"NGC1068_K_comparison.png"),bbox_inches="tight",dpi=300) fig.savefig(path_join(root_dir_plot_S,"NGC1068_K_comparison.png"),bbox_inches="tight",dpi=300)

View File

@@ -137,7 +137,7 @@ def get_product_list(target=None, proposal_id=None):
for prod in products: for prod in products:
prod['target_name'] = observations['target_name'][observations['obsid']==prod['obsID']][0] prod['target_name'] = observations['target_name'][observations['obsid']==prod['obsID']][0]
tab = unique(products, ['target_name', 'proposal_id']) tab = unique(products, ['target_name', 'proposal_id'])
if np.all(tab['target_name']==tab['target_name'][0]): if len(tab)>1 and np.all(tab['target_name']==tab['target_name'][0]):
target = tab['target_name'][0] target = tab['target_name'][0]
products["Obs"] = [np.argmax(np.logical_and(tab['proposal_id']==data['proposal_id'],tab['target_name']==data['target_name']))+1 for data in products] products["Obs"] = [np.argmax(np.logical_and(tab['proposal_id']==data['proposal_id'],tab['target_name']==data['target_name']))+1 for data in products]

View File

@@ -575,8 +575,7 @@ def rebin_array(data_array, error_array, headers, pxsize, scale,
if instr == 'FOC': if instr == 'FOC':
HST_aper = 2400. # HST aperture in mm HST_aper = 2400. # HST aperture in mm
Dxy_arr = np.ones((data_array.shape[0],2)) Dxy_arr = np.ones((data_array.shape[0],2))
for i, enum in enumerate(list(zip(data_array, error_array, headers))): for i, (image, error, header) in enumerate(list(zip(data_array, error_array, headers))):
image, error, header = enum
# Get current pixel size # Get current pixel size
w = WCS(header).deepcopy() w = WCS(header).deepcopy()
new_header = deepcopy(header) new_header = deepcopy(header)
@@ -592,8 +591,7 @@ def rebin_array(data_array, error_array, headers, pxsize, scale,
raise ValueError("'{0:s}' invalid scale for binning.".format(scale)) raise ValueError("'{0:s}' invalid scale for binning.".format(scale))
new_shape = np.ceil(min(image.shape/Dxy_arr,key=lambda x:x[0]+x[1])).astype(int) new_shape = np.ceil(min(image.shape/Dxy_arr,key=lambda x:x[0]+x[1])).astype(int)
for i, enum in enumerate(list(zip(data_array, error_array, headers))): for i, (image, error, header) in enumerate(list(zip(data_array, error_array, headers))):
image, error, header = enum
# Get current pixel size # Get current pixel size
w = WCS(header).deepcopy() w = WCS(header).deepcopy()
new_header = deepcopy(header) new_header = deepcopy(header)
@@ -617,21 +615,12 @@ def rebin_array(data_array, error_array, headers, pxsize, scale,
if operation.lower() in ["mean", "average", "avg"]: if operation.lower() in ["mean", "average", "avg"]:
new_error = np.sqrt(bin_ndarray(error**2, new_error = np.sqrt(bin_ndarray(error**2,
new_shape=new_shape, operation='average')) new_shape=new_shape, operation='average'))
#new_error[mask] = np.sqrt(bin_ndarray(error**2*image,
# new_shape=new_shape, operation='average')[mask]/sum_image[mask])
#new_error[mask] = np.sqrt(bin_ndarray(error**2,
# new_shape=new_shape, operation='average')[mask])
else: else:
new_error = np.sqrt(bin_ndarray(error**2, new_error = np.sqrt(bin_ndarray(error**2,
new_shape=new_shape, operation='sum')) new_shape=new_shape, operation='sum'))
#new_error[mask] = np.sqrt(bin_ndarray(error**2*image,
# new_shape=new_shape, operation='sum')[mask]/sum_image[mask])
#new_error[mask] = np.sqrt(bin_ndarray(error**2,
# new_shape=new_shape, operation='sum')[mask])
rebinned_error.append(np.sqrt(rms_image**2 + new_error**2)) rebinned_error.append(np.sqrt(rms_image**2 + new_error**2))
# Update header # Update header
#nw = w.slice((np.s_[::Dxy[0]], np.s_[::Dxy[1]]))
nw = w.deepcopy() nw = w.deepcopy()
nw.wcs.cdelt *= Dxy nw.wcs.cdelt *= Dxy
nw.wcs.crpix /= Dxy nw.wcs.crpix /= Dxy
@@ -762,19 +751,18 @@ def align_data(data_array, headers, error_array=None, background=None,
# Initialize rescaled images to background values # Initialize rescaled images to background values
rescaled_error[i] *= 0.01*background[i] rescaled_error[i] *= 0.01*background[i]
# Get shifts and error by cross-correlation to ref_data # Get shifts and error by cross-correlation to ref_data
shift, error, phase_diff = phase_cross_correlation(ref_data/ref_data.max(), image/image.max(), if do_shift:
shift, error, _ = phase_cross_correlation(ref_data/ref_data.max(), image/image.max(),
upsample_factor=upsample_factor) upsample_factor=upsample_factor)
else:
shift = pol_shift[headers[i]['filtnam1'].lower()]
error = sigma_shift[headers[i]['filtnam1'].lower()]
# Rescale image to requested output # Rescale image to requested output
rescaled_image[i,res_shift[0]:res_shift[0]+shape[1], rescaled_image[i,res_shift[0]:res_shift[0]+shape[1],
res_shift[1]:res_shift[1]+shape[2]] = deepcopy(image) res_shift[1]:res_shift[1]+shape[2]] = deepcopy(image)
rescaled_error[i,res_shift[0]:res_shift[0]+shape[1], rescaled_error[i,res_shift[0]:res_shift[0]+shape[1],
res_shift[1]:res_shift[1]+shape[2]] = deepcopy(error_array[i]) res_shift[1]:res_shift[1]+shape[2]] = deepcopy(error_array[i])
# Shift images to align # Shift images to align
if do_shift:
rescaled_image[i] = sc_shift(rescaled_image[i], shift, order=1, cval=0.)
rescaled_error[i] = sc_shift(rescaled_error[i], shift, order=1, cval=background[i])
else:
shift = pol_shift[headers[i]['filtnam1'].lower()]
rescaled_image[i] = sc_shift(rescaled_image[i], shift, order=1, cval=0.) rescaled_image[i] = sc_shift(rescaled_image[i], shift, order=1, cval=0.)
rescaled_error[i] = sc_shift(rescaled_error[i], shift, order=1, cval=background[i]) rescaled_error[i] = sc_shift(rescaled_error[i], shift, order=1, cval=background[i])
@@ -792,9 +780,6 @@ def align_data(data_array, headers, error_array=None, background=None,
#sum quadratically the errors #sum quadratically the errors
rescaled_error[i] = np.sqrt(rescaled_error[i]**2 + error_shift**2) rescaled_error[i] = np.sqrt(rescaled_error[i]**2 + error_shift**2)
#if i==1:
#np.savetxt("output/s_shift.txt",error_shift)
shifts.append(shift) shifts.append(shift)
errors.append(error) errors.append(error)