rollback fluxdensity fix as it is not a fix apparently
@@ -85,12 +85,21 @@ def display_bkg(data, background, std_bkg, headers, histograms=None, binning=Non
     label=headers[i]["filtnam1"] + " (Obs " + str(filt_obs[headers[i]["filtnam1"]]) + ")",
 )
 ax_h.plot([background[i] * convert_flux[i], background[i] * convert_flux[i]], [hist.min(), hist.max()], "x--", color="C{0:d}".format(i), alpha=0.8)
+if i == 0:
+    xmin, xmax = np.min(np.array(bins)[np.array(hist) > 1e2]) * convert_flux[0], np.max(np.array(bins)[np.array(hist) > 1e2]) * convert_flux[0]
+else:
+    xmin, xmax = (
+        min(xmin, np.min(np.array(bins)[np.array(hist) > 1e2]) * convert_flux[0]),
+        max(xmax, np.max(np.array(bins)[np.array(hist) > 1e2]) * convert_flux[0]),
+    )
 if coeff is not None:
     # ax_h.plot(bins*convert_flux[i], gausspol(bins, *coeff[i]), '--', color="C{0:d}".format(i), alpha=0.8)
     ax_h.plot(bins * convert_flux[i], gauss(bins, *coeff[i]), "--", color="C{0:d}".format(i), alpha=0.8)
 ax_h.set_xscale("log")
-ax_h.set_ylim([0.0, np.max([hist.max() for hist in histograms])])
-ax_h.set_xlim([np.min(background * convert_flux) * 1e-2, np.max(background * convert_flux) * 1e2])
+ax_h.set_yscale("log")
+ax_h.set_ylim([100.0, np.max([hist.max() for hist in histograms])])
+# ax_h.set_xlim([np.min(background * convert_flux) * 1e-2, np.max(background * convert_flux) * 1e2])
+ax_h.set_xlim([xmin, xmax])
 ax_h.set_xlabel(r"Flux [$ergs \cdot cm^{-2} \cdot s^{-1} \cdot \AA^{-1}$]")
 ax_h.set_ylabel(r"Number of pixels in bin")
 ax_h.set_title("Histogram for each observation")

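Note: the new x-limits are taken from the outermost histogram bins that still hold more than 1e2 pixels, accumulated over all observations. A minimal standalone sketch of that accumulation, with hypothetical `hist`/`bins` arrays already converted to flux units (the real code reuses `convert_flux`):

```python
import numpy as np

# Hypothetical per-observation histograms: (counts, bin positions in flux units).
observations = [
    (np.array([5, 150, 300, 120, 8]), np.array([1e-18, 1e-17, 1e-16, 1e-15, 1e-14])),
    (np.array([2, 90, 500, 250, 40]), np.array([5e-18, 5e-17, 5e-16, 5e-15, 5e-14])),
]

xmin = xmax = None
for hist, bins in observations:
    populated = bins[hist > 1e2]  # bins holding more than 100 pixels
    if populated.size == 0:
        continue  # nothing above threshold for this observation
    lo, hi = populated.min(), populated.max()
    xmin = lo if xmin is None else min(xmin, lo)
    xmax = hi if xmax is None else max(xmax, hi)

print(xmin, xmax)  # range passed to ax_h.set_xlim([xmin, xmax])
```
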
@@ -48,11 +48,13 @@ def get_obs_data(infiles, data_folder="", compute_flux=False):
 f.flush()
 # Save pixel area for flux density computation
 if headers[i]["PXFORMT"] == "NORMAL":
-    headers[i]["PXAREA"] = 6.25e-6 # 25x25 micron squared pixel area in cm^2
+    headers[i]["PXAREA"] = 1.96e-4 # 14x14 milliarcsec squared pixel area in arcsec^2
 elif headers[i]["PXFORMT"] == "ZOOM":
-    headers[i]["PXAREA"] = 1.25e-5 # 50x25 micron squared pixel area in cm^2
+    headers[i]["PXAREA"] = 4.06e-4 # 29x14 milliarcsec squared pixel area in arcsec^2
 else:
-    headers[i]["PXAREA"] = 1.0 # unknown default to 1 cm^2
+    headers[i]["PXAREA"] = 1.0 # unknown default to 1 arcsec^2
+# Convert PHOTFLAM value from 1arcsec aperture to the pixel area
+# headers[i]["PHOTFLAM"] *= np.pi / headers[i]["PXAREA"]
 data_array = np.array(data_array, dtype=np.double)

 # Prevent negative count value in imported data
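
Note: after this change PXAREA appears to be stored in arcsec**2 again and the PHOTFLAM rescaling stays commented out. A minimal sketch of the per-format lookup, assuming the header entries behave like plain dicts (values copied from the hunk above; the helper name is made up):

```python
# Pixel area per detector format, in arcsec**2 (values from the hunk above).
PIXEL_AREA_ARCSEC2 = {
    "NORMAL": 1.96e-4,  # 14x14 milliarcsec pixels
    "ZOOM": 4.06e-4,    # 29x14 milliarcsec pixels
}

def pixel_area(header):
    """Return the pixel area in arcsec**2 for a header-like mapping."""
    return PIXEL_AREA_ARCSEC2.get(header.get("PXFORMT"), 1.0)  # unknown formats default to 1.0

# Usage sketch
header = {"PXFORMT": "ZOOM"}
header["PXAREA"] = pixel_area(header)
print(header["PXAREA"])  # 0.000406
```
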
@@ -150,7 +152,7 @@ def save_Stokes(
 header["PHOTPLAM"] = (header_stokes["PHOTPLAM"], "Pivot Wavelength")
 header["PHOTBW"] = (header_stokes["PHOTBW"], "RMS Bandwidth of the Filter and Detector")
 header["PHOTFLAM"] = (header_stokes["PHOTFLAM"], "Inverse Sensitivity in DN/sec/cm**2/Angst")
-header["PXAREA"] = (header_stokes["PXAREA"], "Pixel area in cm**2")
+header["PXAREA"] = (header_stokes["PXAREA"], "Pixel area in arcsec**2")
 header["EXPTIME"] = (header_stokes["EXPTIME"], "Total exposure time in sec")
 header["PROPOSID"] = (header_stokes["PROPOSID"], "PEP proposal identifier for observation")
 header["TARGNAME"] = (header_stokes["TARGNAME"], "Target name")

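Note: if `header` here is an `astropy.io.fits.Header` (the WCS and PHOTFLAM handling elsewhere in the diff suggests astropy), assigning a `(value, comment)` tuple sets both the card value and its comment, so only the comment string changes in this hunk. A small sketch with a made-up value:

```python
from astropy.io import fits

header = fits.Header()
# Tuple assignment stores the value and the card comment in one go.
header["PXAREA"] = (4.06e-4, "Pixel area in arcsec**2")
print(header["PXAREA"], header.comments["PXAREA"])  # 0.000406 Pixel area in arcsec**2
```
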
@@ -366,7 +366,7 @@ def polarization_map(
 fig = plt.figure(figsize=(7 * ratiox, 7 * ratioy), layout="constrained")
 if ax is None:
     ax = fig.add_subplot(111, projection=wcs)
-    ax.set(aspect="equal", fc="k", ylim=[-stkI.shape[0] * 0.00, stkI.shape[0] * 1.00])
+    ax.set(aspect="equal", fc="k")
     # fig.subplots_adjust(hspace=0, wspace=0, left=0.102, right=1.02)

 # ax.coords.grid(True, color='white', ls='dotted', alpha=0.5)
@@ -527,9 +527,8 @@ def polarization_map(
 fig.colorbar(im, ax=ax, aspect=30, shrink=0.60, pad=0.025, label=r"$F_{\lambda}$ [$ergs \cdot cm^{-2} \cdot s^{-1} \cdot \AA$]")

 # Get integrated flux values from sum
-N_pix = data_mask.sum()
-I_diluted = stkI[data_mask].sum() / N_pix
-I_diluted_err = np.sqrt(np.sum(stk_cov[0, 0][data_mask])) / N_pix
+I_diluted = stkI[data_mask].sum()
+I_diluted_err = np.sqrt(np.sum(stk_cov[0, 0][data_mask]))

 # Get integrated polarization values from header
 P_diluted = Stokes[0].header["P_int"]
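
Note: with the `/ N_pix` division rolled back, the annotated flux is the straight sum over the masked pixels rather than the per-pixel mean, and its error is the square root of the summed I-I covariance term. A small numpy sketch of the two conventions, with hypothetical arrays standing in for `stkI`, `stk_cov[0, 0]` and `data_mask`:

```python
import numpy as np

rng = np.random.default_rng(0)
stkI = rng.random((8, 8))                      # hypothetical Stokes I map
stk_cov00 = (0.05 * rng.random((8, 8))) ** 2   # hypothetical per-pixel variance of I
data_mask = stkI > 0.5                         # hypothetical integration region

N_pix = data_mask.sum()

# Rolled-back convention: integrated flux as a plain sum over the region.
I_sum = stkI[data_mask].sum()
I_sum_err = np.sqrt(np.sum(stk_cov00[data_mask]))

# Previous convention (what this commit removes): per-pixel mean.
I_mean = I_sum / N_pix
I_mean_err = I_sum_err / N_pix

print(I_sum, I_sum_err, I_mean, I_mean_err)
```
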
@@ -551,8 +550,8 @@ def polarization_map(
 sep_y=0.01,
 sep_x=0.01,
 back_length=0.0,
-head_length=10.0,
-head_width=10.0,
+head_length=7.0,
+head_width=7.0,
 angle=-Stokes[0].header["orientat"],
 text_props={"ec": "k", "fc": font_color, "alpha": 1, "lw": 0.5},
 arrow_props={"ec": "k", "fc": font_color, "alpha": 1, "lw": 1},
@@ -602,7 +601,7 @@ def polarization_map(
 color="white",
 xy=(0.01, 1.00),
 xycoords="axes fraction",
-path_effects=[pe.withStroke(linewidth=0.5, foreground="k")],
+path_effects=[pe.withStroke(linewidth=2.0, foreground="k")],
 verticalalignment="top",
 horizontalalignment="left",
 )
@@ -617,7 +616,7 @@ def polarization_map(
 color="white",
 xy=(0.01, 1.00),
 xycoords="axes fraction",
-path_effects=[pe.withStroke(linewidth=0.5, foreground="k")],
+path_effects=[pe.withStroke(linewidth=2.0, foreground="k")],
 verticalalignment="top",
 horizontalalignment="left",
 )
@@ -3281,7 +3280,6 @@ class pol_map(object):
 str_conf = ""
 if self.region is None:
     s_I = np.sqrt(self.IQU_cov[0, 0])
-    N_pix = self.I.size()
     I_reg = self.I.sum()
     I_reg_err = np.sqrt(np.sum(s_I**2))
     P_reg = self.Stokes[0].header["P_int"]
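
Note: the error on the summed intensity keeps the usual uncorrelated propagation, var(sum I) = sum var(I); only the pixel-count normalisation is dropped. A minimal numpy check of that identity with hypothetical variances:

```python
import numpy as np

rng = np.random.default_rng(1)
var_I = (0.1 * rng.random(100)) ** 2  # hypothetical per-pixel variances of Stokes I
s_I = np.sqrt(var_I)                  # per-pixel 1-sigma errors, as in the hunk

I_reg_err = np.sqrt(np.sum(s_I**2))   # error on the straight sum, uncorrelated pixels
print(np.isclose(I_reg_err, np.sqrt(var_I.sum())))  # True
```
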
@@ -3296,7 +3294,6 @@ class pol_map(object):
 s_IU = self.IQU_cov[0, 2]
 s_QU = self.IQU_cov[1, 2]

-N_cut = self.cut.sum()
 I_cut = self.I[self.cut].sum()
 Q_cut = self.Q[self.cut].sum()
 U_cut = self.U[self.cut].sum()
@@ -3332,7 +3329,6 @@ class pol_map(object):
 s_IU = self.IQU_cov[0, 2]
 s_QU = self.IQU_cov[1, 2]

-N_pix = self.region.sum()
 I_reg = self.I[self.region].sum()
 Q_reg = self.Q[self.region].sum()
 U_reg = self.U[self.region].sum()
@@ -3365,7 +3361,6 @@ class pol_map(object):
 )

 new_cut = np.logical_and(self.region, self.cut)
-N_cut = new_cut.sum()
 I_cut = self.I[new_cut].sum()
 Q_cut = self.Q[new_cut].sum()
 U_cut = self.U[new_cut].sum()
@@ -3404,7 +3399,7 @@ class pol_map(object):
 self.an_int.remove()
 self.str_int = (
     r"$F_{{\lambda}}^{{int}}$({0:.0f} $\AA$) = {1} $ergs \cdot cm^{{-2}} \cdot s^{{-1}} \cdot \AA^{{-1}}$".format(
-        self.pivot_wav, sci_not(I_reg * self.map_convert / N_pix, I_reg_err * self.map_convert / N_pix, 2)
+        self.pivot_wav, sci_not(I_reg * self.map_convert, I_reg_err * self.map_convert, 2)
     )
     + "\n"
     + r"$P^{{int}}$ = {0:.1f} $\pm$ {1:.1f} %".format(P_reg * 100.0, np.ceil(P_reg_err * 1000.0) / 10.0)
@@ -3416,7 +3411,7 @@ class pol_map(object):
 # self.str_cut = (
 # "\n"
 # + r"$F_{{\lambda}}^{{cut}}$({0:.0f} $\AA$) = {1} $ergs \cdot cm^{{-2}} \cdot s^{{-1}} \cdot \AA^{{-1}}$".format(
-# self.pivot_wav, sci_not(I_cut * self.map_convert/N_cut, I_cut_err * self.map_convert/N_cut, 2)
+# self.pivot_wav, sci_not(I_cut * self.map_convert, I_cut_err * self.map_convert, 2)
 # )
 # + "\n"
 # + r"$P^{{cut}}$ = {0:.1f} $\pm$ {1:.1f} %".format(P_cut * 100.0, np.ceil(P_cut_err * 1000.0) / 10.0)
@@ -3440,7 +3435,7 @@ class pol_map(object):
 else:
     str_int = (
         r"$F_{{\lambda}}^{{int}}$({0:.0f} $\AA$) = {1} $ergs \cdot cm^{{-2}} \cdot s^{{-1}} \cdot \AA^{{-1}}$".format(
-            self.pivot_wav, sci_not(I_reg * self.map_convert / N_pix, I_reg_err * self.map_convert / N_pix, 2)
+            self.pivot_wav, sci_not(I_reg * self.map_convert, I_reg_err * self.map_convert, 2)
         )
         + "\n"
         + r"$P^{{int}}$ = {0:.1f} $\pm$ {1:.1f} %".format(P_reg * 100.0, np.ceil(P_reg_err * 1000.0) / 10.0)
@@ -3452,7 +3447,7 @@ class pol_map(object):
 # str_cut = (
 # "\n"
 # + r"$F_{{\lambda}}^{{cut}}$({0:.0f} $\AA$) = {1} $ergs \cdot cm^{{-2}} \cdot s^{{-1}} \cdot \AA^{{-1}}$".format(
-# self.pivot_wav, sci_not(I_cut * self.map_convert/N_cut, I_cut_err * self.map_convert/N_cut, 2)
+# self.pivot_wav, sci_not(I_cut * self.map_convert, I_cut_err * self.map_convert, 2)
 # )
 # + "\n"
 # + r"$P^{{cut}}$ = {0:.1f} $\pm$ {1:.1f} %".format(P_cut * 100.0, np.ceil(P_cut_err * 1000.0) / 10.0)

@@ -676,9 +676,6 @@ def rebin_array(data_array, error_array, headers, pxsize=2, scale="px", operatio
 nw.wcs.crpix /= Dxy
 nw.array_shape = new_shape
 new_header["NAXIS1"], new_header["NAXIS2"] = nw.array_shape
-# Compute new values of pixel area and flux density conversion factor
-new_header["PXAREA"] = header["PXAREA"] * (Dxy[0] * Dxy[1])
-new_header["PHOTFLAM"] = header["PHOTFLAM"] / (Dxy[0] * Dxy[1])
 for key, val in nw.to_header().items():
     new_header.set(key, val)
 new_header["SAMPLING"] = (str(pxsize) + scale, "Resampling performed during reduction")

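Note: the three removed lines rescaled the pixel area and the inverse sensitivity when rebinning by a factor `Dxy`; after the rollback the rebinned header keeps the original PXAREA and PHOTFLAM. A small sketch of what the dropped computation did, using a plain dict and a made-up binning factor in place of the real FITS header and WCS:

```python
import numpy as np

header = {"PXAREA": 1.96e-4, "PHOTFLAM": 2.5e-18}  # hypothetical input values
Dxy = np.array([2, 2])                             # rebinning factor along x and y

new_header = dict(header)
# One output pixel covers Dxy[0] * Dxy[1] input pixels, so the removed code grew the
# pixel area by that factor and shrank the per-pixel inverse sensitivity accordingly.
new_header["PXAREA"] = header["PXAREA"] * (Dxy[0] * Dxy[1])
new_header["PHOTFLAM"] = header["PHOTFLAM"] / (Dxy[0] * Dxy[1])
print(new_header)
```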