fix bug when transferring model from comps
landmanbester committed Aug 23, 2024
1 parent 6ff91c0 commit 5497df9
Showing 5 changed files with 23 additions and 16 deletions.
14 changes: 7 additions & 7 deletions pfb/operators/gridder.py
@@ -254,11 +254,16 @@ def _comps2vis_impl(uvw,
tbin_idx2 = tbin_idx - tbin_idx.min()
fbin_idx2 = fbin_idx - fbin_idx.min()

# currently not interpolating in time
ntime = tbin_idx.size
nband = fbin_idx.size

# get model
nrow = uvw.shape[0]
nchan = freq.size
vis = np.zeros((nrow, nchan, ncorr_out),
dtype=np.result_type(mds.coefficients.dtype, np.complex64))
if not ((freq>=freq_min) & (freq<=freq_max)).any():
return vis

comps = mds.coefficients.values
Ix = mds.location_x.values
Iy = mds.location_y.values
@@ -268,10 +273,6 @@ def _comps2vis_impl(uvw,
ny = mds.npix_y
x0 = mds.center_x
y0 = mds.center_y

nrow = uvw.shape[0]
nchan = freq.size
vis = np.zeros((nrow, nchan, ncorr_out), dtype=np.result_type(comps, np.complex64))
for t in range(ntime):
indt = slice(tbin_idx2[t], tbin_idx2[t] + tbin_cnts[t])
# TODO - clean up this logic. row_mapping holds the number of rows per
@@ -284,7 +285,6 @@ def _comps2vis_impl(uvw,
if not ((f>=freq_min) & (f<=freq_max)).any():
continue
# render components to image
# we want to do this on each worker
tout = tfunc(np.mean(utime[indt]))
fout = ffunc(np.mean(freq[indf]))
image = np.zeros((nx, ny), dtype=comps.dtype)
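A minimal sketch of the reordered logic above, assuming an xarray-like mds exposing coefficients (simplified signature, not the actual _comps2vis_impl): the zero visibility buffer is allocated first so the function can return early when no channel overlaps the model's frequency range, before any coefficient data is materialised.

import numpy as np

def comps2vis_sketch(uvw, freq, mds, freq_min, freq_max, ncorr_out=2):
    # Allocate the output first so the early return below still hands back
    # a correctly shaped, all-zero visibility array.
    nrow = uvw.shape[0]
    nchan = freq.size
    vis = np.zeros((nrow, nchan, ncorr_out),
                   dtype=np.result_type(mds.coefficients.dtype, np.complex64))
    # If no channel falls inside [freq_min, freq_max] there is nothing to
    # degrid, so skip reading the coefficient arrays entirely.
    if not ((freq >= freq_min) & (freq <= freq_max)).any():
        return vis
    comps = mds.coefficients.values  # only read when there is work to do
    # ... render components to an image and degrid into vis ...
    return vis
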
5 changes: 5 additions & 0 deletions pfb/parser/sara.yaml
@@ -60,6 +60,11 @@ inputs:
info:
Controls how aggressively the l1reweighting is applied.
Larger values correspond to more aggressive reweighting.
nbasisf:
dtype: int
info:
Number of basis functions to use when fitting the frequency axis.
Default is to use the number of non-null imaging bands, i.e. an interpolating fit.
positivity:
dtype: int
default: 1
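A rough illustration of the documented default (wsums and fsel are assumed names, mirroring the worker code further down): the non-null imaging bands are counted from the per-band weight sums, and using exactly that many basis functions makes the frequency fit pass through every non-null band, i.e. an interpolation.

import numpy as np

# Assumed per-band sums of imaging weights; a zero entry marks a null band.
wsums = np.array([3.2e6, 0.0, 2.9e6, 3.1e6])
fsel = wsums > 0                  # non-null imaging bands
nbasisf = int(np.sum(fsel))       # default: one basis function per non-null band
print(nbasisf)                    # 3
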
4 changes: 0 additions & 4 deletions pfb/workers/degrid.py
@@ -164,10 +164,6 @@ def _degrid(**kw):

print("Computing model visibilities", file=log)
writes = []
# avoid reading these more than once
coeffs = mds.coefficients.values
locx = mds.location_x.values
locy = mds.location_y.values
for ms in opts.ms:
xds = xds_from_ms(ms,
chunks=ms_chunks[ms],
4 changes: 2 additions & 2 deletions pfb/workers/grid.py
@@ -437,8 +437,8 @@ def _grid(xdsi=None, **kw):
if opts.transfer_model_from:
from pfb.utils.misc import eval_coeffs_to_slice
model = eval_coeffs_to_slice(
ds.time_out,
ds.freq_out,
time_out,
freq_out,
model_coeffs,
locx, locy,
mds.parametrisation,
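The fix above passes the aggregated output grid (time_out, freq_out) rather than the per-dataset ds.time_out and ds.freq_out when transferring the model. As a loose sketch only, assuming a Legendre parametrisation over a normalised frequency coordinate (the real eval_coeffs_to_slice takes more arguments and its parametrisation comes from the dataset), evaluating component coefficients onto an image slice at one output frequency could look like:

import numpy as np
from numpy.polynomial import legendre

def eval_coeffs_to_slice_sketch(freq_out, coeffs, locx, locy, nx, ny,
                                freq_lo, freq_hi):
    # Map the requested frequency into the [-1, 1] domain of the Legendre basis.
    nu = 2 * (freq_out - freq_lo) / (freq_hi - freq_lo) - 1
    image = np.zeros((nx, ny))
    # Each component carries per-pixel Legendre coefficients along frequency.
    for c, ix, iy in zip(coeffs, locx, locy):
        image[ix, iy] = legendre.legval(nu, c)
    return image
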
12 changes: 9 additions & 3 deletions pfb/workers/sara.py
@@ -172,6 +172,8 @@ def _sara(ddsi=None, **kw):
time_out.append(ds.time_out)
freq_out = np.unique(np.array(freq_out))
time_out = np.unique(np.array(time_out))
if time_out.size > 1:
raise NotImplementedError('Only static models currently supported')

nband = freq_out.size

@@ -302,8 +304,12 @@
Nxmax = psi.Nxmax
Nymax = psi.Nymax

# level slice indices
# import ipdb; ipdb.set_trace()
# number of frequency basis functions
if opts.nbasisf is None:
nbasisf = int(np.sum(fsel))
else:
nbasisf = opts.nbasisf
print(f"Using {nbasisf} frequency basis functions", file=log)

# a value less than zero turns L1 reweighting off
# we'll start on convergence or at the iteration
@@ -403,7 +409,7 @@
freq_out[fsel],
model[None, fsel, :, :],
wgt=wsums[None, fsel],
nbasisf=int(np.sum(fsel))-1,
nbasisf=nbasisf,
method='Legendre',
sigmasq=1e-6)
# save interpolated dataset
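For context on the nbasisf change above, a simplified per-pixel sketch of a weighted Legendre fit along the frequency axis (the actual fit_image_cube fits the full image cube and applies sigmasq regularisation; the names below are illustrative only):

import numpy as np
from numpy.polynomial import legendre

def fit_pixel_spectrum(freq, vals, wgt, nbasisf):
    # Normalise frequency to [-1, 1] for a well-conditioned Legendre basis.
    nu = 2 * (freq - freq.min()) / (freq.max() - freq.min()) - 1
    # Weighted least-squares fit with nbasisf basis functions (degree nbasisf - 1).
    # With nbasisf equal to the number of bands the fit interpolates them exactly.
    return legendre.legfit(nu, vals, deg=nbasisf - 1, w=np.sqrt(wgt))

freq = np.array([0.9e9, 1.1e9, 1.3e9, 1.5e9])   # band centre frequencies
vals = np.array([1.0, 0.8, 0.7, 0.65])          # per-band model values for one pixel
wgt = np.ones_like(vals)                        # per-band weights, e.g. from wsums
coeffs = fit_pixel_spectrum(freq, vals, wgt, nbasisf=freq.size)
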
