Line data Source code
1 : //# Copyright (C) 1997-2010
2 : //# Associated Universities, Inc. Washington DC, USA.
3 : //#
4 : //# This library is free software; you can redistribute it and/or modify it
5 : //# under the terms of the GNU Library General Public License as published by
6 : //# the Free Software Foundation; either version 2 of the License, or (at your
7 : //# option) any later version.
8 : //#
9 : //# This library is distributed in the hope that it will be useful, but WITHOUT
10 : //# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11 : //# FITNESS FOR A PARTICULAR PURPOSE. See the GNU Library General Public
12 : //# License for more details.
13 : //#
14 : //# You should have received a copy of the GNU Library General Public License
15 : //# along with this library; if not, write to the Free Software Foundation,
16 : //# Inc., 675 Massachusetts Ave, Cambridge, MA 02139, USA.
17 : //#
18 : //# Correspondence concerning AIPS++ should be addressed as follows:
19 : //# Internet email: casa-feedback@nrao.edu.
20 : //# Postal address: AIPS++ Project Office
21 : //# National Radio Astronomy Observatory
22 : //# 520 Edgemont Road
23 : //# Charlottesville, VA 22903-2475 USA
24 : //#
25 : //# $Id: $AspMatrixCleaner.cc
26 :
27 : // Same include list as in MatrixCleaner.cc
28 : #include <casacore/casa/Arrays/Matrix.h>
29 : #include <casacore/casa/Arrays/Cube.h>
30 : #include <casacore/casa/Arrays/ArrayMath.h>
31 : #include <casacore/casa/Arrays/MatrixMath.h>
32 : //#include <casa/Arrays/ArrayIO.h>
33 : #include <casacore/casa/BasicMath/Math.h>
34 : #include <casacore/casa/BasicSL/Complex.h>
35 : #include <casacore/casa/Logging/LogIO.h>
36 : #include <casacore/casa/OS/File.h>
37 : #include <casacore/casa/Containers/Record.h>
38 :
39 : #include <casacore/lattices/LRegions/LCBox.h>
40 : #include <casacore/casa/Arrays/Slicer.h>
41 : #include <casacore/scimath/Mathematics/FFTServer.h>
42 : #include <casacore/casa/OS/HostInfo.h>
43 : #include <casacore/casa/Arrays/ArrayError.h>
44 : #include <casacore/casa/Arrays/ArrayIter.h>
45 : #include <casacore/casa/Arrays/VectorIter.h>
46 :
47 : #include <casacore/casa/Utilities/GenSort.h>
48 : #include <casacore/casa/BasicSL/String.h>
49 : #include <casacore/casa/Utilities/Assert.h>
50 : #include <casacore/casa/Utilities/Fallible.h>
51 :
52 : #include <casacore/casa/BasicSL/Constants.h>
53 :
54 : #include <casacore/casa/Logging/LogSink.h>
55 : #include <casacore/casa/Logging/LogMessage.h>
56 :
57 : #include <synthesis/MeasurementEquations/MatrixCleaner.h>
58 : #include <synthesis/TransformMachines/StokesImageUtil.h>
59 : #include <synthesis/TransformMachines2/Utils.h>
60 : #include <casacore/coordinates/Coordinates/TabularCoordinate.h>
61 : #include <casacore/casa/Utilities/CountedPtr.h>
62 :
63 : #ifdef _OPENMP
64 : #include <omp.h>
65 : #endif
66 :
67 : // Additional include files
68 : #include <algorithm>
69 : #include <chrono>
70 :
71 : #include <synthesis/MeasurementEquations/AspMatrixCleaner.h>
72 :
73 : // for alglib
74 : #include <synthesis/MeasurementEquations/objfunc_alglib.h>
75 : //#include <synthesis/MeasurementEquations/objfunc_alglib_lm.h>
76 : //#include <synthesis/MeasurementEquations/objfunc_alglib_log.h>
77 : //#include <synthesis/MeasurementEquations/objfunc_alglib_beta.h>
78 : using namespace alglib;
79 :
80 : using namespace casacore;
81 : using namespace std::chrono;
82 : namespace casa { //# NAMESPACE CASA - BEGIN
83 11 : AspMatrixCleaner::AspMatrixCleaner():
84 : MatrixCleaner(),
85 11 : itsInitScaleSizes(0),
86 : //itsAspScaleSizes(0),
87 : //itsAspAmplitude(0),
88 11 : itsNInitScales(5),
89 11 : itsPsfWidth(0.0),
90 11 : itsUseZhang(false),
91 11 : itsSwitchedToHogbom(false),
92 11 : itsNumHogbomIter(0),
93 11 : itsNthHogbom(0),
94 11 : itsOptimumScale(0),
95 11 : itsOptimumScaleSize(0.0),
96 11 : itsPeakResidual(1000.0), // temp. should this be changed to MAX?
97 11 : itsPrevPeakResidual(0.0),
98 11 : itsOrigDirty( ),
99 11 : itsFusedThreshold(0.0),
100 11 : itsNumNoChange(0),
101 11 : itsBinSizeForSumFlux(4),
102 33 : itsUserLargestScale(-1.0)
103 : {
104 11 : itsInitScales.resize(0);
105 11 : itsInitScaleXfrs.resize(0);
106 11 : itsDirtyConvInitScales.resize(0);
107 11 : itsInitScaleMasks.resize(0);
108 11 : itsPsfConvInitScales.resize(0);
109 11 : itsNumIterNoGoodAspen.resize(0);
110 : //itsAspCenter.resize(0);
111 11 : itsGoodAspActiveSet.resize(0);
112 11 : itsGoodAspAmplitude.resize(0);
113 11 : itsGoodAspCenter.resize(0);
114 : //itsPrevAspActiveSet.resize(0);
115 : //itsPrevAspAmplitude.resize(0);
116 11 : itsUsedMemoryMB = double(HostInfo::memoryUsed()/1024); // memoryUsed() is in kB; convert to MB
117 11 : itsNormMethod = casa::refim::SynthesisUtils::getenv("ASP_NORM", itsDefaultNorm);
118 11 : }
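// Note on itsNormMethod above: it is read from the environment variable ASP_NORM
// (falling back to itsDefaultNorm). A minimal, hypothetical usage sketch, assuming
// a POSIX shell; the value 1 selects Norm Method 1, which is the path that enables
// the Hogbom switch-over used as a speed-up later in aspclean():
//
//   export ASP_NORM=1    # hypothetical setting, read once at construction time
//   # then run the imaging task as usual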
119 :
120 11 : AspMatrixCleaner::~AspMatrixCleaner()
121 : {
122 11 : destroyAspScales();
123 11 : destroyInitMasks();
124 11 : destroyInitScales();
125 11 : if(!itsMask.null())
126 7 : itsMask=0;
127 11 : }
128 :
129 70 : Bool AspMatrixCleaner::setaspcontrol(const Int niter,
130 : const Float gain,
131 : const Quantity& aThreshold,
132 : const Quantity& fThreshold)
133 : {
134 70 : itsMaxNiter=niter;
135 70 : itsGain=gain;
136 70 : itsThreshold=aThreshold;
137 70 : itsFracThreshold=fThreshold;
138 70 : return true;
139 : }
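// Illustrative (hypothetical) call to the control setter above, assuming the caller
// already holds an AspMatrixCleaner instance and casacore Quantity values for the
// absolute and fractional stopping thresholds; the numbers are placeholders only:
//
//   AspMatrixCleaner cleaner;
//   cleaner.setaspcontrol(1000,                              // niter
//                         0.1f,                              // loop gain
//                         casacore::Quantity(1e-3, "Jy"),    // aThreshold
//                         casacore::Quantity(0.0, ""));      // fThreshold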
140 :
141 : // Do the clean as set up
142 35 : Int AspMatrixCleaner::aspclean(Matrix<Float>& model,
143 : Bool /*showProgress*/)
144 : {
145 35 : AlwaysAssert(model.shape()==itsDirty->shape(), AipsError);
146 :
147 70 : LogIO os(LogOrigin("AspMatrixCleaner", "aspclean()", WHERE));
148 35 : os << LogIO::NORMAL1 << "Asp clean algorithm" << LogIO::POST;
149 :
150 :
151 : //Int scale;
152 :
153 35 : AlwaysAssert(itsScalesValid, AipsError);
154 : //no need to use all cores if possible
155 35 : Int nth = itsNscales;
156 : #ifdef _OPENMP
157 :
158 35 : nth = min(nth, omp_get_max_threads());
159 :
160 : #endif
161 :
162 : // Define a subregion for the inner quarter. No longer used
163 : /*IPosition blcDirty(model.shape().nelements(), 0);
164 : IPosition trcDirty(model.shape()-1);
165 :
166 : if(!itsMask.null())
167 : {
168 : os << "Cleaning using given mask" << LogIO::POST;
169 : if (itsMaskThreshold < 0)
170 : {
171 : os << LogIO::NORMAL
172 : << "Mask thresholding is not used, values are interpreted as weights"
173 : <<LogIO::POST;
174 : }
175 : else
176 : {
177 : // a mask that does not allow for clean was sent
178 : if(noClean_p)
179 : return 0;
180 :
181 : os << LogIO::NORMAL
182 : << "Cleaning pixels with mask values above " << itsMaskThreshold
183 : << LogIO::POST;
184 : }
185 :
186 : Int nx=model.shape()(0);
187 : Int ny=model.shape()(1);
188 :
189 : AlwaysAssert(itsMask->shape()(0)==nx, AipsError);
190 : AlwaysAssert(itsMask->shape()(1)==ny, AipsError);
191 : Int xbeg=nx-1;
192 : Int ybeg=ny-1;
193 : Int xend=0;
194 : Int yend=0;
195 : for (Int iy=0;iy<ny;iy++)
196 : {
197 : for (Int ix=0;ix<nx;ix++)
198 : {
199 : if((*itsMask)(ix,iy)>0.000001)
200 : {
201 : xbeg=min(xbeg,ix);
202 : ybeg=min(ybeg,iy);
203 : xend=max(xend,ix);
204 : yend=max(yend,iy);
205 : }
206 : }
207 : }
208 :
209 : if (!itsIgnoreCenterBox) // this is false
210 : {
211 : if((xend - xbeg)>nx/2)
212 : {
213 : xbeg=nx/4-1; //if larger than quarter take inner of mask
214 : os << LogIO::WARN << "Mask span over more than half the x-axis: Considering inner half of the x-axis" << LogIO::POST;
215 : }
216 : if((yend - ybeg)>ny/2)
217 : {
218 : ybeg=ny/4-1;
219 : os << LogIO::WARN << "Mask span over more than half the y-axis: Considering inner half of the y-axis" << LogIO::POST;
220 : }
221 : xend=min(xend,xbeg+nx/2-1);
222 : yend=min(yend,ybeg+ny/2-1);
223 : }
224 :
225 : blcDirty(0)=xbeg;
226 : blcDirty(1)=ybeg;
227 : trcDirty(0)=xend;
228 : trcDirty(1)=yend;
229 : }
230 : else
231 : {
232 : if (itsIgnoreCenterBox) {
233 : os << LogIO::NORMAL << "Cleaning entire image" << LogIO::POST;
234 : os << LogIO::NORMAL1 << "as per MF/WF" << LogIO::POST; // ???
235 : }
236 : else {
237 : os << "Cleaning inner quarter of the image" << LogIO::POST;
238 : for (Int i=0;i<Int(model.shape().nelements());i++)
239 : {
240 : blcDirty(i)=model.shape()(i)/4;
241 : trcDirty(i)=blcDirty(i)+model.shape()(i)/2-1;
242 : if(trcDirty(i)<0)
243 : trcDirty(i)=1;
244 : }
245 : }
246 : }
247 : LCBox centerBox(blcDirty, trcDirty, model.shape());*/
248 :
249 : // Start the iteration
250 35 : Float totalFlux=0.0;
251 35 : Int converged=0;
252 35 : Int stopPointModeCounter = 0;
253 35 : Float tmpMaximumResidual = 0.0;
254 35 : Float minMaximumResidual = 1000.0;
255 35 : Float initRMSResidual = 1000.0;
256 35 : float initModelFlux = 0.0;
257 :
258 35 : os <<LogIO::NORMAL3<< "Starting iteration"<< LogIO::POST;
259 35 : vector<Float> tempScaleSizes;
260 35 : itsIteration = itsStartingIter; // 0
261 :
262 35 : Matrix<Float> itsScale0 = Matrix<Float>(psfShape_p);
263 35 : Matrix<Complex>itsScaleXfr0 = Matrix<Complex> ();
264 :
265 35 : Matrix<Float> itsScale = Matrix<Float>(psfShape_p);
266 35 : Matrix<Complex>itsScaleXfr = Matrix<Complex> ();
267 :
268 : // Define a subregion so that the peak is centered
269 35 : IPosition support(model.shape());
270 35 : support(0) = max(Int(itsInitScaleSizes[itsNInitScales-1] + 0.5), support(0));
271 35 : support(1) = max(Int(itsInitScaleSizes[itsNInitScales-1] + 0.5), support(1));
272 35 : IPosition inc(model.shape().nelements(), 1);
273 :
274 : // get init peakres
275 : // this is important so we have correct peakres for each channel in cube imaging
276 35 : Float maxVal=0;
277 35 : IPosition posmin((*itsDirty).shape().nelements(), 0);
278 35 : Float minVal=0;
279 35 : IPosition posmax((*itsDirty).shape().nelements(), 0);
280 35 : minMaxMasked(minVal, maxVal, posmin, posmax, (*itsDirty), itsInitScaleMasks[0]);
281 35 : itsPeakResidual = (fabs(maxVal) > fabs(minVal)) ? fabs(maxVal) : fabs(minVal);
282 :
283 35 : vector<Float> vecItsStrengthOptimum;
284 35 : vector<Float> vecItsOptimumScaleSize;
285 35 : vecItsStrengthOptimum.clear();
286 35 : vecItsOptimumScaleSize.clear();
287 :
288 : // calculate rms residual
289 35 : float rms = 0.0;
290 : // should be masked; the loops below are inclusive, so count (trc - blc + 1) samples per axis
291 35 : int num = int((trcDirty(0) - blcDirty(0) + 1) * (trcDirty(1) - blcDirty(1) + 1));
292 11625 : for (int j = blcDirty(1); j <= trcDirty(1); ++j)
293 : {
294 4040340 : for (int i = blcDirty(0); i <= trcDirty(0); ++i)
295 : {
296 4028750 : rms += pow((*itsDirty)(i, j), 2);
297 : }
298 : }
299 35 : rms = rms / num;
300 35 : initRMSResidual = rms;
301 : //os << LogIO::NORMAL3 << "initial rms residual " << initRMSResidual << LogIO::POST;
302 :
303 35 : initModelFlux = sum(model);
304 : //os << LogIO::NORMAL3 << "initial model flux " << initModelFlux << LogIO::POST;
305 :
306 806 : for (Int ii = itsStartingIter; ii < itsMaxNiter; ii++)
307 : {
308 : //cout << "cur iter " << itsIteration << " max iter is "<< itsMaxNiter << endl;
309 791 : itsIteration++;
310 :
311 : // calculate rms residual
312 791 : rms = 0.0;
313 267367 : for (int j = blcDirty(1); j <= trcDirty(1); ++j)
314 : {
315 95090272 : for (int i = blcDirty(0); i <= trcDirty(0); ++i)
316 : {
317 94823696 : rms += pow((*itsDirty)(i, j), 2);
318 : }
319 : }
320 791 : rms = rms / num;
321 :
322 : // make single optimized scale image
323 791 : os << LogIO::NORMAL3 << "Making optimized scale " << itsOptimumScaleSize << " at location " << itsPositionOptimum << LogIO::POST;
324 :
325 791 : if (itsSwitchedToHogbom)
326 : {
327 17 : makeScaleImage(itsScale0, 0.0, itsStrengthOptimum, itsPositionOptimum);
328 17 : fft.fft0(itsScaleXfr0, itsScale0);
329 17 : itsScale = 0.0;
330 17 : itsScale = itsScale0;
331 17 : itsScaleXfr.resize();
332 17 : itsScaleXfr = itsScaleXfr0;
333 17 : vecItsStrengthOptimum.push_back(itsStrengthOptimum);
334 17 : vecItsOptimumScaleSize.push_back(0);
335 : }
336 : else
337 : {
338 774 : makeScaleImage(itsScale, itsOptimumScaleSize, itsStrengthOptimum, itsPositionOptimum);
339 774 : itsScaleXfr.resize();
340 774 : fft.fft0(itsScaleXfr, itsScale);
341 774 : vecItsStrengthOptimum.push_back(itsStrengthOptimum);
342 774 : vecItsOptimumScaleSize.push_back(itsOptimumScaleSize);
343 : }
344 :
345 : // trigger hogbom when
346 : // (1) itsStrengthOptimum is small enough & peakres rarely changes or itsPeakResidual is small enough
347 : // (2) peakres rarely changes
348 791 : if (itsNormMethod == 1) // only Norm Method 1 needs hogbom for speedup
349 : {
350 : //if (!itsSwitchedToHogbom && abs(itsStrengthOptimum) < 0.001) // M31 value - new Asp + gaussian
351 : //if (!itsSwitchedToHogbom && abs(itsStrengthOptimum) < 2.8) // M31 value-new asp: 1k->5k
352 : //if (!itsSwitchedToHogbom && abs(itsPeakResidual) < 8e-5 && abs(itsStrengthOptimum) < 1e-4) // G55
353 : //if (!itsSwitchedToHogbom && abs(itsPeakResidual) < 7e-3 && abs(itsStrengthOptimum) < 1e-7) // Points
354 : //if (!itsSwitchedToHogbom && abs(itsPeakResidual) < 0.138) //GSL M100 channel 22
355 : //if (!itsSwitchedToHogbom && abs(itsPeakResidual) < 0.15) // GSL M100 channel 22 & 23 & others
356 : //if (!itsSwitchedToHogbom && (abs(itsPeakResidual) < 2.5 || abs(itsStrengthOptimum) < 1e-3)) // GSL, CygA
357 :
358 : /*if(!itsSwitchedToHogbom && (abs(itsPeakResidual) < itsFusedThreshold
359 : || abs(itsStrengthOptimum) < (5e-4 * itsFusedThreshold)))*/ // GSL, CygA.
360 1565 : if(!itsSwitchedToHogbom && (abs(itsPeakResidual) < itsFusedThreshold
361 774 : || ((abs(itsStrengthOptimum) < (5e-4 * itsFusedThreshold)) && (itsNumNoChange >= 2))))
362 : // 5e-4 is an experimental number; below that threshold itsStrengthOptimum is assumed to be too small to take effect.
363 : {
364 0 : os << LogIO::NORMAL3 << "Switch to hogbom b/c peak residual or optimum strength is small enough: " << itsFusedThreshold << LogIO::POST;
365 :
366 0 : bool runlong = false;
367 :
368 : //option 1: use rms residual to detect convergence
369 0 : if (initRMSResidual > rms && initRMSResidual/rms < 1.5)
370 : {
371 0 : runlong = true;
372 0 : os << LogIO::NORMAL3 << "Run hogbom for longer iterations b/c it's approaching convergence. initial rms " << initRMSResidual << " rms " << rms << LogIO::POST;
373 : }
374 : //option 2: use model flux to detect convergence
375 : /*float modelFlux = 0.0;
376 : modelFlux = sum(model);
377 : if (initModelFlux != 0 && (abs(initModelFlux - modelFlux)/initModelFlux < 0.01))
378 : {
379 : runlong = true;
380 : os << LogIO::NORMAL3 << "Run hogbom for longer iterations b/c it's approaching convergence. init model flux " << initModelFlux << " model flux " << modelFlux << LogIO::POST;
381 : }*/
382 :
383 0 : switchedToHogbom(runlong);
384 :
385 0 : if (itsNumNoChange >= 2)
386 0 : itsNumNoChange = 0;
387 : }
388 791 : if (!itsSwitchedToHogbom && itsNumNoChange >= 2)
389 : {
390 2 : os << LogIO::NORMAL3 << "Switched to hogbom at iteration "<< ii << " b/c peakres rarely changes" << LogIO::POST;
391 2 : itsNumNoChange = 0;
392 :
393 2 : os << LogIO::NORMAL3 << "total flux " << totalFlux << " model flux " << sum(model) << LogIO::POST;
394 2 : bool runlong = false;
395 :
396 : //option 1: use rms residual to detect convergence
397 2 : if (initRMSResidual > rms && initRMSResidual/rms < 1.5)
398 : {
399 0 : runlong = true;
400 0 : os << LogIO::NORMAL3 << "Run hogbom for longer iterations b/c it's approaching convergence. initial rms " << initRMSResidual << " rms " << rms << LogIO::POST;
401 : }
402 : //option 2: use model flux to detect convergence
403 : /*float modelFlux = 0.0;
404 : modelFlux = sum(model);
405 : if (initModelFlux != 0 && (abs(initModelFlux - modelFlux)/initModelFlux < 0.01))
406 : {
407 : runlong = true;
408 : os << LogIO::NORMAL3 << "Run hogbom for longer iterations b/c it's approaching convergence. init model flux " << initModelFlux << " model flux " << modelFlux << LogIO::POST;
409 : }*/
410 :
411 2 : switchedToHogbom(runlong);
412 : }
413 : }
414 :
415 791 : if (!itsSwitchedToHogbom)
416 : {
417 772 : if (itsNumIterNoGoodAspen.size() >= 10)
418 682 : itsNumIterNoGoodAspen.pop_front(); // only track the past 10 iters
419 772 : if (itsOptimumScaleSize == 0)
420 4 : itsNumIterNoGoodAspen.push_back(1); // Zhang 2018 fused-Asp approach
421 : else
422 768 : itsNumIterNoGoodAspen.push_back(0);
423 : }
424 :
425 : // Now add to the total flux
426 791 : totalFlux += (itsStrengthOptimum*itsGain);
427 791 : itsTotalFlux = totalFlux;
428 :
429 791 : if(ii == itsStartingIter)
430 : {
431 35 : itsMaximumResidual = abs(itsPeakResidual);
432 35 : tmpMaximumResidual = itsMaximumResidual;
433 35 : os <<LogIO::NORMAL3 << "Initial maximum residual is " << itsMaximumResidual;
434 35 : if( !itsMask.null() )
435 35 : os <<LogIO::NORMAL3 << " within the mask ";
436 :
437 35 : os <<LogIO::NORMAL3 << LogIO::POST;
438 : }
439 :
440 : //save the min peak residual
441 791 : if (abs(minMaximumResidual) > abs(itsPeakResidual))
442 425 : minMaximumResidual = abs(itsPeakResidual);
443 :
444 : // Various ways of stopping:
445 : // 0. stop if below cycle threshold - same as MS-Clean
446 791 : if (abs(itsPeakResidual) < threshold())
447 : {
448 40 : os << "Reached stopping threshold " << threshold() << " at iteration "<<
449 20 : ii << LogIO::POST;
450 20 : os << "peakres is " << abs(itsPeakResidual) << LogIO::POST;
451 20 : converged = 1;
452 20 : itsSwitchedToHogbom = false;
453 20 : os << LogIO::NORMAL3 << "final rms residual " << rms << ", model flux " << sum(model) << LogIO::POST;
454 :
455 20 : break;
456 : }
457 : // 1. stop if below threshold. 1e-6 is an experimental number
458 771 : if (abs(itsStrengthOptimum) < (1e-6 * itsFusedThreshold))
459 : {
460 : //cout << "Reached stopping threshold " << 1e-6 * itsFusedThreshold << " at iteration "<< ii << endl;
461 0 : os << LogIO::NORMAL3 << "Reached stopping threshold " << 1e-6 * itsFusedThreshold << " at iteration "<<
462 0 : ii << LogIO::POST;
463 0 : os <<LogIO::NORMAL3 << "Optimum flux is " << abs(itsStrengthOptimum) << LogIO::POST;
464 0 : converged = 1;
465 0 : itsSwitchedToHogbom = false;
466 0 : os << LogIO::NORMAL3 << "final rms residual " << rms << ", modelflux " << sum(model) << LogIO::POST;
467 :
468 0 : break;
469 : }
470 : // 2. negatives on largest scale?
471 771 : if ((itsNscales > 1) && itsStopAtLargeScaleNegative &&
472 0 : itsOptimumScale == (itsNInitScales - 1) &&
473 0 : itsStrengthOptimum < 0.0)
474 :
475 : {
476 0 : os <<LogIO::NORMAL3 << "Reached negative on largest scale" << LogIO::POST;
477 0 : converged = -2;
478 0 : break;
479 : }
480 : // 3. stop point mode at work
481 771 : if (itsStopPointMode > 0)
482 : {
483 0 : if (itsOptimumScale == 0)
484 0 : stopPointModeCounter++;
485 : else
486 0 : stopPointModeCounter = 0;
487 :
488 0 : if (stopPointModeCounter >= itsStopPointMode)
489 : {
490 : os <<LogIO::NORMAL3 << "Cleaned " << stopPointModeCounter <<
491 : " consecutive components from the smallest scale, stopping prematurely"
492 0 : << LogIO::POST;
493 0 : itsDidStopPointMode = true;
494 0 : converged = -1;
495 0 : break;
496 : }
497 : }
498 : //4. Diverging large scale
499 : //If the actual value is 50% above the maximum residual, there is a good chance it will not recover at this stage
500 : /*if(((abs(itsStrengthOptimum)-abs(tmpMaximumResidual)) > (abs(tmpMaximumResidual)/2.0))
501 : && !(itsStopAtLargeScaleNegative))
502 : {
503 : cout << "Diverging due to large scale?" << endl;
504 : os <<LogIO::NORMAL3 << "Diverging due to large scale?" << LogIO::POST;
505 : os <<LogIO::NORMAL3 << "itsStrengthOptimum " << itsStrengthOptimum << " tmp " << tmpMaximumResidual << LogIO::POST;
506 : //clean is diverging most probably due to the large scale
507 : converged=-2;
508 : break;
509 : }*/
510 : //5. Diverging for some other reason; may just need another CS-style reconciling
511 771 : if((abs(itsStrengthOptimum)-abs(tmpMaximumResidual)) > (abs(tmpMaximumResidual)/2.0) ||
512 1542 : (abs(itsPeakResidual)-abs(tmpMaximumResidual)) > (abs(tmpMaximumResidual)/2.0) ||
513 771 : (abs(itsPeakResidual)-abs(minMaximumResidual)) > (abs(minMaximumResidual)/2.0))
514 : {
515 0 : os << LogIO::NORMAL3 << "Diverging due to unknown reason" << LogIO::POST;
516 0 : os << LogIO::NORMAL3 << "tmpMaximumResidual " << abs(tmpMaximumResidual) << " itsStrengthOptimum " << abs(itsStrengthOptimum) << " itsPeakResidual " << abs(itsPeakResidual) << LogIO::POST;
517 0 : os << LogIO::NORMAL3 << "minMaximumResidual " << abs(minMaximumResidual) << LogIO::POST;
518 :
519 0 : converged=-3;
520 0 : itsSwitchedToHogbom = false;
521 0 : os << LogIO::NORMAL3 << "final rms residual " << rms << ", modelflux " << sum(model) << LogIO::POST;
522 :
523 0 : break;
524 : }
525 :
526 771 : if (itsIteration == itsStartingIter + 1)
527 35 : os <<LogIO::NORMAL3 << "iteration MaximumResidual CleanedFlux" << LogIO::POST;
528 771 : if ((itsIteration % (itsMaxNiter/10 > 0 ? itsMaxNiter/10 : 1)) == 0)
529 : {
530 : //Good place to re-up the fiducial maximum residual
531 : //tmpMaximumResidual=abs(itsStrengthOptimum);
532 168 : os <<LogIO::NORMAL3 << itsIteration <<" "<< abs(itsPeakResidual) <<" "
533 168 : << totalFlux <<LogIO::POST;
534 : }
535 :
536 :
537 771 : IPosition blc(itsPositionOptimum - support/2);
538 1542 : IPosition trc(itsPositionOptimum + support/2 - 1);
539 : // try 2.5 sigma
540 : /*Int sigma5 = (Int)(5 * itsOptimumScaleSize / 2);
541 : IPosition blc(itsPositionOptimum - sigma5);
542 : IPosition trc(itsPositionOptimum + sigma5 -1);*/
543 :
544 771 : LCBox::verify(blc, trc, inc, model.shape());
545 771 : IPosition blcPsf(blc);
546 771 : IPosition trcPsf(trc);
547 : //blcDirty = blc; // update blcDirty/trcDirty is bad for Asp
548 : //trcDirty = trc;
549 :
550 : // Update the model image
551 771 : Matrix<Float> modelSub = model(blc, trc);
552 : Float scaleFactor;
553 771 : scaleFactor = itsGain * itsStrengthOptimum;
554 771 : Matrix<Float> scaleSub = (itsScale)(blcPsf,trcPsf);
555 771 : modelSub += scaleFactor * scaleSub;
556 :
557 : // Now update the residual image
558 : // PSF * model
559 771 : Matrix<Complex> cWork;
560 771 : cWork = ((*itsXfr)*(itsScaleXfr)); //Asp's
561 771 : Matrix<Float> itsPsfConvScale = Matrix<Float>(psfShape_p);
562 771 : fft.fft0(itsPsfConvScale, cWork, false);
563 771 : fft.flip(itsPsfConvScale, false, false); //need this if conv with 1 scale; don't need this if conv with 2 scales
564 771 : Matrix<Float> psfSub = (itsPsfConvScale)(blcPsf, trcPsf);
565 771 : Matrix<Float> dirtySub=(*itsDirty)(blc,trc);
566 :
567 : /* debug info
568 : float maxvalue;
569 : IPosition peakpos;
570 : findMaxAbs(psfSub, maxvalue, peakpos);
571 : cout << "psfSub pos " << peakpos << " maxval " << psfSub(peakpos) << endl;
572 : cout << "dirtySub pos " << peakpos << " val " << dirtySub(peakpos) << endl;
573 : findMaxAbs(itsPsfConvScale, maxvalue, peakpos);
574 : cout << "itsPsfConvScale pos " << peakpos << " maxval " << itsPsfConvScale(peakpos) << endl;
575 : cout << "itsDirty pos " << peakpos << " val " << (*itsDirty)(peakpos) << endl;
576 : findMaxAbs(dirtySub, maxvalue, peakpos);
577 : cout << "dirtySub pos " << peakpos << " maxval " << dirtySub(peakpos) << endl;
578 : findMaxAbs((*itsDirty), maxvalue, peakpos);
579 : cout << "itsDirty pos " << peakpos << " maxval " << (*itsDirty)(peakpos) << endl;
580 : cout << "itsPositionOptimum " << itsPositionOptimum << endl;
581 : cout << " maxPsfSub " << max(fabs(psfSub)) << " maxPsfConvScale " << max(fabs(itsPsfConvScale)) << " itsGain " << itsGain << endl;*/
582 771 : os <<LogIO::NORMAL3 << "itsStrengthOptimum " << itsStrengthOptimum << LogIO::POST;
583 :
584 : // subtract the peak that we found from the dirty image
585 771 : dirtySub -= scaleFactor * psfSub;
586 :
587 : // further update the model and residual with the remaining aspen of the active-set
588 : // This is no longer needed since we found that using the last Aspen to update the model/residual is good enough
589 : /*if (itsOptimumScaleSize != 0)
590 : {
591 : bool aspenUnchanged = true;
592 : if ((Int)itsGoodAspActiveSet.size() <= 1)
593 : aspenUnchanged = false;
594 :
595 : for (scale = 0; scale < (Int)itsGoodAspActiveSet.size() - 1; ++scale)
596 : // -1 because we counted the latest aspen in the previous step already
597 : {
598 : if (itsPrevAspActiveSet[scale] == itsGoodAspActiveSet[scale])
599 : continue;
600 :
601 : cout << "I have active-set to adjust" << endl;
602 : aspenUnchanged = false;
603 : // "center" is unchanged for aspen
604 : IPosition blc1(itsGoodAspCenter[scale] - support/2);
605 : IPosition trc1(itsGoodAspCenter[scale] + support/2 - 1);
606 : LCBox::verify(blc1, trc1, inc, model.shape());
607 :
608 : IPosition blcPsf1(blc1);
609 : IPosition trcPsf1(trc1);
610 :
611 : Matrix<Float> modelSub1 = model(blc1, trc1);
612 : Matrix<Float> dirtySub1 = (*itsDirty)(blc1,trc1);
613 :
614 : // First remove the previous values of aspen in the active-set
615 : cout << "aspclean: restore with previous scale " << itsPrevAspActiveSet[scale];
616 : cout << " amp " << itsPrevAspAmplitude[scale] << endl;
617 :
618 : makeScaleImage(itsScale, itsPrevAspActiveSet[scale], itsPrevAspAmplitude[scale], itsGoodAspCenter[scale]);
619 : itsScaleXfr.resize();
620 : fft.fft0(itsScaleXfr, itsScale);
621 : Matrix<Float> scaleSubPrev = (itsScale)(blcPsf1,trcPsf1);
622 : const float scaleFactorPrev = itsGain * itsPrevAspAmplitude[scale];
623 : // restore the model image...
624 : modelSub1 -= scaleFactorPrev * scaleSubPrev;
625 : // restore the residual image
626 : Matrix<Complex> cWorkPrev;
627 : cWorkPrev = ((*itsXfr)*(itsScaleXfr));
628 : Matrix<Float> itsPsfConvScalePrev = Matrix<Float>(psfShape_p);
629 : fft.fft0(itsPsfConvScalePrev, cWorkPrev, false);
630 : fft.flip(itsPsfConvScalePrev, false, false); //need this if conv with 1 scale; don't need this if conv with 2 scales
631 : Matrix<Float> psfSubPrev = (itsPsfConvScalePrev)(blcPsf1, trcPsf1);
632 : dirtySub1 += scaleFactorPrev * psfSubPrev;
633 :
634 : // Then update with the new values of aspen in the active-set
635 : cout << "aspclean: update with new scale " << itsGoodAspActiveSet[scale];
636 : cout << " amp " << itsGoodAspAmplitude[scale] << endl;
637 : makeScaleImage(itsScale, itsGoodAspActiveSet[scale], itsGoodAspAmplitude[scale], itsGoodAspCenter[scale]);
638 : itsScaleXfr.resize();
639 : fft.fft0(itsScaleXfr, itsScale);
640 : Matrix<Float> scaleSubNew = (itsScale)(blcPsf1,trcPsf1);
641 : const float scaleFactorNew = itsGain * itsGoodAspAmplitude[scale];
642 :
643 : // Now do the addition of the active-set scales to the model image...
644 : modelSub1 += scaleFactorNew * scaleSubNew;
645 : // Now subtract the active-set scales from the residual image
646 : Matrix<Complex> cWorkNew;
647 : cWorkNew = ((*itsXfr)*(itsScaleXfr));
648 : Matrix<Float> itsPsfConvScaleNew = Matrix<Float>(psfShape_p);
649 : fft.fft0(itsPsfConvScaleNew, cWorkNew, false);
650 : fft.flip(itsPsfConvScaleNew, false, false); //need this if conv with 1 scale; don't need this if conv with 2 scales
651 : Matrix<Float> psfSubNew = (itsPsfConvScaleNew)(blcPsf1, trcPsf1);
652 : dirtySub1 -= scaleFactorNew * psfSubNew;
653 : } //update updating model/residual from aspen in active-set
654 :
655 : // switch to hogbom if aspen is unchanged?
656 : / *if (!itsSwitchedToHogbom && aspenUnchanged)
657 : {
658 : cout << "Switched to hogbom b/c aspen is unchanged" << endl;
659 : switchedToHogbom();
660 : }* /
661 : }*/
662 :
663 : // update peakres
664 771 : itsPrevPeakResidual = itsPeakResidual;
665 771 : maxVal=0;
666 771 : posmin = 0;
667 771 : minVal=0;
668 771 : posmax = 0;
669 771 : minMaxMasked(minVal, maxVal, posmin, posmax, (*itsDirty), itsInitScaleMasks[0]);
670 771 : itsPeakResidual = (fabs(maxVal) > fabs(minVal)) ? fabs(maxVal) : fabs(minVal);
671 771 : os <<LogIO::NORMAL3 << "current peakres " << itsPeakResidual << LogIO::POST;
672 :
673 1525 : if (!itsSwitchedToHogbom &&
674 754 : (fabs(itsPeakResidual - itsPrevPeakResidual) < 1e-4)) //peakres rarely changes
675 6 : itsNumNoChange += 1;
676 : //cout << "after: itsDirty optPos " << (*itsDirty)(itsPositionOptimum) << endl;
677 :
678 : // If we switch to hogbom (i.e. only have 0 scale size),
679 : // we still need to do the following Aspen update to get the new optimumStrength
680 771 : if (itsSwitchedToHogbom)
681 : {
682 17 : if (itsNumHogbomIter == 0)
683 : {
684 0 : itsSwitchedToHogbom = false;
685 :
686 0 : os << LogIO::NORMAL3 << "switched back to Asp." << LogIO::POST;
687 :
688 : //option 1: use rms residual to detect convergence
689 0 : if (!(initRMSResidual > rms && initRMSResidual/rms < 1.5))
690 : {
691 0 : os << "Reached convergence at iteration "<< ii << " b/c hogbom finished" << LogIO::POST;
692 0 : converged = 1;
693 0 : os << LogIO::NORMAL3 << "initial rms " << initRMSResidual << " final rms residual " << rms << LogIO::POST;
694 :
695 0 : break;
696 : }
697 : //option 2: use model flux to detect convergence
698 : /*float modelFlux = 0.0;
699 : modelFlux = sum(model);
700 : if (!(initModelFlux != 0 && (abs(initModelFlux - modelFlux)/initModelFlux < 0.01)))
701 : {
702 : os << "Reached convergence at iteration "<< ii << " b/c hogbom finished" << LogIO::POST;
703 : converged = 1;
704 : os << LogIO::NORMAL3 << "initial model flux " << initModelFlux << " final model flux " << modelFlux << LogIO::POST;
705 :
706 : break;
707 : }*/
708 : }
709 : else
710 17 : itsNumHogbomIter -= 1;
711 : }
712 :
713 771 : tempScaleSizes.clear();
714 771 : tempScaleSizes = getActiveSetAspen();
715 771 : tempScaleSizes.push_back(0.0); // put 0 scale
716 771 : defineAspScales(tempScaleSizes);
717 771 : }
718 : // End of iteration
719 :
720 35 : vector<Float> sumFluxByBins(itsBinSizeForSumFlux,0.0);
721 35 : vector<Float> rangeFluxByBins(itsBinSizeForSumFlux+1,0.0);
722 :
723 35 : getFluxByBins(vecItsOptimumScaleSize,vecItsStrengthOptimum,itsBinSizeForSumFlux,sumFluxByBins,rangeFluxByBins);
724 :
725 :
726 :
727 35 : os << " The number of bins for collecting the sum of Flux: " << itsBinSizeForSumFlux << endl;
728 :
729 175 : for (Int ii = 0; ii < itsBinSizeForSumFlux ; ii++)
730 : {
731 140 : os << " Bin " << ii << " (" << rangeFluxByBins[ii] * itsGain << " , " << rangeFluxByBins[ii+1] * itsGain << "). Sum of Flux : " << sumFluxByBins[ii] * itsGain << LogIO::POST;
732 : }
733 :
734 : // memory used
735 : //itsUsedMemoryMB = double(HostInfo::memoryUsed()/1024);
736 : //cout << "Memory allocated in aspclean " << itsUsedMemoryMB << " MB." << endl;
737 :
738 35 : if(!converged)
739 15 : os << "Failed to reach stopping threshold" << LogIO::POST;
740 :
741 35 : return converged;
742 35 : }
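// Summary of the convergence codes returned by aspclean() above, as set in the
// stopping checks inside the iteration loop:
//    1 : reached the stopping threshold (or a small-enough optimum strength,
//        or Hogbom finished while already near convergence)
//   -1 : stopped prematurely via the stop-point-mode counter
//   -2 : hit a negative component on the largest scale with
//        itsStopAtLargeScaleNegative enabled
//   -3 : diverging (peak residual or optimum strength grew well above the
//        running maximum/minimum residual)
//    0 : itsMaxNiter iterations completed without reaching the threshold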
743 :
744 :
745 32 : Bool AspMatrixCleaner::destroyAspScales()
746 : {
747 32 : destroyInitScales();
748 32 : destroyScales();
749 :
750 140 : for(uInt scale=0; scale < itsDirtyConvInitScales.nelements(); scale++)
751 108 : itsDirtyConvInitScales[scale].resize();
752 :
753 32 : itsDirtyConvInitScales.resize(0, true);
754 :
755 32 : return true;
756 : }
757 :
758 43 : Bool AspMatrixCleaner::destroyInitScales()
759 : {
760 151 : for(uInt scale=0; scale < itsInitScales.nelements(); scale++)
761 108 : itsInitScales[scale].resize();
762 151 : for(uInt scale=0; scale < itsInitScaleXfrs.nelements(); scale++)
763 108 : itsInitScaleXfrs[scale].resize();
764 43 : for(uInt scale=0; scale < itsPsfConvInitScales.nelements(); scale++)
765 0 : itsPsfConvInitScales[scale].resize();
766 :
767 43 : itsInitScales.resize(0, true);
768 43 : itsInitScaleXfrs.resize(0, true);
769 43 : itsPsfConvInitScales.resize(0, true);
770 :
771 43 : return true;
772 : }
773 :
774 11 : Bool AspMatrixCleaner::destroyInitMasks()
775 : {
776 38 : for(uInt scale=0; scale<itsInitScaleMasks.nelements();scale++)
777 27 : itsInitScaleMasks[scale].resize();
778 :
779 11 : itsInitScaleMasks.resize(0);
780 :
781 11 : return true;
782 : }
783 :
784 :
785 35 : float AspMatrixCleaner::getPsfGaussianWidth(ImageInterface<Float>& psf)
786 : {
787 70 : LogIO os( LogOrigin("AspMatrixCleaner","getPsfGaussianWidth",WHERE) );
788 :
789 35 : GaussianBeam beam;
790 : try
791 : {
792 35 : StokesImageUtil::FitGaussianPSF(psf, beam);
793 : }
794 0 : catch(AipsError &x)
795 : {
796 0 : os << "Error in fitting a Gaussian to the PSF : " << x.getMesg() << LogIO::POST;
797 0 : throw( AipsError("Error in fitting a Gaussian to the PSF" + x.getMesg()) );
798 0 : }
799 :
800 35 : CoordinateSystem cs = psf.coordinates();
801 35 : String dirunit = cs.worldAxisUnits()(0);
802 35 : Vector<String> unitas = cs.worldAxisUnits();
803 35 : unitas(0) = "arcsec";
804 35 : unitas(1) = "arcsec";
805 35 : cs.setWorldAxisUnits(unitas);
806 :
807 35 : os << "major width " << beam.getMajor("arcsec") << " in " << cs.worldAxisUnits()(0) << LogIO::POST;
808 35 : os << "minor width " << beam.getMinor("arcsec") << LogIO::POST;
809 35 : os << " pixel sizes are " << abs(cs.increment()(0)) << " and ";
810 35 : os << abs(cs.increment()(1)) << LogIO::POST;
811 35 : const auto xpixels = beam.getMajor("arcsec") / abs(cs.increment()(0));
812 35 : const auto ypixels = beam.getMinor("arcsec") / abs(cs.increment()(1));
813 35 : os << "xpixels " << xpixels << " ypixels " << ypixels << LogIO::POST;
814 :
815 35 : itsPsfWidth = float(ceil((xpixels + ypixels)/2));
816 35 : os << "PSF width: " << itsPsfWidth << " pixels." << LogIO::POST;
817 :
818 35 : return itsPsfWidth;
819 35 : }
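// Worked example for the width computation above (hypothetical numbers): with a
// fitted beam of 2.4" x 1.8" and 0.4"/pixel cells,
//   xpixels = 2.4 / 0.4 = 6.0,  ypixels = 1.8 / 0.4 = 4.5,
//   itsPsfWidth = ceil((6.0 + 4.5) / 2) = ceil(5.25) = 6 pixels.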
820 :
821 : // Make a single initial scale size image by Gaussian
822 108 : void AspMatrixCleaner::makeInitScaleImage(Matrix<Float>& iscale, const Float& scaleSize)
823 : {
824 216 : LogIO os( LogOrigin("AspMatrixCleaner","makeInitScaleImage",WHERE) );
825 :
826 108 : const Int nx = iscale.shape()(0);
827 108 : const Int ny = iscale.shape()(1);
828 108 : iscale = 0.0;
829 :
830 108 : const Double refi = nx/2;
831 108 : const Double refj = ny/2;
832 :
833 108 : if(scaleSize == 0.0)
834 28 : iscale(Int(refi), Int(refj)) = 1.0;
835 : else
836 : {
837 80 : AlwaysAssert(scaleSize>0.0, AipsError);
838 :
839 : /* const Int mini = max(0, (Int)(refi - scaleSize));
840 : const Int maxi = min(nx-1, (Int)(refi + scaleSize));
841 : const Int minj = max(0, (Int)(refj - scaleSize));
842 : const Int maxj = min(ny-1, (Int)(refj + scaleSize));*/
843 :
844 80 : os << "Initial scale size " << scaleSize << " pixels." << LogIO::POST;
845 :
846 : //Gaussian2D<Float> gbeam(1.0/(sqrt(2*M_PI)*scaleSize), 0, 0, scaleSize, 1, 0);
847 :
848 : // 04/06/2022: we have to make the whole scale image. If only the min/max i/j range is used,
849 : // the .image looks spotty and not as smooth as before.
850 41040 : for (int j = 0; j < ny; j++)
851 : {
852 21012480 : for (int i = 0; i < nx; i++)
853 : {
854 : //const int px = i - refi;
855 : //const int py = j - refj;
856 : //iscale(i,j) = gbeam(px, py); // gbeam with the above def is equivalent to the following
857 20971520 : iscale(i,j) = (1.0/(sqrt(2*M_PI)*scaleSize))*exp(-(pow(i-refi,2) + pow(j-refj,2))*0.5/pow(scaleSize,2)); //this is for 1D, but represents Sanjay's and gives good init scale
858 : //iscale(i,j) = (1.0/(2*M_PI*pow(scaleSize,2)))*exp(-(pow(i-refi,2) + pow(j-refj,2))*0.5/pow(scaleSize,2)); // this is for 2D, gives unit area but bad init scale (always picks 0)
859 : }
860 : }
861 :
862 : }
863 108 : }
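// Note on the Gaussian used above: the amplitude 1/(sqrt(2*pi)*scaleSize) is the
// 1-D normalization applied to a 2-D profile, which matches Sanjay's original
// scheme and yields useful initial scales. A unit-area 2-D Gaussian would instead
// use 1/(2*pi*scaleSize^2); as noted in the commented-out alternative, that choice
// makes the zero scale win every time. For example (hypothetical scaleSize = 4
// pixels): 1/(sqrt(2*pi)*4) ~= 0.0997 versus 1/(2*pi*16) ~= 0.00995, i.e. the
// unit-area form is roughly 10x weaker at the peak.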
864 :
865 : // Make a single scale size image by Gaussian
866 791 : void AspMatrixCleaner::makeScaleImage(Matrix<Float>& iscale, const Float& scaleSize, const Float& amp, const IPosition& center)
867 : {
868 :
869 791 : const Int nx = iscale.shape()(0);
870 791 : const Int ny = iscale.shape()(1);
871 791 : iscale = 0.0;
872 :
873 791 : if(scaleSize == 0.0)
874 21 : iscale(Int(center[0]), Int(center[1])) = 1.0;
875 : else
876 : {
877 770 : AlwaysAssert(scaleSize>0.0, AipsError);
878 :
879 : /* const Double refi = nx/2;
880 : const Double refj = ny/2;
881 : const Int mini = max(0, (Int)(refi - scaleSize));
882 : const Int maxi = min(nx-1, (Int)(refi + scaleSize));
883 : const Int minj = max(0, (Int)(refj - scaleSize));
884 : const Int maxj = min(ny-1, (Int)(refj + scaleSize));*/
885 : //cout << "makeScaleImage: scalesize " << scaleSize << " center " << center << " amp " << amp << endl;
886 :
887 : ////Gaussian2D<Float> gbeam(1.0 / (sqrt(2*M_PI)*scaleSize), center[0], center[1], scaleSize, 1, 0);
888 :
889 : // all of the following was trying to replicate Sanjay's code but doesn't work
890 : //const Float d = sqrt(pow(1.0/itsPsfWidth, 2) + pow(1.0/scaleSize, 2));
891 : //Gaussian2D<Float> gbeam(amp / (sqrt(2*M_PI)*scaleSize), center[0], center[1], scaleSize, 1, 0);
892 : //Gaussian2D<Float> gbeam(amp * sqrt(2*M_PI)/d, center[0], center[1], scaleSize * d, 1, 0);
893 : //Gaussian2D<Float> gbeam(amp / (2*M_PI), center[0], center[1], scaleSize, 1, 0);
894 : //Gaussian2D<Float> gbeam(amp / pow(2,scaleSize-1), center[0], center[1], itsInitScaleSizes[scaleSize], 1, 0);
895 :
896 395010 : for (int j = 0; j < ny; j++)
897 : {
898 202245120 : for (int i = 0; i < nx; i++)
899 : {
900 : //const int px = i;
901 : //const int py = j;
902 : // iscale(i,j) = gbeam(px, py); // this is equivalent to the following with the above gbeam definition
903 : // This is for 1D, but represents Sanjay's and gives good init scale
904 : // Note that "amp" is not used in the expression
905 201850880 : iscale(i,j) = (1.0/(sqrt(2*M_PI)*scaleSize))*exp(-(pow(i-center[0],2) + pow(j-center[1],2))*0.5/pow(scaleSize,2));
906 :
907 : // This is for 2D, gives unit area but bad init scale (always picks 0)
908 : // iscale(i,j) = (1.0/(2*M_PI*pow(scaleSize,2)))*exp(-(pow(i-center[0],2) + pow(j-center[1],2))*0.5/pow(scaleSize,2));
909 : }
910 : }
911 :
912 : }
913 791 : }
914 :
915 :
916 0 : void AspMatrixCleaner::getLargestScaleSize(ImageInterface<Float>& psf)
917 : {
918 0 : LogIO os( LogOrigin("AspMatrixCleaner","getLargestScaleSize",WHERE) );
919 :
920 : //cout << "user's largest scale " << itsUserLargestScale << endl;
921 :
922 0 : itsLargestInitScale = 5.0 * itsPsfWidth; // default in pixels
923 0 : const Int nx = psfShape_p(0);
924 0 : const Int ny = psfShape_p(1);
925 :
926 0 : CoordinateSystem cs = psf.coordinates();
927 0 : String dirunit = cs.worldAxisUnits()(0);
928 0 : Vector<String> unitas = cs.worldAxisUnits();
929 0 : unitas(0) = "arcsec";
930 0 : unitas(1) = "arcsec";
931 0 : cs.setWorldAxisUnits(unitas);
932 :
933 : //cout << "ncoord " << cs.nCoordinates() << endl;
934 : //cout << "coord type " << cs.type(0) << endl;
935 :
936 :
937 0 : const float baseline = 100.0; // default shortest baseline = 100m (for both ALMA and VLA)
938 :
939 0 : for (uInt i = 0; i < cs.nCoordinates(); i++)
940 : {
941 0 : if (cs.type(i) == Coordinate::SPECTRAL)
942 : {
943 0 : SpectralCoordinate speccoord(cs.spectralCoordinate(i));
944 : //Double startfreq = 0.0, startpixel = -0.5;
945 0 : Double endfreq = 0.0, endpixel = 0.5;
946 : //speccoord.toWorld(startfreq, startpixel);
947 0 : speccoord.toWorld(endfreq, endpixel);
948 : //Double midfreq = (endfreq + startfreq) / 2.0;
949 : //cout << "coord i " << i << " MFS end frequency range : " << endfreq/1.0e+9 << " GHz" << endl;
950 :
951 0 : float nu = float(endfreq); // 1e9;
952 : // largest scale = (c/nu)/baseline in radians, converted to arcsec and then to pixels via the cell size
953 0 : itsLargestInitScale = float(ceil(((3e+8/nu)/baseline) * 180.0/3.14 * 60.0 * 60.0 / abs(cs.increment()(0))));
954 :
955 : // if user provides largest scale, use it instead.
956 0 : if (itsUserLargestScale > 0)
957 : {
958 0 : if (itsUserLargestScale > itsLargestInitScale)
959 0 : itsStopAtLargeScaleNegative = true;
960 :
961 0 : itsLargestInitScale = itsUserLargestScale;
962 :
963 : // ensure the largest scale is smaller than imsize/4/sqrt(2pi) = imsize/10 (could try imsize/4 later)
964 0 : if(itsLargestInitScale > min(nx/10, ny/10))
965 : {
966 0 : os << LogIO::WARN << "Scale size of " << itsLargestInitScale << " pixels is too big for an image size of " << nx << " x " << ny << " pixels. This scale size will be reset. " << LogIO::POST;
967 0 : itsLargestInitScale = float(ceil(min(nx/10, ny/10))); // based on the idea of MultiTermMatrixCleaner::verifyScaleSizes()
968 : }
969 :
970 0 : return;
971 : }
972 :
973 : // make a conservative largest allowed scale, 5 is a trial number
974 0 : itsLargestInitScale = float(ceil(itsLargestInitScale / 5.0));
975 :
976 : // ensure the largest scale is smaller than imsize/4/sqrt(2pi) = imsize/10 (could try imsize/4 later)
977 0 : if(itsLargestInitScale > min(nx/10, ny/10))
978 : {
979 0 : os << LogIO::WARN << "Scale size of " << itsLargestInitScale << " pixels is too big for an image size of " << nx << " x " << ny << " pixels. This scale size will be reset. " << LogIO::POST;
980 0 : itsLargestInitScale = float(ceil(min(nx/10, ny/10))); // based on the idea of MultiTermMatrixCleaner::verifyScaleSizes()
981 : }
982 :
983 : //cout << "largest scale " << itsLargestInitScale << endl;
984 :
985 0 : return;
986 0 : }
987 : }
988 :
989 0 : }
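// Worked example for the largest-scale estimate above (hypothetical numbers):
// for endfreq nu = 100 GHz and the default 100 m shortest baseline,
//   lambda/B = (3e8 / 1e11) / 100 = 3e-5 rad ~= 6.2 arcsec;
// with 0.5 arcsec cells that is ceil(12.4) = 13 pixels, and after the conservative
// division by 5 the default itsLargestInitScale becomes ceil(13/5) = 3 pixels
// (still capped at roughly min(nx, ny)/10 and overridden by itsUserLargestScale if set).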
990 :
991 28 : void AspMatrixCleaner::setInitScales()
992 : {
993 56 : LogIO os(LogOrigin("AspMatrixCleaner", "setInitScales()", WHERE));
994 :
995 : // if user does not provide the largest scale, use the default.
996 28 : if (itsUserLargestScale < 0)
997 : {
998 12 : itsInitScaleSizes = {0.0f, itsPsfWidth, 2.0f*itsPsfWidth, 4.0f*itsPsfWidth, 8.0f*itsPsfWidth};
999 12 : return;
1000 : }
1001 : else
1002 : {
1003 16 : const Int nx = psfShape_p(0);
1004 16 : const Int ny = psfShape_p(1);
1005 :
1006 : // ensure the largest scale is smaller than imsize/4/sqrt(2pi) = imsize/10 (could try imsize/4 later)
1007 16 : if(itsUserLargestScale> min(nx/10, ny/10))
1008 : {
1009 0 : os << LogIO::WARN << "`largestscale` " << itsUserLargestScale << " pixels is too big for an image size of " << nx << " x " << ny << " pixels. This scale size will be reset. " << LogIO::POST;
1010 0 : itsUserLargestScale = float(ceil(min(nx/10, ny/10))); // based on the idea of MultiTermMatrixCleaner::verifyScaleSizes()
1011 : }
1012 :
1013 16 : int numscale = floor(itsUserLargestScale / itsPsfWidth);
1014 16 : if (numscale == 0)
1015 : {
1016 0 : itsNInitScales = 1;
1017 0 : itsInitScaleSizes.resize(itsNInitScales);
1018 0 : itsInitScaleSizes[0] = 0.0f;
1019 0 : os << LogIO::WARN << "`largestscale` " << itsUserLargestScale << " is smaller than the psf width. Only 0 scale is used" << LogIO::POST;
1020 : }
1021 : else
1022 : {
1023 16 : itsInitScaleSizes.resize(1, false);
1024 16 : itsInitScaleSizes[0] = 0.0f;
1025 :
1026 16 : Int scale = 1;
1027 32 : while (((itsPsfWidth * pow(2, scale-1)) < itsUserLargestScale) && (scale < 5))
1028 : {
1029 16 : itsInitScaleSizes.push_back(itsPsfWidth * pow(2, scale-1));
1030 16 : scale++;
1031 : }
1032 :
1033 16 : if (scale <= 4) // restrict the # of init scales based on `largestscale`
1034 16 : itsInitScaleSizes.push_back(itsUserLargestScale);
1035 :
1036 16 : itsNInitScales = itsInitScaleSizes.size();
1037 : }
1038 :
1039 : }
1040 :
1041 28 : }
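// Worked example for the scale ladder built above (hypothetical numbers):
// with itsPsfWidth = 5 pixels and no user largest scale, the default ladder is
//   {0, 5, 10, 20, 40} pixels;
// with itsUserLargestScale = 18 pixels the while-loop keeps doubling only while the
// scale stays below 18, giving {0, 5, 10} and then appending 18, so
//   itsInitScaleSizes = {0, 5, 10, 18} and itsNInitScales = 4.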
1042 :
1043 : /* for shortest baseline approach
1044 : void AspMatrixCleaner::setInitScalesOld()
1045 : {
1046 : LogIO os(LogOrigin("AspMatrixCleaner", "setInitScales()", WHERE));
1047 :
1048 : // Validate scales
1049 : //os << "Creating " << itsNInitScales << " initial scales" << LogIO::POST;
1050 : itsInitScaleSizes[0] = 0.0f;
1051 : itsInitScaleSizes[1] = itsPsfWidth;
1052 : //os << "scale 1 = 0.0 pixels" << LogIO::POST;
1053 : //os << "scale 2 = " << itsInitScaleSizes[1] << " pixels" << LogIO::POST;
1054 :
1055 : // based on the normalized power-law distribution of ratio(i) = pow(10.0, (Float(i)-2.0)/2.0) for i >= 1
1056 : // 1. there is enough difference to make 5 scales
1057 : if ((itsLargestInitScale - itsPsfWidth) >= 4.0)
1058 : {
1059 : vector<Float> ratio = {0.09, 0.31, 1.0};
1060 :
1061 : for (Int scale = 2; scale < itsNInitScales; scale++)
1062 : {
1063 : itsInitScaleSizes[scale] =
1064 : ceil(itsPsfWidth + ((itsLargestInitScale - itsPsfWidth) * ratio[scale- 2]));
1065 : //os << "scale " << scale+1 << " = " << itsInitScaleSizes[scale]
1066 : //<< " pixels" << LogIO::POST;
1067 : }
1068 : }
1069 : // 2. there is NOT enough difference to make 5 scales, so just make 4 scales
1070 : else if ((itsLargestInitScale - itsPsfWidth) >= 2.0)
1071 : {
1072 : vector<Float> ratio = {0.31, 1.0};
1073 : itsNInitScales = 4;
1074 : itsInitScaleSizes.resize(itsNInitScales);
1075 :
1076 : for (Int scale = 2; scale < itsNInitScales; scale++)
1077 : {
1078 : itsInitScaleSizes[scale] =
1079 : ceil(itsPsfWidth + ((itsLargestInitScale - itsPsfWidth) * ratio[scale- 2]));
1080 : //os << "scale " << scale+1 << " = " << itsInitScaleSizes[scale]
1081 : //<< " pixels" << LogIO::POST;
1082 : }
1083 : }
1084 : // 3. there is NOT enough difference to make 4 scales, so just make 3 scales
1085 : else
1086 : {
1087 : itsNInitScales = 3;
1088 : itsInitScaleSizes.resize(itsNInitScales);
1089 :
1090 : itsInitScaleSizes[2] = itsLargestInitScale;
1091 : //os << "scale 2" << " = " << itsInitScaleSizes[2] << " pixels" << LogIO::POST;
1092 : }
1093 : }
1094 : */
1095 :
1096 28 : void AspMatrixCleaner::setInitScaleXfrs(const Float width)
1097 : {
1098 28 : if(itsInitScales.nelements() > 0)
1099 21 : destroyAspScales();
1100 :
1101 28 : if (itsSwitchedToHogbom)
1102 : {
1103 0 : itsNInitScales = 1;
1104 0 : itsInitScaleSizes.resize(itsNInitScales, false);
1105 0 : itsInitScaleSizes = {0.0f};
1106 : }
1107 : else
1108 : {
1109 28 : itsNInitScales = 5;
1110 28 : itsInitScaleSizes.resize(itsNInitScales, false);
1111 : // shortest baseline approach below is no longer used (see CAS-940 in Jan 2022). Switched back to the original approach.
1112 : // set initial scale sizes from power-law distribution with min scale=PsfWidth and max scale = c/nu/baseline
1113 : // this step can reset itsNInitScales if the largest scale allowed is small
1114 : //setInitScalesOld();
1115 : // the old approach may cause Asp to pick an unreasonably large scale, but this can be avoided by setting `largestscalesize`.
1116 : // try 0, width, 2width, 4width and 8width which is also restricted by `largestscale` if provided
1117 : //itsInitScaleSizes = {0.0f, width, 2.0f*width, 4.0f*width, 8.0f*width};
1118 28 : setInitScales();
1119 : }
1120 :
1121 28 : itsInitScales.resize(itsNInitScales, false);
1122 28 : itsInitScaleXfrs.resize(itsNInitScales, false);
1123 28 : fft = FFTServer<Float,Complex>(psfShape_p);
1124 136 : for (int scale = 0; scale < itsNInitScales; scale++)
1125 : {
1126 108 : itsInitScales[scale] = Matrix<Float>(psfShape_p);
1127 108 : makeInitScaleImage(itsInitScales[scale], itsInitScaleSizes[scale]);
1128 : //cout << "made itsInitScales[" << scale << "] = " << itsInitScaleSizes[scale] << endl;
1129 108 : itsInitScaleXfrs[scale] = Matrix<Complex> ();
1130 108 : fft.fft0(itsInitScaleXfrs[scale], itsInitScales[scale]);
1131 : }
1132 28 : }
1133 :
1134 : // calculate the convolutions of the psf with the initial scales
1135 0 : void AspMatrixCleaner::setInitScalePsfs()
1136 : {
1137 0 : itsPsfConvInitScales.resize((itsNInitScales+1)*(itsNInitScales+1), false);
1138 0 : itsNscales = itsNInitScales; // # initial scales. This will be updated in defineAspScales later.
1139 :
1140 0 : Matrix<Complex> cWork;
1141 :
1142 0 : for (Int scale=0; scale < itsNInitScales; scale++)
1143 : {
1144 : //cout << "Calculating convolutions of psf for initial scale size " << itsInitScaleSizes[scale] << endl;
1145 : //PSF * scale
1146 0 : itsPsfConvInitScales[scale] = Matrix<Float>(psfShape_p);
1147 0 : cWork=((*itsXfr)*(itsInitScaleXfrs[scale])*(itsInitScaleXfrs[scale]));
1148 0 : fft.fft0((itsPsfConvInitScales[scale]), cWork, false);
1149 0 : fft.flip(itsPsfConvInitScales[scale], false, false);
1150 :
1151 0 : for (Int otherscale = scale; otherscale < itsNInitScales; otherscale++)
1152 : {
1153 0 : AlwaysAssert(index(scale, otherscale) < Int(itsPsfConvInitScales.nelements()),
1154 : AipsError);
1155 :
1156 : // PSF * scale * otherscale
1157 0 : itsPsfConvInitScales[index(scale,otherscale)] = Matrix<Float>(psfShape_p);
1158 0 : cWork=((*itsXfr)*(itsInitScaleXfrs[scale])*(itsInitScaleXfrs[otherscale]));
1159 0 : fft.fft0(itsPsfConvInitScales[index(scale,otherscale)], cWork, false);
1160 : }
1161 : }
1162 0 : }
1163 :
1164 : // Set up the masks for the initial scales (i.e. 0, width, 2width, 4width and 8width)
1165 35 : Bool AspMatrixCleaner::setInitScaleMasks(const Array<Float> arrmask, const Float& maskThreshold)
1166 : {
1167 70 : LogIO os(LogOrigin("AspMatrixCleaner", "setInitScaleMasks()", WHERE));
1168 :
1169 35 : destroyMasks();
1170 :
1171 35 : Matrix<Float> mask(arrmask);
1172 35 : itsMask = new Matrix<Float>(mask.shape());
1173 35 : itsMask->assign(mask);
1174 35 : itsMaskThreshold = maskThreshold;
1175 35 : noClean_p=(max(*itsMask) < itsMaskThreshold) ? true : false;
1176 :
1177 35 : if(itsMask.null() || noClean_p)
1178 0 : return false;
1179 :
1180 : // make scale masks
1181 35 : if(itsInitScaleSizes.size() < 1)
1182 : {
1183 : os << "Initial scales are not yet set - cannot set initial scale masks"
1184 0 : << LogIO::EXCEPTION;
1185 : }
1186 :
1187 35 : AlwaysAssert((itsMask->shape() == psfShape_p), AipsError);
1188 :
1189 35 : Matrix<Complex> maskFT;
1190 35 : fft.fft0(maskFT, *itsMask);
1191 35 : itsInitScaleMasks.resize(itsNInitScales);
1192 : // Now we can do all the convolutions
1193 35 : Matrix<Complex> cWork;
1194 170 : for (int scale=0; scale < itsNInitScales; scale++)
1195 : {
1196 : // Mask * scale
1197 : // Allow only 10% overlap by default, hence 0.9 is a default mask threshold
1198 : // if thresholding is not used, just extract the real part of the complex mask
1199 135 : itsInitScaleMasks[scale] = Matrix<Float>(itsMask->shape());
1200 135 : cWork=((maskFT)*(itsInitScaleXfrs[scale]));
1201 135 : fft.fft0(itsInitScaleMasks[scale], cWork, false);
1202 135 : fft.flip(itsInitScaleMasks[scale], false, false);
1203 69255 : for (Int j=0 ; j < (itsMask->shape())(1); ++j)
1204 : {
1205 35458560 : for (Int k =0 ; k < (itsMask->shape())(0); ++k)
1206 : {
1207 35389440 : if(itsMaskThreshold > 0)
1208 35389440 : (itsInitScaleMasks[scale])(k,j) = (itsInitScaleMasks[scale])(k,j) > itsMaskThreshold ? 1.0 : 0.0;
1209 : }
1210 : }
1211 135 : Float mysum = sum(itsInitScaleMasks[scale]);
1212 135 : if (mysum <= 0.1) {
1213 0 : os << LogIO::WARN << "Ignoring initial scale " << itsInitScaleSizes[scale] <<
1214 0 : " since it is too large to fit within the mask" << LogIO::POST;
1215 : }
1216 :
1217 : }
1218 :
1219 35 : Int nx = itsInitScaleMasks[0].shape()(0);
1220 35 : Int ny = itsInitScaleMasks[0].shape()(1);
1221 :
1222 : /* Set the edges of the masks according to the scale size */
1223 : // Set the values OUTSIDE the box to zero....
1224 170 : for(Int scale=0; scale < itsNInitScales; scale++)
1225 : {
1226 135 : Int border = (Int)(itsInitScaleSizes[scale]*1.5);
1227 : // bottom
1228 135 : IPosition blc1(2, 0 , 0 );
1229 135 : IPosition trc1(2, nx-1, border);
1230 135 : IPosition inc1(2, 1);
1231 135 : LCBox::verify(blc1, trc1, inc1, itsInitScaleMasks[scale].shape());
1232 135 : (itsInitScaleMasks[scale])(blc1, trc1) = 0.0;
1233 : // top
1234 135 : blc1[0]=0; blc1[1] = ny-border-1;
1235 135 : trc1[0] = nx-1; trc1[1] = ny-1;
1236 135 : LCBox::verify(blc1, trc1, inc1, itsInitScaleMasks[scale].shape());
1237 135 : (itsInitScaleMasks[scale])(blc1, trc1) = 0.0;
1238 : // left
1239 135 : blc1[0]=0; blc1[1]=border;
1240 135 : trc1[0]=border; trc1[1] = ny-border-1;
1241 135 : LCBox::verify(blc1, trc1, inc1, itsInitScaleMasks[scale].shape());
1242 135 : (itsInitScaleMasks[scale])(blc1, trc1) = 0.0;
1243 : // right
1244 135 : blc1[0] = nx-border-1; blc1[1]=border;
1245 135 : trc1[0] = nx; trc1[1] = ny-border-1;
1246 135 : LCBox::verify(blc1, trc1, inc1, itsInitScaleMasks[scale].shape());
1247 135 : (itsInitScaleMasks[scale])(blc1,trc1) = 0.0;
1248 135 : }
1249 :
1250 : // set blcDirty and trcDirty here for speedup
1251 35 : blcDirty = IPosition(itsInitScaleMasks[0].shape().nelements(), 0);
1252 35 : trcDirty = IPosition(itsInitScaleMasks[0].shape() - 1);
1253 :
1254 35 : if(!itsMask.null())
1255 : {
1256 35 : os << LogIO::NORMAL3 << "Finding initial scales for Asp using given mask" << LogIO::POST;
1257 35 : if (itsMaskThreshold < 0)
1258 : {
1259 : os << LogIO::NORMAL3
1260 : << "Mask thresholding is not used, values are interpreted as weights"
1261 0 : <<LogIO::POST;
1262 : }
1263 : else
1264 : {
1265 : // a mask that does not allow for clean was sent
1266 35 : if(noClean_p)
1267 0 : return true;
1268 :
1269 : os << LogIO::NORMAL3
1270 35 : << "Finding initial scales with mask values above " << itsMaskThreshold
1271 35 : << LogIO::POST;
1272 : }
1273 :
1274 35 : AlwaysAssert(itsMask->shape()(0) == nx, AipsError);
1275 35 : AlwaysAssert(itsMask->shape()(1) == ny, AipsError);
1276 35 : Int xbeg=nx-1;
1277 35 : Int ybeg=ny-1;
1278 35 : Int xend=0;
1279 35 : Int yend=0;
1280 17955 : for (Int iy=0;iy<ny;iy++)
1281 : {
1282 9192960 : for (Int ix=0;ix<nx;ix++)
1283 : {
1284 9175040 : if((*itsMask)(ix,iy)>0.000001)
1285 : {
1286 3431150 : xbeg=min(xbeg,ix);
1287 3431150 : ybeg=min(ybeg,iy);
1288 3431150 : xend=max(xend,ix);
1289 3431150 : yend=max(yend,iy);
1290 : }
1291 : }
1292 : }
1293 35 : blcDirty(0)=xbeg;
1294 35 : blcDirty(1)=ybeg;
1295 35 : trcDirty(0)=xend;
1296 35 : trcDirty(1)=yend;
1297 : }
1298 : else
1299 0 : os << LogIO::NORMAL3 << "Finding initial scales using the entire image" << LogIO::POST;
1300 :
1301 :
1302 35 : return true;
1303 35 : }
1304 :
1305 806 : void AspMatrixCleaner::maxDirtyConvInitScales(float& strengthOptimum, int& optimumScale, IPosition& positionOptimum)
1306 : {
1307 1612 : LogIO os(LogOrigin("AspMatrixCleaner", "maxDirtyConvInitScales()", WHERE));
1308 :
1309 : /* We still need the following to define a region. Using minMaxMasked itself is NOT sufficient and results in components outside of mask.
1310 : // this can be done only once at setup since maxDirtyConvInitScales is called every iter
1311 : const int nx = itsDirty->shape()[0];
1312 : const int ny = itsDirty->shape()[1];
1313 :
1314 : IPosition blcDirty(itsDirty->shape().nelements(), 0);
1315 : IPosition trcDirty(itsDirty->shape() - 1);
1316 :
1317 : if(!itsMask.null())
1318 : {
1319 : os << LogIO::NORMAL3 << "Finding initial scales for Asp using given mask" << LogIO::POST;
1320 : if (itsMaskThreshold < 0)
1321 : {
1322 : os << LogIO::NORMAL3
1323 : << "Mask thresholding is not used, values are interpreted as weights"
1324 : <<LogIO::POST;
1325 : }
1326 : else
1327 : {
1328 : // a mask that does not allow for clean was sent
1329 : if(noClean_p)
1330 : return;
1331 :
1332 : os << LogIO::NORMAL3
1333 : << "Finding initial scales with mask values above " << itsMaskThreshold
1334 : << LogIO::POST;
1335 : }
1336 :
1337 : AlwaysAssert(itsMask->shape()(0) == nx, AipsError);
1338 : AlwaysAssert(itsMask->shape()(1) == ny, AipsError);
1339 : Int xbeg=nx-1;
1340 : Int ybeg=ny-1;
1341 : Int xend=0;
1342 : Int yend=0;
1343 : for (Int iy=0;iy<ny;iy++)
1344 : {
1345 : for (Int ix=0;ix<nx;ix++)
1346 : {
1347 : if((*itsMask)(ix,iy)>0.000001)
1348 : {
1349 : xbeg=min(xbeg,ix);
1350 : ybeg=min(ybeg,iy);
1351 : xend=max(xend,ix);
1352 : yend=max(yend,iy);
1353 : }
1354 : }
1355 : }
1356 : blcDirty(0)=xbeg;
1357 : blcDirty(1)=ybeg;
1358 : trcDirty(0)=xend;
1359 : trcDirty(1)=yend;
1360 : }
1361 : else
1362 : os << LogIO::NORMAL3 << "Finding initial scales using the entire image" << LogIO::POST; */
1363 :
1364 :
1365 806 : Vector<Float> maxima(itsNInitScales);
1366 806 : Block<IPosition> posMaximum(itsNInitScales);
1367 :
1368 : /*int nth = itsNInitScales;
1369 : #ifdef _OPENMP
1370 : nth = min(nth, omp_get_max_threads());
1371 : #endif*/
1372 :
1373 : //#pragma omp parallel default(shared) private(scale) num_threads(nth)
1374 : //{
1375 : // #pragma omp for // genie pragma seems to sometimes return wrong value to maxima on tesla
1376 :
1377 : /* debug info
1378 : Float maxVal=0;
1379 : IPosition posmin((*itsDirty).shape().nelements(), 0);
1380 : Float minVal=0;
1381 : IPosition posmax((*itsDirty).shape().nelements(), 0);
1382 : minMaxMasked(minVal, maxVal, posmin, posmax, (*itsDirty), itsInitScaleMasks[0]);
1383 : cout << "orig itsDirty : min " << minVal << " max " << maxVal << endl;
1384 : cout << "posmin " << posmin << " posmax " << posmax << endl; */
1385 :
1386 806 : IPosition gip;
1387 806 : const int nx = itsDirty->shape()[0];
1388 806 : const int ny = itsDirty->shape()[1];
1389 806 : gip = IPosition(2, nx, ny);
1390 806 : Block<casacore::Matrix<Float>> vecWork_p;
1391 806 : vecWork_p.resize(itsNInitScales);
1392 :
1393 3708 : for (int scale = 0; scale < itsNInitScales; ++scale)
1394 : {
1395 : // Find absolute maximum of each smoothed residual
1396 2902 : vecWork_p[scale].resize(gip);
1397 2902 : Matrix<Float> work = (vecWork_p[scale])(blcDirty,trcDirty);
1398 2902 : work = 0.0;
1399 2902 : work = work + (itsDirtyConvInitScales[scale])(blcDirty,trcDirty);
1400 :
1401 2902 : maxima(scale) = 0;
1402 2902 : posMaximum[scale] = IPosition(itsDirty->shape().nelements(), 0);
1403 :
1404 : /* debug info
1405 : Float maxVal=0;
1406 : Float minVal=0;
1407 : IPosition posmin(itsDirty->shape().nelements(), 0);
1408 : IPosition posmax(itsDirty->shape().nelements(), 0);
1409 : minMaxMasked(minVal, maxVal, posmin, posmax, itsDirtyConvInitScales[scale], itsInitScaleMasks[scale]);
1410 : cout << "DirtyConvInitScale " << scale << ": min " << minVal << " max " << maxVal << endl;
1411 : cout << "posmin " << posmin << " posmax " << posmax << endl; */
1412 :
1413 : // Note: the peak must be found within the (blcDirty, trcDirty) subregion to ensure components stay inside the mask;
1414 : // the work matrix is already restricted to this patch
1415 2902 : if (!itsMask.null())
1416 : {
1417 5804 : findMaxAbsMask(vecWork_p[scale], itsInitScaleMasks[scale],
1418 2902 : maxima(scale), posMaximum[scale]);
1419 : }
1420 : else
1421 0 : findMaxAbs(vecWork_p[scale], maxima(scale), posMaximum[scale]);
1422 :
1423 2902 : if (itsNormMethod == 2)
1424 : {
1425 0 : if (scale > 0)
1426 : {
1427 : float normalization;
1428 : //normalization = 2 * M_PI / pow(itsInitScaleSizes[scale], 2); //sanjay's
1429 : //normalization = 2 * M_PI / pow(itsInitScaleSizes[scale], 1); // this looks good on M31 but bad on G55
1430 0 : normalization = sqrt(2 * M_PI *itsInitScaleSizes[scale]); //GSL. Need to recover and re-norm later
1431 0 : maxima(scale) /= normalization;
1432 : } //sanjay's code doesn't normalize peak here.
1433 : // Norm Method 2 may work fine with GSL with derivatives, but Norm Method 1 is still the preferred approach.
1434 : }
1435 2902 : }
1436 : //}//End parallel section
1437 :
1438 : // Find the peak residual among the initial scales; it becomes the next Aspen
1439 3708 : for (int scale = 0; scale < itsNInitScales; scale++)
1440 : {
1441 2902 : if(abs(maxima(scale)) > abs(strengthOptimum))
1442 : {
1443 2390 : optimumScale = scale;
1444 2390 : strengthOptimum = maxima(scale);
1445 2390 : positionOptimum = posMaximum[scale];
1446 : }
1447 : }
1448 :
1449 806 : if (optimumScale > 0)
1450 : {
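 : // Convert the peak measured on the smoothed residual into an amplitude estimate for
 : // the new Aspen: divide by a factor that combines the PSF width and the optimum
 : // initial scale (inverse widths added in quadrature under sqrt(2*pi)).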
1451 : //const float normalization = 2 * M_PI / (pow(1.0/itsPsfWidth, 2) + pow(1.0/itsInitScaleSizes[optimumScale], 2)); // sanjay
1452 789 : const float normalization = sqrt(2 * M_PI / (pow(1.0/itsPsfWidth, 2) + pow(1.0/itsInitScaleSizes[optimumScale], 2))); // this is good.
1453 :
1454 : // norm method 2 recovers the optimal strength and then normalizes it to get the initial guess
1455 789 : if (itsNormMethod == 2)
1456 0 : strengthOptimum *= sqrt(2 * M_PI *itsInitScaleSizes[optimumScale]); // this is needed if we also first normalize and then compare.
1457 :
1458 789 : strengthOptimum /= normalization;
1459 : // cout << "normalization " << normalization << " strengthOptimum " << strengthOptimum << endl;
1460 : }
1461 :
1462 806 : AlwaysAssert(optimumScale < itsNInitScales, AipsError);
1463 806 : }
1464 :
1465 :
1466 : // ALGLIB - "beta" = 1/2scale^2
1467 : /*vector<Float> AspMatrixCleaner::getActiveSetAspen()
1468 : {
1469 : LogIO os(LogOrigin("AspMatrixCleaner", "getActiveSetAspen()", WHERE));
1470 :
1471 : if(int(itsInitScaleXfrs.nelements()) == 0)
1472 : throw(AipsError("Initial scales for Asp are not defined"));
1473 :
1474 : if (!itsSwitchedToHogbom &&
1475 : accumulate(itsNumIterNoGoodAspen.begin(), itsNumIterNoGoodAspen.end(), 0) >= 5)
1476 : {
1477 : os << "Switched to hogbom because of frequent small components." << LogIO::POST;
1478 : switchedToHogbom();
1479 : }
1480 :
1481 : if (itsSwitchedToHogbom)
1482 : itsNInitScales = 1;
1483 : else
1484 : itsNInitScales = itsInitScaleSizes.size();
1485 :
1486 : // Dirty * initial scales
1487 : Matrix<Complex> dirtyFT;
1488 : fft.fft0(dirtyFT, *itsDirty);
1489 : itsDirtyConvInitScales.resize(0);
1490 : itsDirtyConvInitScales.resize(itsNInitScales); // 0, 1width, 2width, 4width and 8width
1491 : //cout << "itsInitScaleSizes.size() " << itsInitScaleSizes.size() << " itsInitScales.size() " << itsInitScales.size() << " NInitScales # " << itsNInitScales << endl;
1492 : for (int scale=0; scale < itsNInitScales; scale++)
1493 : {
1494 : Matrix<Complex> cWork;
1495 :
1496 : itsDirtyConvInitScales[scale] = Matrix<Float>(itsDirty->shape());
1497 : cWork=((dirtyFT)*(itsInitScaleXfrs[scale]));
1498 : fft.fft0((itsDirtyConvInitScales[scale]), cWork, false);
1499 : fft.flip((itsDirtyConvInitScales[scale]), false, false);
1500 :
1501 : //cout << "remake itsDirtyConvInitScales " << scale << " max itsInitScales[" << scale << "] = " << max(fabs(itsInitScales[scale])) << endl;
1502 : //cout << " max itsInitScaleXfrs[" << scale << "] = " << max(fabs(itsInitScaleXfrs[scale])) << endl;
1503 : }
1504 :
1505 : float strengthOptimum = 0.0;
1506 : int optimumScale = 0;
1507 : IPosition positionOptimum(itsDirty->shape().nelements(), 0);
1508 : itsGoodAspActiveSet.resize(0);
1509 : itsGoodAspAmplitude.resize(0);
1510 : itsGoodAspCenter.resize(0);
1511 :
1512 : maxDirtyConvInitScales(strengthOptimum, optimumScale, positionOptimum);
1513 :
1514 : os << LogIO::NORMAL3 << "Peak among the smoothed residual image is " << strengthOptimum << " and initial scale: " << optimumScale << LogIO::POST;
1515 : // cout << " its itsDirty is " << (*itsDirty)(positionOptimum);
1516 : // cout << " at location " << positionOptimum[0] << " " << positionOptimum[1] << " " << positionOptimum[2];
1517 :
1518 :
1519 : itsStrengthOptimum = strengthOptimum;
1520 : itsPositionOptimum = positionOptimum;
1521 : itsOptimumScale = optimumScale;
1522 : itsOptimumScaleSize = itsInitScaleSizes[optimumScale];
1523 :
1524 : // initial scale size = 0 gives the peak res, so we don't
1525 : // need to do the LBFGS optimization for it
1526 : if (itsOptimumScale == 0)
1527 : return {};
1528 : else
1529 : {
1530 : // the new aspen is always added to the active-set
1531 : vector<Float> tempx;
1532 : vector<IPosition> activeSetCenter;
1533 :
1534 : tempx.push_back(strengthOptimum);
1535 : tempx.push_back(itsInitScaleSizes[optimumScale]);
1536 : activeSetCenter.push_back(positionOptimum);
1537 :
1538 : // initialize alglib option
1539 : unsigned int length = tempx.size();
1540 : real_1d_array x;
1541 : x.setlength(length);
1542 :
1543 : Float beta = 0.0;
1544 :
1545 : // initialize starting point
1546 : for (unsigned int i = 0; i < length; i+=2)
1547 : {
1548 : beta = 1 / (2*pow(tempx[i+1], 2));
1549 :
1550 : x[i] = tempx[i]; // amp
1551 : x[i+1] = beta; //beta
1552 : }
1553 :
1554 : std::cout << "before: opt strength/scale " << tempx[0] << " " << tempx[1] << endl;
1555 : std::cout << "before: beta " << beta << std::endl;
1556 :
1557 : ParamAlglibObj optParam(*itsDirty, *itsXfr, activeSetCenter, fft, itsOptimumScaleSize);
1558 : ParamAlglibObj *ptrParam;
1559 : ptrParam = &optParam;
1560 :
1561 : //real_1d_array s = "[1,1]";
1562 : real_1d_array s = "[1,10]";
1563 : double epsg = 1e-3;
1564 : double epsf = 1e-3;
1565 : double epsx = 1e-3;
1566 : ae_int_t maxits = 5;
1567 : minlbfgsstate state;
1568 : minlbfgscreate(1, x, state);
1569 : minlbfgssetcond(state, epsg, epsf, epsx, maxits);
1570 : minlbfgssetscale(state, s);
1571 : minlbfgssetprecscale(state);
1572 : minlbfgsreport rep;
1573 : alglib::minlbfgsoptimize(state, objfunc_alglib, NULL, (void *) ptrParam);
1574 : minlbfgsresults(state, x, rep);
1575 :
1576 : double *x1 = x.getcontent();
1577 : cout << "x1[0] " << x1[0] << " x1[1] " << x1[1] << endl;
1578 :
1579 : // end alglib bfgs optimization
1580 :
1581 : double amp = 0;
1582 : double scale = 0;
1583 :
1584 : amp = x[0]; // i
1585 : beta = fabs(x[1]); // i+1
1586 :
1587 : if (isnan(amp) || isnan(beta) || beta == 0)
1588 : {
1589 : scale = 0;
1590 : amp = (*itsDirty)(itsPositionOptimum); // This is to avoid divergence due to amp being too large.
1591 : // amp=strengthOptimum gives similar results
1592 : }
1593 : else{
1594 : scale = sqrt(1/(2.0 * beta)) ;
1595 : if (scale < 0.4)
1596 : {
1597 : scale = 0;
1598 : amp = (*itsDirty)(itsPositionOptimum);
1599 : }
1600 : }
1601 :
1602 :
1603 : itsGoodAspAmplitude.push_back(amp); // active-set amplitude
1604 : itsGoodAspActiveSet.push_back(scale); // active-set
1605 :
1606 : itsStrengthOptimum = amp;
1607 : itsOptimumScaleSize = scale;
1608 : itsGoodAspCenter = activeSetCenter;
1609 :
1610 : // debug
1611 : os << LogIO::NORMAL3 << "optimized strengthOptimum " << itsStrengthOptimum << " scale size " << itsOptimumScaleSize << LogIO::POST;
1612 : cout << "optimized strengthOptimum " << itsStrengthOptimum << " scale size " << itsOptimumScaleSize << endl;
1613 :
1614 : } // finish bfgs optimization
1615 :
1616 : AlwaysAssert(itsGoodAspCenter.size() == itsGoodAspActiveSet.size(), AipsError);
1617 : AlwaysAssert(itsGoodAspAmplitude.size() == itsGoodAspActiveSet.size(), AipsError);
1618 :
1619 : // debug info
1620 : /*for (unsigned int i = 0; i < itsAspAmplitude.size(); i++)
1621 : {
1622 : //cout << "After opt AspApm[" << i << "] = " << itsAspAmplitude[i] << endl;
1623 : //cout << "After opt AspScale[" << i << "] = " << itsAspScaleSizes[i] << endl;
1624 : //cout << "After opt AspCenter[" << i << "] = " << itsAspCenter[i] << endl;
1625 : cout << "AspScale[ " << i << " ] = " << itsAspScaleSizes[i] << " center " << itsAspCenter[i] << endl;
1626 : }* /
1627 :
1628 : return itsGoodAspActiveSet; // return optimized scale
1629 : }*/
1630 :
1631 :
1632 : // ALGLIB - "log(PSF*Aspen)
1633 : /*vector<Float> AspMatrixCleaner::getActiveSetAspen()
1634 : {
1635 : LogIO os(LogOrigin("AspMatrixCleaner", "getActiveSetAspen()", WHERE));
1636 :
1637 : if(int(itsInitScaleXfrs.nelements()) == 0)
1638 : throw(AipsError("Initial scales for Asp are not defined"));
1639 :
1640 : if (!itsSwitchedToHogbom &&
1641 : accumulate(itsNumIterNoGoodAspen.begin(), itsNumIterNoGoodAspen.end(), 0) >= 5)
1642 : {
1643 : os << "Switched to hogbom because of frequent small components." << LogIO::POST;
1644 : switchedToHogbom();
1645 : }
1646 :
1647 : if (itsSwitchedToHogbom)
1648 : itsNInitScales = 1;
1649 : else
1650 : itsNInitScales = itsInitScaleSizes.size();
1651 :
1652 : // Dirty * initial scales
1653 : Matrix<Complex> dirtyFT;
1654 : fft.fft0(dirtyFT, *itsDirty);
1655 : itsDirtyConvInitScales.resize(0);
1656 : itsDirtyConvInitScales.resize(itsNInitScales); // 0, 1width, 2width, 4width and 8width
1657 : //cout << "itsInitScaleSizes.size() " << itsInitScaleSizes.size() << " itsInitScales.size() " << itsInitScales.size() << " NInitScales # " << itsNInitScales << endl;
1658 : for (int scale=0; scale < itsNInitScales; scale++)
1659 : {
1660 : Matrix<Complex> cWork;
1661 :
1662 : itsDirtyConvInitScales[scale] = Matrix<Float>(itsDirty->shape());
1663 : cWork=((dirtyFT)*(itsInitScaleXfrs[scale]));
1664 : fft.fft0((itsDirtyConvInitScales[scale]), cWork, false);
1665 : fft.flip((itsDirtyConvInitScales[scale]), false, false);
1666 :
1667 : //cout << "remake itsDirtyConvInitScales " << scale << " max itsInitScales[" << scale << "] = " << max(fabs(itsInitScales[scale])) << endl;
1668 : //cout << " max itsInitScaleXfrs[" << scale << "] = " << max(fabs(itsInitScaleXfrs[scale])) << endl;
1669 : }
1670 :
1671 : float strengthOptimum = 0.0;
1672 : int optimumScale = 0;
1673 : IPosition positionOptimum(itsDirty->shape().nelements(), 0);
1674 : itsGoodAspActiveSet.resize(0);
1675 : itsGoodAspAmplitude.resize(0);
1676 : itsGoodAspCenter.resize(0);
1677 :
1678 : maxDirtyConvInitScales(strengthOptimum, optimumScale, positionOptimum);
1679 :
1680 : os << LogIO::NORMAL3 << "Peak among the smoothed residual image is " << strengthOptimum << " and initial scale: " << optimumScale << LogIO::POST;
1681 : // cout << " its itsDirty is " << (*itsDirty)(positionOptimum);
1682 : // cout << " at location " << positionOptimum[0] << " " << positionOptimum[1] << " " << positionOptimum[2];
1683 :
1684 :
1685 : itsStrengthOptimum = strengthOptimum;
1686 : itsPositionOptimum = positionOptimum;
1687 : itsOptimumScale = optimumScale;
1688 : itsOptimumScaleSize = itsInitScaleSizes[optimumScale];
1689 :
1690 : // initial scale size = 0 gives the peak res, so we don't
1691 : // need to do the LBFGS optimization for it
1692 : if (itsOptimumScale == 0)
1693 : return {};
1694 : else
1695 : {
1696 : // the new aspen is always added to the active-set
1697 : vector<Float> tempx;
1698 : vector<IPosition> activeSetCenter;
1699 :
1700 : tempx.push_back(strengthOptimum);
1701 : tempx.push_back(itsInitScaleSizes[optimumScale]);
1702 : activeSetCenter.push_back(positionOptimum);
1703 :
1704 : // initialize alglib option
1705 : unsigned int length = tempx.size();
1706 : real_1d_array x;
1707 : x.setlength(length);
1708 :
1709 : Float a = 0.0;
1710 : Float b = 0.0;
1711 : Float c = 0.0;
1712 :
1713 : // initialize starting point
1714 : for (unsigned int i = 0; i < length; i+=2)
1715 : {
1716 : a = log(tempx[i]) - ((pow(positionOptimum[0], 2)+pow(positionOptimum[1], 2))/(2*pow(tempx[i+1]+itsPsfWidth, 2)));
1717 : b = (positionOptimum[0]+positionOptimum[1]) / pow(tempx[i+1]+itsPsfWidth, 2);
1718 : c = -1 / (2*pow(tempx[i+1]+itsPsfWidth, 2));
1719 :
1720 : x[i] = a;
1721 : x[i+1] = b;
1722 : x[i+2] = c;
1723 : }
1724 :
1725 : std::cout << "before: itsPSFWidth " << itsPsfWidth << " opt strength/scale " << tempx[0] << " " << tempx[1] << endl;
1726 : std::cout << "before: a/b/c " << a << " " << b << " " << c << std::endl;
1727 :
1728 : ParamAlglibObj optParam(*itsDirty, *itsXfr, activeSetCenter, fft, itsOptimumScaleSize);
1729 : ParamAlglibObj *ptrParam;
1730 : ptrParam = &optParam;
1731 :
1732 : real_1d_array s = "[1,1,1]";
1733 : double epsg = 1e-3;
1734 : double epsf = 1e-3;
1735 : double epsx = 1e-3;
1736 : ae_int_t maxits = 5;
1737 : minlbfgsstate state;
1738 : minlbfgscreate(1, x, state);
1739 : minlbfgssetcond(state, epsg, epsf, epsx, maxits);
1740 : minlbfgssetscale(state, s);
1741 : minlbfgsreport rep;
1742 : alglib::minlbfgsoptimize(state, objfunc_alglib, NULL, (void *) ptrParam);
1743 : minlbfgsresults(state, x, rep);
1744 :
1745 : double *x1 = x.getcontent();
1746 : cout << "x1[0] " << x1[0] << " x1[1] " << x1[1] << " x1[2] " << x1[2] << endl;
1747 :
1748 : // end alglib bfgs optimization
1749 :
1750 : double amp = 0;
1751 : double scale = 0;
1752 :
1753 : a = x[0]; // i
1754 : b = x[1]; // i+1
1755 : c = x[2]; // i+2
1756 :
1757 : amp = exp(a - (pow(b,2)/(4.0*c)));
1758 : scale = sqrt(-1/(2.0 * c)) - itsPsfWidth;
1759 :
1760 : if (fabs(scale) < 0.4)
1761 : {
1762 : scale = 0;
1763 : amp = (*itsDirty)(itsPositionOptimum); // This is to avoid divergence due to amp being too large.
1764 : // amp=strengthOptimum gives similar results
1765 : }
1766 : else
1767 : scale = fabs(scale);
1768 :
1769 : itsGoodAspAmplitude.push_back(amp); // active-set amplitude
1770 : itsGoodAspActiveSet.push_back(scale); // active-set
1771 :
1772 : itsStrengthOptimum = amp;
1773 : itsOptimumScaleSize = scale;
1774 : itsGoodAspCenter = activeSetCenter;
1775 :
1776 : // debug
1777 : os << LogIO::NORMAL3 << "optimized strengthOptimum " << itsStrengthOptimum << " scale size " << itsOptimumScaleSize << LogIO::POST;
1778 : cout << "optimized strengthOptimum " << itsStrengthOptimum << " scale size " << itsOptimumScaleSize << endl;
1779 :
1780 : } // finish bfgs optimization
1781 :
1782 : AlwaysAssert(itsGoodAspCenter.size() == itsGoodAspActiveSet.size(), AipsError);
1783 : AlwaysAssert(itsGoodAspAmplitude.size() == itsGoodAspActiveSet.size(), AipsError);
1784 :
1785 : // debug info
1786 : /*for (unsigned int i = 0; i < itsAspAmplitude.size(); i++)
1787 : {
1788 : //cout << "After opt AspApm[" << i << "] = " << itsAspAmplitude[i] << endl;
1789 : //cout << "After opt AspScale[" << i << "] = " << itsAspScaleSizes[i] << endl;
1790 : //cout << "After opt AspCenter[" << i << "] = " << itsAspCenter[i] << endl;
1791 : cout << "AspScale[ " << i << " ] = " << itsAspScaleSizes[i] << " center " << itsAspCenter[i] << endl;
1792 : }* /
1793 :
1794 : return itsGoodAspActiveSet; // return optimized scale
1795 : }
1796 : */
1797 :
1798 : // ALGLIB - LM
1799 : /*
1800 : vector<Float> AspMatrixCleaner::getActiveSetAspen()
1801 : {
1802 : LogIO os(LogOrigin("AspMatrixCleaner", "getActiveSetAspen()", WHERE));
1803 :
1804 : if(int(itsInitScaleXfrs.nelements()) == 0)
1805 : throw(AipsError("Initial scales for Asp are not defined"));
1806 :
1807 : if (!itsSwitchedToHogbom &&
1808 : accumulate(itsNumIterNoGoodAspen.begin(), itsNumIterNoGoodAspen.end(), 0) >= 5)
1809 : {
1810 : os << "Switched to hogbom because of frequent small components." << LogIO::POST;
1811 : switchedToHogbom();
1812 : }
1813 :
1814 : if (itsSwitchedToHogbom)
1815 : itsNInitScales = 1;
1816 : else
1817 : itsNInitScales = itsInitScaleSizes.size();
1818 :
1819 : // Dirty * initial scales
1820 : Matrix<Complex> dirtyFT;
1821 : fft.fft0(dirtyFT, *itsDirty);
1822 : itsDirtyConvInitScales.resize(0);
1823 : itsDirtyConvInitScales.resize(itsNInitScales); // 0, 1width, 2width, 4width and 8width
1824 : //cout << "itsInitScaleSizes.size() " << itsInitScaleSizes.size() << " itsInitScales.size() " << itsInitScales.size() << " NInitScales # " << itsNInitScales << endl;
1825 : for (int scale=0; scale < itsNInitScales; scale++)
1826 : {
1827 : Matrix<Complex> cWork;
1828 :
1829 : itsDirtyConvInitScales[scale] = Matrix<Float>(itsDirty->shape());
1830 : cWork=((dirtyFT)*(itsInitScaleXfrs[scale]));
1831 : fft.fft0((itsDirtyConvInitScales[scale]), cWork, false);
1832 : fft.flip((itsDirtyConvInitScales[scale]), false, false);
1833 :
1834 : //cout << "remake itsDirtyConvInitScales " << scale << " max itsInitScales[" << scale << "] = " << max(fabs(itsInitScales[scale])) << endl;
1835 : //cout << " max itsInitScaleXfrs[" << scale << "] = " << max(fabs(itsInitScaleXfrs[scale])) << endl;
1836 : }
1837 :
1838 : float strengthOptimum = 0.0;
1839 : int optimumScale = 0;
1840 : IPosition positionOptimum(itsDirty->shape().nelements(), 0);
1841 : itsGoodAspActiveSet.resize(0);
1842 : itsGoodAspAmplitude.resize(0);
1843 : itsGoodAspCenter.resize(0);
1844 :
1845 : maxDirtyConvInitScales(strengthOptimum, optimumScale, positionOptimum);
1846 :
1847 : os << LogIO::NORMAL << "Peak among the smoothed residual image is " << strengthOptimum << " and initial scale: " << optimumScale << LogIO::POST;
1848 : //cout << "Peak among the smoothed residual image is " << strengthOptimum << " and initial scale: " << optimumScale << endl;
1849 : // cout << " its itsDirty is " << (*itsDirty)(positionOptimum);
1850 : // cout << " at location " << positionOptimum[0] << " " << positionOptimum[1] << " " << positionOptimum[2];
1851 :
1852 :
1853 : itsStrengthOptimum = strengthOptimum;
1854 : itsPositionOptimum = positionOptimum;
1855 : itsOptimumScale = optimumScale;
1856 : itsOptimumScaleSize = itsInitScaleSizes[optimumScale];
1857 :
1858 : // initial scale size = 0 gives the peak res, so we don't
1859 : // need to do the LBFGS optimization for it
1860 : if (itsOptimumScale == 0)
1861 : return {};
1862 : else
1863 : {
1864 : // the new aspen is always added to the active-set
1865 : vector<Float> tempx;
1866 : vector<IPosition> activeSetCenter;
1867 :
1868 : tempx.push_back(strengthOptimum);
1869 : tempx.push_back(itsInitScaleSizes[optimumScale]);
1870 : activeSetCenter.push_back(positionOptimum);
1871 :
1872 : // initialize alglib option
1873 : unsigned int length = tempx.size();
1874 : real_1d_array x;
1875 : x.setlength(length);
1876 :
1877 : real_1d_array s; //G55
1878 : s.setlength(length);
1879 :
1880 : // initialize starting point
1881 : for (unsigned int i = 0; i < length; i+=2)
1882 : {
1883 : x[i] = tempx[i]; //amp
1884 : x[i+1] = tempx[i+1]; //scale
1885 :
1886 : // G55 like
1887 : s[i] = tempx[i]; //amp
1888 : s[i+1] = tempx[i+1]; //scale
1889 : }
1890 :
1891 : ParamAlglibObj optParam(*itsDirty, *itsXfr, activeSetCenter, fft);
1892 : ParamAlglibObj *ptrParam;
1893 : ptrParam = &optParam;
1894 :
1895 : //real_1d_array s = "[0.001,10]"; //G55
1896 : //real_1d_array s = "[1,1]";
1897 : double epsg = 1e-3;
1898 : double epsf = 1e-3;
1899 : double epsx = 1e-3;
1900 : ae_int_t maxits = 5;
1901 : minlmstate state;
1902 : minlmcreatev(1, x, 0.0001, state);
1903 : minlmsetcond(state, epsx, maxits);
1904 : minlmsetscale(state, s);
1905 : minlmreport rep;
1906 : alglib::minlmoptimize(state, objfunc_alglib, NULL, (void *) ptrParam);
1907 : minlmresults(state, x, rep);
1908 : double *x1 = x.getcontent();
1909 : //cout << "x1[0] " << x1[0] << " x1[1] " << x1[1] << endl;
1910 :
1911 : // end alglib bfgs optimization
1912 :
1913 : double amp = x[0]; // i
1914 : double scale = x[1]; // i+1
1915 :
1916 : if (fabs(scale) < 0.4)
1917 : {
1918 : scale = 0;
1919 : amp = (*itsDirty)(itsPositionOptimum); // This is to avoid divergence due to amp being too large.
1920 : // amp=strengthOptimum gives similar results
1921 : }
1922 : else
1923 : scale = fabs(scale);
1924 :
1925 : itsGoodAspAmplitude.push_back(amp); // active-set amplitude
1926 : itsGoodAspActiveSet.push_back(scale); // active-set
1927 :
1928 : itsStrengthOptimum = amp;
1929 : itsOptimumScaleSize = scale;
1930 : itsGoodAspCenter = activeSetCenter;
1931 :
1932 : // debug
1933 : os << LogIO::NORMAL << "optimized strengthOptimum " << itsStrengthOptimum << " scale size " << itsOptimumScaleSize << LogIO::POST;
1934 : //cout << "optimized strengthOptimum " << itsStrengthOptimum << " scale size " << itsOptimumScaleSize << " at " << itsPositionOptimum << endl;
1935 :
1936 : } // finish bfgs optimization
1937 :
1938 : AlwaysAssert(itsGoodAspCenter.size() == itsGoodAspActiveSet.size(), AipsError);
1939 : AlwaysAssert(itsGoodAspAmplitude.size() == itsGoodAspActiveSet.size(), AipsError);
1940 :
1941 : // debug info
1942 : /*for (unsigned int i = 0; i < itsAspAmplitude.size(); i++)
1943 : {
1944 : //cout << "After opt AspApm[" << i << "] = " << itsAspAmplitude[i] << endl;
1945 : //cout << "After opt AspScale[" << i << "] = " << itsAspScaleSizes[i] << endl;
1946 : //cout << "After opt AspCenter[" << i << "] = " << itsAspCenter[i] << endl;
1947 : cout << "AspScale[ " << i << " ] = " << itsAspScaleSizes[i] << " center " << itsAspCenter[i] << endl;
1948 : }* /
1949 :
1950 : return itsGoodAspActiveSet; // return optimized scale
1951 : }*/
1952 :
1953 :
1954 : // ALGLIB - gold - not "log"
1955 :
1956 806 : vector<Float> AspMatrixCleaner::getActiveSetAspen()
1957 : {
1958 1612 : LogIO os(LogOrigin("AspMatrixCleaner", "getActiveSetAspen()", WHERE));
1959 :
1960 806 : if(int(itsInitScaleXfrs.nelements()) == 0)
1961 0 : throw(AipsError("Initial scales for Asp are not defined"));
1962 :
1963 1595 : if (!itsSwitchedToHogbom &&
1964 1595 : accumulate(itsNumIterNoGoodAspen.begin(), itsNumIterNoGoodAspen.end(), 0) >= 5)
1965 : {
1966 0 : os << "Switched to hogbom because of frequent small components." << LogIO::POST;
1967 0 : switchedToHogbom();
1968 : }
1969 :
1970 806 : if (itsSwitchedToHogbom)
1971 17 : itsNInitScales = 1;
1972 : else
1973 789 : itsNInitScales = itsInitScaleSizes.size();
1974 :
1975 : // Dirty * initial scales
1976 806 : Matrix<Complex> dirtyFT;
1977 806 : fft.fft0(dirtyFT, *itsDirty);
1978 806 : itsDirtyConvInitScales.resize(0);
1979 806 : itsDirtyConvInitScales.resize(itsNInitScales); // 0, 1width, 2width, 4width and 8width
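 : // Convolve the dirty image with each initial scale: multiply the dirty image's FFT
 : // by the scale's transfer function, inverse-transform, and flip the result back into image order.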
1980 : //cout << "itsInitScaleSizes.size() " << itsInitScaleSizes.size() << " itsInitScales.size() " << itsInitScales.size() << " NInitScales # " << itsNInitScales << endl;
1981 3708 : for (int scale=0; scale < itsNInitScales; scale++)
1982 : {
1983 2902 : Matrix<Complex> cWork;
1984 :
1985 2902 : itsDirtyConvInitScales[scale] = Matrix<Float>(itsDirty->shape());
1986 2902 : cWork=((dirtyFT)*(itsInitScaleXfrs[scale]));
1987 2902 : fft.fft0((itsDirtyConvInitScales[scale]), cWork, false);
1988 2902 : fft.flip((itsDirtyConvInitScales[scale]), false, false);
1989 :
1990 : //cout << "remake itsDirtyConvInitScales " << scale << " max itsInitScales[" << scale << "] = " << max(fabs(itsInitScales[scale])) << endl;
1991 : //cout << " max itsInitScaleXfrs[" << scale << "] = " << max(fabs(itsInitScaleXfrs[scale])) << endl;
1992 2902 : }
1993 :
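 : // Search the scale-convolved residuals for the strongest peak; its amplitude, position,
 : // and scale seed the L-BFGS fit of the new Aspen below.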
1994 806 : float strengthOptimum = 0.0;
1995 806 : int optimumScale = 0;
1996 806 : IPosition positionOptimum(itsDirty->shape().nelements(), 0);
1997 806 : itsGoodAspActiveSet.resize(0);
1998 806 : itsGoodAspAmplitude.resize(0);
1999 806 : itsGoodAspCenter.resize(0);
2000 :
2001 806 : maxDirtyConvInitScales(strengthOptimum, optimumScale, positionOptimum);
2002 :
2003 806 : os << LogIO::NORMAL3 << "Peak among the smoothed residual image is " << strengthOptimum << " and initial scale: " << optimumScale << LogIO::POST;
2004 : //cout << "Peak among the smoothed residual image is " << strengthOptimum << " and initial scale: " << optimumScale << endl;
2005 : // cout << " its itsDirty is " << (*itsDirty)(positionOptimum);
2006 : // cout << " at location " << positionOptimum[0] << " " << positionOptimum[1] << " " << positionOptimum[2];
2007 :
2008 :
2009 806 : itsStrengthOptimum = strengthOptimum;
2010 806 : itsPositionOptimum = positionOptimum;
2011 806 : itsOptimumScale = optimumScale;
2012 806 : itsOptimumScaleSize = itsInitScaleSizes[optimumScale];
2013 :
2014 : // initial scale size = 0 gives the peak residual, so we don't
2015 : // need to do the LBFGS optimization for it
2016 806 : if (itsOptimumScale == 0)
2017 17 : return {};
2018 : else
2019 : {
2020 : // the new aspen is always added to the active-set
2021 789 : vector<Float> tempx;
2022 789 : vector<IPosition> activeSetCenter;
2023 :
2024 789 : tempx.push_back(strengthOptimum);
2025 789 : tempx.push_back(itsInitScaleSizes[optimumScale]);
2026 789 : activeSetCenter.push_back(positionOptimum);
2027 :
2028 : // initialize alglib option
2029 789 : unsigned int length = tempx.size();
2030 789 : real_1d_array x;
2031 789 : x.setlength(length);
2032 :
2033 : // for G55, etc.
2034 789 : real_1d_array s;
2035 789 : s.setlength(length);
2036 :
2037 : // initialize starting point
2038 1578 : for (unsigned int i = 0; i < length; i+=2)
2039 : {
2040 789 : x[i] = tempx[i]; //amp
2041 789 : x[i+1] = tempx[i+1]; //scale
2042 :
2043 789 : s[i] = tempx[i]; //amp
2044 789 : s[i+1] = tempx[i+1]; //scale
2045 : }
2046 :
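 : // Set up the ALGLIB L-BFGS solver: x holds the (amplitude, scale) parameters of the
 : // new Aspen, s supplies per-variable scaling, and epsg/epsf/epsx with maxits define
 : // the stopping criteria. objfunc_alglib evaluates the objective and its gradient
 : // using the dirty image and PSF transfer function packed into ParamAlglibObj.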
2047 789 : ParamAlglibObj optParam(*itsDirty, *itsXfr, activeSetCenter, fft);
2048 : ParamAlglibObj *ptrParam;
2049 789 : ptrParam = &optParam;
2050 :
2051 : //real_1d_array s = "[1,1]";
2052 : //real_1d_array s = "[0.001,10]";
2053 789 : double epsg = 1e-3;
2054 789 : double epsf = 1e-3;
2055 789 : double epsx = 1e-3;
2056 789 : ae_int_t maxits = 5;
2057 789 : minlbfgsstate state;
2058 789 : minlbfgscreate(1, x, state);
2059 789 : minlbfgssetcond(state, epsg, epsf, epsx, maxits);
2060 789 : minlbfgssetscale(state, s);
2061 789 : minlbfgsreport rep;
2062 789 : alglib::minlbfgsoptimize(state, objfunc_alglib, NULL, (void *) ptrParam);
2063 789 : minlbfgsresults(state, x, rep);
2064 789 : double *x1 = x.getcontent();
2065 : //cout << "x1[0] " << x1[0] << " x1[1] " << x1[1] << endl;
2066 :
2067 : // end alglib bfgs optimization
2068 :
2069 789 : double amp = x[0]; // i
2070 789 : double scale = x[1]; // i+1
2071 :
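 : // Guard against degenerate fits: a very small (or negative) fitted scale is treated
 : // as a point source, and the amplitude falls back to the dirty-image value at the peak.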
2072 789 : if (fabs(scale) < 0.4)
2073 : {
2074 4 : scale = 0;
2075 4 : amp = (*itsDirty)(itsPositionOptimum); // This is to avoid divergence due to amp being too large.
2076 : // amp=strengthOptimum gives similar results
2077 : }
2078 : else
2079 785 : scale = fabs(scale);
2080 :
2081 789 : itsGoodAspAmplitude.push_back(amp); // active-set amplitude
2082 789 : itsGoodAspActiveSet.push_back(scale); // active-set
2083 :
2084 789 : itsStrengthOptimum = amp;
2085 789 : itsOptimumScaleSize = scale;
2086 789 : itsGoodAspCenter = activeSetCenter;
2087 :
2088 : // debug
2089 789 : os << LogIO::NORMAL3 << "optimized strengthOptimum " << itsStrengthOptimum << " scale size " << itsOptimumScaleSize << LogIO::POST;
2090 : //cout << "optimized strengthOptimum " << itsStrengthOptimum << " scale size " << itsOptimumScaleSize << " at " << itsPositionOptimum << endl;
2091 :
2092 789 : } // finish bfgs optimization
2093 :
2094 789 : AlwaysAssert(itsGoodAspCenter.size() == itsGoodAspActiveSet.size(), AipsError);
2095 789 : AlwaysAssert(itsGoodAspAmplitude.size() == itsGoodAspActiveSet.size(), AipsError);
2096 :
2097 : // debug info
2098 : /*for (unsigned int i = 0; i < itsAspAmplitude.size(); i++)
2099 : {
2100 : //cout << "After opt AspApm[" << i << "] = " << itsAspAmplitude[i] << endl;
2101 : //cout << "After opt AspScale[" << i << "] = " << itsAspScaleSizes[i] << endl;
2102 : //cout << "After opt AspCenter[" << i << "] = " << itsAspCenter[i] << endl;
2103 : cout << "AspScale[ " << i << " ] = " << itsAspScaleSizes[i] << " center " << itsAspCenter[i] << endl;
2104 : }*/
2105 :
2106 789 : return itsGoodAspActiveSet; // return optimized scale
2107 806 : }
2108 :
2109 :
2110 : // gsl lbfgs
2111 : /*
2112 : vector<Float> AspMatrixCleaner::getActiveSetAspen()
2113 : {
2114 : LogIO os(LogOrigin("AspMatrixCleaner", "getActiveSetAspen()", WHERE));
2115 :
2116 : if(int(itsInitScaleXfrs.nelements()) == 0)
2117 : throw(AipsError("Initial scales for Asp are not defined"));
2118 :
2119 : if (!itsSwitchedToHogbom &&
2120 : accumulate(itsNumIterNoGoodAspen.begin(), itsNumIterNoGoodAspen.end(), 0) >= 5)
2121 : {
2122 : os << "Switched to hogbom because of frequent small components." << LogIO::POST;
2123 : switchedToHogbom();
2124 : }
2125 :
2126 : if (itsSwitchedToHogbom)
2127 : itsNInitScales = 1;
2128 : else
2129 : itsNInitScales = itsInitScaleSizes.size();
2130 :
2131 : // Dirty * initial scales
2132 : Matrix<Complex> dirtyFT;
2133 : FFTServer<Float,Complex> fft(itsDirty->shape());
2134 : fft.fft0(dirtyFT, *itsDirty);
2135 : itsDirtyConvInitScales.resize(0);
2136 : itsDirtyConvInitScales.resize(itsNInitScales); // 0, 1width, 2width, 4width and 8width
2137 :
2138 : for (int scale=0; scale < itsNInitScales; scale++)
2139 : {
2140 : Matrix<Complex> cWork;
2141 :
2142 : itsDirtyConvInitScales[scale] = Matrix<Float>(itsDirty->shape());
2143 : cWork=((dirtyFT)*(itsInitScaleXfrs[scale]));
2144 : fft.fft0((itsDirtyConvInitScales[scale]), cWork, false);
2145 : fft.flip((itsDirtyConvInitScales[scale]), false, false);
2146 :
2147 : // cout << "remake itsDirtyConvInitScales " << scale << " max itsInitScales[" << scale << "] = " << max(fabs(itsInitScales[scale])) << endl;
2148 : // cout << " max itsInitScaleXfrs[" << scale << "] = " << max(fabs(itsInitScaleXfrs[scale])) << endl;
2149 : }
2150 :
2151 : float strengthOptimum = 0.0;
2152 : int optimumScale = 0;
2153 : IPosition positionOptimum(itsDirty->shape().nelements(), 0);
2154 : itsGoodAspActiveSet.resize(0);
2155 : itsGoodAspAmplitude.resize(0);
2156 : itsGoodAspCenter.resize(0);
2157 : //itsPrevAspActiveSet.resize(0);
2158 : //itsPrevAspAmplitude.resize(0);
2159 :
2160 : maxDirtyConvInitScales(strengthOptimum, optimumScale, positionOptimum);
2161 : os << "Peak among the smoothed residual image is " << strengthOptimum << " and initial scale: " << optimumScale << LogIO::POST;
2162 : // cout << " its itsDirty is " << (*itsDirty)(positionOptimum);
2163 : // cout << " at location " << positionOptimum[0] << " " << positionOptimum[1] << " " << positionOptimum[2];
2164 :
2165 : // memory used
2166 : //itsUsedMemoryMB = double(HostInfo::memoryUsed()/1024);
2167 : //cout << "Memory allocated in getActiveSetAspen " << itsUsedMemoryMB << " MB." << endl;
2168 :
2169 : itsStrengthOptimum = strengthOptimum;
2170 : itsPositionOptimum = positionOptimum;
2171 : itsOptimumScale = optimumScale;
2172 : itsOptimumScaleSize = itsInitScaleSizes[optimumScale];
2173 :
2174 : // initial scale size = 0 gives the peak res, so we don't
2175 : // need to do the LBFGS optimization for it
2176 : if (itsOptimumScale == 0)
2177 : return {};
2178 : else
2179 : {
2180 : // AlwaysAssert(itsAspScaleSizes.size() == itsAspAmplitude.size(), AipsError);
2181 : // AlwaysAssert(itsAspScaleSizes.size() == itsAspCenter.size(), AipsError);
2182 :
2183 : // No longer needed. Heuristically determine the active set for speed-up
2184 : /*Float resArea = 0.0;
2185 : Int nX = itsDirty->shape()(0);
2186 : Int nY = itsDirty->shape()(1);
2187 :
2188 : for (Int j = 0; j < nY; ++j)
2189 : {
2190 : for(Int i = 0; i < nX; ++i)
2191 : resArea += abs((*itsDirty)(i,j));
2192 : }
2193 : const Float lamda = 318; //M31 new Asp - gold
2194 :
2195 : const Float threshold = lamda * resArea;
2196 :
2197 : vector<Float> tempx;
2198 : vector<IPosition> activeSetCenter;
2199 :
2200 : vector<pair<Float,int>> vp; //(LenDev, idx)
2201 :
2202 : sort(vp.begin(),vp.end(), [](const pair<Float,int> &l, const pair<Float,int> &r) {return l.first > r.first;});
2203 :
2204 : // select the top 5
2205 : vector<int> goodvp;
2206 : for (unsigned int i = 0; i < vp.size(); i++)
2207 : {
2208 : if (i >= 20)
2209 : break;
2210 : goodvp.push_back(vp[i].second);
2211 : }
2212 : sort(goodvp.begin(), goodvp.end(), [](const int &l, const int &r) {return l > r;});
2213 :
2214 : for (unsigned int i = 0; i < goodvp.size(); i++)
2215 : {
2216 : tempx.push_back(itsAspAmplitude[goodvp[i]]);
2217 : tempx.push_back(itsAspScaleSizes[goodvp[i]]);
2218 : activeSetCenter.push_back(itsAspCenter[goodvp[i]]);
2219 : itsAspAmplitude.erase(itsAspAmplitude.begin() + goodvp[i]);
2220 : itsAspScaleSizes.erase(itsAspScaleSizes.begin() + goodvp[i]);
2221 : itsAspCenter.erase(itsAspCenter.begin() + goodvp[i]);
2222 : itsAspGood.erase(itsAspGood.begin() + goodvp[i]);
2223 : }* /
2224 :
2225 : // the new aspen is always added to the active-set
2226 : vector<Float> tempx;
2227 : vector<IPosition> activeSetCenter;
2228 :
2229 : tempx.push_back(strengthOptimum);
2230 : tempx.push_back(itsInitScaleSizes[optimumScale]);
2231 : activeSetCenter.push_back(positionOptimum);
2232 :
2233 : // GSL: set the initial guess
2234 : unsigned int length = tempx.size();
2235 : gsl_vector *x = NULL;
2236 : x = gsl_vector_alloc(length);
2237 : gsl_vector_set_zero(x);
2238 :
2239 : for (unsigned int i = 0; i < length; i+=2)
2240 : {
2241 : gsl_vector_set(x, i, tempx[i]);
2242 : gsl_vector_set(x, i+1, tempx[i+1]);
2243 :
2244 : // No longer needed. save aspen before optimization
2245 : // itsPrevAspAmplitude.push_back(tempx[i]); // active-set amplitude before bfgs
2246 : // itsPrevAspActiveSet.push_back(tempx[i+1]); // prev active-set before bfgs
2247 : }
2248 :
2249 : // GSL optimization
2250 : //fdf
2251 : gsl_multimin_function_fdf my_func;
2252 : gsl_multimin_fdfminimizer *s = NULL;
2253 :
2254 : // setupSolver
2255 : ParamObj optParam(*itsDirty, *itsXfr, activeSetCenter);
2256 : ParamObj *ptrParam;
2257 : ptrParam = &optParam;
2258 : // fdf
2259 : const gsl_multimin_fdfminimizer_type *T;
2260 : T = gsl_multimin_fdfminimizer_vector_bfgs2;
2261 : s = gsl_multimin_fdfminimizer_alloc(T, length);
2262 : my_func.n = length;
2263 : my_func.f = my_f;
2264 : my_func.df = my_df;
2265 : my_func.fdf = my_fdf;
2266 : my_func.params = (void *)ptrParam;
2267 :
2268 : // fdf
2269 : const float InitStep = gsl_blas_dnrm2(x);
2270 : gsl_multimin_fdfminimizer_set(s, &my_func, x, InitStep, 0.1);
2271 :
2272 : // ---------- BFGS algorithm begin ----------
2273 : // fdf
2274 : findComponent(5, s); // has to be > =5
2275 : //---------- BFGS algorithm end ----------
2276 :
2277 : // update x is needed here.
2278 : gsl_vector *optx = NULL;
2279 : optx = gsl_multimin_fdfminimizer_x(s); //fdf
2280 : // end GSL optimization
2281 :
2282 : // put the updated latest Aspen back to the active-set. Permanent list is no longer needed.
2283 : //for (unsigned int i = 0; i < length; i+= 2)
2284 : //{
2285 : double amp = gsl_vector_get(optx, 0); // i
2286 : double scale = gsl_vector_get(optx, 1); // i+1
2287 : scale = (scale = fabs(scale)) < 0.4 ? 0 : scale;
2288 : itsGoodAspAmplitude.push_back(amp); // active-set amplitude
2289 : itsGoodAspActiveSet.push_back(scale); // active-set
2290 :
2291 : // No longer needed. permanent list that doesn't get clear
2292 : //itsAspAmplitude.push_back(amp);
2293 : //itsAspScaleSizes.push_back(scale);
2294 : //itsAspCenter.push_back(activeSetCenter[i/2]);
2295 : //}
2296 :
2297 : //itsStrengthOptimum = itsAspAmplitude[itsAspAmplitude.size() -1]; // the latest aspen is the last element of x
2298 : //itsOptimumScaleSize = itsAspScaleSizes[itsAspScaleSizes.size() -1]; // the latest aspen is the last element of x
2299 : itsStrengthOptimum = amp;
2300 : itsOptimumScaleSize = scale;
2301 : itsGoodAspCenter = activeSetCenter;
2302 :
2303 : // debug
2304 : //os << "optimized strengthOptimum " << itsStrengthOptimum << " scale size " << itsOptimumScaleSize << LogIO::POST;
2305 :
2306 : // free GSL stuff
2307 : gsl_multimin_fdfminimizer_free(s); //fdf
2308 : gsl_vector_free(x);
2309 : //gsl_vector_free(optx); // Don't do it: freeing this causes a seg fault!
2310 :
2311 : } // finish bfgs optimization
2312 :
2313 : AlwaysAssert(itsGoodAspCenter.size() == itsGoodAspActiveSet.size(), AipsError);
2314 : AlwaysAssert(itsGoodAspAmplitude.size() == itsGoodAspActiveSet.size(), AipsError);
2315 :
2316 : // debug info
2317 : /*for (unsigned int i = 0; i < itsAspAmplitude.size(); i++)
2318 : {
2319 : //cout << "After opt AspApm[" << i << "] = " << itsAspAmplitude[i] << endl;
2320 : //cout << "After opt AspScale[" << i << "] = " << itsAspScaleSizes[i] << endl;
2321 : //cout << "After opt AspCenter[" << i << "] = " << itsAspCenter[i] << endl;
2322 : cout << "AspScale[ " << i << " ] = " << itsAspScaleSizes[i] << " center " << itsAspCenter[i] << endl;
2323 : }* /
2324 :
2325 : return itsGoodAspActiveSet; // return optimized scale
2326 : }*/
2327 :
2328 : // Define the Asp scales without doing anything else
2329 806 : void AspMatrixCleaner::defineAspScales(vector<Float>& scaleSizes)
2330 : {
2331 806 : sort(scaleSizes.begin(), scaleSizes.end());
2332 : // No longer needed since we only update with the latest Aspen: remove the duplicated scales
2333 : // scaleSizes.erase(unique(scaleSizes.begin(),scaleSizes.end(),[](Float l, Float r) { return abs(l - r) < 1e-3; }), scaleSizes.end());
2334 :
2335 806 : itsNscales = Int(scaleSizes.size());
2336 806 : itsScaleSizes.resize(itsNscales);
2337 806 : itsScaleSizes = Vector<Float>(scaleSizes); // make a copy that we can call our own
2338 :
2339 : // analytically calculate component scale by Asp 2016
2340 806 : if (itsUseZhang)
2341 : {
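 : // Deconvolve the PSF width from each scale in quadrature; scales narrower than
 : // the PSF are left unchanged.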
2342 0 : for (Int i = 0; i < itsNscales; i++)
2343 : {
2344 0 : if (itsScaleSizes[i] >= itsPsfWidth)
2345 0 : itsScaleSizes[i] = sqrt(pow(itsScaleSizes[i], 2) - pow(Float(itsPsfWidth), 2));
2346 : }
2347 : }
2348 : // end Asp 2016
2349 :
2350 806 : itsScalesValid = true;
2351 806 : }
2352 :
2353 2 : void AspMatrixCleaner::switchedToHogbom(bool runlong)
2354 : {
2355 4 : LogIO os(LogOrigin("AspMatrixCleaner", "switchedToHogbom", WHERE));
2356 :
2357 2 : itsSwitchedToHogbom = true;
2358 :
2359 : // if the user has set it, do not automatically switch to Hogbom;
2360 : // this makes the G55 result even better
2361 2 : if (itsFusedThreshold < 0)
2362 0 : itsSwitchedToHogbom = false;
2363 :
2364 2 : itsNthHogbom += 1;
2365 2 : itsNumIterNoGoodAspen.resize(0);
2366 : //itsNumHogbomIter = ceil(100 + 50 * (exp(0.05*itsNthHogbom) - 1)); // zhang's formula
2367 : //itsNumHogbomIter = ceil(50 + 2 * (exp(0.05*itsNthHogbom) - 1)); // genie's formula
2368 2 : itsNumHogbomIter = 51; // genie's formula: itsNthHogbom was removed to eliminate the state dependency; the difference from the above is negligible
2369 :
2370 2 : if (runlong)
2371 : //itsNumHogbomIter = ceil(500 + 50 * (exp(0.05*itsNthHogbom) - 1));
2372 0 : itsNumHogbomIter = 510;
2373 :
2374 2 : os << LogIO::NORMAL3 << "Run hogbom for " << itsNumHogbomIter << " iterations." << LogIO::POST;
2375 2 : }
2376 :
2377 0 : void AspMatrixCleaner::setOrigDirty(const Matrix<Float>& dirty){
2378 0 : itsOrigDirty=new Matrix<Float>(dirty.shape());
2379 0 : itsOrigDirty->assign(dirty);
2380 0 : }
2381 :
2382 :
2383 35 : void AspMatrixCleaner::getFluxByBins(const vector<Float>& scaleSizes,const vector<Float>& optimum, Int binSize, vector<Float>& sumFluxByBins,vector<Float>& rangeFluxByBins) {
2384 :
2385 35 : double maxScaleSize = *std::max_element(scaleSizes.begin(),scaleSizes.end());
2386 35 : double minScaleSize = *std::min_element(scaleSizes.begin(),scaleSizes.end());
2387 35 : double deltaScale = (maxScaleSize-minScaleSize) / binSize;
2388 :
2389 :
2390 210 : for(Int j=0; j < binSize+1; j++)
2391 : {
2392 175 : rangeFluxByBins[j] = minScaleSize+j*deltaScale;
2393 175 : if ( j == binSize)
2394 35 : rangeFluxByBins[j] = maxScaleSize;
2395 : }
2396 :
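 : // Accumulate each component's flux into the bin whose [lower, upper) edge range
 : // brackets its scale size (the last bin also includes its upper edge).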
2397 826 : for(Int i=0; i < Int(scaleSizes.size()); i++)
2398 4746 : for(Int j=0; j < binSize; j++) // binSize bins lie between the binSize+1 edges; do not read past the last edge
2399 : {
2400 3955 : if ( (scaleSizes[i] >= rangeFluxByBins[j]) &&
 : (scaleSizes[i] < rangeFluxByBins[j+1] || (j == binSize-1 && scaleSizes[i] <= rangeFluxByBins[j+1])) )
2401 756 : sumFluxByBins[j] += optimum[i];
2402 : }
2403 :
2404 :
2405 35 : }
2406 :
2407 :
2408 :
2409 : } //# NAMESPACE CASA - END