Build: #7 failed
Job: Test Many Linux 2.28 failed
uid A002 X85c183 X36f SPW15 23 PPR regression: Test case result
The following summarizes the result of the test "uid A002 X85c183 X36f SPW15 23 PPR regression" in build 7 of "Pipeline - Pipeline Main with Casa 6.6.6 test - cvpost - release-6.6.6", job "Test Many Linux 2.28".
- Description: uid A002 X85c183 X36f SPW15 23 PPR regression
- Test class: pipeline.infrastructure.utils.regression-tester
- Method: test_uid___A002_X85c183_X36f_SPW15_23__PPR__regression (a local re-run sketch follows this list)
- Duration: 16 mins
- Status: Failed (Existing Failure)
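
For reference, the sketch below shows one way to re-run just this test locally. It is a hypothetical invocation, assuming a pipeline source checkout with pytest installed and the pl-regressiontest data available; it is not the project's documented procedure, and the --import-mode choice is an assumption made to cope with the hyphenated module filename.

    import pytest

    # Hypothetical local re-run: select only this regression test by the
    # method name listed above. "--import-mode=importlib" is assumed here
    # because the module file is hyphenated (regression-tester.py) and so
    # cannot be imported by name under the default import mode.
    pytest.main([
        '-k', 'test_uid___A002_X85c183_X36f_SPW15_23__PPR__regression',
        '--import-mode=importlib',
        'pipeline/infrastructure/utils/regression-tester.py',
    ])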
Error Log
Failed: Failed to match 2 result values within tolerances :
s7.hsd_imaging.uid___A002_X85c183_X36f_SPW15_23.ms.atmcor.qa.metric.SingleDishImageContamination
    values differ by > a relative difference of 1e-07
    expected: True
    new: False
    diff: 1
    percent_diff: 100.0%
s7.hsd_imaging.uid___A002_X85c183_X36f_SPW15_23.ms.atmcor.qa.score.SingleDishImageContamination
    values differ by > a relative difference of 1e-07
    expected: 0.65
    new: 1.0
    diff: -0.35
    percent_diff: -53.84615384615385%
Worst absolute diff, s7.hsd_imaging.uid___A002_X85c183_X36f_SPW15_23.ms.atmcor.qa.metric.SingleDishImageContamination: 1
Worst percentage diff, s7.hsd_imaging.uid___A002_X85c183_X36f_SPW15_23.ms.atmcor.qa.metric.SingleDishImageContamination: 100.0%

    @pytest.mark.fast
    @pytest.mark.alma
    def test_uid___A002_X85c183_X36f_SPW15_23__PPR__regression():
        """Run ALMA single-dish restoredata regression on the observation data of M100.

        Dataset: uid___A002_X85c183_X36f_SPW15_23
        Expected results version: casa-6.2.1-2-pipeline-2021.2.0.94
        """
        input_dir = 'pl-regressiontest/uid___A002_X85c183_X36f_SPW15_23'

        pr = PipelineRegression(input_dir=input_dir,
                                visname=['uid___A002_X85c183_X36f_SPW15_23.ms'],
                                expectedoutput_dir=('pl-regressiontest/uid___A002_X85c183_X36f_SPW15_23'))

        # copy files use restore task into products folder
        input_products = casa_tools.utils.resolve(f'{input_dir}/products')
        shutil.copytree(input_products, f'{pr.output_dir}/products')

>       pr.run(ppr=f'{input_dir}/PPR.xml')

pipeline/infrastructure/utils/regression-tester.py:526:
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
pipeline/infrastructure/utils/regression-tester.py:224: in run
    self.__compare_results(new_file, default_relative_tolerance)
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _

self = <pipeline.infrastructure.utils.regression-tester.PipelineRegression object at 0x7fbdf1b0ea00>
new_file = 'uid___A002_X85c183_X36f_SPW15_23.ms.NEW.results.txt'
relative_tolerance = 1e-07

    def __compare_results(self, new_file: str, relative_tolerance: float):
        """
        Compare results between new one loaded from file and old one.

        Args:
            new_file : file path of new results
            relative_tolerance : relative tolerance of output value
        """
        with open(self.expectedoutput_file) as expected_fd, open(new_file) as new_fd:
            expected_results = expected_fd.readlines()
            new_results = new_fd.readlines()
            errors = []
            worst_diff = (0, 0)
            worst_percent_diff = (0, 0)
            for old, new in zip(expected_results, new_results):
                try:
                    oldkey, oldval, tol = self.__sanitize_regression_string(old)
                    newkey, newval, _ = self.__sanitize_regression_string(new)
                except ValueError as e:
                    errorstr = "The results: {0} could not be parsed. Error: {1}".format(new, str(e))
                    errors.append(errorstr)
                    continue
                assert oldkey == newkey
                tolerance = tol if tol else relative_tolerance
                if newval is not None:
                    LOG.info(f'Comparing {oldval} to {newval} with a rel. tolerance of {tolerance}')
                    if oldval != pytest.approx(newval, rel=tolerance):
                        diff = oldval-newval
                        percent_diff = (oldval-newval)/oldval * 100
                        if abs(diff) > abs(worst_diff[0]):
                            worst_diff = diff, oldkey
                        if abs(percent_diff) > abs(worst_percent_diff[0]):
                            worst_percent_diff = percent_diff, oldkey
                        errorstr = f"{oldkey}\n\tvalues differ by > a relative difference of {tolerance}\n\texpected: {oldval}\n\tnew: {newval}\n\tdiff: {diff}\n\tpercent_diff: {percent_diff}%"
                        errors.append(errorstr)
                elif oldval is not None:
                    # If only the new value is None, fail
                    errorstr = f"{oldkey}\n\tvalue is None\n\texpected: {oldval}\n\tnew: {newval}"
                    errors.append(errorstr)
                else:
                    # If old and new values are both None, this is expected, so pass
                    LOG.info(f'Comparing {oldval} and {newval}... both values are None.')

            [LOG.warning(x) for x in errors]
            n_errors = len(errors)
            if n_errors > 0:
                summary_str = f"Worst absolute diff, {worst_diff[1]}: {worst_diff[0]}\nWorst percentage diff, {worst_percent_diff[1]}: {worst_percent_diff[0]}%"
                errors.append(summary_str)
>               pytest.fail("Failed to match {0} result value{1} within tolerance{1} :\n{2}".format(
                    n_errors, '' if n_errors == 1 else 's', '\n'.join(errors)), pytrace=True)
E               Failed: Failed to match 2 result values within tolerances :
E               s7.hsd_imaging.uid___A002_X85c183_X36f_SPW15_23.ms.atmcor.qa.metric.SingleDishImageContamination
E                   values differ by > a relative difference of 1e-07
E                   expected: True
E                   new: False
E                   diff: 1
E                   percent_diff: 100.0%
E               s7.hsd_imaging.uid___A002_X85c183_X36f_SPW15_23.ms.atmcor.qa.score.SingleDishImageContamination
E                   values differ by > a relative difference of 1e-07
E                   expected: 0.65
E                   new: 1.0
E                   diff: -0.35
E                   percent_diff: -53.84615384615385%
E               Worst absolute diff, s7.hsd_imaging.uid___A002_X85c183_X36f_SPW15_23.ms.atmcor.qa.metric.SingleDishImageContamination: 1
E               Worst percentage diff, s7.hsd_imaging.uid___A002_X85c183_X36f_SPW15_23.ms.atmcor.qa.metric.SingleDishImageContamination: 100.0%

pipeline/infrastructure/utils/regression-tester.py:290: Failed
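
To make the two reported deltas concrete, here is a minimal standalone sketch (not the pipeline's code) of the same relative-tolerance check applied to the key names and values quoted in the error log; the dictionary layout and the printing are illustrative assumptions.

    import pytest

    # Expected and new values copied from the error log above.
    expected = {
        's7.hsd_imaging.uid___A002_X85c183_X36f_SPW15_23.ms.atmcor.qa.metric.SingleDishImageContamination': True,
        's7.hsd_imaging.uid___A002_X85c183_X36f_SPW15_23.ms.atmcor.qa.score.SingleDishImageContamination': 0.65,
    }
    new = {
        's7.hsd_imaging.uid___A002_X85c183_X36f_SPW15_23.ms.atmcor.qa.metric.SingleDishImageContamination': False,
        's7.hsd_imaging.uid___A002_X85c183_X36f_SPW15_23.ms.atmcor.qa.score.SingleDishImageContamination': 1.0,
    }

    for key, oldval in expected.items():
        newval = new[key]
        if oldval != pytest.approx(newval, rel=1e-07):
            # Booleans participate in the arithmetic as 1/0, so the metric's
            # True -> False flip is reported as diff 1 and percent_diff 100.0%.
            diff = oldval - newval
            percent_diff = (oldval - newval) / oldval * 100
            print(f'{key}: diff={diff}, percent_diff={percent_diff}%')

With a relative tolerance of 1e-07, the flip of the boolean contamination metric and the accompanying change in the QA score (0.65 to 1.0) both register as mismatches, which is consistent with the two entries in the summary above.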