Commit 53e3e7fb authored by Simon Conseil

Merge branch 'combine' into 'master'

Allow correcting the zero offset in mpdaf_merging_sigma_clipping

See merge request !163
parents 6a6166f1 279f97a2
Pipeline #3149 passed in 21 minutes and 21 seconds
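As a usage illustration (not part of the diff itself): the zero-offset correction is applied per cube before the flux scale, so each valid pixel enters the sigma-clipped mean as (offset[i] + pix) * scale[i], as in the C code below. A minimal sketch of the user-facing call, with placeholder file names:

    from mpdaf.obj import CubeList

    # Placeholder exposure list; scalelist/offsetlist give one value per input cube.
    filenames = ['exp1.fits', 'exp2.fits', 'exp3.fits']
    scales = [0.5, 1.0, 1.5]    # multiplicative flux scales
    offsets = [1.5, 2.0, 2.5]   # additive zero offsets, now also applied by the C path

    clist = CubeList(filenames, scalelist=scales, offsetlist=offsets)
    # Each valid pixel of cube i enters the sigma-clipped mean as
    # (data + offsets[i]) * scales[i]; variances are scaled by scales[i]**2.
    cube, expmap, stat_pix = clist.combine()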
......@@ -40,6 +40,7 @@ from astropy import units as u
from astropy.table import Table
from astropy.utils.console import ProgressBar
from ctypes import c_char_p
from datetime import datetime
from numpy import allclose, array_equal
from .cube import Cube
......@@ -139,7 +140,7 @@ def _pycombine(self, nmax=2, nclip=5.0, var='propagate', nstop=2, nl=None,
if self.flux_scales is None and self.flux_offsets is None:
for l in range(nl):
if l % 100 == 0:
info('%d/%d', l, nl)
info('%d/%d %s', l, nl, datetime.now())
arr.fill(np.nan)
for i, f in enumerate(data):
x, y, x2, y2 = pos[i]
......@@ -158,17 +159,17 @@ def _pycombine(self, nmax=2, nclip=5.0, var='propagate', nstop=2, nl=None,
scales = np.ones(self.nfiles)
else:
scales = np.asarray(self.flux_scales)
self._logger.info('Using scales: %s', scales)
self._logger.info('Using scales')
if self.flux_offsets is None:
offsets = np.zeros(self.nfiles)
else:
offsets = np.asarray(self.flux_offsets)
self._logger.info('Using offsets: %s', offsets)
self._logger.info('Using offsets')
for l in range(nl):
if l % 100 == 0:
info('%d/%d', l, nl)
info('%d/%d %s', l, nl, datetime.now())
arr.fill(np.nan)
for i, f in enumerate(data):
x, y, x2, y2 = pos[i]
......@@ -335,6 +336,11 @@ class CubeList:
str(c.wcs.wcs.wcs.crpix), str(c.wcs.wcs.wcs.crval))
for c in self.cubes]
t = Table(rows=rows, names=('filename', 'shape', 'crpix', 'crval'))
if self.flux_scales is not None:
t['scale'] = self.flux_scales
if self.flux_offsets is not None:
t['offset'] = self.flux_offsets
for line in t.pformat():
self._logger.info(line)
......@@ -492,12 +498,19 @@ class CubeList:
files = files.encode('utf8')
if self.flux_scales is None:
scales = np.ones(self.nfiles)
scale = np.ones(self.nfiles)
else:
scales = np.asarray(self.flux_scales)
scale = np.asarray(self.flux_scales)
self._logger.info('Using scales')
if self.flux_offsets is None:
offset = np.zeros(self.nfiles)
else:
offset = np.asarray(self.flux_offsets)
self._logger.info('Using offsets')
ctools.mpdaf_merging_sigma_clipping(
c_char_p(files), data, vardata, expmap, scales, select_pix,
c_char_p(files), data, vardata, expmap, scale, offset, select_pix,
valid_pix, nmax, np.float64(nclip_low), np.float64(nclip_up),
nstop, np.int32(var_mean), np.int32(mad))
......
......@@ -61,15 +61,24 @@ class TestCubeList(unittest.TestCase):
shape = (5, 4, 3)
ncubes = 3
cubevals = [0, 1, 5]
cubevals = np.array([0, 1, 5])
scalelist = [0.5, 1, 1.5]
offsetlist = [1.5, 2, 2.5]
@classmethod
def setUpClass(cls):
cls.tmpdir = tempfile.mkdtemp()
print('\n>>> Create cubes in', cls.tmpdir)
cls.cubenames = []
cls.arr = np.arange(np.prod(cls.shape)).reshape(cls.shape)
cls.scaledcube = np.mean([
(cls.arr * val + cls.offsetlist[k]) * cls.scalelist[k]
for k, val in enumerate(cls.cubevals)], axis=0)
cls.combined_cube = np.mean([(cls.arr * i) for i in cls.cubevals],
axis=0)
for i in cls.cubevals:
cube = generate_cube(data=i, shape=cls.shape)
cube = generate_cube(data=cls.arr * i, shape=cls.shape)
cube.primary_header['CUBEIDX'] = i
cube.primary_header['OBJECT'] = 'OBJECT %d' % i
cube.primary_header['EXPTIME'] = 100
......@@ -92,10 +101,10 @@ class TestCubeList(unittest.TestCase):
def test_get_item(self):
clist = CubeList(self.cubenames)
assert_array_equal(clist[0, 2, 2], self.cubevals)
assert_array_equal(np.array([a.data for a in clist[0, :, :]])[:, 0, 0],
assert_array_equal(clist[0, 0, 1], self.cubevals)
assert_array_equal(np.array([a.data for a in clist[0, :, :]])[:, 0, 1],
self.cubevals)
assert_array_equal(np.array([a.data for a in clist[0]])[:, 0, 0],
assert_array_equal(np.array([a.data for a in clist[0]])[:, 0, 1],
self.cubevals)
def test_checks(self):
......@@ -114,26 +123,22 @@ class TestCubeList(unittest.TestCase):
@pytest.mark.skipif(not HAS_CFITSIO, reason="requires cfitsio")
def test_median(self):
clist = CubeList(self.cubenames)
combined_cube = np.ones(self.shape)
cube, expmap, stat_pix = clist.median(header={'FOO': 'BAR'})
self.assert_header(cube)
assert_array_equal(cube.data, combined_cube)
assert_array_equal(cube.data, self.arr)
assert_array_equal(expmap.data, self.expmap)
@pytest.mark.skipif(not HAS_FITSIO, reason="requires fitsio")
def test_pymedian(self):
clist = CubeList(self.cubenames)
combined_cube = np.ones(self.shape)
cube, expmap, stat_pix = clist.pymedian(header={'FOO': 'BAR'})
self.assert_header(cube)
assert_array_equal(cube.data, combined_cube)
assert_array_equal(cube.data, self.arr)
assert_array_equal(expmap.data, self.expmap)
@pytest.mark.skipif(not HAS_CFITSIO, reason="requires cfitsio")
def test_combine(self):
clist = CubeList(self.cubenames)
combined_cube = np.full(self.shape, 2, dtype=float)
cube, expmap, stat_pix = clist.combine(header={'FOO': 'BAR'})
cube2, expmap2, stat_pix2 = clist.combine(header={'FOO': 'BAR'},
mad=True)
......@@ -141,17 +146,15 @@ class TestCubeList(unittest.TestCase):
assert_array_equal(expmap.data, expmap2.data)
self.assert_header(cube)
assert_array_equal(cube.data, combined_cube)
assert_array_equal(cube.data, self.combined_cube)
assert_array_equal(expmap.data, self.expmap)
cube = clist.combine(nclip=(5., 5.), var='stat_mean')[0]
assert_array_equal(cube.data, combined_cube)
assert_array_equal(cube.data, self.combined_cube)
@pytest.mark.skipif(not HAS_FITSIO, reason="requires fitsio")
def test_pycombine(self):
clist = CubeList(self.cubenames)
combined_cube = np.full(self.shape, 2, dtype=float)
cube, expmap, stat_pix, rejmap = clist.pycombine(header={'FOO': 'BAR'})
cube2, expmap2, stat_pix2, rejmap2 = clist.pycombine(
header={'FOO': 'BAR'}, mad=True)
......@@ -159,43 +162,33 @@ class TestCubeList(unittest.TestCase):
assert_array_equal(expmap.data, expmap2.data)
self.assert_header(cube)
assert_array_equal(cube.data, combined_cube)
assert_array_equal(cube.data, self.combined_cube)
assert_array_equal(expmap.data, self.expmap)
cube = clist.pycombine(nclip=(5., 5.), var='stat_mean')[0]
assert_array_equal(cube.data, combined_cube)
assert_array_equal(cube.data, self.combined_cube)
@pytest.mark.skipif(not HAS_CFITSIO, reason="requires cfitsio")
def test_combine_scale(self):
clist = CubeList(self.cubenames, scalelist=[2.] * self.ncubes)
combined_cube = np.full(self.shape, 2 * 2, dtype=float)
clist = CubeList(self.cubenames, scalelist=self.scalelist,
offsetlist=self.offsetlist)
cube, expmap, stat_pix = clist.combine(header={'FOO': 'BAR'})
assert_array_equal(cube.data, combined_cube)
assert_array_equal(cube.data, self.scaledcube)
@pytest.mark.skipif(not HAS_FITSIO, reason="requires fitsio")
def test_pycombine_scale(self):
clist = CubeList(self.cubenames, scalelist=[2.] * self.ncubes)
combined_cube = np.full(self.shape, 2 * 2, dtype=float)
clist = CubeList(self.cubenames, scalelist=self.scalelist,
offsetlist=self.offsetlist)
cube2, expmap2, _, _ = clist.pycombine(header={'FOO': 'BAR'})
assert_array_equal(cube2.data, combined_cube)
clist = CubeList(self.cubenames, scalelist=[2.] * self.ncubes,
offsetlist=[0.5] * self.ncubes)
combined_cube = np.full(self.shape, 5, dtype=float)
cube2, expmap2, _, _ = clist.pycombine(header={'FOO': 'BAR'})
assert_array_equal(cube2.data, combined_cube)
assert_array_equal(cube2.data, self.scaledcube)
@pytest.mark.skipif(not HAS_FITSIO, reason="requires fitsio")
def test_mosaic_combine(self):
clist = CubeMosaic(self.cubenames, self.cubenames[0])
combined_cube = np.full(self.shape, 2, dtype=float)
cube, expmap, stat_pix, rejmap = clist.pycombine(header={'FOO': 'BAR'})
self.assert_header(cube)
assert_array_equal(cube.data, combined_cube)
assert_array_equal(cube.data, self.combined_cube)
assert_array_equal(expmap.data, self.expmap)
cube2, expmap2, _, _ = clist.pycombine(header={'FOO': 'BAR'}, mad=True)
......
......@@ -81,6 +81,7 @@ else:
array_1d_double, # double* var
array_1d_int, # int* expmap
array_1d_double, # double* scale
array_1d_double, # double* offset
array_1d_int, # int* selected_pix
array_1d_int, # int* valid_pix
ctypes.c_int, # int nmax
......
......@@ -63,15 +63,15 @@ int get_max_threads(int nfiles, int typ_var) {
exit(EXIT_FAILURE);
}
int num_nthreads = limit.rlim_cur/nfiles * 0.9;
int num_nthreads = limit.rlim_cur / nfiles * 0.9;
if (1000/nfiles < num_nthreads) {
//limit of cfitsio
num_nthreads = 1000/nfiles;
num_nthreads = 1000 / nfiles;
}
printf("num_nthreads: %d\n", num_nthreads);
if (typ_var==0) {
num_nthreads = num_nthreads/2;
num_nthreads = num_nthreads / 2;
}
int nthreads=1;
......@@ -136,13 +136,30 @@ int compute_loop_limits(long naxes, int* limits) {
return EXIT_SUCCESS;
}
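// Helper used by both merging functions below: print a timestamped progress line.
// A non-negative `value` forces a print with that percentage (0 at start, 100 at
// the end); a negative `value` prints a percentage derived from firstpix[2] and
// limits, at most once per minute (tracked through *ref).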
void report_progress(time_t *ref, long firstpix[], int limits[], float value) {
time_t now;
struct tm *info;
char buffer[80];
time(&now);
if ((value >= 0) || ((now - *ref) > 60)) {
*ref = now;
info = localtime(&now);
strftime(buffer, 80, "%x - %I:%M%p", info);
if (value < 0) {
value = firstpix[2] * 100.0 / (limits[1] - limits[0]);
}
printf("%s %3.1f%%\n", buffer, value);
fflush(stdout);
}
}
int mpdaf_merging_median(char* input, double* data, int* expmap, int* valid_pix)
{
char* filenames[MAX_FILES];
char buffer[80], begin[80];
int nfiles=0;
time_t now;
struct tm *info;
time(&now);
// read input files list
nfiles = split_files_list(input, filenames);
......@@ -152,7 +169,7 @@ int mpdaf_merging_median(char* input, double* data, int* expmap, int* valid_pix)
omp_set_num_threads(num_nthreads); // Set number of threads to use
// create threads
#pragma omp parallel shared(filenames, nfiles, data, expmap, valid_pix, buffer, begin)
#pragma omp parallel shared(filenames, nfiles, data, expmap, valid_pix)
{
#endif
......@@ -169,6 +186,7 @@ int mpdaf_merging_median(char* input, double* data, int* expmap, int* valid_pix)
{
printf("Read fits files\n");
printf("naxes %zu %zu %zu\n", naxes[0], naxes[1], naxes[2]);
report_progress(&now, NULL, NULL, 0);
}
// read the other files and check that their shape is the same
......@@ -191,8 +209,7 @@ int mpdaf_merging_median(char* input, double* data, int* expmap, int* valid_pix)
int *indx;
double *pix[MAX_FILES_PER_THREAD], *wdata;
long npixels = naxes[0];
for (i=0; i<nfiles; i++)
{
for (i=0; i<nfiles; i++) {
pix[i] = (double *) malloc(npixels * sizeof(double));
if (pix[i] == NULL) {
printf("Memory allocation error\n");
......@@ -203,25 +220,19 @@ int mpdaf_merging_median(char* input, double* data, int* expmap, int* valid_pix)
wdata = (double *) malloc(nfiles * sizeof(double));
indx = (int *) malloc(nfiles * sizeof(int));
for (firstpix[2] = limits[0]; firstpix[2] <= limits[1]; firstpix[2]++)
{
for (firstpix[1] = 1; firstpix[1] <= naxes[1]; firstpix[1]++)
{
for (firstpix[2] = limits[0]; firstpix[2] <= limits[1]; firstpix[2]++) {
for (firstpix[1] = 1; firstpix[1] <= naxes[1]; firstpix[1]++) {
int index0 = (firstpix[1]-1)*naxes[0] + (firstpix[2]-1)*naxes[0]*naxes[1];
for (i=0; i<nfiles; i++)
{
for (i=0; i<nfiles; i++) {
if (fits_read_pix(fdata[i], TDOUBLE, firstpix, npixels, NULL, pix[i],
NULL, &status))
break;
}
for(ii=0; ii< npixels; ii++)
{
for(ii=0; ii< npixels; ii++) {
n = 0;
for (i=0; i<nfiles; i++)
{
if (!isnan(pix[i][ii]))
{
for (i=0; i<nfiles; i++) {
if (!isnan(pix[i][ii])) {
wdata[n] = pix[i][ii];
indx[n] = n;
n = n + 1;
......@@ -229,59 +240,45 @@ int mpdaf_merging_median(char* input, double* data, int* expmap, int* valid_pix)
}
}
int index = ii + index0;
if (n==0)
{
if (n==0) {
data[index] = NAN; //mean value
expmap[index] = 0; //exp map
}
else if (n==1)
{
} else if (n==1) {
data[index] = wdata[0]; //mean value
expmap[index] = 1; //exp map
}
else
{
} else {
data[index] = mpdaf_median(wdata,n,indx);
expmap[index] = n;
}
}
}
#pragma omp master
{
time(&now);
info = localtime(&now);
strftime(buffer,80,"%x - %I:%M%p", info);
if(strcmp(buffer,begin) != 0)
if (firstpix[2] % 100 == 0) {
#pragma omp master
{
printf("%s %3.1f%%\n", buffer, (firstpix[2]-limits[0])*100.0/(limits[1]-limits[0]));
fflush(stdout);
strcpy(begin, buffer);
report_progress(&now, firstpix, limits, -1);
}
}
}
for (i=0; i<nfiles; i++)
{
for (i=0; i<nfiles; i++) {
#pragma omp atomic
valid_pix[i] += valid[i];
}
free(wdata);
free(indx);
for (i=0; i<nfiles; i++)
{
for (i=0; i<nfiles; i++) {
free(pix[i]);
fits_close_file(fdata[i], &status);
}
if (status)
{
if (status) {
fits_report_error(stderr, status);
exit(EXIT_FAILURE);
}
#ifdef _OPENMP
}
#endif
printf("%s 100%%\n", buffer);
fflush(stdout);
report_progress(&now, NULL, NULL, 100);
return EXIT_SUCCESS;
}
......@@ -289,14 +286,28 @@ int mpdaf_merging_median(char* input, double* data, int* expmap, int* valid_pix)
// var=0: 'propagate'
// var=1: 'stat_mean'
// var=2: 'stat_one'
int mpdaf_merging_sigma_clipping(char* input, double* data, double* var, int* expmap, double* scale, int* selected_pix, int* valid_pix, int nmax, double nclip_low, double nclip_up, int nstop, int typ_var, int mad)
int mpdaf_merging_sigma_clipping(
char* input,
double* data,
double* var,
int* expmap,
double* scale,
double* offset,
int* selected_pix,
int* valid_pix,
int nmax,
double nclip_low,
double nclip_up,
int nstop,
int typ_var,
int mad
)
{
char* filenames[MAX_FILES];
char buffer[80], begin[80];
int nfiles=0;
time_t now;
struct tm *info;
time(&now);
printf("merging cube using mean with sigma clipping\n");
printf("nmax = %d\n", nmax);
......@@ -312,7 +323,7 @@ int mpdaf_merging_sigma_clipping(char* input, double* data, double* var, int* ex
omp_set_num_threads(num_nthreads); // Set number of threads to use
// create threads
#pragma omp parallel shared(filenames, nfiles, data, var, expmap, scale, valid_pix, buffer, begin, nmax, nclip_low, nclip_up, nstop, selected_pix, typ_var, mad)
#pragma omp parallel shared(filenames, nfiles, data, var, expmap, scale, valid_pix, nmax, nclip_low, nclip_up, nstop, selected_pix, typ_var, mad)
{
#endif
......@@ -329,6 +340,7 @@ int mpdaf_merging_sigma_clipping(char* input, double* data, double* var, int* ex
{
printf("Read fits files\n");
printf("naxes %zu %zu %zu\n", naxes[0], naxes[1], naxes[2]);
report_progress(&now, NULL, NULL, 0);
}
// read the other files and check that their shape is the same
......@@ -363,7 +375,7 @@ int mpdaf_merging_sigma_clipping(char* input, double* data, double* var, int* ex
double *pix[MAX_FILES_PER_THREAD], *pixvar[MAX_FILES_PER_THREAD], *wdata, *wvar=NULL;
int *indx, *files_id;
double x[3];
long npixels = naxes[0];
long npixels = naxes[0] * naxes[1];
for (i=0; i<nfiles; i++)
{
pix[i] = (double *) malloc(npixels * sizeof(double));
......@@ -392,145 +404,114 @@ int mpdaf_merging_sigma_clipping(char* input, double* data, double* var, int* ex
for (firstpix[2] = limits[0]; firstpix[2] <= limits[1]; firstpix[2]++)
{
for (firstpix[1] = 1; firstpix[1] <= naxes[1]; firstpix[1]++)
{
int index0 = (firstpix[1]-1)*naxes[0] + (firstpix[2]-1)*naxes[0]*naxes[1];
int index0 = (firstpix[2] - 1) * npixels;
for (i=0; i<nfiles; i++)
{
if (fits_read_pix(fdata[i], TDOUBLE, firstpix, npixels, NULL, pix[i],
NULL, &status))
// read data values for the current plane, and optionally the stat (variance) values
for (i=0; i<nfiles; i++) {
if (fits_read_pix(fdata[i], TDOUBLE, firstpix, npixels,
NULL, pix[i], NULL, &status))
break;
}
if (typ_var==0) {
for (i=0; i<nfiles; i++) {
if (fits_read_pix(fvar[i], TDOUBLE, firstpix, npixels,
NULL, pixvar[i], NULL, &status))
break;
}
if (typ_var==0)
{
for (i=0; i<nfiles; i++)
{
if (fits_read_pix(fvar[i], TDOUBLE, firstpix, npixels, NULL, pixvar[i],
NULL, &status))
break;
}
}
for(ii=0; ii< npixels; ii++)
{
n = 0;
for (i=0; i<nfiles; i++)
{
if (!isnan(pix[i][ii]))
{
wdata[n] = pix[i][ii]*scale[i];
files_id[n] = i;
indx[n] = n;
if (typ_var==0)
{
wvar[n] = pixvar[i][ii]*scale[i]*scale[i];
}
n = n + 1;
valid[i] = valid[i] + 1;
}
for(ii=0; ii< npixels; ii++) {
n = 0;
for (i=0; i<nfiles; i++) {
if (!isnan(pix[i][ii])) {
wdata[n] = (offset[i] + pix[i][ii]) * scale[i];
files_id[n] = i;
indx[n] = n;
if (typ_var==0) {
wvar[n] = pixvar[i][ii] * scale[i] * scale[i];
}
n += 1;
valid[i] += 1;
}
int index = ii + index0;
if (n==0)
{
data[index] = NAN; //mean value
expmap[index] = 0; //exp map
var[index] = NAN; //var
}
else if (n==1)
{
data[index] = wdata[0]; //mean value
expmap[index] = 1; //exp map
if (typ_var==0) //var
var[index] = wvar[0];
else
var[index] = NAN;
select[files_id[0]] += 1;
}
}
int index = ii + index0;
if (n==0) {
data[index] = NAN; //mean value
expmap[index] = 0; //exp map
var[index] = NAN; //var
} else if (n==1) {
data[index] = wdata[0]; //mean value
expmap[index] = 1; //exp map
if (typ_var==0) //var
var[index] = wvar[0];
else
{
if (mad==1)
{
mpdaf_mean_madsigma_clip(wdata, n, x, nmax, nclip_low, nclip_up, nstop, indx);
}
else
{
mpdaf_mean_sigma_clip(wdata, n, x, nmax, nclip_low, nclip_up, nstop, indx);
}
data[index] = x[0];//mean value
expmap[index] = x[2];//exp map
if (typ_var==0)
{
var[index] = mpdaf_sum(wvar,x[2],indx)/x[2]/x[2];
}
else
{
if (x[2]>1)
{
var[index] = (x[1]*x[1]);//var
if (typ_var==1)
{
var[index] /= (x[2]-1);
}
}
else
{
var[index] = NAN;//var
var[index] = NAN;
select[files_id[0]] += 1;
} else {
if (mad==1) {
mpdaf_mean_madsigma_clip(wdata, n, x, nmax, nclip_low,
nclip_up, nstop, indx);
} else {
mpdaf_mean_sigma_clip(wdata, n, x, nmax, nclip_low,
nclip_up, nstop, indx);
}
data[index] = x[0]; // mean value
expmap[index] = x[2]; // exp map
if (typ_var==0) { // var
var[index] = mpdaf_sum(wvar, x[2], indx) / (x[2] * x[2]);
} else {
if (x[2]>1) {
var[index] = (x[1] * x[1]);
if (typ_var==1) {
var[index] /= (x[2] - 1);
}
}
for (i=0; i<x[2]; i++)
{
select[files_id[indx[i]]] += 1;
} else {
var[index] = NAN;
}
}
for (i=0; i<x[2]; i++) {
select[files_id[indx[i]]] += 1;
}
}
}
#pragma omp master
{
time(&now);
info = localtime(&now);
strftime(buffer,80,"%x - %I:%M%p", info);
if(strcmp(buffer,begin) != 0)
if (firstpix[2] % 100 == 0) {
#pragma omp master
{
printf("%s %3.1f%%\n", buffer, firstpix[2]*100.0/(limits[1]-limits[0]));
fflush(stdout);
strcpy(begin, buffer);
report_progress(&now, firstpix, limits, -1);
}
}
}
for (i=0; i<nfiles; i++)
{
for (i=0; i<nfiles; i++) {
#pragma omp atomic
valid_pix[i] += valid[i];
#pragma omp atomic
selected_pix[i] += select[i];
}
free(wdata);
free(indx);
free(files_id);
for (i=0; i<nfiles; i++)
{
for (i=0; i<nfiles; i++) {
free(pix[i]);
fits_close_file(fdata[i], &status);
}
if (typ_var==0)
{
if (typ_var==0) {
free(wvar);
for (i=0; i<nfiles; i++)
{
for (i=0; i<nfiles; i++) {
free(pixvar[i]);
fits_close_file(fvar[i], &status);
}
}
if (status)
{
if (status) {
fits_report_error(stderr, status);
exit(EXIT_FAILURE);
}
#ifdef _OPENMP
}
#endif
printf("%s 100%%\n", buffer);
fflush(stdout);
report_progress(&now, NULL, NULL, 100);
return EXIT_SUCCESS;
}