Dear co-rooters,
I am facing a rather bizarre issue. I am doing some rather simple (but potentially memory-heavy) matrix calculations using uBLAS.
My code is the following:
covariance_linear-interpolation.C
//ROOT6
// source /afs/cern.ch/sw/lcg/external/gcc/4.8/x86_64-slc6/setup.csh
// source /afs/cern.ch/sw/lcg/app/releases/ROOT/6.02.02/x86_64-slc6-gcc48-opt/root/bin/thisroot.csh
#include <iostream>
#include <fstream>
#include <vector>
#include <algorithm> // std::max()
#include "TROOT.h"
#include "TGraph.h"
#include "TH1.h"
#include "TMath.h"
#include "TMatrixF.h"
#include "TMatrixFSym.h"
#include "TMatrixFSparse.h"
#include "TMatrixTSym.h"
#include "TString.h"
#include "TRandom.h"
#include </usr/include/boost/numeric/ublas/matrix.hpp>
#include </usr/include/boost/numeric/ublas/matrix_sparse.hpp>
#include </usr/include/boost/numeric/ublas/symmetric.hpp>
#include </usr/include/boost/numeric/ublas/io.hpp>
#include "covariance_lib.cc"
using namespace std;
using namespace boost::numeric::ublas;
void covariance_linear_interpolation(TString filename, int E_MIN, int E_MAX, int N_BPDEC, bool return_TMatrix){
  // (2) Define the isolethargic binning - MeV
  int ndec = E_MAX - E_MIN;
  int nbins = (int) ndec*N_BPDEC;
  float step = (float) ndec / nbins;
  float* En_USER = new float[nbins];
  for(int i=0; i < nbins; i++) {
    En_USER[i] = (float) pow(10., step * (float) i + E_MIN);
  }
  // (3) Read the evaluation and interpolate
  int nbins_EVAL = count_lines(filename);
  float* xs_USER = new float[nbins];
  float* En_EVAL = new float[nbins_EVAL];
  float* xs_EVAL = new float[nbins_EVAL];
  int* idx = new int[nbins];
  input_ascii(filename, En_EVAL, xs_EVAL);
  linear_interpolation(nbins, nbins_EVAL, En_USER, xs_USER, En_EVAL, xs_EVAL, idx);
  // (2) Define the size for the covariance matrices
  const int size_EVAL = nbins_EVAL;
  const int size_USER = nbins;
  // (3) Define the types of T-objects that will be used
  float *covariance_EVAL = new float[size_EVAL*(size_EVAL+1)/2];
  for (int i=0; i<size_EVAL*(size_EVAL+1)/2; ++i){
    covariance_EVAL[i] = 1.e-3*i;
  }
  //TMatrixFSym *COV_EVAL = new TMatrixFSym(size_EVAL, covariance_EVAL);
  //TMatrixFSym COV_EVAL(size_EVAL, covariance_EVAL);
  symmetric_matrix<float, upper> COV_EVAL (size_EVAL, size_EVAL);
  COV_EVAL = fill_symmetric( COV_EVAL, covariance_EVAL, size_EVAL);
  delete[] covariance_EVAL;
  delete[] xs_USER;
  delete[] xs_EVAL;
  // () Define the non-zero elements of the SParse TMatrix
  int* icol = new int[2*(nbins)];
  int* irow = new int[2*(nbins)];
  float* coeff = new float[2*(nbins)];
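  // For every user bin i the loop below stores two (row, column, value) triplets:
  // the columns are idx[i] and idx[i]+1 on the evaluation grid, and the values are
  // the interpolation weights computed from En_USER[i], En_EVAL[idx[i]] and
  // En_EVAL[idx[i+1]]. It therefore assumes that idx[i], idx[i]+1 and idx[i+1]
  // (as filled by linear_interpolation) are all valid indices into En_EVAL.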
  for (int i=0; i<(nbins)-1; ++i){//<-------------This is the loop where it randomly crashes
    cout << i << " ";
    icol[2*i] = idx[i];
    icol[2*i+1] = idx[i] + 1;
    irow[2*i] = i;
    irow[2*i+1] = i;
    coeff[2*i] = 1 + (En_USER[i]-En_EVAL[idx[i]])/(En_EVAL[idx[i]]-En_EVAL[idx[i+1]]);
    coeff[2*i+1] = coeff[2*i] - 1;
    if ( !isfinite(coeff[2*i]) ){// If En_USER == En_EVAL then sensitivity coefficient is 1
      coeff[2*i] = 1;
      coeff[2*i+1] = 0;
    }
  }
  delete[] En_USER;
  delete[] En_EVAL;
  delete[] idx;
  //TMatrixFSparse G(size_USER, size_EVAL);
  //G.SetMatrixArray(2*(nbins), irow, icol, coeff);
  //mapped_matrix<float> G(size_USER, size_EVAL,2*nbins);
  //cout << "Rows = " << size_USER << ", Cols = " << G.size1() << ", DATA = " << G.size2() << endl;
  //for (int i=0; i<2*nbins; ++i) {cout << i << ". " << irow[i] << "/" << icol[i] << " ";}
  //G = fill_sparse( G, 2*nbins, irow, icol, coeff);//here it crashes
  delete[] icol;
  delete[] irow;
  delete[] coeff;
  //cout << G << endl;
  /*
  // (7) Calculate the covariance matrix - will be symmetric
  TMatrixF COV_USER(G, TMatrixF::kMult, COV_EVAL);
  int size_MAX = max(size_USER, size_EVAL);
  G.ResizeTo(size_MAX, size_MAX);
  G.T();
  G.ResizeTo(size_EVAL, size_USER);
  COV_USER *= G;
  //COV_USER.TMatrixF::Mult(COV_USER, G.T());
  COV_USER.Print();
  //delete COV_USER;
  //if (return_TMatrix) return ;
  return;
  */
}
covariance_lib.cc
// Inputs : -name   = name of the ASCII file to be read - a two-column file with x and y values, respectively, is assumed
//          -X_USER = array that contains the user-defined energy binning
//          -output = if output = TRUE exports the interpolation array
//                    if output = FALSE exports the index of the input values used in the interpolation
void linear_interpolation(int size_USER, int size_EVAL, float* X_USER, float* Y_USER, float* X_EVAL, float* Y_EVAL, int* index){
  float x1, x2, y1, y2, a, b;
  bool boo;
  x1 = 0.; x2 = 0.; y1 = 0; y2 = 0.; a = 0.; b = 0.;
  for(int i=0; i<size_USER; ++i){
    boo = true;
    for (int j=0; j<size_EVAL; j++){
      if (boo==true){
        if(X_USER[i] == X_EVAL[j]){
          Y_USER[i] = Y_EVAL[j];
          index[i] = j;
          boo = false;
        }
        else if(X_USER[i]>X_EVAL[j]){
          x1 = X_EVAL[j];
          y1 = Y_EVAL[j];
          index[i] = j;
        }
        else if( X_USER[i]<X_EVAL[j] && boo == true ){
          x2 = X_EVAL[j];
          y2 = Y_EVAL[j];
          a = (y2-y1)/(x2-x1);
          b = y2-a*x2;
          Y_USER[i] = a*X_USER[i]+b;
          //index[i] = j;
          boo = false;
        }
      }
    }//end of loop over EVAL
  }//end of loop over USER
}//____linear_interpolation()
void input_ascii(TString filename, float* X_EVAL, float* Y_EVAL){
  std::ifstream myfile;
  myfile.open(filename);
  float x = 0, y = 0;
  int i = 0;
  while(1){
    myfile >> x >> y;
    if (!myfile.good()) break;
    X_EVAL[i] = x;
    Y_EVAL[i] = y;
    i++;
  }
  myfile.close();
}//___input_ascii()
int count_lines(TString filename){
  float x, y;
  int nlines;
  // (1) Open the file to count its lines
  std::ifstream myfile;
  myfile.open(filename);
  if(!myfile){
    cout << "File not found" << endl;
  }
  nlines = 0;
  while(1){
    myfile >> x >> y;
    if (!myfile.good()) break;
    nlines++;
  }
  myfile.close();
  return nlines;
}//___count_lines()
std::vector<float> isolethargic_binning(int min, int max, int bpd){
  int ndec = max - min;
  int nbins = (int) ndec*bpd;
  float step = (float) ndec / nbins;
  std::vector<float> xbins(nbins+1);
  for(int i=0; i <= nbins; i++) {
    xbins[i] = (float) pow(10., step * (float) i + min);
  }
  return xbins;
}//___isolethargic_binning()
boost::numeric::ublas::symmetric_matrix<float> fill_symmetric (boost::numeric::ublas::symmetric_matrix<float> m_sym, float* filler, int size_FILLER){
  float* in = filler;
  for (size_t i=0; i<m_sym.size1(); ++ i)
    for (size_t j = 0; j <= i && in != &filler[size_FILLER+1]; ++ j)
      m_sym (i, j) = *in++;
  return m_sym;
}//___fill_symmetric
boost::numeric::ublas::mapped_matrix<float> fill_sparse (boost::numeric::ublas::mapped_matrix<float> m_spar, int size_DATA, int* row, int* col, float* data){
  for (int i=0; i<size_DATA; ++ i)
    m_spar( row[i], col[i] ) = data[i];
  return m_spar;
}//___fill_sparse
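For reference, this is roughly how the two filler helpers are meant to be used in isolation. The sketch below is only an illustration with tiny made-up arrays (the macro name and the numbers are invented), not the actual evaluation data; it should compile the same way, e.g. .L minimal_fill_test.C++.
minimal_fill_test.C
// Standalone sketch: fill a 3x3 symmetric matrix and a 2x3 sparse matrix with toy values
#include <iostream>
#include <fstream>
#include <vector>
#include <cmath>
#include "TString.h"
#include </usr/include/boost/numeric/ublas/matrix.hpp>
#include </usr/include/boost/numeric/ublas/matrix_sparse.hpp>
#include </usr/include/boost/numeric/ublas/symmetric.hpp>
#include </usr/include/boost/numeric/ublas/io.hpp>
using namespace std;
using namespace boost::numeric::ublas;
#include "covariance_lib.cc"
void minimal_fill_test(){
  // a 3x3 symmetric matrix has 3*(3+1)/2 = 6 independent elements
  float cov[6] = {1.f, 2.f, 3.f, 4.f, 5.f, 6.f};
  symmetric_matrix<float> m_sym(3, 3);
  m_sym = fill_symmetric(m_sym, cov, 6);
  // a 2x3 sparse matrix filled from 4 (row, column, value) triplets
  int row[4] = {0, 0, 1, 1};
  int col[4] = {0, 1, 1, 2};
  float data[4] = {0.5f, 0.5f, 0.25f, 0.75f};
  mapped_matrix<float> m_spar(2, 3, 4);
  m_spar = fill_sparse(m_spar, 4, row, col, data);
  cout << m_sym << endl << m_spar << endl;
}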
The problem is that the code crashes after a random number of iterations (the for loop where the crash takes place is indicated in the code), and I can't understand what the matter is.
Note that I am running the code on lxplus using ROOT 6.
A sample file can be found here and the code can be run using:
root [0] .L covariance_linear-interpolation.C++
root [1] covariance_linear_interpolation("ENDF_B-VII_1_reduced.dat", -3, 0, 1000, 0)
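In case it helps narrow things down, one diagnostic I was thinking of adding right before the crashing loop is a bounds check on the indices returned by linear_interpolation (this is only a guess at what to rule out, and the message text is mine):
  // diagnostic only: check that idx[i] and idx[i]+1 stay inside the evaluation arrays
  for (int i = 0; i < nbins; ++i) {
    if (idx[i] < 0 || idx[i] + 1 >= nbins_EVAL) {
      cout << "suspicious index at i = " << i << ": idx[i] = " << idx[i]
           << " (nbins_EVAL = " << nbins_EVAL << ")" << endl;
    }
  }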
Any idea on what the issue might be?
Thanks in advance!
For completeness, the output can be found below:
*** Break *** segmentation violation
0 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31 32 33 34 35 36 37 38 39 40 41 42 43 44 45 46 47 48 49 50 51 52 53 54 55 56 57 58 59 60 61 62 63 64 65 66 67 68 69 70 71 72 73 74 75 76 77 78 79 80 81 82 83 84 85 86 87 88 89 90 91 92 93 94 95 96 97 98 99 100 101 102 103 104 105 106 107 108 109 110 111 112 113 114 115 116 117 118 119 120 121 122 123 124 125
===========================================================
There was a crash.
This is the entire stack trace of all threads:
===========================================================
#0 0x00007fb96268182e in waitpid () from /lib64/libc.so.6
#1 0x00007fb962613479 in do_system () from /lib64/libc.so.6
#2 0x00007fb963b0ce7f in TUnixSystem::StackTrace() () from /afs/cern.ch/sw/lcg/app/releases/ROOT/6.02.02/x86_64-slc6-gcc48-opt/root/lib/libCore.so
#3 0x00007fb963b0e9ec in TUnixSystem::DispatchSignals(ESignals) () from /afs/cern.ch/sw/lcg/app/releases/ROOT/6.02.02/x86_64-slc6-gcc48-opt/root/lib/libCore.so
#4 <signal handler called>
#5 0x00007fb957725813 in covariance_linear_interpolation(TString, int, int, int, bool) () from /eos/user/a/astamato/covariance/code/covariance_linear-interpolation_C.so
#6 0x00007fb963f6fce1 in ?? ()
#7 0x0000000000000000 in ?? ()
===========================================================
The lines below might hint at the cause of the crash.
If they do not help you then please submit a bug report at http://root.cern.ch/bugs.
Please post the ENTIRE stack trace from above as an attachment in addition to anything else that might help us fixing this issue.
===========================================================
#5 0x00007fb957725813 in covariance_linear_interpolation(TString, int, int, int, bool) () from /eos/user/a/astamato/covariance/code/covariance_linear-interpolation_C.so
#6 0x00007fb963f6fce1 in ?? ()
#7 0x0000000000000000 in ?? ()
===========================================================