Help with mlp evaluate function

Hi,
I have adapted the mlpHiggs tutorial example for my own inputs. When I call TMLPAnalyzer::DrawNetwork() it draws the result I expect, and a histogram of TMultiLayerPerceptron::Evaluate() should look very similar. However, when I call Evaluate() I get the same value, close to (but not equal to) zero, for every signal and background entry. My code is:

TMultiLayerPerceptron *mlp = new TMultiLayerPerceptron("mass,angle:2:type", simu, "Entry$%2", "(Entry$+1)%2");

mlp->SetLearningMethod(mlp->kStochastic);

mlp->Train(ntrain, "text,graph,update=10");

// Use TMLPAnalyzer to see what it looks for
TCanvas* mlpa_canvas = new TCanvas("mlpa_canvas", "Network analysis");

mlpa_canvas->Divide(2,2);

TMLPAnalyzer ana(mlp);

// Initialisation
ana.GatherInformations();

// output to the console
ana.CheckNetwork();

mlpa_canvas->cd(1);

// shows how each variable influences the network
ana.DrawDInputs();

mlpa_canvas->cd(2);

// shows the network structure
mlp->Draw();

mlpa_canvas->cd(3);

// draws the resulting network
ana.DrawNetwork(0, "type==1", "type==0");

mlpa_canvas->cd(4);
// Use the NN to plot the results for each sample
// This will give approx. the same result as DrawNetwork.
// All entries are used, while DrawNetwork focuses on
// the test sample. Also the x-axis range is manually set.
TH1F *bg = new TH1F("bgh", "NN output", 50, -.5, 1.5);
TH1F *sig = new TH1F("sigh", "NN output", 50, -.5, 1.5);
bg->SetDirectory(0);
sig->SetDirectory(0);
Double_t params[2];
for (Int_t i = 0; i < background->GetEntries(); i++)
{
background->GetEntry(i);
params[0] = mass;
params[1] = angle;
bg->Fill(mlp->Evaluate(0,params));
}
for (Int_t i = 0; i < signal->GetEntries(); i++)
{
signal->GetEntry(i);
params[0] = mass;
params[1] = angle;
sig->Fill(mlp->Evaluate(0,params));
}
bg->SetLineColor(kBlue);
bg->SetFillStyle(3008); bg->SetFillColor(kBlue);
sig->SetLineColor(kRed);
sig->SetFillStyle(3003); sig->SetFillColor(kRed);
bg->SetStats(0);
sig->SetStats(0);
bg->Draw();
sig->Draw("same");
TLegend *legend = new TLegend(.75, .80, .95, .95);
legend->AddEntry(bg, "Background");
legend->AddEntry(sig, "Signal");
legend->Draw();
mlpa_canvas->cd(0);

The graphical output is at:
clynch.home.cern.ch/clynch/result.eps

Thanks for your help,
Clare

Hi Clare,

You are probably missing the SetBranchAddress calls at the beginning of the script (the part you didn't post).

You should check the values of angle and mass in the loop that fills the NN output histogram.
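
For example, a quick check along these lines (using the names from your macro, so mlp and background are the objects you already created) would show whether the inputs are wired up correctly before they reach Evaluate():

// Print the first few inputs together with the network response.
// If mass and angle repeat the same value here, the branch addresses
// are not connected to the variables you think they are.
Float_t angle, mass;
background->SetBranchAddress("angle", &angle);
background->SetBranchAddress("mass", &mass);
Double_t params[2];
for (Int_t i = 0; i < 10 && i < background->GetEntries(); i++) {
   background->GetEntry(i);
   params[0] = mass;
   params[1] = angle;
   printf("entry %d: mass = %f, angle = %f -> NN output = %f\n",
          i, mass, angle, mlp->Evaluate(0, params));
}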

Cheers,
Christophe.

Hi,
Thanks for replying. I’ve checked all the values of mass and angle, and they seem to be correct. Here is the full code, including the SetBranchAddress part:

void mlpHiggs(Int_t ntrain=100)
{

if (!gROOT->GetClass("TMultiLayerPerceptron"))
{
gSystem->Load("libMLP");
}

// Prepare inputs
// The two trees are merged into one, and a "type" branch,
// equal to 1 for the signal and 0 for the background, is added.
TFile input("myroot.root");

TTree *signal = (TTree *) input.Get("sigtree");

TTree *background = (TTree *) input.Get("backtree");

TFile *hfile = (TFile *)gROOT->FindObject("output.root");
if (hfile)
hfile->Close();
hfile = new TFile("output.root", "RECREATE");

TTree *simu = new TTree("MonteCarlo", "Filtered Monte Carlo Events");
Float_t angle,mass;
Int_t type;

signal->SetBranchAddress("angle", &angle);
signal->SetBranchAddress("mass", &mass);

background->SetBranchAddress("angle", &angle);
background->SetBranchAddress("mass", &mass);

simu->Branch("angle", &angle, "angle/F");
simu->Branch("mass", &mass, "mass/I");
simu->Branch("type", &type, "type/I");

type = 1;
Int_t i;
for (i = 0; i < signal->GetEntries(); i++)
{
signal->GetEntry(i);
simu->Fill();

}

type = 0;
for (i = 0; i < background->GetEntries(); i++)
{
background->GetEntry(i);
simu->Fill();
}

TMultiLayerPerceptron *mlp = new TMultiLayerPerceptron("mass,angle:2:type", simu, "Entry$%2", "(Entry$+1)%2");

mlp->SetLearningMethod(mlp->kStochastic);

mlp->Train(ntrain, "text,graph,update=10");

// Use TMLPAnalyzer to see what it looks for
TCanvas* mlpa_canvas = new TCanvas("mlpa_canvas", "Network analysis");

mlpa_canvas->Divide(2,2);

TMLPAnalyzer ana(mlp);

// Initialisation
ana.GatherInformations();

// output to the console
ana.CheckNetwork();

mlpa_canvas->cd(1);

// shows how each variable influences the network
ana.DrawDInputs();

mlpa_canvas->cd(2);

// shows the network structure
mlp->Draw();

mlpa_canvas->cd(3);

// draws the resulting network
ana.DrawNetwork(0, "type==1", "type==0");

mlpa_canvas->cd(4);
// Use the NN to plot the results for each sample
// This will give approx. the same result as DrawNetwork.
// All entries are used, while DrawNetwork focuses on
// the test sample. Also the x-axis range is manually set.
TH1F *bg = new TH1F("bgh", "NN output", 50, -.5, 1.5);
TH1F *sig = new TH1F("sigh", "NN output", 50, -.5, 1.5);
bg->SetDirectory(0);
sig->SetDirectory(0);
Double_t params[2];
for (i = 0; i < background->GetEntries(); i++)
{
background->GetEntry(i);
params[0] = mass;
params[1] = angle;
bg->Fill(mlp->Evaluate(0,params));
}
for (i = 0; i < signal->GetEntries(); i++)
{
signal->GetEntry(i);
params[0] = mass;
params[1] = angle;
sig->Fill(mlp->Evaluate(0,params));
}
bg->SetLineColor(kBlue);
bg->SetFillStyle(3008); bg->SetFillColor(kBlue);
sig->SetLineColor(kRed);
sig->SetFillStyle(3003); sig->SetFillColor(kRed);
bg->SetStats(0);
sig->SetStats(0);
bg->Draw();
sig->Draw("same");
TLegend *legend = new TLegend(.75, .80, .95, .95);
legend->AddEntry(bg, "Background");
legend->AddEntry(sig, "Signal");
legend->Draw();
mlpa_canvas->cd(0);

simu->Print();
hfile->Write();
}

Thanks again,
Clare