Post by Zeinab Ghassabi » Thu Apr 12, 2018, 20:26
Good day,
In the previous topic, you recommended increasing the number of classes to get better results. I have applied DGM to new images and attached the training and test images together with the segmentation results for 6 and 12 classes.

- Training image: 8CE84370_Anchor_resized.tif
- Training feature vectors: All_features_Anchorimage.csv
- Test image: 8D031E70_adjBscan_resized.tif
- Test feature vectors: All_features_testimage.csv
- Ground truth 1 (6 classes): 8CE84370_GT.tif
- Ground truth 2 (12 classes): 8CE84370_GTregionsandlayers.tif
The following images visualize the layers: 8CE84370_gt1_2.jpg and 8CE84370_gt_regions_layers.jpg.
I tested all combinations of node and edge models and attached the best results. Please see the last two images for the classification results with 6 and 12 classes:

- 8CE84370_DGM_Output_layerclassificationNodemodel(4)-EdgeModel(4).tif
- 8CE84370_DGM_Output_layerclassificationNodemodel(4)-EdgeModel(2)37FGTRegionsLayers.tif
It seems that for 6 classes the combination of the Nearest Neighbor or Random Forest node model with the Concatenated edge model provides the best classification results. However, it takes a long time, about 30 minutes or more.
For 12 classes, the combination of the Nearest Neighbor node model with the Contrast-Sensitive Potts or No Edges edge model provides better classification results. However, it also takes a long time.
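For reference, this is how I map those combinations onto the model selectors in DemoTrain.cpp, going by the switch statements in the listing below (this numbering is my own reading of the code; note that cases 3 and 4 are both nearest-neighbor node models):

Code:
// 6 classes:  nodeModel = 3 (CTrainNodeKNN) or 5 (CTrainNodeCvRF),
//             edgeModel = 4 (CTrainEdgeConcat)
// 12 classes: nodeModel = 3 (CTrainNodeKNN),
//             edgeModel = 2 (CTrainEdgePottsCS) or 0 (no edges, emulated via a Potts weight of 1)
int nodeModel = 3;
int edgeModel = 2;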
Could you please give me more guidance on how to change the parameters to get better results? What do you recommend to improve the classification of these images?
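At the moment the only free parameters I pass are the pairwise-potential weights in params[] = { 10, 0.01f }. Unless you suggest otherwise, I was planning to sweep the first (Potts weight) parameter roughly as in the sketch below. This is only a rough outline built from the calls already in the listing; it assumes that fillEdges() can be called repeatedly to overwrite the pairwise potentials and that decode() can be re-run on the same graph:

Code:
// Hypothetical sweep of the Potts weight for the Contrast-Sensitive Potts model
for (float w : { 1.0f, 10.0f, 100.0f }) {
    float p[] = { w, 0.01f };                      // p[1]: contrast-sensitivity parameter
    graph->fillEdges(edgeTrainer, test_fv, p, 2);  // refill the pairwise potentials
    vec_byte_t dec = decoder->decode(10);          // fewer LBP iterations while tuning
    Mat sol(imgSize, CV_8UC1, dec.data());
    CCMat cm(nStates);                             // fresh confusion matrix per run
    cm.estimate(test_gt, sol);
    printf("w = %6.1f -> accuracy = %.2f%%\n", w, cm.getAccuracy());
}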
Here is the latest version of DemoTrain.cpp:
Code:
// Example "Training" 2D-case with model training
#include "DGM.h"
#include "VIS.h"
#include "DGM\timer.h"
#include <iostream>
#include <stdio.h>
#include <conio.h>
#include "DGM\serialize.h"
/*
// Store the colors we read, so that we can write them again.
std::vector<Vec3b> vPalette;

// Produce a color image from a bunch of labels
template <typename T>
Mat colorize(const Mat &map)
{
    Mat res(map.size(), CV_8UC3);
    for (int y = 0; y < res.rows; y++) {
        const T *pMap = map.ptr<T>(y);
        Vec3b   *pRes = res.ptr<Vec3b>(y);
        for (int x = 0; x < res.cols; x++)
            pRes[x] = vPalette[pMap[x]];
    }
    return res;
}

void fillPalette(void)
{
    if (!vPalette.empty()) vPalette.clear();
    vPalette.push_back(Vec3b(0, 0, 255));
    vPalette.push_back(Vec3b(0, 128, 255));
    vPalette.push_back(Vec3b(0, 255, 255));
    vPalette.push_back(Vec3b(0, 255, 128));
    vPalette.push_back(Vec3b(0, 255, 0));
    vPalette.push_back(Vec3b(128, 255, 0));
    vPalette.push_back(Vec3b(255, 255, 0));
    vPalette.push_back(Vec3b(255, 128, 0));
    vPalette.push_back(Vec3b(255, 0, 0));
    vPalette.push_back(Vec3b(255, 0, 128));
    vPalette.push_back(Vec3b(255, 0, 255));
    vPalette.push_back(Vec3b(128, 0, 255));
    vPalette.push_back(Vec3b(0, 0, 128));
    vPalette.push_back(Vec3b(0, 64, 128));
    vPalette.push_back(Vec3b(0, 128, 128));
    vPalette.push_back(Vec3b(0, 128, 64));
    vPalette.push_back(Vec3b(0, 128, 0));
    // vPalette.push_back(Vec3b(64, 128, 0));
    vPalette.push_back(Vec3b(128, 128, 0));
    vPalette.push_back(Vec3b(128, 64, 0));
    vPalette.push_back(Vec3b(128, 0, 0));
    vPalette.push_back(Vec3b(128, 0, 64));
}
*/
using namespace DirectGraphicalModels;
using namespace DirectGraphicalModels::vis;
int main(int argc, char *argv1[])   // command-line arguments are ignored; all paths are hard-coded below
{
    const CvSize       imgSize   = cvSize(1024, 78);   // previously (1024, 61)
    const unsigned int nStates   = 12;                 // 6 for the layers-only ground truth; previously 7 and 4
    const unsigned int nFeatures = 37;                 // previously 3, 5 and 36
    const int          width     = imgSize.width;
    const int          height    = imgSize.height;

    const char *argv[] = { "", "", "", "", "", "", "", "", "", "" };
    // argv[3] = "E:\\zghassabi\\Project1-DIE\\Dataset\\image 1";                                      // training image feature vector
    // argv[4] = "E:\\zghassabi\\Project1-DIE\\Dataset\\image 1\\DABAB8D0-gt7.tif";                    // ground truth for training
    // argv[5] = "E:\\zghassabi\\Project1-DIE\\Dataset\\image 1";                                      // testing image feature vector
    // argv[6] = "E:\\zghassabi\\Project1-DIE\\Dataset\\image 1\\DABAB8D0-gt7.tif";                    // testing ground truth for evaluation
    // argv[7] = "E:\\zghassabi\\Project1-DIE\\Dataset\\image 1\\mask_DABAB8D0.tif";                   // testing image
    // argv[7] = "E:\\zghassabi\\Project1-DIE\\Dataset\\test image 1\\DABAB8D0-Adjacentl.tif";         // testing image
    // argv[8] = "E:\\zghassabi\\Project1-DIE\\Dataset\\test image 1\\CRF_output_adjacent7class.tif";  // output image
    // argv[4] = "E:\\zghassabi\\Project1-DIE\\Dataset\\amd1\\anchor\\8CE84370_GT.tif";                // 6-class ground truth for training
    argv[4] = "E:\\zghassabi\\Project1-DIE\\Dataset\\amd1\\anchor\\8CE84370_GTregionsandlayers.tif";   // ground truth for training
    // argv[6] = "E:\\zghassabi\\Project1-DIE\\Dataset\\amd1\\anchor\\8CE84370_GT.tif";                // 6-class testing ground truth
    argv[6] = "E:\\zghassabi\\Project1-DIE\\Dataset\\amd1\\anchor\\8CE84370_GTregionsandlayers.tif";   // testing ground truth for evaluation
    argv[7] = "E:\\zghassabi\\Project1-DIE\\Dataset\\amd1\\adjacent1\\8D031E70_adjBscan_resized.tif";  // testing image (anchor image: 8CE84370_Anchor_resized.tif)
    argv[8] = "E:\\zghassabi\\Project1-DIE\\Dataset\\amd1\\adjacent1\\8CE84370_DGM_Output_layerclassificationNodemodel(3)-EdgeModel(2)37FGTRegionsLayers.tif"; // output image

    // Reading parameters and images
    int nodeModel = 3;   // node training model (3 = CTrainNodeKNN; see the switch below)
    int edgeModel = 2;   // edge training model (2 = CTrainEdgePottsCS; see the switch below)
    // Mat train_fv = imread(argv[3], 1); resize(train_fv, train_fv, imgSize, 0, 0, INTER_LANCZOS4);   // training image feature vector (now loaded from CSV below)
    Mat train_gt = imread(argv[4], 0); resize(train_gt, train_gt, imgSize, 0, 0, INTER_NEAREST);       // ground truth for training
    // Mat test_fv = imread(argv[5], 1); resize(test_fv, test_fv, imgSize, 0, 0, INTER_LANCZOS4);      // testing image feature vector (now loaded from CSV below)
    Mat test_gt  = imread(argv[6], 0); resize(test_gt,  test_gt,  imgSize, 0, 0, INTER_NEAREST);       // ground truth for evaluation
    Mat test_img = imread(argv[7], 1); resize(test_img, test_img, imgSize, 0, 0, INTER_LANCZOS4);      // testing image
    //--- training image feature vector -------------------------------------------------------------
    Mat       FeatureSets;
    vec_mat_t channels;
    Ptr<ml::TrainData> raw_data = ml::TrainData::loadFromCSV("E:\\zghassabi\\Project1-DIE\\Dataset\\amd1\\anchor\\All_features_Anchorimage.csv", 0);
    // previously: "E:\\zghassabi\\Project1-DIE\\Dataset\\image 1\\All_features.csv"
    Mat data1 = raw_data->getSamples();
    Mat data2 = raw_data->getResponses();
    hconcat(data1, data2, data1);      // re-attach the last CSV column, which loadFromCSV() split off as the response
    Mat data;
    data1.convertTo(data, CV_8UC1);
    printf("data.channels = %d\n", data.channels());
    printf("data.rows = %d\n", data.rows);
    printf("data.cols = %d\n", data.cols);
    printf("data.type = %d\n", data.type());
    for (int i = 0; i < nFeatures; i++) {                       // each feature occupies a block of `width` consecutive columns
        Mat m = data.colRange(i * width, i * width + width);    // slice out one feature channel
        channels.push_back(m);
    }
    merge(channels, FeatureSets);
    Mat train_fv = FeatureSets; resize(train_fv, train_fv, imgSize, 0, 0, INTER_LANCZOS4);
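    // NOTE: my assumption about the CSV layout, based on the loop above: the file holds
    // `height` rows and about nFeatures * width columns, i.e. each feature is stored as a
    // contiguous block of `width` columns, so colRange() slices out one feature channel.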
    //--- testing image feature vector --------------------------------------------------------------
    // Mat test_fv = train_fv;
    Mat       FeatureSetst;
    vec_mat_t channelst;
    Ptr<ml::TrainData> raw_data2 = ml::TrainData::loadFromCSV("E:\\zghassabi\\Project1-DIE\\Dataset\\amd1\\adjacent1\\All_features_testimage.csv", 0);
    // previously: "E:\\zghassabi\\Project1-DIE\\Dataset\\test image 1\\All_features_testimage.csv"
    Mat data1t = raw_data2->getSamples();
    Mat data2t = raw_data2->getResponses();
    hconcat(data1t, data2t, data1t);   // re-attach the response column, as above
    Mat datat;
    data1t.convertTo(datat, CV_8UC1);
    printf("\n");
    printf("datat.channels = %d\n", datat.channels());
    printf("datat.rows = %d\n", datat.rows);
    printf("datat.cols = %d\n", datat.cols);
    printf("datat.type = %d\n", datat.type());
    for (int i = 0; i < nFeatures; i++) {                       // same column layout as the training CSV
        Mat m = datat.colRange(i * width, i * width + width);
        channelst.push_back(m);
    }
    merge(channelst, FeatureSetst);
    Mat test_fv = FeatureSetst; resize(test_fv, test_fv, imgSize, 0, 0, INTER_LANCZOS4);
    //------------------------------------------------------------------------------------------------
    printf("test image: %i\n", test_img.rows);
    printf("train feature vector: %i\n", train_fv.rows);
    printf("train gt: %i\n", train_gt.rows);
    //------------------------------------------------------------------------------------------------
    CTrainNode *nodeTrainer = NULL;
    CTrainEdge *edgeTrainer = NULL;
    CGraphExt  *graph       = new CGraphExt(nStates);
    CInfer     *decoder     = new CInferLBP(graph);
    CMarker    *marker      = new CMarker(DEF_PALETTE_6);       // NB: a 6-color palette; colors may repeat when nStates = 12
    CCMat      *confMat     = new CCMat(nStates);
    float  params[] = { 10, 0.01f };                            // pairwise-potential parameters; their meaning depends on the edge model
    size_t params_len;

    switch (nodeModel) {
        case 0: nodeTrainer = new CTrainNodeNaiveBayes(nStates, nFeatures); break;
        case 1: nodeTrainer = new CTrainNodeGMM(nStates, nFeatures); break;
        case 2: nodeTrainer = new CTrainNodeCvGMM(nStates, nFeatures); break;
        case 3: nodeTrainer = new CTrainNodeKNN(nStates, nFeatures); break;
        case 4: nodeTrainer = new CTrainNodeCvKNN(nStates, nFeatures); break;
        case 5: nodeTrainer = new CTrainNodeCvRF(nStates, nFeatures); break;
#ifdef USE_SHERWOOD
        case 6: nodeTrainer = new CTrainNodeMsRF(nStates, nFeatures); break;
#endif
        case 7: nodeTrainer = new CTrainNodeCvANN(nStates, nFeatures); break;
        default: printf("Unknown node_training_model is given\n"); return 0;
    }
    switch (edgeModel) {
        case 0: params[0] = 1;   // emulate "No edges" with a Potts weight of 1; intentional fall-through
        case 1: edgeTrainer = new CTrainEdgePotts(nStates, nFeatures);   params_len = 1; break;
        case 2: edgeTrainer = new CTrainEdgePottsCS(nStates, nFeatures); params_len = 2; break;
        case 3: edgeTrainer = new CTrainEdgePrior(nStates, nFeatures);   params_len = 2; break;
        case 4:
            edgeTrainer = new CTrainEdgeConcat<CTrainNodeNaiveBayes, CDiffFeaturesConcatenator>(nStates, nFeatures);
            params_len = 1;
            break;
        default: printf("Unknown edge_training_model is given\n"); return 0;
    }
    // ==================== STAGE 1: Building the graph ====================
    Timer::start("Building the Graph... ");
    graph->build(imgSize);
    Timer::stop();

    // ========================= STAGE 2: Training =========================
    Timer::start("Training... ");
    // Node training (compact notation)
    nodeTrainer->addFeatureVec(train_fv, train_gt);
    // Edge training (comprehensive notation): add feature-vector pairs for the left and upper neighbors
    Mat featureVector1(nFeatures, 1, CV_8UC1);
    Mat featureVector2(nFeatures, 1, CV_8UC1);
    for (int y = 1; y < height; y++) {
        byte *pFv1 = train_fv.ptr<byte>(y);
        byte *pFv2 = train_fv.ptr<byte>(y - 1);
        byte *pGt1 = train_gt.ptr<byte>(y);
        byte *pGt2 = train_gt.ptr<byte>(y - 1);
        for (int x = 1; x < width; x++) {
            for (word f = 0; f < nFeatures; f++) featureVector1.at<byte>(f, 0) = pFv1[nFeatures * x + f];       // featureVector1 = fv[x][y]
            for (word f = 0; f < nFeatures; f++) featureVector2.at<byte>(f, 0) = pFv1[nFeatures * (x - 1) + f]; // featureVector2 = fv[x-1][y]
            edgeTrainer->addFeatureVecs(featureVector1, pGt1[x], featureVector2, pGt1[x - 1]);
            edgeTrainer->addFeatureVecs(featureVector2, pGt1[x - 1], featureVector1, pGt1[x]);
            for (word f = 0; f < nFeatures; f++) featureVector2.at<byte>(f, 0) = pFv2[nFeatures * x + f];       // featureVector2 = fv[x][y-1]
            edgeTrainer->addFeatureVecs(featureVector1, pGt1[x], featureVector2, pGt2[x]);
            edgeTrainer->addFeatureVecs(featureVector2, pGt2[x], featureVector1, pGt1[x]);
        } // x
    } // y
    nodeTrainer->train();
    edgeTrainer->train();
    Timer::stop();
    // ==================== STAGE 3: Filling the Graph =====================
    Timer::start("Filling the Graph... ");
    Mat nodePotentials = nodeTrainer->getNodePotentials(test_fv);   // classification: CV_32FC(nStates) <- CV_8UC(nFeatures)
    Serialize::to("E:\\zghassabi\\Project1-DIE\\Dataset\\amd1\\adjacent1\\fileName7class.dat", nodePotentials);   // <<<< THIS LINE
    // Serialize::to("E:\\zghassabi\\Project1-DIE\\DGM-master\\Data\\fileName.dat", nodePotentials);
    graph->setNodes(nodePotentials);                                // filling-in the graph nodes
    graph->fillEdges(edgeTrainer, test_fv, params, params_len);     // filling-in the graph edges with pairwise potentials
    Timer::stop();
    // resize(nodePotentials, nodePotentials, imgSize, 0, 0, INTER_LANCZOS4);
    // imshow("nodePotentials", nodePotentials);
    // cvWaitKey(0);

    // ========================= STAGE 4: Decoding =========================
    Timer::start("Decoding... ");
    vec_byte_t optimalDecoding = decoder->decode(100);              // 100 iterations of loopy belief propagation
    Timer::stop();

    // ====================== Evaluation =======================
    Mat solution(imgSize, CV_8UC1, optimalDecoding.data());
    confMat->estimate(test_gt, solution);                           // compare the solution with the ground truth
    Mat res = test_img.clone();                                     // copy of the test image for visualization
    marker->markClasses(res, solution);
    imshow("Solution", res);
    imwrite(argv[8], res);
    cvWaitKey(0);
    // imshow("Solution", solution);   // raw label map (for ROI)
    // imwrite(argv[8], solution);
    // cvWaitKey(0);

    char str[255];
    sprintf(str, "Accuracy = %.2f%%", confMat->getAccuracy());
    printf("%s\n", str);

    // ====================== Visualization =======================
    marker->markClasses(test_img, solution);
    rectangle(test_img, Point(width - 160, height - 18), Point(width, height), CV_RGB(0, 0, 0), -1);
    putText(test_img, str, Point(width - 155, height - 5), FONT_HERSHEY_SIMPLEX, 0.45, CV_RGB(225, 240, 255), 1, CV_AA);
    imwrite(argv[8], test_img);
    imshow("Image", test_img);
    cvWaitKey(0);

    // ============================== my code ==============================
    // Mat res2 = colorize<byte>(solution);
    // imshow("Solution", res2);
    // imwrite(argv[8], res2);
    // cvWaitKey(0);

    return 0;
}