我使用cv::EM算法对图像流进行高斯混合模型分类。然而,当使用EM::predict方法将像素分类到不同的模型时,我发现它太慢了,一张600x800的图像需要大约3秒。另一方面,OpenCV提供的MOG background subtractor执行同样的步骤却非常快,只用了大约30毫秒。因此,我决定用它的实现方式来替换EM::predict这一部分。然而,我不知道该如何修改。
我正在使用的prediction部件的代码如下:
// Convert the source image to float and build an N x 3 sample matrix
// (one row per pixel, one column per colour channel) for EM training.
cv::Mat floatSource;
source.convertTo ( floatSource, CV_32F );
cv::Mat samples ( source.rows * source.cols, 3, CV_32FC1 );
int idx = 0;
for ( int y = 0; y < source.rows; y ++ )
{
    cv::Vec3f* row = floatSource.ptr <cv::Vec3f> (y);
    for ( int x = 0; x < source.cols; x ++ )
    {
        // NOTE(review): filling via at<cv::Vec3f> on a 1-channel matrix is the
        // pattern criticised in the answer below; floatSource.reshape(1, N)
        // achieves the same in O(1) without copying.
        samples.at<cv::Vec3f> ( idx++, 0 ) = row[x];
    }
}
cv::EMParams params(2); // number of mixtures we use is 2 here
cv::ExpectationMaximization em ( samples, cv::Mat(), params );
cv::Mat means = em.getMeans();
// Fixed: was declared 'weight' but used as 'weights' on the next line.
cv::Mat weights = em.getWeights();
// Pick the heavier mixture as foreground (fixed the 'flaot' typo).
const int fgId = weights.at<float>(0) > weights.at<float>(1) ? 0 : 1;
idx = 0;
for ( int y = 0; y < source.rows; y ++ )
{
    for ( int x = 0; x < source.cols; x ++ )
    {
        // Fixed: the original call was missing a closing parenthesis.
        const int result = cvRound ( em.predict ( samples.row ( idx++ ), NULL ) );
    }
}我从cvbgfg_gaussmix.cpp中找到的、打算用来替换EM predict的那部分代码如下所示:
// Per-pixel mixture-of-Gaussians update + classification for an 8-bit,
// 3-channel frame, quoted from OpenCV's cvbgfg_gaussmix.cpp.
// For each pixel: find the first mixture component it matches, update that
// component's weight/mean/variance with learning rate 'alpha', keep the
// components sorted by sortKey (weight / sigma), write 0xFF to fgmask where
// the pixel is NOT explained by the background portion T of the weights,
// then renormalise the weights to sum to 1.
static void process8uC3 ( BackgroundSubtractorMOG& obj, const Mat& image, Mat& fgmask, double learningRate )
{
    int x, y, k, k1, rows = image.rows, cols = image.cols;
    float alpha = (float)learningRate, T = (float)obj.backgroundRatio, vT = (float)obj.varThreshold;
    int K = obj.nmixtures;
    const float w0 = (float)CV_BGFG_MOG_WEIGHT_INIT;
    const float sk0 = (float)(CV_BGFG_MOG_WEIGHT_INIT/CV_BGFG_MOG_SIGMA_INIT);
    const float var0 = (float)(CV_BGFG_MOG_SIGMA_INIT*CV_BGFG_MOG_SIGMA_INIT);
    for ( y = 0; y < rows; y ++ )
    {
        const uchar* src = image.ptr<uchar>(y);
        uchar* dst = fgmask.ptr<uchar>(y);
        MixData<Vec3f>* mptr = (MixData<Vec3f>*)obj.bgmodel.ptr(y);
        for ( x = 0; x < cols; x++, mptr += K )
        {
            float wsum = 0, dw = 0;
            Vec3f pix ( src[x*3], src[x*3+1], src[x*3+2] );
            for ( k = 0; k < K; k ++ )
            {
                float w = mptr[k].weight;
                Vec3f mu = mptr[k].mean[0];
                Vec3f var = mptr[k].var[0];
                Vec3f diff = pix - mu;
                float d2 = diff.dot(diff);
                // Fixed: the transcribed condition was missing its closing ')'.
                if ( d2 < vT * ( var[0] + var[1] + var[2] ) )
                {
                    dw = alpha * ( 1.f - w );
                    mptr[k].weight = w + dw;
                    mptr[k].mean = mu + alpha * diff;
                    // Fixed: first term must be diff[0]*diff[0] (the transcription
                    // had diff[0]*diff[1]) and the second max() was missing a ')'.
                    var = Vec3f ( max ( var[0] + alpha * ( diff[0]*diff[0] - var[0] ), FLT_EPSILON ),
                                  max ( var[1] + alpha * ( diff[1]*diff[1] - var[1] ), FLT_EPSILON ),
                                  max ( var[2] + alpha * ( diff[2]*diff[2] - var[2] ), FLT_EPSILON ) );
                    mptr[k].var = var;
                    mptr[k].sortKey = w/sqrt ( var[0] + var[1] + var[2] );
                    // Bubble the updated component up so the mixtures stay
                    // sorted by sortKey in decreasing order.
                    for ( k1 = k-1; k1 >= 0; k1-- )
                    {
                        if ( mptr[k1].sortKey > mptr[k1+1].sortKey )
                            break;
                        std::swap ( mptr[k1], mptr[k1+1] );
                    }
                    break;
                }
                wsum += w;
            }
            // Foreground (255) when the matched component lies outside the
            // background fraction T of the accumulated weight.
            dst[x] = (uchar) (-(wsum >= T ));
            wsum += dw;
            if ( k == K )
            {
                // No component matched: replace the weakest (last) component
                // with a fresh one centred on this pixel.
                wsum += w0 - mptr[K-1].weight;
                mptr[K-1].weight = w0;   // fixed: was mptr[k-1]; use K-1 consistently
                mptr[K-1].mean = pix;
                mptr[K-1].var = Vec3f ( var0, var0, var0 );
                mptr[K-1].sortKey = sk0;
            }
            else
                for ( ; k < K; k ++ )
                    wsum += mptr[k].weight;
            // Renormalise so all component weights sum to 1.
            dw = 1.f/wsum;
            for ( k = 0; k < K; k ++ )
            {
                mptr[k].weight *= dw;
                mptr[k].sortKey *= dw;
            }
        }
    }
}如何修改这段代码,使它能在我的第一段代码中替代em.predict部分?提前谢谢。
更新
我自己这样做是为了在代码中使用process8uC3函数:
// Corrected stand-alone adaptation of OpenCV's process8uC3 (cvbgfg_gaussmix.cpp).
// Fixes over the first attempt: the per-pixel mixture state gets a properly
// sized, zero-initialised buffer (the original 8UC3 bgImg held only 3 bytes
// per pixel and was massively overrun — the likely cause of the garbage
// output), the inner per-component loop that was lost in transcription is
// restored, 'pix'/'wsum'/'dw' are declared, and the weight renormalisation
// runs for every pixel instead of being merged into the 'else' branch.
double learningRate = 0.001;
int x, y, k, k1;
int rows = sourceMat.rows; // source opencv matrix
int cols = sourceMat.cols; // source opencv matrix
float alpha = (float) learningRate;
float T = 2.0f;   // background-ratio threshold
float vT = 0.30f; // variance (match) threshold
int K = 3;        // number of mixture components per pixel
const float w0 = (float) CV_BGFG_MOG_WEIGHT_INIT; // fixed 'WEIGTH' typo
const float sk0 = (float) (CV_BGFG_MOG_WEIGHT_INIT/CV_BGFG_MOG_SIGMA_INIT);
const float var0 = (float) (CV_BGFG_MOG_SIGMA_INIT*CV_BGFG_MOG_SIGMA_INIT);
const float minVar = FLT_EPSILON;
// Foreground mask is one byte per pixel (was wrongly CV_8UC3).
cv::Mat fgImg ( rows, cols, CV_8UC1 );
// One MixData<Vec3f> per component per pixel, zeroed so unused components
// start with zero weight (the original bgImg was far too small to hold this).
cv::Mat bgModel = cv::Mat::zeros ( rows, cols * K * (int)sizeof(MixData<cv::Vec3f>), CV_8U );
for ( y = 0; y < rows; y ++ )
{
    const uchar* src = sourceMat.ptr < uchar > ( y ); // fixed: was stored in const char*
    uchar* dst = fgImg.ptr < uchar > ( y );
    MixData<cv::Vec3f>* mptr = (MixData<cv::Vec3f>*)bgModel.ptr ( y );
    for ( x = 0; x < cols; x ++, mptr += K )
    {
        float wsum = 0, dw = 0; // fixed: were used without being declared
        cv::Vec3f pix ( src[x*3], src[x*3+1], src[x*3+2] ); // fixed: 'pix' was never defined
        // Fixed: this per-component loop was missing in the first attempt.
        for ( k = 0; k < K; k ++ )
        {
            float w = mptr[k].weight;
            cv::Vec3f mu = mptr[k].mean[0]; // fixed 'mpptr' typo
            cv::Vec3f var = mptr[k].var[0];
            cv::Vec3f diff = pix - mu;
            float d2 = diff.dot ( diff );
            if ( d2 < vT * ( var[0] + var[1] + var[2] ) )
            {
                // Matched: blend the pixel into this component.
                dw = alpha * ( 1.f - w );
                mptr[k].weight = w + dw;
                mptr[k].mean = mu + alpha * diff;
                var = cv::Vec3f ( max ( var[0] + alpha*(diff[0]*diff[0]-var[0]), minVar ),
                                  max ( var[1] + alpha*(diff[1]*diff[1]-var[1]), minVar ),
                                  max ( var[2] + alpha*(diff[2]*diff[2]-var[2]), minVar ) );
                mptr[k].var = var;
                mptr[k].sortKey = w/sqrt ( var[0] + var[1] + var[2] );
                // Keep components sorted by sortKey in decreasing order.
                for ( k1 = k-1; k1 >= 0; k1 -- )
                {
                    if ( mptr[k1].sortKey > mptr[k1+1].sortKey )
                        break;
                    std::swap ( mptr[k1], mptr[k1+1] );
                }
                break;
            }
            wsum += w;
        }
        // Foreground when the matched component falls outside the background
        // fraction T of the accumulated weight.
        dst[x] = (uchar) (-(wsum >= T ));
        wsum += dw;
        if ( k == K )
        {
            // No match: replace the weakest component with a new one
            // centred on this pixel.
            wsum += w0 - mptr[K-1].weight;
            mptr[K-1].weight = w0;
            mptr[K-1].mean = pix;
            mptr[K-1].var = cv::Vec3f ( var0, var0, var0 );
            mptr[K-1].sortKey = sk0;
        }
        else
            for ( ; k < K; k ++ )
                wsum += mptr[k].weight;
        // Fixed: renormalisation must run for EVERY pixel; the first attempt
        // had it inside the 'else' branch and multiplied by an uninitialised dw.
        dw = 1.f/wsum;
        for ( k = 0; k < K; k ++ )
        {
            mptr[k].weight *= dw;
            mptr[k].sortKey *= dw;
        }
    }
}它编译时没有报错,但结果完全是错乱的。我怀疑这可能与T和vT的取值有关,于是换了几组其他的值,但没有任何区别。所以我相信,即使它能无错误地编译,我也用错了它。
发布于 2012-10-24 21:41:29
不是直接回答您的问题,而是对您的代码进行一些评论:
// (Verbatim quote of the questioner's sampling loop — kept as-is because the
// answer below discusses its flaw: only the first of the 3 columns is filled,
// and per-pixel at<> access is slow compared to a single reshape().)
int idx = 0;
for ( int y = 0; y < source.rows; y ++ )
{
cv::Vec3f* row = floatSource.ptr <cv::Vec3f> (y);
for ( int x = 0; x < source.cols; x ++ )
{
samples.at<cv::Vec3f> ( idx++, 0 ) = row[x];
}
}
}我的猜测是,这里您希望创建一个矩阵,每个像素占一行、共3列,用来存储像素的RGB (或其他所用颜色空间)值。首先,您的样本矩阵初始化有误,因为您遗漏了对图像通道的循环,代码中只填充了第一个通道。不过无论如何,您都可以通过调用reshape来完成同样的事情:
cv::Mat samples = floatSource.reshape(1, source.rows*source.cols)这不仅能修复您的错误,还能加快处理速度,因为用Mat::at<>访问像素并不快,而reshape是O(1)操作——底层矩阵数据不变,只改变行数/列数/通道数。
其次,您可以将完整的样本矩阵一次性传给EM::predict,而不是逐个样本调用,这样能节省一些时间。目前您是逐行调用EM::predict,其实只需调用一次;此外每行都调用一次Mat::row(),这会创建一个临时矩阵(矩阵头)。
进一步加速的一种方法是并行化对predict的调用,例如使用OpenCV所采用的TBB (您编译OpenCV时启用了TBB吗?也许predict已经是多线程的了,值得检查一下)。
发布于 2012-10-31 11:41:49
看看GrabCut在OpenCV中的源代码:opencv/modules/imgproc/src/grabcut.cpp。该文件中有一个私有类GMM (实现了高斯混合模型的训练和样本分类)。GMM的初始化使用k-均值。如果您需要更快的初始化,可以尝试k-means++算法(请参阅modules/core/src/matrix.cpp中的generateCentersPP函数)。
https://stackoverflow.com/questions/13050904
复制相似问题