Regarding the `interpolate` function in util2d: what is special about the way bilinear interpolation is implemented here?
/**
 * @brief Upsample a depth image by an integer factor using edge-aware bilinear interpolation.
 *
 * For CV_32FC1 / CV_16UC1 depth images with factor > 1, interpolation is done
 * manually (instead of cv::resize) so that:
 *  - cells whose four source neighbors are not all valid (non-zero) stay 0,
 *    i.e. invalid depth is never invented from partial data;
 *  - cells spanning a depth discontinuity (neighbor depths differing by more
 *    than depthErrorRatio * mean depth) stay 0 instead of being smoothed
 *    across the edge, which would create phantom depths between surfaces.
 * For other types or factor <= 1, a plain cv::resize is used.
 *
 * @param image           input image; depth semantics apply to CV_32FC1/CV_16UC1
 * @param factor          upsampling factor (expected >= 1)
 * @param depthErrorRatio maximum relative depth difference between the four
 *                        source neighbors for interpolation to be performed
 * @return the upsampled image (empty if input is empty; zeros where depth
 *         could not be safely interpolated)
 */
cv::Mat interpolate(const cv::Mat & image, int factor, float depthErrorRatio)
{
//UASSERT_MSG(factor >= 1, uFormat("factor=%d", factor).c_str());
cv::Mat out;
if(!image.empty())
{
	if(factor > 1)
	{
		if((image.type() == CV_32FC1 || image.type()==CV_16UC1))
		{
			//UASSERT(depthErrorRatio>0.0f);
			// Start from zeros: any cell not explicitly filled below remains
			// "invalid depth" in the output.
			out = cv::Mat::zeros(image.rows*factor, image.cols*factor, image.type());
			// Iterate over output grid points that map exactly onto source pixels;
			// each (j, i) closes a factor x factor cell whose corners are the
			// four source neighbors.
			for(int j=0; j<out.rows; j+=factor)
			{
				for(int i=0; i<out.cols; i+=factor)
				{
					if(i>0 && j>0)
					{
						// Read the four corner depths of the current cell.
						float dTopLeft;
						float dTopRight;
						float dBottomLeft;
						float dBottomRight;
						if(image.type() == CV_32FC1)
						{
							dTopLeft = image.at<float>(j/factor-1, i/factor-1);
							dTopRight = image.at<float>(j/factor-1, i/factor);
							dBottomLeft = image.at<float>(j/factor, i/factor-1);
							dBottomRight = image.at<float>(j/factor, i/factor);
						}
						else
						{
							dTopLeft = image.at<unsigned short>(j/factor-1, i/factor-1);
							dTopRight = image.at<unsigned short>(j/factor-1, i/factor);
							dBottomLeft = image.at<unsigned short>(j/factor, i/factor-1);
							dBottomRight = image.at<unsigned short>(j/factor, i/factor);
						}
						// Only interpolate if all four corners hold valid depth...
						if(dTopLeft>0 && dTopRight>0 && dBottomLeft>0 && dBottomRight > 0)
						{
							// ...and they agree within depthErrorRatio of their mean,
							// i.e. the cell does not straddle a depth discontinuity.
							float depthError = depthErrorRatio*(dTopLeft+dTopRight+dBottomLeft+dBottomRight)/4.0f;
							if(fabs(dTopLeft-dTopRight) <= depthError &&
								fabs(dTopLeft-dBottomLeft) <= depthError &&
								fabs(dTopLeft-dBottomRight) <= depthError)
							{
								// bilinear interpolation
								// do first and last rows then columns
								float slopeTop = (dTopRight-dTopLeft)/float(factor);
								float slopeBottom = (dBottomRight-dBottomLeft)/float(factor);
								if(image.type() == CV_32FC1)
								{
									for(int z=i-factor; z<=i; ++z)
									{
										out.at<float>(j-factor, z) = dTopLeft+(slopeTop*float(z-(i-factor)));
										out.at<float>(j, z) = dBottomLeft+(slopeBottom*float(z-(i-factor)));
									}
								}
								else
								{
									for(int z=i-factor; z<=i; ++z)
									{
										out.at<unsigned short>(j-factor, z) = (unsigned short)(dTopLeft+(slopeTop*float(z-(i-factor))));
										out.at<unsigned short>(j, z) = (unsigned short)(dBottomLeft+(slopeBottom*float(z-(i-factor))));
									}
								}
								// fill the columns between the two interpolated rows
								if(image.type() == CV_32FC1)
								{
									for(int z=i-factor; z<=i; ++z)
									{
										float top = out.at<float>(j-factor, z);
										float bottom = out.at<float>(j, z);
										float slope = (bottom-top)/float(factor);
										for(int d=j-factor+1; d<j; ++d)
										{
											out.at<float>(d, z) = top+(slope*float(d-(j-factor)));
										}
									}
								}
								else
								{
									for(int z=i-factor; z<=i; ++z)
									{
										float top = out.at<unsigned short>(j-factor, z);
										float bottom = out.at<unsigned short>(j, z);
										float slope = (bottom-top)/float(factor);
										for(int d=j-factor+1; d<j; ++d)
										{
											out.at<unsigned short>(d, z) = (unsigned short)(top+(slope*float(d-(j-factor))));
										}
									}
								}
							}
						}
					}
				}
			}
		}
		else
		{
			// Non-depth types: validity/discontinuity checks don't apply,
			// so a standard resize is sufficient.
			cv::resize(image, out, cv::Size(), float(factor), float(factor));
		}
	}
	else
	{
		out = image;
	}
}
return out;
}
Why is the bilinear interpolation in this util2d function of rtabmap implemented manually? I understand that one would want to check the depth-error ratio, but why isn't that check performed separately, with cv::resize then used for the interpolation as the standard path — instead of cv::resize being used only when the factor is not greater than one or the image is of a type other than the two depth types mentioned?