Compiling and Implementing the SIFT Algorithm - Implementing SIFT in C Step by Step, Part 2


This article continues from "Implementing SIFT in C Step by Step, Part 1":

Writing the Functions

OK, picking up where Part 1 left off, let's write, one by one, every function that the main function calls; this is the core of this article.
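
The listings below lean on a number of constants, data structures and globals that were defined in Part 1 (GAUSSKERN, SCALESPEROCTAVE, ImageOctaves, the keypoints list, and so on). As a reading aid, here is a minimal sketch of what they look like, reconstructed purely from how they are used in this part; the numeric values marked as placeholders are assumptions, so check Part 1 for the authoritative definitions:

    /* Reconstructed from usage in this article; entries marked "placeholder"
       are assumptions, see Part 1 for the real definitions. */
    #include <stdio.h>
    #include <stdlib.h>
    #include <math.h>
    #include "cv.h"
    #include "highgui.h"

    #define max(a,b)               ((a) > (b) ? (a) : (b))
    #define min(a,b)               ((a) < (b) ? (a) : (b))
    #define PI                     CV_PI

    #define GAUSSKERN              3.5   /* placeholder: kernel radius in sigmas      */
    #define INITSIGMA              0.5   /* placeholder: blur assumed in the input    */
    #define SCALESPEROCTAVE        2     /* placeholder: S, scales per octave         */
    #define MAXOCTAVES             4     /* placeholder: cap on the number of octaves */
    #define CONTRAST_THRESHOLD     0.02  /* value quoted in the text below            */
    #define CURVATURE_THRESHOLD    10.0  /* value quoted in the text below            */
    #define DOUBLE_BASE_IMAGE_SIZE 1     /* 1 = double the input before building      */
    #define peakRelThresh          0.8   /* placeholder: secondary-orientation ratio  */
    #define GridSpacing            4     /* 4x4 descriptor grid (see step 5)          */
    #define LEN                    128   /* descriptor length, 4*4*8                  */
    #define NUMSIZE                2     /* placeholder: halvings used for display    */

    typedef struct ImageSt {             /* one blurred image (one level)             */
        float levelsigma;                /* sigma relative to this octave             */
        int   levelsigmalength;          /* kernel length returned by BlurImage()     */
        float absolute_sigma;            /* sigma in original-image coordinates       */
        CvMat *Level;                    /* the image itself                          */
    } ImageLevels;

    typedef struct ImageSt1 {            /* one octave of the pyramid                 */
        int row, col;                    /* image size inside this octave             */
        float subsample;                 /* sampling rate w.r.t. the original image   */
        ImageLevels *Octave;             /* array of levels                           */
    } ImageOctaves;

    typedef struct KeypointSt {
        float row, col;                  /* position in the original image            */
        float sx, sy;                    /* column/row inside the octave              */
        int   octave, level;             /* where the point was detected              */
        float scale, ori, mag;           /* absolute scale, orientation, peak value   */
        float *descrip;                  /* 128-dimensional descriptor                */
        struct KeypointSt *next;         /* simple singly linked list                 */
    } *Keypoint;

    /* globals referenced by the functions below */
    ImageOctaves *DOGoctaves, *mag_pyr, *grad_pyr;
    Keypoint keypoints = NULL, keyDescriptors = NULL;
    int numoctaves;
    CvMemStorage *storage;

With that out of the way, here are the helper functions, one by one: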

  1. //Downsample the input image and return an image half the size
  2. CvMat * halfSizeImage(CvMat * im)
  3. {
  4. unsigned int i,j;
  5. int w = im->cols/2;
  6. int h = im->rows/2;
  7. CvMat *imnew = cvCreateMat(h, w, CV_32FC1);
  8. #define Im(ROW,COL) ((float *)(im->data.fl + im->step/sizeof(float) *(ROW)))[(COL)]
  9. #define Imnew(ROW,COL) ((float *)(imnew->data.fl + imnew->step/sizeof(float) *(ROW)))[(COL)]
  10. for ( j = 0; j < h; j++)
  11. for ( i = 0; i < w; i++)
  12. Imnew(j,i)=Im(j*2, i*2);
  13. return imnew;
  14. }
  15. //Upsample the input image and return an image twice the size
  16. CvMat * doubleSizeImage(CvMat * im)
  17. {
  18. unsigned int i,j;
  19. int w = im->cols*2;
  20. int h = im->rows*2;
  21. CvMat *imnew = cvCreateMat(h, w, CV_32FC1);
  22. #define Im(ROW,COL) ((float *)(im->data.fl + im->step/sizeof(float) *(ROW)))[(COL)]
  23. #define Imnew(ROW,COL) ((float *)(imnew->data.fl + imnew->step/sizeof(float) *(ROW)))[(COL)]
  24. for ( j = 0; j < h; j++)
  25. for ( i = 0; i < w; i++)
  26. Imnew(j,i)=Im(j/2, i/2);
  27. return imnew;
  28. }
  29. //Upsample the input image by a factor of two, with linear interpolation
  30. CvMat * doubleSizeImage2(CvMat * im)
  31. {
  32. unsigned int i,j;
  33. int w = im->cols*2;
  34. int h = im->rows*2;
  35. CvMat *imnew = cvCreateMat(h, w, CV_32FC1);
  36. #define Im(ROW,COL) ((float *)(im->data.fl + im->step/sizeof(float) *(ROW)))[(COL)]
  37. #define Imnew(ROW,COL) ((float *)(imnew->data.fl + imnew->step/sizeof(float) *(ROW)))[(COL)]
  38. // fill every pixel so we don't have to worry about skipping pixels later
  39. for ( j = 0; j < h; j++)
  40. {
  41. for ( i = 0; i < w; i++)
  42. {
  43. Imnew(j,i)=Im(j/2, i/2);
  44. }
  45. }
  46. /*
  47. A B C
  48. E F G
  49. H I J
  50. pixels A C H J are pixels from original image
  51. pixels B E G I F are interpolated pixels
  52. */
  53. // interpolate pixels B and I
  54. for ( j = 0; j < h; j += 2)
  55. for ( i = 1; i < w - 1; i += 2)
  56. Imnew(j,i)=0.5*(Im(j/2, i/2)+Im(j/2, i/2+1));
  57. // interpolate pixels E and G
  58. for ( j = 1; j < h - 1; j += 2)
  59. for ( i = 0; i < w; i += 2)
  60. Imnew(j,i)=0.5*(Im(j/2, i/2)+Im(j/2+1, i/2));
  61. // interpolate pixel F
  62. for ( j = 1; j < h - 1; j += 2)
  63. for ( i = 1; i < w - 1; i += 2)
  64. Imnew(j,i)=0.25*(Im(j/2, i/2)+Im(j/2+1, i/2)+Im(j/2, i/2+1)+Im(j/2+1, i/2+1));
  65. return imnew;
  66. }
  67. //Bilinear interpolation: return the grey value between pixels
  68. float getPixelBI(CvMat * im, float col, float row)
  69. {
  70. int irow, icol;
  71. float rfrac, cfrac;
  72. float row1 = 0, row2 = 0;
  73. int width=im->cols;
  74. int height=im->rows;
  75. #define ImMat(ROW,COL) ((float *)(im->data.fl + im->step/sizeof(float) *(ROW)))[(COL)]
  76. irow = (int) row;
  77. icol = (int) col;
  78. if (irow < 0 || irow >= height
  79. || icol < 0 || icol >= width)
  80. return 0;
  81. if (row > height - 1)
  82. row = height - 1;
  83. if (col > width - 1)
  84. col = width - 1;
  85. rfrac = 1.0 - (row - (float) irow);
  86. cfrac = 1.0 - (col - (float) icol);
  87. if (cfrac < 1)
  88. {
  89. row1 = cfrac * ImMat(irow,icol) + (1.0 - cfrac) * ImMat(irow,icol+1);
  90. }
  91. else
  92. {
  93. row1 = ImMat(irow,icol);
  94. }
  95. if (rfrac < 1)
  96. {
  97. if (cfrac < 1)
  98. {
  99. row2 = cfrac * ImMat(irow+1,icol) + (1.0 - cfrac) * ImMat(irow+1,icol+1);
  100. } else
  101. {
  102. row2 = ImMat(irow+1,icol);
  103. }
  104. }
  105. return rfrac * row1 + (1.0 - rfrac) * row2;
  106. }
  107. //Normalize a matrix
  108. void normalizeMat(CvMat* mat)
  109. {
  110. #define Mat(ROW,COL) ((float *)(mat->data.fl + mat->step/sizeof(float) *(ROW)))[(COL)]
  111. float sum = 0;
  112. for (unsigned int j = 0; j < mat->rows; j++)
  113. for (unsigned int i = 0; i < mat->cols; i++)
  114. sum += Mat(j,i);
  115. for (unsigned int j = 0; j < mat->rows; j++)
  116. for (unsigned int i = 0; i < mat->cols; i++)
  117. Mat(j,i) /= sum;
  118. }
  119. //Normalize a vector
  120. void normalizeVec(float* vec, int dim)
  121. {
  122. unsigned int i;
  123. float sum = 0;
  124. for ( i = 0; i < dim; i++)
  125. sum += vec[i];
  126. for ( i = 0; i < dim; i++)
  127. vec[i] /= sum;
  128. }
  129. //Euclidean length (2-norm) of a vector
  130. float GetVecNorm( float* vec, int dim )
  131. {
  132. float sum=0.0;
  133. for (unsigned int i=0;i<dim;i++)
  134. sum+=vec[i]*vec[i];
  135. return sqrt(sum);
  136. }
  137. //Build a 1-D Gaussian kernel
  138. float* GaussianKernel1D(float sigma, int dim)
  139. {
  140. unsigned int i;
  141. //printf("GaussianKernel1D(): Creating 1x%d vector for sigma=%.3f gaussian kernel/n", dim, sigma);
  142. float *kern=(float*)malloc( dim*sizeof(float) );
  143. float s2 = sigma * sigma;
  144. int c = dim / 2;
  145. float m= 1.0/(sqrt(2.0 * CV_PI) * sigma);
  146. double v;
  147. for ( i = 0; i < (dim + 1) / 2; i++)
  148. {
  149. v = m * exp(-(1.0*i*i)/(2.0 * s2)) ;
  150. kern[c+i] = v;
  151. kern[c-i] = v;
  152. }
  153. // normalizeVec(kern, dim);
  154. // for ( i = 0; i < dim; i++)
  155. // printf("%f ", kern[i]);
  156. // printf("/n");
  157. return kern;
  158. }
  159. //Build a 2-D Gaussian kernel matrix
  160. CvMat* GaussianKernel2D(float sigma)
  161. {
  162. // int dim = (int) max(3.0f, GAUSSKERN * sigma);
  163. int dim = (int) max(3.0f, 2.0 * GAUSSKERN *sigma + 1.0f);
  164. // make dim odd
  165. if (dim % 2 == 0)
  166. dim++;
  167. //printf("GaussianKernel(): Creating %dx%d matrix for sigma=%.3f gaussian/n", dim, dim, sigma);
  168. CvMat* mat=cvCreateMat(dim, dim, CV_32FC1);
  169. #define Mat(ROW,COL) ((float *)(mat->data.fl + mat->step/sizeof(float) *(ROW)))[(COL)]
  170. float s2 = sigma * sigma;
  171. int c = dim / 2;
  172. //printf("%d %d/n", mat.size(), mat[0].size());
  173. float m= 1.0/(sqrt(2.0 * CV_PI) * sigma);
  174. for (int i = 0; i < (dim + 1) / 2; i++)
  175. {
  176. for (int j = 0; j < (dim + 1) / 2; j++)
  177. {
  178. //printf("%d %d %d/n", c, i, j);
  179. float v = m * exp(-(1.0*i*i + 1.0*j*j) / (2.0 * s2));
  180. Mat(c+i,c+j) =v;
  181. Mat(c-i,c+j) =v;
  182. Mat(c+i,c-j) =v;
  183. Mat(c-i,c-j) =v;
  184. }
  185. }
  186. // normalizeMat(mat);
  187. return mat;
  188. }
  189. //Convolve at one pixel along the x direction
  190. float ConvolveLocWidth(float* kernel, int dim, CvMat * src, int x, int y)
  191. {
  192. #define Src(ROW,COL) ((float *)(src->data.fl + src->step/sizeof(float) *(ROW)))[(COL)]
  193. unsigned int i;
  194. float pixel = 0;
  195. int col;
  196. int cen = dim / 2;
  197. //printf("ConvolveLoc(): Applying convoluation at location (%d, %d)/n", x, y);
  198. for ( i = 0; i < dim; i++)
  199. {
  200. col = x + (i - cen);
  201. if (col < 0)
  202. col = 0;
  203. if (col >= src->cols)
  204. col = src->cols - 1;
  205. pixel += kernel[i] * Src(y,col);
  206. }
  207. if (pixel > 1)
  208. pixel = 1;
  209. return pixel;
  210. }
  211. //Convolve along the x direction
  212. void Convolve1DWidth(float* kern, int dim, CvMat * src, CvMat * dst)
  213. {
  214. #define DST(ROW,COL) ((float *)(dst->data.fl + dst->step/sizeof(float) *(ROW)))[(COL)]
  215. unsigned int i,j;
  216. for ( j = 0; j < src->rows; j++)
  217. {
  218. for ( i = 0; i < src->cols; i++)
  219. {
  220. //printf("%d, %d/n", i, j);
  221. DST(j,i) = ConvolveLocWidth(kern, dim, src, i, j);
  222. }
  223. }
  224. }
  225. //Convolve at one pixel along the y direction
  226. float ConvolveLocHeight(float* kernel, int dim, CvMat * src, int x, int y)
  227. {
  228. #define Src(ROW,COL) ((float *)(src->data.fl + src->step/sizeof(float) *(ROW)))[(COL)]
  229. unsigned int j;
  230. float pixel = 0;
  231. int cen = dim / 2;
  232. //printf("ConvolveLoc(): Applying convoluation at location (%d, %d)/n", x, y);
  233. for ( j = 0; j < dim; j++)
  234. {
  235. int row = y + (j - cen);
  236. if (row < 0)
  237. row = 0;
  238. if (row >= src->rows)
  239. row = src->rows - 1;
  240. pixel += kernel[j] * Src(row,x);
  241. }
  242. if (pixel > 1)
  243. pixel = 1;
  244. return pixel;
  245. }
  246. //Convolve along the y direction
  247. void Convolve1DHeight(float* kern, int dim, CvMat * src, CvMat * dst)
  248. {
  249. #define Dst(ROW,COL) ((float *)(dst->data.fl + dst->step/sizeof(float) *(ROW)))[(COL)]
  250. unsigned int i,j;
  251. for ( j = 0; j < src->rows; j++)
  252. {
  253. for ( i = 0; i < src->cols; i++)
  254. {
  255. //printf("%d, %d/n", i, j);
  256. Dst(j,i) = ConvolveLocHeight(kern, dim, src, i, j);
  257. }
  258. }
  259. }
  260. //Blur an image by convolution
  261. int BlurImage(CvMat * src, CvMat * dst, float sigma)
  262. {
  263. float* convkernel;
  264. int dim = (int) max(3.0f, 2.0 * GAUSSKERN * sigma + 1.0f);
  265. CvMat *tempMat;
  266. // make dim odd
  267. if (dim % 2 == 0)
  268. dim++;
  269. tempMat = cvCreateMat(src->rows, src->cols, CV_32FC1);
  270. convkernel = GaussianKernel1D(sigma, dim);
  271. Convolve1DWidth(convkernel, dim, src, tempMat);
  272. Convolve1DHeight(convkernel, dim, tempMat, dst);
  273. cvReleaseMat(&tempMat);
  274. return dim;
  275. }
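
Two quick notes on the helpers above. getPixelBI samples the image at a fractional position with bilinear weights: for illustration, getPixelBI(im, 5.7f, 2.3f) gives icol = 5, irow = 2, cfrac = 0.3, rfrac = 0.7, so it returns 0.7*(0.3*I(2,5) + 0.7*I(2,6)) + 0.3*(0.3*I(3,5) + 0.7*I(3,6)), that is, the four neighbours weighted by how close the sample point lies to each. And BlurImage implements the 2-D Gaussian blur as two 1-D passes (first along x into a temporary matrix, then along y), which is equivalent to the full 2-D convolution but much cheaper. A minimal usage sketch, assuming a CV_32FC1 matrix already scaled to [0,1] (remember that ConvolveLocWidth/ConvolveLocHeight clamp their result at 1.0):

    /* Illustrative only: blur a single bright pixel and look at the centre value. */
    void blur_demo(void)
    {
        CvMat *src = cvCreateMat(64, 64, CV_32FC1);
        CvMat *dst = cvCreateMat(64, 64, CV_32FC1);
        cvZero(src);
        cvZero(dst);
        cvmSet(src, 32, 32, 1.0);               /* one bright pixel in the middle  */

        int klen = BlurImage(src, dst, 1.6f);   /* two 1-D passes with sigma = 1.6 */
        printf("kernel length = %d, centre after blur = %f\n",
               klen, cvmGet(dst, 32, 32));

        cvReleaseMat(&src);
        cvReleaseMat(&dst);
    }

One thing worth noticing: BlurImage never frees the buffer returned by GaussianKernel1D, so in a long-running program you would want to add a free(convkernel) before it returns.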

The Five Steps

OK, now for the key part: following the steps of the SIFT algorithm introduced in Part 1, let's implement these functions one by one.

For clarity, here are the five steps again as they appear in the main function, which also helps reinforce how the SIFT algorithm is organised:

1. SIFT step 1: image pre-processing

CvMat *ScaleInitImage(CvMat * im) ; //initialize the base of the pyramid

2. SIFT step 2: build the Gaussian pyramid

ImageOctaves* BuildGaussianOctaves(CvMat * image) ; //build the Gaussian pyramid

3. SIFT step 3: keypoint detection, finally fixing the keypoint locations

int DetectKeypoint(int numoctaves, ImageOctaves *GaussianPyr);

4. SIFT step 4: compute the gradient direction and magnitude of the Gaussian images, and the dominant orientation of each keypoint

void ComputeGrad_DirecandMag(int numoctaves, ImageOctaves *GaussianPyr);

5. SIFT step 5: extract the feature descriptor at each keypoint

void ExtractFeatureDescriptors(int numoctaves, ImageOctaves *GaussianPyr);

OK, now let's implement each of these functions in turn.
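
Condensed from the main function listed at the end of this article, the five steps chain together like this (a reading aid only, not new code):

    CvMat *base = ScaleInitImage(image1Mat);                      /* step 1 */
    ImageOctaves *Gaussianpyr = BuildGaussianOctaves(base);       /* step 2 */
    int keycount = DetectKeypoint(numoctaves, Gaussianpyr);       /* step 3 */
    ComputeGrad_DirecandMag(numoctaves, Gaussianpyr);             /* step 4: gradients   */
    AssignTheMainOrientation(numoctaves, Gaussianpyr,
                             mag_pyr, grad_pyr);                  /* step 4: orientation */
    ExtractFeatureDescriptors(numoctaves, Gaussianpyr);           /* step 5 */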

SIFT Step 1

SIFT step 1: enlarge the image and pre-filter it to suppress noise, producing the bottom level of the pyramid, that is, the first level of the first octave:

  1. CvMat *ScaleInitImage(CvMat * im)
  2. {
  3. double sigma,preblur_sigma;
  4. CvMat *imMat;
  5. CvMat * dst;
  6. CvMat *tempMat;
  7. //首先对图像进行平滑滤波,抑制噪声
  8. imMat = cvCreateMat(im->rows, im->cols, CV_32FC1);
  9. BlurImage(im, imMat, INITSIGMA);
  10. //针对两种情况分别进行处理:初始化放大原始图像或者在原图像基础上进行后续操作
  11. //建立金字塔的最底层
  12. if (DOUBLE_BASE_IMAGE_SIZE)
  13. {
  14. tempMat = doubleSizeImage2(imMat);//对扩大两倍的图像进行二次采样,采样率为0.5,采用线性插值
  15. #define TEMPMAT(ROW,COL) ((float *)(tempMat->data.fl + tempMat->step/sizeof(float) * (ROW)))[(COL)]
  16. dst = cvCreateMat(tempMat->rows, tempMat->cols, CV_32FC1);
  17. preblur_sigma = 1.0;//sqrt(2 - 4*INITSIGMA*INITSIGMA);
  18. BlurImage(tempMat, dst, preblur_sigma);
  19. // The initial blurring for the first image of the first octave of the pyramid.
  20. sigma = sqrt( (4*INITSIGMA*INITSIGMA) + preblur_sigma * preblur_sigma );
  21. // sigma = sqrt(SIGMA * SIGMA - INITSIGMA * INITSIGMA * 4);
  22. //printf("Init Sigma: %f/n", sigma);
  23. BlurImage(dst, tempMat, sigma); //得到金字塔的最底层-放大2倍的图像
  24. cvReleaseMat( &dst );
  25. return tempMat;
  26. }
  27. else
  28. {
  29. dst = cvCreateMat(im->rows, im->cols, CV_32FC1);
  30. //sigma = sqrt(SIGMA * SIGMA - INITSIGMA * INITSIGMA);
  31. preblur_sigma = 1.0;//sqrt(2 - 4*INITSIGMA*INITSIGMA);
  32. sigma = sqrt( (4*INITSIGMA*INITSIGMA) + preblur_sigma * preblur_sigma );
  33. //printf("Init Sigma: %f/n", sigma);
  34. BlurImage(imMat, dst, sigma); //得到金字塔的最底层:原始图像大小
  35. return dst;
  36. }
  37. }
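
A note on the sigma arithmetic above: Gaussian blurs combine in quadrature, so an image that already carries a blur of sigma_a, blurred again with sigma_b, ends up with sqrt(sigma_a^2 + sigma_b^2). Doubling the image also doubles whatever blur it already has, which is why the code treats the doubled base as carrying 2*INITSIGMA and combines it with the extra pre-blur as sigma = sqrt((2*INITSIGMA)^2 + preblur_sigma^2). If INITSIGMA is 0.5 (its usual value; see the placeholder sketch at the top), that gives sqrt(1.0 + 1.0) = sqrt(2), which is exactly the initial_sigma = sqrt(2) that BuildGaussianOctaves assumes for the bottom level of the first octave in the next step.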

SIFT Step 2

SIFT step 2: build the Gaussian pyramid. Given the first level of the first octave, compute the images at the remaining scales; the number of levels per octave is set by SCALESPEROCTAVE. Given a base image, the function computes its Gaussian pyramid and returns it: the outer array indexes the octaves, and the inner array holds the different scales within each octave.

  1. //SIFT step 2
  2. ImageOctaves* BuildGaussianOctaves(CvMat * image)
  3. {
  4. ImageOctaves *octaves;
  5. CvMat *tempMat;
  6. CvMat *dst;
  7. CvMat *temp;
  8. int i,j;
  9. double k = pow(2, 1.0/((float)SCALESPEROCTAVE)); //方差倍数
  10. float preblur_sigma, initial_sigma , sigma1,sigma2,sigma,absolute_sigma,sigma_f;
  11. //计算金字塔的阶梯数目
  12. int dim = min(image->rows, image->cols);
  13. int numoctaves = (int) (log((double) dim) / log(2.0)) - 2; //金字塔阶数
  14. //限定金字塔的阶梯数
  15. numoctaves = min(numoctaves, MAXOCTAVES);
  16. //为高斯金塔和DOG金字塔分配内存
  17. octaves=(ImageOctaves*) malloc( numoctaves * sizeof(ImageOctaves) );
  18. DOGoctaves=(ImageOctaves*) malloc( numoctaves * sizeof(ImageOctaves) );
  19. printf("BuildGaussianOctaves(): Base image dimension is %dx%d/n", (int)(0.5*(image->cols)), (int)(0.5*(image->rows)) );
  20. printf("BuildGaussianOctaves(): Building %d octaves/n", numoctaves);
  21. // start with initial source image
  22. tempMat=cvCloneMat( image );
  23. // preblur_sigma = 1.0;//sqrt(2 - 4*INITSIGMA*INITSIGMA);
  24. initial_sigma = sqrt(2);//sqrt( (4*INITSIGMA*INITSIGMA) + preblur_sigma * preblur_sigma );
  25. // initial_sigma = sqrt(SIGMA * SIGMA - INITSIGMA * INITSIGMA * 4);
  26. //在每一阶金字塔图像中建立不同的尺度图像
  27. for ( i = 0; i < numoctaves; i++)
  28. {
  29. //首先建立金字塔每一阶梯的最底层,其中0阶梯的最底层已经建立好
  30. printf("Building octave %d of dimesion (%d, %d)/n", i, tempMat->cols,tempMat->rows);
  31. //为各个阶梯分配内存
  32. octaves[i].Octave= (ImageLevels*) malloc( (SCALESPEROCTAVE + 3) * sizeof(ImageLevels) );
  33. DOGoctaves[i].Octave= (ImageLevels*) malloc( (SCALESPEROCTAVE + 2) * sizeof(ImageLevels) );
  34. //存储各个阶梯的最底层
  35. (octaves[i].Octave)[0].Level=tempMat;
  36. octaves[i].col=tempMat->cols;
  37. octaves[i].row=tempMat->rows;
  38. DOGoctaves[i].col=tempMat->cols;
  39. DOGoctaves[i].row=tempMat->rows;
  40. if (DOUBLE_BASE_IMAGE_SIZE)
  41. octaves[i].subsample=pow(2,i)*0.5;
  42. else
  43. octaves[i].subsample=pow(2,i);
  44. if(i==0)
  45. {
  46. (octaves[0].Octave)[0].levelsigma = initial_sigma;
  47. (octaves[0].Octave)[0].absolute_sigma = initial_sigma;
  48. printf("0 scale and blur sigma : %f /n", (octaves[0].subsample) * ((octaves[0].Octave)[0].absolute_sigma));
  49. }
  50. else
  51. {
  52. (octaves[i].Octave)[0].levelsigma = (octaves[i-1].Octave)[SCALESPEROCTAVE].levelsigma;
  53. (octaves[i].Octave)[0].absolute_sigma = (octaves[i-1].Octave)[SCALESPEROCTAVE].absolute_sigma;
  54. printf( "0 scale and blur sigma : %f /n", ((octaves[i].Octave)[0].absolute_sigma) );
  55. }
  56. sigma = initial_sigma;
  57. //建立本阶梯其他层的图像
  58. for ( j = 1; j < SCALESPEROCTAVE + 3; j++)
  59. {
  60. dst = cvCreateMat(tempMat->rows, tempMat->cols, CV_32FC1);//用于存储高斯层
  61. temp = cvCreateMat(tempMat->rows, tempMat->cols, CV_32FC1);//用于存储DOG层
  62. // 2 passes of 1D on original
  63. // if(i!=0)
  64. // {
  65. // sigma1 = pow(k, j - 1) * ((octaves[i-1].Octave)[j-1].levelsigma);
  66. // sigma2 = pow(k, j) * ((octaves[i].Octave)[j-1].levelsigma);
  67. // sigma = sqrt(sigma2*sigma2 - sigma1*sigma1);
  68. sigma_f= sqrt(k*k-1)*sigma;
  69. // }
  70. // else
  71. // {
  72. // sigma = sqrt(SIGMA * SIGMA - INITSIGMA * INITSIGMA * 4)*pow(k,j);
  73. // }
  74. sigma = k*sigma;
  75. absolute_sigma = sigma * (octaves[i].subsample);
  76. printf("%d scale and Blur sigma: %f /n", j, absolute_sigma);
  77. (octaves[i].Octave)[j].levelsigma = sigma;
  78. (octaves[i].Octave)[j].absolute_sigma = absolute_sigma;
  79. //产生高斯层
  80. int length=BlurImage((octaves[i].Octave)[j-1].Level, dst, sigma_f);//相应尺度
  81. (octaves[i].Octave)[j].levelsigmalength = length;
  82. (octaves[i].Octave)[j].Level=dst;
  83. //产生DOG层
  84. cvSub( ((octaves[i].Octave)[j]).Level, ((octaves[i].Octave)[j-1]).Level, temp, 0 );
  85. // cvAbsDiff( ((octaves[i].Octave)[j]).Level, ((octaves[i].Octave)[j-1]).Level, temp );
  86. ((DOGoctaves[i].Octave)[j-1]).Level=temp;
  87. }
  88. // halve the image size for next iteration
  89. tempMat = halfSizeImage( ( (octaves[i].Octave)[SCALESPEROCTAVE].Level ) );
  90. }
  91. return octaves;
  92. }
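
The blur schedule inside each octave is worth spelling out. Each octave stores SCALESPEROCTAVE + 3 Gaussian levels, so that SCALESPEROCTAVE + 2 DoG levels exist and each of the middle SCALESPEROCTAVE levels has a neighbour above and below for extrema detection. Consecutive levels differ by the factor k = 2^(1/SCALESPEROCTAVE), and because blurs add in quadrature, going from a level with blur sigma to one with blur k*sigma only needs an incremental blur of sigma_f = sigma*sqrt(k^2 - 1), which is exactly what the loop above computes. A tiny standalone sketch of that schedule (SCALESPEROCTAVE = 2 is used here purely as an example value):

    /* Print the within-octave blur schedule used by BuildGaussianOctaves().
       S = 2 is an example value only; use whatever Part 1 defines. */
    #include <stdio.h>
    #include <math.h>

    int main(void)
    {
        const int    S     = 2;
        const double k     = pow(2.0, 1.0 / S);   /* scale ratio between levels  */
        double       sigma = sqrt(2.0);           /* initial_sigma of the octave */
        int          j;

        for (j = 1; j < S + 3; j++)
        {
            /* (k*sigma)^2 = sigma^2 + sigma_f^2  =>  sigma_f = sigma*sqrt(k^2 - 1) */
            double sigma_f = sqrt(k * k - 1.0) * sigma;
            sigma *= k;                           /* total blur of level j       */
            printf("level %d: incremental sigma_f = %.4f, total sigma = %.4f\n",
                   j, sigma_f, sigma);
        }
        return 0;
    }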

SIFT Step 3

SIFT step 3: keypoint detection, finally fixing the keypoint locations. Detect the local extrema of the DoG pyramid; once found, a candidate must pass two further tests before it is accepted as a keypoint: it must have sufficient contrast, and it must not be an edge point (in other words, the ratio of principal curvatures at the extremum must be below a threshold).

  1. //SIFT step 3: keypoint detection
  2. int DetectKeypoint(int numoctaves, ImageOctaves *GaussianPyr)
  3. {
  4. //计算用于DOG极值点检测的主曲率比的阈值
  5. double curvature_threshold;
  6. curvature_threshold= ((CURVATURE_THRESHOLD + 1)*(CURVATURE_THRESHOLD + 1))/CURVATURE_THRESHOLD;
  7. #define ImLevels(OCTAVE,LEVEL,ROW,COL) ((float *)(DOGoctaves[(OCTAVE)].Octave[(LEVEL)].Level->data.fl + DOGoctaves[(OCTAVE)].Octave[(LEVEL)].Level->step/sizeof(float) *(ROW)))[(COL)]
  8. int keypoint_count = 0;
  9. for (int i=0; i<numoctaves; i++)
  10. {
  11. for(int j=1;j<SCALESPEROCTAVE+1;j++)//取中间的scaleperoctave个层
  12. {
  13. //在图像的有效区域内寻找具有显著性特征的局部最大值
  14. //float sigma=(GaussianPyr[i].Octave)[j].levelsigma;
  15. //int dim = (int) (max(3.0f, 2.0*GAUSSKERN *sigma + 1.0f)*0.5);
  16. int dim = (int)(0.5*((GaussianPyr[i].Octave)[j].levelsigmalength)+0.5);
  17. for (int m=dim;m<((DOGoctaves[i].row)-dim);m++)
  18. for(int n=dim;n<((DOGoctaves[i].col)-dim);n++)
  19. {
  20. if ( fabs(ImLevels(i,j,m,n))>= CONTRAST_THRESHOLD )
  21. {
  22. if ( ImLevels(i,j,m,n)!=0.0 ) //1、首先是非零
  23. {
  24. float inf_val=ImLevels(i,j,m,n);
  25. if(( (inf_val <= ImLevels(i,j-1,m-1,n-1))&&
  26. (inf_val <= ImLevels(i,j-1,m ,n-1))&&
  27. (inf_val <= ImLevels(i,j-1,m+1,n-1))&&
  28. (inf_val <= ImLevels(i,j-1,m-1,n ))&&
  29. (inf_val <= ImLevels(i,j-1,m ,n ))&&
  30. (inf_val <= ImLevels(i,j-1,m+1,n ))&&
  31. (inf_val <= ImLevels(i,j-1,m-1,n+1))&&
  32. (inf_val <= ImLevels(i,j-1,m ,n+1))&&
  33. (inf_val <= ImLevels(i,j-1,m+1,n+1))&& //底层的小尺度9
  34. (inf_val <= ImLevels(i,j,m-1,n-1))&&
  35. (inf_val <= ImLevels(i,j,m ,n-1))&&
  36. (inf_val <= ImLevels(i,j,m+1,n-1))&&
  37. (inf_val <= ImLevels(i,j,m-1,n ))&&
  38. (inf_val <= ImLevels(i,j,m+1,n ))&&
  39. (inf_val <= ImLevels(i,j,m-1,n+1))&&
  40. (inf_val <= ImLevels(i,j,m ,n+1))&&
  41. (inf_val <= ImLevels(i,j,m+1,n+1))&& //当前层8
  42. (inf_val <= ImLevels(i,j+1,m-1,n-1))&&
  43. (inf_val <= ImLevels(i,j+1,m ,n-1))&&
  44. (inf_val <= ImLevels(i,j+1,m+1,n-1))&&
  45. (inf_val <= ImLevels(i,j+1,m-1,n ))&&
  46. (inf_val <= ImLevels(i,j+1,m ,n ))&&
  47. (inf_val <= ImLevels(i,j+1,m+1,n ))&&
  48. (inf_val <= ImLevels(i,j+1,m-1,n+1))&&
  49. (inf_val <= ImLevels(i,j+1,m ,n+1))&&
  50. (inf_val <= ImLevels(i,j+1,m+1,n+1)) //下一层大尺度9
  51. ) ||
  52. ( (inf_val >= ImLevels(i,j-1,m-1,n-1))&&
  53. (inf_val >= ImLevels(i,j-1,m ,n-1))&&
  54. (inf_val >= ImLevels(i,j-1,m+1,n-1))&&
  55. (inf_val >= ImLevels(i,j-1,m-1,n ))&&
  56. (inf_val >= ImLevels(i,j-1,m ,n ))&&
  57. (inf_val >= ImLevels(i,j-1,m+1,n ))&&
  58. (inf_val >= ImLevels(i,j-1,m-1,n+1))&&
  59. (inf_val >= ImLevels(i,j-1,m ,n+1))&&
  60. (inf_val >= ImLevels(i,j-1,m+1,n+1))&&
  61. (inf_val >= ImLevels(i,j,m-1,n-1))&&
  62. (inf_val >= ImLevels(i,j,m ,n-1))&&
  63. (inf_val >= ImLevels(i,j,m+1,n-1))&&
  64. (inf_val >= ImLevels(i,j,m-1,n ))&&
  65. (inf_val >= ImLevels(i,j,m+1,n ))&&
  66. (inf_val >= ImLevels(i,j,m-1,n+1))&&
  67. (inf_val >= ImLevels(i,j,m ,n+1))&&
  68. (inf_val >= ImLevels(i,j,m+1,n+1))&&
  69. (inf_val >= ImLevels(i,j+1,m-1,n-1))&&
  70. (inf_val >= ImLevels(i,j+1,m ,n-1))&&
  71. (inf_val >= ImLevels(i,j+1,m+1,n-1))&&
  72. (inf_val >= ImLevels(i,j+1,m-1,n ))&&
  73. (inf_val >= ImLevels(i,j+1,m ,n ))&&
  74. (inf_val >= ImLevels(i,j+1,m+1,n ))&&
  75. (inf_val >= ImLevels(i,j+1,m-1,n+1))&&
  76. (inf_val >= ImLevels(i,j+1,m ,n+1))&&
  77. (inf_val >= ImLevels(i,j+1,m+1,n+1))
  78. ) ) //2、满足26个中极值点
  79. {
  80. //此处可存储
  81. //然后必须具有明显的显著性,即必须大于CONTRAST_THRESHOLD=0.02
  82. if ( fabs(ImLevels(i,j,m,n))>= CONTRAST_THRESHOLD )
  83. {
  84. //最后显著处的特征点必须具有足够的曲率比,CURVATURE_THRESHOLD=10.0,首先计算Hessian矩阵
  85. // Compute the entries of the Hessian matrix at the extrema location.
  86. /*
  87. 1 0 -1
  88. 0 0 0
  89. -1 0 1 *0.25
  90. */
  91. // Compute the trace and the determinant of the Hessian.
  92. //Tr_H = Dxx + Dyy;
  93. //Det_H = Dxx*Dyy - Dxy^2;
  94. float Dxx,Dyy,Dxy,Tr_H,Det_H,curvature_ratio;
  95. Dxx = ImLevels(i,j,m,n-1) + ImLevels(i,j,m,n+1)-2.0*ImLevels(i,j,m,n);
  96. Dyy = ImLevels(i,j,m-1,n) + ImLevels(i,j,m+1,n)-2.0*ImLevels(i,j,m,n);
  97. Dxy = ImLevels(i,j,m-1,n-1) + ImLevels(i,j,m+1,n+1) - ImLevels(i,j,m+1,n-1) - ImLevels(i,j,m-1,n+1);
  98. Tr_H = Dxx + Dyy;
  99. Det_H = Dxx*Dyy - Dxy*Dxy;
  100. // Compute the ratio of the principal curvatures.
  101. curvature_ratio = (1.0*Tr_H*Tr_H)/Det_H;
  102. if ( (Det_H>=0.0) && (curvature_ratio <= curvature_threshold) ) //最后得到最具有显著性特征的特征点
  103. {
  104. //将其存储起来,以计算后面的特征描述字
  105. keypoint_count++;
  106. Keypoint k;
  107. /* Allocate memory for the keypoint. */
  108. k = (Keypoint) malloc(sizeof(struct KeypointSt));
  109. k->next = keypoints;
  110. keypoints = k;
  111. k->row = m*(GaussianPyr[i].subsample);
  112. k->col =n*(GaussianPyr[i].subsample);
  113. k->sy = m; //行
  114. k->sx = n; //列
  115. k->octave=i;
  116. k->level=j;
  117. k->scale = (GaussianPyr[i].Octave)[j].absolute_sigma;
  118. }//if >curvature_thresh
  119. }//if >contrast
  120. }//if inf value
  121. }//if non zero
  122. }//if >contrast
  123. } //for concrete image level col
  124. }//for levels
  125. }//for octaves
  126. return keypoint_count;
  127. }
  128. //Draw the detected SIFT keypoint locations on the image
  129. void DisplayKeypointLocation(IplImage* image, ImageOctaves *GaussianPyr)
  130. {
  131. Keypoint p = keypoints; // p指向第一个结点
  132. while(p) // 没到表尾
  133. {
  134. cvLine( image, cvPoint((int)((p->col)-3),(int)(p->row)),
  135. cvPoint((int)((p->col)+3),(int)(p->row)), CV_RGB(255,255,0),
  136. 1, 8, 0 );
  137. cvLine( image, cvPoint((int)(p->col),(int)((p->row)-3)),
  138. cvPoint((int)(p->col),(int)((p->row)+3)), CV_RGB(255,255,0),
  139. 1, 8, 0 );
  140. // cvCircle(image,cvPoint((uchar)(p->col),(uchar)(p->row)),
  141. // (int)((GaussianPyr[p->octave].Octave)[p->level].absolute_sigma),
  142. // CV_RGB(255,0,0),1,8,0);
  143. p=p->next;
  144. }
  145. }
  146. // Compute the gradient direction and magnitude of the gaussian pyramid images
  147. void ComputeGrad_DirecandMag(int numoctaves, ImageOctaves *GaussianPyr)
  148. {
  149. // ImageOctaves *mag_thresh ;
  150. mag_pyr=(ImageOctaves*) malloc( numoctaves * sizeof(ImageOctaves) );
  151. grad_pyr=(ImageOctaves*) malloc( numoctaves * sizeof(ImageOctaves) );
  152. // float sigma=( (GaussianPyr[0].Octave)[SCALESPEROCTAVE+2].absolute_sigma ) / GaussianPyr[0].subsample;
  153. // int dim = (int) (max(3.0f, 2 * GAUSSKERN *sigma + 1.0f)*0.5+0.5);
  154. #define ImLevels(OCTAVE,LEVEL,ROW,COL) ((float *)(GaussianPyr[(OCTAVE)].Octave[(LEVEL)].Level->data.fl + GaussianPyr[(OCTAVE)].Octave[(LEVEL)].Level->step/sizeof(float) *(ROW)))[(COL)]
  155. for (int i=0; i<numoctaves; i++)
  156. {
  157. mag_pyr[i].Octave= (ImageLevels*) malloc( (SCALESPEROCTAVE) * sizeof(ImageLevels) );
  158. grad_pyr[i].Octave= (ImageLevels*) malloc( (SCALESPEROCTAVE) * sizeof(ImageLevels) );
  159. for(int j=1;j<SCALESPEROCTAVE+1;j++)//取中间的scaleperoctave个层
  160. {
  161. CvMat *Mag = cvCreateMat(GaussianPyr[i].row, GaussianPyr[i].col, CV_32FC1);
  162. CvMat *Ori = cvCreateMat(GaussianPyr[i].row, GaussianPyr[i].col, CV_32FC1);
  163. CvMat *tempMat1 = cvCreateMat(GaussianPyr[i].row, GaussianPyr[i].col, CV_32FC1);
  164. CvMat *tempMat2 = cvCreateMat(GaussianPyr[i].row, GaussianPyr[i].col, CV_32FC1);
  165. cvZero(Mag);
  166. cvZero(Ori);
  167. cvZero(tempMat1);
  168. cvZero(tempMat2);
  169. #define MAG(ROW,COL) ((float *)(Mag->data.fl + Mag->step/sizeof(float) *(ROW)))[(COL)]
  170. #define ORI(ROW,COL) ((float *)(Ori->data.fl + Ori->step/sizeof(float) *(ROW)))[(COL)]
  171. #define TEMPMAT1(ROW,COL) ((float *)(tempMat1->data.fl + tempMat1->step/sizeof(float) *(ROW)))[(COL)]
  172. #define TEMPMAT2(ROW,COL) ((float *)(tempMat2->data.fl + tempMat2->step/sizeof(float) *(ROW)))[(COL)]
  173. for (int m=1;m<(GaussianPyr[i].row-1);m++)
  174. for(int n=1;n<(GaussianPyr[i].col-1);n++)
  175. {
  176. //计算幅值
  177. TEMPMAT1(m,n) = 0.5*( ImLevels(i,j,m,n+1)-ImLevels(i,j,m,n-1) ); //dx
  178. TEMPMAT2(m,n) = 0.5*( ImLevels(i,j,m+1,n)-ImLevels(i,j,m-1,n) ); //dy
  179. MAG(m,n) = sqrt(TEMPMAT1(m,n)*TEMPMAT1(m,n)+TEMPMAT2(m,n)*TEMPMAT2(m,n)); //mag
  180. //计算方向
  181. ORI(m,n) = atan2( TEMPMAT2(m,n), TEMPMAT1(m,n) ); //atan2 gives the full [-PI, PI] range and avoids dividing by zero
  182. if (ORI(m,n)==CV_PI)
  183. ORI(m,n)=-CV_PI;
  184. }
  185. ((mag_pyr[i].Octave)[j-1]).Level=Mag;
  186. ((grad_pyr[i].Octave)[j-1]).Level=Ori;
  187. cvReleaseMat(&tempMat1);
  188. cvReleaseMat(&tempMat2);
  189. }//for levels
  190. }//for octaves
  191. }
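
Two details of the detection code above deserve a worked example. The edge test compares Tr_H^2 / Det_H against (r + 1)^2 / r with r = CURVATURE_THRESHOLD; with r = 10.0, as quoted above, the threshold is 11^2 / 10 = 12.1, so an extremum is kept only if its Hessian has a non-negative determinant and a trace-squared-to-determinant ratio of at most 12.1; anything more elongated than that is rejected as an edge response. And in ComputeGrad_DirecandMag the gradient is taken with central differences, dx = 0.5*(I(m, n+1) - I(m, n-1)) and dy = 0.5*(I(m+1, n) - I(m-1, n)), so the magnitude is sqrt(dx^2 + dy^2) and the orientation is the angle of the vector (dx, dy).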

SIFT Step 4

  1. //SIFT step 4: compute and assign the dominant orientation of each keypoint
  2. void AssignTheMainOrientation(int numoctaves, ImageOctaves *GaussianPyr,ImageOctaves *mag_pyr,ImageOctaves *grad_pyr)
  3. {
  4. // Set up the histogram bin centers for a 36 bin histogram.
  5. int num_bins = 36;
  6. float hist_step = 2.0*PI/num_bins;
  7. float hist_orient[36];
  8. for (int i=0;i<36;i++)
  9. hist_orient[i]=-PI+i*hist_step;
  10. float sigma1=( ((GaussianPyr[0].Octave)[SCALESPEROCTAVE].absolute_sigma) ) / (GaussianPyr[0].subsample);//SCALESPEROCTAVE+2
  11. int zero_pad = (int) (max(3.0f, 2 * GAUSSKERN *sigma1 + 1.0f)*0.5+0.5);
  12. //Assign orientations to the keypoints.
  13. #define ImLevels(OCTAVES,LEVELS,ROW,COL) ((float *)((GaussianPyr[(OCTAVES)].Octave[(LEVELS)].Level)->data.fl + (GaussianPyr[(OCTAVES)].Octave[(LEVELS)].Level)->step/sizeof(float) *(ROW)))[(COL)]
  14. int keypoint_count = 0;
  15. Keypoint p = keypoints; // p指向第一个结点
  16. while(p) // 没到表尾
  17. {
  18. int i=p->octave;
  19. int j=p->level;
  20. int m=p->sy; //行
  21. int n=p->sx; //列
  22. if ((m>=zero_pad)&&(m<GaussianPyr[i].row-zero_pad)&&
  23. (n>=zero_pad)&&(n<GaussianPyr[i].col-zero_pad) )
  24. {
  25. float sigma=( ((GaussianPyr[i].Octave)[j].absolute_sigma) ) / (GaussianPyr[i].subsample);
  26. //产生二维高斯模板
  27. CvMat* mat = GaussianKernel2D( sigma );
  28. int dim=(int)(0.5 * (mat->rows));
  29. //分配用于存储Patch幅值和方向的空间
  30. #define MAT(ROW,COL) ((float *)(mat->data.fl + mat->step/sizeof(float) *(ROW)))[(COL)]
  31. //声明方向直方图变量
  32. double* orienthist = (double *) malloc(36 * sizeof(double));
  33. for ( int sw = 0 ; sw < 36 ; ++sw)
  34. {
  35. orienthist[sw]=0.0;
  36. }
  37. //在特征点的周围统计梯度方向
  38. for (int x=m-dim,mm=0;x<=(m+dim);x++,mm++)
  39. for(int y=n-dim,nn=0;y<=(n+dim);y++,nn++)
  40. {
  41. //计算特征点处的幅值
  42. double dx = 0.5*(ImLevels(i,j,x,y+1)-ImLevels(i,j,x,y-1)); //dx
  43. double dy = 0.5*(ImLevels(i,j,x+1,y)-ImLevels(i,j,x-1,y)); //dy
  44. double mag = sqrt(dx*dx+dy*dy); //mag
  45. //计算方向
  46. double Ori = atan2( dy, dx ); //full-range orientation for the 36-bin histogram over [-PI, PI)
  47. int binIdx = FindClosestRotationBin(36, Ori); //得到离现有方向最近的直方块
  48. orienthist[binIdx] = orienthist[binIdx] + 1.0* mag * MAT(mm,nn);//利用高斯加权累加进直方图相应的块
  49. }
  50. // Find peaks in the orientation histogram using nonmax suppression.
  51. AverageWeakBins (orienthist, 36);
  52. // find the maximum peak in gradient orientation
  53. double maxGrad = 0.0;
  54. int maxBin = 0;
  55. for (int b = 0 ; b < 36 ; ++b)
  56. {
  57. if (orienthist[b] > maxGrad)
  58. {
  59. maxGrad = orienthist[b];
  60. maxBin = b;
  61. }
  62. }
  63. // First determine the real interpolated peak high at the maximum bin
  64. // position, which is guaranteed to be an absolute peak.
  65. double maxPeakValue=0.0;
  66. double maxDegreeCorrection=0.0;
  67. if ( (InterpolateOrientation ( orienthist[maxBin == 0 ? (36 - 1) : (maxBin - 1)],
  68. orienthist[maxBin], orienthist[(maxBin + 1) % 36],
  69. &maxDegreeCorrection, &maxPeakValue)) == false)
  70. printf("BUG: Parabola fitting broken");
  71. // Now that we know the maximum peak value, we can find other keypoint
  72. // orientations, which have to fulfill two criterias:
  73. //
  74. // 1. They must be a local peak themselves. Else we might add a very
  75. // similar keypoint orientation twice (imagine for example the
  76. // values: 0.4 1.0 0.8, if 1.0 is maximum peak, 0.8 is still added
  77. // with the default threshhold, but the maximum peak orientation
  78. // was already added).
  79. // 2. They must have at least peakRelThresh times the maximum peak
  80. // value.
  81. bool binIsKeypoint[36];
  82. for ( b = 0 ; b < 36 ; ++b)
  83. {
  84. binIsKeypoint[b] = false;
  85. // The maximum peak of course is
  86. if (b == maxBin)
  87. {
  88. binIsKeypoint[b] = true;
  89. continue;
  90. }
  91. // Local peaks are, too, in case they fulfill the threshhold
  92. if (orienthist[b] < (peakRelThresh * maxPeakValue))
  93. continue;
  94. int leftI = (b == 0) ? (36 - 1) : (b - 1);
  95. int rightI = (b + 1) % 36;
  96. if (orienthist[b] <= orienthist[leftI] || orienthist[b] <= orienthist[rightI])
  97. continue; // no local peak
  98. binIsKeypoint[b] = true;
  99. }
  100. // find other possible locations
  101. double oneBinRad = (2.0 * PI) / 36;
  102. for ( b = 0 ; b < 36 ; ++b)
  103. {
  104. if (binIsKeypoint[b] == false)
  105. continue;
  106. int bLeft = (b == 0) ? (36 - 1) : (b - 1);
  107. int bRight = (b + 1) % 36;
  108. // Get an interpolated peak direction and value guess.
  109. double peakValue;
  110. double degreeCorrection;
  111. double maxPeakValue, maxDegreeCorrection;
  112. if (InterpolateOrientation ( orienthist[bLeft],
  113. orienthist[b], orienthist[bRight],
  114. &degreeCorrection, &peakValue) == false)
  115. {
  116. printf("BUG: Parabola fitting broken");
  117. }
  118. double degree = (b + degreeCorrection) * oneBinRad - PI;
  119. if (degree < -PI)
  120. degree += 2.0 * PI;
  121. else if (degree > PI)
  122. degree -= 2.0 * PI;
  123. //存储方向,可以直接利用检测到的链表进行该步主方向的指定;
  124. //分配内存重新存储特征点
  125. Keypoint k;
  126. /* Allocate memory for the keypoint Descriptor. */
  127. k = (Keypoint) malloc(sizeof(struct KeypointSt));
  128. k->next = keyDescriptors;
  129. keyDescriptors = k;
  130. k->descrip = (float*)malloc(LEN * sizeof(float));
  131. k->row = p->row;
  132. k->col = p->col;
  133. k->sy = p->sy; //行
  134. k->sx = p->sx; //列
  135. k->octave = p->octave;
  136. k->level = p->level;
  137. k->scale = p->scale;
  138. k->ori = degree;
  139. k->mag = peakValue;
  140. }//for
  141. free(orienthist);
  142. }
  143. p=p->next;
  144. }
  145. }
  146. //Find the bin of the orientation histogram closest to the given angle and return its index
  147. int FindClosestRotationBin (int binCount, float angle)
  148. {
  149. angle += CV_PI;
  150. angle /= 2.0 * CV_PI;
  151. // calculate the aligned bin
  152. angle *= binCount;
  153. int idx = (int) angle;
  154. if (idx == binCount)
  155. idx = 0;
  156. return (idx);
  157. }
  158. // Average the content of the direction bins.
  159. void AverageWeakBins (double* hist, int binCount)
  160. {
  161. // TODO: make some tests what number of passes is the best. (its clear
  162. // one is not enough, as we may have something like
  163. // ( 0.4, 0.4, 0.3, 0.4, 0.4 ))
  164. for (int sn = 0 ; sn < 2 ; ++sn)
  165. {
  166. double firstE = hist[0];
  167. double last = hist[binCount-1];
  168. for (int sw = 0 ; sw < binCount ; ++sw)
  169. {
  170. double cur = hist[sw];
  171. double next = (sw == (binCount - 1)) ? firstE : hist[(sw + 1) % binCount];
  172. hist[sw] = (last + cur + next) / 3.0;
  173. last = cur;
  174. }
  175. }
  176. }
  177. // Fit a parabol to the three points (-1.0 ; left), (0.0 ; middle) and
  178. // (1.0 ; right).
  179. // Formulas:
  180. // f(x) = a (x - c)^2 + b
  181. // c is the peak offset (where f'(x) is zero), b is the peak value.
  182. // In case there is an error false is returned, otherwise a correction
  183. // value between [-1 ; 1] is returned in 'degreeCorrection', where -1
  184. // means the peak is located completely at the left vector, and -0.5 just
  185. // in the middle between left and middle and > 0 to the right side. In
  186. // 'peakValue' the maximum estimated peak value is stored.
  187. bool InterpolateOrientation (double left, double middle,double right, double *degreeCorrection, double *peakValue)
  188. {
  189. double a = ((left + right) - 2.0 * middle) / 2.0; //抛物线捏合系数a
  190. // degreeCorrection = peakValue = Double.NaN;
  191. // Not a parabol
  192. if (a == 0.0)
  193. return false;
  194. double c = (((left - middle) / a) - 1.0) / 2.0;
  195. double b = middle - c * c * a;
  196. if (c < -0.5 || c > 0.5)
  197. return false;
  198. *degreeCorrection = c;
  199. *peakValue = b;
  200. return true;
  201. }
  202. //Draw the dominant orientation at each keypoint
  203. void DisplayOrientation (IplImage* image, ImageOctaves *GaussianPyr)
  204. {
  205. Keypoint p = keyDescriptors; // p指向第一个结点
  206. while(p) // 没到表尾
  207. {
  208. float scale=(GaussianPyr[p->octave].Octave)[p->level].absolute_sigma;
  209. float autoscale = 3.0;
  210. float uu=autoscale*scale*cos(p->ori);
  211. float vv=autoscale*scale*sin(p->ori);
  212. float x=(p->col)+uu;
  213. float y=(p->row)+vv;
  214. cvLine( image, cvPoint((int)(p->col),(int)(p->row)),
  215. cvPoint((int)x,(int)y), CV_RGB(255,255,0),
  216. 1, 8, 0 );
  217. // Arrow head parameters
  218. float alpha = 0.33; // Size of arrow head relative to the length of the vector
  219. float beta = 0.33; // Width of the base of the arrow head relative to the length
  220. float xx0= (p->col)+uu-alpha*(uu+beta*vv);
  221. float yy0= (p->row)+vv-alpha*(vv-beta*uu);
  222. float xx1= (p->col)+uu-alpha*(uu-beta*vv);
  223. float yy1= (p->row)+vv-alpha*(vv+beta*uu);
  224. cvLine( image, cvPoint((int)xx0,(int)yy0),
  225. cvPoint((int)x,(int)y), CV_RGB(255,255,0),
  226. 1, 8, 0 );
  227. cvLine( image, cvPoint((int)xx1,(int)yy1),
  228. cvPoint((int)x,(int)y), CV_RGB(255,255,0),
  229. 1, 8, 0 );
  230. p=p->next;
  231. }
  232. }
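
To see what the parabola fit in InterpolateOrientation actually returns, take the 0.4 / 1.0 / 0.8 example from the comment inside AssignTheMainOrientation (illustrative numbers only):

    /* Illustrative check of the parabola fit, using the 0.4 / 1.0 / 0.8 example
       from the comment in AssignTheMainOrientation above. */
    void parabola_demo(void)
    {
        double corr, peak;
        if (InterpolateOrientation(0.4, 1.0, 0.8, &corr, &peak))
            printf("offset = %.3f bins, peak value = %.4f\n", corr, peak);
        /* a = ((0.4 + 0.8) - 2*1.0)/2 = -0.4
           c = (((0.4 - 1.0)/a) - 1)/2 = (1.5 - 1)/2 = 0.25
           b = 1.0 - 0.25*0.25*(-0.4)  = 1.025
           so the peak sits a quarter of a bin towards the 0.8 side, at height 1.025. */
    }

Since each of the 36 bins spans 10 degrees, a correction of 0.25 bins shifts the assigned orientation by 2.5 degrees.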

SIFT Step 5

SIFT step 5: extract the feature descriptor at each keypoint. The descriptor encodes the gradient orientations over a grid of patches around the keypoint; the grid is rotated to the dominant orientation, and the gradient values on it are obtained by interpolation.

A keypoint can then be described by a 2×2×8 = 32-dimensional vector, or, more precisely, by a 4×4×8 = 128-dimensional vector.
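
In the code below that 4×4×8 layout appears as a flat array of 128 floats indexed as feat_desc[cell*8 + bin]: cell runs over the 4*4 = 16 grid cells placed around the keypoint (GridSpacing = 4), and bin over the 8 orientation bins spaced PI/4 apart (this is where pos_wght[m*8+n] and the m%8 in the accumulation loop come from). At the end the descriptor is normalized to unit length, clipped at 0.2, and normalized again, which limits the influence of any single large gradient.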

  1. void ExtractFeatureDescriptors(int numoctaves, ImageOctaves *GaussianPyr)
  2. {
  3. // The orientation histograms have 8 bins
  4. float orient_bin_spacing = PI/4;
  5. float orient_angles[8]={-PI,-PI+orient_bin_spacing,-PI*0.5, -orient_bin_spacing,
  6. 0.0, orient_bin_spacing, PI*0.5, PI+orient_bin_spacing};
  7. //产生描述字中心各点坐标
  8. float *feat_grid=(float *) malloc( 2*16 * sizeof(float));
  9. for (int i=0;i<GridSpacing;i++)
  10. {
  11. for (int j=0;j<2*GridSpacing;++j,++j)
  12. {
  13. feat_grid[i*2*GridSpacing+j]=-6.0+i*GridSpacing;
  14. feat_grid[i*2*GridSpacing+j+1]=-6.0+0.5*j*GridSpacing;
  15. }
  16. }
  17. //产生网格
  18. float *feat_samples=(float *) malloc( 2*256 * sizeof(float));
  19. for ( i=0;i<4*GridSpacing;i++)
  20. {
  21. for (int j=0;j<8*GridSpacing;j+=2)
  22. {
  23. feat_samples[i*8*GridSpacing+j]=-(2*GridSpacing-0.5)+i;
  24. feat_samples[i*8*GridSpacing+j+1]=-(2*GridSpacing-0.5)+0.5*j;
  25. }
  26. }
  27. float feat_window = 2*GridSpacing;
  28. Keypoint p = keyDescriptors; // p指向第一个结点
  29. while(p) // 没到表尾
  30. {
  31. float scale=(GaussianPyr[p->octave].Octave)[p->level].absolute_sigma;
  32. float sine = sin(p->ori);
  33. float cosine = cos(p->ori);
  34. //计算中心点坐标旋转之后的位置
  35. float *featcenter=(float *) malloc( 2*16 * sizeof(float));
  36. for (int i=0;i<GridSpacing;i++)
  37. {
  38. for (int j=0;j<2*GridSpacing;j+=2)
  39. {
  40. float x=feat_grid[i*2*GridSpacing+j];
  41. float y=feat_grid[i*2*GridSpacing+j+1];
  42. featcenter[i*2*GridSpacing+j]=((cosine * x + sine * y) + p->sx);
  43. featcenter[i*2*GridSpacing+j+1]=((-sine * x + cosine * y) + p->sy);
  44. }
  45. }
  46. // calculate sample window coordinates (rotated along keypoint)
  47. float *feat=(float *) malloc( 2*256 * sizeof(float));
  48. for ( i=0;i<64*GridSpacing;i++,i++)
  49. {
  50. float x=feat_samples[i];
  51. float y=feat_samples[i+1];
  52. feat[i]=((cosine * x + sine * y) + p->sx);
  53. feat[i+1]=((-sine * x + cosine * y) + p->sy);
  54. }
  55. //Initialize the feature descriptor.
  56. float *feat_desc = (float *) malloc( 128 * sizeof(float));
  57. for (i=0;i<128;i++)
  58. {
  59. feat_desc[i]=0.0;
  60. // printf("%f ",feat_desc[i]);
  61. }
  62. //printf("/n");
  63. for ( i=0;i<512;++i,++i)
  64. {
  65. float x_sample = feat[i];
  66. float y_sample = feat[i+1];
  67. // Interpolate the gradient at the sample position
  68. /*
  69. 0 1 0
  70. 1 * 1
  71. 0 1 0 具体插值策略如图示
  72. */
  73. float sample12=getPixelBI(((GaussianPyr[p->octave].Octave)[p->level]).Level, x_sample, y_sample-1);
  74. float sample21=getPixelBI(((GaussianPyr[p->octave].Octave)[p->level]).Level, x_sample-1, y_sample);
  75. float sample22=getPixelBI(((GaussianPyr[p->octave].Octave)[p->level]).Level, x_sample, y_sample);
  76. float sample23=getPixelBI(((GaussianPyr[p->octave].Octave)[p->level]).Level, x_sample+1, y_sample);
  77. float sample32=getPixelBI(((GaussianPyr[p->octave].Octave)[p->level]).Level, x_sample, y_sample+1);
  78. //float diff_x = 0.5*(sample23 - sample21);
  79. //float diff_y = 0.5*(sample32 - sample12);
  80. float diff_x = sample23 - sample21;
  81. float diff_y = sample32 - sample12;
  82. float mag_sample = sqrt( diff_x*diff_x + diff_y*diff_y );
  83. float grad_sample = atan2( diff_y, diff_x ); //full-range gradient orientation; also safe when diff_x is 0
  84. if(grad_sample == CV_PI)
  85. grad_sample = -CV_PI;
  86. // Compute the weighting for the x and y dimensions.
  87. float *x_wght=(float *) malloc( GridSpacing * GridSpacing * sizeof(float));
  88. float *y_wght=(float *) malloc( GridSpacing * GridSpacing * sizeof(float));
  89. float *pos_wght=(float *) malloc( 8*GridSpacing * GridSpacing * sizeof(float));;
  90. for (int m=0;m<32;++m,++m)
  91. {
  92. float x=featcenter[m];
  93. float y=featcenter[m+1];
  94. x_wght[m/2] = max(1 - (fabs(x - x_sample)*1.0/GridSpacing), 0);
  95. y_wght[m/2] = max(1 - (fabs(y - y_sample)*1.0/GridSpacing), 0);
  96. }
  97. for ( m=0;m<16;++m)
  98. for (int n=0;n<8;++n)
  99. pos_wght[m*8+n]=x_wght[m]*y_wght[m];
  100. free(x_wght);
  101. free(y_wght);
  102. //计算方向的加权,首先旋转梯度场到主方向,然后计算差异
  103. float diff[8],orient_wght[128];
  104. for ( m=0;m<8;++m)
  105. {
  106. float angle = grad_sample-(p->ori)-orient_angles[m]+CV_PI;
  107. float temp = angle / (2.0 * CV_PI);
  108. angle -= (int)(temp) * (2.0 * CV_PI);
  109. diff[m]= angle - CV_PI;
  110. }
  111. // Compute the gaussian weighting.
  112. float x=p->sx;
  113. float y=p->sy;
  114. float g = exp(-((x_sample-x)*(x_sample-x)+(y_sample-y)*(y_sample-y))/(2*feat_window*feat_window))/(2*CV_PI*feat_window*feat_window);
  115. for ( m=0;m<128;++m)
  116. {
  117. orient_wght[m] = max((1.0 - 1.0*fabs(diff[m%8])/orient_bin_spacing),0);
  118. feat_desc[m] = feat_desc[m] + orient_wght[m]*pos_wght[m]*g*mag_sample;
  119. }
  120. free(pos_wght);
  121. }
  122. free(feat);
  123. free(featcenter);
  124. float norm=GetVecNorm( feat_desc, 128);
  125. for (int m=0;m<128;m++)
  126. {
  127. feat_desc[m]/=norm;
  128. if (feat_desc[m]>0.2)
  129. feat_desc[m]=0.2;
  130. }
  131. norm=GetVecNorm( feat_desc, 128);
  132. for ( m=0;m<128;m++)
  133. {
  134. feat_desc[m]/=norm;
  135. printf("%f ",feat_desc[m]);
  136. }
  137. printf("\n");
  138. p->descrip = feat_desc;
  139. p=p->next;
  140. }
  141. free(feat_grid);
  142. free(feat_samples);
  143. }
  144. //Horizontally tile two images, used to display the pyramid
  145. CvMat* MosaicHorizen( CvMat* im1, CvMat* im2 )
  146. {
  147. int row,col;
  148. CvMat *mosaic = cvCreateMat( max(im1->rows,im2->rows),(im1->cols+im2->cols),CV_32FC1);
  149. #define Mosaic(ROW,COL) ((float*)(mosaic->data.fl + mosaic->step/sizeof(float)*(ROW)))[(COL)]
  150. #define Im11Mat(ROW,COL) ((float *)(im1->data.fl + im1->step/sizeof(float) *(ROW)))[(COL)]
  151. #define Im22Mat(ROW,COL) ((float *)(im2->data.fl + im2->step/sizeof(float) *(ROW)))[(COL)]
  152. cvZero(mosaic);
  153. /* Copy images into mosaic1. */
  154. for ( row = 0; row < im1->rows; row++)
  155. for ( col = 0; col < im1->cols; col++)
  156. Mosaic(row,col)=Im11Mat(row,col) ;
  157. for ( row = 0; row < im2->rows; row++)
  158. for ( col = 0; col < im2->cols; col++)
  159. Mosaic(row, (col+im1->cols) )= Im22Mat(row,col) ;
  160. return mosaic;
  161. }
  162. //Vertically tile two images, used to display the pyramid
  163. CvMat* MosaicVertical( CvMat* im1, CvMat* im2 )
  164. {
  165. int row,col;
  166. CvMat *mosaic = cvCreateMat(im1->rows+im2->rows,max(im1->cols,im2->cols), CV_32FC1);
  167. #define Mosaic(ROW,COL) ((float*)(mosaic->data.fl + mosaic->step/sizeof(float)*(ROW)))[(COL)]
  168. #define Im11Mat(ROW,COL) ((float *)(im1->data.fl + im1->step/sizeof(float) *(ROW)))[(COL)]
  169. #define Im22Mat(ROW,COL) ((float *)(im2->data.fl + im2->step/sizeof(float) *(ROW)))[(COL)]
  170. cvZero(mosaic);
  171. /* Copy images into mosaic1. */
  172. for ( row = 0; row < im1->rows; row++)
  173. for ( col = 0; col < im1->cols; col++)
  174. Mosaic(row,col)= Im11Mat(row,col) ;
  175. for ( row = 0; row < im2->rows; row++)
  176. for ( col = 0; col < im2->cols; col++)
  177. Mosaic((row+im1->rows),col)=Im22Mat(row,col) ;
  178. return mosaic;
  179. }

OK, for clarity, here is the main function described in Part 1 once more (note: it already appeared there; it is repeated here purely so this part reads on its own):

  1. int main( void )
  2. {
  3. //声明当前帧IplImage指针
  4. IplImage* src = NULL;
  5. IplImage* image1 = NULL;
  6. IplImage* grey_im1 = NULL;
  7. IplImage* DoubleSizeImage = NULL;
  8. IplImage* mosaic1 = NULL;
  9. IplImage* mosaic2 = NULL;
  10. CvMat* mosaicHorizen1 = NULL;
  11. CvMat* mosaicHorizen2 = NULL;
  12. CvMat* mosaicVertical1 = NULL;
  13. CvMat* image1Mat = NULL;
  14. CvMat* tempMat=NULL;
  15. ImageOctaves *Gaussianpyr;
  16. int rows,cols;
  17. #define Im1Mat(ROW,COL) ((float *)(image1Mat->data.fl + image1Mat->step/sizeof(float) *(ROW)))[(COL)]
  18. //灰度图象像素的数据结构
  19. #define Im1B(ROW,COL) ((uchar*)(image1->imageData + image1->widthStep*(ROW)))[(COL)*3]
  20. #define Im1G(ROW,COL) ((uchar*)(image1->imageData + image1->widthStep*(ROW)))[(COL)*3+1]
  21. #define Im1R(ROW,COL) ((uchar*)(image1->imageData + image1->widthStep*(ROW)))[(COL)*3+2]
  22. storage = cvCreateMemStorage(0);
  23. //读取图片
  24. if( (src = cvLoadImage( "street1.jpg", 1)) == 0 ) // test1.jpg einstein.pgm back1.bmp
  25. return -1;
  26. //为图像分配内存
  27. image1 = cvCreateImage(cvSize(src->width, src->height), IPL_DEPTH_8U,3);
  28. grey_im1 = cvCreateImage(cvSize(src->width, src->height), IPL_DEPTH_8U,1);
  29. DoubleSizeImage = cvCreateImage(cvSize(2*(src->width), 2*(src->height)), IPL_DEPTH_8U,3);
  30. //为图像阵列分配内存,假设两幅图像的大小相同,tempMat跟随image1的大小
  31. image1Mat = cvCreateMat(src->height, src->width, CV_32FC1);
  32. //转化成单通道图像再处理
  33. cvCvtColor(src, grey_im1, CV_BGR2GRAY);
  34. //转换进入Mat数据结构,图像操作使用的是浮点型操作
  35. cvConvert(grey_im1, image1Mat);
  36. double t = (double)cvGetTickCount();
  37. //图像归一化
  38. cvConvertScale( image1Mat, image1Mat, 1.0/255, 0 );
  39. int dim = min(image1Mat->rows, image1Mat->cols);
  40. numoctaves = (int) (log((double) dim) / log(2.0)) - 2; //金字塔阶数
  41. numoctaves = min(numoctaves, MAXOCTAVES);
  42. //SIFT算法第一步,预滤波除噪声,建立金字塔底层
  43. tempMat = ScaleInitImage(image1Mat) ;
  44. //SIFT算法第二步,建立Guassian金字塔和DOG金字塔
  45. Gaussianpyr = BuildGaussianOctaves(tempMat) ;
  46. t = (double)cvGetTickCount() - t;
  47. printf( "the time of build Gaussian pyramid and DOG pyramid is %.1f/n", t/(cvGetTickFrequency()*1000.) );
  48. #define ImLevels(OCTAVE,LEVEL,ROW,COL) ((float *)(Gaussianpyr[(OCTAVE)].Octave[(LEVEL)].Level->data.fl + Gaussianpyr[(OCTAVE)].Octave[(LEVEL)].Level->step/sizeof(float) *(ROW)))[(COL)]
  49. //显示高斯金字塔
  50. for (int i=0; i<numoctaves;i++)
  51. {
  52. if (i==0)
  53. {
  54. mosaicHorizen1=MosaicHorizen( (Gaussianpyr[0].Octave)[0].Level, (Gaussianpyr[0].Octave)[1].Level );
  55. for (int j=2;j<SCALESPEROCTAVE+3;j++)
  56. mosaicHorizen1=MosaicHorizen( mosaicHorizen1, (Gaussianpyr[0].Octave)[j].Level );
  57. for ( j=0;j<NUMSIZE;j++)
  58. mosaicHorizen1=halfSizeImage(mosaicHorizen1);
  59. }
  60. else if (i==1)
  61. {
  62. mosaicHorizen2=MosaicHorizen( (Gaussianpyr[1].Octave)[0].Level, (Gaussianpyr[1].Octave)[1].Level );
  63. for (int j=2;j<SCALESPEROCTAVE+3;j++)
  64. mosaicHorizen2=MosaicHorizen( mosaicHorizen2, (Gaussianpyr[1].Octave)[j].Level );
  65. for ( j=0;j<NUMSIZE;j++)
  66. mosaicHorizen2=halfSizeImage(mosaicHorizen2);
  67. mosaicVertical1=MosaicVertical( mosaicHorizen1, mosaicHorizen2 );
  68. }
  69. else
  70. {
  71. mosaicHorizen1=MosaicHorizen( (Gaussianpyr[i].Octave)[0].Level, (Gaussianpyr[i].Octave)[1].Level );
  72. for (int j=2;j<SCALESPEROCTAVE+3;j++)
  73. mosaicHorizen1=MosaicHorizen( mosaicHorizen1, (Gaussianpyr[i].Octave)[j].Level );
  74. for ( j=0;j<NUMSIZE;j++)
  75. mosaicHorizen1=halfSizeImage(mosaicHorizen1);
  76. mosaicVertical1=MosaicVertical( mosaicVertical1, mosaicHorizen1 );
  77. }
  78. }
  79. mosaic1 = cvCreateImage(cvSize(mosaicVertical1->width, mosaicVertical1->height), IPL_DEPTH_8U,1);
  80. cvConvertScale( mosaicVertical1, mosaicVertical1, 255.0, 0 );
  81. cvConvertScaleAbs( mosaicVertical1, mosaic1, 1, 0 );
  82. // cvSaveImage("GaussianPyramid of me.jpg",mosaic1);
  83. cvNamedWindow("mosaic1",1);
  84. cvShowImage("mosaic1", mosaic1);
  85. cvWaitKey(0);
  86. cvDestroyWindow("mosaic1");
  87. //显示DOG金字塔
  88. for ( i=0; i<numoctaves;i++)
  89. {
  90. if (i==0)
  91. {
  92. mosaicHorizen1=MosaicHorizen( (DOGoctaves[0].Octave)[0].Level, (DOGoctaves[0].Octave)[1].Level );
  93. for (int j=2;j<SCALESPEROCTAVE+2;j++)
  94. mosaicHorizen1=MosaicHorizen( mosaicHorizen1, (DOGoctaves[0].Octave)[j].Level );
  95. for ( j=0;j<NUMSIZE;j++)
  96. mosaicHorizen1=halfSizeImage(mosaicHorizen1);
  97. }
  98. else if (i==1)
  99. {
  100. mosaicHorizen2=MosaicHorizen( (DOGoctaves[1].Octave)[0].Level, (DOGoctaves[1].Octave)[1].Level );
  101. for (int j=2;j<SCALESPEROCTAVE+2;j++)
  102. mosaicHorizen2=MosaicHorizen( mosaicHorizen2, (DOGoctaves[1].Octave)[j].Level );
  103. for ( j=0;j<NUMSIZE;j++)
  104. mosaicHorizen2=halfSizeImage(mosaicHorizen2);
  105. mosaicVertical1=MosaicVertical( mosaicHorizen1, mosaicHorizen2 );
  106. }
  107. else
  108. {
  109. mosaicHorizen1=MosaicHorizen( (DOGoctaves[i].Octave)[0].Level, (DOGoctaves[i].Octave)[1].Level );
  110. for (int j=2;j<SCALESPEROCTAVE+2;j++)
  111. mosaicHorizen1=MosaicHorizen( mosaicHorizen1, (DOGoctaves[i].Octave)[j].Level );
  112. for ( j=0;j<NUMSIZE;j++)
  113. mosaicHorizen1=halfSizeImage(mosaicHorizen1);
  114. mosaicVertical1=MosaicVertical( mosaicVertical1, mosaicHorizen1 );
  115. }
  116. }
  117. //考虑到DOG金字塔各层图像都会有正负,所以,必须寻找最负的,以将所有图像抬高一个台阶去显示
  118. double min_val=0;
  119. double max_val=0;
  120. cvMinMaxLoc( mosaicVertical1, &min_val, &max_val,NULL, NULL, NULL );
  121. if ( min_val<0.0 )
  122. cvAddS( mosaicVertical1, cvScalarAll( (-1.0)*min_val ), mosaicVertical1, NULL );
  123. mosaic2 = cvCreateImage(cvSize(mosaicVertical1->width, mosaicVertical1->height), IPL_DEPTH_8U,1);
  124. cvConvertScale( mosaicVertical1, mosaicVertical1, 255.0/(max_val-min_val), 0 );
  125. cvConvertScaleAbs( mosaicVertical1, mosaic2, 1, 0 );
  126. // cvSaveImage("DOGPyramid of me.jpg",mosaic2);
  127. cvNamedWindow("mosaic1",1);
  128. cvShowImage("mosaic1", mosaic2);
  129. cvWaitKey(0);
  130. //SIFT算法第三步:特征点位置检测,最后确定特征点的位置
  131. int keycount=DetectKeypoint(numoctaves, Gaussianpyr);
  132. printf("the keypoints number are %d ;/n", keycount);
  133. cvCopy(src,image1,NULL);
  134. DisplayKeypointLocation( image1 ,Gaussianpyr);
  135. cvPyrUp( image1, DoubleSizeImage, CV_GAUSSIAN_5x5 );
  136. cvNamedWindow("image1",1);
  137. cvShowImage("image1", DoubleSizeImage);
  138. cvWaitKey(0);
  139. cvDestroyWindow("image1");
  140. //SIFT算法第四步:计算高斯图像的梯度方向和幅值,计算各个特征点的主方向
  141. ComputeGrad_DirecandMag(numoctaves, Gaussianpyr);
  142. AssignTheMainOrientation( numoctaves, Gaussianpyr,mag_pyr,grad_pyr);
  143. cvCopy(src,image1,NULL);
  144. DisplayOrientation ( image1, Gaussianpyr);
  145. // cvPyrUp( image1, DoubleSizeImage, CV_GAUSSIAN_5x5 );
  146. cvNamedWindow("image1",1);
  147. // cvResizeWindow("image1", 2*(image1->width), 2*(image1->height) );
  148. cvShowImage("image1", image1);
  149. cvWaitKey(0);
  150. //SIFT算法第五步:抽取各个特征点处的特征描述字
  151. ExtractFeatureDescriptors( numoctaves, Gaussianpyr);
  152. cvWaitKey(0);
  153. //销毁窗口
  154. cvDestroyWindow("image1");
  155. cvDestroyWindow("mosaic1");
  156. //释放图像
  157. cvReleaseImage(&image1);
  158. cvReleaseImage(&grey_im1);
  159. cvReleaseImage(&mosaic1);
  160. cvReleaseImage(&mosaic2);
  161. return 0;
  162. }
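
A note on building it: the code targets the old OpenCV C API (cv.h / highgui.h, CvMat, IplImage), so it needs OpenCV 1.x, or an OpenCV 2.x installation that still ships the C headers. On a system where pkg-config knows about that installation, a build might look like the following (sift_demo.c is a hypothetical file name holding all of the listings plus the Part 1 definitions; street1.jpg must sit in the working directory, as in main above):

    gcc sift_demo.c -o sift_demo `pkg-config --cflags --libs opencv` -lm
    ./sift_demo

Adjust the package name and paths to whatever your OpenCV installation actually provides.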

Finally, a look at the results of running the program (the woman in the pictures is a friend from my hometown, sister He, in a photo from 2008):

Implementing SIFT in C Step by Step, Part 2 - Figure 1

Implementing SIFT in C Step by Step, Part 2 - Figure 2

Implementing SIFT in C Step by Step, Part 2 - Figure 3

Implementing SIFT in C Step by Step, Part 2 - Figure 4

Implementing SIFT in C Step by Step, Part 2 - Figure 5

The end.

Update

Many readers have asked in the comments for the complete source package of this program (note: the code in this article is not pasted in full, so simply copying, pasting and compiling it will certainly produce plenty of errors). Unfortunately, after all this time I can no longer find that code myself. I can, however, offer a SIFT + KD-tree + BBF implementation that does compile correctly, for reference and study. Readers with a pudn account can download it here: http://www.pudn.com/downloads340/sourcecode/graph/texture_mapping/detail1486667.html (if you do not have a pudn account, join QQ group 169056165 with the verification message "sift" and get it from the group's shared files). I then used it to match two different images (the result, of course, was that they do not match), and the effect was quite good: http://weibo.com/1580904460/yDmzAEwcV#1348475194313 . July, October 11, 2012.