当前位置: 首页 > 知识库问答 >
问题:

Android-Android OpenCV在透视后得到空白(黑色)图像?

翟冷勋
2023-03-14

下面是我的JNI代码:

/**
 * JNI entry point: scans the RGBA camera frame for square-shaped contours
 * and, for the first square that passes the geometry tests, warps the
 * quadrilateral region into a fixed 300x220 patch which is copied into the
 * Mat referenced by addrDescriptor.
 *
 * @param addrRgba       native address of the input/preview RGBA Mat
 * @param draw           non-zero: draw the detected square outline on the frame
 * @param addrDescriptor native address of the output Mat for the warped patch
 * @return 1 if a square was found and the patch written, 0 otherwise
 *
 * Fixes vs. the posted code:
 *  - "JNICAL" -> "JNICALL" (the original token does not compile).
 *  - warpPerspective() was given the 4-point vector `src` as the *source
 *    image*; the camera frame must be warped instead — warping the point
 *    vector is what produced the blank (black) output.
 *  - the destination patch is allocated with the frame's own type instead of
 *    CV_32FC1, so the Java side can convert it straight to a Bitmap.
 */
JNIEXPORT jint JNICALL Java_org_opencv_samples_tutorial3_Sample3Native_FindSquares(
        JNIEnv* env, jobject, jlong addrRgba, jint draw, jlong addrDescriptor) {
    Mat& image = *(Mat*) addrRgba;
    Mat& pMatDesc = *(Mat*) addrDescriptor;
    int thresh = 50, N = 4;
    int found = 0;

    Mat pyr, timg, gray0(image.size(), CV_8U), gray;

    // Down-scale then up-scale the image to filter out noise.
    pyrDown(image, pyr, Size(image.cols / 2, image.rows / 2));
    pyrUp(pyr, timg, image.size());
    vector<vector<Point> > contours;

    // Look for squares in every colour plane of the image.
    for (int c = 1; c < 3; c++) {
        int ch[] = { c, 0 };
        mixChannels(&timg, 1, &gray0, 1, ch, 1);

        // Try several threshold levels.
        for (int l = 0; l < N; l++) {
            if (l == 0) {
                // Hack: use Canny instead of a zero threshold level.  Canny
                // helps to catch squares with gradient shading; the lower
                // threshold of 0 forces edge merging.
                Canny(gray0, gray, 0, thresh, 5);
                // Dilate the Canny output to close potential holes between
                // edge segments.
                dilate(gray, gray, Mat(), Point(-1, -1));
            } else {
                // apply threshold if l != 0:
                //     tgray(x,y) = gray(x,y) < (l+1)*255/N ? 255 : 0
                gray = gray0 >= (l + 1) * 255 / N;
            }

            // Find contours and store them all as a list.
            findContours(gray, contours, CV_RETR_LIST, CV_CHAIN_APPROX_SIMPLE);

            vector<Point> approx;
            // Test each contour.
            for (size_t i = 0; i < contours.size(); i++) {
                // Approximate the contour with an accuracy proportional to
                // its perimeter.
                approxPolyDP(Mat(contours[i]), approx,
                        arcLength(Mat(contours[i]), true) * 0.02, true);

                // Square candidates: 4 vertices after approximation, fairly
                // large area (filters out noisy contours) and convex.
                // fabs() because the signed area depends on the contour
                // orientation.
                if (approx.size() == 4 && fabs(contourArea(Mat(approx))) > 1000
                        && isContourConvex(Mat(approx))) {
                    double maxCosine = 0;
                    for (int j = 2; j < 5; j++) {
                        // Maximum cosine of the angle between joint edges.
                        double cosine = fabs(angle(approx[j % 4],
                                approx[j - 2], approx[j - 1]));
                        maxCosine = MAX(maxCosine, cosine);
                    }

                    // All angles close to 90 degrees -> accept the square.
                    if (maxCosine < 0.3) {
                        // Mark the four corners on the preview frame.
                        circle(image, approx[0], 5, Scalar(255, 0, 0, 255), 3, 4, 0);
                        circle(image, approx[1], 5, Scalar(255, 0, 0, 255), 3, 4, 0);
                        circle(image, approx[2], 5, Scalar(255, 0, 0, 255), 3, 4, 0);
                        circle(image, approx[3], 5, Scalar(255, 0, 0, 255), 3, 4, 0);

                        if ((int) draw) {
                            // Outline of the detected square.
                            line(image, approx[0], approx[1], Scalar(0, 255, 0, 255), 1, 4, 0);
                            line(image, approx[1], approx[2], Scalar(0, 255, 0, 255), 1, 4, 0);
                            line(image, approx[2], approx[3], Scalar(0, 255, 0, 255), 1, 4, 0);
                            line(image, approx[3], approx[0], Scalar(0, 255, 0, 255), 1, 4, 0);
                        }

                        // Source quadrangle.  NOTE(review): approxPolyDP does
                        // not guarantee a corner order that matches quad_pts;
                        // sort the corners first if the warp comes out rotated.
                        vector<Point2f> src(4);
                        src[0] = approx[0];
                        src[1] = approx[1];
                        src[2] = approx[2];
                        src[3] = approx[3];

                        // Destination patch: 220x300, same type as the frame
                        // (RGBA) so the Java side can build a Bitmap from it.
                        cv::Mat quad = cv::Mat::zeros(300, 220, image.type());

                        // Destination quadrangle: the full patch.
                        vector<Point2f> quad_pts(4);
                        quad_pts[0] = Point(0, 0);
                        quad_pts[1] = Point(quad.cols, 0);
                        quad_pts[2] = Point(quad.cols, quad.rows);
                        quad_pts[3] = Point(0, quad.rows);

                        Mat transmtx = getPerspectiveTransform(src, quad_pts);
                        // Warp the *image*, not the point vector `src`: the
                        // original call warped the 4x1 point Mat and produced
                        // an all-black CV_32FC1 result.
                        warpPerspective(image, quad, transmtx, quad.size());

                        quad.copyTo(pMatDesc);
                        found = 1;
                        return (jint) found;
                    }
                }
            }
        }
    }
    return (jint) found;
}

// NOTE(review): 'descriptor' is the Mat filled on the native side; the question
// states it arrives as CV_32FC1, which does not match the CV_8UC4 Mat allocated
// below.  Mat.copyTo re-creates the destination with the source's type, so the
// CV_8UC4 allocation is presumably discarded here — confirm against Mat.copyTo
// docs.  Utils.matToBitmap requires an 8-bit Mat, hence the reported failure.
Mat final_mat = new Mat(descriptor.height(), descriptor.width(), CvType.CV_8UC4);
descriptor.copyTo(final_mat); 
// Build an ARGB_8888 bitmap of the same size and fill it from the Mat.
bitmap = Bitmap.createBitmap(final_mat.cols(), final_mat.rows(),
                Bitmap.Config.ARGB_8888);
Utils.matToBitmap(final_mat, bitmap);

final_mat 的通道类型变成了 CV_32FC1。如何将通道类型转换为 CV_8UC4?请帮我找出解决方法。

编辑:我已将 final_mat 图像更改为 CV_8UC3

// NOTE(review): the copyTo below is redundant — convertTo on the next line
// overwrites final_mat anyway.  convertTo changes only the depth (to 8-bit);
// it does not change the channel count, so cvtColor is used afterwards to
// expand the single gray channel to 3-channel RGB.
descriptor.copyTo(final_mat);
descriptor.convertTo(final_mat, CvType.CV_8UC1);
Imgproc.cvtColor(final_mat,final_mat,Imgproc.COLOR_GRAY2RGB);

但结果我得到的仍然是空白(黑色)图像。

共有1个答案

孟健
2023-03-14

经过长时间的研究,我找到了解决办法。这里的问题是由对实际图像本身做变换引起的:应当在实际 Mat 对象的副本上应用各种变换(模糊、Canny 等),而在做透视变换(warpPerspective)时使用未经处理的实际 Mat 对象。下面附上用于找出最大轮廓的参考代码。

jni_part.cpp:

extern "C" {

// Cosine of the angle at vertex pt0 formed by the edges pt0->pt1 and pt0->pt2.
double angle(Point pt1, Point pt2, Point pt0);

JNIEXPORT jint Java_info_androidhive_androidcameraapi_CameraMainActivity_findSquare(
        JNIEnv*, jobject, jlong addrRgba, jlong addrDescriptor, jint width_,
        jint height_);

/**
 * Detects the biggest square-like contour in the camera frame and warps that
 * region into a fronto-parallel crop written to the Mat at addrDescriptor.
 *
 * Key point from the accepted answer: the blur/Canny pipeline runs on *copies*
 * of the frame, while the untouched clone (newSrc) is the source of the
 * perspective warp — transforming the frame itself is what produced the blank
 * output in the question.
 *
 * @param addrRgba       native address of the input RGBA frame
 * @param addrDescriptor native address of the output (cropped) Mat
 * @param width_/height_ preview surface size, used to scale coordinates
 * @return always 1; addrDescriptor holds a plain clone when no square is found
 *
 * Fixes vs. the posted code: restored the log line that was garbled to
 * "squares.size());", repaired the comment garbled to "cv::Rect structure.",
 * and corrected the edge-length formulas, which summed the same axis twice
 * (dx^2 + dx^2) instead of dx^2 + dy^2.
 */
JNIEXPORT jint Java_info_androidhive_androidcameraapi_CameraMainActivity_findSquare(
        JNIEnv*, jobject, jlong addrRgba, jlong addrDescriptor, jint width_,
        jint height_) {
    Mat& image = *(Mat*) addrRgba;
    Mat& imageCropped = *(Mat*) addrDescriptor;
    int screen_width = (int) width_;
    int screen_height = (int) height_;

    // Untouched copy used as the warp source; imageCropped defaults to a
    // plain clone in case no square is found.
    Mat newSrc = image.clone();
    imageCropped = image.clone();
    Mat testImage = image.clone();

    // Blur enhances edge detection.
    Mat blurred(testImage);
    medianBlur(testImage, blurred, 9);

    Mat gray0(blurred.size(), CV_8U), gray;
    vector<vector<Point> > contours;

    // Candidate squares collected from every colour plane of the image.
    vector<vector<Point> > squares;

    for (int c = 0; c < 3; c++) {
        int ch[] = { c, 0 };
        mixChannels(&blurred, 1, &gray0, 1, ch, 1);

        // Try several threshold levels.
        const int threshold_level = 2;
        for (int l = 0; l < threshold_level; l++) {
            if (l == 0) {
                // Use Canny instead of a zero threshold level: it catches
                // squares with gradient shading.
                Canny(gray0, gray, 10, 20, 3);
                // Dilate removes potential holes between edge segments.
                dilate(gray, gray, Mat(), Point(-1, -1));
            } else {
                gray = gray0 >= (l + 1) * 255 / threshold_level;
            }

            // Find contours and store them in a list.
            findContours(gray, contours, CV_RETR_LIST, CV_CHAIN_APPROX_SIMPLE);

            // Test contours.
            vector<Point> approx;
            for (size_t i = 0; i < contours.size(); i++) {
                // Approximate the contour with an accuracy proportional to
                // its perimeter.
                approxPolyDP(Mat(contours[i]), approx,
                        arcLength(Mat(contours[i]), true) * 0.02, true);

                // 4 vertices, fairly large area (fabs: the sign follows the
                // contour orientation) and convex -> candidate square.
                if (approx.size() == 4
                        && fabs(contourArea(Mat(approx))) > 1000
                        && isContourConvex(Mat(approx))) {
                    double maxCosine = 0;
                    for (int j = 2; j < 5; j++) {
                        double cosine = fabs(angle(approx[j % 4],
                                approx[j - 2], approx[j - 1]));
                        maxCosine = MAX(maxCosine, cosine);
                    }

                    // All angles close to 90 degrees.
                    if (maxCosine < 0.3) {
                        squares.push_back(approx);
                    }
                }
            }
        }
    }

    if (squares.size() > 0) {
        int max_width = 0;
        int max_height = 0;
        size_t max_square_idx = 0;

        __android_log_print(ANDROID_LOG_INFO, "OpenGLTest", "squares %d",
                (int) squares.size());

        for (size_t i = 0; i < squares.size(); i++) {
            // Bounding cv::Rect of the candidate square.
            cv::Rect bounds = boundingRect(cv::Mat(squares[i]));
            // Remember the index of the biggest square found.
            if ((bounds.width >= max_width) && (bounds.height >= max_height)) {
                max_width = bounds.width;
                max_height = bounds.height;
                max_square_idx = i;
            }
        }

        vector<Point> biggest_square = squares[max_square_idx];
        vector<Point> _adjustRect = squares[max_square_idx];

        if (biggest_square.size() == 4) {
            // Order the corners as TL, TR, BR, BL using the x+y trick: the
            // top-left corner has the smallest x+y sum, the bottom-right the
            // largest; the remaining two are told apart by their x.
            vector<Point> sortedPoints = squares[max_square_idx];

            int sums[4];
            for (int s = 0; s < 4; s++) {
                sums[s] = biggest_square[s].x + biggest_square[s].y;
            }

            int max = sums[0];
            int min = sums[0];
            for (int s = 0; s < 4; s++) {
                if (sums[s] > max) {
                    max = sums[s];
                } else if (sums[s] < min) {
                    min = sums[s];
                }
            }

            int minIndex = 0;
            int maxIndex = 0;
            int missingIndexOne = 0;
            int missingIndexTwo = 0;

            for (int k = 0; k < 4; k++) {
                if (sums[k] == min) {
                    sortedPoints[0] = biggest_square[k];   // top-left
                    minIndex = k;
                    continue;
                }
                if (sums[k] == max) {
                    sortedPoints[2] = biggest_square[k];   // bottom-right
                    maxIndex = k;
                    continue;
                }
                missingIndexOne = k;
            }

            for (int k = 0; k < 4; k++) {
                if (missingIndexOne != k && minIndex != k && maxIndex != k) {
                    missingIndexTwo = k;
                }
            }

            // Of the two remaining corners, the one with the smaller x sits
            // on the left.
            if (biggest_square[missingIndexOne].x
                    < biggest_square[missingIndexTwo].x) {
                sortedPoints[3] = biggest_square[missingIndexOne];  // bottom-left
                sortedPoints[1] = biggest_square[missingIndexTwo];  // top-right
            } else {
                sortedPoints[1] = biggest_square[missingIndexOne];
                sortedPoints[3] = biggest_square[missingIndexTwo];
            }

            _adjustRect[0] = sortedPoints[0];
            _adjustRect[1] = sortedPoints[1];
            _adjustRect[2] = sortedPoints[2];
            _adjustRect[3] = sortedPoints[3];
        }

        Point ptTopLeft = _adjustRect[0];
        Point ptTopRight = _adjustRect[1];
        Point ptBottomRight = _adjustRect[2];
        Point ptBottomLeft = _adjustRect[3];

        // Scale factor between the preview surface and the frame.
        float imageScale = fminf((float) screen_width / newSrc.cols,
                (float) screen_height / newSrc.rows);

        __android_log_print(ANDROID_LOG_INFO, "OpenGLTest", "imageScale %f",
                imageScale);
        __android_log_print(ANDROID_LOG_INFO, "OpenGLTest", "width_ %d",
                screen_width);

        // Edge lengths of the quadrilateral in preview coordinates.
        // Fixed: the original summed the x (resp. y) difference twice
        // instead of computing dx^2 + dy^2.
        float w1 = sqrt(
                pow(ptBottomRight.x / imageScale - ptBottomLeft.x / imageScale, 2)
                        + pow(ptBottomRight.y / imageScale
                                - ptBottomLeft.y / imageScale, 2));
        float w2 = sqrt(
                pow(ptTopRight.x / imageScale - ptTopLeft.x / imageScale, 2)
                        + pow(ptTopRight.y / imageScale
                                - ptTopLeft.y / imageScale, 2));
        float h1 = sqrt(
                pow(ptTopRight.x / imageScale - ptBottomRight.x / imageScale, 2)
                        + pow(ptTopRight.y / imageScale
                                - ptBottomRight.y / imageScale, 2));
        float h2 = sqrt(
                pow(ptTopLeft.x / imageScale - ptBottomLeft.x / imageScale, 2)
                        + pow(ptTopLeft.y / imageScale
                                - ptBottomLeft.y / imageScale, 2));

        // NOTE(review): despite the names these take the *smaller* of the two
        // opposite edges; kept as in the reference answer.
        float maxWidth = (w1 < w2) ? w1 : w2;
        float maxHeight = (h1 < h2) ? h1 : h2;

        Point2f src[4], quad[4];
        src[0] = Point2f(ptTopLeft.x, ptTopLeft.y);
        src[1] = Point2f(ptTopRight.x, ptTopRight.y);
        src[2] = Point2f(ptBottomRight.x, ptBottomRight.y);
        src[3] = Point2f(ptBottomLeft.x, ptBottomLeft.y);

        quad[0] = Point2f(0, 0);
        quad[1] = Point2f(maxWidth - 1, 0);
        quad[2] = Point2f(maxWidth - 1, maxHeight - 1);
        quad[3] = Point2f(0, maxHeight - 1);

        // Warp the untouched clone (newSrc), never the processed frame;
        // warpPerspective allocates the output with the source's type.
        cv::Mat undistorted;
        cv::warpPerspective(newSrc, undistorted,
                cv::getPerspectiveTransform(src, quad),
                cv::Size((int) maxWidth, (int) maxHeight));

        imageCropped = undistorted.clone();
    }

    return 1;
}

/**
 * Returns the cosine of the angle at pt0 between the vectors (pt1 - pt0) and
 * (pt2 - pt0).  The 1e-10 term guards against division by zero when an edge
 * has zero length.
 */
double angle(Point pt1, Point pt2, Point pt0) {
    double dx1 = pt1.x - pt0.x;
    double dy1 = pt1.y - pt0.y;
    double dx2 = pt2.x - pt0.x;
    double dy2 = pt2.y - pt0.y;
    return (dx1 * dx2 + dy1 * dy2)
            / sqrt((dx1 * dx1 + dy1 * dy1) * (dx2 * dx2 + dy2 * dy2) + 1e-10);
}

}

快乐编码!!

 类似资料:
  • 我以前也发布过同样的问题,但我现在再次发布,因为我在代码中发现了更多的错误并更正了它们。然而,我仍然面临着和以前一样的问题! 原始帖子:我几周前刚开始学习Python,我正在学习一个教程,用pygame构建一个数独解算器! 我现在面临的问题是,每当我尝试运行代码时,只会弹出一个黑色的空白窗口。我已经一遍又一遍地检查我的代码,但我似乎找不到问题。。。我不确定我的代码中到底是哪里出了问题,所以请原谅我

  • 我能够从URL发送JSON响应,然后当我在RecyclerView上显示相同的响应时,得到的输出是空白的,但选项卡和导航抽屉仍然存在。运行该应用程序时也没有错误。我也在使用截击库。 问题:我在列出服务的RecylerView上没有得到任何输出。我会在这里贴出与RecyclerView相关的代码。另外,我已经在底部发布了堆栈跟踪。 serviceViewAdapter.java(适配器) fragm

  • “黑白”调整可让您将彩色图像转换为灰度图像,同时保持对各颜色的转换方式的完全控制。也可以通过对图像应用色调来为灰度着色,例如创建棕褐色效果。 执行下列操作之一: 单击“调整”面板中的“黑白”图标 。 选取“图层”>“新建调整图层”>“黑白”。在“新建图层”对话框中,键入调整图层的名称,然后单击“确定”。 Photoshop 会应用默认的灰度转换。 注意:也可以选择“图像”>“调整”>“黑白”。但是

  • 我运行以下代码: 当我运行代码时,pygame窗口打开,但它是一个空白(黑色)屏幕。我还收到以下错误消息:Traceback(最近一次呼叫last): 文件"C:/用户/Draco/OneDrive/文档/编程/graphics.py",第13行,screen.blit(img(0,0))TypeError:'pyplay.Surface'对象不可调用 我试图打开的图像保存为JPG文件。图像保存在

  • 我不能链接url超过2,所以我把我的照片贴到这个博客上。请在这里看到我的问题。http://blog.naver.com/mail1001/220650041897 我想知道如何使用Android OpenCV使白色部分的图像,这是白纸上的文字,透明。 我研究过通过url(我写在博客上)使黑色背景透明,我认为“阿尔法通道”与此有关。 我认为它将工作,因为我使阿尔法通道的部分,我想使透明的黑色和其他

  • 当我运行此代码时,它会显示一个空白的黑色窗口,并显示未定义。 这不是颜色,它没有画任何线强硬我给的命令 我的错误是什么,阻止我将屏幕变白并引发错误?