Python:在opencv/Python中运行estimateRigidTransform;8uC1或8uC3

2024-06-16 12:26:54 发布

您现在位置:Python中文网/ 问答频道 /正文

我目前在float32的numpy数组中内置了两个匹配点集:

points1 = 
[[  346.70220947  9076.38476562]
 [  922.99554443  9096.4921875 ]
 [  776.96466064  9108.79101562]
 [  449.0173645   9080.61816406]
 [ 2843.19433594  1226.93212891]
 [  779.95275879  9094.76855469]
 [  451.46853638  9092.5078125 ]
 [ 3981.4621582   1237.50964355]
 [  132.38700867  9086.7890625 ]
 [  819.10943604  8286.74023438]
 [ 1963.64025879  1220.06921387]
 [ 1253.79321289  9095.75292969]]

points2 = 
[[ 55110.36328125   9405.07519531]
 [ 55686.71875      9423.63574219]
 [ 55540.8515625    9435.80078125]
 [ 55212.58203125   9408.00585938]
 [ 57598.76171875   1551.92956543]
 [ 55543.78125      9421.88769531]
 [ 55214.40625      9420.46972656]
 [ 58737.41796875   1561.14831543]
 [ 54895.9296875    9414.58203125]
 [ 55581.87109375   8613.87011719]
 [ 56718.76953125   1546.02197266]
 [ 56017.8125       9422.52050781]]

我试着跑:

^{pr2}$

这样就可以生成一个仿射矩阵,进而写出世界文件(world file,如 .tfw),供地理信息系统(GIS)软件对这些影像进行地理配准。

目前我收到一个错误:

Both input images must have either 8uC1 or 8uC3 type in function cvEstimateRigidTransform

我不太清楚这是怎么回事。我想我可以用两个点集作为参数,只要我有6个或更多对。

任何想法或建议将不胜感激!


Tags: 文件项目numpy软件世界动态矩阵数组
1条回答
网友
1楼 · 发布于 2024-06-16 12:26:54

我也有同样奇怪的错误,但是在Java中。在我的例子中,estimateRigidTransform似乎无法识别出我给出的两张Mat图像实际上是2D点集。所以我应用了一个解决方法,将匹配点从MatOfKeyPoint转换为MatOfPoint2f类型。

以下是完整的Java代码(它不是Python,但可能会对您有所帮助):

更新:过滤匹配项很重要,因为如果不过滤,则转换后可能会得到一个空数组。

    // Set up ORB detection/description and a brute-force matcher.
    // ORB yields binary descriptors, hence the Hamming-distance matcher.
    FeatureDetector detector = FeatureDetector.create(FeatureDetector.ORB);
    DescriptorExtractor descriptor = DescriptorExtractor.create(DescriptorExtractor.ORB);
    DescriptorMatcher matcher = DescriptorMatcher.create(DescriptorMatcher.BRUTEFORCE_HAMMING);

    // Load First Image (grayscale, as required for feature detection here)
    Mat img1 = Imgcodecs.imread("img1_path", Imgcodecs.IMREAD_GRAYSCALE);
    Mat img1_descriptors = new Mat();
    MatOfKeyPoint img1_keypoints_mat = new MatOfKeyPoint();

    // Detect KeyPoints and compute their descriptors for image 1
    detector.detect(img1, img1_keypoints_mat);
    descriptor.compute(img1, img1_keypoints_mat, img1_descriptors);

    // Load Second Image
    Mat img2 = Imgcodecs.imread("img2_path", Imgcodecs.IMREAD_GRAYSCALE);
    Mat img2_descriptors = new Mat();
    MatOfKeyPoint img2_keypoints_mat = new MatOfKeyPoint();

    // Detect KeyPoints and compute their descriptors for image 2
    detector.detect(img2, img2_keypoints_mat);
    descriptor.compute(img2, img2_keypoints_mat, img2_descriptors);

    // Match KeyPoints: one best match per img1 descriptor (query) against img2 (train)
    MatOfDMatch matOfDMatch = new MatOfDMatch();
    matcher.match(img1_descriptors, img2_descriptors, matOfDMatch);

    // Filtering the matches: first find the min/max descriptor distance,
    // then keep only matches closer than 3 * min_dist.
    List<DMatch> dMatchList = matOfDMatch.toList();
    Double max_dist = 0.0;
    Double min_dist = 100.0;

    for(int i = 0; i < img1_descriptors.rows(); i++){
        Double dist = (double) dMatchList.get(i).distance;
        if(dist < min_dist) min_dist = dist;
        if(dist > max_dist) max_dist = dist;
    }
    LinkedList<DMatch> good_matches = new LinkedList<>();
    for(int i = 0; i < img1_descriptors.rows(); i++){
        if(dMatchList.get(i).distance < 3*min_dist){
            good_matches.addLast(dMatchList.get(i));
        }
    }

    // Converting to MatOfPoint2f format — the workaround for the
    // "8uC1 or 8uC3" error: estimateRigidTransform accepts point sets
    // only when they are packed as MatOfPoint2f (CV_32FC2).
    LinkedList<Point> img1_points_list = new LinkedList<>();
    LinkedList<Point> img2_points_list = new LinkedList<>();

    List<KeyPoint> img1_keyPoints_list = img1_keypoints_mat.toList();
    List<KeyPoint> img2_keyPoints_list = img2_keypoints_mat.toList();

    // NOTE(review): good_matches.get(i) on a LinkedList is O(n) per call,
    // making this loop O(n^2); iterating the list directly would avoid that.
    int limit = good_matches.size();
    for(int i = 0; i < limit; i++){
        img1_points_list.addLast(img1_keyPoints_list.get(good_matches.get(i).queryIdx).pt);
        img2_points_list.addLast(img2_keyPoints_list.get(good_matches.get(i).trainIdx).pt);
    }

    MatOfPoint2f img1_point2f_mat = new MatOfPoint2f();
    img1_point2f_mat.fromList(img1_points_list);

    MatOfPoint2f img2_point2f_mat = new MatOfPoint2f();
    img2_point2f_mat.fromList(img2_points_list);

    // Draw match points for visual inspection.
    // NOTE(review): this draws ALL matches (matOfDMatch), not the filtered
    // good_matches — intentional for debugging, but worth confirming.
    Mat output = new Mat();
    Features2d.drawMatches(img1, img1_keypoints_mat, img2, img2_keypoints_mat, matOfDMatch, output);
    Imgcodecs.imwrite("output.png", output);

    // Estimate the 2x3 affine transform from the filtered point pairs.
    // Third argument 'true' requests a full affine model (6 DOF).
    Mat result = Video.estimateRigidTransform(img1_point2f_mat, img2_point2f_mat, true);
    printMat(result); // Printing the optimal affine transformation 2x3 array

    // The following variables correspond to the estimateRigidTransform result as shown here: https://stackoverflow.com/a/29511091/5165833
    double a = result.get(0,0)[0];
    double b = result.get(0,1)[0];
    double d = result.get(1,1)[0];
    double c = result.get(1,0)[0];

    // Solving for scale as shown in the link above
    double scale_x = Math.signum(a) * Math.sqrt( (a*a) + (b*b) );
    double scale_y = Math.signum(d) * Math.sqrt( (c*c) + (d*d) );

    System.out.println("a = "+a);
    System.out.println("b = "+b);
    System.out.println("scale_x = "+scale_x);
    System.out.println("scale_y = "+scale_y);
}

/**
 * Prints every element of the matrix to stdout, one matrix row per text line,
 * values separated by single spaces. Only channel 0 of each element is printed.
 *
 * @param m the matrix to dump (assumed single-channel here — e.g. the 2x3
 *          affine result of estimateRigidTransform — TODO confirm for other uses)
 */
public static void printMat(Mat m)
{
    int rows = m.height();
    int cols = m.width();
    for (int row = 0; row < rows; row++)
    {
        for (int col = 0; col < cols; col++)
        {
            // "%f " == the original's "%f" followed by a single space
            System.out.printf("%f ", m.get(row, col)[0]);
        }
        System.out.printf("\n");
    }
}

相关问题 更多 >