Alternative methods for detecting small particles next to large particles with the HED algorithm

Posted 2024-06-09 06:31:00



My main task in this project is to measure the properties of the polygons found in images of soil particles. I use the HED algorithm to detect the particle boundaries because it detects edges very effectively, is largely insensitive to image noise, and is based on the deep neural network module in the OpenCV library. However, I ran into a problem with images where small particles sit next to large ones, as in the attached image: the large particles are detected almost perfectly, while the small particles are largely ignored. I don't know how to solve this, and it affects the results of my image analysis. My main question is whether there is a way to improve the algorithm so that it detects both kinds of particles effectively. The HED script I use is attached below. I shoot with a Canon 600D, so I don't suspect image quality is the issue.


And the script I am using:

# USAGE
# python detect_edges_image.py
# (the input image is chosen interactively via an easygui file dialog)

# import the necessary packages
import cv2
import os
import easygui

# ask the user to pick the input image, then derive an output path next to it
path = easygui.fileopenbox()
print(path)
hdir = os.path.dirname(path)
print(hdir)
hfilename = os.path.basename(path)
print(hfilename)
hname = os.path.splitext(hfilename)[0]
print(hname)
houtname = hname+"_out.jpg"
print(houtname)
hout = os.path.sep.join([hdir,houtname])
print(hout)


class CropLayer(object):
    def __init__(self, params, blobs):
        # initialize our starting and ending (x, y)-coordinates of
        # the crop
        self.startX = 0
        self.startY = 0
        self.endX = 0
        self.endY = 0

    def getMemoryShapes(self, inputs):
        # the crop layer will receive two inputs -- we need to crop
        # the first input blob to match the shape of the second one,
        # keeping the batch size and number of channels
        (inputShape, targetShape) = (inputs[0], inputs[1])
        (batchSize, numChannels) = (inputShape[0], inputShape[1])
        (H, W) = (targetShape[2], targetShape[3])

        # compute the starting and ending crop coordinates
        self.startX = int((inputShape[3] - targetShape[3]) / 2)
        self.startY = int((inputShape[2] - targetShape[2]) / 2)
        self.endX = self.startX + W
        self.endY = self.startY + H

        # return the shape of the volume (we'll perform the actual
        # crop during the forward pass)
        return [[batchSize, numChannels, H, W]]

    def forward(self, inputs):
        # use the derived (x, y)-coordinates to perform the crop
        return [inputs[0][:, :, self.startY:self.endY,
                self.startX:self.endX]]

# load our serialized edge detector from disk
print("[INFO] loading edge detector...")

fpath = os.path.abspath(__file__)
fdir =  os.path.dirname(fpath)
print(fdir)
protoPath = os.path.sep.join([fdir,"hed_model", "deploy.prototxt"])
print(protoPath)
modelPath =  os.path.sep.join([fdir,"hed_model","hed_pretrained_bsds.caffemodel"])
print(modelPath)

net = cv2.dnn.readNetFromCaffe(protoPath, modelPath)

# register our new layer with the model
cv2.dnn_registerLayer("Crop", CropLayer)

# load the input image (the file chosen above) and grab its dimensions
image = cv2.imread(path)
(H, W) = image.shape[:2]

# convert the image to grayscale, blur it, and perform Canny
# edge detection for comparison
print("[INFO] performing Canny edge detection...")
gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
blurred = cv2.GaussianBlur(gray, (5, 5), 0)
canny = cv2.Canny(blurred, 30, 150)


# construct a blob out of the input image for the Holistically-Nested
# Edge Detector; the mean is the BSDS training-set mean used by the
# pretrained HED model
blob = cv2.dnn.blobFromImage(image, scalefactor=1.0, size=(W, H),
                             mean=(104.00698793, 116.66876762, 122.67891434),
                             swapRB=False, crop=False)

# set the blob as the input to the network and perform a forward pass
# to compute the edges
print("[INFO] performing holistically-nested edge detection...")
net.setInput(blob)
hed = net.forward()
hed = cv2.resize(hed[0, 0], (W, H))
hed = (255 * hed).astype("uint8")

# show the output edge detection results for Canny and
# Holistically-Nested Edge Detection
cv2.imshow("Input", image)
cv2.imshow("Canny", canny)
cv2.imshow("HED", hed)
cv2.imwrite(hout, hed)

cv2.waitKey(0)

I found that the mean value passed to cv2.dnn.blobFromImage() has a very strong effect on the result.
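
If the hardcoded BSDS mean does not match your soil images, one option (my suggestion, not part of the original post) is to compute the per-image channel mean instead. A minimal sketch, assuming image, W, and H come from the script above:

# compute this image's own per-channel mean (OpenCV returns B, G, R, 0)
b_mean, g_mean, r_mean = cv2.mean(image)[:3]

# build the HED input blob with the per-image mean instead of the BSDS mean
blob = cv2.dnn.blobFromImage(image, scalefactor=1.0, size=(W, H),
                             mean=(b_mean, g_mean, r_mean),
                             swapRB=False, crop=False)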


1 answer
User
#1 · Posted 2024-06-09 06:31:00

An alternative to the HED algorithm is connected component labeling, which is already implemented in OpenCV as cv2.connectedComponentsWithStats. We can use it to separate the objects and label each pixel cluster as a separate segment.


Binary image


import cv2
import numpy as np

# Load image, grayscale, Gaussian Blur, Otsu's threshold
image = cv2.imread('1.jpg')
gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
blur = cv2.GaussianBlur(gray, (3,3), 0)
thresh = cv2.threshold(blur, 0, 255, cv2.THRESH_BINARY + cv2.THRESH_OTSU)[1]
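
One caveat (my note, not from the original answer): Otsu with THRESH_BINARY assumes the particles are brighter than the background. If they are darker, the inverted flag keeps the particles white, which is what connectedComponentsWithStats expects:

# use the inverted threshold when the particles are darker than the background
thresh = cv2.threshold(blur, 0, 255, cv2.THRESH_BINARY_INV + cv2.THRESH_OTSU)[1]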

Connected component labeling to create a false-color image


# Perform connected component labeling
n_labels, labels, stats, centroids = cv2.connectedComponentsWithStats(thresh, connectivity=4)

# Create false color image and color background black
colors = np.random.randint(0, 255, size=(n_labels, 3), dtype=np.uint8)
colors[0] = [0, 0, 0]  # for cosmetic reason we want the background black
false_colors = colors[labels]

Now that we have segmented the pixel clusters, we can find the centroid of each labeled object. This information is already contained in the centroids variable returned by cv2.connectedComponentsWithStats.


# Obtain centroids
false_colors_centroid = false_colors.copy()
for centroid in centroids:
    cv2.drawMarker(false_colors_centroid, (int(centroid[0]), int(centroid[1])),
                   color=(255, 255, 255), markerType=cv2.MARKER_CROSS)
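
Note (my addition): label 0 is the background, so the loop above also draws a marker at the background's centroid. Skipping the first entry avoids that stray marker:

# variant: redo the marker pass but skip centroids[0], the background component
false_colors_centroid = false_colors.copy()
for centroid in centroids[1:]:
    cv2.drawMarker(false_colors_centroid, (int(centroid[0]), int(centroid[1])),
                   color=(255, 255, 255), markerType=cv2.MARKER_CROSS)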

There are a lot of centroids. We can filter them by contour area, using the information contained in stats, to keep only the larger objects.


# Only keep larger objects by filtering using area
MIN_AREA = 50
false_color_centroid_filter = false_colors.copy()
for i, centroid in enumerate(centroids[1:], start=1):
    area = stats[i, cv2.CC_STAT_AREA]
    if area > MIN_AREA:
        cv2.drawMarker(false_color_centroid_filter, (int(centroid[0]), int(centroid[1])),
                       color=(255, 255, 255), markerType=cv2.MARKER_CROSS)

Full code

import cv2
import numpy as np

# Load image, grayscale, Gaussian Blur, Otsu's threshold
image = cv2.imread('1.jpg')
gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
blur = cv2.GaussianBlur(gray, (3,3), 0)
thresh = cv2.threshold(blur, 0, 255, cv2.THRESH_BINARY + cv2.THRESH_OTSU)[1]

# Perform connected component labeling
n_labels, labels, stats, centroids = cv2.connectedComponentsWithStats(thresh, connectivity=4)

# Create false color image and color background black
colors = np.random.randint(0, 255, size=(n_labels, 3), dtype=np.uint8)
colors[0] = [0, 0, 0]  # for cosmetic reason we want the background black
false_colors = colors[labels]

# Obtain centroids
false_colors_centroid = false_colors.copy()
for centroid in centroids:
    cv2.drawMarker(false_colors_centroid, (int(centroid[0]), int(centroid[1])),
                   color=(255, 255, 255), markerType=cv2.MARKER_CROSS)

# Only keep larger objects by filtering using area
MIN_AREA = 50
false_color_centroid_filter = false_colors.copy()
for i, centroid in enumerate(centroids[1:], start=1):
    area = stats[i, cv2.CC_STAT_AREA]
    if area > MIN_AREA:
        cv2.drawMarker(false_color_centroid_filter, (int(centroid[0]), int(centroid[1])),
                       color=(255, 255, 255), markerType=cv2.MARKER_CROSS)

cv2.imshow('binary', thresh)
cv2.imshow('false_colors', false_colors)
cv2.imshow('false_colors_centroids', false_colors_centroid)
cv2.imshow('false_color_centroid_filter', false_color_centroid_filter)
cv2.waitKey()
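
Since the original goal was to measure particle properties, a possible follow-up (my sketch, not part of the original answer) is to read each particle's area and bounding box straight out of stats and derive, for example, an equivalent circular diameter. This assumes n_labels and stats come from the full code above:

# report basic properties for every labeled particle (label 0 is the background)
for i in range(1, n_labels):
    x = stats[i, cv2.CC_STAT_LEFT]
    y = stats[i, cv2.CC_STAT_TOP]
    w = stats[i, cv2.CC_STAT_WIDTH]
    h = stats[i, cv2.CC_STAT_HEIGHT]
    area = stats[i, cv2.CC_STAT_AREA]
    equiv_diameter = np.sqrt(4 * area / np.pi)  # diameter of a circle with the same area
    print(f"particle {i}: area={area}px, bbox=({x},{y},{w},{h}), "
          f"equivalent diameter={equiv_diameter:.1f}px")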
