人体检测不统一

2024-04-24 05:16:28 发布

您现在位置:Python中文网/ 问答频道 /正文

我使用树莓派 3 Model B 和 PiCamera 做了一个实时视频的人体检测与计数程序。以下是我们的代码:

# import the necessary packages
from __future__ import print_function
from imutils.object_detection import non_max_suppression
from imutils import paths
import numpy as np
import argparse
import imutils
import cv2
import RPi.GPIO as GPIO

from threading import Thread

from picamera.array import PiRGBArray
from picamera import PiCamera
import time

# initialize the camera and grab a reference to the raw camera capture
# (low resolution keeps HOG detection fast enough on a Raspberry Pi 3)
resX = 240
resY = 180
camera = PiCamera()
camera.resolution = (resX,resY)
camera.framerate = 10
rawCapture = PiRGBArray(camera, size=(resX, resY))

# initialize textIn and textOut values
# (running totals of people that crossed the counting lines, drawn on each frame)
textIn = 0
textOut = 0

print(time.strftime("%H_%M_%S"))
# NOTE(review): cv2.cv.CV_FOURCC is the OpenCV 2.x API; on OpenCV 3+/4+ use the
# commented-out cv2.VideoWriter_fourcc line instead.
#fourcc = cv2.VideoWriter_fourcc(*'XVID')
fourcc = cv2.cv.CV_FOURCC(*'XVID')
# record the annotated stream to an AVI file named after the start time
out = cv2.VideoWriter(time.strftime("%H_%M_%S")+'.avi',fourcc, 20.0, (resX, resY))

# initialize the HOG descriptor/person detector
hog = cv2.HOGDescriptor()
hog.setSVMDetector(cv2.HOGDescriptor_getDefaultPeopleDetector())
detectFlag = 0
# detectCounter is a one-element list so worker threads can reset it in place
# (it counts frames since the last successful person detection)
detectCounter = [0]
# allow the camera to warmup
time.sleep(0.1)

# physical pin numbering; pin 16 drives an output while no person is detected
GPIO.setmode(GPIO.BOARD)

GPIO.setup(16, GPIO.OUT)

# methods for IN and OUT counters
def testIntersectionIn(x, y, z):

    if((x >= 75) and  (x <= 90) and (x < z[0]) and (z[0]>0)):
        print (x,z[0],"IN")
        return True
    return False

def testIntersectionOut(x, y, z):

    if((x >= 75) and  (x <= 90) and (x > z[0]) and (z[0]>0)):
        print (x,z[0],"OUT")
        return True

    return False

previousObj = (0,0)

def classfier(testImage, threadNum, capTime, detectCounter):
    """Detect people in one frame, update the IN/OUT counters, and
    write/display the annotated frame.

    testImage     -- BGR frame (the same ndarray as the global `image`).
    threadNum     -- frame index of the spawning loop (diagnostics only).
    capTime       -- capture timestamp of the frame (diagnostics only).
    detectCounter -- one-element list; reset to 0 on every detection so the
                     main loop knows a person was seen recently.

    NOTE(review): the indentation of this function was mangled in the original
    paste; the per-detection logic (center point, in/out tests, previousObj
    update) must run inside the for-loop, otherwise xA/yA/xB/yB are unbound
    when no person is detected.
    """
    global textIn, textOut, previousObj

    # run the HOG person detector and merge overlapping boxes
    (rects, weights) = hog.detectMultiScale(testImage, winStride=(8, 8),
        padding=(16, 16), scale=1.1)
    rects = np.array([[x, y, x + w, y + h] for (x, y, w, h) in rects])
    pick = non_max_suppression(rects, probs=None, overlapThresh=0.65)

    # draw the two counting lines
    cv2.line(testImage, (75, 0), (75, 192), (250, 0, 1), 2)  # blue line
    cv2.line(testImage, (90, 0), (90, 192), (0, 0, 255), 2)  # red line

    # draw the final bounding boxes and update the counters
    for (xA, yA, xB, yB) in pick:
        print("Image detected")
        print("Previous Coord : ", previousObj)
        detectCounter[0] = 0
        cv2.rectangle(testImage, (xA, yA), (xB, yB), (0, 255, 0), 2)

        # center of the detection box (// keeps integer pixel coords on
        # both Python 2 and Python 3)
        rectangleCenterPont = ((xA + xB) // 2, (yA + yB) // 2)
        cv2.circle(testImage, rectangleCenterPont, 1, (0, 0, 255), 5)
        print(rectangleCenterPont)

        # a crossing counts only when the center sits between the lines AND
        # moved relative to the previous frame's center
        if testIntersectionIn(rectangleCenterPont[0], rectangleCenterPont[1], previousObj):
            textIn += 1
        if testIntersectionOut(rectangleCenterPont[0], rectangleCenterPont[1], previousObj):
            textOut += 1

        previousObj = rectangleCenterPont

    # overlay the running totals
    cv2.putText(testImage, "In: {}".format(str(textIn)), (10, 50),
        cv2.FONT_HERSHEY_SIMPLEX, 0.5, (0, 0, 255), 2)
    cv2.putText(testImage, "Out: {}".format(str(textOut)), (10, 70),
        cv2.FONT_HERSHEY_SIMPLEX, 0.5, (0, 0, 255), 2)

    curTime = time.time()
    #print ("Total time from capture", curTime - capTime)
    out.write(testImage)
    cv2.imshow("After NMS", testImage)

# capture frames from the camera and run the detector on each one.
# NOTE(review): the original quit path called the undefined
# cleanup_stop_thread(), sys.exit() without importing sys, and the
# nonexistent Thread.stop(); a plain break followed by resource release
# is the correct shutdown.
i = 0
frameCount = 0
prevTime = time.time()
for frame in camera.capture_continuous(rawCapture, format="bgr", use_video_port=True):
    # pin 16 stays LOW for the first 10 frames after a detection
    # (classfier resets detectCounter[0] to 0 whenever it sees a person),
    # then goes HIGH while nobody is detected
    if detectCounter[0] < 10:
        GPIO.output(16, GPIO.LOW)
        print("Waiting ", detectCounter[0])
        detectCounter[0] += 1
    else:
        GPIO.output(16, GPIO.HIGH)

    image = frame.array
    captureTime = time.time()
    prevTime = captureTime

    # run detection in a worker thread; join() waits for it to finish, so
    # frames are still processed strictly one at a time (join() returns None)
    t1 = Thread(target=classfier, args=(image, i, captureTime, detectCounter))
    t1.start()
    t1.join()

    key = cv2.waitKey(1) & 0xFF

    # clear the stream in preparation for the next frame
    rawCapture.truncate(0)

    # if the `q` key was pressed, break from the loop
    if key == ord("q"):
        break

# release the video writer, close windows, and free the GPIO pins
out.release()
cv2.destroyAllWindows()
GPIO.cleanup()

在上述代码中

  • 我们在画框上画两条线
  • 求矩形的中心点
  • 当这个中心点落在这两条线之间时,我们增加了 计数器
  • 输入和输出计数器逻辑基于这两行的值

我面临的问题是

人身上的矩形框不稳定:它会在画面中的任意位置消失,然后又重新出现。例如,当人接近第一条直线时,矩形消失;当人离开直线后,矩形才再次出现。这时矩形的中心点没有落在两条线之间,所以计数器不会增加。另外在某些情况下检测发生得很晚,人刚进入画面时无法被立即检测到。

请告诉我有什么方法可以避免这种情况,使得只要有人出现在画面中,矩形框就能一直保持显示。


Tags: andthefromimageimportgpioiftime