from picamera.array import PiRGBArray
from picamera import PiCamera
from datetime import datetime
import time
import cv2
import sys
import imutils
import math  # for the sqrt in the distance formula

# Create the Haar cascades
frontalfaceCascade = cv2.CascadeClassifier("haarcascade_frontalface_default.xml")
profilefaceCascade = cv2.CascadeClassifier("haarcascade_profileface.xml")

face = [0, 0, 0, 0]  # This will hold the rectangle (x, y, w, h) that OpenCV returns when it finds a face
center = [0, 0]      # Center of the face: a point calculated from the rectangle above
lastface = 0         # int 1-3 used to speed up detection. The script looks for a right profile face,
                     # a left profile face, or a frontal face; rather than searching for all three every
                     # time, it uses this variable to remember which it last saw and looks for that one
                     # first. If it doesn't find it, the other two are tried on the same frame. This
                     # roughly triples detection speed as long as the face hasn't moved much.

# initialize the camera and grab a reference to the raw camera capture
camera = PiCamera()
#camera.resolution = (160, 120)
camera.resolution = (320, 240)
cameracenter = (320/2, 240/2)  # center of the frame, used to pick the centermost face
camera.framerate = 32
rawCapture = PiRGBArray(camera, size=(320, 240))


def distance(p0, p1):
    # Euclidean distance between two points
    return math.sqrt((p0[0] - p1[0])**2 + (p0[1] - p1[1])**2)


def search_rightprofile(i):
    return profilefaceCascade.detectMultiScale(i, 1.3, 4,
        (cv2.cv.CV_HAAR_DO_CANNY_PRUNING + cv2.cv.CV_HAAR_FIND_BIGGEST_OBJECT + cv2.cv.CV_HAAR_DO_ROUGH_SEARCH),
        (30, 30))


def search_leftprofile(i):
    # The profile cascade only matches one side, so mirror the frame to find the other profile
    revimage = cv2.flip(i, 1)
    return profilefaceCascade.detectMultiScale(revimage, 1.3, 4,
        (cv2.cv.CV_HAAR_DO_CANNY_PRUNING + cv2.cv.CV_HAAR_FIND_BIGGEST_OBJECT + cv2.cv.CV_HAAR_DO_ROUGH_SEARCH),
        (30, 30))


def search_frontface(i):
    return frontalfaceCascade.detectMultiScale(i, 1.3, 4,
        (cv2.cv.CV_HAAR_DO_CANNY_PRUNING + cv2.cv.CV_HAAR_FIND_BIGGEST_OBJECT + cv2.cv.CV_HAAR_DO_ROUGH_SEARCH),
        (30, 30))


# allow the camera to warm up
time.sleep(0.1)
lastTime = time.time()*1000.0

# capture frames from the camera
for frame in camera.capture_continuous(rawCapture, format="bgr", use_video_port=True):
    # grab the raw NumPy array representing the image
    image = frame.array

    faceFound = False  # Set to True once a face has been found on THIS frame. We search for a face
                       # three different ways, and once we have found one there is no reason to keep looking.
    faces = ()

    # First scan: look for the kind of face we saw last time
    if lastface == 1:
        faces = search_rightprofile(image)
        if faces != ():
            faceFound = True
            lastface = 1
    elif lastface == 2:
        faces = search_leftprofile(image)
        if faces != ():
            faceFound = True
            lastface = 2
    else:
        faces = search_frontface(image)
        if faces != ():
            lastface = 3
            faceFound = True

    # Second scan: try the next kind of face
    if not faceFound:
        if lastface == 1:
            faces = search_frontface(image)
            if faces != ():
                lastface = 3
                faceFound = True
        elif lastface == 2:
            faces = search_rightprofile(image)
            if faces != ():
                faceFound = True
                lastface = 1
        else:
            faces = search_leftprofile(image)
            if faces != ():
                faceFound = True
                lastface = 2

    # Third scan: try the remaining kind of face
    if not faceFound:
        if lastface == 1:
            faces = search_leftprofile(image)
            if faces != ():
                faceFound = True
                lastface = 2
        elif lastface == 2:
            faces = search_frontface(image)
            if faces != ():
                lastface = 3
                faceFound = True
        else:
            faces = search_rightprofile(image)
            if faces != ():
                faceFound = True
                lastface = 1

    if not faceFound:
        print time.time()*1000.0 - lastTime, " no faces found."
        lastTime = time.time()*1000.0
        # clear the stream in preparation for the next frame
        rawCapture.truncate(0)
        continue

    print time.time()*1000.0 - lastTime, " {} faces found.".format(len(faces))
    lastTime = time.time()*1000.0

    # Draw a circle around each face
    for (x, y, w, h) in faces:
        cv2.circle(image, (x + w/2, y + h/2), int((w + h)/3), (255, 255, 255), 1)

    # Temporary: save the annotated image (the tmp/ directory must already exist; cv2.imwrite will not create it)
    cv2.imwrite("tmp/img.{}.png".format(datetime.now().strftime("%Y%m%d.%H%M%S.%f")), image)

    # clear the stream in preparation for the next frame
    rawCapture.truncate(0)

    # Find the centermost face
    curdistance = 1000000  # larger than any distance inside the picture
    for f in faces:
        x, y, w, h = f
        tmpcenter = [(w/2 + x), (h/2 + y)]  # we are given a corner point plus width and height; we need the center
        tmpdistance = distance(tmpcenter, cameracenter)
        if tmpdistance < curdistance:
            print "Face closer to center detected. New target location: ({}, {}) - distance: {}".format(tmpcenter[0], tmpcenter[1], tmpdistance)
            center = tmpcenter
            curdistance = tmpdistance  # remember this distance so only faces closer to center replace the target
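
# Note: search_leftprofile() runs the cascade on a mirrored copy of the frame,
# so the rectangles it returns are expressed in the mirrored coordinate system.
# Below is a minimal sketch of mapping such a detection back onto the original
# frame; it assumes a frame width of 320 px (matching camera.resolution above)
# and is not called anywhere in this script.
def mirror_detection(f, frame_width=320):
    x, y, w, h = f
    # mirror the x coordinate of the box; y, width and height are unchanged
    return (frame_width - x - w, y, w, h)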
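
# Portability note: the cv2.cv.CV_HAAR_* flags used above exist only in the OpenCV 2.x
# Python bindings. Under OpenCV 3 or later (an assumption; not the environment this
# script targets), the cv2.cv module is gone and the same call would use the
# cv2.CASCADE_* constants instead, e.g.:
#
#   profilefaceCascade.detectMultiScale(i, 1.3, 4,
#       cv2.CASCADE_DO_CANNY_PRUNING | cv2.CASCADE_FIND_BIGGEST_OBJECT | cv2.CASCADE_DO_ROUGH_SEARCH,
#       (30, 30))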