
Updated to target tracking.

Fred Damstra 7 years ago
parent
commit
ec1e01c061
1 file changed, 45 insertions, 30 deletions

facetracker.py  +45 -30

@@ -1,3 +1,4 @@
+#! /usr/bin/python3
 from picamera.array import PiRGBArray
 from picamera import PiCamera
 from datetime import datetime
@@ -24,15 +25,27 @@ lastface = 0                # int 1-3 used to speed up detection. The script is
 # initialize the camera and grab a reference to the raw camera capture
 camera = PiCamera()
 #camera.resolution = (160, 120)
-camera.resolution = (640,480)
+#camera.resolution = (640,480)
+camera.resolution = (1024,768)
 cameracenter = (camera.resolution[0]/2, camera.resolution[1]/2)
 camera.framerate = 32
 rawCapture = PiRGBArray(camera, camera.resolution)
 
+# Points to the last place we saw a face
+target = ( camera.resolution[0]/2, camera.resolution[1]/2 )
+
 # Fisheye corrections. See https://medium.com/@kennethjiang/calibrate-fisheye-lens-using-opencv-333b05afa0b0
-DIM=(640, 480)
-K=np.array([[363.787052141742, 0.0, 332.09761373599576], [0.0, 362.23769923959975, 238.35982850966641], [0.0, 0.0, 1.0]])
-D=np.array([[-0.019982864934848042], [-0.10107557279423625], [0.20401597940960342], [-0.1406464201639892]])
+# 640x480:
+#correct_fisheye = False
+#DIM=(640, 480)
+#K=np.array([[363.787052141742, 0.0, 332.09761373599576], [0.0, 362.23769923959975, 238.35982850966641], [0.0, 0.0, 1.0]])
+#D=np.array([[-0.019982864934848042], [-0.10107557279423625], [0.20401597940960342], [-0.1406464201639892]])
+
+# 1024x768:
+correct_fisheye = True
+DIM=(1024, 768)
+K=np.array([[583.6639649321671, 0.0, 518.0139106134624], [0.0, 580.8039721094127, 384.32095600935503], [0.0, 0.0, 1.0]])
+D=np.array([[0.0028045742945672475], [-0.14423839478882694], [0.23715105072799644], [-0.1400677375634837]])
 
 def distance(p0, p1):
     return math.sqrt((p0[0] - p1[0])**2 + (p0[1] - p1[1])**2)
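The undistort() helper that consumes DIM, K, and D is defined elsewhere in facetracker.py and is not part of this diff; a minimal sketch of what it presumably looks like, assuming the cv2.fisheye remapping recipe from the linked write-up and a balance argument like the 0.8 passed in the capture loop below:

# Sketch only: relies on the module-level cv2, np, K, D, and DIM already set up above.
def undistort(img, balance=0.0):
    dim = img.shape[:2][::-1]          # (width, height) of the incoming frame
    scaled_K = K * dim[0] / DIM[0]     # rescale K if the frame size differs from the calibration size
    scaled_K[2][2] = 1.0               # the bottom-right element of K must stay 1
    new_K = cv2.fisheye.estimateNewCameraMatrixForUndistortRectify(
        scaled_K, D, dim, np.eye(3), balance=balance)
    map1, map2 = cv2.fisheye.initUndistortRectifyMap(
        scaled_K, D, np.eye(3), new_K, dim, cv2.CV_16SC2)
    return cv2.remap(img, map1, map2, interpolation=cv2.INTER_LINEAR,
                     borderMode=cv2.BORDER_CONSTANT)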
@@ -78,7 +91,8 @@ for frame in camera.capture_continuous(rawCapture, format="bgr", use_video_port=
         # and occupied/unoccupied text
     image = frame.array
     image = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY) # convert to greyscale
-    image = undistort(image, 0.8)
+    if correct_fisheye:
+        image = undistort(image, 0.8)
     faces = ();
     
     faceFound = False        # This variable is set to true if, on THIS loop a face has already been found
@@ -138,36 +152,37 @@ for frame in camera.capture_continuous(rawCapture, format="bgr", use_video_port=
                 lastface = 1
 
 
-    if not faceFound:
-        print("{}: no faces found.".format(time.time()*1000.0-lastTime))
-        lastTime = time.time()*1000.0
-        # clear the stream in preparation for the next frame
-        rawCapture.truncate(0)
-        continue; 
-
-    print("{}: {} faces found. Type: {}".format(time.time()*1000.0-lastTime, len(faces), lastface))
-        
-    lastTime = time.time()*1000.0
+    if faceFound:
+        print("{}: {} faces found. Type: {}".format(time.time()*1000.0-lastTime, len(faces), lastface))
 
-    # Draw a rectangle around the faces
-    for (x, y, w, h) in faces:
-        cv2.circle(image, (int(x+w/2), int(y+h/2)), int((w+h)/3), (255, 255, 255), 1)
+        # Draw a circle around each detected face
+        for (x, y, w, h) in faces:
+            cv2.circle(image, (int(x+w/2), int(y+h/2)), int((w+h)/3), (255, 255, 255), 1)
 
-    # Temporary, save the image
-    cv2.imwrite("tmp/img.{}.png".format(datetime.now().strftime("%Y%m%d.%H%M%S.%f")), image)
+        # Temporary, save the image
+        cv2.imwrite("tmp/img.{}.png".format(datetime.now().strftime("%Y%m%d.%H%M%S.%f")), image)
+
+        # Find the centermost face
+        curdistance = 1000000 # Outside the dimensions of the picture
+        for f in faces:
+            x,y,w,h = f
+            tmpcenter   = [(w/2+x),(h/2+y)]        # we are given an x,y corner point and a width and height, we need the center
+            tmpdistance = distance(tmpcenter, cameracenter)
+            if(tmpdistance < curdistance):
+                print("Face closer to center detected. New target location: ({}, {}) - distance: {}".format(tmpcenter[0],tmpcenter[1],tmpdistance))
+                curdistance = tmpdistance   # remember the best distance so later faces must beat it
+                center = tmpcenter
+        target = center
+    else: # No face found
+        print("{}: no faces found. Continuing with existing target ({}, {})".format(time.time()*1000.0-lastTime, target[0], target[1]))
 
     # clear the stream in preparation for the next frame
     rawCapture.truncate(0)
-   
-    # Find teh centermost face
-    curdistance = 1000000 # Outside the dimensions of the picture
-    for f in faces:
-        x,y,w,h = f
-        tmpcenter   = [(w/2+x),(h/2+y)]        # we are given an x,y corner point and a width and height, we need the center
-        tmpdistance = distance(tmpcenter, cameracenter)
-        if(tmpdistance < curdistance):
-            print("Face closer to center detected. New target location: ({}, {}) - distance: {}".format(tmpcenter[0],tmpcenter[1],tmpdistance))
-            center = tmpcenter;
+    lastTime = time.time()*1000.0
+       
+    # Determine directions and distance
+    travel = [(cameracenter[0] - target[0])/camera.resolution[0], (cameracenter[1] - target[1])/camera.resolution[1]]
+    print("To move horizontal: {}, vertical: {}".format(travel[0], travel[1]))
+
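With the parenthesization fixed, travel holds the target's offset from the frame center as a fraction of the frame size, so each component falls roughly in [-0.5, 0.5]. A quick standalone check of the same math with an illustrative, hypothetical face position:

# Illustrative only: hypothetical target location, same calculation as the travel line above.
resolution = (1024, 768)
cameracenter = (resolution[0]/2, resolution[1]/2)          # (512.0, 384.0)
target = (768, 192)                                        # pretend the centermost face landed here
travel = [(cameracenter[0] - target[0]) / resolution[0],   # -0.25: face is a quarter frame right of center
          (cameracenter[1] - target[1]) / resolution[1]]   #  0.25: face is a quarter frame above center (image y grows downward)
print("To move horizontal: {}, vertical: {}".format(travel[0], travel[1]))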