# facetracker.py
# Standard library
import math  # for the Euclidean distance formula
import sys
import time
from datetime import datetime

# Third-party
import cv2
import imutils
import numpy as np  # was "import np", which fails: the code below uses np.array / np.eye
from picamera import PiCamera
from picamera.array import PiRGBArray
# ---- Detector setup ---------------------------------------------------------
# Create the haar cascades. NOTE(review): CascadeClassifier does not raise when
# the XML file is missing — it silently yields an empty classifier; the files
# are expected to sit in the script's working directory.
frontalfaceCascade = cv2.CascadeClassifier("haarcascade_frontalface_default.xml")
profilefaceCascade = cv2.CascadeClassifier("haarcascade_profileface.xml")
face = [0,0,0,0] # This will hold the array that OpenCV returns when it finds a face: (makes a rectangle)
center = [0,0] # Center of the face: a point calculated from the above variable
lastface = 0 # int 1-3 used to speed up detection. The script is looking for a right profile face,
# a left profile face, or a frontal face; rather than searching for all three every time,
# it uses this variable to remember which it last saw, and looks for that again. If it
# doesn't find it, it's set back to zero and on the next loop it will search for all three.
# This basically triples the detect speed so long as the face hasn't moved much.
# (0 = none yet, 1 = right profile, 2 = left profile, 3 = frontal)

# ---- Camera setup -----------------------------------------------------------
# initialize the camera and grab a reference to the raw camera capture
camera = PiCamera()
#camera.resolution = (160, 120)
camera.resolution = (640,480)
# Pixel coordinates of the frame center — the tracking target the closest face
# is measured against in the main loop.
cameracenter = (camera.resolution[0]/2, camera.resolution[1]/2)
camera.framerate = 32
rawCapture = PiRGBArray(camera, camera.resolution)

# Fisheye corrections. See https://medium.com/@kennethjiang/calibrate-fisheye-lens-using-opencv-333b05afa0b0
# DIM is the resolution the calibration was captured at; K is the camera
# intrinsic matrix and D the fisheye distortion coefficients produced by that
# calibration run (specific to this one lens/sensor).
DIM=(640, 480)
K=np.array([[363.787052141742, 0.0, 332.09761373599576], [0.0, 362.23769923959975, 238.35982850966641], [0.0, 0.0, 1.0]])
D=np.array([[-0.019982864934848042], [-0.10107557279423625], [0.20401597940960342], [-0.1406464201639892]])
  31. def distance(p0, p1):
  32. return math.sqrt((p0[0] - p1[0])**2 + (p0[1] - p1[1])**2)
  33. def search_rightprofile(i):
  34. # return profilefaceCascade.detectMultiScale(i,1.3,4,(cv2.CV_HAAR_DO_CANNY_PRUNING + cv2.CV_HAAR_FIND_BIGGEST_OBJECT + cv2.CV_HAAR_DO_ROUGH_SEARCH),(30,30))
  35. return profilefaceCascade.detectMultiScale(i)
  36. def search_leftprofile(i):
  37. revimage = cv2.flip(i, 1) # Flip the image
  38. # return profilefaceCascade.detectMultiScale(i,1.3,4,(cv2.CV_HAAR_DO_CANNY_PRUNING + cv2.CV_HAAR_FIND_BIGGEST_OBJECT + cv2.CV_HAAR_DO_ROUGH_SEARCH),(30,30))
  39. return profilefaceCascade.detectMultiScale(i)
  40. def search_frontface(i):
  41. # return frontalfaceCascade.detectMultiScale(i,1.3,4,(cv2.CV_HAAR_DO_CANNY_PRUNING + cv2.CV_HAAR_FIND_BIGGEST_OBJECT + cv2.CV_HAAR_DO_ROUGH_SEARCH),(30,30))
  42. return frontalfaceCascade.detectMultiScale(i)
  43. def undistort(i, balance=0.0, dim2=None, dim3=None):
  44. # Sanity Check the source dimensions
  45. dim1 = i.shape[:2][::-1] #dim1 is the dimension of input image to un-distort
  46. assert dim1[0]/dim1[1] == DIM[0]/DIM[1], "Image to undistort needs to have same aspect ratio as the ones used in calibration"
  47. if not dim2:
  48. dim2 = dim1
  49. if not dim3:
  50. dim3 = dim1
  51. scaled_K = K * dim1[0] / DIM[0] # The values of K is to scale with image dimension.
  52. scaled_K[2][2] = 1.0 # Except that K[2][2] is always 1.0
  53. # This is how scaled_K, dim2 and balance are used to determine the final K used to un-distort image. OpenCV document failed to make this clear!
  54. new_K = cv2.fisheye.estimateNewCameraMatrixForUndistortRectify(scaled_K, D, dim2, np.eye(3), balance=balance)
  55. map1, map2 = cv2.fisheye.initUndistortRectifyMap(scaled_K, D, np.eye(3), new_K, dim3, cv2.CV_16SC2)
  56. return cv2.remap(i, map1, map2, interpolation=cv2.INTER_LINEAR, borderMode=cv2.BORDER_CONSTANT)
# allow the camera to warmup
time.sleep(0.1)
lastTime = time.time()*1000.0  # ms timestamp of the previous report, for per-frame timing

# capture frames from the camera
for frame in camera.capture_continuous(rawCapture, format="bgr", use_video_port=True):
    # grab the raw NumPy array representing the image, then initialize the timestamp
    # and occupied/unoccupied text
    image = frame.array
    image = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY) # convert to greyscale
    image = undistort(image, 0.8)
    faces = ();
    faceFound = False # This variable is set to true if, on THIS loop a face has already been found
    # We search for a face three diffrent ways, and if we have found one already
    # there is no reason to keep looking.
    # NOTE(review): the comparisons "faces != ()" below rely on NumPy's
    # deprecated array-vs-tuple comparison when detectMultiScale returns an
    # ndarray; "len(faces) > 0" would be the robust test — confirm before changing.
    # First Scan — try whichever detector matched last frame (lastface: 1=right
    # profile, 2=left profile, otherwise frontal).
    if lastface == 1:
        faces = search_rightprofile(image)
        if faces != ():
            faceFound=True
            lastface = 1
    elif lastface == 2:
        faces = search_leftprofile(image)
        if faces != ():
            faceFound=True
            lastface = 2
    else:
        faces = search_frontface(image)
        if faces != ():
            lastface = 3
            faceFound=True
    # Second scan — the last-seen detector missed, so try the next candidate.
    if not faceFound:
        if lastface == 1:
            faces = search_frontface(image)
            if faces != ():
                lastface = 3
                faceFound=True
        elif lastface == 2:
            faces = search_rightprofile(image)
            if faces != ():
                faceFound=True
                lastface = 1
        else:
            faces = search_leftprofile(image)
            if faces != ():
                faceFound=True
                lastface = 2
    # Third scan — last resort: the one detector not yet tried this frame.
    if not faceFound:
        if lastface == 1:
            faces = search_leftprofile(image)
            if faces != ():
                faceFound=True
                lastface = 2
        elif lastface == 2:
            faces = search_frontface(image)
            if faces != ():
                lastface = 3
                faceFound=True
        else:
            faces = search_rightprofile(image)
            if faces != ():
                faceFound=True
                lastface = 1
    # No detector matched: report the miss, recycle the capture buffer, next frame.
    if not faceFound:
        print("{}: no faces found.".format(time.time()*1000.0-lastTime))
        lastTime = time.time()*1000.0
        # clear the stream in preparation for the next frame
        rawCapture.truncate(0)
        continue;
    print("{}: {} faces found. Type: {}".format(time.time()*1000.0-lastTime, len(faces), lastface))
    lastTime = time.time()*1000.0
    # Draw a circle around each face (centered on the box, radius from w+h)
    for (x, y, w, h) in faces:
        cv2.circle(image, (int(x+w/2), int(y+h/2)), int((w+h)/3), (255, 255, 255), 1)
    # Temporary, save the image
    cv2.imwrite("tmp/img.{}.png".format(datetime.now().strftime("%Y%m%d.%H%M%S.%f")), image)
    # clear the stream in preparation for the next frame
    rawCapture.truncate(0)
    # Find the centermost face (closest to cameracenter)
    curdistance = 1000000 # Outside the dimensions of the picture
    for f in faces:
        x,y,w,h = f
        tmpcenter = [(w/2+x),(h/2+y)] # we are given an x,y corner point and a width and height, we need the center
        tmpdistance = distance(tmpcenter, cameracenter)
        if(tmpdistance < curdistance):
            print("Face closer to center detected. New target location: ({}, {}) - distance: {}".format(tmpcenter[0],tmpcenter[1],tmpdistance))
            center = tmpcenter;
            # NOTE(review): curdistance is never updated in the visible part of
            # this loop, so as written every face beats the initial 1000000 and
            # the LAST face wins, not the closest. The file appears to continue
            # past this excerpt — confirm whether "curdistance = tmpdistance"
            # follows before treating this as a bug.