=='''Basic Principle'''==
*The MicroMV captures a face and obtains the face coordinates.
*The pid.py algorithm computes the angle the servos need to turn to and sends it to the Microduino over the serial port (the data frame is sketched below).
*The Microduino receives the data and drives the servos.
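
The two boards only exchange a one-line ASCII frame per detected face, [pan,tilt,width] terminated by a newline, at 115200 baud. The snippet below is a minimal sketch of that frame format, runnable on any PC with plain Python (no hardware involved); the real sender and receiver appear in the listings further down.
<source lang="python">
# Illustrative sketch of the "[pan,tilt,width]\n" frame exchanged between
# the MicroMV and the Microduino (values here are made up).

def build_frame(pan_servo_pos, tilt_servo_pos, face_width):
    # Same format string as in face_detection1024.py below
    return "[%d,%d,%d]\n" % (pan_servo_pos, tilt_servo_pos, face_width)

def parse_frame(line):
    # Mirrors the sscanf("[%d,%d,%d]", ...) call in the Microduino sketch below
    pan, tilt, width = line.strip().strip("[]").split(",")
    return int(pan), int(tilt), int(width)

frame = build_frame(95, 88, 40)
print(parse_frame(frame))   # (95, 88, 40)
</source>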
 
  
 
=='''Preparing the MicroMV Code'''==
Copy pid.py onto the USB drive that appears when the MicroMV is plugged in.<br />
pid.py:
<source lang="python">
from pyb import millis
from math import pi, isnan

class PID:
    _kp = _ki = _kd = _integrator = _imax = 0
    _last_error = _last_derivative = _last_t = 0
    _RC = 1/(2 * pi * 20)

    def __init__(self, p=0, i=0, d=0, imax=0):
        self._kp = float(p)
        self._ki = float(i)
        self._kd = float(d)
        self._imax = abs(imax)
        self._last_derivative = float('nan')

    def get_pid(self, error, scaler):
        tnow = millis()
        dt = tnow - self._last_t
        output = 0
        if self._last_t == 0 or dt > 1000:
            dt = 0
            self.reset_I()
        self._last_t = tnow
        delta_time = float(dt) / float(1000)

        # Proportional term
        output += error * self._kp

        # Derivative term, low-pass filtered with RC = 1/(2*pi*20)
        if abs(self._kd) > 0 and dt > 0:
            if isnan(self._last_derivative):
                derivative = 0
                self._last_derivative = 0
            else:
                derivative = (error - self._last_error) / delta_time
            derivative = self._last_derivative + \
                         ((delta_time / (self._RC + delta_time)) * \
                          (derivative - self._last_derivative))
            self._last_error = error
            self._last_derivative = derivative
            output += self._kd * derivative

        output *= scaler

        # Integral term, clamped to +/- imax
        if abs(self._ki) > 0 and dt > 0:
            self._integrator += (error * self._ki) * scaler * delta_time
            if self._integrator < -self._imax: self._integrator = -self._imax
            elif self._integrator > self._imax: self._integrator = self._imax
            output += self._integrator

        return output

    def reset_I(self):
        self._integrator = 0
        self._last_derivative = float('nan')
</source>
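
To make the interface of the PID class concrete, here is a minimal usage sketch (run it on the MicroMV, e.g. from the OpenMV IDE, since the class relies on pyb.millis). The gains and the 1/3 scaling are the same ones used by face_detection1024.py below; the error value is just an example number.
<source lang="python">
# Minimal usage sketch for the PID class above (example values only).
from pid import PID

pan_pid = PID(p=0.42, i=0.01, imax=90)   # same gains as in face_detection1024.py

pan_error = 24                           # face centre is 24 px right of the image centre
pan_output = pan_pid.get_pid(pan_error, 1) / 3
pan_servo_pos = 90                       # current pan angle in degrees
pan_servo_pos = int(pan_servo_pos - pan_output)
print(pan_servo_pos)                     # 86 on a first call (P term only: 24*0.42/3)
</source>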
 
Replace main.py on the MicroMV's USB drive with face_detection1024.py so that it runs as the main program.<br />
face_detection1024.py:
<source lang="python">
# Face Detection Example
#
# This example shows off the built-in face detection feature of the OpenMV Cam.
#
# Face detection works by using the Haar Cascade feature detector on an image. A
# Haar Cascade is a series of simple area contrast checks. For the built-in
# frontalface detector there are 25 stages of checks with each stage having
# hundreds of checks a piece. Haar Cascades run fast because later stages are
# only evaluated if previous stages pass. Additionally, your OpenMV Cam uses
# a data structure called the integral image to quickly execute each area
# contrast check in constant time (the reason for feature detection being
# grayscale only is because of the space requirement for the integral image).

import sensor, time, image
from pyb import UART
from pid import PID
from pyb import LED
import json

red_led   = LED(1)
green_led = LED(2)
blue_led  = LED(3)
ir_led    = LED(4)

pan_pid  = PID(p=0.42, i=0.01, imax=90)
tilt_pid = PID(p=0.22, i=0.005, imax=90)

pan_servo_pos  = 90
tilt_servo_pos = 90
pan_center  = 60
tilt_center = 40

# Reset sensor
sensor.reset()

# Sensor settings
sensor.set_contrast(1)
sensor.set_gainceiling(16)
# HQVGA and GRAYSCALE are the best for face tracking.
sensor.set_framesize(sensor.HQQVGA)
sensor.set_pixformat(sensor.GRAYSCALE)
sensor.set_brightness(-3)

# Load Haar Cascade
# By default this will use all stages; fewer stages is faster but less accurate.
face_cascade = image.HaarCascade("frontalface", stages=25)
print(face_cascade)

# FPS clock
clock = time.clock()
uart = UART(4, 115200)

while (True):
    clock.tick()

    # Capture snapshot
    img = sensor.snapshot()

    # Find objects.
    # Note: Lower scale factor scales-down the image more and detects smaller objects.
    # Higher threshold results in a higher detection rate, with more false positives.
    objects = img.find_features(face_cascade, threshold=0.75, scale_factor=1.25)

    # Draw objects
    for r in objects:
        red_led.on()
        img.draw_rectangle(r)

        # Centre of the detected face rectangle (x, y, w, h)
        pan_center  = int(r[0] + r[2]/2)
        tilt_center = int(r[1] + r[3]/2)
        img.draw_cross(pan_center, tilt_center)

        # Error = offset of the face centre from the image centre
        pan_error  = pan_center - img.width()/2
        tilt_error = tilt_center - img.height()/2

        # PID correction and new servo angles
        pan_output  = pan_pid.get_pid(pan_error, 1)/3
        tilt_output = tilt_pid.get_pid(tilt_error, 1)
        pan_servo_pos  = int(pan_servo_pos - pan_output)
        tilt_servo_pos = int(tilt_servo_pos + tilt_output)

        # Clamp the angles to a safe servo range
        if pan_servo_pos >= 170:
            pan_servo_pos = 170
        if pan_servo_pos <= 10:
            pan_servo_pos = 10
        if tilt_servo_pos >= 160:
            tilt_servo_pos = 160
        if tilt_servo_pos <= 30:
            tilt_servo_pos = 30

        # Send "[pan,tilt,width]\n" to the Microduino over the UART
        output_str = "[%d,%d,%d]" % (pan_servo_pos, tilt_servo_pos, r[2])
        print('you send:', output_str)
        uart.write(output_str + '\n')
        red_led.off()
</source>
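
find_features() returns each face as a rectangle r = (x, y, w, h); the loop turns that into a centre point and measures how far it sits from the image centre. A quick desk-check with made-up numbers (the frame size here is only an example; the script uses img.width() and img.height()):
<source lang="python">
# Desk-check of the tracking geometry, with made-up numbers (no hardware needed).
r = (40, 20, 36, 36)                 # (x, y, w, h) of a detected face
W, H = 120, 80                       # example frame size

pan_center  = int(r[0] + r[2]/2)     # 58
tilt_center = int(r[1] + r[3]/2)     # 38
pan_error   = pan_center - W/2       # -2.0 -> face centre is 2 px left of the image centre
tilt_error  = tilt_center - H/2      # -2.0 -> face centre is 2 px above the image centre
print(pan_center, tilt_center, pan_error, tilt_error)
</source>
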
Running it in the IDE looks like this:<br/>
[[File:microMVFaceDetector1.png||600px|left]]<br style="clear: left"/>
Any face in the frame is marked with a rectangle.
 
  
=='''Preparing the Microduino Code'''==
<source lang="cpp">
#include <Servo.h>

#define servoPin1  A2   // pan servo
#define servoPin2  A3   // tilt servo

Servo myservo1;
Servo myservo2;

String msg = "";
int16_t data0, data1, data2, data3;

void setup() {
  pinMode(A0, INPUT);
  Serial.begin(115200);          // same baud rate as the MicroMV UART
  myservo1.attach(servoPin1);
  myservo2.attach(servoPin2);
  myservo1.write(90);            // start both servos centred
  myservo2.write(90);
}

void loop() {
  if (Serial.available() > 0) {
    // Read one "[pan,tilt,width]" frame and parse the three integers
    msg = Serial.readStringUntil('\n');
    sscanf(msg.c_str(), "[%d,%d,%d]", &data0, &data1, &data2);
    Serial.print(data0);
    Serial.print(" ,");
    Serial.print(data1);
    Serial.print(" ,");
    Serial.println(data2);
    // Drive the pan and tilt servos to the received angles
    myservo1.write(data0);
    myservo2.write(data1);
    delay(1);
  }
}
</source>
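
Before wiring the MicroMV to the Microduino, the sketch above can be exercised on its own. The snippet below is an optional test idea, not part of the original tutorial: it assumes the Microduino shows up as a USB serial port on a PC (substitute your own port name) and that the pyserial package is installed. It simply sends a few frames in the same format the MicroMV would.
<source lang="python">
# Optional PC-side test (assumes pyserial: pip install pyserial).
# Replace the port name with your own, e.g. "COM3" on Windows.
import time
import serial

port = serial.Serial("/dev/ttyUSB0", 115200, timeout=1)
time.sleep(2)                                  # let the board reset after the port opens
for pan, tilt in [(90, 90), (120, 80), (60, 100)]:
    frame = "[%d,%d,%d]\n" % (pan, tilt, 40)   # 40 is a dummy face width
    port.write(frame.encode())
    print(port.readline().decode().strip())    # echo printed back by the sketch
    time.sleep(0.5)
port.close()
</source>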
 
  
[[MicroMV 简介|Back to the MicroMV index page]]
