This project uses the face recognition service of Baidu AI Cloud (百度智能云) to implement automatic face recognition. Baidu AI Cloud was chosen for its efficient API and stable service quality, which help developers build face recognition applications quickly. The project has two parts:
1. Image processing and face recognition: Baidu AI Cloud's face recognition service identifies faces in an image through a simple API call.
2. Real-time camera capture and saving: a Qt user interface controls opening and closing a USB camera, displays the captured video stream in real time, and saves captured frames as images.
1. Development host operating system: Ubuntu 18.04, 64-bit
2. Cross-compilation toolchain: arm-poky-linux-gnueabi-gcc 5.3.0
3. Bootloader version on the board: u-boot-2016.03
4. Kernel version on the board: linux-4.1.15
5. Qt version ported to the board: qt5.6.2
Image Processing and Face Recognition
Baidu AI Cloud website: cloud.baidu.com
The face recognition in this project is implemented through the Baidu AI Cloud platform. First open the Baidu AI Cloud website -> select Face & Body (人臉與人體) -> Face Recognition. The overall workflow is:
1. Create a face library
2. Recognize face images online
3. Recognize local face images
The program below implements the local case: it Base64-encodes a local picture, POSTs it to the face search API, and extracts the matched user_id from the JSON reply.
#include <stdio.h>
#include <iostream>
#include <string.h>
#include <curl/curl.h>
#include <json/json.h>
#include <fstream>
#include <memory>
#include <cstdlib>
#include <regex>
#include <string>
#include <unistd.h>
#include <cstdio>
// libcurl write callback: append the received chunk to the std::string passed
// through CURLOPT_WRITEDATA and report how many bytes were consumed.
inline size_t onWriteData(void * buffer, size_t size, size_t nmemb, void * userp)
{
    std::string * str = static_cast<std::string *>(userp);
    str->append((char *)buffer, size * nmemb);
    return size * nmemb;
}
// Read a file and return its contents encoded as Base64; when urlencoded is
// true, the result is additionally percent-encoded for use in a POST body.
std::string getFileBase64Content(const char * path, bool urlencoded=false)
{
    const std::string base64_chars =
        "ABCDEFGHIJKLMNOPQRSTUVWXYZ"
        "abcdefghijklmnopqrstuvwxyz"
        "0123456789+/";
    std::string ret;
    int i = 0;
    int j = 0;
    unsigned char char_array_3[3];
    unsigned char char_array_4[4];
    const unsigned int bufferSize = 1024;
    unsigned char buffer[bufferSize];
    std::ifstream file_read;
    file_read.open(path, std::ios::binary);
    while (!file_read.eof()) {
        file_read.read((char *) buffer, bufferSize * sizeof(char));
        int num = file_read.gcount();
        int m = 0;
        // Consume the chunk three input bytes at a time, emitting four
        // Base64 characters per group.
        while (num--) {
            char_array_3[i++] = buffer[m++];
            if (i == 3) {
                char_array_4[0] = (char_array_3[0] & 0xfc) >> 2;
                char_array_4[1] = ((char_array_3[0] & 0x03) << 4) + ((char_array_3[1] & 0xf0) >> 4);
                char_array_4[2] = ((char_array_3[1] & 0x0f) << 2) + ((char_array_3[2] & 0xc0) >> 6);
                char_array_4[3] = char_array_3[2] & 0x3f;
                for (i = 0; i < 4; i++)
                    ret += base64_chars[char_array_4[i]];
                i = 0;
            }
        }
    }
    file_read.close();
    // Encode the final partial group and pad with '=' as required.
    if (i) {
        for (j = i; j < 3; j++)
            char_array_3[j] = '\0';
        char_array_4[0] = (char_array_3[0] & 0xfc) >> 2;
        char_array_4[1] = ((char_array_3[0] & 0x03) << 4) + ((char_array_3[1] & 0xf0) >> 4);
        char_array_4[2] = ((char_array_3[1] & 0x0f) << 2) + ((char_array_3[2] & 0xc0) >> 6);
        char_array_4[3] = char_array_3[2] & 0x3f;
        for (j = 0; j < i + 1; j++)
            ret += base64_chars[char_array_4[j]];
        while (i++ < 3)
            ret += '=';
    }
    if (urlencoded) {
        // curl_escape returns malloc'd memory that must be released with curl_free.
        char *escaped = curl_escape(ret.c_str(), ret.length());
        ret = escaped;
        curl_free(escaped);
    }
    return ret;
}
// POST the Base64-encoded image to the Baidu face search API and return the
// raw JSON response body.
std::string performCurlRequest(const char *pic_path, const std::string &token)
{
    std::string result;
    char *web_curl = nullptr;
    CURL *curl = curl_easy_init();
    CURLcode res = CURLE_OK;
    if (asprintf(&web_curl, "https://aip.baidubce.com/rest/2.0/face/v3/search?access_token=%s", token.c_str()) < 0) {
        perror("asprintf error");
        curl_easy_cleanup(curl);
        return result;
    }
    if (curl) {
        curl_easy_setopt(curl, CURLOPT_CUSTOMREQUEST, "POST");
        curl_easy_setopt(curl, CURLOPT_URL, web_curl);
        curl_easy_setopt(curl, CURLOPT_FOLLOWLOCATION, 1L);
        curl_easy_setopt(curl, CURLOPT_DEFAULT_PROTOCOL, "HTTPS");
        // Certificate verification is disabled here for simplicity on the board;
        // a production build should verify the server certificate.
        curl_easy_setopt(curl, CURLOPT_SSL_VERIFYPEER, 0L);
        curl_easy_setopt(curl, CURLOPT_SSL_VERIFYHOST, 0L);
        struct curl_slist *headers = NULL;
        headers = curl_slist_append(headers, "Content-Type: application/json");
        curl_easy_setopt(curl, CURLOPT_HTTPHEADER, headers);
        std::string base64_image = getFileBase64Content(pic_path, true);
        std::string post_data = "image=" + base64_image + "&group_id_list=one&image_type=BASE64";
        curl_easy_setopt(curl, CURLOPT_POSTFIELDS, post_data.c_str());
        curl_easy_setopt(curl, CURLOPT_WRITEDATA, &result);
        curl_easy_setopt(curl, CURLOPT_WRITEFUNCTION, onWriteData);
        res = curl_easy_perform(curl);
        if (res != CURLE_OK)
            fprintf(stderr, "Curl request failed: %s\n", curl_easy_strerror(res));
        curl_slist_free_all(headers);
    }
    curl_easy_cleanup(curl);
    free(web_curl);
    return result;
}
int main(int argc, char *argv[])
{
    std::string result;
    std::string name;
    // access_token obtained from the Baidu AI Cloud console (expires periodically).
    std::string token = "24.2bc619cf9c09c32ce5af202ccc98c0c9.2592000.1724918062.282335-100710397";
    result = performCurlRequest("/home/root/num/1.jpg", token);
    std::string json = result;
    // Extract the matched user_id field from the JSON response.
    std::regex pattern("\"user_id\":\"(.*?)\"");
    std::smatch match;
    if (std::regex_search(json, match, pattern)) {
        name = match[1].str();
        std::cout << "read name is: " << name << std::endl;
    }
    return 0;
}
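The access_token in main() is hard-coded and will expire. As a minimal sketch, a fresh token could be fetched at startup from Baidu's OAuth endpoint; apiKey and secretKey below are placeholders for the API Key and Secret Key shown in the Baidu console, not values from this project:
// Sketch: obtain a fresh access_token from the Baidu OAuth endpoint.
std::string fetchAccessToken(const std::string &apiKey, const std::string &secretKey)
{
    std::string resp;
    std::string url = "https://aip.baidubce.com/oauth/2.0/token?grant_type=client_credentials"
                      "&client_id=" + apiKey + "&client_secret=" + secretKey;
    CURL *curl = curl_easy_init();
    if (!curl)
        return "";
    curl_easy_setopt(curl, CURLOPT_URL, url.c_str());
    curl_easy_setopt(curl, CURLOPT_SSL_VERIFYPEER, 0L);
    curl_easy_setopt(curl, CURLOPT_WRITEFUNCTION, onWriteData);   // reuse the callback above
    curl_easy_setopt(curl, CURLOPT_WRITEDATA, &resp);
    CURLcode res = curl_easy_perform(curl);
    curl_easy_cleanup(curl);
    if (res != CURLE_OK)
        return "";
    // Pull "access_token" out of the JSON reply, same regex trick as main().
    std::smatch m;
    if (std::regex_search(resp, m, std::regex("\"access_token\":\"(.*?)\"")))
        return m[1].str();
    return "";
}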
1) Building the dependency libraries
The face recognition application depends on the curl, OpenSSL, OpenCV, and JsonCpp libraries. For detailed installation steps, see:
bbs.elfboard.com/forum.php?mod=viewthread&tid=496&extra=page%3D1
2) Building the application
elf@ubuntu:~/work$ . /opt/fsl-imx-x11/4.1.15-2.0.0/environment-setup-cortexa7hf-neon-poky-linux-gnueabi
elf@ubuntu:~/work$ $CC demoFace.cpp -o demoFace -I /home/elf/work/opencv-3.4.1/install/include/ -I /home/elf/work/curl-7.71.1/install/include/ -I /home/elf/work/jsoncpp-1.9.5/install/include/ -L /home/elf/work/opencv-3.4.1/install/lib/ -L /home/elf/work/curl-7.71.1/install/lib/ -L /home/elf/work/jsoncpp-1.9.5/install/lib/ -lopencv_highgui -lopencv_core -lopencv_imgproc -lopencv_objdetect -lopencv_videoio -lopencv_imgcodecs -std=c++11 -lcurl -lcrypto -ljsoncpp -lstdc++
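Once built, demoFace can be copied to the board and run directly (the board-side copy steps mirror the camera demo below). Assuming a test image at /home/root/num/1.jpg that matches a face registered in the library, the output looks roughly like:
root@ELF1:~# ./demoFace
read name is: <user_id registered in the face library>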
Real-Time Camera Capture and Saving
1. Program design
int main(int argc, char *argv[])
{
    QApplication a(argc, argv);
    Camera w;
    w.setWindowFlags(w.windowFlags() & ~Qt::WindowMaximizeButtonHint & ~Qt::WindowMinimizeButtonHint);
    w.showMaximized();
    w.show();
    return a.exec();
}
Inside the Camera constructor, the UI is set up and the button and preview geometry are adapted to the detected screen resolution:
    ui->setupUi(this);
    timer = new QTimer;
    QDesktopWidget* desktopWidget = QApplication::desktop();
    QRect screenRect = desktopWidget->screenGeometry();
    qDebug("screen.width = %d , screen.height = %d", screenRect.width(), screenRect.height());
    this->imageWidget = new ImageWidget(this);
    this->imageWidget->setBackgroundRole(QPalette::Dark);
    this->imageWidget->setSizePolicy(QSizePolicy::Ignored, QSizePolicy::Ignored);
    this->imageWidget->setObjectName(QString::fromUtf8("imageWidget"));
    if (screenRect.width() == 800)
    {
        // 800-pixel-wide screen: smaller buttons and preview area.
        ui->pbt_start->setGeometry(60, 300, 70, 50);
        ui->pbt_stop->setGeometry(190, 300, 70, 50);
        this->imageWidget->setGeometry(QRect(5, 30, 350, 250));
    }
    else if (screenRect.width() > 800)
    {
        // Larger screens get bigger buttons and a larger preview.
        ui->pbt_start->setGeometry(80, 400, 70, 70);
        ui->pbt_stop->setGeometry(260, 400, 70, 70);
        this->imageWidget->setGeometry(QRect(6, 37, 500, 330));
    }
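The constructor fragment above and the V4L2 helpers below belong to one camera window class. For orientation, here is a minimal sketch of how such a class could be declared; only ui, timer, imageWidget and up_date() actually appear in the excerpts, the rest is assumed:
namespace Ui { class Camera; }

class Camera : public QWidget
{
    Q_OBJECT
public:
    explicit Camera(QWidget *parent = nullptr);

private slots:
    void up_date();             // timer slot: fetch one frame and repaint

private:
    Ui::Camera *ui;             // generated form containing pbt_start / pbt_stop
    QTimer *timer;              // drives periodic frame updates
    ImageWidget *imageWidget;   // custom widget that paints each frame
};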
// Open the V4L2 camera device in non-blocking mode.
void deviceOpen(void)
{
    fd = open(deviceName, O_RDWR | O_NONBLOCK, 0);
    if (-1 == fd)
    {
        QMessageBox::about(NULL, "About", "camera open error");
        exit(EXIT_FAILURE);
    }
}
// Query device capabilities, select the input, set crop, pixel format (YUYV)
// and frame rate, then initialize the memory-mapped buffers.
void deviceInit(void)
{
    struct v4l2_capability cap;
    struct v4l2_cropcap cropcap;
    struct v4l2_crop crop;
    struct v4l2_format fmt;
    struct v4l2_streamparm sparm;
    unsigned int min;
    if (-1 == xioctl(fd, VIDIOC_QUERYCAP, &cap))
    {
        if (EINVAL == errno)
        {
            QMessageBox::about(NULL, "Information", " no V4L2 device");
            exit(EXIT_FAILURE);
        }
        else
        {
            errno_exit("VIDIOC_QUERYCAP");
        }
    }
    if (!(cap.capabilities & V4L2_CAP_VIDEO_CAPTURE))
    {
        QMessageBox::about(NULL, "Information", " no video capture device");
        exit(EXIT_FAILURE);
    }
    // Select the first video input.
    struct v4l2_input input;
    input.index = 0;
    if (ioctl(fd, VIDIOC_ENUMINPUT, &input) != 0)
    {
        QMessageBox::about(NULL, "Information", "set input error");
        exit(0);
    }
    if ((ioctl(fd, VIDIOC_S_INPUT, &input)) < 0)
    {
        QMessageBox::about(NULL, "Information", "set s_input error");
        exit(0);
    }
    CLEAR(cropcap);
    cropcap.type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
    if (0 == xioctl(fd, VIDIOC_CROPCAP, &cropcap))
    {
        crop.type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
        crop.c.top = 0;
        crop.c.left = 0;
        crop.c.height = 720;
        crop.c.width = 1280;
        // Errors are ignored: cropping is not supported by every driver.
        if (-1 == xioctl(fd, VIDIOC_S_CROP, &crop))
        {
            switch (errno)
            {
            case EINVAL:
                break;
            default:
                break;
            }
        }
    }
    CLEAR(fmt);
    // Request a YUYV capture format at the desired resolution.
    fmt.type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
    fmt.fmt.pix.width = width;
    fmt.fmt.pix.height = height;
    fmt.fmt.pix.pixelformat = V4L2_PIX_FMT_YUYV;
    fmt.fmt.pix.field = V4L2_FIELD_ANY;
    if (-1 == xioctl(fd, VIDIOC_S_FMT, &fmt))
        errno_exit("VIDIOC_S_FMT");
    /* Note: VIDIOC_S_FMT may change width and height. */
    if (width != fmt.fmt.pix.width)
    {
        width = fmt.fmt.pix.width;
        //fprintf(stderr, "Image width set to %i by device %s.\n", width, deviceName);
    }
    if (height != fmt.fmt.pix.height)
    {
        height = fmt.fmt.pix.height;
        //fprintf(stderr, "Image height set to %i by device %s.\n", height, deviceName);
    }
    /* Buggy driver paranoia. */
    min = fmt.fmt.pix.width * 2;
    if (fmt.fmt.pix.bytesperline < min)
        fmt.fmt.pix.bytesperline = min;
    min = fmt.fmt.pix.bytesperline * fmt.fmt.pix.height;
    if (fmt.fmt.pix.sizeimage < min)
        fmt.fmt.pix.sizeimage = min;
    CLEAR(sparm);
    // Ask the driver for 30 frames per second.
    sparm.type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
    sparm.parm.capture.capturemode = 0;
    sparm.parm.capture.timeperframe.numerator = 1;
    sparm.parm.capture.timeperframe.denominator = 30;
    if (xioctl(fd, VIDIOC_S_PARM, &sparm) < 0)
        errno_exit("cam s parm");
    mmapInit();
}
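deviceInit() ends by calling mmapInit(), which is not listed above. A minimal sketch of the standard V4L2 memory-mapping sequence it would perform, with the buffers and n_buffers globals assumed:
// Sketch: request driver buffers and map each one into user space.
struct buffer { void *start; size_t length; };
static struct buffer *buffers;
static unsigned int n_buffers;

void mmapInit(void)
{
    struct v4l2_requestbuffers req;
    CLEAR(req);
    req.count = 4;                              // ask the driver for four buffers
    req.type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
    req.memory = V4L2_MEMORY_MMAP;
    if (-1 == xioctl(fd, VIDIOC_REQBUFS, &req))
        errno_exit("VIDIOC_REQBUFS");
    buffers = (struct buffer *)calloc(req.count, sizeof(*buffers));
    for (n_buffers = 0; n_buffers < req.count; ++n_buffers)
    {
        struct v4l2_buffer buf;
        CLEAR(buf);
        buf.type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
        buf.memory = V4L2_MEMORY_MMAP;
        buf.index = n_buffers;
        if (-1 == xioctl(fd, VIDIOC_QUERYBUF, &buf))
            errno_exit("VIDIOC_QUERYBUF");
        buffers[n_buffers].length = buf.length;
        buffers[n_buffers].start = mmap(NULL, buf.length,
                                        PROT_READ | PROT_WRITE, MAP_SHARED,
                                        fd, buf.m.offset);
        if (MAP_FAILED == buffers[n_buffers].start)
            errno_exit("mmap");
    }
}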
// Queue all mmap'ed buffers and start the capture stream.
void captureStart(void)
{
    unsigned int i;
    enum v4l2_buf_type type;
    for (i = 0; i < n_buffers; ++i)
    {
        struct v4l2_buffer buf;
        CLEAR(buf);
        buf.type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
        buf.memory = V4L2_MEMORY_MMAP;
        buf.index = i;
        if (-1 == xioctl(fd, VIDIOC_QBUF, &buf))
            errno_exit("VIDIOC_QBUF");
    }
    type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
    if (-1 == xioctl(fd, VIDIOC_STREAMON, &type))
        errno_exit("VIDIOC_STREAMON");
}
// Timer slot: read one frame from the camera and hand it to the preview widget.
void Camera::up_date()
{
    unsigned char image_buf[921600 + 54];   // 640*480*3 bytes of RGB data plus a 54-byte BMP header
    frameRead(image_buf);
    this->imageWidget->setPixmap(image_buf);
}
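frameRead() is not shown in the excerpt either. In a typical mmap-based capture loop it dequeues a filled buffer with VIDIOC_DQBUF, converts the YUYV payload to RGB for display, and immediately requeues the buffer with VIDIOC_QBUF. A minimal sketch, assuming the fd, width, height and buffers globals from the code above and the usual integer BT.601 approximation:
// Sketch: fetch one YUYV frame, convert it to RGB888 (width*height*3 bytes), requeue.
void frameRead(unsigned char *rgb_out)
{
    struct v4l2_buffer buf;
    CLEAR(buf);
    buf.type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
    buf.memory = V4L2_MEMORY_MMAP;
    if (-1 == xioctl(fd, VIDIOC_DQBUF, &buf))       // wait for a filled buffer
        errno_exit("VIDIOC_DQBUF");
    unsigned char *yuyv = (unsigned char *)buffers[buf.index].start;
    for (unsigned int i = 0, j = 0; i < width * height * 2; i += 4)
    {
        // One YUYV macropixel (Y0 U Y1 V) yields two RGB pixels.
        int u = yuyv[i + 1] - 128, v = yuyv[i + 3] - 128;
        int ys[2] = { yuyv[i], yuyv[i + 2] };
        for (int k = 0; k < 2; ++k)
        {
            int r = ys[k] + ((359 * v) >> 8);               // 359/256 ~ 1.402
            int g = ys[k] - ((88 * u + 183 * v) >> 8);      // 0.344 and 0.714
            int b = ys[k] + ((454 * u) >> 8);               // 454/256 ~ 1.772
            rgb_out[j++] = (unsigned char)(r < 0 ? 0 : (r > 255 ? 255 : r));
            rgb_out[j++] = (unsigned char)(g < 0 ? 0 : (g > 255 ? 255 : g));
            rgb_out[j++] = (unsigned char)(b < 0 ? 0 : (b > 255 ? 255 : b));
        }
    }
    if (-1 == xioctl(fd, VIDIOC_QBUF, &buf))        // hand the buffer back to the driver
        errno_exit("VIDIOC_QBUF");
}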
2. Application build and test
Build on the host with the cross-compilation environment:
elf@ubuntu:~/work/camera-demo$ . /opt/fsl-imx-x11/4.1.15-2.0.0/environment-setup-cortexa7hf-neon-poky-linux-gnueabi
elf@ubuntu:~/work/camera-demo$ qmake
elf@ubuntu:~/work/camera-demo$ make
Copy the resulting binary to the board (here from a USB drive) and run it:
root@ELF1:~# cp /run/media/sda1/camera-demo ./
root@ELF1:~# chmod 777 camera-demo
root@ELF1:~# export DISPLAY=:0.0
root@ELF1:~# ./camera-demo
At this point the camera application can be combined with the face recognition program above: when the camera view contains a face, a frame of the live video is captured to a local image file, which is then uploaded to Baidu AI Cloud for recognition. This completes the camera-based face recognition flow; a sketch of the glue step follows.
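One possible shape for that glue step, assuming OpenCV (already among the build dependencies) and reusing the image path that demoFace reads; snapshotForRecognition is a hypothetical helper:
#include <opencv2/opencv.hpp>

// Sketch: save one RGB frame where demoFace expects it (/home/root/num/1.jpg).
bool snapshotForRecognition(const unsigned char *rgb, int width, int height)
{
    cv::Mat frame(height, width, CV_8UC3, (void *)rgb);  // wrap the RGB buffer
    cv::Mat bgr;
    cv::cvtColor(frame, bgr, cv::COLOR_RGB2BGR);         // imwrite expects BGR order
    return cv::imwrite("/home/root/num/1.jpg", bgr);
}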
項(xiàng)目測(cè)試
Building on this, the applications are refined one step further: the face recognition program saves the recognized face information to a text file, and the camera application reads the face information from that file and displays it in the Qt interface, as sketched below.
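A minimal sketch of that handoff; the exchange file /tmp/face_name.txt and the label ui->label_name are assumptions, since the source does not give them:
// demoFace side: persist the matched user_id after the regex match in main().
std::ofstream out("/tmp/face_name.txt", std::ios::trunc);
out << name << std::endl;

// Qt side: read the name back and display it.
QFile f("/tmp/face_name.txt");
if (f.open(QIODevice::ReadOnly | QIODevice::Text)) {
    QString who = QString::fromUtf8(f.readAll()).trimmed();
    ui->label_name->setText(who);
}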
1. Make sure a USB camera and a screen are connected to the board
2. Set up the Wi-Fi connection
root@ELF1:~# elf1_cmd_wifi.sh -i 8723 -s <account> -p <password>
3. Run the applications
root@ELF1:~# ./camera-demo &
root@ELF1:~# ./demoFace
Click the "start" button; the recognition result is shown in the figure below.