How to Implement Smile Detection with Face Recognition in Python
Published: 2025-11-08  Author: 千家信息网 editor
This article introduces "How to implement smile detection with face recognition in Python". Many people run into questions on this topic in everyday practice, so the editor has gone through a range of material and put together a simple, workable procedure, in the hope that it clears up those questions. Follow along and let's learn it together!
I. Experiment Preparation
Environment setup:
pip install tensorflow==1.2.0
pip install keras==2.0.6
pip install dlib==19.6.1
pip install h5py==2.10
If you are working in a freshly created virtual environment, you also need to install the following packages:
pip install opencv_python==4.1.2.30
pip install pillow
pip install matplotlib
pip install h5py
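After installing, you can optionally confirm the versions from a Python shell. A minimal sanity check (it only assumes the packages above installed cleanly):

import tensorflow
import keras
import dlib
import cv2
import h5py

print(tensorflow.__version__)  # expected: 1.2.0
print(keras.__version__)       # expected: 2.0.6
print(dlib.__version__)        # expected: 19.6.1
print(cv2.__version__)         # expected: 4.1.2
print(h5py.__version__)        # expected: 2.10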
We use the genki-4k dataset.
It can be downloaded from here.
II. Image Preprocessing
Open the dataset:

We need to detect the face in each image and crop it out.
The code is as follows:
import dlib         # face detection library dlib
import numpy as np  # numerical processing library numpy
import cv2          # image processing library OpenCV
import os

# dlib detector and 68-point landmark predictor
detector = dlib.get_frontal_face_detector()
predictor = dlib.shape_predictor('D:\\shape_predictor_68_face_landmarks.dat')

# directory of the images to read
path_read = "C:\\Users\\28205\\Documents\\Tencent Files\\2820535964\\FileRecv\\genki4k\\files"
num = 0
for file_name in os.listdir(path_read):
    # aa is the full path of the image
    aa = (path_read + "/" + file_name)
    # the path contains non-ASCII characters, so read the file via imdecode
    img = cv2.imdecode(np.fromfile(aa, dtype=np.uint8), cv2.IMREAD_UNCHANGED)
    # get the image height and width
    img_shape = img.shape
    img_height = img_shape[0]
    img_width = img_shape[1]

    # directory used to store the cropped single-face images
    path_save = "C:\\Users\\28205\\Documents\\Tencent Files\\2820535964\\FileRecv\\genki4k\\files1"
    # dlib detection
    dets = detector(img, 1)
    print("number of faces:", len(dets))
    for k, d in enumerate(dets):
        if len(dets) > 1:
            continue
        num = num + 1
        # rectangle corners: (x, y) of top-left and bottom-right
        pos_start = tuple([d.left(), d.top()])
        pos_end = tuple([d.right(), d.bottom()])
        # size of the bounding rectangle
        height = d.bottom() - d.top()
        width = d.right() - d.left()
        # create an empty image the size of the face
        img_blank = np.zeros((height, width, 3), np.uint8)
        for i in range(height):
            if d.top() + i >= img_height:      # guard against going out of bounds
                continue
            for j in range(width):
                if d.left() + j >= img_width:  # guard against going out of bounds
                    continue
                img_blank[i][j] = img[d.top() + i][d.left() + j]
        img_blank = cv2.resize(img_blank, (200, 200), interpolation=cv2.INTER_CUBIC)
        # correct way to write to a path containing non-ASCII characters
        cv2.imencode('.jpg', img_blank)[1].tofile(path_save + "\\" + "file" + str(num) + ".jpg")

The running result is as follows:
A total of 3878 cropped face images were produced.
No face was detected in some images, so those were not cropped and saved; you can add extra images yourself to make up for them.
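To see how many crops were actually written, you can count the files in the output folder. A small sketch (path_save is the same output directory used in the cropping script above):

import os

path_save = "C:\\Users\\28205\\Documents\\Tencent Files\\2820535964\\FileRecv\\genki4k\\files1"
print("cropped faces saved:", len(os.listdir(path_save)))  # expect roughly 3878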
III. Splitting the Dataset
Code:
import os, shutil

# path of the original (cropped) dataset
original_dataset_dir = 'C:\\Users\\28205\\Documents\\Tencent Files\\2820535964\\FileRecv\\genki4k\\files1'
# path of the new, split dataset
base_dir = 'C:\\Users\\28205\\Documents\\Tencent Files\\2820535964\\FileRecv\\genki4k\\files2'
os.mkdir(base_dir)

# directories for the training, validation and test images
train_dir = os.path.join(base_dir, 'train')
os.mkdir(train_dir)
validation_dir = os.path.join(base_dir, 'validation')
os.mkdir(validation_dir)
test_dir = os.path.join(base_dir, 'test')
os.mkdir(test_dir)

train_smile_dir = os.path.join(train_dir, 'smile')
os.mkdir(train_smile_dir)
train_unsmile_dir = os.path.join(train_dir, 'unsmile')
os.mkdir(train_unsmile_dir)
validation_smile_dir = os.path.join(validation_dir, 'smile')
os.mkdir(validation_smile_dir)
validation_unsmile_dir = os.path.join(validation_dir, 'unsmile')
os.mkdir(validation_unsmile_dir)
test_smile_dir = os.path.join(test_dir, 'smile')
os.mkdir(test_smile_dir)
test_unsmile_dir = os.path.join(test_dir, 'unsmile')
os.mkdir(test_unsmile_dir)

# copy smile images file1.jpg - file899.jpg to train_smile_dir
fnames = ['file{}.jpg'.format(i) for i in range(1, 900)]
for fname in fnames:
    src = os.path.join(original_dataset_dir, fname)
    dst = os.path.join(train_smile_dir, fname)
    shutil.copyfile(src, dst)

# copy smile images file900.jpg - file1349.jpg to validation_smile_dir
fnames = ['file{}.jpg'.format(i) for i in range(900, 1350)]
for fname in fnames:
    src = os.path.join(original_dataset_dir, fname)
    dst = os.path.join(validation_smile_dir, fname)
    shutil.copyfile(src, dst)

# copy smile images file1350.jpg - file1799.jpg to test_smile_dir
fnames = ['file{}.jpg'.format(i) for i in range(1350, 1800)]
for fname in fnames:
    src = os.path.join(original_dataset_dir, fname)
    dst = os.path.join(test_smile_dir, fname)
    shutil.copyfile(src, dst)

# copy non-smile images file2127.jpg - file2999.jpg to train_unsmile_dir
fnames = ['file{}.jpg'.format(i) for i in range(2127, 3000)]
for fname in fnames:
    src = os.path.join(original_dataset_dir, fname)
    dst = os.path.join(train_unsmile_dir, fname)
    shutil.copyfile(src, dst)

# copy non-smile images file3000.jpg - file3877.jpg to validation_unsmile_dir
fnames = ['file{}.jpg'.format(i) for i in range(3000, 3878)]
for fname in fnames:
    src = os.path.join(original_dataset_dir, fname)
    dst = os.path.join(validation_unsmile_dir, fname)
    shutil.copyfile(src, dst)

# copy non-smile images to test_unsmile_dir
# (note: this reuses the same range as the validation set, as in the original script)
fnames = ['file{}.jpg'.format(i) for i in range(3000, 3878)]
for fname in fnames:
    src = os.path.join(original_dataset_dir, fname)
    dst = os.path.join(test_unsmile_dir, fname)
    shutil.copyfile(src, dst)

The running result is as follows:

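As a quick check that the split looks right, you can print how many images ended up in each folder. A small sketch (it only assumes the base_dir and the smile/unsmile sub-folders created by the script above):

import os

base_dir = 'C:\\Users\\28205\\Documents\\Tencent Files\\2820535964\\FileRecv\\genki4k\\files2'
for split in ('train', 'validation', 'test'):
    for label in ('smile', 'unsmile'):
        folder = os.path.join(base_dir, split, label)
        print(split, label, ':', len(os.listdir(folder)))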
IV. Using a CNN to Recognize Smiling and Non-Smiling Faces
1. Building the model
Code:
# build the model
from keras import layers
from keras import models

model = models.Sequential()
model.add(layers.Conv2D(32, (3, 3), activation='relu', input_shape=(150, 150, 3)))
model.add(layers.MaxPooling2D((2, 2)))
model.add(layers.Conv2D(64, (3, 3), activation='relu'))
model.add(layers.MaxPooling2D((2, 2)))
model.add(layers.Conv2D(128, (3, 3), activation='relu'))
model.add(layers.MaxPooling2D((2, 2)))
model.add(layers.Conv2D(128, (3, 3), activation='relu'))
model.add(layers.MaxPooling2D((2, 2)))
model.add(layers.Flatten())
model.add(layers.Dense(512, activation='relu'))
model.add(layers.Dense(1, activation='sigmoid'))
model.summary()  # inspect the architecture
Running result:
2. Normalization
Code:
# normalization
from keras import optimizers

model.compile(loss='binary_crossentropy',
              optimizer=optimizers.RMSprop(lr=1e-4),
              metrics=['acc'])

from keras.preprocessing.image import ImageDataGenerator

train_datagen = ImageDataGenerator(rescale=1./255)
validation_datagen = ImageDataGenerator(rescale=1./255)
test_datagen = ImageDataGenerator(rescale=1./255)

train_generator = train_datagen.flow_from_directory(
        # target directory
        train_dir,
        # all images will be resized to 150x150
        target_size=(150, 150),
        batch_size=20,
        # since we use binary_crossentropy loss, we need binary labels
        class_mode='binary')

validation_generator = test_datagen.flow_from_directory(
        validation_dir,
        target_size=(150, 150),
        batch_size=20,
        class_mode='binary')

test_generator = test_datagen.flow_from_directory(
        test_dir,
        target_size=(150, 150),
        batch_size=20,
        class_mode='binary')

for data_batch, labels_batch in train_generator:
    print('data batch shape:', data_batch.shape)
    print('labels batch shape:', labels_batch)
    break
# class indices: 'smile': 0, 'unsmile': 1

3. Data augmentation
Code:
# data augmentation
datagen = ImageDataGenerator(
      rotation_range=40,
      width_shift_range=0.2,
      height_shift_range=0.2,
      shear_range=0.2,
      zoom_range=0.2,
      horizontal_flip=True,
      fill_mode='nearest')

# visualize how augmentation changes an image
import matplotlib.pyplot as plt
# module with image preprocessing utilities
from keras.preprocessing import image

fnames = [os.path.join(train_smile_dir, fname) for fname in os.listdir(train_smile_dir)]
img_path = fnames[3]
img = image.load_img(img_path, target_size=(150, 150))
x = image.img_to_array(img)
x = x.reshape((1,) + x.shape)

i = 0
for batch in datagen.flow(x, batch_size=1):
    plt.figure(i)
    imgplot = plt.imshow(image.array_to_img(batch[0]))
    i += 1
    if i % 4 == 0:
        break
plt.show()
Running result:
4. Building the network
Code:
# build the network
model = models.Sequential()
model.add(layers.Conv2D(32, (3, 3), activation='relu', input_shape=(150, 150, 3)))
model.add(layers.MaxPooling2D((2, 2)))
model.add(layers.Conv2D(64, (3, 3), activation='relu'))
model.add(layers.MaxPooling2D((2, 2)))
model.add(layers.Conv2D(128, (3, 3), activation='relu'))
model.add(layers.MaxPooling2D((2, 2)))
model.add(layers.Conv2D(128, (3, 3), activation='relu'))
model.add(layers.MaxPooling2D((2, 2)))
model.add(layers.Flatten())
model.add(layers.Dropout(0.5))
model.add(layers.Dense(512, activation='relu'))
model.add(layers.Dense(1, activation='sigmoid'))
model.compile(loss='binary_crossentropy',
              optimizer=optimizers.RMSprop(lr=1e-4),
              metrics=['acc'])

# normalization plus augmentation for the training data
train_datagen = ImageDataGenerator(
    rescale=1./255,
    rotation_range=40,
    width_shift_range=0.2,
    height_shift_range=0.2,
    shear_range=0.2,
    zoom_range=0.2,
    horizontal_flip=True,)

test_datagen = ImageDataGenerator(rescale=1./255)

train_generator = train_datagen.flow_from_directory(
        # target directory
        train_dir,
        # all images will be resized to 150x150
        target_size=(150, 150),
        batch_size=32,
        # since we use binary_crossentropy loss, we need binary labels
        class_mode='binary')

validation_generator = test_datagen.flow_from_directory(
        validation_dir,
        target_size=(150, 150),
        batch_size=32,
        class_mode='binary')

history = model.fit_generator(
      train_generator,
      steps_per_epoch=100,
      epochs=60,
      validation_data=validation_generator,
      validation_steps=50)

model.save('smileAndUnsmile1.h5')

# accuracy and loss curves for the augmented training and validation sets
acc = history.history['acc']
val_acc = history.history['val_acc']
loss = history.history['loss']
val_loss = history.history['val_loss']
epochs = range(len(acc))

plt.plot(epochs, acc, 'bo', label='Training acc')
plt.plot(epochs, val_acc, 'b', label='Validation acc')
plt.title('Training and validation accuracy')
plt.legend()

plt.figure()
plt.plot(epochs, loss, 'bo', label='Training loss')
plt.plot(epochs, val_loss, 'b', label='Validation loss')
plt.title('Training and validation loss')
plt.legend()

plt.show()

Running result:
Training is fairly slow, so expect to wait a while.
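Because training takes so long, it can be worth saving intermediate weights so an interrupted run is not wasted. A minimal sketch using Keras callbacks (it reuses the model, train_generator and validation_generator defined above; the checkpoint file name is just an example):

from keras.callbacks import ModelCheckpoint, EarlyStopping

callbacks = [
    # keep the best weights seen so far, judged by validation accuracy
    ModelCheckpoint('smileAndUnsmile_best.h5', monitor='val_acc', save_best_only=True),
    # stop early if validation loss has not improved for 5 epochs
    EarlyStopping(monitor='val_loss', patience=5),
]

history = model.fit_generator(
      train_generator,
      steps_per_epoch=100,
      epochs=60,
      validation_data=validation_generator,
      validation_steps=50,
      callbacks=callbacks)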
5. Testing on a single image
Code:
# judge whether a single image shows a smiling or a non-smiling face
import cv2
from keras.preprocessing import image
from keras.models import load_model
import numpy as np

# load the trained model
model = load_model('smileAndUnsmile1.h5')

# path of a local image
img_path = 'test.jpg'
img = image.load_img(img_path, target_size=(150, 150))

img_tensor = image.img_to_array(img) / 255.0
img_tensor = np.expand_dims(img_tensor, axis=0)
prediction = model.predict(img_tensor)
print(prediction)
if prediction[0][0] > 0.5:
    result = 'non-smiling face'
else:
    result = 'smiling face'
print(result)

Running result:
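Besides single images, you can also measure accuracy on the whole held-out test split with the test_generator built earlier. A short sketch (model and test_generator come from the code above; the step count of 50 is an assumption, adjust it to your test-set size and batch size):

test_loss, test_acc = model.evaluate_generator(test_generator, steps=50)
print('test loss:', test_loss)
print('test acc:', test_acc)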
6. Real-time testing with a camera
Code:
# detect faces in a video file or camera stream
import cv2
from keras.preprocessing import image
from keras.models import load_model
import numpy as np
import dlib
from PIL import Image

model = load_model('smileAndUnsmile1.h5')
detector = dlib.get_frontal_face_detector()
video = cv2.VideoCapture(0)
font = cv2.FONT_HERSHEY_SIMPLEX

def rec(img):
    gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
    dets = detector(gray, 1)
    if dets is not None:
        for face in dets:
            left = face.left()
            top = face.top()
            right = face.right()
            bottom = face.bottom()
            cv2.rectangle(img, (left, top), (right, bottom), (0, 255, 0), 2)
            # crop the face, resize to the network input size and normalize
            img1 = cv2.resize(img[top:bottom, left:right], dsize=(150, 150))
            img1 = cv2.cvtColor(img1, cv2.COLOR_BGR2RGB)
            img1 = np.array(img1) / 255.
            img_tensor = img1.reshape(-1, 150, 150, 3)
            prediction = model.predict(img_tensor)
            if prediction[0][0] > 0.5:
                result = 'unsmile'
            else:
                result = 'smile'
            cv2.putText(img, result, (left, top), font, 2, (0, 255, 0), 2, cv2.LINE_AA)
    cv2.imshow('Video', img)

while video.isOpened():
    res, img_rd = video.read()
    if not res:
        break
    rec(img_rd)
    if cv2.waitKey(1) & 0xFF == ord('q'):
        break
video.release()
cv2.destroyAllWindows()

Running result:
V. Using Dlib Facial Features to Recognize Smiling and Non-Smiling Faces
Code:
import cv2                  # image processing library OpenCV
import dlib                 # face detection library dlib
import numpy as np          # numerical processing library numpy

class face_emotion():

    def __init__(self):
        self.detector = dlib.get_frontal_face_detector()
        self.predictor = dlib.shape_predictor("shape_predictor_68_face_landmarks.dat")
        self.cap = cv2.VideoCapture(0)
        self.cap.set(3, 480)   # set the capture width
        self.cnt = 0           # screenshot counter

    def learning_face(self):
        while(self.cap.isOpened()):
            flag, im_rd = self.cap.read()
            k = cv2.waitKey(1)
            # convert to grayscale
            img_gray = cv2.cvtColor(im_rd, cv2.COLOR_BGR2GRAY)
            faces = self.detector(img_gray, 0)
            font = cv2.FONT_HERSHEY_SIMPLEX

            # if at least one face was detected
            if(len(faces) != 0):
                # use the 68 landmarks of every detected face
                for k_face, d in enumerate(faces):
                    cv2.rectangle(im_rd, (d.left(), d.top()), (d.right(), d.bottom()), (0, 0, 255))
                    self.face_width = d.right() - d.left()

                    shape = self.predictor(im_rd, d)
                    mouth_width = (shape.part(54).x - shape.part(48).x) / self.face_width
                    mouth_height = (shape.part(66).y - shape.part(62).y) / self.face_width

                    # collect the brow points for this face
                    line_brow_x = []
                    line_brow_y = []
                    brow_sum = 0
                    frown_sum = 0
                    for j in range(17, 21):
                        brow_sum += (shape.part(j).y - d.top()) + (shape.part(j + 5).y - d.top())
                        frown_sum += shape.part(j + 5).x - shape.part(j).x
                        line_brow_x.append(shape.part(j).x)
                        line_brow_y.append(shape.part(j).y)

                    # fit a line through the brow points; its slope describes the brow tilt
                    tempx = np.array(line_brow_x)
                    tempy = np.array(line_brow_y)
                    z1 = np.polyfit(tempx, tempy, 1)
                    self.brow_k = -round(z1[0], 3)

                    brow_height = (brow_sum / 10) / self.face_width  # brow height ratio
                    brow_width = (frown_sum / 5) / self.face_width   # brow spacing ratio

                    eye_sum = (shape.part(41).y - shape.part(37).y +
                               shape.part(40).y - shape.part(38).y +
                               shape.part(47).y - shape.part(43).y +
                               shape.part(46).y - shape.part(44).y)
                    eye_height = (eye_sum / 4) / self.face_width

                    if mouth_height >= 0.03 and eye_height < 0.56:
                        cv2.putText(im_rd, "smile", (d.left(), d.bottom() + 20),
                                    cv2.FONT_HERSHEY_SIMPLEX, 2, (0, 255, 0), 2, 4)
                    if mouth_height < 0.03 and self.brow_k > -0.3:
                        cv2.putText(im_rd, "unsmile", (d.left(), d.bottom() + 20),
                                    cv2.FONT_HERSHEY_SIMPLEX, 2, (0, 255, 0), 2, 4)

                cv2.putText(im_rd, "Face-" + str(len(faces)), (20, 50), font, 0.6, (0, 0, 255), 1, cv2.LINE_AA)
            else:
                cv2.putText(im_rd, "No Face", (20, 50), font, 0.6, (0, 0, 255), 1, cv2.LINE_AA)

            im_rd = cv2.putText(im_rd, "S: screenshot", (20, 450), font, 0.6, (255, 0, 255), 1, cv2.LINE_AA)
            im_rd = cv2.putText(im_rd, "Q: quit", (20, 470), font, 0.6, (255, 0, 255), 1, cv2.LINE_AA)

            # press s to save a screenshot
            if (k & 0xFF) == ord('s'):
                self.cnt += 1
                cv2.imwrite("screenshoot" + str(self.cnt) + ".jpg", im_rd)
            # press q to quit
            if (k & 0xFF) == ord('q'):
                break

            # display the window
            cv2.imshow("Face Recognition", im_rd)

        self.cap.release()
        cv2.destroyAllWindows()

if __name__ == "__main__":
    my_face = face_emotion()
    my_face.learning_face()

Running result:
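If you want to inspect the geometric features on a single still image rather than a live stream, the same mouth-opening ratio can be printed directly, which makes the 0.03 threshold easier to tune. A minimal sketch (the image file name is just an example; the threshold mirrors the code above):

import cv2
import dlib

detector = dlib.get_frontal_face_detector()
predictor = dlib.shape_predictor("shape_predictor_68_face_landmarks.dat")

img = cv2.imread("test.jpg")                     # example image path
gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)

for d in detector(gray, 1):
    shape = predictor(img, d)
    face_width = d.right() - d.left()
    # inner-lip opening (landmarks 62 and 66) relative to face width
    mouth_height = (shape.part(66).y - shape.part(62).y) / face_width
    print("mouth_height ratio:", round(mouth_height, 3))
    print("smile" if mouth_height >= 0.03 else "unsmile")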
This concludes the walkthrough of "How to implement smile detection with face recognition in Python"; hopefully it has cleared up your questions. Pairing theory with practice is the best way to learn, so go try it out. For more related content, keep following this site, where the editor will keep bringing you practical articles!