Building a Simple Face Recognition System in Python
Preface
Quite a few new readers have followed me recently, so thank you all. Most of you are university students, largely in the lower years and mostly working on course projects. As someone who has been through it, I'd encourage you to set aside regular study time rather than cramming at the last minute. Today we'll build a face recognition system in Python, relying mainly on the dlib library. Because we call a ready-made library directly, we can skip the data collection and model training steps from the earlier tutorials.
Bilibili video: 用300行代碼實現人臉識別系統 (a face recognition system in 300 lines of code)
Gitee repository: face_dlib_py37_42: 用300行代碼開發一個人臉識別系統-42 (gitee.com)
How It Works
Face recognition is not quite the same as object detection. With a conventional object detection model, the model can only find the kinds of targets it was trained on: a detector trained on plants obviously cannot detect animals. Face recognition works differently. Take your phone as an example: you enroll your face only once, with no training at all, and it recognizes you accurately from then on. The underlying idea is that a face recognition model extracts a feature vector (embedding) from your face, then compares the face detected in real time against the faces stored in the database; if the similarity exceeds a certain threshold, the comparison counts as a match. This is only a simplified description: the systems in modern phones and door-access controls are far more complex and secure, and go beyond simple 2-D face recognition.
In summary, the process breaks down into the following steps:
1. Upload a face to the database
2. Detect the face
3. Compare it against the database and return the result
Here is a simple diagram I made to illustrate the flow, followed by a small code sketch of the comparison step.
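To make the compare step concrete, here is a minimal sketch of descriptor extraction and matching with dlib. It assumes you have downloaded dlib's standard model files (shape_predictor_68_face_landmarks.dat and dlib_face_recognition_resnet_model_v1.dat); the 0.6 Euclidean-distance threshold is the value commonly used with dlib's 128-D face descriptor, and you may want to tune it for your own data.

import dlib
import numpy as np

# Standard dlib models (downloaded separately from dlib.net)
detector = dlib.get_frontal_face_detector()
shape_predictor = dlib.shape_predictor("shape_predictor_68_face_landmarks.dat")
face_encoder = dlib.face_recognition_model_v1("dlib_face_recognition_resnet_model_v1.dat")

def face_descriptor(img_rgb):
    """Return the 128-D descriptor of the first face found, or None."""
    faces = detector(img_rgb, 1)  # upsample once so small faces are found
    if not faces:
        return None
    landmarks = shape_predictor(img_rgb, faces[0])
    return np.array(face_encoder.compute_face_descriptor(img_rgb, landmarks))

def is_same_person(desc_a, desc_b, threshold=0.6):
    """Euclidean distance below the threshold counts as a match."""
    return np.linalg.norm(desc_a - desc_b) < threshold

With two face images loaded as RGB arrays, is_same_person(face_descriptor(img_a), face_descriptor(img_b)) tells you whether they are likely the same person.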
Code Implementation
Without further ado, here is the implementation. I've already uploaded the code to Gitee, so just download it from the link at the top of this post.
If you don't know how to set up a Python environment, see my guide on configuring an Anaconda virtual environment in PyCharm.
Creating the virtual environment
Before creating the virtual environment, download the source code from the Gitee link at the top of this post.
This project needs a Python 3.7 virtual environment; the commands are as follows:
conda create -n face python==3.7.3
conda activate face
Installing the required libraries
pip install -r requirements.txt
Now you're ready to run the face recognition system!
Just execute the main file:
python UI.py
Alternatively, run it directly in PyCharm as shown below.
First, upload the face you want to recognize to the database.
Then use the second tab's video-detection feature to recognize faces in real time; a rough sketch of this enroll-and-match flow follows below.
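The two tabs boil down to an enroll step and a match loop. Here is a rough, self-contained sketch of that flow (not the repo's actual implementation): enrollment saves one descriptor per person as a .npy file in a hypothetical face_db/ folder, and the live loop compares each webcam frame against every saved descriptor, reusing the face_descriptor and is_same_person helpers sketched earlier. Note that dlib expects RGB images while OpenCV reads BGR.

import glob
import os

import cv2
import numpy as np

def enroll(name, image_path, db_dir="face_db"):
    """Save one descriptor for this person as <name>.npy (sketch only)."""
    img_rgb = cv2.cvtColor(cv2.imread(image_path), cv2.COLOR_BGR2RGB)
    desc = face_descriptor(img_rgb)
    if desc is not None:
        os.makedirs(db_dir, exist_ok=True)
        np.save(os.path.join(db_dir, f"{name}.npy"), desc)

def recognize_stream(db_dir="face_db"):
    """Compare each webcam frame against every saved descriptor."""
    db = {os.path.splitext(os.path.basename(p))[0]: np.load(p)
          for p in glob.glob(os.path.join(db_dir, "*.npy"))}
    cap = cv2.VideoCapture(0)
    while cap.isOpened():
        ok, frame = cap.read()
        if not ok:
            break
        desc = face_descriptor(cv2.cvtColor(frame, cv2.COLOR_BGR2RGB))
        if desc is not None:
            matches = [n for n, d in db.items() if is_same_person(desc, d)]
            print(matches or "unknown")
        if cv2.waitKey(1) & 0xFF == ord('q'):  # press q to quit
            break
    cap.release()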
The full code is as follows:
# -*- coding: utf-8 -*-
"""
-------------------------------------------------
Project Name: yolov5-jungong
File Name: window.py
Author: chenming
Create Date: 2021/11/8
Description: Graphical interface; runs detection on the webcam, video files and images
-------------------------------------------------
"""
# Load the model once when the UI starts; intermediate results go into the tmp directory
import shutil
import PyQt5.QtCore
from PyQt5.QtGui import *
from PyQt5.QtCore import *
from PyQt5.QtWidgets import *
import threading
import argparse
import os
import sys
from pathlib import Path
import cv2
import torch
import torch.backends.cudnn as cudnn
import os.path as osp

FILE = Path(__file__).resolve()
ROOT = FILE.parents[0]  # YOLOv5 root directory
if str(ROOT) not in sys.path:
    sys.path.append(str(ROOT))  # add ROOT to PATH
ROOT = Path(os.path.relpath(ROOT, Path.cwd()))  # relative

from models.common import DetectMultiBackend
from utils.datasets import IMG_FORMATS, VID_FORMATS, LoadImages, LoadStreams
from utils.general import (LOGGER, check_file, check_img_size, check_imshow, check_requirements, colorstr,
                           increment_path, non_max_suppression, print_args, scale_coords, strip_optimizer,
                           xyxy2xywh)
from utils.plots import Annotator, colors, save_one_box
from utils.torch_utils import select_device, time_sync


# Main window class
class MainWindow(QTabWidget):
    # The basic configuration stays the same; only the third ("About") tab changes
    def __init__(self):
        # Initialize the UI
        super().__init__()
        self.setWindowTitle('Target detection system')
        self.resize(1200, 800)
        self.setWindowIcon(QIcon("images/UI/lufei.png"))
        # Image-reading settings
        self.output_size = 480
        self.img2predict = ""
        self.device = 'cpu'
        # Initialize the video-reading thread
        self.vid_source = '0'  # webcam by default
        self.stopEvent = threading.Event()
        self.webcam = True
        self.stopEvent.clear()
        self.model = self.model_load(weights="runs/train/exp_yolov5s/weights/best.pt",
                                     device="cpu")  # todo device on which the model is loaded
        self.initUI()
        self.reset_vid()

    '''
    *** Model initialization ***
    '''
    @torch.no_grad()
    def model_load(self,
                   weights="",  # model.pt path(s)
                   device='',  # cuda device, i.e. 0 or 0,1,2,3 or cpu
                   half=False,  # use FP16 half-precision inference
                   dnn=False,  # use OpenCV DNN for ONNX inference
                   ):
        device = select_device(device)
        half &= device.type != 'cpu'  # half precision only supported on CUDA
        model = DetectMultiBackend(weights, device=device, dnn=dnn)
        stride, names, pt, jit, onnx = model.stride, model.names, model.pt, model.jit, model.onnx
        # Half
        half &= pt and device.type != 'cpu'  # half precision only supported by PyTorch on CUDA
        if pt:
            model.model.half() if half else model.model.float()
        print("模型加載完成!")
        return model

    '''
    *** UI initialization ***
    '''
    def initUI(self):
        # Fonts
        font_title = QFont('楷體', 16)
        font_main = QFont('楷體', 14)
        # Image-detection tab: two buttons, upload image and run detection
        img_detection_widget = QWidget()
        img_detection_layout = QVBoxLayout()
        img_detection_title = QLabel("圖片識別功能")
        img_detection_title.setFont(font_title)
        mid_img_widget = QWidget()
        mid_img_layout = QHBoxLayout()
        self.left_img = QLabel()
        self.right_img = QLabel()
        self.left_img.setPixmap(QPixmap("images/UI/up.jpeg"))
        self.right_img.setPixmap(QPixmap("images/UI/right.jpeg"))
        self.left_img.setAlignment(Qt.AlignCenter)
        self.right_img.setAlignment(Qt.AlignCenter)
        mid_img_layout.addWidget(self.left_img)
        mid_img_layout.addStretch(0)
        mid_img_layout.addWidget(self.right_img)
        mid_img_widget.setLayout(mid_img_layout)
        up_img_button = QPushButton("上傳圖片")
        det_img_button = QPushButton("開始檢測")
        up_img_button.clicked.connect(self.upload_img)
        det_img_button.clicked.connect(self.detect_img)
        up_img_button.setFont(font_main)
        det_img_button.setFont(font_main)
        up_img_button.setStyleSheet("QPushButton{color:white}"
                                    "QPushButton:hover{background-color: rgb(2,110,180);}"
                                    "QPushButton{background-color:rgb(48,124,208)}"
                                    "QPushButton{border:2px}"
                                    "QPushButton{border-radius:5px}"
                                    "QPushButton{padding:5px 5px}"
                                    "QPushButton{margin:5px 5px}")
        det_img_button.setStyleSheet("QPushButton{color:white}"
                                     "QPushButton:hover{background-color: rgb(2,110,180);}"
                                     "QPushButton{background-color:rgb(48,124,208)}"
                                     "QPushButton{border:2px}"
                                     "QPushButton{border-radius:5px}"
                                     "QPushButton{padding:5px 5px}"
                                     "QPushButton{margin:5px 5px}")
        img_detection_layout.addWidget(img_detection_title, alignment=Qt.AlignCenter)
        img_detection_layout.addWidget(mid_img_widget, alignment=Qt.AlignCenter)
        img_detection_layout.addWidget(up_img_button)
        img_detection_layout.addWidget(det_img_button)
        img_detection_widget.setLayout(img_detection_layout)

        # todo Video-detection tab
        # The video tab has a simple top-to-bottom layout
        vid_detection_widget = QWidget()
        vid_detection_layout = QVBoxLayout()
        vid_title = QLabel("視頻檢測功能")
        vid_title.setFont(font_title)
        self.vid_img = QLabel()
        self.vid_img.setPixmap(QPixmap("images/UI/up.jpeg"))
        vid_title.setAlignment(Qt.AlignCenter)
        self.vid_img.setAlignment(Qt.AlignCenter)
        self.webcam_detection_btn = QPushButton("攝像頭實時監測")
        self.mp4_detection_btn = QPushButton("視頻文件檢測")
        self.vid_stop_btn = QPushButton("停止檢測")
        self.webcam_detection_btn.setFont(font_main)
        self.mp4_detection_btn.setFont(font_main)
        self.vid_stop_btn.setFont(font_main)
        self.webcam_detection_btn.setStyleSheet("QPushButton{color:white}"
                                                "QPushButton:hover{background-color: rgb(2,110,180);}"
                                                "QPushButton{background-color:rgb(48,124,208)}"
                                                "QPushButton{border:2px}"
                                                "QPushButton{border-radius:5px}"
                                                "QPushButton{padding:5px 5px}"
                                                "QPushButton{margin:5px 5px}")
        self.mp4_detection_btn.setStyleSheet("QPushButton{color:white}"
                                             "QPushButton:hover{background-color: rgb(2,110,180);}"
                                             "QPushButton{background-color:rgb(48,124,208)}"
                                             "QPushButton{border:2px}"
                                             "QPushButton{border-radius:5px}"
                                             "QPushButton{padding:5px 5px}"
                                             "QPushButton{margin:5px 5px}")
        self.vid_stop_btn.setStyleSheet("QPushButton{color:white}"
                                        "QPushButton:hover{background-color: rgb(2,110,180);}"
                                        "QPushButton{background-color:rgb(48,124,208)}"
                                        "QPushButton{border:2px}"
                                        "QPushButton{border-radius:5px}"
                                        "QPushButton{padding:5px 5px}"
                                        "QPushButton{margin:5px 5px}")
        self.webcam_detection_btn.clicked.connect(self.open_cam)
        self.mp4_detection_btn.clicked.connect(self.open_mp4)
        self.vid_stop_btn.clicked.connect(self.close_vid)
        # Add the widgets to the layout
        vid_detection_layout.addWidget(vid_title)
        vid_detection_layout.addWidget(self.vid_img)
        vid_detection_layout.addWidget(self.webcam_detection_btn)
        vid_detection_layout.addWidget(self.mp4_detection_btn)
        vid_detection_layout.addWidget(self.vid_stop_btn)
        vid_detection_widget.setLayout(vid_detection_layout)

        # todo About tab
        about_widget = QWidget()
        about_layout = QVBoxLayout()
        about_title = QLabel('歡迎使用目標檢測系統\n\n 提供付費指導:有需要的好兄弟加下面的QQ即可')  # todo customize the welcome message
        about_title.setFont(QFont('楷體', 18))
        about_title.setAlignment(Qt.AlignCenter)
        about_img = QLabel()
        about_img.setPixmap(QPixmap('images/UI/qq.png'))
        about_img.setAlignment(Qt.AlignCenter)
        label_super = QLabel()  # todo update the author info
        label_super.setText("<a href='https://blog.csdn.net/ECHOSON'>或者你可以在這裡找到我-->肆十二</a>")
        label_super.setFont(QFont('楷體', 16))
        label_super.setOpenExternalLinks(True)
        label_super.setAlignment(Qt.AlignRight)
        about_layout.addWidget(about_title)
        about_layout.addStretch()
        about_layout.addWidget(about_img)
        about_layout.addStretch()
        about_layout.addWidget(label_super)
        about_widget.setLayout(about_layout)

        self.left_img.setAlignment(Qt.AlignCenter)
        self.addTab(img_detection_widget, '圖片檢測')
        self.addTab(vid_detection_widget, '視頻檢測')
        self.addTab(about_widget, '聯系我')
        self.setTabIcon(0, QIcon('images/UI/lufei.png'))
        self.setTabIcon(1, QIcon('images/UI/lufei.png'))
        self.setTabIcon(2, QIcon('images/UI/lufei.png'))

    '''
    *** Upload an image ***
    '''
    def upload_img(self):
        # Choose an image file to read
        fileName, fileType = QFileDialog.getOpenFileName(self, 'Choose file', '', '*.jpg *.png *.tif *.jpeg')
        if fileName:
            suffix = fileName.split(".")[-1]
            save_path = osp.join("images/tmp", "tmp_upload." + suffix)
            shutil.copy(fileName, save_path)
            # Resize the image so all uploads are displayed at a uniform size
            im0 = cv2.imread(save_path)
            resize_scale = self.output_size / im0.shape[0]
            im0 = cv2.resize(im0, (0, 0), fx=resize_scale, fy=resize_scale)
            cv2.imwrite("images/tmp/upload_show_result.jpg", im0)
            self.img2predict = fileName
            self.left_img.setPixmap(QPixmap("images/tmp/upload_show_result.jpg"))
            # todo reset the right-hand result image after a new upload
            self.right_img.setPixmap(QPixmap("images/UI/right.jpeg"))

    '''
    *** Detect an image ***
    '''
    def detect_img(self):
        model = self.model
        output_size = self.output_size
        source = self.img2predict  # file/dir/URL/glob, 0 for webcam
        imgsz = [640, 640]  # inference size (pixels)
        conf_thres = 0.25  # confidence threshold
        iou_thres = 0.45  # NMS IOU threshold
        max_det = 1000  # maximum detections per image
        device = self.device  # cuda device, i.e. 0 or 0,1,2,3 or cpu
        view_img = False  # show results
        save_txt = False  # save results to *.txt
        save_conf = False  # save confidences in --save-txt labels
        save_crop = False  # save cropped prediction boxes
        nosave = False  # do not save images/videos
        classes = None  # filter by class: --class 0, or --class 0 2 3
        agnostic_nms = False  # class-agnostic NMS
        augment = False  # augmented inference
        visualize = False  # visualize features
        line_thickness = 3  # bounding box thickness (pixels)
        hide_labels = False  # hide labels
        hide_conf = False  # hide confidences
        half = False  # use FP16 half-precision inference
        dnn = False  # use OpenCV DNN for ONNX inference
        print(source)
        if source == "":
            QMessageBox.warning(self, "請上傳", "請先上傳圖片再進行檢測")
        else:
            source = str(source)
            device = select_device(self.device)
            webcam = False
            stride, names, pt, jit, onnx = model.stride, model.names, model.pt, model.jit, model.onnx
            imgsz = check_img_size(imgsz, s=stride)  # check image size
            save_img = not nosave and not source.endswith('.txt')  # save inference images
            # Dataloader
            if webcam:
                view_img = check_imshow()
                cudnn.benchmark = True  # set True to speed up constant image size inference
                dataset = LoadStreams(source, img_size=imgsz, stride=stride, auto=pt and not jit)
                bs = len(dataset)  # batch_size
            else:
                dataset = LoadImages(source, img_size=imgsz, stride=stride, auto=pt and not jit)
                bs = 1  # batch_size
            vid_path, vid_writer = [None] * bs, [None] * bs
            # Run inference
            if pt and device.type != 'cpu':
                model(torch.zeros(1, 3, *imgsz).to(device).type_as(next(model.model.parameters())))  # warmup
            dt, seen = [0.0, 0.0, 0.0], 0
            for path, im, im0s, vid_cap, s in dataset:
                t1 = time_sync()
                im = torch.from_numpy(im).to(device)
                im = im.half() if half else im.float()  # uint8 to fp16/32
                im /= 255  # 0 - 255 to 0.0 - 1.0
                if len(im.shape) == 3:
                    im = im[None]  # expand for batch dim
                t2 = time_sync()
                dt[0] += t2 - t1
                # Inference
                pred = model(im, augment=augment, visualize=visualize)
                t3 = time_sync()
                dt[1] += t3 - t2
                # NMS
                pred = non_max_suppression(pred, conf_thres, iou_thres, classes, agnostic_nms, max_det=max_det)
                dt[2] += time_sync() - t3
                # Process predictions
                for i, det in enumerate(pred):  # per image
                    seen += 1
                    if webcam:  # batch_size >= 1
                        p, im0, frame = path[i], im0s[i].copy(), dataset.count
                        s += f'{i}: '
                    else:
                        p, im0, frame = path, im0s.copy(), getattr(dataset, 'frame', 0)
                    p = Path(p)  # to Path
                    s += '%gx%g ' % im.shape[2:]  # print string
                    gn = torch.tensor(im0.shape)[[1, 0, 1, 0]]  # normalization gain whwh
                    imc = im0.copy() if save_crop else im0  # for save_crop
                    annotator = Annotator(im0, line_width=line_thickness, example=str(names))
                    if len(det):
                        # Rescale boxes from img_size to im0 size
                        det[:, :4] = scale_coords(im.shape[2:], det[:, :4], im0.shape).round()
                        # Print results
                        for c in det[:, -1].unique():
                            n = (det[:, -1] == c).sum()  # detections per class
                            s += f"{n} {names[int(c)]}{'s' * (n > 1)}, "  # add to string
                        # Write results
                        for *xyxy, conf, cls in reversed(det):
                            if save_txt:  # Write to file
                                xywh = (xyxy2xywh(torch.tensor(xyxy).view(1, 4)) / gn).view(-1).tolist()  # normalized xywh
                                line = (cls, *xywh, conf) if save_conf else (cls, *xywh)  # label format
                            if save_img or save_crop or view_img:  # Add bbox to image
                                c = int(cls)  # integer class
                                label = None if hide_labels else (names[c] if hide_conf else f'{names[c]} {conf:.2f}')
                                annotator.box_label(xyxy, label, color=colors(c, True))
                    # Print time (inference-only)
                    LOGGER.info(f'{s}Done. ({t3 - t2:.3f}s)')
                    # Save and display the annotated result
                    im0 = annotator.result()
                    resize_scale = output_size / im0.shape[0]
                    im0 = cv2.resize(im0, (0, 0), fx=resize_scale, fy=resize_scale)
                    cv2.imwrite("images/tmp/single_result.jpg", im0)
                    self.right_img.setPixmap(QPixmap("images/tmp/single_result.jpg"))

    # Video detection follows the same logic, with two entry points:
    # webcam detection and video-file detection.
    '''
    ### Window close event ###
    '''
    def closeEvent(self, event):
        reply = QMessageBox.question(self,
                                     'quit',
                                     "Are you sure?",
                                     QMessageBox.Yes | QMessageBox.No,
                                     QMessageBox.No)
        if reply == QMessageBox.Yes:
            self.close()
            event.accept()
        else:
            event.ignore()

    '''
    ### Start webcam detection ###
    '''
    def open_cam(self):
        self.webcam_detection_btn.setEnabled(False)
        self.mp4_detection_btn.setEnabled(False)
        self.vid_stop_btn.setEnabled(True)
        self.vid_source = '0'
        self.webcam = True
        th = threading.Thread(target=self.detect_vid)
        th.start()

    '''
    ### Start video-file detection ###
    '''
    def open_mp4(self):
        fileName, fileType = QFileDialog.getOpenFileName(self, 'Choose file', '', '*.mp4 *.avi')
        if fileName:
            self.webcam_detection_btn.setEnabled(False)
            self.mp4_detection_btn.setEnabled(False)
            self.vid_source = fileName
            self.webcam = False
            th = threading.Thread(target=self.detect_vid)
            th.start()

    '''
    ### Video detection main loop ###
    '''
    # The webcam and video-file paths share the same main function; only the source differs
    def detect_vid(self):
        model = self.model
        output_size = self.output_size
        imgsz = [640, 640]  # inference size (pixels)
        conf_thres = 0.25  # confidence threshold
        iou_thres = 0.45  # NMS IOU threshold
        max_det = 1000  # maximum detections per image
        view_img = False  # show results
        save_txt = False  # save results to *.txt
        save_conf = False  # save confidences in --save-txt labels
        save_crop = False  # save cropped prediction boxes
        nosave = False  # do not save images/videos
        classes = None  # filter by class: --class 0, or --class 0 2 3
        agnostic_nms = False  # class-agnostic NMS
        augment = False  # augmented inference
        visualize = False  # visualize features
        line_thickness = 3  # bounding box thickness (pixels)
        hide_labels = False  # hide labels
        hide_conf = False  # hide confidences
        half = False  # use FP16 half-precision inference
        dnn = False  # use OpenCV DNN for ONNX inference
        source = str(self.vid_source)
        webcam = self.webcam
        device = select_device(self.device)
        stride, names, pt, jit, onnx = model.stride, model.names, model.pt, model.jit, model.onnx
        imgsz = check_img_size(imgsz, s=stride)  # check image size
        save_img = not nosave and not source.endswith('.txt')  # save inference images
        # Dataloader
        if webcam:
            view_img = check_imshow()
            cudnn.benchmark = True  # set True to speed up constant image size inference
            dataset = LoadStreams(source, img_size=imgsz, stride=stride, auto=pt and not jit)
            bs = len(dataset)  # batch_size
        else:
            dataset = LoadImages(source, img_size=imgsz, stride=stride, auto=pt and not jit)
            bs = 1  # batch_size
        vid_path, vid_writer = [None] * bs, [None] * bs
        # Run inference
        if pt and device.type != 'cpu':
            model(torch.zeros(1, 3, *imgsz).to(device).type_as(next(model.model.parameters())))  # warmup
        dt, seen = [0.0, 0.0, 0.0], 0
        for path, im, im0s, vid_cap, s in dataset:
            t1 = time_sync()
            im = torch.from_numpy(im).to(device)
            im = im.half() if half else im.float()  # uint8 to fp16/32
            im /= 255  # 0 - 255 to 0.0 - 1.0
            if len(im.shape) == 3:
                im = im[None]  # expand for batch dim
            t2 = time_sync()
            dt[0] += t2 - t1
            # Inference
            pred = model(im, augment=augment, visualize=visualize)
            t3 = time_sync()
            dt[1] += t3 - t2
            # NMS
            pred = non_max_suppression(pred, conf_thres, iou_thres, classes, agnostic_nms, max_det=max_det)
            dt[2] += time_sync() - t3
            # Process predictions
            for i, det in enumerate(pred):  # per image
                seen += 1
                if webcam:  # batch_size >= 1
                    p, im0, frame = path[i], im0s[i].copy(), dataset.count
                    s += f'{i}: '
                else:
                    p, im0, frame = path, im0s.copy(), getattr(dataset, 'frame', 0)
                p = Path(p)  # to Path
                s += '%gx%g ' % im.shape[2:]  # print string
                gn = torch.tensor(im0.shape)[[1, 0, 1, 0]]  # normalization gain whwh
                imc = im0.copy() if save_crop else im0  # for save_crop
                annotator = Annotator(im0, line_width=line_thickness, example=str(names))
                if len(det):
                    # Rescale boxes from img_size to im0 size
                    det[:, :4] = scale_coords(im.shape[2:], det[:, :4], im0.shape).round()
                    # Print results
                    for c in det[:, -1].unique():
                        n = (det[:, -1] == c).sum()  # detections per class
                        s += f"{n} {names[int(c)]}{'s' * (n > 1)}, "  # add to string
                    # Write results
                    for *xyxy, conf, cls in reversed(det):
                        if save_txt:  # Write to file
                            xywh = (xyxy2xywh(torch.tensor(xyxy).view(1, 4)) / gn).view(-1).tolist()  # normalized xywh
                            line = (cls, *xywh, conf) if save_conf else (cls, *xywh)  # label format
                        if save_img or save_crop or view_img:  # Add bbox to image
                            c = int(cls)  # integer class
                            label = None if hide_labels else (names[c] if hide_conf else f'{names[c]} {conf:.2f}')
                            annotator.box_label(xyxy, label, color=colors(c, True))
                # Print time (inference-only)
                LOGGER.info(f'{s}Done. ({t3 - t2:.3f}s)')
                # Save the annotated frame and display it on the video label
                im0 = annotator.result()
                frame = im0
                resize_scale = output_size / frame.shape[0]
                frame_resized = cv2.resize(frame, (0, 0), fx=resize_scale, fy=resize_scale)
                cv2.imwrite("images/tmp/single_result_vid.jpg", frame_resized)
                self.vid_img.setPixmap(QPixmap("images/tmp/single_result_vid.jpg"))
                # waitKey adds a ~25 ms delay per frame; exit once the stop event is set
                if cv2.waitKey(25) & self.stopEvent.is_set() == True:
                    self.stopEvent.clear()
                    self.webcam_detection_btn.setEnabled(True)
                    self.mp4_detection_btn.setEnabled(True)
                    self.reset_vid()
                    break

    '''
    ### Reset the video UI ###
    '''
    def reset_vid(self):
        self.webcam_detection_btn.setEnabled(True)
        self.mp4_detection_btn.setEnabled(True)
        self.vid_img.setPixmap(QPixmap("images/UI/up.jpeg"))
        self.vid_source = '0'
        self.webcam = True

    '''
    ### Stop video detection ###
    '''
    def close_vid(self):
        self.stopEvent.set()
        self.reset_vid()


if __name__ == "__main__":
    app = QApplication(sys.argv)
    mainWindow = MainWindow()
    mainWindow.show()
    sys.exit(app.exec_())
That concludes this simple face recognition system in Python. For more on Python face recognition systems, follow WalkonNet's other related articles!