Implementing Face Recognition in Python (3) – Running the Recognition

Create a working folder with the following contents (a quick layout check is sketched right after this list):

  • Superstar (folder containing photos of several different celebrities)
  • myself.jpg (the photo to be recognized)
  • facenet_keras.h5 (a pre-trained model)
  • FaceRecognitionTraining.py (the training script)
  • FaceRecognitionPredition.py (the photo recognition script)
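A quick way to confirm the layout is in place is the sketch below; the Drive path is an assumption that matches the scripts later in this post, and it presumes Google Drive is already mounted in Colab:

# optional: check that the working folder is laid out as expected (path is an assumption)
from pathlib import Path

base = Path('/content/drive/MyDrive/FaceRecognition')
for name in ['Superstar', 'myself.jpg', 'facenet_keras.h5']:
    print(name, 'found' if (base / name).exists() else 'MISSING')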

facenet_keras.h5 source:
https://github.com/nyoki-mtl/keras-facenet
Environment the model was trained in:
• Ubuntu 16.04 or Windows 10
• Python 3.6.2
• tensorflow: 1.3.0
• keras: 2.1.2
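The pre-trained weights were exported under the versions listed above. If loading the .h5 file fails in your own environment, it can help to check which versions Colab currently provides (a generic check, not specific to this project):

import sys
import tensorflow as tf

print('Python    :', sys.version.split()[0])
print('TensorFlow:', tf.__version__)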

Running the code

  1. Install mtcnn (see the command below)
  2. Paste the following code and run FaceRecognitionTraining.py
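In a Colab notebook, mtcnn can be installed directly in a cell:

!pip install mtcnn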
# FaceRecognitionTraining.py

from google.colab import drive
drive.mount('/content/drive')

#import model
from os import listdir
import numpy as np
import matplotlib.pyplot as plt
from mtcnn.mtcnn import MTCNN
from tensorflow.keras.models import load_model
import cv2

#dir and file
dir_base = '/content/drive/MyDrive/FaceRecognition/'
dir_superstar = dir_base + 'Superstar/'
file_myself = dir_base + 'myself.jpg'
model_name = dir_base + 'facenet_keras.h5'

# detect a face in every photo under Superstar/
except_file = list()
files = list()
faces = list()
detector = MTCNN()
for filename in listdir(dir_superstar):
    print(filename)
    if filename == '.ipynb_checkpoints':
        continue
    path = dir_superstar + filename
    print('load from: ', path)
    filename = filename[:-4]          # drop the file extension, keep the name as the label
    try:
        imageBGR = cv2.imread(path, 1)
        imageRGB = cv2.cvtColor(imageBGR, cv2.COLOR_BGR2RGB)   # MTCNN and matplotlib expect RGB
        results = detector.detect_faces(imageRGB)
        x1, y1, width, height = results[0]['box']
        x1, y1 = abs(x1), abs(y1)
        x2, y2 = x1 + width, y1 + height
        faceRGB = imageRGB[y1:y2, x1:x2]                        # crop the detected face
        faceRGB = cv2.resize(faceRGB, (160, 160), interpolation=cv2.INTER_AREA)  # FaceNet input size
        plt.figure()
        plt.subplot(121)
        plt.imshow(imageRGB)
        plt.title(filename)
        plt.subplot(122)
        plt.imshow(faceRGB)
        plt.show()
        faces.append(faceRGB)
        files.append(filename)
    except (IndexError, IsADirectoryError, OSError):
        except_file.append(filename)
        print('except file...' + str(filename))
file_embeddings = dir_base + 'embeddings.npz'

#load facenet model
model = load_model(model_name)
print('Loaded Model ', model_name)


# convert each face to an embedding and save it
def l2_normalize(x, axis=-1, epsilon=1e-10):
    output = x / np.sqrt(np.maximum(np.sum(np.square(x), axis=axis, keepdims=True), epsilon))
    return output

faceEmbedding = list()
for faceRGB in faces:
    face_pixels = faceRGB.astype('float32')
    mean, std = face_pixels.mean(), face_pixels.std()
    face_pixels = (face_pixels - mean) / std        # standardize pixel values, as FaceNet expects
    samples = np.expand_dims(face_pixels, axis=0)   # add a batch dimension
    pred = model.predict(samples)
    embedding = l2_normalize(pred[0])               # L2-normalize the embedding vector
    faceEmbedding.append(embedding)
faceEmbedding = np.asarray(faceEmbedding)
print(faceEmbedding.shape)
np.savez_compressed(file_embeddings, faceEmbedding, files)

The result: each photo is displayed next to its cropped face, and the shape of the embedding array is printed at the end.
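Before moving on, you can optionally confirm that embeddings.npz was written correctly; arr_0 and arr_1 are the default keys np.savez_compressed assigns to positional arguments, which is also how the prediction script reads them back:

# optional sanity check on the saved embeddings
data = np.load(dir_base + 'embeddings.npz')
print(data['arr_0'].shape)   # one embedding per celebrity photo
print(data['arr_1'])         # the matching labels (file names)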

  3. Paste the following code and run FaceRecognitionPredition.py
from google.colab import drive
drive.mount('/content/drive')

#import model
from os import listdir
import numpy as np
import matplotlib.pyplot as plt
from mtcnn.mtcnn import MTCNN
from tensorflow.keras.models import load_model
import cv2

# convert each face to an embedding and save it
def l2_normalize(x, axis=-1, epsilon=1e-10):
    output = x / np.sqrt(np.maximum(np.sum(np.square(x), axis=axis, keepdims=True), epsilon))
    return output

#dir and file (same folder as in FaceRecognitionTraining.py)
dir_base = '/content/drive/MyDrive/FaceRecognition/'
dir_superstar = dir_base + 'Superstar/'
file_myself = dir_base + 'myself.jpg'
model_name = dir_base + 'facenet_keras.h5'
file_embeddings = dir_base + 'embeddings.npz'

#load facenet model
model = load_model(model_name)
print('Loaded Model ', model_name)

#prediction
data = np.load(file_embeddings)
faceEmbedding = data['arr_0']       # embeddings of the celebrity photos
starName = data['arr_1']            # matching labels (file names)
image_myself = cv2.imread(file_myself, 1)
image_myself = cv2.cvtColor(image_myself, cv2.COLOR_BGR2RGB)
detector = MTCNN()
results = detector.detect_faces(image_myself)
x1, y1, width, height = results[0]['box']
x1, y1 = abs(x1), abs(y1)
x2, y2 = x1 + width, y1 + height
face = image_myself[y1:y2, x1:x2]
face_myself = cv2.resize(face, (160, 160), interpolation=cv2.INTER_AREA)
plt.subplot(121)
plt.imshow(image_myself)
plt.title(file_myself)
plt.subplot(122)
plt.imshow(face_myself)
plt.show()

face_myself = face_myself.astype('float32')
mean, std = face_myself.mean(), face_myself.std()
face_myself = (face_myself - mean) / std       # same standardization as in training
samples = np.expand_dims(face_myself, axis=0)
pred = model.predict(samples)
myself_embedding = l2_normalize(pred[0])
myself_embedding = np.asarray(myself_embedding)

# Euclidean distance to each stored embedding: smaller means more similar
for index, star in enumerate(faceEmbedding):
    distanceNum = np.sqrt(np.sum(np.square(star - myself_embedding)))
    print('star name, distance: ', starName[index], distanceNum)
  4. The result: one distance is printed per celebrity.
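The loop above only prints every distance. If you also want the script to name the single closest match, a minimal extension is sketched below; it reuses faceEmbedding, myself_embedding, and starName from the prediction script, and the 1.0 threshold for "no good match" is an assumption, not part of the original script, so it would need tuning on your own data:

# pick the celebrity with the smallest Euclidean distance
distances = np.sqrt(np.sum(np.square(faceEmbedding - myself_embedding), axis=1))
best = np.argmin(distances)
if distances[best] < 1.0:            # hypothetical threshold, tune for your data
    print('Best match:', starName[best], 'distance:', distances[best])
else:
    print('No close match (smallest distance:', distances[best], ')')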