# Standard library.
import argparse
import csv
import datetime
import math
import os
import time

# Third-party.
import cv2
import dlib
import imutils
import numpy as np
from imutils import face_utils
from imutils.video import FileVideoStream
from imutils.video import VideoStream
from scipy.spatial import distance as dist
# Index ranges of the left and right eye inside the 68-point dlib
# facial-landmark layout, as exposed by imutils.
landmark_idxs = face_utils.FACIAL_LANDMARKS_IDXS
(lStart, lEnd) = landmark_idxs['left_eye']
(rStart, rEnd) = landmark_idxs['right_eye']
def eye_aspect_ratio(eye):
    """Compute the eye aspect ratio (EAR) for one eye.

    Parameters
    ----------
    eye : sequence of six (x, y) landmark coordinates in the dlib
        68-point ordering (corner, two upper-lid points, corner, two
        lower-lid points).

    Returns
    -------
    float
        (|p1-p5| + |p2-p4|) / (2 * |p0-p3|) -- the mean vertical eye
        opening normalised by the horizontal eye width.  The value
        drops toward zero as the eye closes, which is what blink
        detection keys on.
    """
    # Vertical distances between the two upper/lower lid landmark pairs.
    A = dist.euclidean(eye[1], eye[5])
    B = dist.euclidean(eye[2], eye[4])
    # Horizontal distance between the two eye corners.
    C = dist.euclidean(eye[0], eye[3])
    return (A + B) / (2.0 * C)
# Blink frequency: the number of blinks per minute is called the blink frequency.
def time_difference(start_time, end_time):
    """Return ``end_time - start_time`` in microseconds.

    Both arguments are whitespace-separated timestamp strings whose
    fields at indexes 3..7 are hour, minute, second, millisecond and
    microsecond respectively (the layout the original code indexed;
    fields 0-2 -- presumably a date -- are ignored).
    NOTE(review): confirm the field layout against the caller.

    Fixes over the original: ``def`` was fused with the function name
    (syntax error); the two ``for _ in range(0, 8)`` loops re-parsed
    the same fields eight times to no effect; the end-time total and
    the final subtraction/return were missing entirely, so the
    function always returned ``None``.
    """

    def _to_microseconds(stamp):
        # Parse one timestamp string into an absolute microsecond count.
        fields = stamp.split()
        hours = int(fields[3])
        mins = int(fields[4])
        secs = int(fields[5])
        millis = int(fields[6])
        micros = int(fields[7])
        return micros + 1000 * (millis + 1000 * (secs + 60 * (mins + 60 * hours)))

    return _to_microseconds(end_time) - _to_microseconds(start_time)
# Index range of the mouth inside the 68-point dlib landmark layout.
(omouth, emouth) = face_utils.FACIAL_LANDMARKS_IDXS['mouth']


def mouth_aspect_ratio(mouth):
    """Compute the mouth aspect ratio (MAR) from the mouth landmarks.

    Parameters
    ----------
    mouth : sequence of (x, y) mouth landmark coordinates sliced from
        the 68-point layout (indexes 49-68, zero-based 0-19 here).

    Returns
    -------
    float
        (A + B) / (2 * C): mean vertical lip distance normalised by
        the horizontal mouth width.  A large value indicates an open
        mouth (yawn).

    Fixes over the original: ``def`` was fused with the function name
    (syntax error), and ``mar`` was computed but never returned.
    """
    # Vertical distances between the two sets of upper/lower lip
    # landmarks.
    A = dist.euclidean(mouth[2], mouth[10])  # 51, 59
    B = dist.euclidean(mouth[4], mouth[8])   # 53, 57
    # Horizontal distance between the two mouth corners.
    C = dist.euclidean(mouth[0], mouth[6])   # 49, 55
    mar = (A + B) / (2.0 * C)
    return mar
#get rotation matrix from the rotation vector rotation_matrix, _ = cv2.Rodrigues(rotation_vector)
#calculate head tilt angle in degrees head_tilt_degree = abs( [-180] - np.rad2deg([rotationMatrixToEulerAngles(rotation_matrix)[0]]))
#calculate starting and ending points for the two lines for illustration starting_point = (int(image_points[0][0]), int(image_points[0][1])) ending_point = (int(nose_end_point2D[0][0][0]), int(nose_end_point2D[0][0][1]))
# Standard library.
import csv
import os
import pickle
import random

# Third-party.
import matplotlib.pyplot as plt
import numpy as np
import sklearn
from sklearn import preprocessing
from sklearn.metrics import accuracy_score
from sklearn.metrics import classification_report
# BUG FIX: sklearn.metrics has no `chaos_matrix`; the intended symbol
# is `confusion_matrix`.
from sklearn.metrics import confusion_matrix
from tensorflow import keras
from keras import optimizers
from keras import regularizers
from keras.layers import Dense, Dropout, Activation, BatchNormalization
from keras.models import Sequential
from keras.models import load_model
from keras.utils import np_utils
#compile the model #adam = keras.optimizers.Adam(lr=0.01) adam = keras.optimizers.Adam(lr=0.001) model.compile(loss='categorical_crossentropy', optimizer=adam, metrics=['accuracy'])
#fit the model checkpoint = keras.callbacks.ModelCheckpoint(filepath='trained_models/DrowDet_model(output4).hdf5', period=1) tbCallBack = keras.callbacks.TensorBoard(log_dir='./scalar', histogram_freq=0, write_graph=True, write_images=True) history=model.fit(Xtrain, Ytrain, epochs=50, batch_size=256, callbacks=[checkpoint, tbCallBack], validation_data=(Xval,Yval))