update tracking

This commit is contained in:
parent 07d28e24cd
commit b908185658
@@ -0,0 +1,2 @@
dataset
test
@@ -0,0 +1,77 @@
import cv2
import face_recognition
import os
import numpy as np
import pickle

datasetPath = "dataset"
images = []
classNames = []
lisFileTrain = os.listdir(datasetPath)

# Load every training image and take the class name from the part of the
# file name before the first underscore.
for file in lisFileTrain:
    currentImg = cv2.imread(f"{datasetPath}/{file}")
    images.append(currentImg)
    classNames.append(os.path.splitext(file)[0].split('_')[0])

print(len(images))

def encodeImgs(images, save_path="encodings.pkl"):
    # Reuse cached encodings if they already exist.
    if os.path.exists(save_path):
        print(f"Loading encodings from {save_path}...")
        with open(save_path, "rb") as f:
            return pickle.load(f)

    encodeList = []
    for i, img in enumerate(images):
        print(i + 1)
        img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
        encode = face_recognition.face_encodings(img)

        if encode:  # Check that the encodings list is not empty
            encodeList.append(encode[0])
        else:
            print("No face detected in an image. Skipping...")
            os.remove(f"{datasetPath}/{lisFileTrain[i]}")

    # Save the encoding list to a file
    print(f"Saving encodings to {save_path}...")
    with open(save_path, "wb") as f:
        pickle.dump(encodeList, f)

    return encodeList

encodeListKnow = encodeImgs(images)
print("Load data success")
print(len(encodeListKnow))

cap = cv2.VideoCapture(0)

while True:
    ret, frame = cap.read()
    if not ret:
        break
    frameS = cv2.resize(frame, (0, 0), None, fx=1, fy=1)
    frameS = cv2.cvtColor(frameS, cv2.COLOR_BGR2RGB)

    faceCurFrame = face_recognition.face_locations(frameS)
    encodeCurFrame = face_recognition.face_encodings(frameS)

    for encodeFace, faceLoc in zip(encodeCurFrame, faceCurFrame):
        matches = face_recognition.compare_faces(encodeListKnow, encodeFace)
        faceDis = face_recognition.face_distance(encodeListKnow, encodeFace)
        print(faceDis)
        matchIndex = np.argmin(faceDis)

        if faceDis[matchIndex] < 0.3:
            name = classNames[matchIndex].upper()
        else:
            name = "Unknown"

        y1, x2, y2, x1 = faceLoc
        cv2.rectangle(frame, (x1, y1), (x2, y2), (0, 255, 0), 2)
        cv2.putText(frame, name + f"({(1 - round(faceDis[matchIndex], 2)) * 100}%)", (x2, y2), cv2.FONT_HERSHEY_COMPLEX, 1, (255, 255, 255), 2)

    cv2.imshow('Face detecting', frame)

    if cv2.waitKey(1) == ord("q"):
        break

cap.release()
cv2.destroyAllWindows()
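A minimal sketch (not part of the commit) of how the cached encodings.pkl produced by the script above could be sanity-checked against a single image; the test image path is hypothetical:

import pickle
import numpy as np
import face_recognition

with open("encodings.pkl", "rb") as f:
    known = pickle.load(f)  # list of 128-d encodings saved by encodeImgs()

img = face_recognition.load_image_file("dataset/Alice_0.png")  # hypothetical path
enc = face_recognition.face_encodings(img)
if enc:
    dists = face_recognition.face_distance(known, enc[0])
    print("Best match index:", np.argmin(dists), "distance:", dists.min())
else:
    print("No face found in the test image.")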
										
Binary file not shown.
Binary file not shown.
@@ -1,66 +0,0 @@
import tensorflow as tf
import numpy as np
import cv2
import os
from tensorflow.keras import layers, models
from sklearn.model_selection import train_test_split

# 1. Load the data
def load_data(data_dir):
    images = []
    labels = []
    for label, folder in enumerate(os.listdir(data_dir)):
        folder_path = os.path.join(data_dir, folder)
        for file in os.listdir(folder_path):
            img_path = os.path.join(folder_path, file)
            img = cv2.imread(img_path)
            img = cv2.resize(img, (128, 128))  # Resize to a fixed size
            images.append(img)
            labels.append(label)
    images = np.array(images) / 255.0  # Normalize
    labels = np.array(labels)
    return images, labels

# Path to the data
data_dir = '/home/joseph/DetectFace/dataset'
images, labels = load_data(data_dir)

# Split the data into train/test sets
X_train, X_test, y_train, y_test = train_test_split(images, labels, test_size=0.2, random_state=42)

# 2. Build the face detection model
model = models.Sequential([
    layers.Conv2D(32, (3, 3), activation='relu', input_shape=(128, 128, 3)),
    layers.MaxPooling2D((2, 2)),
    layers.Conv2D(64, (3, 3), activation='relu'),
    layers.MaxPooling2D((2, 2)),
    layers.Conv2D(128, (3, 3), activation='relu'),
    layers.Flatten(),
    layers.Dense(128, activation='relu'),
    layers.Dense(len(set(labels)), activation='softmax')  # One output per class label
])

# Compile the model
model.compile(optimizer='adam',
              loss='sparse_categorical_crossentropy',
              metrics=['accuracy'])

# Train the model
model.fit(X_train, y_train, epochs=10, validation_data=(X_test, y_test))

# 3. Save the model
model.save('face_detection_model.h5')

# 4. Use the model for prediction
def predict_face(image_path, model_path='face_detection_model.h5'):
    model = tf.keras.models.load_model(model_path)
    img = cv2.imread(image_path)
    img_resized = cv2.resize(img, (128, 128)) / 255.0
    img_resized = np.expand_dims(img_resized, axis=0)
    prediction = model.predict(img_resized)
    return np.argmax(prediction), np.max(prediction)

# Test with a single image
image_path = '/home/joseph/DetectFace/test/NGUYEN HOANG VI_check in_at_2024_09_05_11_31_24.png'
label, confidence = predict_face(image_path)
print(f"Detected label: {label}, Confidence: {confidence:.2f}")
@@ -6,6 +6,9 @@ import pyautogui
 from pyzbar import pyzbar
 from datetime import datetime
 import requests
+import face_recognition
+import numpy as np
+import pickle
 # Initialize an empty list to store user information
 user_data = []
 history = []
@@ -193,6 +196,70 @@ def process_qr_code(frame):
             cv.destroyWindow(WINDOW_QR_CODE)
     return frame
+
+# Handle check-in/check-out once a face has been recognized
+def process_face_detect(text, frame):
+    if text.endswith("\n\n"):
+        file_name = ""
+        status = ""
+        id_log = 0
+        if text not in [user["name"] for user in user_data]:
+            print(f"{text} checked in at {datetime.now()}")
+            status += "check in"
+            file_name += text.split('\n')[0] + "_" + f"{status}_at_{datetime.now().strftime('%Y_%m_%d_%H_%M_%S')}.png"
+            # screenshot_window(file_name)
+            res = check_in(text, frame, text)
+            id_log = res.get('data').get('id')
+        else:
+            print(f"{text} checked out at {datetime.now()}")
+            status += "check out"
+            file_name += text.split('\n')[0] + "_" + f"{status}_at_{datetime.now().strftime('%Y_%m_%d_%H_%M_%S')}.png"
+            # screenshot_window(file_name)
+            res = check_out(text, frame, text)
+            id_log = res.get('data').get('id')
+        cv.namedWindow(WINDOW_QR_CODE, cv.WINDOW_NORMAL)
+        cv.resizeWindow(WINDOW_QR_CODE, screen_width, screen_height)
+        cv.imshow(WINDOW_QR_CODE, frame)
+        cv.moveWindow(WINDOW_QR_CODE, 10, 10)
+        cv.waitKey(5000)  # Wait 5 seconds
+        screenshot_window(file_name)
+        send_image(id_log, file_name)
+        cv.destroyWindow(WINDOW_QR_CODE)
+    else:
+        display_text(frame, "QR invalid", (25, 25), 0.7, (6, 6, 255), 2)
+        display_text(frame, "Failed", (25, 50), 0.7, (6, 6, 255), 2)
+        speak("Failed")
+        cv.namedWindow(WINDOW_QR_CODE, cv.WINDOW_NORMAL)
+        cv.resizeWindow(WINDOW_QR_CODE, screen_width, screen_height)
+        cv.imshow(WINDOW_QR_CODE, frame)
+        cv.moveWindow(WINDOW_QR_CODE, 10, 10)
+        cv.waitKey(2000)
+        cv.destroyWindow(WINDOW_QR_CODE)
+    return frame
+
+# Load the file list and the known-face encodings produced by the DetectFace script
+datasetPath = "../DetectFace/dataset"
+listFilesPath = '../DetectFace/listFiles.pkl'
+images = []
+classNames = []
+lisFileTrain = []
+
+if os.path.exists(listFilesPath):
+    with open(listFilesPath, 'rb') as f:
+        lisFileTrain = pickle.load(f)
+else:
+    lisFileTrain = os.listdir(datasetPath)
+    with open(listFilesPath, 'wb') as f:
+        pickle.dump(lisFileTrain, f)
+for file in lisFileTrain:
+    classNames.append(os.path.splitext(file)[0].split('_')[0])
+
+def encodeImgs(save_path="../DetectFace/encodings.pkl"):
+    if os.path.exists(save_path):
+        print(f"Loading encodings from {save_path}...")
+        with open(save_path, "rb") as f:
+            return pickle.load(f)
+
+encodeListKnow = encodeImgs()
+print("Load data success")
 # Initialize the camera
 def main():
     cap = cv.VideoCapture(0)
@@ -203,20 +270,52 @@ def main():
         ret, frame = cap.read()
         if not ret:
             break
+
+        frameS = cv.resize(frame, (0,0), None, fx=1, fy=1)
+        frameS = cv.cvtColor(frameS, cv.COLOR_BGR2RGB)
+
+        faceCurFrame = face_recognition.face_locations(frameS)
+        encodeCurFrame = face_recognition.face_encodings(frameS)
+        frame = process_qr_code(frame)
+        for encodeFace, faceLoc in zip(encodeCurFrame, faceCurFrame):
+            matches = face_recognition.compare_faces(encodeListKnow, encodeFace)
+            faceDis = face_recognition.face_distance(encodeListKnow, encodeFace)
+            print(faceDis)
+            matchIndex = np.argmin(faceDis)
+
+            if faceDis[matchIndex] < 0.3:
+                name = classNames[matchIndex].upper()
+                process_face_detect(f"{name}\nStaff\n\n", frame)
+            else:
+                name = "Unknown"
+                display_text(frame, "Face not found - use QR code", (20, 55), 0.7, (6, 6, 255), 2)
+            y1, x2, y2, x1 = faceLoc
+            cv.rectangle(frame, (x1,y1), (x2,y2), (0,255,0), 2)
+            cv.putText(frame, name + f"({(1 - round(faceDis[matchIndex], 2))*100}%)", (20, 25), cv.FONT_HERSHEY_COMPLEX, 1, (255,255,255), 2)
+
         # Convert the frame to grayscale
-        gray_frame = cv.cvtColor(frame, cv.COLOR_BGR2GRAY)
+        # gray_frame = cv.cvtColor(frame, cv.COLOR_BGR2GRAY)

-        # Detect faces in the frame
+        # # Detect faces in the frame
-        faces = face_cascade.detectMultiScale(gray_frame, scaleFactor=1.1, minNeighbors=25, minSize=(30, 30))
+        # faces = face_cascade.detectMultiScale(gray_frame, scaleFactor=1.1, minNeighbors=25, minSize=(30, 30))

-        # Draw rectangles around the faces
+        # # Draw rectangles around the faces
-        if len(faces) == 1:
+        # if len(faces) == 1:
-            for (x, y, w, h) in faces:
+        #     for (x, y, w, h) in faces:
-                cv.rectangle(frame, (x, y), (x+w, y+h), (0, 255, 0), 2)
+        #         cv.rectangle(frame, (x, y), (x+w, y+h), (0, 255, 0), 2)
-                display_text(frame, f"Face detected", (430, 25), 0.7, (0, 255, 0), 2)
+        #         display_text(frame, f"Face detected", (430, 25), 0.7, (0, 255, 0), 2)
-                frame = process_qr_code(frame)
+        #         frame = process_qr_code(frame)
-        else:
+        # else:
-            display_text(frame, f"Face not found", (430, 25), 0.7, (6, 6, 255), 2)
+        #     display_text(frame, f"Face not found", (430, 25), 0.7, (6, 6, 255), 2)
         cv.imshow(WINDOW_TRACKING, frame)
         cv.moveWindow(WINDOW_TRACKING, 10, 10)
         if cv.waitKey(1) == ord('q'):
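An editorial aside, not part of the commit: the 0.3 cut-off applied to face_distance in both new code paths is stricter than the library's default tolerance of 0.6. The same check could be expressed by passing the threshold straight to compare_faces, for example:

# Hedged sketch: with an explicit tolerance, matches[matchIndex] already
# reflects the 0.3 cut-off used in the hunks above.
matches = face_recognition.compare_faces(encodeListKnow, encodeFace, tolerance=0.3)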