Merge pull request #168 from J-B-Mugundh/file-locking
Added File Locking Mechanism with OpenCV
UTSAVS26 authored Oct 7, 2024
2 parents 6f76791 + b6c96b8 commit 2e4c4f2
Showing 7 changed files with 26,255 additions and 0 deletions.
@@ -0,0 +1,182 @@
import cv2
import os
import sys
import numpy as np
import FaceDetection
import warnings

warnings.filterwarnings("ignore")

faces = []
labels = []
names = {}
dirpath = os.getcwd()
training_folder = os.path.join(dirpath, "Face_Recognition_Script", "training-data")

def newUser():
    name = input("Enter Your Name: ")
    dirs = os.listdir(training_folder)
    # each user gets a folder named "<name>@<label>"
    user_folder = os.path.join(training_folder, name + '@' + str(len(dirs) + 1))
    os.makedirs(user_folder)
    cap = cv2.VideoCapture(0)
    i = 0
    while True:
        ret, frame = cap.read()
        frame, frame_crop, rect = FaceDetection.detect_faces(FaceDetection.lbp_face_cascade, frame)
        cv2.imshow('Smile :) with different moods', frame)
        cv2.waitKey(50)
        if frame_crop is not None and i < 100:
            # save the cropped face as the next numbered training image
            image_path = os.path.join(user_folder, str(i) + '.jpg')
            print(image_path)
            cv2.imwrite(image_path, frame_crop)
            i += 1
        elif i >= 100:
            break

    cap.release()
    cv2.destroyAllWindows()



def createLabels():
    dirs = os.listdir(training_folder)
    for user in dirs:
        # folder names look like "<name>@<label>"
        label = int(user[user.find("@") + 1:])
        names[label] = user[:user.find("@")]
        subfolder = os.path.join(training_folder, user)
        for image_name in os.listdir(subfolder):
            face = cv2.imread(os.path.join(subfolder, image_name))
            face = cv2.cvtColor(face, cv2.COLOR_BGR2GRAY)
            faces.append(face)
            labels.append(label)

face_recognizer = None

def trainDataLBPH():
    # create our LBPH face recognizer (requires opencv-contrib-python)
    global face_recognizer
    if len(labels) > 0:
        face_recognizer = cv2.face.LBPHFaceRecognizer_create()
        face_recognizer.train(faces, np.array(labels))
    else:
        print("No train data is present. Add train data using -train flag.")
        sys.exit()

def trainDataEigen():
    # or use the Eigenfaces recognizer instead
    global face_recognizer
    if len(labels) > 0:
        face_recognizer = cv2.face.EigenFaceRecognizer_create()
        face_recognizer.train(faces, np.array(labels))
    else:
        print("No train data is present. Add train data using -train flag.")
        sys.exit()

def trainDataFisher():
    # or use the Fisherfaces recognizer instead
    global face_recognizer
    if len(labels) > 0:
        face_recognizer = cv2.face.FisherFaceRecognizer_create()
        face_recognizer.train(faces, np.array(labels))
    else:
        print("No train data is present. Add train data using -train flag.")
        sys.exit()


def draw_rectangle(img, rect):
    (x, y, w, h) = rect
    cv2.rectangle(img, (x, y), (x + w, y + h), (0, 255, 0), 2)

def draw_text(img, text, x, y):
    cv2.putText(img, text, (x, y), cv2.FONT_HERSHEY_PLAIN, 1.5, (0, 255, 0), 2)


def predict(test_img):
    img = test_img
    img, face, rect = FaceDetection.detect_faces(FaceDetection.haar_face_cascade, img, 1.1)
    if face is not None:
        # LBPH expects an 8-bit grayscale image
        face = cv2.cvtColor(np.array(face, dtype=np.uint8), cv2.COLOR_BGR2GRAY)
        label, conf = face_recognizer.predict(face)
        label_text = "unknown" if label == -1 else names[label]
        draw_rectangle(img, rect)
        draw_text(img, label_text, rect[0], rect[1] - 5)

    return img

def newUserTest():
    global pass_name
    cap = cv2.VideoCapture(0)
    os.system('cls')
    previous_label = ""
    while True:
        ret, frame = cap.read()
        frame, frame_crop, rect = FaceDetection.detect_faces(FaceDetection.haar_face_cascade, frame, 1.1)
        if frame_crop is not None:
            # LBPH expects an 8-bit grayscale image
            frame_crop = cv2.cvtColor(np.array(frame_crop, dtype=np.uint8), cv2.COLOR_BGR2GRAY)
            label, conf = face_recognizer.predict(frame_crop)
            label_text = "unknown" if label == -1 else names[label]
            draw_rectangle(frame, rect)
            if previous_label != label_text:
                os.system('cls')
                previous_label = label_text
                print(label_text)
            # unlock: exit once the recognized face matches the expected name
            if label_text == pass_name and pass_name != '':
                sys.exit()
            draw_text(frame, label_text, rect[0], rect[1] - 5)
        cv2.imshow('Smile :) with different moods', frame)
        if cv2.waitKey(1) & 0xFF == ord('q'):
            break

    cap.release()
    cv2.destroyAllWindows()

if __name__ == '__main__':
    if len(sys.argv) > 1:
        if str(sys.argv[1]) == '-train':
            newUser()
        elif str(sys.argv[1]) == '-run':
            pass_name = ''
            createLabels()
            os.system('cls')
            trainDataLBPH()
            os.system('cls')
            newUserTest()
        else:
            # any other argument is treated as the name that unlocks
            pass_name = sys.argv[1]
            createLabels()
            os.system('cls')
            trainDataLBPH()
            os.system('cls')
            newUserTest()
    else:
        pass_name = ''
        createLabels()
        os.system('cls')
        trainDataLBPH()
        os.system('cls')
        newUserTest()
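
For context (not part of the committed files): a minimal sketch of the LBPH train-then-predict cycle the script above relies on, assuming opencv-contrib-python is installed; the image file names are illustrative.

import cv2
import numpy as np

recognizer = cv2.face.LBPHFaceRecognizer_create()
# train on a few 8-bit grayscale face crops, all tagged with integer label 1
imgs = [cv2.imread(p, cv2.IMREAD_GRAYSCALE) for p in ('alice0.jpg', 'alice1.jpg')]
recognizer.train(imgs, np.array([1, 1]))
# predict() returns the best-matching label plus a distance-like confidence
# (for LBPH, lower means a closer match)
probe = cv2.imread('probe.jpg', cv2.IMREAD_GRAYSCALE)
label, conf = recognizer.predict(probe)
print(label, conf)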

@@ -0,0 +1,76 @@
'''
detectMultiScale(image, scaleFactor, minNeighbors):
This is a general function to detect objects; in this case it detects faces, since we load a face cascade.
If it finds faces, it returns a list of their positions in the form "Rect(x, y, w, h)"; if not, it returns an empty list.
image:
The first input is the grayscale image, so make sure the image is in grayscale.
scaleFactor:
This parameter compensates for the false perception of size that occurs when one face appears bigger than another simply because it is closer to the camera.
minNeighbors:
The detector slides a window over the image; minNeighbors defines how many overlapping detections must be found near the current window before a face is declared found.
'''

import cv2

# paths to the cascade XML files; adjust these to match your local checkout
haar_face_cascade = cv2.CascadeClassifier('E://PYTHON//Windows-Folder-Unlock-Using-Face-Recognition-master//Face_Recognition_Script//haarcascade_frontalface_alt.xml')
lbp_face_cascade = cv2.CascadeClassifier('E://PYTHON//Windows-Folder-Unlock-Using-Face-Recognition-master//Face_Recognition_Script//lbpcascade_frontalface.xml')


def detect_faces(f_cascade, colored_img, scaleFactor=1.1):
    # work on a copy so the caller's image is not modified
    img_copy = colored_img.copy()
    # convert the test image to grayscale, as the OpenCV face detector expects gray images
    gray = cv2.cvtColor(img_copy, cv2.COLOR_BGR2GRAY)
    # detect multiscale (some faces may be closer to the camera than others)
    faces = f_cascade.detectMultiScale(gray, scaleFactor=scaleFactor, minNeighbors=5)
    if len(faces) == 0:
        return img_copy, None, None
    # draw every detected face as a rectangle on the colored image
    for (x, y, w, h) in faces:
        cv2.rectangle(img_copy, (x, y), (x + w, y + h), (0, 255, 0), 2)
    # return the annotated image, the crop of the first face, and its rect
    (x, y, w, h) = faces[0]
    return img_copy, img_copy[y:y + h, x:x + w], faces[0]

def staticFaceDetectHaar(img):
    test1 = cv2.imread(img)
    test1, _, _ = detect_faces(haar_face_cascade, test1)
    cv2.imshow('final', test1)
    cv2.waitKey(0)
    cv2.destroyAllWindows()

def staticFaceDetectLbp(img):
    test1 = cv2.imread(img)
    test1, _, _ = detect_faces(lbp_face_cascade, test1)
    cv2.imshow('final', test1)
    cv2.waitKey(0)
    cv2.destroyAllWindows()

def liveFaceDetectLbp():
    cap = cv2.VideoCapture(0)
    while True:
        ret, frame = cap.read()
        frame, _, _ = detect_faces(lbp_face_cascade, frame, 1.1)
        cv2.imshow("frame", frame)
        if cv2.waitKey(1) & 0xFF == ord('q'):
            break
    cap.release()
    cv2.destroyAllWindows()

def liveFaceDetectHaar():
    cap = cv2.VideoCapture(0)
    while True:
        ret, frame = cap.read()
        frame, _, _ = detect_faces(haar_face_cascade, frame, 1.1)
        cv2.imshow("frame", frame)
        if cv2.waitKey(1) & 0xFF == ord('q'):
            break
    cap.release()
    cv2.destroyAllWindows()


#liveFaceDetectHaar()
#liveFaceDetectLbp()
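
For context (not part of the committed files): a minimal sketch of a single detectMultiScale call as described in the docstring above, assuming the Haar cascade bundled with opencv-python via cv2.data.haarcascades; the input file name is illustrative.

import cv2

cascade = cv2.CascadeClassifier(cv2.data.haarcascades + 'haarcascade_frontalface_alt.xml')
img = cv2.imread('group_photo.jpg')           # illustrative input image
gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)  # the detector expects grayscale
# returns an array of (x, y, w, h) rectangles; empty when no face is found
rects = cascade.detectMultiScale(gray, scaleFactor=1.1, minNeighbors=5)
for (x, y, w, h) in rects:
    cv2.rectangle(img, (x, y), (x + w, y + h), (0, 255, 0), 2)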
