diff --git a/Age and Sex Prediction/Web App/app.py b/Age and Sex Prediction/Web App/app.py
index 408160442..be25816de 100644
--- a/Age and Sex Prediction/Web App/app.py
+++ b/Age and Sex Prediction/Web App/app.py
@@ -4,6 +4,7 @@
 import cv2
 import tensorflow as tf
 import sys
+import logging
 
 sys.stdout.reconfigure(encoding='utf-8')
 
@@ -16,7 +17,13 @@
 
 # loading the model
 # model is saved in static folder
-model = tf.keras.models.load_model('static/age_and_gender_prediction_model.h5')
+# Load the model
+try:
+    model = tf.keras.models.load_model('static/age_and_gender_prediction_model.h5')
+    logging.info("Model loaded successfully.")
+except Exception as e:
+    logging.error("Error loading model: %s", e)
+    model = None
 
 # gender mapping
 # if gender prediction = 0, the predition is MALE
@@ -28,39 +35,44 @@
 # This function finds the face area using
 # a haar cascade designed by OpenCV to detect the frontal face
 def extract_face():
-
-    img = cv2.imread('static/uploaded_img.jpg') # reading uploaded image
-    gray_img = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY) # converted to grayscale
-
-    # OpenCV's CascadeClassifier to load a pre-trained Haar cascade for detecting frontal faces
-    haar_cascade = cv2.CascadeClassifier('static/haarcascade_frontalface_default.xml')
-    faces_rect = haar_cascade.detectMultiScale(gray_img, scaleFactor=1.1, minNeighbors=9)
-    extracted_faces = []
-
-    if len(faces_rect) == 0:
-        cv2.imwrite('static/extracted.jpg', img) # if no coordinates are detected, its only face image
-    else:
+    try:
+        img = cv2.imread('static/uploaded_img.jpg') # reading uploaded image
+        gray_img = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY) # converted to grayscale
+
+        # OpenCV's CascadeClassifier to load a pre-trained Haar cascade for detecting frontal faces
+        haar_cascade = cv2.CascadeClassifier('static/haarcascade_frontalface_default.xml')
+        faces_rect = haar_cascade.detectMultiScale(gray_img, scaleFactor=1.1, minNeighbors=9)
         extracted_faces = []
-        for (x, y, w, h) in faces_rect:
-            face = img[y:y+h, x:x+w]
-            extracted_faces.append(face)
-        concatenated_faces = cv2.hconcat(extracted_faces)
-        cv2.imwrite('static/extracted.jpg', concatenated_faces) # face extracted image - saved as extracted.jpg
-
-    if len(faces_rect) != 0:
-        for (x, y, w, h) in faces_rect:
-            cv2.rectangle(img, (x, y), (x+w, y+h), (0, 255, 0), thickness=2)
-        cv2.imwrite('static/uploaded_img.jpg', img) # original uploaded image where face is marked
+
+        if len(faces_rect) == 0:
+            cv2.imwrite('static/extracted.jpg', img) # if no face is detected, treat the whole image as the face
+        else:
+            extracted_faces = []
+            for (x, y, w, h) in faces_rect:
+                face = img[y:y+h, x:x+w]
+                extracted_faces.append(face)
+            concatenated_faces = cv2.hconcat(extracted_faces)
+            cv2.imwrite('static/extracted.jpg', concatenated_faces) # face extracted image - saved as extracted.jpg
+
+        if len(faces_rect) != 0:
+            for (x, y, w, h) in faces_rect:
+                cv2.rectangle(img, (x, y), (x+w, y+h), (0, 255, 0), thickness=2)
+            cv2.imwrite('static/uploaded_img.jpg', img) # original uploaded image where face is marked
+    except Exception as e:
+        logging.error("Error occurred while extracting face: %s", e)
+
 
 # Function to extract features from the image
 # This is the pre-process technique
 # which was applied on original dataset before training
 def extract_features(images):
-    img = cv2.imread(images, cv2.IMREAD_GRAYSCALE) # Read image in grayscale
-    img = cv2.resize(img, (128, 128)) # Resize image
-    img = np.array(img) # Convert image to numpy array
-    features = img.reshape(1, 128, 128, 1) # Reshape to match input shape
-    return features
+    try:
+        img = cv2.imread(images, cv2.IMREAD_GRAYSCALE) # Read image in grayscale
+        img = cv2.resize(img, (128, 128)) # Resize image
+        img = np.array(img) # Convert image to numpy array
+        features = img.reshape(1, 128, 128, 1) # Reshape to match input shape
+        return features
+    except Exception as e:
+        logging.error("Error occurred while extracting features: %s", e)
 
 # Function to predict gender and age
 def predict_result():
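
Two side effects of this patch are worth noting at the call site: `logging` is imported but never configured, so the module-level `logging.info("Model loaded successfully.")` is silently dropped by Python's handler of last resort (which only emits WARNING and above), and both `extract_face` and `extract_features` now return `None` when their `except` branches fire. Below is a minimal sketch of how the caller might account for that. The body of `predict_result`, the `gender_mapping` name, and the model's two-head output layout are assumptions for illustration; the diff does not show them.

```python
import logging

# Assumption: app.py never calls logging.basicConfig, so INFO-level
# messages are discarded by default. Configuring the root logger once,
# before the model is loaded, makes them visible.
logging.basicConfig(level=logging.INFO,
                    format="%(asctime)s %(levelname)s %(message)s")

def predict_result():
    # Hypothetical defensive body for the existing helper (not shown in
    # the diff): guard against the two failure modes this patch
    # introduces, model being None and extract_features() returning None.
    if model is None:
        logging.error("Prediction requested but the model is not loaded.")
        return None

    features = extract_features('static/extracted.jpg')
    if features is None:  # extract_features hit its except branch
        return None

    pred = model.predict(features)
    # Assumed output layout: two heads, gender first and age second,
    # each shaped (1, 1).
    gender = gender_mapping[round(float(pred[0][0][0]))]
    age = round(float(pred[1][0][0]))
    return gender, age
```

Returning `None` keeps the Flask route simple, but re-raising inside the new `except` blocks (after logging) would surface failures to the caller instead of hiding them behind a missing return value.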