From 5e601c889e15fafc5d2bb2b38cb599753145088d Mon Sep 17 00:00:00 2001
From: "yanis.bouarfa"
Date: Tue, 7 Jan 2025 19:22:56 +0100
Subject: [PATCH] Automatisation du choix de l'analyse du plan ou de la page

---
 main.py                     | 70 +++++++++++-----------------
 src/classifiers/bayesian.py | 92 ++++++++++++++++++++-----------------
 src/pipeline.py             | 66 ++++++++++++++++----------
 train.py                    | 91 ++++++++++++++++++------------------
 4 files changed, 165 insertions(+), 154 deletions(-)

diff --git a/main.py b/main.py
index b6c09cd..b9e92cb 100644
--- a/main.py
+++ b/main.py
@@ -1,12 +1,31 @@
 import os
-import cv2
+import subprocess
 from src.pipeline import ObjectDetectionPipeline
 from src.classifiers.bayesian import BayesianClassifier
 from collections import defaultdict

+# Définissez le mode d'analyse ici : "plan" ou "page"
+analysis_mode = "plan"
+
 if __name__ == "__main__":
-    # Chemin vers le modèle entraîné
-    model_path = "models/bayesian_modelPAGE.pth"
+    # Configuration basée sur le mode
+    if analysis_mode == "plan":
+        dataset_path = "data/catalogueSymbol"
+        model_path = "models/bayesian_modelPLAN.pth"
+        image_path = "data/plan.png"
+    else:
+        dataset_path = "data/catalogue"
+        model_path = "models/bayesian_modelPAGE.pth"
+        image_path = "data/page.png"
+
+    # Lancer l'entraînement via train.py
+    print(f"Lancement de l'entraînement pour le mode '{analysis_mode}'...")
+    try:
+        subprocess.run(["python", "train.py", dataset_path, model_path], check=True)
+        print(f"Entraînement terminé et modèle sauvegardé dans {model_path}")
+    except subprocess.CalledProcessError as e:
+        print(f"Erreur lors de l'exécution de train.py : {e}")
+        exit(1)

     # Chargement du modèle bayésien
     print(f"Chargement du modèle bayésien depuis {model_path}")
@@ -18,8 +37,7 @@ if __name__ == "__main__":
         print(f"Erreur lors du chargement du modèle : {e}")
         exit(1)

-    # Chemin de l'image de test
-    image_path = "data/page.png"
+    # Vérification de l'existence de l'image
     if not os.path.exists(image_path):
         print(f"L'image de test {image_path} n'existe pas.")
         exit(1)
@@ -33,6 +51,9 @@ if __name__ == "__main__":
     print("Initialisation de la pipeline...")
     pipeline = ObjectDetectionPipeline(image_path=image_path, model=bayesian_model, output_dir=output_dir)

+    # Définition du mode (plan ou page)
+    pipeline.set_mode(analysis_mode)
+
     # Chargement de l'image
     print("Chargement de l'image...")
     try:
@@ -45,6 +66,7 @@ if __name__ == "__main__":
     print("Détection et classification des objets...")
     try:
         class_counts, detected_objects = pipeline.detect_and_classify_objects()
+        print("Classes détectées :", class_counts)
     except Exception as e:
         print(f"Erreur lors de la détection/classification : {e}")
         exit(1)
@@ -53,42 +75,4 @@ if __name__ == "__main__":
     print("Sauvegarde et affichage des résultats...")
     pipeline.display_results(class_counts, detected_objects)

-    # Chargement des comptes réels manuels avec distinction entre minuscule et majuscule
-    true_counts_manual = {
-        'A_': 30, 'A': 30, 'B_': 4, 'B': 0, 'C_': 14, 'C': 14, 'D_': 17, 'D': 17,
-        'E_': 68, 'E': 69, 'F_': 2, 'F': 2, 'G_': 8, 'G': 8, 'H_': 9, 'H': 9,
-        'I_': 26, 'I': 25, 'J_': 1, 'J': 0, 'K_': 0, 'K': 0, 'L_': 20, 'L': 19,
-        'M_': 15, 'M': 15, 'N_': 30, 'N': 29, 'O_': 37, 'O': 37, 'P_': 23, 'P': 22,
-        'Q_': 5, 'Q': 4, 'R_': 28, 'R': 27, 'S_': 26, 'S': 25, 'T_': 38, 'T': 38,
-        'U_': 25, 'U': 25, 'V_': 7, 'V': 6, 'W_': 1, 'W': 0, 'X_': 2, 'X': 2,
-        'Y_': 6, 'Y': 5, 'Z_': 3, 'Z': 2,
-        '1': 8, '2': 11, '3': 2, '4': 1, '5': 2, '6': 1, '7': 1, '8': 3, '9': 3
-    }
-
-    # Chargement des résultats détectés depuis results.txt
-    results_path = "output/results.txt"
-    detected_counts = defaultdict(int)
-    if os.path.exists(results_path):
-        with open(results_path, "r") as f:
-            for line in f:
-                char, count = line.strip().split(":")
-                detected_counts[char.strip()] = int(count.strip())
-    else:
-        print(f"Le fichier {results_path} n'existe pas.")
-        exit(1)
-
-    # Calcul du pourcentage de précision
-    print("Calcul du pourcentage de précision...")
-    total_true = sum(true_counts_manual.values())
-    common_keys = set(true_counts_manual.keys()) & set(detected_counts.keys())
-
-    correctly_detected = sum(min(detected_counts[char], true_counts_manual[char]) for char in common_keys)
-    precision = (correctly_detected / total_true) * 100 if total_true > 0 else 0
-
-    # Afficher les résultats
-    print("\nRésultats de comparaison :")
-    for char in sorted(common_keys):
-        print(f"{char}: True={true_counts_manual[char]}, Detected={detected_counts[char]}")
-
-    print(f"\nPrécision globale : {precision:.2f}%")
     print(f"Les résultats ont été sauvegardés dans le dossier : {output_dir}")
diff --git a/src/classifiers/bayesian.py b/src/classifiers/bayesian.py
index 3a2be5e..7cf714b 100644
--- a/src/classifiers/bayesian.py
+++ b/src/classifiers/bayesian.py
@@ -12,6 +12,7 @@ class BayesianClassifier:
         self.feature_variances = {}
         self.class_priors = {}
         self.classes = []
+        self.mode = None  # Défini par le main.py ("plan" ou "page")

         # Initialize HOG descriptor with standard parameters
         self.hog = cv2.HOGDescriptor(
@@ -22,61 +23,51 @@ class BayesianClassifier:
             _nbins=9
         )

+    def set_mode(self, mode):
+        """
+        Configure le mode d'analyse (plan ou page) et ajuste les classes autorisées.
+        """
+        self.mode = mode
+        if mode == "plan":
+            self.classes = ['Figure1', 'Figure2', 'Figure3', 'Figure4', 'Figure5', 'Figure6']
+        elif mode == "page":
+            self.classes = ['2', 'd', 'I', 'n', 'o', 'u']
+        else:
+            raise ValueError(f"Mode inconnu : {mode}")
+
     def extract_features(self, image):
+        """
+        Extrait des caractéristiques d'une image (via HOG et normalisation).
+ """ try: - # Convert image to grayscale if len(image.shape) == 3 and image.shape[2] == 3: gray_image = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY) else: gray_image = image - # Apply adaptive thresholding - binary_image = cv2.adaptiveThreshold( - gray_image, 255, cv2.ADAPTIVE_THRESH_MEAN_C, cv2.THRESH_BINARY_INV, 11, 2 - ) + resized_image = cv2.resize(gray_image, (28, 28)) + hog_features = self.hog.compute(resized_image) - # Find contours - contours, _ = cv2.findContours(binary_image, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE) - if not contours: - print("No contours found.") - return np.array([]) - - features = [] - for contour in contours: - if cv2.contourArea(contour) < 22: - continue - - x, y, w, h = cv2.boundingRect(contour) - letter_image = gray_image[y:y + h, x:x + w] - letter_image = cv2.resize(letter_image, (28, 28)) - - # Compute HOG features - hog_features = self.hog.compute(letter_image) - features.append(hog_features.flatten()) - - features = np.array(features) - if features.size == 0: - print("No features extracted.") - return np.array([]) - - norms = np.linalg.norm(features, axis=1, keepdims=True) - features = features / np.where(norms > 1e-6, norms, 1) - - return features + features = hog_features.flatten() + norm = np.linalg.norm(features) + return features / norm if norm > 1e-6 else features except Exception as e: print(f"Error in extract_features: {e}") return np.array([]) def train(self, dataset_path): + """ + Entraîne le modèle bayésien sur un dataset structuré en sous-dossiers par classe. + """ class_features = defaultdict(list) total_images = 0 for class_name in os.listdir(dataset_path): + if class_name not in self.classes: + continue + class_folder_path = os.path.join(dataset_path, class_name) if os.path.isdir(class_folder_path): - if class_name not in self.classes: - self.classes.append(class_name) - for img_name in os.listdir(class_folder_path): img_path = os.path.join(class_folder_path, img_name) if os.path.isfile(img_path): @@ -85,11 +76,8 @@ class BayesianClassifier: if image is not None: features = self.extract_features(image) if features.size > 0: - for feature in features: - class_features[class_name].append(feature) + class_features[class_name].append(features) total_images += 1 - else: - print(f"No features extracted for {img_path}") else: print(f"Failed to load image: {img_path}") except Exception as e: @@ -105,6 +93,9 @@ class BayesianClassifier: print("Training completed for classes:", self.classes) def save_model(self, model_path): + """ + Sauvegarde le modèle entraîné dans un fichier. + """ model_data = { "feature_means": self.feature_means, "feature_variances": self.feature_variances, @@ -117,8 +108,11 @@ class BayesianClassifier: print(f"Model saved to {model_path}") def load_model(self, model_path): + """ + Charge un modèle existant depuis un fichier. + """ if os.path.exists(model_path): - model_data = torch.load(model_path, weights_only=False) + model_data = torch.load(model_path) self.feature_means = model_data["feature_means"] self.feature_variances = model_data["feature_variances"] self.class_priors = model_data["class_priors"] @@ -127,7 +121,10 @@ class BayesianClassifier: else: print(f"No model found at {model_path}.") - def predict(self, image): + def predict(self, image, threshold=-65000): + """ + Prédit la classe d'une image en utilisant le modèle bayésien. 
+ """ try: features = self.extract_features(image) if features.size == 0: @@ -144,12 +141,21 @@ class BayesianClassifier: posterior = likelihood + np.log(prior) posteriors[class_name] = posterior - return max(posteriors, key=posteriors.get) + max_class = max(posteriors, key=posteriors.get) + max_posterior = posteriors[max_class] + + print(f"Class: {max_class}, Posterior: {max_posterior}") + if max_posterior < threshold: + return None + return max_class except Exception as e: print(f"Error in prediction: {e}") return None def visualize(self): + """ + Visualise les moyennes des caractéristiques par classe. + """ if not self.classes: print("No classes to visualize.") return diff --git a/src/pipeline.py b/src/pipeline.py index cb2110b..4ca8a48 100644 --- a/src/pipeline.py +++ b/src/pipeline.py @@ -5,15 +5,9 @@ from collections import defaultdict class ObjectDetectionPipeline: - def __init__(self, image_path, model=None, output_dir="output", min_contour_area=50, binary_threshold=127): + def __init__(self, image_path, model=None, output_dir="output", min_contour_area=20, binary_threshold=None): """ - Initialisation de la pipeline de détection d'objets. - - :param image_path: Chemin de l'image à traiter - :param model: Modèle de classification à utiliser - :param output_dir: Dossier où les résultats seront sauvegardés - :param min_contour_area: Aire minimale des contours à prendre en compte - :param binary_threshold: Seuil de binarisation pour les canaux + Initialise le pipeline de détection et classification d'objets. """ self.image_path = image_path self.image = None @@ -22,24 +16,46 @@ class ObjectDetectionPipeline: self.output_dir = output_dir self.min_contour_area = min_contour_area self.binary_threshold = binary_threshold + self.mode = None # Défini par le main.py ("plan" ou "page") if not os.path.exists(self.output_dir): os.makedirs(self.output_dir) + def set_mode(self, mode): + """ + Configure le mode d'analyse (plan ou page). + """ + self.mode = mode + if self.mode == "plan": + self.annotated_output_path = os.path.join(self.output_dir, "annotated_plan.jpg") + self.detection_threshold = -395000 # Seuil pour le mode plan + elif self.mode == "page": + self.annotated_output_path = os.path.join(self.output_dir, "annotated_page.jpg") + self.detection_threshold = -65000 # Seuil pour le mode page + else: + raise ValueError(f"Mode inconnu : {mode}") + def load_image(self): - """Charge l'image spécifiée.""" + """ + Charge l'image spécifiée. + """ self.image = cv2.imread(self.image_path) if self.image is None: - raise FileNotFoundError(f"L'image {self.image_path} est introuvable.") + raise FileNotFoundError(f"Image {self.image_path} non trouvée.") return self.image def preprocess_image(self): - """Prétraite l'image pour la préparer à l'inférence.""" + """ + Prétraite l'image pour la détection. 
+ """ channels = cv2.split(self.image) binary_images = [] for channel in channels: - _, binary_channel = cv2.threshold(channel, self.binary_threshold, 255, cv2.THRESH_BINARY_INV) + if self.binary_threshold is None: + _, binary_channel = cv2.threshold(channel, 0, 255, cv2.THRESH_BINARY_INV + cv2.THRESH_OTSU) + else: + _, binary_channel = cv2.threshold(channel, self.binary_threshold, 255, cv2.THRESH_BINARY_INV) binary_images.append(binary_channel) binary_image = cv2.bitwise_or(binary_images[0], binary_images[1]) @@ -48,7 +64,9 @@ class ObjectDetectionPipeline: return binary_image def detect_and_classify_objects(self): - """Détecte et classe les objets présents dans l'image.""" + """ + Détecte et classe les objets dans l'image. + """ if self.model is None: raise ValueError("Aucun modèle de classification fourni.") @@ -65,9 +83,10 @@ class ObjectDetectionPipeline: x, y, w, h = cv2.boundingRect(contour) letter_image = self.image[y:y + h, x:x + w] - predicted_class = self.model.predict(letter_image) + # Prédit la classe de l'objet détecté + predicted_class = self.model.predict(letter_image, threshold=self.detection_threshold) if predicted_class is None: - print("Skipping object with invalid prediction.") + print("Objet ignoré en raison d'une faible ressemblance.") continue class_counts[predicted_class] += 1 @@ -76,33 +95,34 @@ class ObjectDetectionPipeline: return dict(sorted(class_counts.items())), detected_objects def save_results(self, class_counts, detected_objects): - """Sauvegarde les résultats de détection et classification.""" - # Sauvegarder l'image binaire + """ + Sauvegarde les résultats de la détection et de la classification. + """ binary_output_path = os.path.join(self.output_dir, "binary_image.jpg") cv2.imwrite(binary_output_path, self.binary_image) - # Sauvegarder l'image annotée annotated_image = self.image.copy() for (x, y, w, h, predicted_class) in detected_objects: cv2.rectangle(annotated_image, (x, y), (x + w, y + h), (0, 255, 0), 2) cv2.putText(annotated_image, str(predicted_class), (x, y - 10), cv2.FONT_HERSHEY_SIMPLEX, 0.9, (0, 255, 0), 2) - annotated_output_path = os.path.join(self.output_dir, "annotated_page.jpg") - cv2.imwrite(annotated_output_path, annotated_image) - # Sauvegarder les classes et leurs occurrences + cv2.imwrite(self.annotated_output_path, annotated_image) + results_text_path = os.path.join(self.output_dir, "results.txt") with open(results_text_path, "w") as f: for class_name, count in class_counts.items(): f.write(f"{class_name}: {count}\n") def display_results(self, class_counts, detected_objects): - """Affiche et sauvegarde les résultats.""" + """ + Affiche et sauvegarde les résultats. 
+ """ self.save_results(class_counts, detected_objects) plt.figure(figsize=(10, 5)) plt.bar(class_counts.keys(), class_counts.values()) plt.xlabel("Classes") - plt.ylabel("Nombre d'objets") + plt.ylabel("Nombre d'objets détectés") plt.title("Distribution des classes détectées") plt.show() diff --git a/train.py b/train.py index 588c3f9..4e78a9c 100644 --- a/train.py +++ b/train.py @@ -1,56 +1,57 @@ -import os -from collections import defaultdict -import numpy as np -import cv2 +from main import analysis_mode + +if analysis_mode == "plan": + dataset_path = "data/catalogueSymbol" + allowed_classes = ['Figure1', 'Figure2', 'Figure3', 'Figure4', 'Figure5', 'Figure6'] + model_path = "models/bayesian_modelPLAN.pth" +else: + dataset_path = "data/catalogue" + allowed_classes = ['2', 'd', 'I', 'n', 'o', 'u'] + model_path = "models/bayesian_modelPAGE.pth" from src.classifiers.bayesian import BayesianClassifier +from collections import defaultdict +import os +import cv2 +import numpy as np -if __name__ == "__main__": - # Chemin vers le dataset d'entraînement - dataset_path = "data/catalogue" +# Initialisation +bayesian_model = BayesianClassifier() - # Initialisation du classifieur Bayésien - bayesian_model = BayesianClassifier() +print("Début de l'entraînement...") +class_features = defaultdict(list) +total_images = 0 - print("Début de l'entraînement...") +# Parcours des classes dans le dataset +for class_name in os.listdir(dataset_path): + if class_name not in allowed_classes: + continue - # Dictionnaire pour stocker les caractéristiques par classe - class_features = defaultdict(list) - total_images = 0 + class_folder_path = os.path.join(dataset_path, class_name) + if not os.path.isdir(class_folder_path): + continue - # Parcours des classes dans le dataset - for class_name in os.listdir(dataset_path): - class_folder_path = os.path.join(dataset_path, class_name) - if not os.path.isdir(class_folder_path): - continue # Ignorer les fichiers qui ne sont pas des dossiers + if class_name not in bayesian_model.classes: + bayesian_model.classes.append(class_name) - # Ajouter la classe au modèle si elle n'existe pas déjà - if class_name not in bayesian_model.classes: - bayesian_model.classes.append(class_name) + for image_name in os.listdir(class_folder_path): + image_path = os.path.join(class_folder_path, image_name) + image = cv2.imread(image_path) - # Parcours des images dans le dossier de la classe - for image_name in os.listdir(class_folder_path): - image_path = os.path.join(class_folder_path, image_name) - image = cv2.imread(image_path) + if image is not None: + features = bayesian_model.extract_features(image) + for feature in features: + class_features[class_name].append(feature) + total_images += 1 - if image is not None: - # Extraire les caractéristiques de l'image - features = bayesian_model.extract_features(image) - for feature in features: - class_features[class_name].append(feature) - total_images += 1 +# Calcul des statistiques pour chaque classe +for class_name in bayesian_model.classes: + if class_name in class_features: + features = np.array(class_features[class_name]) + bayesian_model.feature_means[class_name] = np.mean(features, axis=0) + bayesian_model.feature_variances[class_name] = np.var(features, axis=0) + 1e-6 + bayesian_model.class_priors[class_name] = len(features) / total_images - # Calcul des statistiques pour chaque classe - for class_name in bayesian_model.classes: - if class_name in class_features: - features = np.array(class_features[class_name]) - 
-            bayesian_model.feature_means[class_name] = np.mean(features, axis=0)
-            bayesian_model.feature_variances[class_name] = np.var(features, axis=0) + 1e-6  # Éviter la division par zéro
-            bayesian_model.class_priors[class_name] = len(features) / total_images
-
-    print("Entraînement terminé.")
-
-    # Sauvegarde du modèle entraîné
-    model_path = "models/bayesian_modelPAGE.pth"
-    bayesian_model.save_model(model_path)
-    print(f"Modèle sauvegardé dans : {model_path}")
+print("Entraînement terminé.")
+bayesian_model.save_model(model_path)
+print(f"Modèle sauvegardé dans : {model_path}")
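
Note on the orchestration in main.py: the patch launches training with subprocess.run(["python", "train.py", dataset_path, model_path], check=True), while train.py resolves its own configuration by importing analysis_mode from main. A minimal sketch of how the two positional arguments that main.py already passes could be read on the train.py side; the argument order follows the subprocess call above, and the helper name and messages are illustrative, not part of the patch.

    # Illustrative sketch only: reads the arguments main.py passes to train.py.
    # Order (dataset path, then model path) mirrors the subprocess.run call in main.py.
    import sys

    def parse_train_args(argv):
        if len(argv) != 3:
            raise SystemExit("usage: python train.py <dataset_path> <model_path>")
        return argv[1], argv[2]

    if __name__ == "__main__":
        dataset_path, model_path = parse_train_args(sys.argv)
        print(f"Training on {dataset_path}, writing model to {model_path}")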
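
The new BayesianClassifier.extract_features reduces each crop to a single descriptor: grayscale, resize to 28x28, HOG, then L2 normalisation. A standalone sketch of that flow on a synthetic crop; only _nbins=9 is visible in this diff, so the HOG window, block, stride and cell sizes below are assumptions chosen to fit a 28x28 input.

    # Sketch of the per-crop feature extraction (grayscale -> 28x28 -> HOG -> L2 norm).
    # All HOG geometry parameters except _nbins are assumed.
    import cv2
    import numpy as np

    hog = cv2.HOGDescriptor(
        _winSize=(28, 28),
        _blockSize=(14, 14),
        _blockStride=(7, 7),
        _cellSize=(7, 7),
        _nbins=9,
    )

    def extract(image):
        gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY) if image.ndim == 3 else image
        resized = cv2.resize(gray, (28, 28))
        features = hog.compute(resized).flatten()
        norm = np.linalg.norm(features)
        return features / norm if norm > 1e-6 else features

    # Synthetic crop: a dark stroke on a white background.
    crop = np.full((40, 30, 3), 255, dtype=np.uint8)
    cv2.line(crop, (5, 5), (25, 35), (0, 0, 0), 2)
    print(extract(crop).shape)  # (324,) with the assumed geometry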
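
Training stores, per class, the feature means, the variances plus 1e-6 (to avoid division by zero) and a prior equal to the class's share of images; predict then keeps posterior = likelihood + np.log(prior) and returns None when the best posterior falls below the mode-specific threshold (-65000 for "page", -395000 for "plan", per set_mode in src/pipeline.py). The likelihood itself is not shown in this diff, so the Gaussian log-density below is an assumption about that unchanged code, and the feature vectors are toy values.

    # Toy-sized Gaussian naive Bayes in the spirit of BayesianClassifier.train/predict.
    import numpy as np

    def fit(features_by_class):
        stats = {}
        total = sum(len(f) for f in features_by_class.values())
        for name, feats in features_by_class.items():
            feats = np.array(feats)
            stats[name] = (
                np.mean(feats, axis=0),
                np.var(feats, axis=0) + 1e-6,  # avoid division by zero
                len(feats) / total,            # class prior
            )
        return stats

    def predict(x, stats, threshold=-65000):
        posteriors = {}
        for name, (mean, var, prior) in stats.items():
            log_likelihood = -0.5 * np.sum(np.log(2 * np.pi * var) + (x - mean) ** 2 / var)
            posteriors[name] = log_likelihood + np.log(prior)
        best = max(posteriors, key=posteriors.get)
        return best if posteriors[best] >= threshold else None

    stats = fit({
        "o": [np.array([0.2, 0.7]), np.array([0.25, 0.65])],
        "d": [np.array([0.8, 0.1]), np.array([0.75, 0.15])],
    })
    print(predict(np.array([0.22, 0.68]), stats, threshold=-100))  # -> 'o'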
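
In src/pipeline.py, preprocess_image now defaults binary_threshold to None, in which case every colour channel is binarised with Otsu's method (cv2.THRESH_BINARY_INV + cv2.THRESH_OTSU) instead of a fixed cut-off, and the channel masks are then OR-combined. A standalone version of that branch on a synthetic image; the hunk only shows the OR of the first two channels, so folding in the third one here is an assumption.

    # Standalone version of the thresholding branch added to preprocess_image.
    import cv2
    import numpy as np

    def binarize(image, binary_threshold=None):
        masks = []
        for channel in cv2.split(image):
            if binary_threshold is None:
                # Otsu picks the threshold per channel automatically.
                _, mask = cv2.threshold(channel, 0, 255, cv2.THRESH_BINARY_INV + cv2.THRESH_OTSU)
            else:
                _, mask = cv2.threshold(channel, binary_threshold, 255, cv2.THRESH_BINARY_INV)
            masks.append(mask)
        binary = cv2.bitwise_or(masks[0], masks[1])
        return cv2.bitwise_or(binary, masks[2])

    image = np.full((60, 60, 3), 255, dtype=np.uint8)
    cv2.putText(image, "A", (15, 45), cv2.FONT_HERSHEY_SIMPLEX, 1.5, (0, 0, 0), 3)
    print(np.count_nonzero(binarize(image)), "foreground pixels")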
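
save_results still writes one "class: count" line per entry to output/results.txt even though the manual comparison block was removed from main.py. A small sketch of reading that file back into a dict, in case the precision check is reinstated later; the file name and format come from the diff, the helper name is illustrative.

    # Illustrative reader for the results.txt written by save_results.
    import os
    from collections import defaultdict

    def read_counts(path="output/results.txt"):
        counts = defaultdict(int)
        if not os.path.exists(path):
            raise FileNotFoundError(path)
        with open(path, "r") as f:
            for line in f:
                name, count = line.strip().split(":")
                counts[name.strip()] = int(count.strip())
        return counts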