Dernière version fonctionnelle en date.

This commit is contained in:
yanis.bouarfa 2025-01-07 20:23:25 +01:00
parent 5e601c889e
commit 7abdb91d06
4 changed files with 117 additions and 156 deletions

33
main.py
View file

@@ -1,31 +1,12 @@
import os
import subprocess
import cv2
from src.pipeline import ObjectDetectionPipeline
from src.classifiers.bayesian import BayesianClassifier
from collections import defaultdict
# Définissez le mode d'analyse ici : "plan" ou "page"
analysis_mode = "plan"
if __name__ == "__main__":
# Configuration basée sur le mode
if analysis_mode == "plan":
dataset_path = "data/catalogueSymbol"
model_path = "models/bayesian_modelPLAN.pth"
image_path = "data/plan.png"
else:
dataset_path = "data/catalogue"
model_path = "models/bayesian_modelPAGE.pth"
image_path = "data/page.png"
# Lancer l'entraînement via train.py
print(f"Lancement de l'entraînement pour le mode '{analysis_mode}'...")
try:
subprocess.run(["python", "train.py", dataset_path, model_path], check=True)
print(f"Entraînement terminé et modèle sauvegardé dans {model_path}")
except subprocess.CalledProcessError as e:
print(f"Erreur lors de l'exécution de train.py : {e}")
exit(1)
# Chemin vers le modèle entraîné
model_path = "models/bayesian_modelPAGE.pth"
# Chargement du modèle bayésien
print(f"Chargement du modèle bayésien depuis {model_path}")
@@ -37,7 +18,8 @@ if __name__ == "__main__":
print(f"Erreur lors du chargement du modèle : {e}")
exit(1)
# Vérification de l'existence de l'image
# Chemin de l'image de test
image_path = "data/page.png"
if not os.path.exists(image_path):
print(f"L'image de test {image_path} n'existe pas.")
exit(1)
@@ -51,9 +33,6 @@ if __name__ == "__main__":
print("Initialisation de la pipeline...")
pipeline = ObjectDetectionPipeline(image_path=image_path, model=bayesian_model, output_dir=output_dir)
# Définition du mode (plan ou page)
pipeline.set_mode(analysis_mode)
# Chargement de l'image
print("Chargement de l'image...")
try:
@@ -66,7 +45,7 @@ if __name__ == "__main__":
print("Détection et classification des objets...")
try:
class_counts, detected_objects = pipeline.detect_and_classify_objects()
print("Classes détectées :", class_counts)
print("Classes détectées :", class_counts) # Added debug info
except Exception as e:
print(f"Erreur lors de la détection/classification : {e}")
exit(1)

View file

@@ -12,7 +12,6 @@ class BayesianClassifier:
self.feature_variances = {}
self.class_priors = {}
self.classes = []
self.mode = None # Défini par le main.py ("plan" ou "page")
# Initialize HOG descriptor with standard parameters
self.hog = cv2.HOGDescriptor(
@@ -23,51 +22,66 @@ class BayesianClassifier:
_nbins=9
)
def set_mode(self, mode):
"""
Configure le mode d'analyse (plan ou page) et ajuste les classes autorisées.
"""
self.mode = mode
if mode == "plan":
self.classes = ['Figure1', 'Figure2', 'Figure3', 'Figure4', 'Figure5', 'Figure6']
elif mode == "page":
self.classes = ['2', 'd', 'I', 'n', 'o', 'u']
else:
raise ValueError(f"Mode inconnu : {mode}")
def extract_features(self, image):
"""
Extrait des caractéristiques d'une image (via HOG et normalisation).
"""
try:
# Convert image to grayscale
if len(image.shape) == 3 and image.shape[2] == 3:
gray_image = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
else:
gray_image = image
resized_image = cv2.resize(gray_image, (28, 28))
hog_features = self.hog.compute(resized_image)
# Apply adaptive thresholding for better segmentation
binary_image = cv2.adaptiveThreshold(
gray_image, 255, cv2.ADAPTIVE_THRESH_MEAN_C, cv2.THRESH_BINARY_INV, 11, 2
)
features = hog_features.flatten()
norm = np.linalg.norm(features)
return features / norm if norm > 1e-6 else features
# Find contours
contours, _ = cv2.findContours(binary_image, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
if not contours:
print("No contours found.")
return np.array([])
features = []
for contour in contours:
if cv2.contourArea(contour) < 20: # Lowered area threshold
continue
x, y, w, h = cv2.boundingRect(contour)
letter_image = gray_image[y:y + h, x:x + w]
letter_image = cv2.resize(letter_image, (28, 28))
# Compute HOG features
hog_features = self.hog.compute(letter_image)
features.append(hog_features.flatten())
features = np.array(features)
if features.size == 0:
print("No features extracted.")
return np.array([])
# Normalize features for better consistency
norms = np.linalg.norm(features, axis=1, keepdims=True)
features = features / np.where(norms > 1e-6, norms, 1)
return features
except Exception as e:
print(f"Error in extract_features: {e}")
return np.array([])
def train(self, dataset_path):
"""
Entraîne le modèle bayésien sur un dataset structuré en sous-dossiers par classe.
"""
class_features = defaultdict(list)
total_images = 0
allowed_classes = ['2', 'd', 'I', 'n', 'o', 'u'] # Modifiez selon vos besoins
for class_name in os.listdir(dataset_path):
if class_name not in self.classes:
if class_name not in allowed_classes:
continue
class_folder_path = os.path.join(dataset_path, class_name)
if os.path.isdir(class_folder_path):
if class_name not in self.classes:
self.classes.append(class_name)
for img_name in os.listdir(class_folder_path):
img_path = os.path.join(class_folder_path, img_name)
if os.path.isfile(img_path):
@@ -76,8 +90,11 @@ class BayesianClassifier:
if image is not None:
features = self.extract_features(image)
if features.size > 0:
class_features[class_name].append(features)
for feature in features:
class_features[class_name].append(feature)
total_images += 1
else:
print(f"No features extracted for {img_path}")
else:
print(f"Failed to load image: {img_path}")
except Exception as e:
@@ -93,9 +110,6 @@ class BayesianClassifier:
print("Training completed for classes:", self.classes)
def save_model(self, model_path):
"""
Sauvegarde le modèle entraîné dans un fichier.
"""
model_data = {
"feature_means": self.feature_means,
"feature_variances": self.feature_variances,
@@ -108,11 +122,8 @@ class BayesianClassifier:
print(f"Model saved to {model_path}")
def load_model(self, model_path):
"""
Charge un modèle existant depuis un fichier.
"""
if os.path.exists(model_path):
model_data = torch.load(model_path)
model_data = torch.load(model_path, weights_only=False)
self.feature_means = model_data["feature_means"]
self.feature_variances = model_data["feature_variances"]
self.class_priors = model_data["class_priors"]
@@ -121,10 +132,7 @@ class BayesianClassifier:
else:
print(f"No model found at {model_path}.")
def predict(self, image, threshold=-65000):
"""
Prédit la classe d'une image en utilisant le modèle bayésien.
"""
def predict(self, image, threshold=0.3): # Lowered threshold
try:
features = self.extract_features(image)
if features.size == 0:
@@ -144,7 +152,7 @@ class BayesianClassifier:
max_class = max(posteriors, key=posteriors.get)
max_posterior = posteriors[max_class]
print(f"Class: {max_class}, Posterior: {max_posterior}")
print(f"Class: {max_class}, Posterior: {max_posterior}") # Added debug info
if max_posterior < threshold:
return None
return max_class
@@ -153,9 +161,6 @@ class BayesianClassifier:
return None
def visualize(self):
"""
Visualise les moyennes des caractéristiques par classe.
"""
if not self.classes:
print("No classes to visualize.")
return

View file

@@ -6,9 +6,7 @@ from collections import defaultdict
class ObjectDetectionPipeline:
def __init__(self, image_path, model=None, output_dir="output", min_contour_area=20, binary_threshold=None):
"""
Initialise le pipeline de détection et classification d'objets.
"""
# Initialize the object detection pipeline
self.image_path = image_path
self.image = None
self.binary_image = None
@@ -16,38 +14,19 @@ class ObjectDetectionPipeline:
self.output_dir = output_dir
self.min_contour_area = min_contour_area
self.binary_threshold = binary_threshold
self.mode = None # Défini par le main.py ("plan" ou "page")
if not os.path.exists(self.output_dir):
os.makedirs(self.output_dir)
def set_mode(self, mode):
"""
Configure le mode d'analyse (plan ou page).
"""
self.mode = mode
if self.mode == "plan":
self.annotated_output_path = os.path.join(self.output_dir, "annotated_plan.jpg")
self.detection_threshold = -395000 # Seuil pour le mode plan
elif self.mode == "page":
self.annotated_output_path = os.path.join(self.output_dir, "annotated_page.jpg")
self.detection_threshold = -65000 # Seuil pour le mode page
else:
raise ValueError(f"Mode inconnu : {mode}")
def load_image(self):
"""
Charge l'image spécifiée.
"""
# Load the specified image
self.image = cv2.imread(self.image_path)
if self.image is None:
raise FileNotFoundError(f"Image {self.image_path} non trouvée.")
raise FileNotFoundError(f"Image {self.image_path} not found.")
return self.image
def preprocess_image(self):
"""
Prétraite l'image pour la détection.
"""
# Preprocess the image for inference
channels = cv2.split(self.image)
binary_images = []
@@ -64,11 +43,9 @@ class ObjectDetectionPipeline:
return binary_image
def detect_and_classify_objects(self):
"""
Détecte et classe les objets dans l'image.
"""
# Detect and classify objects in the image
if self.model is None:
raise ValueError("Aucun modèle de classification fourni.")
raise ValueError("No classification model provided.")
self.binary_image = self.preprocess_image()
contours, _ = cv2.findContours(self.binary_image, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
@@ -83,10 +60,9 @@ class ObjectDetectionPipeline:
x, y, w, h = cv2.boundingRect(contour)
letter_image = self.image[y:y + h, x:x + w]
# Prédit la classe de l'objet détecté
predicted_class = self.model.predict(letter_image, threshold=self.detection_threshold)
predicted_class = self.model.predict(letter_image, threshold=-65000) # Adjusted threshold
if predicted_class is None:
print("Objet ignoré en raison d'une faible ressemblance.")
print("Object ignored due to low resemblance.")
continue
class_counts[predicted_class] += 1
@@ -95,9 +71,7 @@ class ObjectDetectionPipeline:
return dict(sorted(class_counts.items())), detected_objects
def save_results(self, class_counts, detected_objects):
"""
Sauvegarde les résultats de la détection et de la classification.
"""
# Save detection and classification results
binary_output_path = os.path.join(self.output_dir, "binary_image.jpg")
cv2.imwrite(binary_output_path, self.binary_image)
@@ -106,8 +80,8 @@ class ObjectDetectionPipeline:
cv2.rectangle(annotated_image, (x, y), (x + w, y + h), (0, 255, 0), 2)
cv2.putText(annotated_image, str(predicted_class), (x, y - 10),
cv2.FONT_HERSHEY_SIMPLEX, 0.9, (0, 255, 0), 2)
cv2.imwrite(self.annotated_output_path, annotated_image)
annotated_output_path = os.path.join(self.output_dir, "annotated_page.jpg")
cv2.imwrite(annotated_output_path, annotated_image)
results_text_path = os.path.join(self.output_dir, "results.txt")
with open(results_text_path, "w") as f:
@@ -115,14 +89,12 @@ class ObjectDetectionPipeline:
f.write(f"{class_name}: {count}\n")
def display_results(self, class_counts, detected_objects):
"""
Affiche et sauvegarde les résultats.
"""
# Display and save the results
self.save_results(class_counts, detected_objects)
plt.figure(figsize=(10, 5))
plt.bar(class_counts.keys(), class_counts.values())
plt.xlabel("Classes")
plt.ylabel("Nombre d'objets détectés")
plt.title("Distribution des classes détectées")
plt.ylabel("Object count")
plt.title("Detected Class Distribution")
plt.show()

View file

@@ -1,57 +1,62 @@
from main import analysis_mode
if analysis_mode == "plan":
dataset_path = "data/catalogueSymbol"
allowed_classes = ['Figure1', 'Figure2', 'Figure3', 'Figure4', 'Figure5', 'Figure6']
model_path = "models/bayesian_modelPLAN.pth"
else:
dataset_path = "data/catalogue"
allowed_classes = ['2', 'd', 'I', 'n', 'o', 'u']
model_path = "models/bayesian_modelPAGE.pth"
import os
from collections import defaultdict
import numpy as np
import cv2
from src.classifiers.bayesian import BayesianClassifier
from collections import defaultdict
import os
import cv2
import numpy as np
# Initialisation
bayesian_model = BayesianClassifier()
if __name__ == "__main__":
# Chemin vers le dataset d'entraînement
dataset_path = "data/catalogue"
print("Début de l'entraînement...")
class_features = defaultdict(list)
total_images = 0
# Initialisation du classifieur Bayésien
bayesian_model = BayesianClassifier()
# Parcours des classes dans le dataset
for class_name in os.listdir(dataset_path):
if class_name not in allowed_classes:
continue
print("Début de l'entraînement...")
class_folder_path = os.path.join(dataset_path, class_name)
if not os.path.isdir(class_folder_path):
continue
# Dictionnaire pour stocker les caractéristiques par classe
class_features = defaultdict(list)
total_images = 0
if class_name not in bayesian_model.classes:
bayesian_model.classes.append(class_name)
# Liste des classes autorisées
allowed_classes = ['2', 'd', 'I', 'n', 'o', 'u'] # Classes spécifiques au projet
for image_name in os.listdir(class_folder_path):
image_path = os.path.join(class_folder_path, image_name)
image = cv2.imread(image_path)
# Parcours des classes dans le dataset
for class_name in os.listdir(dataset_path):
if class_name not in allowed_classes:
continue # Ignorer les classes non autorisées
if image is not None:
features = bayesian_model.extract_features(image)
for feature in features:
class_features[class_name].append(feature)
total_images += 1
class_folder_path = os.path.join(dataset_path, class_name)
if not os.path.isdir(class_folder_path):
continue # Ignorer les fichiers qui ne sont pas des dossiers
# Calcul des statistiques pour chaque classe
for class_name in bayesian_model.classes:
if class_name in class_features:
features = np.array(class_features[class_name])
bayesian_model.feature_means[class_name] = np.mean(features, axis=0)
bayesian_model.feature_variances[class_name] = np.var(features, axis=0) + 1e-6
bayesian_model.class_priors[class_name] = len(features) / total_images
# Ajouter la classe au modèle si elle n'existe pas déjà
if class_name not in bayesian_model.classes:
bayesian_model.classes.append(class_name)
print("Entraînement terminé.")
bayesian_model.save_model(model_path)
print(f"Modèle sauvegardé dans : {model_path}")
# Parcours des images dans le dossier de la classe
for image_name in os.listdir(class_folder_path):
image_path = os.path.join(class_folder_path, image_name)
image = cv2.imread(image_path)
if image is not None:
# Extraire les caractéristiques de l'image
features = bayesian_model.extract_features(image)
for feature in features:
class_features[class_name].append(feature)
total_images += 1
# Calcul des statistiques pour chaque classe
for class_name in bayesian_model.classes:
if class_name in class_features:
features = np.array(class_features[class_name])
bayesian_model.feature_means[class_name] = np.mean(features, axis=0)
bayesian_model.feature_variances[class_name] = np.var(features, axis=0) + 1e-6 # Éviter la division par zéro
bayesian_model.class_priors[class_name] = len(features) / total_images
print("Entraînement terminé.")
# Sauvegarde du modèle entraîné
model_path = "models/bayesian_modelPAGE.pth"
bayesian_model.save_model(model_path)
print(f"Modèle sauvegardé dans : {model_path}")