Update app.py
Browse files
app.py
CHANGED
|
@@ -16,7 +16,6 @@ CLASS_NAMES = {
|
|
| 16 |
3: "packaging",
|
| 17 |
}
|
| 18 |
|
| 19 |
-
# Path to the serialized segmentation model (Keras HDF5 file).
|
| 20 |
MODEL_PATH = "segmentation_model.h5"
|
| 21 |
# Loaded once at module import time; presumably `tf` is imported at the top of
# app.py (not visible in this diff view) — TODO confirm against the full file.
model = tf.keras.models.load_model(MODEL_PATH)
|
| 22 |
|
|
@@ -40,19 +39,22 @@ def predict_image(input_image):
|
|
| 40 |
original_size = original_img_np.shape[:2]
|
| 41 |
mask_final = cv2.resize(mask_predicted.astype(np.uint8), (original_size[1], original_size[0]), interpolation=cv2.INTER_NEAREST)
|
| 42 |
confidences_final = cv2.resize(confidences, (original_size[1], original_size[0]), interpolation=cv2.INTER_LINEAR)
|
| 43 |
-
|
|
|
|
| 44 |
final_img = original_img_cv2.copy()
|
| 45 |
-
confidence_threshold = 0.
|
| 46 |
|
| 47 |
for class_id in np.unique(mask_final):
|
| 48 |
if class_id == 0:
|
| 49 |
continue
|
| 50 |
|
| 51 |
class_name = CLASS_NAMES.get(class_id, f"Classe {class_id}")
|
| 52 |
-
|
|
|
|
| 53 |
binary_mask = binary_fill_holes(binary_mask)
|
|
|
|
| 54 |
|
| 55 |
-
contours, _ = cv2.findContours(
|
| 56 |
|
| 57 |
for contour in contours:
|
| 58 |
x_min, y_min, w, h = cv2.boundingRect(contour)
|
|
@@ -70,17 +72,19 @@ def predict_image(input_image):
|
|
| 70 |
label_text = f"{class_name}: {avg_confidence:.2f}%"
|
| 71 |
color_tuple = CLASS_COLORS.get(class_id, (255, 255, 255))
|
| 72 |
|
|
|
|
| 73 |
cv2.rectangle(final_img, (x_min, y_min), (x_max, y_max), color_tuple, 2)
|
| 74 |
|
|
|
|
| 75 |
(text_width, text_height), baseline = cv2.getTextSize(label_text, cv2.FONT_HERSHEY_SIMPLEX, 0.6, 2)
|
| 76 |
cv2.rectangle(final_img, (x_min, y_min - text_height - 10), (x_min + text_width, y_min), color_tuple, -1)
|
| 77 |
cv2.putText(final_img, label_text, (x_min, y_min - 5), cv2.FONT_HERSHEY_SIMPLEX, 0.6, (0, 0, 0), 2)
|
| 78 |
|
| 79 |
-
|
| 80 |
final_img_rgb = cv2.cvtColor(final_img, cv2.COLOR_BGR2RGB)
|
| 81 |
return Image.fromarray(final_img_rgb)
|
| 82 |
|
| 83 |
-
|
| 84 |
gr.Interface(
|
| 85 |
fn=predict_image,
|
| 86 |
inputs=gr.Image(type="pil"),
|
|
|
|
| 16 |
3: "packaging",
|
| 17 |
}
|
| 18 |
|
|
|
|
| 19 |
MODEL_PATH = "segmentation_model.h5"  # path to the trained segmentation model (HDF5)
|
| 20 |
# Load the model once at import; every call to the prediction function reuses it.
model = tf.keras.models.load_model(MODEL_PATH)
|
| 21 |
|
|
|
|
| 39 |
original_size = original_img_np.shape[:2]
|
| 40 |
mask_final = cv2.resize(mask_predicted.astype(np.uint8), (original_size[1], original_size[0]), interpolation=cv2.INTER_NEAREST)
|
| 41 |
confidences_final = cv2.resize(confidences, (original_size[1], original_size[0]), interpolation=cv2.INTER_LINEAR)
|
| 42 |
+
|
| 43 |
+
# Processa a imagem para desenhar as caixas
|
| 44 |
final_img = original_img_cv2.copy()
|
| 45 |
+
confidence_threshold = 0.8 # Limiar de 80%
|
| 46 |
|
| 47 |
for class_id in np.unique(mask_final):
|
| 48 |
if class_id == 0:
|
| 49 |
continue
|
| 50 |
|
| 51 |
class_name = CLASS_NAMES.get(class_id, f"Classe {class_id}")
|
| 52 |
+
|
| 53 |
+
binary_mask = (mask_final == class_id)
|
| 54 |
binary_mask = binary_fill_holes(binary_mask)
|
| 55 |
+
binary_mask_uint8 = binary_mask.astype(np.uint8)
|
| 56 |
|
| 57 |
+
contours, _ = cv2.findContours(binary_mask_uint8, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
|
| 58 |
|
| 59 |
for contour in contours:
|
| 60 |
x_min, y_min, w, h = cv2.boundingRect(contour)
|
|
|
|
| 72 |
label_text = f"{class_name}: {avg_confidence:.2f}%"
|
| 73 |
color_tuple = CLASS_COLORS.get(class_id, (255, 255, 255))
|
| 74 |
|
| 75 |
+
# Desenha a caixa
|
| 76 |
cv2.rectangle(final_img, (x_min, y_min), (x_max, y_max), color_tuple, 2)
|
| 77 |
|
| 78 |
+
# Desenha o fundo do texto
|
| 79 |
(text_width, text_height), baseline = cv2.getTextSize(label_text, cv2.FONT_HERSHEY_SIMPLEX, 0.6, 2)
|
| 80 |
cv2.rectangle(final_img, (x_min, y_min - text_height - 10), (x_min + text_width, y_min), color_tuple, -1)
|
| 81 |
cv2.putText(final_img, label_text, (x_min, y_min - 5), cv2.FONT_HERSHEY_SIMPLEX, 0.6, (0, 0, 0), 2)
|
| 82 |
|
| 83 |
+
# Converte de volta para PIL e retorna
|
| 84 |
final_img_rgb = cv2.cvtColor(final_img, cv2.COLOR_BGR2RGB)
|
| 85 |
return Image.fromarray(final_img_rgb)
|
| 86 |
|
| 87 |
+
# Define a interface Gradio
|
| 88 |
gr.Interface(
|
| 89 |
fn=predict_image,
|
| 90 |
inputs=gr.Image(type="pil"),
|