Initial commit
11 label/augment.py Normal file
@@ -0,0 +1,11 @@
from PIL import Image
import os

for root, dirs, files in os.walk("./label/dataset/valvulas"):
    for dir in dirs:
        image = Image.open(root + "/" + dir + "/" + "normal.png")
        # PIL rotates counterclockwise; with the default expand=False the 45°
        # result keeps the original canvas size, so corners are cropped and
        # filled with black.
        vertical = image.rotate(180)
        diagonal = image.rotate(45)
        refletido = diagonal.rotate(180)  # 45° + 180° = 225° CCW, i.e. 135° CW
        vertical.save(root + "/" + dir + "/" + "vertical.png")
        diagonal.save(root + "/" + dir + "/" + "diagonal45.png")
        refletido.save(root + "/" + dir + "/" + "diagonal135.png")
1770 label/backend/backend.py Normal file
File diff suppressed because it is too large
583 label/backend/extract_text.py Normal file
@@ -0,0 +1,583 @@
import boto3
import json
import os
import re
from PIL import Image, ImageDraw, ImageFont, ImageEnhance, ImageOps, ImageFilter
import random
import shutil

# Configuration
BUCKET_NAME = 'custom-labels-valvulas-bloco-funcao'
FOLDER_PREFIX = 'splitted_diagrams/'  # Directory in S3 (e.g., 'images/' or '' for root)
REGION = 'us-east-1'
OUTPUT_FOLDER = 'text_json'

def detect_text():
    """Detect text using Textract, returning bounding box for each word"""
    # Initialize clients
    s3 = boto3.client('s3', region_name=REGION)
    textract = boto3.client('textract', region_name=REGION)

    # List all PNG files in the S3 folder
    response = s3.list_objects_v2(Bucket=BUCKET_NAME, Prefix=FOLDER_PREFIX)

    if 'Contents' not in response:
        print(f"No files found in {BUCKET_NAME}/{FOLDER_PREFIX}")
        exit()

    # Process each PNG file
    for obj in response['Contents']:
        key = obj['Key']

        # Skip if not a PNG file
        if not key.lower().endswith('.png'):
            continue

        print(f"\nProcessing: {key}")

        # Detect text using Textract
        result = textract.detect_document_text(
            Document={
                'S3Object': {
                    'Bucket': BUCKET_NAME,
                    'Name': key
                }
            }
        )

        # Save result to JSON file
        filename = os.path.basename(key).replace('.png', '.json').replace('.PNG', '.json')
        output_path = os.path.join(OUTPUT_FOLDER, filename)

        os.makedirs(OUTPUT_FOLDER, exist_ok=True)

        with open(output_path, 'w', encoding='utf-8') as f:
            json.dump(result, f, indent=2, ensure_ascii=False)

        print(f"  Saved to: {output_path}")

        # Print detected text (per word)
        word_count = 0
        for block in result['Blocks']:
            if block['BlockType'] == 'WORD':
                word_count += 1
                text = block['Text']
                confidence = block['Confidence']
                bbox = block['Geometry']['BoundingBox']
                print(f"  Word: {text} ({confidence:.1f}%) - BBox: L={bbox['Left']:.3f}, T={bbox['Top']:.3f}, W={bbox['Width']:.3f}, H={bbox['Height']:.3f}")

        print(f"  Total words detected: {word_count}")
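
# Editor's note (not part of the commit): each WORD block in the Textract
# response has roughly this shape; the fields below are the ones this script
# reads, and BoundingBox values are fractions of the page width/height:
#
#   {
#     "BlockType": "WORD",
#     "Text": "VM-1234",
#     "Confidence": 99.1,
#     "Geometry": {
#       "BoundingBox": {"Left": 0.12, "Top": 0.34, "Width": 0.05, "Height": 0.02}
#     }
#   }
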
def draw_bounding_boxes(sectors_dir, json_dir, output_dir='bounding_box_images'):
    """
    Draw bounding boxes around detected words on original images

    Args:
        sectors_dir: Directory containing the original PNG images
        json_dir: Directory containing the JSON text detection files
        output_dir: Directory to save images with bounding boxes
    """
    # Create output directory
    os.makedirs(output_dir, exist_ok=True)

    # Get all PNG files
    png_files = [f for f in os.listdir(sectors_dir) if f.lower().endswith('.png')]

    for png_file in png_files:
        # Get corresponding JSON file name
        json_file = os.path.splitext(png_file)[0] + '.json'

        image_path = os.path.join(sectors_dir, png_file)
        json_path = os.path.join(json_dir, json_file)
        output_path = os.path.join(output_dir, png_file.replace('.png', '_bbox.png'))

        # Check if JSON file exists
        if not os.path.exists(json_path):
            print(f"Warning: JSON not found for {png_file}, skipping...")
            continue

        print(f"Processing: {png_file}")

        # Load the image
        img = Image.open(image_path)
        width, height = img.size

        # Load JSON data
        with open(json_path, 'r') as f:
            data = json.load(f)

        # Create a drawing object
        draw = ImageDraw.Draw(img)

        # Try to use a better font, fall back to default if not available
        try:
            font = ImageFont.truetype("/usr/share/fonts/truetype/dejavu/DejaVuSans.ttf", 12)
        except:
            try:
                font = ImageFont.truetype("arial.ttf", 12)
            except:
                font = ImageFont.load_default()

        # Draw bounding box for each WORD
        word_count = 0
        for block in data['Blocks']:
            if block['BlockType'] == 'WORD':
                word_count += 1
                bbox = block['Geometry']['BoundingBox']

                # Convert relative coordinates to absolute pixels
                left = int(bbox['Left'] * width)
                top = int(bbox['Top'] * height)
                box_width = int(bbox['Width'] * width)
                box_height = int(bbox['Height'] * height)

                # Calculate rectangle coordinates
                x1 = left
                y1 = top
                x2 = left + box_width
                y2 = top + box_height

                # Draw rectangle around word
                draw.rectangle([x1, y1, x2, y2], outline='red', width=2)

                # Draw text label above bounding box
                text = block['Text']
                confidence = block['Confidence']
                label = f"{text} ({confidence:.0f}%)"

                # Draw text background for better visibility
                try:
                    text_bbox = draw.textbbox((x1, y1 - 15), label, font=font)
                    draw.rectangle(text_bbox, fill='red')
                    draw.text((x1, y1 - 15), label, fill='white', font=font)
                except:
                    # Fallback for older Pillow versions
                    draw.text((x1, y1 - 15), label, fill='red', font=font)

        # Save the image with bounding boxes
        img.save(output_path)
        print(f"  Saved: {output_path} ({word_count} bounding boxes drawn)")

    print(f"\nAll images with bounding boxes saved to: {output_dir}")
def remove_text_from_images(sectors_dir, json_dir, output_dir='cleaned_images', shrink_percent=0, keep_regex_list=None, min_confidence=0):
    """
    Replace text bounding boxes with white pixels for all images in directory

    Args:
        sectors_dir: Directory containing the original PNG images
        json_dir: Directory containing the JSON text detection files
        output_dir: Directory to save cleaned images
        shrink_percent: Percentage to shrink the bounding box (0-100). E.g., 10 = shrink by 10%
        keep_regex_list: List of regex patterns. Words matching these patterns will NOT be removed.
                         Add "+" to the list to keep the "+" symbol.
        min_confidence: Minimum confidence threshold (0-100). Words with confidence below this will NOT be removed.
    """
    # Create output directory
    os.makedirs(output_dir, exist_ok=True)

    # Compile regex patterns for efficiency
    compiled_patterns = []
    if keep_regex_list:
        for pattern in keep_regex_list:
            try:
                compiled_patterns.append(re.compile(pattern))
            except re.error as e:
                print(f"Warning: Invalid regex pattern '{pattern}': {e}")

    # Get all PNG files
    png_files = [f for f in os.listdir(sectors_dir) if f.lower().endswith('.png')]

    for png_file in png_files:
        # Get corresponding JSON file name
        json_file = os.path.splitext(png_file)[0] + '.json'

        image_path = os.path.join(sectors_dir, png_file)
        json_path = os.path.join(json_dir, json_file)
        output_path = os.path.join(output_dir, png_file)

        # Check if JSON file exists
        if not os.path.exists(json_path):
            print(f"Warning: JSON not found for {png_file}, skipping...")
            continue

        print(f"Processing: {png_file}")

        # Load the image
        img = Image.open(image_path)
        width, height = img.size

        # Load JSON data
        with open(json_path, 'r') as f:
            data = json.load(f)

        # Create a drawing object
        draw = ImageDraw.Draw(img)

        # Process each text detection - NOW PER WORD
        word_count = 0
        kept_by_regex = 0
        kept_by_confidence = 0
        for block in data['Blocks']:
            if block['BlockType'] == 'WORD':
                text = block['Text']
                confidence = block['Confidence']

                # Check if confidence is below minimum threshold
                if confidence < min_confidence:
                    kept_by_confidence += 1
                    print(f"  Keeping word: {text} (confidence {confidence:.1f}% < {min_confidence}%)")
                    continue

                # Check if word matches any keep pattern
                should_keep = False
                if compiled_patterns:
                    for pattern in compiled_patterns:
                        if pattern.match(text):
                            should_keep = True
                            kept_by_regex += 1
                            print(f"  Keeping word: {text} (matches pattern)")
                            break

                # Skip removal if word should be kept
                if should_keep:
                    continue

                word_count += 1
                bbox = block['Geometry']['BoundingBox']

                # Convert relative coordinates to absolute pixels
                left = int(bbox['Left'] * width)
                top = int(bbox['Top'] * height)
                box_width = int(bbox['Width'] * width)
                box_height = int(bbox['Height'] * height)

                # Apply shrink percentage
                if shrink_percent > 0:
                    shrink_factor = shrink_percent / 100
                    width_reduction = int(box_width * shrink_factor / 2)
                    height_reduction = int(box_height * shrink_factor / 2)

                    left += width_reduction
                    top += height_reduction
                    box_width -= width_reduction * 2
                    box_height -= height_reduction * 2

                # Draw white rectangle over the text
                draw.rectangle(
                    [(left, top), (left + box_width, top + box_height)],
                    fill='white'
                )

        # Save the modified image
        img.save(output_path)
        total_kept = kept_by_regex + kept_by_confidence
        print(f"  Saved: {output_path} ({word_count} words removed, {total_kept} words kept: {kept_by_regex} by regex, {kept_by_confidence} by confidence)")

    print(f"\nAll cleaned images saved to: {output_dir}")
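
# Hypothetical example call (editor's sketch, values invented): erase every
# detected word except the "+" symbol and VM-style tags, leave low-confidence
# (< 90%) detections untouched, and shrink each erased box by 10%:
#
#   remove_text_from_images('./sectors', './text_json', './clean_image',
#                           shrink_percent=10,
#                           keep_regex_list=[r'\+', r'VM-\d{4}'],
#                           min_confidence=90)
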
def pick_random_images(source_dir, output_dir, n, seed=None):
    """
    Pick N random images from source directory and copy them to output directory

    Args:
        source_dir: Directory containing the cleaned images
        output_dir: Directory to save random sample of images
        n: Number of random images to pick
        seed: Random seed for reproducibility (optional)

    Returns:
        List of selected image filenames
    """
    # Create output directory
    os.makedirs(output_dir, exist_ok=True)

    # Get all PNG files from source directory
    png_files = [f for f in os.listdir(source_dir) if f.lower().endswith('.png')]

    if len(png_files) == 0:
        print(f"No PNG files found in {source_dir}")
        return []

    # Check if n is larger than available files
    if n > len(png_files):
        print(f"Warning: Requested {n} images but only {len(png_files)} available. Using all images.")
        n = len(png_files)

    # Set random seed if provided
    if seed is not None:
        random.seed(seed)

    # Randomly select n images
    selected_files = random.sample(png_files, n)

    print(f"\nPicking {n} random images from {source_dir}:")

    # Copy selected images to output directory
    for filename in selected_files:
        source_path = os.path.join(source_dir, filename)
        dest_path = os.path.join(output_dir, filename)

        shutil.copy2(source_path, dest_path)
        print(f"  Copied: {filename}")

    print(f"\n{len(selected_files)} random images saved to: {output_dir}")

    return selected_files
def augment_images(source_dir, output_dir='augmented_images', augmentations_per_image=5,
                   brightness_range=(0.7, 1.3), contrast_range=(0.7, 1.3),
                   rotation_range=(-15, 15), blur_probability=0.3, noise_probability=0.3,
                   flip_horizontal=True, flip_vertical=False, seed=None):
    """
    Apply data augmentation to images in source directory

    Args:
        source_dir: Directory containing the original images
        output_dir: Directory to save augmented images
        augmentations_per_image: Number of augmented versions to create per image
        brightness_range: Tuple (min, max) for brightness adjustment (1.0 = original)
        contrast_range: Tuple (min, max) for contrast adjustment (1.0 = original)
        rotation_range: Tuple (min_degrees, max_degrees) for rotation
        blur_probability: Probability of applying blur (0.0 to 1.0)
        noise_probability: Probability of adding noise (0.0 to 1.0)
        flip_horizontal: Whether to include horizontal flips
        flip_vertical: Whether to include vertical flips
        seed: Random seed for reproducibility (optional)

    Returns:
        Total number of augmented images created
    """
    # Create output directory
    os.makedirs(output_dir, exist_ok=True)

    # Set random seed if provided
    if seed is not None:
        random.seed(seed)

    # Get all PNG files from source directory
    png_files = [f for f in os.listdir(source_dir) if f.lower().endswith('.png')]

    if len(png_files) == 0:
        print(f"No PNG files found in {source_dir}")
        return 0

    print(f"\nAugmenting {len(png_files)} images from {source_dir}:")
    print(f"Creating {augmentations_per_image} augmented versions per image")

    total_created = 0

    for png_file in png_files:
        image_path = os.path.join(source_dir, png_file)
        base_name = os.path.splitext(png_file)[0]

        # Load the image
        img = Image.open(image_path)

        print(f"\nProcessing: {png_file}")

        for aug_idx in range(augmentations_per_image):
            # Start with a copy of the original image
            aug_img = img.copy()

            augmentation_list = []

            # Random brightness adjustment
            if random.random() > 0.5:
                brightness_factor = random.uniform(*brightness_range)
                enhancer = ImageEnhance.Brightness(aug_img)
                aug_img = enhancer.enhance(brightness_factor)
                augmentation_list.append(f"brightness_{brightness_factor:.2f}")

            # Random contrast adjustment
            if random.random() > 0.5:
                contrast_factor = random.uniform(*contrast_range)
                enhancer = ImageEnhance.Contrast(aug_img)
                aug_img = enhancer.enhance(contrast_factor)
                augmentation_list.append(f"contrast_{contrast_factor:.2f}")

            # Random rotation
            if random.random() > 0.5:
                rotation_angle = random.uniform(*rotation_range)
                aug_img = aug_img.rotate(rotation_angle, fillcolor='white', expand=False)
                augmentation_list.append(f"rotate_{rotation_angle:.1f}")

            # Random blur
            if random.random() < blur_probability:
                blur_radius = random.uniform(0.5, 2.0)
                aug_img = aug_img.filter(ImageFilter.GaussianBlur(radius=blur_radius))
                augmentation_list.append(f"blur_{blur_radius:.1f}")

            # Random noise (salt and pepper)
            if random.random() < noise_probability:
                aug_img = add_noise(aug_img, noise_level=0.02)
                augmentation_list.append("noise")

            # Random horizontal flip
            if flip_horizontal and random.random() > 0.5:
                aug_img = ImageOps.mirror(aug_img)
                augmentation_list.append("flip_h")

            # Random vertical flip
            if flip_vertical and random.random() > 0.5:
                aug_img = ImageOps.flip(aug_img)
                augmentation_list.append("flip_v")

            # Save augmented image
            aug_suffix = "_".join(augmentation_list) if augmentation_list else "original"
            output_filename = f"{base_name}_aug{aug_idx}_{aug_suffix}.png"
            output_path = os.path.join(output_dir, output_filename)

            aug_img.save(output_path)
            total_created += 1

            print(f"  Created: {output_filename}")

    print(f"\n{total_created} augmented images saved to: {output_dir}")
    return total_created
def add_noise(image, noise_level=0.02):
    """
    Add salt and pepper noise to an image

    Args:
        image: PIL Image object
        noise_level: Probability of a pixel being noisy (0.0 to 1.0)

    Returns:
        PIL Image with noise added
    """
    img_array = list(image.getdata())
    width, height = image.size

    for i in range(len(img_array)):
        if random.random() < noise_level:
            # Randomly choose salt (white) or pepper (black)
            if random.random() > 0.5:
                img_array[i] = (255, 255, 255) if image.mode == 'RGB' else 255
            else:
                img_array[i] = (0, 0, 0) if image.mode == 'RGB' else 0

    noisy_image = Image.new(image.mode, (width, height))
    noisy_image.putdata(img_array)

    return noisy_image
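
# Editor's sketch (not in the original commit): a vectorized equivalent of
# add_noise that is much faster on large sectors; assumes 'L' or 'RGB' mode.
import numpy as np

def add_noise_np(image, noise_level=0.02):
    """Salt-and-pepper noise via NumPy instead of a per-pixel Python loop."""
    arr = np.asarray(image).copy()
    noisy = np.random.random(arr.shape[:2]) < noise_level  # pixels to corrupt
    salt = np.random.random(arr.shape[:2]) > 0.5           # white vs. black
    arr[noisy & salt] = 255   # salt
    arr[noisy & ~salt] = 0    # pepper
    return Image.fromarray(arr)
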
def filter_images_by_pattern(image_dir, json_dir, output_dir_match, output_dir_no_match, pattern=r'VM-\d{4}'):
    """
    Filter images that contain at least one word matching the specified pattern
    Creates two folders: one with matches and one without matches

    Args:
        image_dir: Directory containing the images
        json_dir: Directory containing the JSON text detection files
        output_dir_match: Directory to save images that MATCH the pattern
        output_dir_no_match: Directory to save images that DO NOT match the pattern
        pattern: Regex pattern to match (default: VM-#### where #### is 4 digits)

    Returns:
        Tuple of (matched_count, no_match_count, matched_files, no_match_files)
    """
    # Create output directories
    os.makedirs(output_dir_match, exist_ok=True)
    os.makedirs(output_dir_no_match, exist_ok=True)

    # Compile the regex pattern
    try:
        compiled_pattern = re.compile(pattern)
    except re.error as e:
        print(f"Error: Invalid regex pattern '{pattern}': {e}")
        return 0, 0, [], []

    # Get all PNG files from image directory
    png_files = [f for f in os.listdir(image_dir) if f.lower().endswith('.png')]

    if len(png_files) == 0:
        print(f"No PNG files found in {image_dir}")
        return 0, 0, [], []

    print(f"\nFiltering images by pattern: {pattern}")
    print(f"Checking {len(png_files)} images...")

    matched_count = 0
    no_match_count = 0
    matched_files = []
    no_match_files = []

    for png_file in png_files:
        # Get corresponding JSON file name
        json_file = os.path.splitext(png_file)[0] + '.json'

        image_path = os.path.join(image_dir, png_file)
        json_path = os.path.join(json_dir, json_file)

        # Check if JSON file exists
        if not os.path.exists(json_path):
            print(f"Warning: JSON not found for {png_file}, skipping...")
            continue

        # Load JSON data
        with open(json_path, 'r') as f:
            data = json.load(f)

        # Check if any word matches the pattern
        matching_words = []
        for block in data['Blocks']:
            if block['BlockType'] == 'WORD':
                text = block['Text']
                if compiled_pattern.search(text):
                    matching_words.append(text)

        # Copy image to appropriate folder
        if matching_words:
            # Image has matching words
            matched_count += 1
            output_path = os.path.join(output_dir_match, png_file)
            shutil.copy2(image_path, output_path)

            matched_files.append((png_file, matching_words))
            print(f"  ✓ MATCH: {png_file} - Found: {', '.join(matching_words)}")
        else:
            # Image has no matching words
            no_match_count += 1
            output_path = os.path.join(output_dir_no_match, png_file)
            shutil.copy2(image_path, output_path)

            no_match_files.append(png_file)
            print(f"  ✗ NO MATCH: {png_file}")

    print(f"\n=== Filtering Summary ===")
    print(f"Pattern: '{pattern}'")
    print(f"Total images processed: {len(png_files)}")
    print(f"Images WITH pattern: {matched_count} (saved to {output_dir_match})")
    print(f"Images WITHOUT pattern: {no_match_count} (saved to {output_dir_no_match})")

    return matched_count, no_match_count, matched_files, no_match_files
#matched_count, no_match_count, matched_files, no_match_files = filter_images_by_pattern(
#    './clean_image', './text_json', './vm_images', './no_vm_images'
#)

# Print detailed summary
#print("\n=== Images WITH VM-#### Pattern ===")
#for filename, words in matched_files:
#    print(f"{filename}: {', '.join(words)}")

#print(f"\n=== Images WITHOUT VM-#### Pattern ===")
#for filename in no_match_files:
#    print(f"{filename}")

# Run text detection
#detect_text()

# Draw bounding boxes on original images
#draw_bounding_boxes('./sectors', './text_json', './bounding_box_images')

# Remove text from images, but keep words matching the regex patterns
# (the call below keeps "+", words containing X/x, and words starting with "L";
# r'\1' is an invalid standalone pattern and is skipped with a warning)
#remove_text_from_images('./sectors', './text_json', './clean_image', 0, [r'\+',r'.*[Xx].*',r'\1',r'L'],25)

augment_images('./to_augment', './test_dataset',
               augmentations_per_image=1,
               rotation_range=(-5, 5),
               blur_probability=0.5,
               noise_probability=0.5)

#pick_random_images("./clean_image","./dataset",200)
1 label/backend/notas.txt Normal file
@@ -0,0 +1 @@
* /
81 label/backend/rekognition.py Normal file
@@ -0,0 +1,81 @@
#Copyright 2020 Amazon.com, Inc. or its affiliates. All Rights Reserved.
#SPDX-License-Identifier: MIT-0 (For details, see https://github.com/awsdocs/amazon-rekognition-custom-labels-developer-guide/blob/master/LICENSE-SAMPLECODE.)

import boto3
import io
from PIL import Image, ImageDraw, ExifTags, ImageColor, ImageFont

def display_image(bucket, photo, response):
    # Load image from S3 bucket
    s3_connection = boto3.resource('s3')

    s3_object = s3_connection.Object(bucket, photo)
    s3_response = s3_object.get()

    stream = io.BytesIO(s3_response['Body'].read())
    image = Image.open(stream)

    # Ready image to draw bounding boxes on it.
    imgWidth, imgHeight = image.size
    draw = ImageDraw.Draw(image)

    # Calculate and display bounding boxes for each detected custom label
    print('Detected custom labels for ' + photo)
    for customLabel in response['CustomLabels']:
        print('Label ' + str(customLabel['Name']))
        print('Confidence ' + str(customLabel['Confidence']))
        if 'Geometry' in customLabel:
            box = customLabel['Geometry']['BoundingBox']
            left = imgWidth * box['Left']
            top = imgHeight * box['Top']
            width = imgWidth * box['Width']
            height = imgHeight * box['Height']

            fnt = ImageFont.truetype('/usr/share/fonts/truetype/dejavu/DejaVuSans-Bold.ttf', 50)
            draw.text((left, top), customLabel['Name'], fill='#00d400', font=fnt)

            print('Left: ' + '{0:.0f}'.format(left))
            print('Top: ' + '{0:.0f}'.format(top))
            print('Label Width: ' + "{0:.0f}".format(width))
            print('Label Height: ' + "{0:.0f}".format(height))

            points = (
                (left, top),
                (left + width, top),
                (left + width, top + height),
                (left, top + height),
                (left, top))
            draw.line(points, fill='#00d400', width=5)

    output_filename = 'output_' + photo.split('/')[-1]
    image.save(output_filename)
    print(f'\nImage saved as: {output_filename}')

def show_custom_labels(model, bucket, photo, min_confidence):
    client = boto3.client('rekognition')

    # Call DetectCustomLabels
    response = client.detect_custom_labels(Image={'S3Object': {'Bucket': bucket, 'Name': photo}},
                                           MinConfidence=min_confidence,
                                           ProjectVersionArn=model)

    # Display the image with the detected labels drawn on it
    # (only meaningful for object detection models)
    display_image(bucket, photo, response)

    return len(response['CustomLabels'])

def main():
    bucket = 'custom-labels-valvulas-bloco-funcao'
    photo = 'clean_image/DE-5400.00-4710-944-TYS-009=C_row2_col4.png'
    model = 'arn:aws:rekognition:us-east-1:173378533286:project/labels-valvula/version/labels-valvula.2025-11-24T15.44.16/1764009856090'
    min_confidence = 80

    label_count = show_custom_labels(model, bucket, photo, min_confidence)
    print("Custom labels detected: " + str(label_count))

if __name__ == "__main__":
    main()
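
# Editor's note: DetectCustomLabels only succeeds while the model version is
# running, so the project version has to be started beforehand, e.g.:
#
#   aws rekognition start-project-version \
#       --project-version-arn "<model ARN>" \
#       --min-inference-units 1 \
#       --region us-east-1
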
211 label/backend/topng.py Normal file
@@ -0,0 +1,211 @@
import os
from pathlib import Path
from pdf2image import convert_from_path
from PIL import Image
import json

def convert_pdfs_to_png(input_dir, output_dir=None, dpi=300):
    """
    Convert all PDFs in a directory to PNG images.

    Args:
        input_dir: Directory containing PDF files
        output_dir: Directory to save PNG files (defaults to input_dir/png_output)
        dpi: Resolution for conversion (default 300 for high quality)
    """
    input_path = Path(input_dir)

    if not input_path.exists():
        print(f"Error: Directory '{input_dir}' does not exist")
        return

    # Set output directory
    if output_dir is None:
        output_path = input_path / "png_output"
    else:
        output_path = Path(output_dir)

    output_path.mkdir(parents=True, exist_ok=True)

    # Find all PDF files
    pdf_files = list(input_path.glob("*.pdf"))

    if not pdf_files:
        print(f"No PDF files found in '{input_dir}'")
        return

    print(f"Found {len(pdf_files)} PDF file(s)")
    print(f"Converting with {dpi} DPI for high quality...")

    for pdf_file in pdf_files:
        try:
            print(f"\nProcessing: {pdf_file.name}")

            # Convert PDF to images
            images = convert_from_path(
                pdf_file,
                dpi=dpi,
                fmt='png',
                thread_count=4  # Use multiple threads for faster conversion
            )

            # Save each page
            for i, image in enumerate(images, start=1):
                if len(images) > 1:
                    output_filename = f"{pdf_file.stem}_page_{i}.png"
                else:
                    output_filename = f"{pdf_file.stem}.png"

                output_file = output_path / output_filename

                # Save with optimized compression
                image.save(
                    output_file,
                    'PNG',
                    optimize=True,     # Enable optimization
                    compress_level=6   # Balanced compression (0-9; 6 is a good balance)
                )

                print(f"  Saved: {output_filename} ({image.size[0]}x{image.size[1]}px)")

            print(f"✓ Completed: {pdf_file.name} ({len(images)} page(s))")

        except Exception as e:
            print(f"✗ Error processing {pdf_file.name}: {str(e)}")

    print(f"\n{'='*50}")
    print(f"Conversion complete!")
    print(f"Output directory: {output_path.absolute()}")
def split_images_into_sectors(input_dir, output_dir=None, overlap_percent=1):
    """
    Split PNG images in a directory into 25 sectors (5x5 grid) with overlap.

    Args:
        input_dir: Directory containing PNG files
        output_dir: Directory to save split images (defaults to input_dir/sectors)
        overlap_percent: Percentage of overlap (default 1%)
    """
    input_path = Path(input_dir)

    if not input_path.exists():
        print(f"Error: Directory '{input_dir}' does not exist")
        return

    # Set output directory
    if output_dir is None:
        output_path = input_path / "sectors"
    else:
        output_path = Path(output_dir)

    output_path.mkdir(parents=True, exist_ok=True)

    # Find all PNG files
    png_files = list(input_path.glob("*.png"))

    if not png_files:
        print(f"No PNG files found in '{input_dir}'")
        return

    print(f"Found {len(png_files)} PNG file(s)")
    print(f"Splitting into 25 sectors (5x5 grid) with {overlap_percent}% overlap...")

    for png_file in png_files:
        try:
            print(f"\nProcessing: {png_file.name}")

            # Open image
            img = Image.open(png_file)
            width, height = img.size

            # Calculate overlap in pixels
            h_overlap = int(width * overlap_percent / 100)
            v_overlap = int(height * overlap_percent / 100)

            # Calculate split points for 5x5 grid
            h_split1 = width // 5
            h_split2 = 2 * width // 5
            h_split3 = 3 * width // 5
            h_split4 = 4 * width // 5
            v_split1 = height // 5
            v_split2 = 2 * height // 5
            v_split3 = 3 * height // 5
            v_split4 = 4 * height // 5

            # Define 25 sectors with overlap (5x5 grid)
            # Format: (left, top, right, bottom)
            sectors = {
                # Row 1
                'row1_col1': (0, 0, h_split1 + h_overlap, v_split1 + v_overlap),
                'row1_col2': (h_split1 - h_overlap, 0, h_split2 + h_overlap, v_split1 + v_overlap),
                'row1_col3': (h_split2 - h_overlap, 0, h_split3 + h_overlap, v_split1 + v_overlap),
                'row1_col4': (h_split3 - h_overlap, 0, h_split4 + h_overlap, v_split1 + v_overlap),
                'row1_col5': (h_split4 - h_overlap, 0, width, v_split1 + v_overlap),

                # Row 2
                'row2_col1': (0, v_split1 - v_overlap, h_split1 + h_overlap, v_split2 + v_overlap),
                'row2_col2': (h_split1 - h_overlap, v_split1 - v_overlap, h_split2 + h_overlap, v_split2 + v_overlap),
                'row2_col3': (h_split2 - h_overlap, v_split1 - v_overlap, h_split3 + h_overlap, v_split2 + v_overlap),
                'row2_col4': (h_split3 - h_overlap, v_split1 - v_overlap, h_split4 + h_overlap, v_split2 + v_overlap),
                'row2_col5': (h_split4 - h_overlap, v_split1 - v_overlap, width, v_split2 + v_overlap),

                # Row 3
                'row3_col1': (0, v_split2 - v_overlap, h_split1 + h_overlap, v_split3 + v_overlap),
                'row3_col2': (h_split1 - h_overlap, v_split2 - v_overlap, h_split2 + h_overlap, v_split3 + v_overlap),
                'row3_col3': (h_split2 - h_overlap, v_split2 - v_overlap, h_split3 + h_overlap, v_split3 + v_overlap),
                'row3_col4': (h_split3 - h_overlap, v_split2 - v_overlap, h_split4 + h_overlap, v_split3 + v_overlap),
                'row3_col5': (h_split4 - h_overlap, v_split2 - v_overlap, width, v_split3 + v_overlap),

                # Row 4
                'row4_col1': (0, v_split3 - v_overlap, h_split1 + h_overlap, v_split4 + v_overlap),
                'row4_col2': (h_split1 - h_overlap, v_split3 - v_overlap, h_split2 + h_overlap, v_split4 + v_overlap),
                'row4_col3': (h_split2 - h_overlap, v_split3 - v_overlap, h_split3 + h_overlap, v_split4 + v_overlap),
                'row4_col4': (h_split3 - h_overlap, v_split3 - v_overlap, h_split4 + h_overlap, v_split4 + v_overlap),
                'row4_col5': (h_split4 - h_overlap, v_split3 - v_overlap, width, v_split4 + v_overlap),

                # Row 5
                'row5_col1': (0, v_split4 - v_overlap, h_split1 + h_overlap, height),
                'row5_col2': (h_split1 - h_overlap, v_split4 - v_overlap, h_split2 + h_overlap, height),
                'row5_col3': (h_split2 - h_overlap, v_split4 - v_overlap, h_split3 + h_overlap, height),
                'row5_col4': (h_split3 - h_overlap, v_split4 - v_overlap, h_split4 + h_overlap, height),
                'row5_col5': (h_split4 - h_overlap, v_split4 - v_overlap, width, height)
            }

            # Crop and save each sector
            for sector_name, bbox in sectors.items():
                sector_img = img.crop(bbox)

                output_filename = f"{png_file.stem}_{sector_name}.png"
                output_file = output_path / output_filename

                # Save with optimized compression
                sector_img.save(
                    output_file,
                    'PNG',
                    optimize=True,
                    compress_level=6
                )

                sector_width = bbox[2] - bbox[0]
                sector_height = bbox[3] - bbox[1]
                print(f"  Saved: {output_filename} ({sector_width}x{sector_height}px)")

            print(f"✓ Completed: {png_file.name} (25 sectors)")

        except Exception as e:
            print(f"✗ Error processing {png_file.name}: {str(e)}")

    print(f"\n{'='*50}")
    print(f"Splitting complete!")
    print(f"Output directory: {output_path.absolute()}")
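
# Editor's sketch (not in the original commit): the 25 hand-written boxes above
# all follow one rule, so an equivalent generator for any n x n grid would be:
def grid_sectors(width, height, n=5, h_overlap=0, v_overlap=0):
    """Yield (name, (left, top, right, bottom)) boxes for an n x n grid with overlap."""
    for row in range(n):
        for col in range(n):
            left = 0 if col == 0 else col * width // n - h_overlap
            top = 0 if row == 0 else row * height // n - v_overlap
            right = width if col == n - 1 else (col + 1) * width // n + h_overlap
            bottom = height if row == n - 1 else (row + 1) * height // n + v_overlap
            yield f"row{row + 1}_col{col + 1}", (left, top, right, bottom)
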
if __name__ == "__main__":
    # Example usage

    # Step 1: Convert PDFs to PNGs
    input_directory = "./02_Fluxogramas"  # Change this to your PDF directory
    output_directory = "./pngs"           # Optional: specify output directory

    # Convert with high DPI (300 is standard for print quality)
    #convert_pdfs_to_png(input_directory, output_directory, dpi=300)

    # Step 2: Split PNGs into 25 sectors (5x5 grid)
    split_images_into_sectors("./pngs", "./sectors", overlap_percent=1)
275 label/cores/cores.py Normal file
@@ -0,0 +1,275 @@
import os
import shutil
import numpy as np
from PIL import Image
from pdf2image import convert_from_path

def clear_output_folder(folder):
    """Clear and create output folder."""
    if os.path.exists(folder):
        shutil.rmtree(folder)
    os.makedirs(folder)

def load_image(file_path, dpi=300):
    """Load image from PDF or image file."""
    file_ext = os.path.splitext(file_path)[1].lower()

    if file_ext == '.pdf':
        print(f"Converting PDF to image (DPI: {dpi})...")
        images = convert_from_path(file_path, dpi=dpi, fmt='png')
        img = images[0]  # First page only
        print(f"  Converted: {img.size[0]}x{img.size[1]}")
    else:
        print(f"Loading image...")
        img = Image.open(file_path)
        print(f"  Loaded: {img.size[0]}x{img.size[1]}")

    # Convert to RGB
    if img.mode != 'RGB':
        img = img.convert('RGB')

    return img
def find_main_colors(img, color_threshold=30, min_percentage=0.5):
    """
    Find main distinct colors in image.

    Algorithm:
    1. Find most common color
    2. Group all colors within distance threshold (Euclidean distance in RGB space)
    3. Remove those colors
    4. Find next most common color
    5. Repeat until no colors left

    Parameters:
    - color_threshold: Maximum distance between colors to group them (0-441)
        Distance = sqrt((R1-R2)² + (G1-G2)² + (B1-B2)²)
    - min_percentage: Minimum percentage to be a "main" color
    """
    print(f"\nAnalyzing colors...")
    print(f"  Color distance threshold: {color_threshold}")
    print(f"  Minimum percentage: {min_percentage}%")

    # Get all pixels
    pixels = np.array(img)
    h, w = pixels.shape[:2]
    pixels = pixels.reshape(-1, 3)
    total_pixels = len(pixels)

    # Remove white background (>= 250 in all RGB channels)
    is_white = (pixels[:, 0] >= 250) & (pixels[:, 1] >= 250) & (pixels[:, 2] >= 250)
    pixels = pixels[~is_white]

    print(f"  Total pixels: {total_pixels:,}")
    print(f"  White background: {np.sum(is_white):,} ({np.sum(is_white)/total_pixels*100:.1f}%) - IGNORED")
    print(f"  Color pixels: {len(pixels):,} ({len(pixels)/total_pixels*100:.1f}%)")

    if len(pixels) == 0:
        print("  Error: Image is entirely white!")
        return []

    # Get unique colors and their counts
    unique_colors, counts = np.unique(pixels, axis=0, return_counts=True)
    print(f"  Unique colors found: {len(unique_colors):,}")

    # Greedy grouping by frequency
    print(f"\n  Grouping colors (greedy by frequency)...")

    color_groups = []
    remaining = np.ones(len(unique_colors), dtype=bool)  # Track which colors are still available

    iteration = 0
    while np.any(remaining):
        iteration += 1

        # Find most common remaining color
        remaining_counts = counts.copy()
        remaining_counts[~remaining] = 0  # Zero out already-used colors

        if np.max(remaining_counts) == 0:
            break

        most_common_idx = np.argmax(remaining_counts)
        base_color = unique_colors[most_common_idx]

        # Calculate Euclidean distance from base_color to all colors
        # Distance = sqrt((R1-R2)² + (G1-G2)² + (B1-B2)²)
        diff = unique_colors.astype(float) - base_color.astype(float)
        distances = np.sqrt(np.sum(diff ** 2, axis=1))

        # Find all colors within threshold distance
        within_threshold = (distances <= color_threshold) & remaining

        # Mark these colors as used
        remaining[within_threshold] = False

        # Group info
        group_colors = unique_colors[within_threshold]
        group_counts = counts[within_threshold]
        total_count = np.sum(group_counts)
        percentage = (total_count / len(pixels)) * 100

        color_groups.append({
            'color': base_color,
            'count': total_count,
            'percentage': percentage,
            'num_variants': len(group_colors)
        })

        print(f"    Group {iteration}: RGB{tuple(base_color)} -> {len(group_colors)} variants, {percentage:.1f}%")

    print(f"  Created {len(color_groups)} color groups")

    # Filter by minimum percentage
    color_groups = [g for g in color_groups if g['percentage'] >= min_percentage]

    print(f"  Main colors (>= {min_percentage}%): {len(color_groups)}")

    # Verify percentages
    total_percentage = sum(g['percentage'] for g in color_groups)
    print(f"  Total percentage: {total_percentage:.1f}%")

    # Display results
    print(f"\n{'='*60}")
    print(f"MAIN COLORS:")
    print(f"{'='*60}")
    for i, group in enumerate(color_groups, 1):
        r, g, b = group['color']
        print(f"{i}. RGB({r:3d}, {g:3d}, {b:3d}) - {group['percentage']:5.1f}% ({group['count']:,} pixels, {group['num_variants']} variants)")

    return color_groups
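
# Editor's worked example of the greedy grouping above: with threshold 30 and
# colors {(0,0,0): 500 px, (10,10,10): 50 px, (200,0,0): 300 px},
# pass 1 picks (0,0,0) and absorbs (10,10,10) (distance sqrt(300) ≈ 17.3 <= 30);
# pass 2 picks (200,0,0), which is ≈ 200 away from black and stays separate.
# Result: two groups, black with 550 px and red with 300 px.
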
def create_color_layers(img, color_groups, color_threshold, output_folder='output'):
    """Create one image per color group showing only that color."""
    print(f"\nCreating color layers...")

    # Get all pixels
    pixels = np.array(img)
    h, w = pixels.shape[:2]
    original_pixels = pixels.reshape(-1, 3)

    # Remove white background for grouping
    is_white = (original_pixels[:, 0] >= 250) & (original_pixels[:, 1] >= 250) & (original_pixels[:, 2] >= 250)

    # Get unique colors for matching
    unique_colors, inverse = np.unique(original_pixels[~is_white], axis=0, return_inverse=True)

    # For each color group, create a layer
    for i, group in enumerate(color_groups, 1):
        base_color = group['color']

        # Calculate distances from base_color to all unique colors
        diff = unique_colors.astype(float) - base_color.astype(float)
        distances = np.sqrt(np.sum(diff ** 2, axis=1))

        # Find which unique colors belong to this group
        in_group = distances <= color_threshold

        # Create mask for pixels in this group
        pixel_mask = np.zeros(len(original_pixels), dtype=bool)
        pixel_mask[~is_white] = in_group[inverse]

        # Create layer image (white background)
        layer = np.full((h, w, 3), 255, dtype=np.uint8)
        layer_flat = layer.reshape(-1, 3)

        # Set pixels for this color group
        layer_flat[pixel_mask] = original_pixels[pixel_mask]

        # Save layer
        r, g, b = base_color
        filename = f'layer_{i}_rgb{r}_{g}_{b}.png'
        filepath = os.path.join(output_folder, filename)
        Image.fromarray(layer).save(filepath)

        pixel_count = np.sum(pixel_mask)
        print(f"  Layer {i}: {filename} ({pixel_count:,} pixels)")
def save_results(color_groups, output_folder='output'):
    """Save color palette to file."""
    output_path = os.path.join(output_folder, 'main_colors.txt')

    with open(output_path, 'w') as f:
        f.write("MAIN COLORS (by frequency)\n")
        f.write("="*60 + "\n")
        f.write("Note: White background ignored\n")
        f.write("      Similar colors grouped together\n\n")

        for i, group in enumerate(color_groups, 1):
            r, g, b = group['color']
            f.write(f"{i}. RGB({r}, {g}, {b})\n")
            f.write(f"   {group['percentage']:.2f}% ({group['count']:,} pixels)\n")
            f.write(f"   {group['num_variants']} color variants\n")
            f.write(f"   Hex: #{r:02X}{g:02X}{b:02X}\n\n")

    print(f"\nResults saved to: {output_path}")
def main(file_path, color_threshold=30, min_percentage=0.5, dpi=300, output_folder='output'):
    """Main function."""
    print("="*60)
    print("COLOR EXTRACTOR - Find Main Colors")
    print("="*60)

    # Clear output
    clear_output_folder(output_folder)

    # Load image
    print(f"\nInput: {file_path}")
    img = load_image(file_path, dpi)

    # Save original
    original_path = os.path.join(output_folder, 'original.png')
    img.save(original_path)

    # Find main colors
    color_groups = find_main_colors(img, color_threshold, min_percentage)

    if len(color_groups) == 0:
        print("\nNo main colors found.")
        return

    # Create color layers
    create_color_layers(img, color_groups, color_threshold, output_folder)

    # Save results
    save_results(color_groups, output_folder)

    print(f"\n{'='*60}")
    print(f"✓ COMPLETE - Found {len(color_groups)} main colors")
    print(f"  Created {len(color_groups)} color layer images")
    print(f"{'='*60}")

if __name__ == "__main__":
    # Input file (PDF or image)
    file_path = "input.pdf"  # or "input.png", "input.jpg", etc.

    if not os.path.exists(file_path):
        print(f"Error: '{file_path}' not found!")
        print("Usage: Place your file as 'input.pdf' or 'input.png'")
    else:
        # Parameters:
        #   color_threshold: Distance between colors to group them (0-441)
        #     Distance = sqrt((R1-R2)² + (G1-G2)² + (B1-B2)²)
        #     Examples:
        #       RGB(0,0,0) to RGB(0,0,1)    = distance of 1
        #       RGB(0,0,0) to RGB(10,10,10) = distance of ~17
        #       RGB(0,0,0) to RGB(30,30,30) = distance of ~52
        #     Recommended values:
        #       10-20:  Very strict - only very similar colors grouped
        #       30-50:  Good for most diagrams (RECOMMENDED)
        #       60-100: Loose - more aggressive grouping
        #
        #   min_percentage: Minimum % to be a "main" color
        #     0.5: Include colors that are at least 0.5% of image
        #     1.0: Only colors that are at least 1% of image
        #     0.1: Include even small but significant colors
        #
        #   dpi: Resolution for PDF conversion (300 recommended)

        main(
            file_path=file_path,
            color_threshold=120,  # Group similar colors (well above the recommended 30-50)
            min_percentage=3,     # Min 3% to be considered "main"
            dpi=300,              # PDF resolution
            output_folder='output'
        )
114 label/cores/file/QUICKSTART.md Normal file
@@ -0,0 +1,114 @@
# Quick Start Guide

## Installation

```bash
pip install -r requirements.txt
```

## Basic Usage

### Extract layers from PDF
```bash
python layer_extractor.py diagram.pdf
```

That's it! Layers will be saved to the `output/` directory.

## Common Adjustments

### Problem: Colors bleeding between layers
```bash
# INCREASE tolerance (counter-intuitive but correct!)
# Antialiasing creates intermediate colors that need higher tolerance
python layer_extractor.py diagram.pdf -t 45
```

### Problem: Layers mixing too much
```bash
# Decrease tolerance (stricter color matching)
python layer_extractor.py diagram.pdf -t 20
```

### Problem: Missing fine details
```bash
# Increase tolerance + higher DPI
python layer_extractor.py diagram.pdf -t 40 --dpi 600
```

### Problem: Too many layers detected
```bash
# Increase minimum pixel threshold
python layer_extractor.py diagram.pdf -m 500
```

### Problem: Need exact number of layers
```bash
# Specify layer count (extracts top N by frequency)
python layer_extractor.py diagram.pdf -n 4
```

### Problem: Low quality output
```bash
# Render at higher DPI
python layer_extractor.py diagram.pdf --dpi 600
```

## Output Files

Files are saved as:
```
output/diagram_layer1_220_050_050.png  (Red layer)
output/diagram_layer2_050_100_220.png  (Blue layer)
output/diagram_layer3_050_180_050.png  (Green layer)
```

The numbers in the filename are the RGB values of the layer color.

## Typical Workflows

### Standard diagram (moderate antialiasing)
```bash
python layer_extractor.py diagram.pdf
# Use defaults - works for most cases
```

### High-detail mechanical drawing
```bash
python layer_extractor.py drawing.pdf --dpi 600 -t 25
# Higher resolution, tighter tolerance
```

### Scanned/compressed diagram
```bash
python layer_extractor.py scanned.pdf -t 45
# More lenient to handle artifacts
```

### Known layer count
```bash
python layer_extractor.py diagram.pdf -n 3
# Faster if you know there are 3 layers
```

## Parameters Quick Reference

- `--dpi` - Resolution (default 300)
  - 150 = draft
  - 300 = standard
  - 600 = high quality

- `-t` - Tolerance (default 30, scale 0-100)
  - 15-20 = strict
  - 30 = balanced (RECOMMENDED)
  - 45-60 = lenient (for antialiasing)

- `-n` - Number of layers (default auto-detect)

- `-o` - Output directory (default "output")

## Getting Help

```bash
python layer_extractor.py --help
```
181 label/cores/file/README.md Normal file
@@ -0,0 +1,181 @@
# PDF Layer Extractor for Industrial Diagrams

Extract colored layers from PDF industrial diagrams with white backgrounds. Automatically handles variable layer counts and antialiasing around text.

## Features

- **PDF Support**: Direct PDF processing at configurable DPI
- **Automatic Layer Detection**: K-means clustering identifies distinct colored layers
- **Handles Antialiasing**: Tolerates color mixing around text and fine details
- **Variable Layer Counts**: Auto-detects all colored layers
- **Strict White Filtering**: Near-white pixels (RGB ≥ 250) treated as background only
- **High Quality Output**: Each layer saved as transparent PNG

## Installation

```bash
pip install -r requirements.txt
```

## Quick Start

```bash
# Basic usage
python layer_extractor.py diagram.pdf

# Higher resolution
python layer_extractor.py diagram.pdf --dpi 600

# Extract to specific folder
python layer_extractor.py diagram.pdf -o my_layers/
```

## Usage

### Basic Command

```bash
python layer_extractor.py diagram.pdf
```

Output: `output/diagram_layer1_255_000_000.png`, `output/diagram_layer2_000_000_255.png`, etc.

### Common Options

```bash
# High resolution rendering (better for detailed diagrams)
python layer_extractor.py diagram.pdf --dpi 600

# Adjust color tolerance (for antialiasing issues)
python layer_extractor.py diagram.pdf -t 40

# Extract only top 3 layers
python layer_extractor.py diagram.pdf -n 3

# Custom output directory
python layer_extractor.py diagram.pdf -o layers/
```

## Parameters

- `--dpi` (default: 300) - PDF rendering resolution
  - 300: Standard quality, faster
  - 600: High quality, larger files
  - 150: Draft quality, quick preview

- `-t, --tolerance` (default: 30) - Color matching tolerance (0-100 scale)
  - **10-15**: Very strict, only nearly identical colors
  - **20-25**: Strict, minimal antialiasing
  - **30**: Default, handles moderate antialiasing (RECOMMENDED)
  - **40-50**: Lenient, good for heavy antialiasing around text
  - **60+**: Very lenient, may blur layer boundaries

- `-n, --n-layers` - Extract specific number of layers (default: auto-detect)

- `-m, --min-pixels` (default: 100) - Minimum pixels to consider a valid layer

## How It Works

1. **PDF Rendering**: Converts PDF to high-resolution image at specified DPI
2. **Color Analysis**: Uses K-means clustering on pixel colors
3. **White Filtering**: Removes near-white background (RGB ≥ 250,250,250)
4. **Layer Extraction**: For each color, creates a mask of similar pixels
5. **Alpha Blending**: Handles antialiasing with gradient transparency
6. **Output**: Saves each layer as transparent PNG
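
For intuition, here is a minimal sketch of steps 3-5 (illustrative only: the real logic lives in `layer_extractor.py`, `page.png` and the cluster center are made-up inputs, and a hard alpha mask stands in for the gradient blending of step 5):

```python
import numpy as np
from PIL import Image

img = np.asarray(Image.open("page.png").convert("RGB")).astype(float)
target = np.array([220.0, 50.0, 50.0])   # one cluster center found by K-means
tol = 30 / 100 * (3 * 255 ** 2) ** 0.5   # tolerance 30 on the 0-100 scale

dist = np.sqrt(((img - target) ** 2).sum(axis=-1))   # Euclidean RGB distance
mask = (dist <= tol) & ~(img >= 250).all(axis=-1)    # drop near-white background

layer = np.zeros((*img.shape[:2], 4), dtype=np.uint8)  # transparent canvas
layer[mask, :3] = img[mask].astype(np.uint8)           # keep matching pixels
layer[mask, 3] = 255
Image.fromarray(layer).save("page_layer1_220_050_050.png")
```
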
## Output Format

Files are named: `{pdf_name}_layer{N}_{R}_{G}_{B}.png`

Example:
```
output/
├── piping_diagram_layer1_220_050_050.png  (Red layer)
├── piping_diagram_layer2_050_100_220.png  (Blue layer)
└── piping_diagram_layer3_050_180_050.png  (Green layer)
```

## Troubleshooting

### Colors bleeding between layers (antialiasing issue)

**Problem**: Gray pixels from antialiasing appearing in the wrong layer, especially around black text on gray layers

**Explanation**: When black text (0,0,0) sits on a gray layer (150,150,150), antialiasing creates intermediate grays ((75,75,75), (100,100,100), etc.) that are far from both black and gray in color space.

**Solution**: Increase tolerance to capture these intermediate colors
```bash
# For moderate antialiasing (default, usually works)
python layer_extractor.py diagram.pdf -t 30

# For heavy antialiasing (small text, compressed PDFs)
python layer_extractor.py diagram.pdf -t 45

# For extreme cases (very compressed or low quality)
python layer_extractor.py diagram.pdf -t 60
```

### Missing fine details

**Problem**: Thin lines or small text not captured

**Solution**: Increase tolerance or DPI
```bash
python layer_extractor.py diagram.pdf -t 40 --dpi 600
```

### Too many layers detected

**Problem**: Small color artifacts creating extra layers

**Solution**: Increase minimum pixel threshold
```bash
python layer_extractor.py diagram.pdf -m 500
```

### Blurry output

**Problem**: Output quality not good enough

**Solution**: Increase DPI
```bash
python layer_extractor.py diagram.pdf --dpi 600
```

## Examples

### Standard industrial diagram
```bash
python layer_extractor.py electrical_schematic.pdf
```

### High-detail mechanical drawing
```bash
python layer_extractor.py mechanical_drawing.pdf --dpi 600 -t 25
```

### Diagram with known 4 layers
```bash
python layer_extractor.py hvac_diagram.pdf -n 4
```

### Compressed/low-quality PDF
```bash
python layer_extractor.py scanned_diagram.pdf -t 50 --dpi 300
```

## Tips

1. **Start with defaults** - They work for most diagrams
2. **Check first** - Run once and review output before batch processing
3. **DPI vs File Size** - Higher DPI = better quality but larger files
4. **Tolerance tuning** - Adjust by ±5-10 at a time
5. **Layer count** - Use `-n` if you know the exact number for faster processing

## Requirements

- Python 3.7+
- PyMuPDF (PDF rendering)
- Pillow (image processing)
- NumPy (array operations)
- scikit-learn (color clustering)
120 label/cores/file/SUMMARY.md Normal file
@@ -0,0 +1,120 @@
# PDF Layer Extractor - Summary

## What It Does

Extracts colored layers from PDF industrial diagrams into separate transparent PNG files.

✓ Single PDF file processing
✓ White background filtered (near-white, RGB ≥ 250)
✓ Variable number of layers (auto-detected)
✓ Handles antialiasing around text
✓ High-quality output at configurable DPI

## Quick Start

1. Install dependencies:
```bash
pip install -r requirements.txt
```

2. Run on your PDF:
```bash
python layer_extractor.py your_diagram.pdf
```

3. Find layers in the `output/` folder

## Key Features

### Automatic Color Detection
Uses K-means clustering to identify distinct colored layers. White (RGB ≥ 250) is treated as background only.

### Antialiasing Handling
The tolerance parameter (default 30) handles color mixing:
- Text antialiasing creates gray pixels around black text
- The tolerance value captures these gradual color transitions
- Each pixel gets alpha based on distance from the target color
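
A plausible rule for that last point (a sketch; the exact ramp is defined in `layer_extractor.py`):

```python
def edge_alpha(dist: float, tol: float) -> int:
    """255 at the target color, fading linearly to 0 at the tolerance boundary."""
    return round(max(0.0, 1.0 - dist / tol) * 255)
```
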
### Output Format
Files are named `diagram_layerN_RRR_GGG_BBB.png`:
- Transparent PNG with only that color layer
- RGB values in the filename for reference

## Common Usage

```bash
# Default (works for most diagrams)
python layer_extractor.py diagram.pdf

# High quality
python layer_extractor.py diagram.pdf --dpi 600

# Strict color separation (less antialiasing bleed)
python layer_extractor.py diagram.pdf -t 20

# Lenient (more antialiasing tolerance)
python layer_extractor.py diagram.pdf -t 40

# Extract top 3 layers only
python layer_extractor.py diagram.pdf -n 3

# Custom output folder
python layer_extractor.py diagram.pdf -o my_layers/
```

## Parameters

| Parameter | Default | Description |
|-----------|---------|-------------|
| `--dpi` | 300 | PDF rendering resolution (150/300/600) |
| `-t, --tolerance` | 30 | Color matching tolerance (15-50 typical) |
| `-n, --n-layers` | auto | Number of layers to extract |
| `-m, --min-pixels` | 100 | Minimum pixels for a valid layer |
| `-o, --output` | output | Output directory |

## Tolerance Guide

The tolerance parameter is key to handling antialiasing:

- **15-20**: Very strict, for clean diagrams with no antialiasing
- **30** (default): Balanced, handles moderate antialiasing
- **40-50**: Lenient, for heavy antialiasing or compression artifacts

### Example: Gray Layer with Black Text

When you have a light gray layer with black text:
- Black text creates gray antialiasing pixels
- These gray pixels are close to the gray layer color
- A higher tolerance includes them in the gray layer
- A lower tolerance might miss them

Start with the default (30) and adjust ±10 based on results.

## Files Included

1. **layer_extractor.py** - Main script
2. **requirements.txt** - Dependencies (PyMuPDF, Pillow, numpy, scikit-learn)
3. **README.md** - Full documentation
4. **QUICKSTART.md** - Quick reference guide

## Technical Notes

- Uses PyMuPDF to render the PDF at the specified DPI
- K-means clustering identifies dominant colors
- Euclidean distance in RGB space for color matching
- Alpha channel gradient for smooth edges
- White detection: all RGB values ≥ 250

## Example Output

Input: `piping_diagram.pdf`
Output:
```
output/
├── piping_diagram_layer1_220_050_050.png (red piping)
├── piping_diagram_layer2_050_100_220.png (blue electrical)
├── piping_diagram_layer3_150_150_150.png (gray annotations)
└── piping_diagram_layer4_050_180_050.png (green mechanical)
```

Each PNG has a transparent background with only that color layer visible.
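To sanity-check a run, the extracted layers can be recomposited over a white canvas; if the extraction was clean, the result should approximate the original page. A hedged sketch using Pillow (the `output/` filenames above are illustrative):

```python
from pathlib import Path
from PIL import Image

# Overlay every extracted layer onto a white canvas for visual inspection.
layers = sorted(Path("output").glob("piping_diagram_layer*.png"))
base = Image.open(layers[0]).convert("RGBA")
canvas = Image.new("RGBA", base.size, (255, 255, 255, 255))
for path in layers:
    canvas = Image.alpha_composite(canvas, Image.open(path).convert("RGBA"))
canvas.convert("RGB").save("recombined_check.png")
```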
52
label/cores/file/UPDATE_NOTES.md
Normal file
@@ -0,0 +1,52 @@
# Tolerance Parameter Fix - Update Notes

## What Was Wrong

The original tolerance parameter used raw Euclidean distance in RGB space (0-255 per channel), which was unintuitive:
- The maximum possible distance in RGB is sqrt(3 × 255²) ≈ 441
- A tolerance of "30" was actually very strict (only ~7% of the maximum distance)
- For antialiasing around text you needed values like 150+, which wasn't obvious

## What's Fixed

**New scale: 0-100** (percentage-based)
- 0 = exact color match only
- 30 = 30% of the maximum color distance (default, RECOMMENDED)
- 100 = maximum tolerance

**Why This Matters for Antialiasing:**

Example: gray layer (150,150,150) with black text (0,0,0)
- Antialiasing creates intermediate colors: (75,75,75), (100,100,100), (125,125,125)
- Distance from gray (150,150,150) to (75,75,75) = sqrt(3 × 75²) ≈ 130
- Old scale: you'd need tolerance ~130 (not intuitive)
- New scale: tolerance 30-45 captures these (makes sense!)

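The same arithmetic in code — a quick check of how a percentage tolerance converts to a raw RGB distance (a standalone sketch, independent of the script):

```python
import math

def tolerance_to_distance(tolerance_percent):
    """Convert the 0-100 tolerance scale to a raw RGB distance."""
    max_distance = math.sqrt(3 * 255 ** 2)  # ≈ 441.67
    return (tolerance_percent / 100.0) * max_distance

# Distance from the gray layer (150,150,150) to an antialiased pixel (75,75,75):
d = math.sqrt(3 * 75 ** 2)             # ≈ 129.9
print(d <= tolerance_to_distance(30))  # True: tolerance 30 ≈ 132.5, just captures it
```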
## Updated Recommendations

```bash
# Default - good for most diagrams
python layer_extractor.py diagram.pdf -t 30

# Heavy antialiasing (small text, complex diagrams)
python layer_extractor.py diagram.pdf -t 45

# Extreme antialiasing (compressed PDFs, low quality)
python layer_extractor.py diagram.pdf -t 60

# Very strict (clean diagrams, no antialiasing)
python layer_extractor.py diagram.pdf -t 15
```

## Key Point

**If you see missing pixels around text or edges → INCREASE the tolerance (not decrease!)**

The antialiased pixels are "far" from the target color in RGB space, so they need a higher tolerance to be captured.

## Test Your Diagram

Start with the default (30), then:
- Missing pixels/gaps around text? → Try 45
- Still missing details? → Try 60
- Layers bleeding together? → Try 20
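When in doubt, it can be cheaper to sweep a few tolerance values and compare the outputs side by side. A small shell sketch (the output folder names are illustrative):

```bash
# Extract with several tolerances into separate folders for comparison.
for t in 20 30 45 60; do
    python layer_extractor.py diagram.pdf -t "$t" -o "output_t${t}/"
done
```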
418
label/cores/file/layer_extractor.py
Normal file
@@ -0,0 +1,418 @@
#!/usr/bin/env python3
"""
PDF Industrial Diagram Layer Extractor
Extracts colored layers from PDF diagrams with white backgrounds.
"""

import os
import sys
import numpy as np
from PIL import Image
from sklearn.cluster import KMeans
from collections import Counter
import argparse

try:
    import fitz  # PyMuPDF
except ImportError:
    print("Error: PyMuPDF not installed. Install with: pip install PyMuPDF")
    sys.exit(1)


def pdf_to_image(pdf_path, dpi=300):
    """
    Convert PDF to PIL Image.

    Args:
        pdf_path: Path to PDF file
        dpi: Resolution for rendering (default: 300)

    Returns:
        PIL Image object
    """
    print(f"Loading PDF: {pdf_path}")
    doc = fitz.open(pdf_path)

    if len(doc) > 1:
        print(f"  PDF has {len(doc)} pages, processing first page only")

    # Get first page
    page = doc[0]

    # Render page to image
    mat = fitz.Matrix(dpi / 72, dpi / 72)  # Scale factor for DPI
    pix = page.get_pixmap(matrix=mat, alpha=False)

    # Convert to PIL Image
    img = Image.frombytes("RGB", [pix.width, pix.height], pix.samples)

    print(f"  Rendered at {img.size[0]}x{img.size[1]} pixels ({dpi} DPI)")
    doc.close()

    return img


def get_dominant_colors(img, n_colors=15, sample_fraction=0.2):
    """
    Identify dominant colors using KMeans clustering.

    Args:
        img: PIL Image object
        n_colors: Maximum number of colors to detect
        sample_fraction: Fraction of pixels to sample

    Returns:
        List of (color, pixel_count) tuples sorted by frequency
    """
    print("Analyzing colors...")
    img_array = np.array(img)
    pixels = img_array.reshape(-1, 3)

    # Sample pixels for speed
    if sample_fraction < 1.0:
        n_samples = int(len(pixels) * sample_fraction)
        indices = np.random.choice(len(pixels), n_samples, replace=False)
        sampled_pixels = pixels[indices]
    else:
        sampled_pixels = pixels

    # KMeans clustering
    n_clusters = min(n_colors, len(np.unique(sampled_pixels, axis=0)))
    kmeans = KMeans(n_clusters=n_clusters, random_state=42, n_init=10)
    kmeans.fit(sampled_pixels)

    # Get colors and frequencies
    colors = kmeans.cluster_centers_.astype(int)
    labels = kmeans.predict(pixels)
    counts = Counter(labels)

    # Sort by frequency
    color_counts = [(tuple(colors[i]), counts[i]) for i in range(len(colors))]
    color_counts.sort(key=lambda x: x[1], reverse=True)

    return color_counts

def is_white(color, threshold=250):
    """Check if a color is white (strict check for pure white background)."""
    return all(c >= threshold for c in color)


def color_distance(color1, color2):
    """Calculate the Euclidean distance between two RGB colors."""
    return np.sqrt(sum((a - b) ** 2 for a, b in zip(color1, color2)))


def merge_similar_colors(color_counts, similarity_threshold=40):
    """
    Merge similar colors into groups to reduce layer fragmentation.

    Args:
        color_counts: List of (color, pixel_count) tuples
        similarity_threshold: Maximum color distance to merge (0-441, default: 40)

    Returns:
        List of merged (color, total_pixel_count) tuples
    """
    if not color_counts:
        return []

    merged = []
    used = set()

    for i, (color1, count1) in enumerate(color_counts):
        if i in used:
            continue

        # Start a new group with this color
        group_colors = [color1]
        group_count = count1
        used.add(i)

        # Find similar colors to merge
        for j, (color2, count2) in enumerate(color_counts):
            if j <= i or j in used:
                continue

            if color_distance(color1, color2) <= similarity_threshold:
                group_colors.append(color2)
                group_count += count2
                used.add(j)

        # Use the average color of the group (per channel)
        avg_color = tuple(int(np.mean([c[ch] for c in group_colors])) for ch in range(3))
        merged.append((avg_color, group_count))

    # Sort by pixel count (most predominant first)
    merged.sort(key=lambda x: x[1], reverse=True)
    return merged


def get_layer_region(img, target_color, tolerance=30):
    """
    Get the bounding box region of a layer.

    Args:
        img: PIL Image object
        target_color: RGB tuple of target color
        tolerance: Color matching tolerance (0-100)

    Returns:
        Tuple of (min_x, min_y, max_x, max_y, pixel_count) or None if no pixels found
    """
    img_array = np.array(img).astype(np.float32)

    # Calculate color distance for all pixels
    target = np.array(target_color, dtype=np.float32)
    distances = np.sqrt(np.sum((img_array - target) ** 2, axis=2))

    # Scale tolerance from the 0-100 scale to an RGB distance
    max_distance = np.sqrt(3 * 255 ** 2)
    actual_tolerance = (tolerance / 100.0) * max_distance

    # Find matching pixels
    mask = distances <= actual_tolerance

    if not mask.any():
        return None

    # Get coordinates of matching pixels
    y_coords, x_coords = np.where(mask)

    if len(y_coords) == 0:
        return None

    # Calculate bounding box
    min_x, max_x = x_coords.min(), x_coords.max()
    min_y, max_y = y_coords.min(), y_coords.max()
    pixel_count = mask.sum()

    return (min_x, min_y, max_x, max_y, pixel_count)

def extract_layer(img, target_color, tolerance=30, min_alpha=128):
    """
    Extract a single colored layer.

    Args:
        img: PIL Image object
        target_color: RGB tuple of target color
        tolerance: Color matching tolerance as a percentage of the maximum
            RGB distance (0-100, where 100 = max tolerance)
        min_alpha: Minimum alpha value to keep (0-255, higher = fewer ghost pixels)

    Returns:
        PIL Image with transparent background
    """
    img_array = np.array(img).astype(np.float32)
    h, w, _ = img_array.shape

    # Create output with alpha channel
    output = np.zeros((h, w, 4), dtype=np.uint8)

    # Calculate color distance for all pixels
    target = np.array(target_color, dtype=np.float32)
    distances = np.sqrt(np.sum((img_array - target) ** 2, axis=2))

    # Scale tolerance: the max Euclidean distance in RGB is sqrt(3*255^2) ≈ 441.
    # Map tolerance (0-100) to an actual distance (0-441):
    #   tolerance=30 maps to ~132 distance (good for moderate antialiasing)
    #   tolerance=50 maps to ~220 distance (good for heavy antialiasing)
    max_distance = np.sqrt(3 * 255 ** 2)  # ≈ 441.67
    # The small epsilon guards against division by zero when tolerance=0.
    actual_tolerance = max((tolerance / 100.0) * max_distance, 1e-6)

    # Mask pixels within tolerance
    mask = distances <= actual_tolerance

    # Calculate alpha with a gradient based on distance
    alpha = np.clip(255 * (1 - distances / actual_tolerance), 0, 255).astype(np.uint8)

    # Filter out ghost pixels: only keep pixels with alpha >= min_alpha
    strong_mask = mask & (alpha >= min_alpha)

    # Copy matching pixels
    output[strong_mask, :3] = img_array[strong_mask].astype(np.uint8)
    output[strong_mask, 3] = alpha[strong_mask]

    return Image.fromarray(output, 'RGBA')

def process_pdf(pdf_path, output_dir='output', dpi=300, tolerance=30,
                min_pixels=100, n_layers=None, merge_threshold=40,
                show_regions=True, min_alpha=128):
    """
    Process a PDF diagram and extract layers.

    Args:
        pdf_path: Path to PDF file
        output_dir: Output directory for layers
        dpi: PDF rendering resolution
        tolerance: Color matching tolerance
        min_pixels: Minimum pixels for valid layer
        n_layers: Number of layers to extract (None = auto)
        merge_threshold: Color distance for merging similar layers (0-441)
        show_regions: Display bounding box regions for each layer
        min_alpha: Minimum alpha value to keep (0-255, removes ghost pixels)
    """
    # Convert PDF to image
    img = pdf_to_image(pdf_path, dpi)
    total_pixels = img.size[0] * img.size[1]

    # Detect colors
    color_counts = get_dominant_colors(img, n_colors=20)

    # Filter out white background
    layer_colors = []
    print("\nDetected colors (before merging):")
    for color, count in color_counts:
        if is_white(color):
            print(f"  RGB{color}: {count:,} pixels - WHITE BACKGROUND (skipped)")
        elif count >= min_pixels:
            percentage = (count / total_pixels) * 100
            layer_colors.append((color, count))
            print(f"  RGB{color}: {count:,} pixels ({percentage:.1f}%)")

    if not layer_colors:
        print("No colored layers found!")
        return

    # Merge similar colors to reduce fragmentation
    print(f"\nMerging similar colors (threshold: {merge_threshold})...")
    merged_layers = merge_similar_colors(layer_colors, merge_threshold)

    print("\nMerged layers (predominant first):")
    for color, count in merged_layers:
        percentage = (count / total_pixels) * 100
        print(f"  RGB{color}: {count:,} pixels ({percentage:.1f}%)")

    # Limit layers if specified
    if n_layers:
        merged_layers = merged_layers[:n_layers]
        print(f"\nKeeping top {n_layers} layers")

    print(f"\n{len(merged_layers)} layers to extract")

    # Create output directory
    base_name = os.path.splitext(os.path.basename(pdf_path))[0]
    os.makedirs(output_dir, exist_ok=True)

    # Analyze regions and extract layers
    if show_regions:
        print("\nAnalyzing layer regions...")

    print("\nExtracting layers...")
    for idx, (color, count) in enumerate(merged_layers, 1):
        percentage = (count / total_pixels) * 100

        if show_regions:
            # Get region information
            region = get_layer_region(img, color, tolerance)
            if region:
                min_x, min_y, max_x, max_y, pixel_count = region
                width = max_x - min_x + 1
                height = max_y - min_y + 1
                print(f"  [{idx}/{len(merged_layers)}] RGB{color} - {count:,} px ({percentage:.1f}%)")
                print(f"    Region: ({min_x},{min_y}) to ({max_x},{max_y}) - {width}x{height} px")
            else:
                print(f"  [{idx}/{len(merged_layers)}] RGB{color} - {count:,} px ({percentage:.1f}%)")
        else:
            print(f"  [{idx}/{len(merged_layers)}] RGB{color}...", end=' ')

        # Extract layer
        layer_img = extract_layer(img, color, tolerance, min_alpha)

        # Save with a descriptive filename
        color_name = f"{color[0]:03d}_{color[1]:03d}_{color[2]:03d}"
        output_path = os.path.join(output_dir, f"{base_name}_layer{idx}_{color_name}.png")
        layer_img.save(output_path)

        if not show_regions:
            print("✓ Saved")
        else:
            print(f"    Saved: {output_path}")

    print(f"\n✓ Complete! {len(merged_layers)} layers saved to: {output_dir}/")

def main():
    parser = argparse.ArgumentParser(
        description='Extract colored layers from PDF industrial diagrams',
        formatter_class=argparse.RawDescriptionHelpFormatter,
        epilog="""
Examples:
  # Basic usage
  python layer_extractor.py diagram.pdf

  # Custom output directory and DPI
  python layer_extractor.py diagram.pdf -o layers/ --dpi 600

  # Adjust color tolerance
  python layer_extractor.py diagram.pdf -t 40

  # Extract a specific number of layers
  python layer_extractor.py diagram.pdf -n 5
"""
    )

    parser.add_argument('pdf', help='Input PDF file')
    parser.add_argument('-o', '--output', default='output',
                        help='Output directory (default: output)')
    parser.add_argument('--dpi', type=int, default=300,
                        help='PDF rendering DPI (default: 300, higher = better quality)')
    parser.add_argument('-t', '--tolerance', type=int, default=30,
                        help='Color matching tolerance 0-100 (default: 30, higher = more lenient)')
    parser.add_argument('-n', '--n-layers', type=int,
                        help='Extract exactly N layers (default: auto-detect all)')
    parser.add_argument('-m', '--min-pixels', type=int, default=100,
                        help='Minimum pixels for valid layer (default: 100)')
    parser.add_argument('--merge', type=int, default=40,
                        help='Color merge threshold 0-441 (default: 40, higher = more aggressive merging)')
    parser.add_argument('--min-alpha', type=int, default=128,
                        help='Minimum alpha value 0-255 (default: 128, higher = remove more ghost pixels)')
    parser.add_argument('--no-regions', action='store_true',
                        help='Disable region analysis output')

    args = parser.parse_args()

    # Validate tolerance range
    if not 0 <= args.tolerance <= 100:
        print(f"Error: Tolerance must be between 0-100 (got {args.tolerance})")
        return 1

    # Validate min_alpha range
    if not 0 <= args.min_alpha <= 255:
        print(f"Error: min-alpha must be between 0-255 (got {args.min_alpha})")
        return 1

    # Validate input
    if not os.path.isfile(args.pdf):
        print(f"Error: File not found: {args.pdf}")
        return 1

    if not args.pdf.lower().endswith('.pdf'):
        print("Error: Input must be a PDF file")
        return 1

    # Process PDF
    try:
        process_pdf(
            args.pdf,
            output_dir=args.output,
            dpi=args.dpi,
            tolerance=args.tolerance,
            min_pixels=args.min_pixels,
            n_layers=args.n_layers,
            merge_threshold=args.merge,
            show_regions=not args.no_regions,
            min_alpha=args.min_alpha
        )
    except Exception as e:
        print(f"\n✗ Error: {e}")
        import traceback
        traceback.print_exc()
        return 1

    return 0


if __name__ == '__main__':
    sys.exit(main())
4
label/cores/file/requirements.txt
Normal file
@@ -0,0 +1,4 @@
PyMuPDF>=1.23.0
Pillow>=10.0.0
numpy>=1.24.0
scikit-learn>=1.3.0
376
label/cores/sobel.py
Normal file
@@ -0,0 +1,376 @@
#!/usr/bin/env python3
"""
PDF Edge Detection with Color Grouping (Preserving Edge Segregation)

Input: input.pdf
Output: output_sobel/ folder
"""

import cv2
import numpy as np
from pdf2image import convert_from_path
import os
import shutil
import sys
from collections import Counter


def clear_output_directory(output_dir):
    if os.path.exists(output_dir):
        shutil.rmtree(output_dir)
    os.makedirs(output_dir, exist_ok=True)

def enhance_pastel_colors(image_bgr):
    """
    Increase the saturation of pastel colors; pull grays closer to black.
    """
    hsv = cv2.cvtColor(image_bgr, cv2.COLOR_BGR2HSV).astype(np.float32)
    h, s, v = cv2.split(hsv)

    # Identify pastel colors (bright but weakly saturated)
    pastel_mask = (v > 150) & (s < 100) & (s > 10)

    # Identify gray (essentially unsaturated)
    gray_mask = (s <= 10)

    # Boost saturation for pastels
    s[pastel_mask] = np.clip(s[pastel_mask] * 2.5, 0, 255)

    # Darken grays
    v[gray_mask] = np.clip(v[gray_mask] * 0.3, 0, 255)

    # Reconstruct the image
    hsv_enhanced = cv2.merge([h, s, v]).astype(np.uint8)
    result = cv2.cvtColor(hsv_enhanced, cv2.COLOR_HSV2BGR)

    return result

def sobel_edge_detection(image):
    """Apply a Sobel filter to detect edges."""
    # Quantize colors to suppress gradient noise
    hsv = cv2.cvtColor(image, cv2.COLOR_BGR2HSV)
    h, s, v = cv2.split(hsv)

    h_quantized = (h // 5) * 5
    s_quantized = (s // 64) * 64
    v_quantized = (v // 64) * 64

    hsv_quantized = cv2.merge([h_quantized, s_quantized, v_quantized])
    image_quantized = cv2.cvtColor(hsv_quantized, cv2.COLOR_HSV2BGR)

    # Apply Sobel per channel
    b, g, r = cv2.split(image_quantized)

    edges_b = np.sqrt(cv2.Sobel(b, cv2.CV_64F, 1, 0, ksize=3)**2 +
                      cv2.Sobel(b, cv2.CV_64F, 0, 1, ksize=3)**2)
    edges_g = np.sqrt(cv2.Sobel(g, cv2.CV_64F, 1, 0, ksize=3)**2 +
                      cv2.Sobel(g, cv2.CV_64F, 0, 1, ksize=3)**2)
    edges_r = np.sqrt(cv2.Sobel(r, cv2.CV_64F, 1, 0, ksize=3)**2 +
                      cv2.Sobel(r, cv2.CV_64F, 0, 1, ksize=3)**2)

    combined = np.sqrt(edges_b**2 + edges_g**2 + edges_r**2)
    combined = combined / (combined.max() + 1e-8)

    edge_mask = (combined > 0.10).astype(np.uint8) * 255
    kernel = np.ones((2, 2), np.uint8)
    edge_mask = cv2.morphologyEx(edge_mask, cv2.MORPH_CLOSE, kernel)

    # Create BGRA output that keeps the original colors on the edges
    result = np.zeros((image.shape[0], image.shape[1], 4), dtype=np.uint8)
    result[edge_mask > 0, :3] = image[edge_mask > 0]
    result[edge_mask > 0, 3] = 255

    # Remove white pixels
    white_mask = np.all(result[:, :, :3] > 240, axis=2)
    result[white_mask, 3] = 0

    return result

def analyze_edge_colors(edge_img, edge_mask):
    """
    Analyze whether an edge has multiple distinct colors.

    Returns:
        (has_multiple_colors, num_colors, dominant_hues)
    """
    bgr = edge_img[:, :, :3]
    pixels = bgr[edge_mask]

    # Filter white
    non_white = pixels[~np.all(pixels > 240, axis=1)]

    if len(non_white) < 10:
        return False, 0, []

    # Convert to HSV
    hsv = cv2.cvtColor(non_white.reshape(-1, 1, 3), cv2.COLOR_BGR2HSV).reshape(-1, 3)

    # Filter low saturation (gray)
    saturated_mask = hsv[:, 1] > 30
    saturated_hsv = hsv[saturated_mask]

    if len(saturated_hsv) < 10:
        return False, 0, []

    # Quantize hue into bins (every 10 degrees)
    hue_bins = (saturated_hsv[:, 0] // 10).astype(np.int32)

    # Count occurrences
    unique_hues, counts = np.unique(hue_bins, return_counts=True)

    # Keep only significant hues (>5% of pixels)
    total = len(hue_bins)
    significant_mask = counts > (total * 0.05)
    significant_hues = unique_hues[significant_mask]

    num_colors = len(significant_hues)

    return num_colors > 1, num_colors, significant_hues.tolist()

def split_edge_by_color(edges_bgra, edge_mask, labels, edge_id, num_colors):
    """
    Split an edge into multiple sub-edges based on color, using K-means.

    Returns:
        List of (sub_edge_image, cluster_id) tuples
    """
    bgr = edges_bgra[:, :, :3]

    # Get edge pixels
    y_coords, x_coords = np.where(edge_mask)
    edge_pixels = bgr[edge_mask]

    # Filter white pixels
    non_white_mask = ~np.all(edge_pixels > 240, axis=1)
    valid_pixels = edge_pixels[non_white_mask]
    valid_y = y_coords[non_white_mask]
    valid_x = x_coords[non_white_mask]

    if len(valid_pixels) < 10:
        # Too few pixels to split; return the original edge
        edge_img = np.zeros_like(edges_bgra)
        edge_img[edge_mask] = edges_bgra[edge_mask]
        return [(edge_img, 0)]

    # Convert to HSV for clustering (use only H and S)
    hsv = cv2.cvtColor(valid_pixels.reshape(-1, 1, 3), cv2.COLOR_BGR2HSV).reshape(-1, 3)
    features = hsv[:, :2].astype(np.float32)  # Hue and saturation only

    # K-means clustering
    criteria = (cv2.TERM_CRITERIA_EPS + cv2.TERM_CRITERIA_MAX_ITER, 10, 1.0)
    _, cluster_labels, centers = cv2.kmeans(features, num_colors, None, criteria, 3, cv2.KMEANS_PP_CENTERS)

    cluster_labels = cluster_labels.flatten()

    # Create sub-edges (keep them separate!)
    sub_edges = []

    for cluster_id in range(num_colors):
        cluster_mask_1d = (cluster_labels == cluster_id)

        # Create a separate image for this sub-edge
        sub_edge_img = np.zeros_like(edges_bgra)
        cluster_y = valid_y[cluster_mask_1d]
        cluster_x = valid_x[cluster_mask_1d]
        sub_edge_img[cluster_y, cluster_x] = edges_bgra[cluster_y, cluster_x]

        sub_edges.append((sub_edge_img, cluster_id))

    return sub_edges

def get_edge_mode_color(edge_img, edge_mask):
    """
    Get the mode (most common) color of an edge.
    """
    bgr = edge_img[:, :, :3]
    pixels = bgr[edge_mask]

    # Filter white
    non_white = pixels[~np.all(pixels > 240, axis=1)]

    if len(non_white) == 0:
        return None

    # Convert to HSV
    hsv = cv2.cvtColor(non_white.reshape(-1, 1, 3), cv2.COLOR_BGR2HSV).reshape(-1, 3)

    # Filter low saturation
    saturated_mask = (hsv[:, 1] > 30)
    saturated_pixels = non_white[saturated_mask]

    if len(saturated_pixels) == 0:
        saturated_pixels = non_white

    # Get the mode color by packing BGR into a single integer per pixel
    pixel_ints = (saturated_pixels[:, 0].astype(np.int32) +
                  saturated_pixels[:, 1].astype(np.int32) * 256 +
                  saturated_pixels[:, 2].astype(np.int32) * 65536)

    mode_int = np.bincount(pixel_ints).argmax()

    mode_color = np.array([
        mode_int % 256,
        (mode_int // 256) % 256,
        (mode_int // 65536) % 256
    ], dtype=np.uint8)

    return mode_color

def process_and_group_edges(edges_bgra, color_threshold=30):
    """
    Process edges: split multi-color edges, then group by color.
    Edges remain separate (segregated) even within groups.

    Returns:
        List of (group_image, mode_color, edge_count) tuples
    """
    alpha = edges_bgra[:, :, 3]

    # Find connected components
    num_labels, labels = cv2.connectedComponents(alpha)

    print(f"  Found {num_labels - 1} edges")

    if num_labels <= 1:
        return []

    # Process each edge: split if multi-color
    all_edge_images = []

    for edge_id in range(1, num_labels):
        edge_mask = (labels == edge_id)

        if not np.any(edge_mask):
            continue

        # Analyze colors
        has_multiple, num_colors, hues = analyze_edge_colors(edges_bgra, edge_mask)

        if has_multiple:
            print(f"    Edge {edge_id}: {num_colors} colors detected, splitting...")
            # Split into sub-edges
            sub_edges = split_edge_by_color(edges_bgra, edge_mask, labels, edge_id, num_colors)
            all_edge_images.extend(sub_edges)
        else:
            # Keep as a single edge
            edge_img = np.zeros_like(edges_bgra)
            edge_img[edge_mask] = edges_bgra[edge_mask]
            all_edge_images.append((edge_img, 0))

    print(f"  Total edges after splitting: {len(all_edge_images)}")

    # Get the mode color of each edge
    edge_colors = []
    for edge_img, cluster_id in all_edge_images:
        edge_mask = edge_img[:, :, 3] > 0
        mode_color = get_edge_mode_color(edge_img, edge_mask)
        edge_colors.append(mode_color)

    # Group by similar colors
    groups = []
    used_indices = set()

    for i, mode_color in enumerate(edge_colors):
        if i in used_indices or mode_color is None:
            continue

        # Start a new group
        group_indices = [i]
        used_indices.add(i)

        # Find similar edges
        for j, other_color in enumerate(edge_colors):
            if j in used_indices or other_color is None:
                continue

            # Calculate color distance
            distance = np.linalg.norm(mode_color.astype(float) - other_color.astype(float))

            if distance <= color_threshold:
                group_indices.append(j)
                used_indices.add(j)

        # Create the group image (edges remain separate!)
        group_img = np.zeros_like(edges_bgra)
        for idx in group_indices:
            edge_img, _ = all_edge_images[idx]
            mask = edge_img[:, :, 3] > 0
            group_img[mask] = edge_img[mask]

        groups.append((group_img, mode_color, len(group_indices)))

    print(f"  Grouped into {len(groups)} color groups")

    return groups

def process_pdf(pdf_path, output_dir, dpi=200):
    clear_output_directory(output_dir)

    print(f"Processing PDF: {pdf_path}")
    print(f"Converting at {dpi} DPI...\n")

    images = convert_from_path(pdf_path, dpi=dpi)
    print(f"Total pages: {len(images)}\n")

    for page_num, pil_image in enumerate(images, start=1):
        print(f"Page {page_num}/{len(images)}...")

        # Convert to BGR
        image = cv2.cvtColor(np.array(pil_image), cv2.COLOR_RGB2BGR)

        # Enhance pastel colors
        print("  - Enhancing pastel colors...")
        enhanced_image = enhance_pastel_colors(image)

        # Detect edges
        print("  - Detecting edges...")
        edges = sobel_edge_detection(enhanced_image)

        # Process and group edges
        print("  - Processing and grouping edges by color...")
        groups = process_and_group_edges(edges, color_threshold=30)

        # Save outputs
        base = f"page{page_num:03d}"

        cv2.imwrite(os.path.join(output_dir, f"{base}_original.png"), image)
        cv2.imwrite(os.path.join(output_dir, f"{base}_enhanced.png"), enhanced_image)
        cv2.imwrite(os.path.join(output_dir, f"{base}_edges.png"), edges)

        # Save each group
        for group_idx, (group_img, mode_color, edge_count) in enumerate(groups, start=1):
            path = os.path.join(output_dir, f"{base}_group{group_idx}.png")
            cv2.imwrite(path, group_img)
            print(f"    Group {group_idx}: {edge_count} edges, mode color (BGR): {tuple(mode_color)}")

        print(f"  - Saved {len(groups)} group images\n")

    print("Complete!")


def main():
    pdf_path = "input.pdf"
    output_dir = "output_sobel"

    if not os.path.exists(pdf_path):
        print(f"Error: '{pdf_path}' not found!")
        return 1

    try:
        process_pdf(pdf_path, output_dir)
        return 0
    except Exception as e:
        print(f"Error: {e}")
        import traceback
        traceback.print_exc()
        return 1


if __name__ == "__main__":
    sys.exit(main())
31
label/infra/Pulumi.coodex.yaml
Normal file
@@ -0,0 +1,31 @@
config:
  lambda_api_gateway:lambda_api_gateway: valvulas_funcao_rekognition_dev
  aws:region: us-east-1
  project: Rekognition Valvula Funcao
  environment: dev
  vpc:
    id: vpc-0c83ac3bfb36f79b4
  api:
    name: AssistentesProdutosServicosAPI
    description: API gateway created by pulumi
    endpoint_type: PRIVATE
  lambda:
    entity_extraction:
      name: assistente-produtos-servicos-dev
      handler: agent.agent_call
      timeout: 900
      runtime: python3.12
  ecr_repo:
    name: assistente-produtos-servicos-backend-dev
    repository_url: 277048801940.dkr.ecr.us-east-1.amazonaws.com/assistente-produtos-servicos-backend-dev
  api_gateway:
    name: token-assistente-produtos-servicos-pulumi
    description: API key for the dev stage of the API Gateway
    usage_plan_name: APIAIUsagePlan
    stage_name: dev
    method: POST
    api_key_required: true
    request_model: Empty
  deployment:
    stage_name: dev
13
label/infra/code/Dockerfile
Normal file
@@ -0,0 +1,13 @@
FROM public.ecr.aws/lambda/python:3.13

# Copy requirements.txt
COPY requirements.txt ${LAMBDA_TASK_ROOT}

# Install the specified packages
RUN pip install -r requirements.txt

# Copy function code
COPY ./ ${LAMBDA_TASK_ROOT}

# Set the CMD to your handler (could also be done as a parameter override outside of the Dockerfile)
CMD ["lambda_handler.lambda_handler"]
93
label/infra/code/README.md
Normal file
@@ -0,0 +1,93 @@
# ChatBot

## Getting started

To make it easy for you to get started with GitLab, here's a list of recommended next steps.

Already a pro? Just edit this README.md and make it your own. Want to make it easy? [Use the template at the bottom](#editing-this-readme)!

## Add your files

- [ ] [Create](https://docs.gitlab.com/ee/user/project/repository/web_editor.html#create-a-file) or [upload](https://docs.gitlab.com/ee/user/project/repository/web_editor.html#upload-a-file) files
- [ ] [Add files using the command line](https://docs.gitlab.com/topics/git/add_files/#add-files-to-a-git-repository) or push an existing Git repository with the following command:

```
cd existing_repo
git remote add origin https://gitlab.shared.cloud.dnxbrasil.com.br/dnx-br/clientes/ifsp/chatbot.git
git branch -M main
git push -uf origin main
```

## Integrate with your tools

- [ ] [Set up project integrations](https://gitlab.shared.cloud.dnxbrasil.com.br/dnx-br/clientes/ifsp/chatbot/-/settings/integrations)

## Collaborate with your team

- [ ] [Invite team members and collaborators](https://docs.gitlab.com/ee/user/project/members/)
- [ ] [Create a new merge request](https://docs.gitlab.com/ee/user/project/merge_requests/creating_merge_requests.html)
- [ ] [Automatically close issues from merge requests](https://docs.gitlab.com/ee/user/project/issues/managing_issues.html#closing-issues-automatically)
- [ ] [Enable merge request approvals](https://docs.gitlab.com/ee/user/project/merge_requests/approvals/)
- [ ] [Set auto-merge](https://docs.gitlab.com/user/project/merge_requests/auto_merge/)

## Test and Deploy

Use the built-in continuous integration in GitLab.

- [ ] [Get started with GitLab CI/CD](https://docs.gitlab.com/ee/ci/quick_start/)
- [ ] [Analyze your code for known vulnerabilities with Static Application Security Testing (SAST)](https://docs.gitlab.com/ee/user/application_security/sast/)
- [ ] [Deploy to Kubernetes, Amazon EC2, or Amazon ECS using Auto Deploy](https://docs.gitlab.com/ee/topics/autodevops/requirements.html)
- [ ] [Use pull-based deployments for improved Kubernetes management](https://docs.gitlab.com/ee/user/clusters/agent/)
- [ ] [Set up protected environments](https://docs.gitlab.com/ee/ci/environments/protected_environments.html)

***

# Editing this README

When you're ready to make this README your own, just edit this file and use the handy template below (or feel free to structure it however you want - this is just a starting point!). Thanks to [makeareadme.com](https://www.makeareadme.com/) for this template.

## Suggestions for a good README

Every project is different, so consider which of these sections apply to yours. The sections used in the template are suggestions for most open source projects. Also keep in mind that while a README can be too long and detailed, too long is better than too short. If you think your README is too long, consider utilizing another form of documentation rather than cutting out information.

## Name
Choose a self-explaining name for your project.

## Description
Let people know what your project can do specifically. Provide context and add a link to any reference visitors might be unfamiliar with. A list of Features or a Background subsection can also be added here. If there are alternatives to your project, this is a good place to list differentiating factors.

## Badges
On some READMEs, you may see small images that convey metadata, such as whether or not all the tests are passing for the project. You can use Shields to add some to your README. Many services also have instructions for adding a badge.

## Visuals
Depending on what you are making, it can be a good idea to include screenshots or even a video (you'll frequently see GIFs rather than actual videos). Tools like ttygif can help, but check out Asciinema for a more sophisticated method.

## Installation
Within a particular ecosystem, there may be a common way of installing things, such as using Yarn, NuGet, or Homebrew. However, consider the possibility that whoever is reading your README is a novice and would like more guidance. Listing specific steps helps remove ambiguity and gets people using your project as quickly as possible. If it only runs in a specific context like a particular programming language version or operating system or has dependencies that have to be installed manually, also add a Requirements subsection.

## Usage
Use examples liberally, and show the expected output if you can. It's helpful to inline the smallest example of usage that you can demonstrate, while providing links to more sophisticated examples if they are too long to reasonably include in the README.

## Support
Tell people where they can go for help. It can be any combination of an issue tracker, a chat room, an email address, etc.

## Roadmap
If you have ideas for releases in the future, it is a good idea to list them in the README.

## Contributing
State if you are open to contributions and what your requirements are for accepting them.

For people who want to make changes to your project, it's helpful to have some documentation on how to get started. Perhaps there is a script that they should run or some environment variables that they need to set. Make these steps explicit. These instructions could also be useful to your future self.

You can also document commands to lint the code or run tests. These steps help to ensure high code quality and reduce the likelihood that the changes inadvertently break something. Having instructions for running tests is especially helpful if it requires external setup, such as starting a Selenium server for testing in a browser.

## Authors and acknowledgment
Show your appreciation to those who have contributed to the project.

## License
For open source projects, say how it is licensed.

## Project status
If you have run out of energy or time for your project, put a note at the top of the README saying that development has slowed down or stopped completely. Someone may choose to fork your project or volunteer to step in as a maintainer or owner, allowing your project to keep going. You can also make an explicit request for maintainers.
1261
label/infra/code/diagram_processor.py
Normal file
File diff suppressed because it is too large
181
label/infra/code/function_a.py
Normal file
@@ -0,0 +1,181 @@
import boto3
import os
import tempfile
import json
from urllib.parse import urlparse
from diagram_processor import DiagramProcessor


def parse_s3_path(s3_path):
    """
    Parse an S3 path into bucket and key.

    Args:
        s3_path: S3 path like 's3://bucket-name/path/to/file.pdf'

    Returns:
        Tuple (bucket, key)
    """
    if not s3_path.startswith('s3://'):
        raise ValueError(f"Invalid S3 path: {s3_path}. Must start with 's3://'")

    parsed = urlparse(s3_path)
    bucket = parsed.netloc
    key = parsed.path.lstrip('/')

    return bucket, key


def download_from_s3(s3_path, local_path):
    """
    Download a file from S3.

    Args:
        s3_path: S3 path (s3://bucket/key)
        local_path: Local file path to save to
    """
    bucket, key = parse_s3_path(s3_path)

    s3_client = boto3.client('s3')
    print(f"Downloading from S3: {s3_path}")
    s3_client.download_file(bucket, key, local_path)
    print(f"Downloaded to: {local_path}")

def execute(s3_path):
    """
    Function A - Process a diagram from S3 and return matches only.

    Args:
        s3_path: S3 path to diagram (e.g., 's3://my-bucket/diagrams/diagram.pdf')

    Returns:
        Dictionary with matches of labels and blocks
    """
    print("Function A - Diagram Processing")
    print(f"Input S3 path: {s3_path}")

    # Create temporary directory
    with tempfile.TemporaryDirectory() as temp_dir:
        # Download diagram from S3
        bucket, key = parse_s3_path(s3_path)
        input_file = os.path.join(temp_dir, os.path.basename(key))
        download_from_s3(s3_path, input_file)

        # Create output directory for processing
        output_dir = os.path.join(temp_dir, 'output')
        os.makedirs(output_dir, exist_ok=True)

        # Initialize processor
        print("\nInitializing DiagramProcessor...")
        processor = DiagramProcessor(
            region=os.environ.get('AWS_REGION', 'us-east-1'),
            custom_labels_arn=os.environ.get('CUSTOM_LABELS_ARN', 'arn:aws:rekognition:us-east-1:173378533286:project/labels-valvula/version/labels-valvula.2025-11-24T15.44.16/1764009856090')
        )

        # Process diagram
        print("\nProcessing diagram...")
        try:
            results = processor.process_single_diagram(
                diagram_path=input_file,
                output_base_dir=output_dir,
                grid_size=(5, 5),
                overlap_percent=10,
                keep_regex_list=[r'\+', r'\+', r'.*[Xx].*', r'\*', r'\\'],
                min_confidence=80,
                custom_labels_confidence=60,
                iou_threshold=0.3,
                matching_max_distance=200
            )

            # Extract only the matches
            matching_results = results['matching_results']

            # Format matches for clean output
            formatted_matches = []
            for match in matching_results['matches']:
                match_type = match.get('match_type', 'vm_label')

                if match_type == 'two_labels':
                    formatted_match = {
                        'object_name': match['object_name'],
                        'object_confidence': round(match['object_confidence'], 2),
                        'match_type': match_type,
                        'text_top': match['text_top'],
                        'text_top_confidence': round(match['text_confidence_top'], 2),
                        'text_bottom': match['text_bottom'],
                        'text_bottom_confidence': round(match['text_confidence_bottom'], 2),
                        'object_bbox': match['object_bbox'],
                        'text_bbox_top': match['text_bbox_top'],
                        'text_bbox_bottom': match['text_bbox_bottom']
                    }
                else:
                    formatted_match = {
                        'object_name': match['object_name'],
                        'object_confidence': round(match['object_confidence'], 2),
                        'match_type': match_type,
                        'text': match['text'],
                        'text_confidence': round(match['text_confidence'], 2),
                        'distance_pixels': round(match['distance_pixels'], 2),
                        'object_bbox': match['object_bbox'],
                        'text_bbox': match['text_bbox']
                    }

                formatted_matches.append(formatted_match)

            # Format unmatched objects
            unmatched_objects = [
                {
                    'name': obj['Name'],
                    'confidence': round(obj['Confidence'], 2),
                    'bbox': obj['global_bbox']
                }
                for obj in matching_results['unmatched_objects']
            ]

            # Format unmatched texts
            unmatched_texts = [
                {
                    'text': text['text'],
                    'confidence': round(text['confidence'], 2),
                    'bbox': text['global_bbox']
                }
                for text in matching_results['unmatched_texts']
            ]

            # Prepare response
            response = {
                'status': 'success',
                'input_s3_path': s3_path,
                'summary': {
                    'total_matches': len(formatted_matches),
                    'unmatched_objects': len(unmatched_objects),
                    'unmatched_texts': len(unmatched_texts),
                    'matching_rate': f"{matching_results['matching_rate']*100:.1f}%"
                },
                'matches': formatted_matches,
                'unmatched_objects': unmatched_objects,
                'unmatched_texts': unmatched_texts
            }

            print("\n" + "="*80)
            print("PROCESSING COMPLETE")
            print("="*80)
            print(f"Total matches: {len(formatted_matches)}")
            print(f"Matching rate: {matching_results['matching_rate']*100:.1f}%")
            print(f"Unmatched objects: {len(unmatched_objects)}")
            print(f"Unmatched texts: {len(unmatched_texts)}")

            return response

        except Exception as e:
            error_message = f"Error processing diagram: {str(e)}"
            print(error_message)
            import traceback
            traceback.print_exc()

            return {
                'status': 'error',
                'error': error_message,
                'input_s3_path': s3_path
            }
6
label/infra/code/function_b.py
Normal file
@@ -0,0 +1,6 @@
def execute(text):
    """
    Function B - prints the received text parameter.
    """
    print(f"Function B received: {text}")
    return f"Function B processed: {text}"
111
label/infra/code/lambda_handler.py
Normal file
@@ -0,0 +1,111 @@
import json
import function_a
import function_b


def lambda_handler(event, context):
    """
    AWS Lambda handler that routes to function_a or function_b.

    Expected event structure:
    {
        "function_name": "function_a" or "function_b",
        "text_parameter": "your string here"
    }
    """
    try:
        # DEBUG: Log the entire event
        print(f"Received event: {json.dumps(event)}")

        # Handle different event sources
        body = None

        # Check if body exists and is a string (API Gateway)
        if 'body' in event:
            if event['body'] is None:
                return {
                    'statusCode': 400,
                    'body': json.dumps({'error': 'Request body is empty'})
                }

            if isinstance(event['body'], str):
                # Try to parse JSON
                try:
                    body = json.loads(event['body'])
                except json.JSONDecodeError as e:
                    return {
                        'statusCode': 400,
                        'body': json.dumps({
                            'error': 'Invalid JSON in request body',
                            'details': str(e),
                            'received': event['body'][:100]  # First 100 chars
                        })
                    }
            else:
                body = event['body']
        else:
            # Direct invocation (no body wrapper)
            body = event

        print(f"Parsed body: {json.dumps(body)}")

        # Get parameters
        function_name = body.get('function_name')
        text_parameter = body.get('text_parameter')

        # Validate inputs
        if not function_name:
            return {
                'statusCode': 400,
                'body': json.dumps({'error': 'function_name is required'})
            }

        if not text_parameter:
            return {
                'statusCode': 400,
                'body': json.dumps({'error': 'text_parameter is required'})
            }

        # Route to the appropriate function
        if function_name == 'function_a':
            result = function_a.execute(text_parameter)
        elif function_name == 'function_b':
            result = function_b.execute(text_parameter)
        else:
            return {
                'statusCode': 400,
                'body': json.dumps({'error': f'Unknown function: {function_name}. Use "function_a" or "function_b"'})
            }

        # Return success response
        return {
            'statusCode': 200,
            'body': json.dumps({
                'message': 'Success',
                'result': result
            })
        }

    except Exception as e:
        print(f"Error: {str(e)}")
        import traceback
        print(traceback.format_exc())
        return {
            'statusCode': 500,
            'body': json.dumps({
                'error': str(e),
                'type': type(e).__name__
            })
        }


# For local testing
if __name__ == "__main__":
    # Test event
    test_event = {
        'function_name': 'function_a',
        'text_parameter': 'Hello from Lambda!'
    }

    result = lambda_handler(test_event, None)
    print(json.dumps(result, indent=2))
4
label/infra/code/requirements.txt
Normal file
@@ -0,0 +1,4 @@
Pillow
numpy
scipy
pdf2image
8
label/infra/ecr/Pulumi.coodex.yaml
Normal file
@@ -0,0 +1,8 @@
config:
  ecr_dev:entity_extraction_dev: ecr
  ecr_dev:environment: dev
  ecr_dev:ecr:
    entity_extraction:
      image_mutability: MUTABLE
      name: rekognition-valvulas-funcao
  ecr_dev:project: Rekognition Valvula Funcao
3
label/infra/ecr/Pulumi.yaml
Normal file
@@ -0,0 +1,3 @@
name: ecr_dev
runtime: python
description: ECR application infrastructure
93
label/infra/ecr/README.md
Normal file
@@ -0,0 +1,93 @@
# ecr

## Getting started

To make it easy for you to get started with GitLab, here's a list of recommended next steps.

Already a pro? Just edit this README.md and make it your own. Want to make it easy? [Use the template at the bottom](#editing-this-readme)!

## Add your files

- [ ] [Create](https://docs.gitlab.com/ee/user/project/repository/web_editor.html#create-a-file) or [upload](https://docs.gitlab.com/ee/user/project/repository/web_editor.html#upload-a-file) files
- [ ] [Add files using the command line](https://docs.gitlab.com/ee/gitlab-basics/add-file.html#add-a-file-using-the-command-line) or push an existing Git repository with the following command:

```
cd existing_repo
git remote add origin https://gitlab.shared.cloud.dnxbrasil.com.br/dnx-br/sandbox/genai/ecr.git
git branch -M main
git push -uf origin main
```

## Integrate with your tools

- [ ] [Set up project integrations](https://gitlab.shared.cloud.dnxbrasil.com.br/dnx-br/sandbox/genai/ecr/-/settings/integrations)

## Collaborate with your team

- [ ] [Invite team members and collaborators](https://docs.gitlab.com/ee/user/project/members/)
- [ ] [Create a new merge request](https://docs.gitlab.com/ee/user/project/merge_requests/creating_merge_requests.html)
- [ ] [Automatically close issues from merge requests](https://docs.gitlab.com/ee/user/project/issues/managing_issues.html#closing-issues-automatically)
- [ ] [Enable merge request approvals](https://docs.gitlab.com/ee/user/project/merge_requests/approvals/)
- [ ] [Set auto-merge](https://docs.gitlab.com/ee/user/project/merge_requests/merge_when_pipeline_succeeds.html)

## Test and Deploy

Use the built-in continuous integration in GitLab.

- [ ] [Get started with GitLab CI/CD](https://docs.gitlab.com/ee/ci/quick_start/index.html)
- [ ] [Analyze your code for known vulnerabilities with Static Application Security Testing (SAST)](https://docs.gitlab.com/ee/user/application_security/sast/)
- [ ] [Deploy to Kubernetes, Amazon EC2, or Amazon ECS using Auto Deploy](https://docs.gitlab.com/ee/topics/autodevops/requirements.html)
- [ ] [Use pull-based deployments for improved Kubernetes management](https://docs.gitlab.com/ee/user/clusters/agent/)
- [ ] [Set up protected environments](https://docs.gitlab.com/ee/ci/environments/protected_environments.html)

***

# Editing this README

When you're ready to make this README your own, just edit this file and use the handy template below (or feel free to structure it however you want - this is just a starting point!). Thanks to [makeareadme.com](https://www.makeareadme.com/) for this template.

## Suggestions for a good README

Every project is different, so consider which of these sections apply to yours. The sections used in the template are suggestions for most open source projects. Also keep in mind that while a README can be too long and detailed, too long is better than too short. If you think your README is too long, consider utilizing another form of documentation rather than cutting out information.

## Name
Choose a self-explaining name for your project.

## Description
Let people know what your project can do specifically. Provide context and add a link to any reference visitors might be unfamiliar with. A list of Features or a Background subsection can also be added here. If there are alternatives to your project, this is a good place to list differentiating factors.

## Badges
On some READMEs, you may see small images that convey metadata, such as whether or not all the tests are passing for the project. You can use Shields to add some to your README. Many services also have instructions for adding a badge.

## Visuals
Depending on what you are making, it can be a good idea to include screenshots or even a video (you'll frequently see GIFs rather than actual videos). Tools like ttygif can help, but check out Asciinema for a more sophisticated method.

## Installation
Within a particular ecosystem, there may be a common way of installing things, such as using Yarn, NuGet, or Homebrew. However, consider the possibility that whoever is reading your README is a novice and would like more guidance. Listing specific steps helps remove ambiguity and gets people using your project as quickly as possible. If it only runs in a specific context like a particular programming language version or operating system or has dependencies that have to be installed manually, also add a Requirements subsection.

## Usage
Use examples liberally, and show the expected output if you can. It's helpful to inline the smallest example of usage that you can demonstrate, while providing links to more sophisticated examples if they are too long to reasonably include in the README.

## Support
Tell people where they can go for help. It can be any combination of an issue tracker, a chat room, an email address, etc.

## Roadmap
If you have ideas for releases in the future, it is a good idea to list them in the README.

## Contributing
State if you are open to contributions and what your requirements are for accepting them.

For people who want to make changes to your project, it's helpful to have some documentation on how to get started. Perhaps there is a script that they should run or some environment variables that they need to set. Make these steps explicit. These instructions could also be useful to your future self.

You can also document commands to lint the code or run tests. These steps help to ensure high code quality and reduce the likelihood that the changes inadvertently break something. Having instructions for running tests is especially helpful if it requires external setup, such as starting a Selenium server for testing in a browser.

## Authors and acknowledgment
Show your appreciation to those who have contributed to the project.

## License
For open source projects, say how it is licensed.

## Project status
If you have run out of energy or time for your project, put a note at the top of the README saying that development has slowed down or stopped completely. Someone may choose to fork your project or volunteer to step in as a maintainer or owner, allowing your project to keep going. You can also make an explicit request for maintainers.
26
label/infra/ecr/__main__.py
Normal file
@@ -0,0 +1,26 @@
import json
import pulumi
import pulumi_aws as aws

caller_identity = aws.get_caller_identity()
account_id = caller_identity.account_id

config = pulumi.Config()
project = config.require("project")
environment = config.require("environment")

ecr_config = config.require_object("ecr")["entity_extraction"]

ecr_repo = aws.ecr.Repository(ecr_config['name'],
    name=ecr_config['name'],
    encryption_configurations=[{
        "encryption_type": "AES256",
    }],
    image_scanning_configuration={
        "scan_on_push": False,
    },
    image_tag_mutability=ecr_config['image_mutability'],
    opts=pulumi.ResourceOptions(protect=False))

pulumi.export("url", pulumi.Output.concat("ECR REPO ID:", ecr_repo.id))
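For context, a minimal sketch of the object the `config.require_object("ecr")` call above expects, written as the Python value it would return. The key names come from the lookups in the code; the values are hypothetical placeholders, not taken from the repo:

    # Hypothetical value returned by config.require_object("ecr")
    ecr_object = {
        "entity_extraction": {
            "name": "rekognition-valvulas-funcao",  # placeholder; should match the Lambda stack's ecr.repo_name
            "image_mutability": "MUTABLE",          # or "IMMUTABLE"
        }
    }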
5
label/infra/ecr/requirements.txt
Normal file
@@ -0,0 +1,5 @@
pulumi
pulumi-aws
pulumi-docker
boto3
setuptools
736
label/infra/handler.py
Normal file
@@ -0,0 +1,736 @@
import boto3
import json
import base64
from io import BytesIO
from PIL import Image, ImageDraw
import numpy as np
from scipy.optimize import linear_sum_assignment
import re
from pdf2image import convert_from_bytes

# Configuration
REGION = 'us-east-1'
CUSTOM_LABELS_PROJECT_ARN = 'modelid'
CONFIDENCE_THRESHOLD = 80

class InMemoryDiagramProcessor:
    """Process diagrams entirely in memory for Lambda"""

    def __init__(self, region=REGION, custom_labels_arn=CUSTOM_LABELS_PROJECT_ARN):
        self.textract_client = boto3.client('textract', region_name=region)
        self.rekognition_client = boto3.client('rekognition', region_name=region)
        self.custom_labels_arn = custom_labels_arn
        self.region = region

    def segment_image(self, img, grid_size=(5, 5), overlap_percent=10):
        """
        Segment PIL Image into grid with overlap (in-memory)
        Returns list of (PIL Image, position_info) tuples
        """
        img_width, img_height = img.size
        rows, cols = grid_size

        overlap_factor = overlap_percent / 100.0
        segment_width = img_width / cols
        segment_height = img_height / rows

        step_width = segment_width * (1 - overlap_factor)
        step_height = segment_height * (1 - overlap_factor)

        segments = []

        for row in range(rows):
            for col in range(cols):
                left = int(col * step_width)
                top = int(row * step_height)
                right = int(min(left + segment_width, img_width))
                bottom = int(min(top + segment_height, img_height))

                segment = img.crop((left, top, right, bottom))

                position_info = {
                    'row': row,
                    'col': col,
                    'left': left,
                    'top': top,
                    'right': right,
                    'bottom': bottom,
                    'width': right - left,
                    'height': bottom - top
                }

                segments.append((segment, position_info))

        return segments
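
    # Illustrative note (not part of the original file): with a hypothetical
    # 1000x800 px image, grid_size=(5, 5) and overlap_percent=10, each segment
    # is 200x160 px and the grid advances 180 px horizontally and 144 px
    # vertically, so adjacent segments share a 20 px band horizontally and a
    # 16 px band vertically. Objects cut by one segment boundary are then
    # usually whole in a neighbouring segment; the duplicates this creates
    # are removed later by deduplicate_detections().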

    def pil_to_bytes(self, pil_image):
        """Convert PIL Image to bytes for AWS API calls"""
        buffer = BytesIO()
        pil_image.save(buffer, format='PNG')
        return buffer.getvalue()

    def detect_text_segment(self, segment_image):
        """Detect text in PIL Image segment using Textract"""
        image_bytes = self.pil_to_bytes(segment_image)

        result = self.textract_client.detect_document_text(
            Document={'Bytes': image_bytes}
        )

        return result

    def clean_text_from_segment(self, segment_image, textract_data,
                                shrink_percent=8.5, keep_regex_list=None, min_confidence=80):
        """Remove text from PIL Image segment (in-memory)"""
        compiled_patterns = []
        if keep_regex_list:
            for pattern in keep_regex_list:
                try:
                    compiled_patterns.append(re.compile(pattern))
                except re.error:
                    pass

        img = segment_image.copy()
        width, height = img.size
        draw = ImageDraw.Draw(img)

        words_removed = 0
        words_kept = 0

        for block in textract_data['Blocks']:
            if block['BlockType'] == 'WORD':
                text = block['Text']
                confidence = block['Confidence']

                # Keep low-confidence words (likely diagram symbols, not text)
                # and words matching any of the keep patterns; count each kept
                # word once
                should_keep = confidence < min_confidence
                if not should_keep and compiled_patterns:
                    for pattern in compiled_patterns:
                        if pattern.match(text):
                            should_keep = True
                            break

                if should_keep:
                    words_kept += 1
                    continue

                bbox = block['Geometry']['BoundingBox']
                left = int(bbox['Left'] * width)
                top = int(bbox['Top'] * height)
                box_width = int(bbox['Width'] * width)
                box_height = int(bbox['Height'] * height)

                # Shrink the box slightly so the white fill does not clip
                # diagram lines touching the text
                if shrink_percent > 0:
                    shrink_factor = shrink_percent / 100
                    width_reduction = int(box_width * shrink_factor / 2)
                    height_reduction = int(box_height * shrink_factor / 2)

                    left += width_reduction
                    top += height_reduction
                    box_width -= width_reduction * 2
                    box_height -= height_reduction * 2

                draw.rectangle(
                    [(left, top), (left + box_width, top + box_height)],
                    fill='white'
                )
                words_removed += 1

        return img, {'words_removed': words_removed, 'words_kept': words_kept}
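
    # Illustrative note (not part of the original file): with the default
    # keep_regex_list used by the Lambda handler below, r'\+', r'.*[Xx].*',
    # r'\*' and r'\\', a detected word such as "X" or "2x4" is preserved
    # (pattern match), a confidently detected tag like "VM-0012" is painted
    # over, and any word below min_confidence is left untouched as a likely
    # symbol. Note the text detections themselves were collected before
    # cleaning, so painted-over tags can still be matched to objects later.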

    def recognize_objects_segment(self, segment_image, min_confidence=CONFIDENCE_THRESHOLD):
        """Recognize objects in PIL Image using Custom Labels"""
        image_bytes = self.pil_to_bytes(segment_image)

        try:
            response = self.rekognition_client.detect_custom_labels(
                ProjectVersionArn=self.custom_labels_arn,
                Image={'Bytes': image_bytes},
                MinConfidence=min_confidence
            )

            return {
                'custom_labels': response.get('CustomLabels', []),
                'success': True
            }
        except Exception as e:
            return {
                'custom_labels': [],
                'success': False,
                'error': str(e)
            }

    def calculate_iou(self, box1, box2):
        """Calculate IoU between two bounding boxes"""
        x_left = max(box1['left'], box2['left'])
        y_top = max(box1['top'], box2['top'])
        x_right = min(box1['right'], box2['right'])
        y_bottom = min(box1['bottom'], box2['bottom'])

        if x_right < x_left or y_bottom < y_top:
            return 0.0

        intersection_area = (x_right - x_left) * (y_bottom - y_top)

        box1_area = (box1['right'] - box1['left']) * (box1['bottom'] - box1['top'])
        box2_area = (box2['right'] - box2['left']) * (box2['bottom'] - box2['top'])
        union_area = box1_area + box2_area - intersection_area

        if union_area == 0:
            return 0.0

        return intersection_area / union_area
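
    # Illustrative note (not part of the original file): for two hypothetical
    # 100x100 boxes offset by 50 px in x, the intersection is 50*100 = 5000
    # and the union is 10000 + 10000 - 5000 = 15000, so IoU = 5000/15000,
    # about 0.33 — just above the default 0.3 threshold used by
    # deduplicate_detections(), meaning the two detections would be merged.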

    def merge_bounding_boxes(self, boxes):
        """Merge multiple bounding boxes into one"""
        if not boxes:
            return None

        return {
            'left': min(box['left'] for box in boxes),
            'top': min(box['top'] for box in boxes),
            'right': max(box['right'] for box in boxes),
            'bottom': max(box['bottom'] for box in boxes)
        }

    def deduplicate_detections(self, all_detections, iou_threshold=0.3):
        """Remove duplicate detections by grouping overlapping boxes of the
        same label (NMS-style) and merging each group into a single box"""
        if not all_detections:
            return []

        detections_by_label = {}
        for det in all_detections:
            label = det['Name']
            if label not in detections_by_label:
                detections_by_label[label] = []
            detections_by_label[label].append(det)

        deduplicated = []

        for label, detections in detections_by_label.items():
            detections = sorted(detections, key=lambda x: x['Confidence'], reverse=True)

            groups = []
            used = set()

            for i, det in enumerate(detections):
                if i in used:
                    continue

                group = [det]
                used.add(i)

                for j, other_det in enumerate(detections):
                    if j in used or j == i:
                        continue

                    iou = self.calculate_iou(det['global_bbox'], other_det['global_bbox'])

                    if iou > iou_threshold:
                        group.append(other_det)
                        used.add(j)

                groups.append(group)

            for group in groups:
                if len(group) == 1:
                    deduplicated.append(group[0])
                else:
                    merged_bbox = self.merge_bounding_boxes([d['global_bbox'] for d in group])
                    merged_bbox['width'] = merged_bbox['right'] - merged_bbox['left']
                    merged_bbox['height'] = merged_bbox['bottom'] - merged_bbox['top']

                    avg_confidence = sum(d['Confidence'] for d in group) / len(group)

                    merged_detection = {
                        'Name': label,
                        'Confidence': avg_confidence,
                        'global_bbox': merged_bbox,
                        'merged_from': len(group)
                    }

                    deduplicated.append(merged_detection)

        return deduplicated

    def deduplicate_text_detections(self, all_text_detections, iou_threshold=0.5):
        """Remove duplicate text detections (same text, overlapping boxes),
        keeping the highest-confidence instance"""
        if not all_text_detections:
            return []

        all_text_detections = sorted(all_text_detections, key=lambda x: x['confidence'], reverse=True)

        deduplicated = []
        used = set()

        for i, text_det in enumerate(all_text_detections):
            if i in used:
                continue

            used.add(i)

            for j, other_det in enumerate(all_text_detections):
                if j in used or j == i:
                    continue

                if text_det['text'].lower() == other_det['text'].lower():
                    iou = self.calculate_iou(text_det['global_bbox'], other_det['global_bbox'])

                    if iou > iou_threshold:
                        used.add(j)

            deduplicated.append(text_det)

        return deduplicated

    def get_bbox_center(self, bbox):
        """Get center point of bounding box"""
        center_x = bbox['left'] + bbox['width'] / 2
        center_y = bbox['top'] + bbox['height'] / 2
        return (center_x, center_y)

    def calculate_distance(self, center1, center2):
        """Calculate Euclidean distance (as a plain float, so results stay
        JSON-serializable)"""
        return float(np.sqrt((center1[0] - center2[0])**2 + (center1[1] - center2[1])**2))

    def match_objects_to_text_by_type(self, objects, all_text_detections, max_distance=200):
        """Match objects to text based on object type"""
        VM_LABEL_OBJECTS = ['globo', 'gaveta', 'retencao', 'espera']
        TWO_LABEL_OBJECTS = ['sis_con_dist', 'instrumento_local']

        vm_label_objects = []
        two_label_objects = []
        single_label_objects = []

        for obj in objects:
            obj_name = obj['Name'].lower()
            if obj_name in VM_LABEL_OBJECTS:
                vm_label_objects.append(obj)
            elif obj_name in TWO_LABEL_OBJECTS:
                two_label_objects.append(obj)
            else:
                single_label_objects.append(obj)

        vm_pattern = re.compile(r'VM-\d{4}')
        vm_texts = [t for t in all_text_detections if vm_pattern.search(t['text'])]
        other_texts = [t for t in all_text_detections if not vm_pattern.search(t['text'])]

        all_matches = []
        all_unmatched_objects = []
        all_unmatched_texts = []
        used_texts = set()

        # Part 1: Match VM-#### objects using the Hungarian algorithm
        if vm_label_objects and vm_texts:
            n_objects = len(vm_label_objects)
            n_texts = len(vm_texts)

            max_dim = max(n_objects, n_texts)
            cost_matrix = np.full((max_dim, max_dim), 1e10)

            for i, obj in enumerate(vm_label_objects):
                obj_center = self.get_bbox_center(obj['global_bbox'])

                for j, text_data in enumerate(vm_texts):
                    text_center = self.get_bbox_center(text_data['global_bbox'])
                    distance = self.calculate_distance(obj_center, text_center)

                    if max_distance and distance > max_distance:
                        cost_matrix[i, j] = 1e10
                    else:
                        cost_matrix[i, j] = distance

            row_indices, col_indices = linear_sum_assignment(cost_matrix)

            matched_obj_indices = set()
            matched_text_indices = set()

            for obj_idx, text_idx in zip(row_indices, col_indices):
                if (obj_idx >= n_objects or text_idx >= n_texts or
                        cost_matrix[obj_idx, text_idx] >= 1e10):
                    continue

                # Cast to float so the value is JSON-serializable downstream
                distance = float(cost_matrix[obj_idx, text_idx])

                match = {
                    'object_name': vm_label_objects[obj_idx]['Name'],
                    'object_bbox': vm_label_objects[obj_idx]['global_bbox'],
                    'object_confidence': vm_label_objects[obj_idx]['Confidence'],
                    'text': vm_texts[text_idx]['text'],
                    'text_bbox': vm_texts[text_idx]['global_bbox'],
                    'text_confidence': vm_texts[text_idx]['confidence'],
                    'distance': distance,
                    'match_type': 'vm_label'
                }

                all_matches.append(match)
                matched_obj_indices.add(obj_idx)
                matched_text_indices.add(text_idx)

            all_unmatched_objects.extend([vm_label_objects[i] for i in range(n_objects)
                                          if i not in matched_obj_indices])
            all_unmatched_texts.extend([vm_texts[j] for j in range(n_texts)
                                        if j not in matched_text_indices])

        # Part 2: Match two-label objects (two tag texts inside the symbol,
        # reported top/bottom)
        for obj in two_label_objects:
            obj_bbox = obj['global_bbox']
            obj_center_x = obj_bbox['left'] + obj_bbox['width'] / 2
            obj_center_y = obj_bbox['top'] + obj_bbox['height'] / 2

            texts_inside = []
            for text_data in other_texts:
                if id(text_data) in used_texts:
                    continue

                text_bbox = text_data['global_bbox']
                text_center_x = text_bbox['left'] + text_bbox['width'] / 2
                text_center_y = text_bbox['top'] + text_bbox['height'] / 2

                if (obj_bbox['left'] <= text_center_x <= obj_bbox['right'] and
                        obj_bbox['top'] <= text_center_y <= obj_bbox['bottom']):

                    distance_to_center = self.calculate_distance(
                        (obj_center_x, obj_center_y),
                        (text_center_x, text_center_y)
                    )

                    texts_inside.append({
                        'text_data': text_data,
                        'distance_to_center': distance_to_center,
                        'y_position': text_center_y
                    })

            if len(texts_inside) >= 2:
                texts_inside.sort(key=lambda t: t['distance_to_center'])
                closest_two = texts_inside[:2]
                closest_two.sort(key=lambda t: t['y_position'])

                top_text = closest_two[0]['text_data']
                bottom_text = closest_two[1]['text_data']

                match = {
                    'object_name': obj['Name'],
                    'object_bbox': obj_bbox,
                    'object_confidence': obj['Confidence'],
                    'text': f"{top_text['text']} / {bottom_text['text']}",
                    'text_top': top_text['text'],
                    'text_bottom': bottom_text['text'],
                    'text_bbox_top': top_text['global_bbox'],
                    'text_bbox_bottom': bottom_text['global_bbox'],
                    'text_confidence_top': top_text['confidence'],
                    'text_confidence_bottom': bottom_text['confidence'],
                    'distance': 0,
                    'match_type': 'two_labels'
                }

                all_matches.append(match)
                used_texts.add(id(top_text))
                used_texts.add(id(bottom_text))
            else:
                all_unmatched_objects.append(obj)

        # Part 3: Match single-label objects (closest text inside the symbol)
        for obj in single_label_objects:
            obj_bbox = obj['global_bbox']
            obj_center_x = obj_bbox['left'] + obj_bbox['width'] / 2
            obj_center_y = obj_bbox['top'] + obj_bbox['height'] / 2

            texts_inside = []
            for text_data in other_texts:
                if id(text_data) in used_texts:
                    continue

                text_bbox = text_data['global_bbox']
                text_center_x = text_bbox['left'] + text_bbox['width'] / 2
                text_center_y = text_bbox['top'] + text_bbox['height'] / 2

                if (obj_bbox['left'] <= text_center_x <= obj_bbox['right'] and
                        obj_bbox['top'] <= text_center_y <= obj_bbox['bottom']):
                    texts_inside.append(text_data)

            if texts_inside:
                closest_text = min(texts_inside, key=lambda t: self.calculate_distance(
                    (obj_center_x, obj_center_y),
                    (t['global_bbox']['left'] + t['global_bbox']['width'] / 2,
                     t['global_bbox']['top'] + t['global_bbox']['height'] / 2)
                ))

                text_center_x = closest_text['global_bbox']['left'] + closest_text['global_bbox']['width'] / 2
                text_center_y = closest_text['global_bbox']['top'] + closest_text['global_bbox']['height'] / 2
                distance_to_center = self.calculate_distance(
                    (obj_center_x, obj_center_y),
                    (text_center_x, text_center_y)
                )

                match = {
                    'object_name': obj['Name'],
                    'object_bbox': obj_bbox,
                    'object_confidence': obj['Confidence'],
                    'text': closest_text['text'],
                    'text_bbox': closest_text['global_bbox'],
                    'text_confidence': closest_text['confidence'],
                    'distance': distance_to_center,
                    'match_type': 'single_label'
                }

                all_matches.append(match)
                used_texts.add(id(closest_text))
            else:
                all_unmatched_objects.append(obj)

        for text_data in other_texts:
            if id(text_data) not in used_texts:
                all_unmatched_texts.append(text_data)

        return {
            'matches': all_matches,
            'unmatched_objects': all_unmatched_objects,
            'unmatched_texts': all_unmatched_texts,
            'n_objects': len(objects),
            'n_texts': len(all_text_detections),
            'matching_rate': len(all_matches) / len(objects) if objects else 0
        }
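
    # Illustrative note (not part of the original file): in the VM-#### step
    # above, with 2 valve objects and 3 candidate tags the cost matrix is
    # padded to 3x3 with 1e10 so linear_sum_assignment() still receives a
    # square problem; assignments whose cost is 1e10 (padding rows/columns,
    # or pairs farther apart than max_distance) are then discarded, leaving
    # each valve paired with its nearest unique VM tag.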

    def process_diagram_inmemory(self, pil_image, grid_size=(5, 5), overlap_percent=10,
                                 keep_regex_list=None, min_confidence=80,
                                 custom_labels_confidence=80, iou_threshold=0.3,
                                 matching_max_distance=200):
        """
        Complete in-memory pipeline
        Returns only the matches
        """
        img_width, img_height = pil_image.size

        # Step 1: Segment
        segments = self.segment_image(pil_image, grid_size, overlap_percent)

        all_global_detections = []
        all_text_detections = []

        # Steps 2-4: Process each segment
        for segment_image, position_info in segments:
            # Detect text
            textract_data = self.detect_text_segment(segment_image)

            # Extract text with global coordinates
            for block in textract_data['Blocks']:
                if block['BlockType'] == 'WORD':
                    bbox = block['Geometry']['BoundingBox']

                    seg_left = position_info['left']
                    seg_top = position_info['top']
                    seg_width = position_info['width']
                    seg_height = position_info['height']

                    global_left = seg_left + int(bbox['Left'] * seg_width)
                    global_top = seg_top + int(bbox['Top'] * seg_height)
                    global_width = int(bbox['Width'] * seg_width)
                    global_height = int(bbox['Height'] * seg_height)

                    all_text_detections.append({
                        'text': block['Text'],
                        'confidence': block['Confidence'],
                        'global_bbox': {
                            'left': global_left,
                            'top': global_top,
                            'right': global_left + global_width,
                            'bottom': global_top + global_height,
                            'width': global_width,
                            'height': global_height
                        }
                    })

            # Clean text
            cleaned_image, _ = self.clean_text_from_segment(
                segment_image, textract_data,
                keep_regex_list=keep_regex_list, min_confidence=min_confidence
            )

            # Recognize objects
            detection_results = self.recognize_objects_segment(
                cleaned_image, min_confidence=custom_labels_confidence
            )

            if detection_results['success']:
                labels = detection_results['custom_labels']

                for label in labels:
                    if 'Geometry' in label and 'BoundingBox' in label['Geometry']:
                        bbox = label['Geometry']['BoundingBox']

                        seg_left = position_info['left']
                        seg_top = position_info['top']
                        seg_width = position_info['width']
                        seg_height = position_info['height']

                        global_left = seg_left + int(bbox['Left'] * seg_width)
                        global_top = seg_top + int(bbox['Top'] * seg_height)
                        global_width = int(bbox['Width'] * seg_width)
                        global_height = int(bbox['Height'] * seg_height)

                        global_detection = {
                            'Name': label['Name'],
                            'Confidence': label['Confidence'],
                            'global_bbox': {
                                'left': global_left,
                                'top': global_top,
                                'right': global_left + global_width,
                                'bottom': global_top + global_height,
                                'width': global_width,
                                'height': global_height
                            }
                        }

                        all_global_detections.append(global_detection)

        # Step 5: Deduplicate
        deduplicated_detections = self.deduplicate_detections(
            all_global_detections, iou_threshold=iou_threshold
        )

        deduplicated_text = self.deduplicate_text_detections(
            all_text_detections, iou_threshold=0.5
        )

        # Step 6: Match objects to text
        matching_results = self.match_objects_to_text_by_type(
            objects=deduplicated_detections,
            all_text_detections=deduplicated_text,
            max_distance=matching_max_distance
        )

        return matching_results


# ==================== LAMBDA HANDLER ====================

def lambda_handler(event, context):
    """
    AWS Lambda handler function

    Expected event formats:
    1. PDF as base64 in body:
       {
           "pdf_base64": "<base64-encoded-pdf>",
           "config": {
               "grid_size": [5, 5],
               "overlap_percent": 10,
               ...
           }
       }

    2. PDF in S3:
       {
           "s3_bucket": "bucket-name",
           "s3_key": "path/to/file.pdf",
           "config": {...}
       }
    """

    try:
        # Parse event
        if isinstance(event.get('body'), str):
            body = json.loads(event['body'])
        else:
            body = event

        # Extract configuration
        config = body.get('config', {})
        grid_size = tuple(config.get('grid_size', [5, 5]))
        overlap_percent = config.get('overlap_percent', 10)
        keep_regex_list = config.get('keep_regex_list', [r'\+', r'.*[Xx].*', r'\*', r'\\'])
        min_confidence = config.get('min_confidence', 80)
        custom_labels_confidence = config.get('custom_labels_confidence', 60)
        iou_threshold = config.get('iou_threshold', 0.3)
        matching_max_distance = config.get('matching_max_distance', 200)
        custom_labels_arn = config.get('custom_labels_arn', CUSTOM_LABELS_PROJECT_ARN)
        dpi = config.get('dpi', 200)

        # Get PDF bytes
        if 'pdf_base64' in body:
            # PDF provided as base64 in the request
            pdf_bytes = base64.b64decode(body['pdf_base64'])
        elif 's3_bucket' in body and 's3_key' in body:
            # PDF in S3
            s3_client = boto3.client('s3')
            response = s3_client.get_object(
                Bucket=body['s3_bucket'],
                Key=body['s3_key']
            )
            pdf_bytes = response['Body'].read()
        else:
            return {
                'statusCode': 400,
                'body': json.dumps({
                    'error': 'Must provide either pdf_base64 or s3_bucket/s3_key'
                })
            }

        # Convert PDF to image (first page only, or the page given in config)
        page_num = config.get('page', 0)  # 0-indexed
        images = convert_from_bytes(pdf_bytes, dpi=dpi, first_page=page_num + 1, last_page=page_num + 1)

        if not images:
            return {
                'statusCode': 400,
                'body': json.dumps({
                    'error': 'Could not convert PDF to image'
                })
            }

        diagram_image = images[0]

        # Initialize processor
        processor = InMemoryDiagramProcessor(
            region=REGION,
            custom_labels_arn=custom_labels_arn
        )

        # Process diagram
        matching_results = processor.process_diagram_inmemory(
            pil_image=diagram_image,
            grid_size=grid_size,
            overlap_percent=overlap_percent,
            keep_regex_list=keep_regex_list,
            min_confidence=min_confidence,
            custom_labels_confidence=custom_labels_confidence,
            iou_threshold=iou_threshold,
            matching_max_distance=matching_max_distance
        )

        # Return only matches
        return {
            'statusCode': 200,
            'headers': {
                'Content-Type': 'application/json'
            },
            'body': json.dumps({
                'matches': matching_results['matches'],
                'summary': {
                    'total_matches': len(matching_results['matches']),
                    'unmatched_objects': len(matching_results['unmatched_objects']),
                    'unmatched_texts': len(matching_results['unmatched_texts']),
                    'matching_rate': matching_results['matching_rate']
                }
            })
        }

    except Exception as e:
        print(f"Error processing diagram: {str(e)}")
        import traceback
        traceback.print_exc()

        return {
            'statusCode': 500,
            'body': json.dumps({
                'error': str(e),
                'error_type': type(e).__name__
            })
        }
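For context, a minimal sketch of invoking the deployed handler directly with boto3. The logical function name comes from the stack file that follows, but Pulumi appends a random suffix to the physical name, so check the deployed name; the bucket and key are hypothetical placeholders:

    import json

    import boto3

    payload = {
        "s3_bucket": "custom-labels-valvulas-bloco-funcao",  # hypothetical: any bucket the Lambda role can read
        "s3_key": "diagrams/example.pdf",                    # hypothetical key
        "config": {"grid_size": [5, 5], "overlap_percent": 10, "page": 0},
    }

    client = boto3.client("lambda", region_name="us-east-1")
    response = client.invoke(
        FunctionName="labels-valvula-bloco-funcao",  # logical name from the config below
        Payload=json.dumps(payload).encode("utf-8"),
    )
    result = json.loads(response["Payload"].read())
    print(json.loads(result["body"])["summary"])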
35
label/infra/lambda_api_gateway/Pulumi.coodez.yaml
Normal file
@@ -0,0 +1,35 @@
config:
  aws:region: us-east-1
  project_name: labels-valvula-bloco-funcao
  lambda-api:
    - name: labels-valvula-bloco-funcao
      network_config:
        is_private: true
        vpc_id: vpc-098bd05c4ef524627
        private_subnet_ids: # using an API VPC Endpoint in 1 subnet is cheaper than in 2 or more
          - subnet-0c8b5a9233eff22b4
          # - subnet-00adc4773686d8c1b
      timeout: 900
      memory: 2048
      ecr:
        repo_name: rekognition-valvulas-funcao
        tag: latest
      #env_vars:
      provisioned_concurrency: 0
      api_gateway:
        use_api_gw: true
        communication_type: REST # WEBSOCKET | HTTP | REST  # TODO: implement all
        type: REGIONAL # PRIVATE | REGIONAL | EDGE
        authorization: NONE # | AWS_IAM | ...
        allow_inbound_any: true
        allow_inbound_cidrs:
          - 3.14.44.224/32 # IP VPN DNX
        create_and_allow_vpce: true # only used for the PRIVATE api type
        stage_name: dev
        routes:
          - method: POST
            path: /execute
      iam:
        managed_policies: []
        #custom_policies:
11
label/infra/lambda_api_gateway/Pulumi.yaml
Normal file
@@ -0,0 +1,11 @@
name: lambda-api
runtime:
  name: python
  options:
    toolchain: pip
    virtualenv: venv
description: A Python program to deploy a serverless application on AWS
config:
  pulumi:tags:
    value:
      pulumi:template: serverless-aws-python
110
label/infra/lambda_api_gateway/__main__.py
Normal file
@@ -0,0 +1,110 @@
import json
import pulumi
import pulumi_aws as aws
import pulumi_aws_apigateway as apigateway
import api_gw

config = pulumi.Config()
aws_config = pulumi.Config("aws")
aws_region = aws_config.require("region")
account_id = aws.get_caller_identity().account_id


def create_lambda_role(lambda_name, iam_config=None):
    """Create IAM role for Lambda with configurable policies"""

    # Base managed policies
    managed_policies = [aws.iam.ManagedPolicy.AWS_LAMBDA_BASIC_EXECUTION_ROLE,
                        aws.iam.ManagedPolicy.AWS_LAMBDA_VPC_ACCESS_EXECUTION_ROLE]

    if iam_config and "managed_policies" in iam_config:
        managed_policies.extend(iam_config["managed_policies"])

    role = aws.iam.Role(f"role-{lambda_name}",
        assume_role_policy=json.dumps({
            "Version": "2012-10-17",
            "Statement": [{
                "Action": "sts:AssumeRole",
                "Effect": "Allow",
                "Principal": {"Service": "lambda.amazonaws.com"},
            }],
        }),
        managed_policy_arns=managed_policies
    )

    # Create custom inline policies from the YAML config
    if iam_config and "custom_policies" in iam_config:
        for policy in iam_config["custom_policies"]:
            aws.iam.RolePolicy(f"{lambda_name}-{policy['name']}",
                role=role.id,
                policy=json.dumps({
                    "Version": "2012-10-17",
                    "Statement": [{
                        "Effect": policy["effect"],
                        "Action": policy["actions"],
                        "Resource": policy["resources"]
                    }]
                })
            )

    return role


lambda_api_configs = config.require_object("lambda-api")
for l_api in lambda_api_configs:
    ecr_repo = aws.ecr.get_repository(name=l_api["ecr"]["repo_name"])
    ecr_image = aws.ecr.get_image(repository_name=l_api["ecr"]["repo_name"], image_tag=l_api["ecr"]["tag"])
    lambda_name = l_api["name"]
    if not l_api["network_config"]["is_private"]:
        raise NotImplementedError("Public Lambda functions are not implemented yet")
    else:
        lambda_sg = aws.ec2.SecurityGroup(f"lambda-sg-{lambda_name}",
            description=f"SG for Lambda {lambda_name}",
            egress=[{
                "cidr_blocks": ["0.0.0.0/0"],
                "from_port": 0,
                "protocol": "-1",
                "to_port": 0,
            }],
            name=lambda_name,
            vpc_id=l_api["network_config"]["vpc_id"],
        )

    # Create role for this specific Lambda
    role = create_lambda_role(lambda_name, l_api.get("iam"))
    if "env_vars" in l_api:
        variables = {k: v for k, v in l_api["env_vars"].items()}
    else:
        variables = {}
    # Define the Lambda function from the container image already pushed to ECR
    fn = aws.lambda_.Function(f"{lambda_name}",
        package_type="Image",
        # Pin to the image digest so a new push to the tag triggers a redeploy
        image_uri=pulumi.Output.all(ecr_repo.repository_url, ecr_image.image_digest).apply(lambda args: f'{args[0]}@{args[1]}'),
        role=role.arn,
        timeout=l_api["timeout"],
        memory_size=l_api["memory"],
        environment={
            "variables": variables
        },
        vpc_config=dict(
            ipv6_allowed_for_dual_stack=False,
            subnet_ids=l_api["network_config"]["private_subnet_ids"],
            security_group_ids=[lambda_sg.id]
        ),
        publish=l_api["provisioned_concurrency"] > 0,  # a published version is required for provisioned concurrency
    )

    if l_api["provisioned_concurrency"] > 0:
        lambda_concurrency = aws.lambda_.ProvisionedConcurrencyConfig(l_api["name"],
            provisioned_concurrent_executions=l_api["provisioned_concurrency"],
            function_name=fn.name,
            qualifier=fn.version
        )

    api_config = l_api["api_gateway"]
    if api_config["use_api_gw"]:
        if api_config["communication_type"] == "HTTP":
            api_gw.create_api_gatewayv2(api_config, l_api, fn, aws_region, config.require('project_name'))
        else:
            api_gw.create_api_gateway(api_config, l_api, fn, account_id, aws_region, config.require('project_name'))
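The `custom_policies` branch of `create_lambda_role` above is driven by a YAML section the sample stack file leaves commented out. As a hedged sketch, the structure it expects, written as the Python object `l_api.get("iam")` would return (the policy name, actions, and resources here are hypothetical placeholders):

    iam_config = {
        "managed_policies": [],
        "custom_policies": [
            {
                "name": "allow-vision-apis",  # hypothetical policy name
                "effect": "Allow",
                "actions": ["textract:DetectDocumentText", "rekognition:DetectCustomLabels"],
                "resources": ["*"],  # tighten to specific ARNs in practice
            }
        ],
    }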
407
label/infra/lambda_api_gateway/api_gw.py
Normal file
@@ -0,0 +1,407 @@
import pulumi
import pulumi_aws as aws
import json

def create_api_gatewayv2(api_config, l_api, fn, aws_region, project_name):
    # Create API Gateway V2 HTTP API
    api = aws.apigatewayv2.Api(f"api-{l_api['name']}",
        name=l_api['name'],
        protocol_type="HTTP",
    )

    sg_vpc_link = aws.ec2.SecurityGroup(f"secgroup-{l_api['name']}",
        vpc_id=l_api["network_config"]["vpc_id"],
        ingress=[
            aws.ec2.SecurityGroupIngressArgs(
                protocol="tcp",
                from_port=443,
                to_port=443,
                cidr_blocks=["3.14.44.224/32"]  # HTTPS from the DNX VPN IP
            )
        ],
        egress=[
            aws.ec2.SecurityGroupEgressArgs(
                protocol="-1",
                from_port=0,
                to_port=0,
                cidr_blocks=["0.0.0.0/0"]
            )
        ]
    )
    # Create a VPC Link
    vpc_link = aws.apigatewayv2.VpcLink(
        f"VpcLink-{project_name}",
        subnet_ids=l_api["network_config"]["private_subnet_ids"],
        security_group_ids=[sg_vpc_link.id],
    )

    integration_get = None
    integration_post = None
    # Create Lambda integrations (one per HTTP method)
    for route in api_config["routes"]:
        if route["method"] == "GET" and not integration_get:
            integration_get = aws.apigatewayv2.Integration(f"integration-{l_api['name']}-{route['path'].replace('/', '-')}",
                api_id=api.id,
                integration_type="AWS_PROXY",
                integration_method="GET",
                integration_uri=fn.invoke_arn,
                # connection_type="INTERNET",
                payload_format_version="2.0",
                # connection_id=vpc_link.id
            )
        elif route["method"] == "POST" and not integration_post:
            integration_post = aws.apigatewayv2.Integration(f"integration-{l_api['name']}-{route['path'].replace('/', '-')}",
                api_id=api.id,
                integration_type="AWS_PROXY",
                integration_uri=fn.invoke_arn,
                integration_method="POST",
                payload_format_version="2.0"
            )

    # Create routes dynamically from config
    routes = []
    for route in api_config["routes"]:
        if route['method'] == "GET":
            integration = integration_get
        elif route['method'] == "POST":
            integration = integration_post
        else:
            continue  # only GET and POST integrations are created above
        r = aws.apigatewayv2.Route(
            f"route-{l_api['name']}-{route['method']}-{route['path'].replace('/', '-')}",
            api_id=api.id,
            route_key=f"{route['method']} {route['path']}",
            target=integration.id.apply(lambda id: f"integrations/{id}")
        )
        routes.append(r)

    # Lambda permission for API Gateway
    permission = aws.lambda_.Permission(
        f"permission-{l_api['name']}",
        action="lambda:InvokeFunction",
        function=fn.name,
        principal="apigateway.amazonaws.com",
        source_arn=api.execution_arn.apply(lambda arn: f"{arn}/*/*")
    )

    # Create stage
    stage = aws.apigatewayv2.Stage(f"stage-{l_api['name']}",
        api_id=api.id,
        name="$default",
        auto_deploy=True
    )

    # Export the API URL
    pulumi.export(f"{l_api['name']}-url", api.api_endpoint)

def create_api_gateway(api_config, l_api, fn, account_id, aws_region, project_name):

    vpce_id = None
    # Create API Gateway
    if api_config["type"] == "PRIVATE":
        if api_config["create_and_allow_vpce"]:
            # Create a new Security Group for the VPC Endpoint
            vpc_endpoint_sg = aws.ec2.SecurityGroup(f"api-gateway-vpce-sg-{project_name}",
                vpc_id=l_api["network_config"]["vpc_id"],
                description=f"Security Group for API Gateway VPC Endpoint - {project_name}",
                ingress=[
                    aws.ec2.SecurityGroupIngressArgs(
                        protocol="tcp",
                        from_port=443,
                        to_port=443,
                        cidr_blocks=["0.0.0.0/0"],
                        description="Allow HTTPS traffic to API Gateway Endpoint"
                    ),
                ],
                egress=[
                    # Allow all outbound traffic; can be restricted if needed
                    aws.ec2.SecurityGroupEgressArgs(
                        protocol="-1",  # "-1" means all protocols
                        from_port=0,
                        to_port=0,
                        cidr_blocks=["0.0.0.0/0"],
                    ),
                ]
            )

            # Create VPC endpoint for PRIVATE API Gateway
            vpce = aws.ec2.VpcEndpoint(f"vpce-{l_api['name']}",
                vpc_id=l_api["network_config"]["vpc_id"],
                service_name=f"com.amazonaws.{aws_region}.execute-api",
                subnet_ids=l_api["network_config"]["private_subnet_ids"],
                private_dns_enabled=False,
                vpc_endpoint_type="Interface",
                security_group_ids=[vpc_endpoint_sg.id]
            )
            vpce_id = vpce.id

    api = aws.apigateway.RestApi(f"api-{l_api['name']}",
        description=l_api["name"],
        put_rest_api_mode='merge' if api_config["type"] == "PRIVATE" else 'overwrite',
        fail_on_warnings=False,
        endpoint_configuration={
            "types": api_config["type"],
            "vpc_endpoint_ids": [vpce_id] if api_config["type"] == "PRIVATE" and api_config["create_and_allow_vpce"] else None
        }
    )

    # Build policy statements
    policy_outputs = []
    if api_config["allow_inbound_any"]:
        policy_outputs.append(
            pulumi.Output.all(aws_region, account_id, api.id, vpce_id).apply(
                lambda args: {
                    "Effect": "Allow",
                    "Principal": "*",
                    "Action": "execute-api:Invoke",
                    "Resource": f"arn:aws:execute-api:{args[0]}:{args[1]}:{args[2]}/*"
                }
            )
        )
    else:
        if api_config["type"] == "PRIVATE" and api_config["create_and_allow_vpce"]:
            policy_outputs.append(
                pulumi.Output.all(aws_region, account_id, api.id, vpce_id).apply(
                    lambda args: {
                        "Effect": "Allow",
                        "Principal": "*",
                        "Action": "execute-api:Invoke",
                        "Resource": f"arn:aws:execute-api:{args[0]}:{args[1]}:{args[2]}/*",
                        "Condition": {
                            "StringEquals": {
                                "aws:sourceVpce": args[3]
                            }
                        }
                    }
                )
            )

        if api_config.get("allow_inbound_cidrs"):
            policy_outputs.append(
                pulumi.Output.all(aws_region, account_id, api.id).apply(
                    lambda args: {
                        "Effect": "Allow",
                        "Principal": "*",
                        "Action": "execute-api:Invoke",
                        "Resource": f"arn:aws:execute-api:{args[0]}:{args[1]}:{args[2]}/*",
                        "Condition": {
                            "IpAddress": {
                                "aws:SourceIp": api_config.get("allow_inbound_cidrs", [])
                            }
                        }
                    }
                )
            )

    if len(policy_outputs) > 0:
        # Resource policy for the API
        resource_policy = aws.apigateway.RestApiPolicy(f"policy-{l_api['name']}",
            rest_api_id=api.id,
            policy=pulumi.Output.all(*policy_outputs).apply(
                lambda statements: json.dumps({
                    "Version": "2012-10-17",
                    "Statement": statements
                })
            )
        )

    # Create resources and methods dynamically
    resources = {}
    api_dependencies = []

    for route in api_config["routes"]:
        path = route["path"].strip("/")
        path_parts = path.split("/")

        # Build nested resources
        parent_id = api.root_resource_id
        resource_path = ""

        for part in path_parts:
            resource_path += f"/{part}"
            resource_key = resource_path

            if resource_key not in resources:
                resources[resource_key] = aws.apigateway.Resource(
                    f"resource-{l_api['name']}-{part}",
                    rest_api=api.id,
                    parent_id=parent_id,
                    path_part=part
                )

            parent_id = resources[resource_key].id

        # Create method for this route
        method = aws.apigateway.Method(
            f"method-{l_api['name']}-{route['method']}-{path.replace('/', '-')}",
            rest_api=api.id,
            resource_id=parent_id,
            http_method=route["method"],
            authorization=api_config["authorization"]
        )

        # Create integration
        integration = aws.apigateway.Integration(
            f"integration-{l_api['name']}-{route['method']}-{path.replace('/', '-')}",
            rest_api=api.id,
            resource_id=parent_id,
            http_method=method.http_method,
            integration_http_method="POST",  # for Lambda proxy integration this is always POST
            type="AWS_PROXY",
            uri=fn.invoke_arn
        )

        method_response = aws.apigateway.MethodResponse(f"methodResponse-{path.replace('/', '-')}",
            rest_api=api.id,
            resource_id=parent_id,
            http_method=method.http_method,
            status_code="200",
            response_models={"application/json": "Empty"}
        )

        integration_response = aws.apigateway.IntegrationResponse(f"integrationResponse-{path.replace('/', '-')}",
            rest_api=api.id,
            resource_id=parent_id,
            http_method=method.http_method,
            status_code="200",
            selection_pattern="",
            response_templates={"application/json": ""},
            opts=pulumi.ResourceOptions(depends_on=[integration])
        )

        api_dependencies.append(method)
        api_dependencies.append(integration)
        api_dependencies.append(method_response)
        api_dependencies.append(integration_response)

    # Lambda permission for API Gateway (covers every method and path)
    permission = aws.lambda_.Permission(
        f"permission-{l_api['name']}-general",
        action="lambda:InvokeFunction",
        function=fn.name,
        principal="apigateway.amazonaws.com",
        source_arn=api.execution_arn.apply(
            lambda arn: f"{arn}/*/*"
        )
    )
    api_dependencies.append(permission)

    # Create a deployment for the API Gateway
    deployment = aws.apigateway.Deployment(f"deployment-{l_api['name']}",
        rest_api=api.id,
        opts=pulumi.ResourceOptions(depends_on=list(resources.values()) + api_dependencies)
    )

    # Create a stage
    stage = aws.apigateway.Stage(f"stage-{l_api['name']}",
        deployment=deployment.id,
        rest_api=api.id,
        stage_name=api_config["stage_name"],
        opts=pulumi.ResourceOptions(depends_on=[deployment])
    )

    if False:  # use_api_key: TODO not implemented; api_gateway_config is not defined yet
        api_key = aws.apigateway.ApiKey(api_gateway_config["name"],
            name=api_gateway_config["name"],
            description=api_gateway_config["description"],
            enabled=True
        )

        # API Key to Stage
        usage_plan = aws.apigateway.UsagePlan("api-usage-plan",
            name=api_gateway_config["usage_plan_name"],
            description="Usage plan for API Gateway associated with API Key",
            api_stages=[aws.apigateway.UsagePlanApiStageArgs(
                api_id=api.id,
                stage=stage.stage_name
            )]
        )

        # API Key to Usage Plan
        aws.apigateway.UsagePlanKey("api-key-usage-plan-association",
            key_id=api_key.id,
            key_type="API_KEY",
            usage_plan_id=usage_plan.id
        )

    # Export the stage URL
    pulumi.export(f"{l_api['name']}-url", stage.invoke_url)
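Once the stack is up, the exported stage URL plus the POST `/execute` route from the config gives a callable endpoint. A hedged sketch of an end-to-end call (the URL is a placeholder in the shape API Gateway generates; `requests` is a third-party HTTP client assumed to be installed):

    import base64
    import json

    import requests

    # Placeholder: use the `<name>-url` value exported by the stack
    url = "https://abc123.execute-api.us-east-1.amazonaws.com/dev/execute"

    with open("diagram.pdf", "rb") as f:  # hypothetical local PDF
        payload = {
            "pdf_base64": base64.b64encode(f.read()).decode("ascii"),
            "config": {"grid_size": [5, 5], "custom_labels_confidence": 60},
        }

    resp = requests.post(url, json=payload, timeout=900)
    print(json.dumps(resp.json()["summary"], indent=2))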
4
label/infra/lambda_api_gateway/requirements.txt
Normal file
@@ -0,0 +1,4 @@
pulumi>=3.0.0,<4.0.0
pulumi-aws>=7.0.0,<8.0.0
pulumi-aws-apigateway>=3.0.0,<4.0.0
pulumi-awsx>=3.0.0,<4.0.0