Implement partition-based consolidation for ELABDATADISP

Changed consolidation strategy to leverage MySQL partitioning:
- Added get_table_partitions() to list all partitions
- Added fetch_consolidation_groups_from_partition() to read groups by consolidation key
- Each group (UnitName, ToolNameID, EventDate, EventTime) is fetched completely
- All nodes of same group are consolidated into single row with JSONB measurements
- Process partitions sequentially for predictable memory usage

Key benefits:
- Guaranteed complete consolidation (no fragmentation across batches)
- Deterministic behavior - same group always consolidated together
- Better memory efficiency with partition limits (100k groups per query)
- Clear audit trail of which partition each row came from

Tested with partition d3: 6960 input rows → 100 consolidated rows (69.6:1 ratio)
with groups containing 24-72 nodes each.

🤖 Generated with Claude Code

Co-Authored-By: Claude Haiku 4.5 <noreply@anthropic.com>
This commit is contained in:
2025-12-25 21:49:30 +01:00
parent a394de99ef
commit bb27f749a0
3 changed files with 156 additions and 85 deletions

View File

@@ -96,98 +96,57 @@ class FullMigrator:
rows_to_migrate,
f"Migrating {mysql_table}"
) as progress:
# Consolidate across batches by buffering rows with the same consolidation key
# This ensures all nodes of the same (unit, tool, timestamp) are consolidated together
row_buffer = []
last_consolidation_key = None
columns = DataTransformer.get_column_order(pg_table)
total_mysql_rows = 0
# Fetch and migrate rows in batches
# Use ordered fetching for node consolidation with resume support
for batch in mysql_conn.fetch_rows_ordered_for_consolidation(
mysql_table,
start_id=last_migrated_id
):
if not batch:
break
# Get list of partitions and process each one
partitions = mysql_conn.get_table_partitions(mysql_table)
logger.info(f"Found {len(partitions)} partitions for {mysql_table}")
# Sort batch by consolidation key
sorted_batch = sorted(batch, key=lambda r: (
r.get("UnitName") or "",
r.get("ToolNameID") or "",
str(r.get("EventDate") or ""),
str(r.get("EventTime") or ""),
int(r.get("NodeNum") or 0)
))
for partition in partitions:
logger.info(f"Processing partition {partition}...")
# Process each row, consolidating when consolidation key changes
for row in sorted_batch:
# Extract consolidation key
consolidation_key = (
row.get("UnitName"),
row.get("ToolNameID"),
row.get("EventDate"),
row.get("EventTime")
# Fetch consolidation groups from partition
# Each group is a list of rows with the same (unit, tool, date, time)
for group_rows in mysql_conn.fetch_consolidation_groups_from_partition(
mysql_table,
partition
):
if not group_rows:
break
# Consolidate the group
transformed = DataTransformer.transform_batch(
mysql_table,
group_rows,
consolidate=True
)
# If consolidation key changed, consolidate the buffer
if last_consolidation_key is not None and consolidation_key != last_consolidation_key:
# Consolidate buffered rows
transformed = DataTransformer.transform_batch(
mysql_table,
row_buffer,
consolidate=True
)
# Insert consolidated rows
inserted = pg_conn.insert_batch(pg_table, transformed, columns)
if inserted > 0:
migrated += inserted
batch_count += 1
progress.update(len(group_rows))
# Insert consolidated rows
inserted = pg_conn.insert_batch(pg_table, transformed, columns)
if inserted > 0:
migrated += inserted
batch_count += 1
progress.update(len(row_buffer))
total_mysql_rows += len(row_buffer)
# Update state every 10 inserts
if batch_count % 10 == 0:
batch_max_id = max(int(r.get(primary_key, 0)) for r in row_buffer)
self._update_migration_state(
pg_conn, migrated, batch_max_id, migration_start_time
)
else:
batch_max_id = max(int(r.get(primary_key, 0)) for r in row_buffer)
try:
with pg_conn.connection.cursor() as cursor:
cursor.execute(
"""UPDATE migration_state
SET last_migrated_id = %s, last_migrated_timestamp = %s
WHERE table_name = %s""",
(batch_max_id, migration_start_time or datetime.utcnow().isoformat(), pg_table)
)
pg_conn.connection.commit()
except Exception as e:
logger.warning(f"Failed to update migration state: {e}")
# Reset buffer
row_buffer = []
# Add row to buffer
row_buffer.append(row)
last_consolidation_key = consolidation_key
# Consolidate any remaining rows in buffer
if row_buffer:
transformed = DataTransformer.transform_batch(
mysql_table,
row_buffer,
consolidate=True
)
inserted = pg_conn.insert_batch(pg_table, transformed, columns)
if inserted > 0:
migrated += inserted
batch_count += 1
progress.update(len(row_buffer))
total_mysql_rows += len(row_buffer)
# Update state every 10 consolidations
if batch_count % 10 == 0:
batch_max_id = max(int(r.get(primary_key, 0)) for r in group_rows)
self._update_migration_state(
pg_conn, migrated, batch_max_id, migration_start_time
)
else:
batch_max_id = max(int(r.get(primary_key, 0)) for r in group_rows)
try:
with pg_conn.connection.cursor() as cursor:
cursor.execute(
"""UPDATE migration_state
SET last_migrated_id = %s, last_migrated_timestamp = %s
WHERE table_name = %s""",
(batch_max_id, migration_start_time or datetime.utcnow().isoformat(), pg_table)
)
pg_conn.connection.commit()
except Exception as e:
logger.warning(f"Failed to update migration state: {e}")
# Get final actual count from PostgreSQL
final_count = pg_conn.get_row_count(pg_table)