First commit: refactoring in Python
This commit is contained in:
323
src/rsn/elaboration.py
Normal file
323
src/rsn/elaboration.py
Normal file
@@ -0,0 +1,323 @@
|
||||
"""
|
||||
Data elaboration functions for RSN sensors.
|
||||
|
||||
Processes sensor data to calculate displacements and angles.
|
||||
"""
|
||||
|
||||
import numpy as np
|
||||
import logging
|
||||
from typing import Tuple, Optional
|
||||
from pathlib import Path
|
||||
import csv
|
||||
from ..common.database import DatabaseConnection
|
||||
from ..common.validators import approximate_values
|
||||
|
||||
logger = logging.getLogger(__name__)
|
||||
|
||||
|
||||
def elaborate_rsn_data(
    conn: DatabaseConnection,
    control_unit_id: str,
    chain: str,
    mems_type: int,
    n_sensors: int,
    acc_magnitude: np.ndarray,
    acc_tolerance: float,
    angle_data: np.ndarray,
    temp_max: float,
    temp_min: float,
    temperature: np.ndarray,
    node_list: list,
    timestamps: np.ndarray,
    is_new_zero: bool,
    n_data_avg: int,
    n_data_despike: int,
    error_flags: np.ndarray,
    initial_date: str,
    installation_position: int
) -> Tuple[np.ndarray, np.ndarray, np.ndarray, np.ndarray, np.ndarray]:
    """
    Elaborate RSN Link data to calculate displacements.

    Converts MATLAB elaborazione_RSN.m function.

    Pipeline (as implemented below):
      1. If ``is_new_zero``, drop the leading samples that are polluted by
         the averaging/despiking windows.
      2. For Freescale MEMS (``mems_type == 2``), repair samples whose
         acceleration-vector magnitude jumps more than ``acc_tolerance`` or
         falls outside calibration bounds by carrying the previous sample
         forward.
      3. Repair out-of-range temperatures the same way (carry-forward).
      4. Apply database zeroing adjustments (``apply_azzeramenti``).
      5. Map each node's two angle columns onto alpha_x/alpha_y according
         to ``installation_position``.
      6. Round values, express angles relative to the stored reference and
         build the per-sensor error matrix.

    Args:
        conn: Database connection
        control_unit_id: Control unit identifier
        chain: Chain identifier
        mems_type: MEMS sensor type (2 selects the Freescale-specific path)
        n_sensors: Number of sensors
        acc_magnitude: Acceleration magnitude array
        acc_tolerance: Acceleration tolerance
        angle_data: Angle data array (two columns per sensor: x then y)
        temp_max: Maximum valid temperature
        temp_min: Minimum valid temperature
        temperature: Temperature array
        node_list: List of node IDs
        timestamps: Timestamp array
        is_new_zero: Whether this is a new zero point
        n_data_avg: Number of data for averaging
        n_data_despike: Number of data for despiking
        error_flags: Error flags array (assumed 4 flag columns per sensor)
        initial_date: Initial processing date
            NOTE(review): currently unused in this function — confirm
            whether the MATLAB original consumed it.
        installation_position: Installation position code (1-8)

    Returns:
        Tuple of (alpha_x, alpha_y, temperature, timestamps, error_flags)

        NOTE(review): ``temperature`` is transposed unconditionally below
        but transposed back only when ``mems_type == 2``, so the returned
        orientation differs by MEMS type — verify against the MATLAB code.
    """
    logger.info("Starting RSN Link elaboration")

    # Handle new zero point: discard the first ~half window of samples,
    # which are affected by the moving-average / despike filters.
    if is_new_zero:
        n_skip = max(n_data_avg, n_data_despike)
        ini = round(n_skip / 2) + 1
        if n_skip % 2 == 0:
            ini += 1

        angle_data = angle_data[ini:, :]
        acc_magnitude = acc_magnitude[ini:, :]
        temperature = temperature[ini:, :]
        timestamps = timestamps[ini:]
        error_flags = error_flags[ini:, :]

    n_timestamps = len(timestamps)
    # Work on temperature as (sensors x timestamps) from here on.
    temperature = temperature.T

    # Determine number of axes per sensor.
    # NOTE(review): n_axes is computed but never used below — dead code or
    # an incomplete part of the MATLAB port; confirm before removing.
    n_axes = 2 if mems_type == 2 else 3

    # Acceleration vector validation (for Freescale MEMS)
    n_corrections_acc = 0
    n_corrections_cal = 0

    if mems_type == 2:
        # Reorient to (sensors/nodes x timestamps) like the MATLAB code.
        acc_magnitude = acc_magnitude.T
        angle_data = angle_data.T

        # Check acceleration vector magnitude, column by column (time).
        for j in range(1, acc_magnitude.shape[1]):
            for i in range(acc_magnitude.shape[0]):
                # Each sensor owns two consecutive angle rows (x and y).
                node_idx = i * 2

                # Tolerance check: a sudden jump in |a| is treated as a
                # glitch; repeat the previous angle sample.
                if abs(acc_magnitude[i, j] - acc_magnitude[i, j-1]) > acc_tolerance:
                    angle_data[node_idx:node_idx+2, j] = angle_data[node_idx:node_idx+2, j-1]
                    n_corrections_acc += 1

                # Calibration check: |a| outside [0.8, 1.3] g means the
                # reading is uncalibrated/implausible.
                # NOTE(review): the recovery scan below accepts values in
                # [0.8, 1.2] while this trigger uses 1.3 as the upper
                # bound — confirm the inconsistency against the MATLAB
                # original.
                if acc_magnitude[i, j] < 0.8 or acc_magnitude[i, j] > 1.3:
                    # NOTE(review): unreachable — j starts at 1 in this
                    # loop, so this "first column" recovery branch never
                    # runs. The MATLAB original probably iterated from the
                    # first sample; verify and either fix the range or
                    # drop this branch.
                    if j == 0:
                        # Find next valid value
                        nn = 1
                        while nn < acc_magnitude.shape[1]:
                            if 0.8 <= acc_magnitude[i, nn] <= 1.2:
                                angle_data[node_idx:node_idx+2, j] = angle_data[node_idx:node_idx+2, nn]
                                break
                            nn += 1
                    else:
                        # Carry the previous angle and temperature forward.
                        angle_data[node_idx:node_idx+2, j] = angle_data[node_idx:node_idx+2, j-1]
                        temperature[i, j] = temperature[i, j-1]
                    n_corrections_cal += 1

        logger.info(f"{n_corrections_acc} corrections for acceleration vector filter")
        logger.info(f"{n_corrections_cal} corrections for uncalibrated acceleration vectors")

    # Temperature validation: out-of-range readings are replaced by the
    # previous sample (or the next valid one for the first column).
    n_corrections_temp = 0
    for b in range(temperature.shape[1]):
        for a in range(temperature.shape[0]):
            if temperature[a, b] > temp_max or temperature[a, b] < temp_min:
                if b == 0:
                    # Find next valid value
                    cc = 1
                    while cc < temperature.shape[1]:
                        if temp_min <= temperature[a, cc] <= temp_max:
                            temperature[a, b] = temperature[a, cc]
                            break
                        cc += 1
                else:
                    temperature[a, b] = temperature[a, b-1]
                    # For Freescale MEMS a bad temperature also invalidates
                    # the paired angle sample.
                    if mems_type == 2:
                        node_idx = a * 2
                        angle_data[node_idx:node_idx+2, b] = angle_data[node_idx:node_idx+2, b-1]
                n_corrections_temp += 1

    logger.info(f"{n_corrections_temp} corrections for temperature filter")

    # Apply azzeramenti (zeroing adjustments from database)
    angle_data = apply_azzeramenti(conn, control_unit_id, chain, angle_data, node_list, timestamps)

    # Transpose back to (timestamps x channels) for the mapping below.
    if mems_type == 2:
        angle_data = angle_data.T
        temperature = temperature.T

    # Calculate alpha_x and alpha_y based on installation position.
    alpha_x = np.zeros((n_timestamps, n_sensors))
    alpha_y = np.zeros((n_timestamps, n_sensors))

    for i in range(n_sensors):
        # Column indices of this sensor's raw x/y angle channels.
        ax_idx = i * 2
        ay_idx = i * 2 + 1

        # Sign/axis remapping per mounting orientation.
        # NOTE(review): positions 1&4, 2&3, 5&8 and 6&7 are pairwise
        # identical below — confirm against the installation-position
        # specification that this is intended.
        if installation_position == 1:
            alpha_x[:, i] = angle_data[:, ax_idx]
            alpha_y[:, i] = angle_data[:, ay_idx]
        elif installation_position == 2:
            alpha_x[:, i] = -angle_data[:, ax_idx]
            alpha_y[:, i] = -angle_data[:, ay_idx]
        elif installation_position == 3:
            alpha_x[:, i] = -angle_data[:, ax_idx]
            alpha_y[:, i] = -angle_data[:, ay_idx]
        elif installation_position == 4:
            alpha_x[:, i] = angle_data[:, ax_idx]
            alpha_y[:, i] = angle_data[:, ay_idx]
        elif installation_position == 5:
            alpha_x[:, i] = angle_data[:, ay_idx]
            alpha_y[:, i] = -angle_data[:, ax_idx]
        elif installation_position == 6:
            alpha_x[:, i] = -angle_data[:, ay_idx]
            alpha_y[:, i] = angle_data[:, ax_idx]
        elif installation_position == 7:
            alpha_x[:, i] = -angle_data[:, ay_idx]
            alpha_y[:, i] = angle_data[:, ax_idx]
        elif installation_position == 8:
            alpha_x[:, i] = angle_data[:, ay_idx]
            alpha_y[:, i] = -angle_data[:, ax_idx]

    # Approximate values (round to 3 decimals via shared validator helper).
    alpha_x, alpha_y, temperature = approximate_values(alpha_x, alpha_y, temperature, decimals=3)

    # Calculate differential values (relative to first reading or reference)
    alpha_x, alpha_y = calculate_differentials(
        control_unit_id, chain, alpha_x, alpha_y, is_new_zero
    )

    # Process error flags into a per-sensor (sensors x timestamps) matrix.
    error_matrix = process_error_flags(error_flags, n_sensors)

    logger.info("RSN Link elaboration completed successfully")
    return alpha_x, alpha_y, temperature, timestamps, error_matrix
|
||||
|
||||
|
||||
def apply_azzeramenti(
    conn: DatabaseConnection,
    control_unit_id: str,
    chain: str,
    angle_data: np.ndarray,
    node_list: list,
    timestamps: np.ndarray
) -> np.ndarray:
    """
    Apply zeroing adjustments from database.

    Converts MATLAB azzeramenti.m function.

    Looks up zeroing events for the given control unit / chain / nodes and
    is meant to offset ``angle_data`` accordingly. The offset application
    itself is still unimplemented, so the data is currently returned
    unchanged; a database failure is treated as best-effort (logged, not
    raised).

    Args:
        conn: Database connection
        control_unit_id: Control unit identifier
        chain: Chain identifier
        angle_data: Angle data array
        node_list: List of node IDs
        timestamps: Timestamp array (reserved for the offset application)

    Returns:
        Adjusted angle data (currently the input, unchanged)
    """
    # No nodes means no adjustments can match; returning early also avoids
    # generating an invalid empty "IN ()" clause.
    if not node_list:
        return angle_data

    # Build one placeholder per node ID so each ID is bound as a separate
    # query parameter. The previous version interpolated a comma-joined
    # string into a single "IN (%s)" parameter, which made the database
    # compare nodeID against one literal string like "1,2,3" instead of
    # the set of IDs (and bypassed proper parameterization).
    placeholders = ','.join(['%s'] * len(node_list))
    query = f"""
        SELECT nodeID, zeroDate, zeroValue
        FROM sensor_zeroing
        WHERE IDcentralina = %s
        AND DTcatena = %s
        AND nodeID IN ({placeholders})
        ORDER BY zeroDate
    """

    try:
        results = conn.execute_query(query, (control_unit_id, chain, *node_list))

        if results:
            logger.info(f"Applying {len(results)} zeroing adjustments")
            # TODO: apply offsets based on zero dates (MATLAB azzeramenti.m).
            # For now the data is returned unchanged.
    except Exception as e:
        # Best-effort: zeroing metadata is optional, so log and continue
        # with the unadjusted data rather than failing the elaboration.
        logger.warning(f"Could not load zeroing data: {e}")

    return angle_data
|
||||
|
||||
|
||||
def calculate_differentials(
    control_unit_id: str,
    chain: str,
    alpha_x: np.ndarray,
    alpha_y: np.ndarray,
    is_new_zero: bool
) -> Tuple[np.ndarray, np.ndarray]:
    """
    Calculate differential values relative to reference.

    The reference is the first row of each angle array, persisted to
    per-chain CSV files. When ``is_new_zero`` is False the reference is
    (re)written from the current data; otherwise the previously stored
    reference is loaded (falling back to the first row if the files are
    missing).

    NOTE(review): writing the reference on ``is_new_zero == False`` reads
    as inverted — confirm the flag's polarity against the MATLAB original.

    Args:
        control_unit_id: Control unit identifier
        chain: Chain identifier
        alpha_x: Alpha X data
        alpha_y: Alpha Y data
        is_new_zero: Whether this is first processing

    Returns:
        Tuple of differential alpha_x and alpha_y
    """
    ref_path_x = Path(f"{control_unit_id}-{chain}-RifX.csv")
    ref_path_y = Path(f"{control_unit_id}-{chain}-RifY.csv")

    if is_new_zero:
        # Subsequent processing: diff against the persisted reference row.
        try:
            baseline_x = np.loadtxt(ref_path_x, delimiter=',')
            baseline_y = np.loadtxt(ref_path_y, delimiter=',')
        except FileNotFoundError:
            logger.warning("Reference files not found, using first value as reference")
            baseline_x = alpha_x[0, :]
            baseline_y = alpha_y[0, :]
    else:
        # First processing: persist the first row as the reference, then
        # diff against that same row.
        np.savetxt(ref_path_x, alpha_x[:1, :], delimiter=',')
        np.savetxt(ref_path_y, alpha_y[:1, :], delimiter=',')
        baseline_x = alpha_x[0, :]
        baseline_y = alpha_y[0, :]

    return alpha_x - baseline_x, alpha_y - baseline_y
|
||||
|
||||
|
||||
def process_error_flags(error_flags: np.ndarray, n_sensors: int) -> np.ndarray:
    """
    Process error flags to create sensor-level error matrix.

    Each sensor owns 4 consecutive flag columns in ``error_flags``
    (assumed layout — confirm against the acquisition format). Per
    timestamp, a sensor is marked 1 if any of its flags equals 1, else 0.5
    if any flag equals 0.5, else 0.

    Args:
        error_flags: Raw error flags array, shape (timestamps, 4 * sensors)
        n_sensors: Number of sensors

    Returns:
        Processed error matrix (sensors x timestamps)
    """
    n_timestamps = error_flags.shape[0]
    error_matrix = np.zeros((n_sensors, n_timestamps))

    for i in range(n_timestamps):
        for n in range(n_sensors):
            # Slice this sensor's 4 flag columns (computed offset replaces
            # the old running counter; numpy slicing tolerates short rows).
            err = error_flags[i, 4 * n:4 * n + 4]
            if np.any(err == 1):
                error_matrix[n, i] = 1
            elif np.any(err == 0.5):
                # The original also tested "error_matrix[n, i] != 1" here,
                # which was always true (matrix starts at 0 and the elif
                # excludes the 1-case) — dead condition removed.
                error_matrix[n, i] = 0.5

    return error_matrix
|
||||
Reference in New Issue
Block a user