#!.venv/bin/python
import asyncio
import logging
from datetime import datetime, timedelta

from utils.database import BATCH_SIZE, FLAG_TO_TIMESTAMP

logger = logging.getLogger(__name__)

async def load_data(cfg: object, matrice_valori: list, pool: object, type: str) -> bool:
    """Load a list of raw data records into the database.

    Performs a bulk insert (executemany) to load the data, using an
    'ON DUPLICATE KEY UPDATE' clause to refresh existing records, and
    retries the operation when a deadlock is detected.

    Args:
        cfg (object): Configuration object holding the table names and retry parameters.
        matrice_valori (list): A list of tuples, each tuple being one row to insert.
        pool (object): The database connection pool.
        type (str): Kind of data load. For "gd" it updates the matching DT tool record instead.

    Returns:
        bool: True if the load succeeded, False otherwise.
    """
    if not matrice_valori:
        logger.info("Nothing to load.")
        return True

    if type == "gd" and matrice_valori[0][0] == "RSSI":
        # Drop the "RSSI" marker row and update the nearest existing record
        # instead of inserting new ones.
        matrice_valori.pop(0)
        sql_load_RAWDATA = f"""
            UPDATE {cfg.dbrawdata} t1
            JOIN (
                SELECT id
                FROM {cfg.dbrawdata}
                WHERE UnitName = %s AND ToolNameID = %s AND NodeNum = %s
                  AND TIMESTAMP(`EventDate`, `EventTime`) BETWEEN %s AND %s
                ORDER BY ABS(TIMESTAMPDIFF(SECOND, TIMESTAMP(`EventDate`, `EventTime`), %s))
                LIMIT 1
            ) t2 ON t1.id = t2.id
            SET t1.BatLevelModule = %s, t1.TemperatureModule = %s, t1.RssiModule = %s
        """
    else:
        sql_load_RAWDATA = f"""
            INSERT INTO {cfg.dbrawdata} (
                `UnitName`,`ToolNameID`,`NodeNum`,`EventDate`,`EventTime`,`BatLevel`,`Temperature`,
                `Val0`,`Val1`,`Val2`,`Val3`,`Val4`,`Val5`,`Val6`,`Val7`,
                `Val8`,`Val9`,`ValA`,`ValB`,`ValC`,`ValD`,`ValE`,`ValF`,
                `BatLevelModule`,`TemperatureModule`,`RssiModule`
            )
            VALUES (
                %s, %s, %s, %s, %s, %s, %s,
                %s, %s, %s, %s, %s, %s, %s, %s,
                %s, %s, %s, %s, %s, %s, %s, %s,
                %s, %s, %s
            ) AS new_data
            ON DUPLICATE KEY UPDATE
                `BatLevel` = IF({cfg.dbrawdata}.`BatLevel` != new_data.`BatLevel`, new_data.`BatLevel`, {cfg.dbrawdata}.`BatLevel`),
                `Temperature` = IF({cfg.dbrawdata}.`Temperature` != new_data.Temperature, new_data.Temperature, {cfg.dbrawdata}.`Temperature`),
                `Val0` = IF({cfg.dbrawdata}.`Val0` != new_data.Val0 AND new_data.`Val0` IS NOT NULL, new_data.Val0, {cfg.dbrawdata}.`Val0`),
                `Val1` = IF({cfg.dbrawdata}.`Val1` != new_data.Val1 AND new_data.`Val1` IS NOT NULL, new_data.Val1, {cfg.dbrawdata}.`Val1`),
                `Val2` = IF({cfg.dbrawdata}.`Val2` != new_data.Val2 AND new_data.`Val2` IS NOT NULL, new_data.Val2, {cfg.dbrawdata}.`Val2`),
                `Val3` = IF({cfg.dbrawdata}.`Val3` != new_data.Val3 AND new_data.`Val3` IS NOT NULL, new_data.Val3, {cfg.dbrawdata}.`Val3`),
                `Val4` = IF({cfg.dbrawdata}.`Val4` != new_data.Val4 AND new_data.`Val4` IS NOT NULL, new_data.Val4, {cfg.dbrawdata}.`Val4`),
                `Val5` = IF({cfg.dbrawdata}.`Val5` != new_data.Val5 AND new_data.`Val5` IS NOT NULL, new_data.Val5, {cfg.dbrawdata}.`Val5`),
                `Val6` = IF({cfg.dbrawdata}.`Val6` != new_data.Val6 AND new_data.`Val6` IS NOT NULL, new_data.Val6, {cfg.dbrawdata}.`Val6`),
                `Val7` = IF({cfg.dbrawdata}.`Val7` != new_data.Val7 AND new_data.`Val7` IS NOT NULL, new_data.Val7, {cfg.dbrawdata}.`Val7`),
                `Val8` = IF({cfg.dbrawdata}.`Val8` != new_data.Val8 AND new_data.`Val8` IS NOT NULL, new_data.Val8, {cfg.dbrawdata}.`Val8`),
                `Val9` = IF({cfg.dbrawdata}.`Val9` != new_data.Val9 AND new_data.`Val9` IS NOT NULL, new_data.Val9, {cfg.dbrawdata}.`Val9`),
                `ValA` = IF({cfg.dbrawdata}.`ValA` != new_data.ValA AND new_data.`ValA` IS NOT NULL, new_data.ValA, {cfg.dbrawdata}.`ValA`),
                `ValB` = IF({cfg.dbrawdata}.`ValB` != new_data.ValB AND new_data.`ValB` IS NOT NULL, new_data.ValB, {cfg.dbrawdata}.`ValB`),
                `ValC` = IF({cfg.dbrawdata}.`ValC` != new_data.ValC AND new_data.`ValC` IS NOT NULL, new_data.ValC, {cfg.dbrawdata}.`ValC`),
                `ValD` = IF({cfg.dbrawdata}.`ValD` != new_data.ValD AND new_data.`ValD` IS NOT NULL, new_data.ValD, {cfg.dbrawdata}.`ValD`),
                `ValE` = IF({cfg.dbrawdata}.`ValE` != new_data.ValE AND new_data.`ValE` IS NOT NULL, new_data.ValE, {cfg.dbrawdata}.`ValE`),
                `ValF` = IF({cfg.dbrawdata}.`ValF` != new_data.ValF AND new_data.`ValF` IS NOT NULL, new_data.ValF, {cfg.dbrawdata}.`ValF`),
                `BatLevelModule` = IF({cfg.dbrawdata}.`BatLevelModule` != new_data.BatLevelModule, new_data.BatLevelModule, {cfg.dbrawdata}.`BatLevelModule`),
                `TemperatureModule` = IF({cfg.dbrawdata}.`TemperatureModule` != new_data.TemperatureModule, new_data.TemperatureModule, {cfg.dbrawdata}.`TemperatureModule`),
                `RssiModule` = IF({cfg.dbrawdata}.`RssiModule` != new_data.RssiModule, new_data.RssiModule, {cfg.dbrawdata}.`RssiModule`),
                `Created_at` = NOW()
        """
#logger.info(f"Query insert: {sql_load_RAWDATA}.")
|
|
#logger.info(f"Matrice valori da inserire: {matrice_valori}.")
|
|
rc = False
|
|
async with pool.acquire() as conn:
|
|
async with conn.cursor() as cur:
|
|
for attempt in range(cfg.max_retries):
|
|
try:
|
|
logger.info(f"Loading data attempt {attempt + 1}.")
|
|
|
|
for i in range(0, len(matrice_valori), BATCH_SIZE):
|
|
batch = matrice_valori[i:i + BATCH_SIZE]
|
|
|
|
await cur.executemany(sql_load_RAWDATA, batch)
|
|
await conn.commit()
|
|
|
|
logger.info(f"Completed batch {i//BATCH_SIZE + 1}/{(len(matrice_valori)-1)//BATCH_SIZE + 1}")
|
|
|
|
logger.info("Data loaded.")
|
|
rc = True
|
|
break
|
|
except Exception as e:
|
|
await conn.rollback()
|
|
logger.error(f"Error: {e}.")
|
|
# logger.error(f"Matrice valori da inserire: {batch}.")
|
|
|
|
if e.args[0] == 1213: # Deadlock detected
|
|
logger.warning(
|
|
f"Deadlock detected, attempt {attempt + 1}/{cfg.max_retries}"
|
|
)
|
|
|
|
if attempt < cfg.max_retries - 1:
|
|
delay = 2 * attempt
|
|
await asyncio.sleep(delay)
|
|
continue
|
|
else:
|
|
logger.error("Max retry attempts reached for deadlock")
|
|
raise
|
|
return rc
|
|
|
|
|
|
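

# ---------------------------------------------------------------------------
# Usage sketch (illustrative, not part of the original module): how load_data
# might be called. The cfg attributes, table name, and row values below are
# hypothetical placeholders; the pool is assumed to expose the aiomysql-style
# async acquire/cursor/executemany interface used throughout this module.
async def _example_load(pool: object) -> None:
    from types import SimpleNamespace

    cfg = SimpleNamespace(dbrawdata="RAWDATA", max_retries=3)  # hypothetical names
    # One tuple per row: 7 header fields, 16 Val slots, 3 module fields
    # (26 values total), matching the INSERT column list above.
    rows = [
        ("Unit01", "ToolA", 1, "2024-01-01", "12:00:00", 3.7, 21.5,
         *([None] * 16), 3.6, 20.0, -70),
    ]
    ok = await load_data(cfg, rows, pool, type="std")
    logger.info(f"Example load returned {ok}.")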


async def update_status(cfg: object, id: int, status: int, pool: object) -> None:
    """Update the status of a record in the CSV records table.

    Args:
        cfg (object): Configuration object holding the table name.
        id (int): ID of the record to update.
        status (int): Status flag to OR into the current status; must be a key of FLAG_TO_TIMESTAMP.
        pool (object): The database connection pool.
    """
    async with pool.acquire() as conn:
        async with conn.cursor() as cur:
            try:
                # The timestamp column name comes from FLAG_TO_TIMESTAMP and
                # cannot be a bound parameter; the values are parameterized.
                await cur.execute(
                    f"""UPDATE {cfg.dbrectable} SET
                            status = status | %s,
                            {FLAG_TO_TIMESTAMP[status]} = NOW()
                        WHERE id = %s
                    """,
                    (status, id),
                )
                await conn.commit()
                logger.info(f"Status updated id {id}.")
            except Exception as e:
                await conn.rollback()
                logger.error(f"Error: {e}")


async def unlock(cfg: object, id: int, pool: object) -> None:
    """Unlock a record in the CSV records table.

    Sets the 'locked' field to 0 for the given ID.

    Args:
        cfg (object): Configuration object holding the table name.
        id (int): ID of the record to unlock.
        pool (object): The database connection pool.
    """
    async with pool.acquire() as conn:
        async with conn.cursor() as cur:
            try:
                await cur.execute(
                    f"UPDATE {cfg.dbrectable} SET locked = 0 WHERE id = %s",
                    (id,),
                )
                await conn.commit()
                logger.info(f"id {id} unlocked.")
            except Exception as e:
                await conn.rollback()
                logger.error(f"Error: {e}")
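

# ---------------------------------------------------------------------------
# Usage sketch (illustrative, not part of the original module): a typical
# end-of-processing sequence that flags a CSV record and releases its lock.
# The flag value 4 is a hypothetical placeholder; real values must be keys
# of FLAG_TO_TIMESTAMP.
async def _example_finish_record(cfg: object, rec_id: int, pool: object) -> None:
    await update_status(cfg, rec_id, 4, pool)  # hypothetical "loaded" flag
    await unlock(cfg, rec_id, pool)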


async def get_matlab_cmd(cfg: object, unit: str, tool: str, pool: object) -> tuple:
    """Fetch the information needed to run a Matlab command from the database.

    Args:
        cfg (object): The configuration object.
        unit (str): The unit name.
        tool (str): The tool name.
        pool (object): The database connection pool.

    Returns:
        tuple: A tuple with the Matlab command information, or None on error.
    """
    async with pool.acquire() as conn:
        async with conn.cursor() as cur:
            try:
                await cur.execute(
                    '''SELECT m.matcall, t.ftp_send, t.unit_id, s.`desc` AS statustools,
                              t.api_send, u.inoltro_api, u.inoltro_api_url,
                              u.inoltro_api_bearer_token, IFNULL(u.duedate, "") AS duedate
                       FROM matfuncs AS m
                       INNER JOIN tools AS t ON t.matfunc = m.id
                       INNER JOIN units AS u ON u.id = t.unit_id
                       INNER JOIN statustools AS s ON t.statustool_id = s.id
                       WHERE t.name = %s AND u.name = %s''',
                    (tool, unit),
                )
                return await cur.fetchone()
            except Exception as e:
                logger.error(f"Error: {e}")
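

# ---------------------------------------------------------------------------
# Usage sketch (illustrative, not part of the original module): fetching the
# Matlab call info for a unit/tool pair. Unit and tool names are hypothetical.
async def _example_matlab_cmd(cfg: object, pool: object) -> None:
    info = await get_matlab_cmd(cfg, "Unit01", "ToolA", pool)
    if info:
        matcall = info[0]  # first column of the SELECT above
        logger.info(f"Matlab call: {matcall}.")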


async def find_nearest_timestamp(cfg: object, unit_tool_data: dict, pool: object) -> tuple:
    """Find the nearest timestamp in the raw data table based on a reference
    timestamp and unit/tool/node information.

    Args:
        cfg (object): Configuration object containing the database table name (`cfg.dbrawdata`).
        unit_tool_data (dict): A dictionary containing:
            - "timestamp" (str): The reference timestamp string in "%Y-%m-%d %H:%M:%S" format.
            - "unit" (str): The UnitName to filter by.
            - "tool" (str): The ToolNameID to filter by.
            - "node_num" (int): The NodeNum to filter by.
        pool (object): The database connection pool.

    Returns:
        tuple: A tuple containing the event timestamp, BatLevel, and Temperature of the
            nearest record, or None if an error occurs or no record is found.
    """
    ref_timestamp = datetime.strptime(unit_tool_data["timestamp"], "%Y-%m-%d %H:%M:%S")
    # Search within a +/- 45 second window around the reference timestamp.
    start_timestamp = ref_timestamp - timedelta(seconds=45)
    end_timestamp = ref_timestamp + timedelta(seconds=45)
    logger.info(f"Find nearest timestamp: {ref_timestamp}")
    async with pool.acquire() as conn:
        async with conn.cursor() as cur:
            try:
                await cur.execute(
                    f'''SELECT TIMESTAMP(`EventDate`, `EventTime`) AS event_timestamp,
                               BatLevel, Temperature
                        FROM {cfg.dbrawdata}
                        WHERE UnitName = %s AND ToolNameID = %s AND NodeNum = %s
                          AND TIMESTAMP(`EventDate`, `EventTime`) BETWEEN %s AND %s
                        ORDER BY ABS(TIMESTAMPDIFF(SECOND, TIMESTAMP(`EventDate`, `EventTime`), %s))
                        LIMIT 1
                    ''',
                    (unit_tool_data["unit"], unit_tool_data["tool"], unit_tool_data["node_num"],
                     start_timestamp, end_timestamp, ref_timestamp),
                )
                return await cur.fetchone()
            except Exception as e:
                logger.error(f"Error: {e}")
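

# ---------------------------------------------------------------------------
# Usage sketch (illustrative, not part of the original module): looking up the
# raw-data row closest to a reference timestamp. The unit/tool/node values are
# hypothetical placeholders.
async def _example_nearest(cfg: object, pool: object) -> None:
    row = await find_nearest_timestamp(
        cfg,
        {"timestamp": "2024-01-01 12:00:30", "unit": "Unit01",
         "tool": "ToolA", "node_num": 1},
        pool,
    )
    if row:
        event_ts, bat_level, temperature = row
        logger.info(f"Nearest event at {event_ts}: bat={bat_level}, temp={temperature}.")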