elab matlab
@@ -1,77 +1,134 @@
 #!.venv/bin/python

 # Import necessary libraries
-import mysql.connector
 import logging
-import importlib
-import time
 import asyncio
+import os
+import aiomysql
+import contextvars
 import subprocess


 # Import custom modules for configuration and database connection
-from utils.config import loader_ftp_csv as setting
-from utils.database.connection import connetti_db
-from utils.database.loader_action import get_matlab_cmd
+from utils.config import loader_matlab_elab as setting
 from utils.database import DATA_LOADED
+from utils.database.matlab_query import get_matlab_command
+from utils.csv.loaders import get_next_csv_atomic


+# Create a context variable to identify the worker
+worker_context = contextvars.ContextVar('worker_id', default='00')
+
+
+# Custom formatter that includes the worker_id
+class WorkerFormatter(logging.Formatter):
+    def format(self, record):
+        record.worker_id = worker_context.get()
+        return super().format(record)
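
Note: the WorkerFormatter/contextvars pair added above tags every log line with the id of the asyncio task that emitted it; since each task runs in its own copy of the context, one worker's `set()` does not leak into the others. A minimal self-contained sketch of the mechanism (the class and variable names mirror the commit; the StreamHandler and demo values are illustrative only):

    import contextvars
    import logging
    import sys

    worker_context = contextvars.ContextVar('worker_id', default='00')

    class WorkerFormatter(logging.Formatter):
        def format(self, record):
            # Read the worker id from the calling task's context
            record.worker_id = worker_context.get()
            return super().format(record)

    handler = logging.StreamHandler(sys.stdout)
    handler.setFormatter(WorkerFormatter("Worker-%(worker_id)s: %(message)s"))
    log = logging.getLogger("demo")
    log.addHandler(handler)
    log.setLevel(logging.INFO)

    worker_context.set("W3")
    log.info("processing")  # prints: Worker-W3: processing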
 # Initialize the logger for this module
-logger = logging.getLogger(__name__)
+logger = logging.getLogger()


-# Function to elaborate CSV data
-async def run_matlab_elab(id: int, unit_name: str, unit_type: str, tool_name: str, tool_type: str, semaphore: asyncio.Semaphore) -> bool:
-    async with semaphore:
-        if get_matlab_cmd(cfg, unit_name, tool_name):
-            # If a record is found, lock it by updating the 'locked' field to 1
+# Delay between one CSV elaboration and the next (in seconds)
+ELAB_PROCESSING_DELAY = 0.2
+# Wait time when there are no records to process
+NO_RECORD_SLEEP = 60
+
+
+async def worker(worker_id: int, cfg: object, pool) -> None:
+    # Set the context for this worker
+    worker_context.set(f"W{worker_id}")
+
+    debug_mode = (logging.getLogger().getEffectiveLevel() == logging.DEBUG)
+    logger.info("Avviato")
+
+    while True:
+        try:
+            logger.info("Inizio elaborazione")
+
+            record = await get_next_csv_atomic(pool, cfg.dbrectable, DATA_LOADED)
+
+            if record:
+                id, unit_type, tool_type, unit_name, tool_name = [x.lower().replace(" ", "_") if isinstance(x, str) else x for x in record]
+                matlab_info = await get_matlab_command(cfg, tool_name, unit_name)
+                matlab_cmd = f"timeout {cfg.timeout} ./run_{matlab_info['matcall']}.sh {cfg.matlab_runtime} {unit_name} {tool_name}"
+
+                # matlab_error_filename = f'{cfg.matlab_error_path}{unit_name}{tool_name}_output_error.txt'
+
+                success = await subprocess.run(matlab_cmd,
+                                               cwd=cfg.matlab_func_path,
+                                               capture_output=True,
+                                               text=True,
+                                               check=True)
+
+                if not success:
+                    logger.error("Errore durante l'elaborazione")
+
+                await asyncio.sleep(ELAB_PROCESSING_DELAY)
+            else:
+                logger.debug("Nessun record disponibile")
+                await asyncio.sleep(NO_RECORD_SLEEP)
+
+        except Exception as e:
+            logger.error(f"Errore durante l'esecuzione: {e}", exc_info=debug_mode)
+            await asyncio.sleep(1)
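
A note on the new worker body: `subprocess.run` is synchronous and not awaitable, so `success = await subprocess.run(...)` raises `TypeError` at runtime; moreover, with `check=True` a non-zero exit raises `CalledProcessError` instead of returning a falsy value, so the `if not success:` branch can never fire. A non-blocking sketch using asyncio's own subprocess API (an assumption about the intent, not what the commit ships):

    import asyncio

    async def run_matlab(matlab_cmd: str, cwd: str, timeout: float) -> bool:
        # Spawn the shell command without blocking the event loop
        proc = await asyncio.create_subprocess_shell(
            matlab_cmd,
            cwd=cwd,
            stdout=asyncio.subprocess.PIPE,
            stderr=asyncio.subprocess.PIPE,
        )
        try:
            await asyncio.wait_for(proc.communicate(), timeout=timeout)
        except asyncio.TimeoutError:
            proc.kill()        # replaces the `timeout ...` shell prefix
            await proc.wait()
            return False
        return proc.returncode == 0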
 async def main():
-    # Load the configuration settings
+    """Main function: starts the workers and manages the main loop."""
+    logger.info("Avvio del sistema...")
+
     cfg = setting.Config()
+    logger.info("Configurazione caricata correttamente")

     try:
-        # Configure logging to write log messages to a file with a specific format
-        logging.basicConfig(
-            format="%(asctime)s - PID: %(process)d.%(name)s.%(levelname)s: %(message)s ",
-            filename=cfg.logfilename,
-            level=logging.INFO,
-        )
+        # Configure global logging
+        log_level = os.getenv("LOG_LEVEL", "INFO").upper()
+        debug_mode = (logging.getLogger().getEffectiveLevel() == logging.DEBUG)
+
+        # Configure logging with the custom formatter
+        handler = logging.FileHandler(cfg.logfilename)
+        formatter = WorkerFormatter(
+            "%(asctime)s - PID: %(process)d.Worker-%(worker_id)s.%(name)s.%(funcName)s.%(levelname)s: %(message)s"
+        )
+        handler.setFormatter(formatter)
+
+        # Remove any existing handlers and add ours
+        logger.handlers.clear()
+        logger.addHandler(handler)
+        logger.setLevel(getattr(logging, log_level))
+
+        logger.info("Logging configurato correttamente")
+
+        # Maximum number of concurrent workers
+        logger.info(f"Avvio di {cfg.max_threads} worker concorrenti")
+
+        pool = await aiomysql.create_pool(
+            host=cfg.dbhost,
+            user=cfg.dbuser,
+            password=cfg.dbpass,
+            db=cfg.dbname,
+            minsize=4,
+            maxsize=cfg.max_threads * 4,
+            pool_recycle=3600
+        )
+
+        # Start the workers
+        workers = [
+            asyncio.create_task(worker(i, cfg, pool))
+            for i in range(cfg.max_threads)
+        ]
+
+        logger.info("Sistema avviato correttamente. In attesa di nuovi task...")

-        # Limit the number of concurrent executions to max_threads
-        semaphore = asyncio.Semaphore(cfg.max_threads)
-        running_tasks = set()
-
-        # Enter an infinite loop to continuously process records
-        while True:
-            try:
-                # Establish a database connection
-                with connetti_db(cfg) as conn:
-                    cur = conn.cursor()
-                    # Select a single record from the raw data table that is not currently locked and has a status of 0
-                    cur.execute(f'select id, unit_name, unit_type, tool_name, tool_type from {cfg.dbname}.{cfg.dbrectable} where locked = 0 and status = {DATA_LOADED} limit 1')
-                    id, unit_name, unit_type, tool_name, tool_type = cur.fetchone()
-                    if id:
-                        task = asyncio.create_task(run_matlab_elab(id, unit_name, unit_type, tool_name, tool_type, semaphore))
-                        running_tasks.add(task)
-                        # Remove completed tasks from the set
-                        running_tasks = {t for t in running_tasks if not t.done()}
-
-                # If a record was successfully processed, log the number of threads currently running
-                #logger.info(f"Threads in execution: {len(threads)}")
-            except Exception as e:
-                logger.info(f"Error: {e}.")
+        try:
+            await asyncio.gather(*workers, return_exceptions=debug_mode)
+        finally:
+            pool.close()
+            await pool.wait_closed()

     except KeyboardInterrupt:
-        # Handle a keyboard interrupt (e.g., Ctrl+C) to gracefully shut down the program
-        logger.info("Info: Shutdown requested...exiting")
+        logger.info("Info: Shutdown richiesto... chiusura in corso")
     except Exception as e:
-        logger.info(f"Error: {e}.")
+        logger.error(f"Errore principale: {e}", exc_info=debug_mode)


 if __name__ == "__main__":
     asyncio.run(main())
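
`return_exceptions=debug_mode` in the new `main()` ties gather's error handling to the log level: when LOG_LEVEL is DEBUG a crashed worker's exception is returned as a value and silently dropped, while at INFO the first worker exception tears down the whole gather. A sketch that logs failures in both modes (a suggestion, not part of the commit):

    results = await asyncio.gather(*workers, return_exceptions=True)
    for r in results:
        if isinstance(r, Exception):
            logger.error(f"Worker terminated with error: {r}")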
env/elab.ini (new file, 14 lines, vendored)
@@ -0,0 +1,14 @@
+[logging]
+logFilename = ./load_raw_data.log
+
+[threads]
+max_num = 20
+
+[matlab]
+runtime = "/usr/local/MATLAB/MATLAB_Runtime/v93"
+func_path = "/usr/local/matlab_func/"
+timeout = 1800
+error = ""
+error_path = "/tmp/"
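
One caveat on the values above: configparser does not strip quote characters, so `runtime` comes back as the literal string `"/usr/local/MATLAB/MATLAB_Runtime/v93"` with the double quotes included. A minimal check, assuming the file lives at env/elab.ini as committed:

    from configparser import ConfigParser

    c = ConfigParser()
    c.read("env/elab.ini")
    runtime = c.get("matlab", "runtime")      # '"/usr/local/MATLAB/MATLAB_Runtime/v93"'
    runtime = runtime.strip('"')              # strip the quotes by hand if needed
    timeout = c.getint("matlab", "timeout")   # 1800 - unquoted integers parse fine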
@@ -12,6 +12,8 @@ import contextvars
 from utils.config import loader_load_data as setting
 from utils.database import CSV_RECEIVED
+
+from utils.csv.loaders import get_next_csv_atomic

 # Create a context variable to identify the worker
 worker_context = contextvars.ContextVar('worker_id', default='00')
@@ -29,41 +31,6 @@ CSV_PROCESSING_DELAY = 0.2
 # Wait time when there are no records to process
 NO_RECORD_SLEEP = 60

-
-async def get_next_csv_atomic(pool, table_name):
-    """Atomically fetch the next CSV record to process."""
-    async with pool.acquire() as conn:
-        # IMPORTANT: disable autocommit for this transaction
-        await conn.begin()
-
-        try:
-            async with conn.cursor() as cur:
-                # Use SELECT ... FOR UPDATE for an atomic lock
-                await cur.execute(f"""
-                    SELECT id, unit_type, tool_type, unit_name, tool_name
-                    FROM {table_name}
-                    WHERE locked = 0 AND status = %s
-                    ORDER BY id
-                    LIMIT 1
-                    FOR UPDATE SKIP LOCKED
-                """, (CSV_RECEIVED,))
-
-                result = await cur.fetchone()
-                if result:
-                    await cur.execute(f"""
-                        UPDATE {table_name}
-                        SET locked = 1
-                        WHERE id = %s
-                    """, (result[0],))
-
-            # Explicit commit to release the lock
-            await conn.commit()
-            return result
-
-        except Exception as e:
-            # Rollback on error
-            await conn.rollback()
-            raise e

 async def worker(worker_id: int, cfg: object, pool) -> None:
     # Set the context for this worker
     worker_context.set(f"W{worker_id}")
@@ -75,7 +42,7 @@ async def worker(worker_id: int, cfg: object, pool) -> None:
         try:
             logger.info("Inizio elaborazione")

-            record = await get_next_csv_atomic(pool, cfg.dbrectable)
+            record = await get_next_csv_atomic(pool, cfg.dbrectable, CSV_RECEIVED)

             if record:
                 success = await load_csv(record, cfg, pool)
old_script/TS_PiniScript.py (new executable file, 2584 lines)
File diff suppressed because one or more lines are too long.
old_script/dbconfig.py (new executable file, 15 lines)
@@ -0,0 +1,15 @@
+from configparser import ConfigParser
+
+def read_db_config(filename='/home/battilo/scripts/config.ini', section='mysql'):
+    parser = ConfigParser()
+    parser.read(filename)
+
+    db = {}
+    if parser.has_section(section):
+        items = parser.items(section)
+        for item in items:
+            db[item[0]] = item[1]
+    else:
+        raise Exception('{0} not found in the {1} file'.format(section, filename))
+
+    return db
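
A hypothetical usage of `read_db_config` with mysql.connector, assuming the keys in the `[mysql]` section match `connect()`'s keyword arguments (host, user, password, database); both the import path and the key names are assumptions, not shown in the diff:

    from mysql.connector import connect

    from old_script.dbconfig import read_db_config

    db = read_db_config()   # e.g. {'host': ..., 'user': ..., 'password': ..., 'database': ...}
    conn = connect(**db)    # works only if the section keys are valid connect() kwargs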
@@ -29,3 +29,10 @@ class Config:
         self.dbrawdata = c.get("tables", "rawTableName")
         self.dbrawdata = c.get("tables", "rawTableName")
         self.dbnodes = c.get("tables", "nodesTableName")
+
+        # Matlab
+        self.matlab_runtime = c.get("matlab", "runtime")
+        self.matlab_func_path = c.get("matlab", "func_path")
+        self.matlab_timeout = c.getint("matlab", "timeout")
+        self.matlab_error = c.get("matlab", "error")
+        self.matlab_error_path = c.get("matlab", "error_path")
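
One cross-file detail worth checking: the new worker above interpolates `cfg.timeout` into `matlab_cmd`, while this hunk exposes the setting as `cfg.matlab_timeout` (the runtime, as `cfg.matlab_runtime`, does match). Unless `timeout` is defined elsewhere in Config, the worker's f-string will raise `AttributeError`, so one of the two names presumably needs to be aligned.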
@@ -9,6 +9,16 @@ from itertools import islice
 logger = logging.getLogger(__name__)


 async def get_data(cfg: object, id: int, pool) -> tuple:
+    """
+    Retrieves unit name, tool name, and tool data for a given record ID from the database.
+
+    Args:
+        cfg (object): Configuration object containing database table name.
+        id (int): The ID of the record to retrieve.
+        pool: The database connection pool.
+
+    Returns:
+        tuple: A tuple containing unit_name, tool_name, and tool_data.
+    """
     async with pool.acquire() as conn:
         async with conn.cursor() as cur:
             await cur.execute(f'select unit_name, tool_name, tool_data from {cfg.dbrectable} where id = {id}')

@@ -17,6 +27,16 @@ async def get_data(cfg: object, id: int, pool) -> tuple:
     return unit_name, tool_name, tool_data


 async def make_pipe_sep_matrix(cfg: object, id: int, pool) -> list:
+    """
+    Processes pipe-separated data from a CSV record into a structured matrix.
+
+    Args:
+        cfg (object): Configuration object.
+        id (int): The ID of the CSV record.
+        pool: The database connection pool.
+
+    Returns:
+        list: A list of lists, where each inner list represents a row in the matrix.
+    """
     UnitName, ToolNameID, ToolData = await get_data(cfg, id, pool)
     righe = ToolData.splitlines()
     matrice_valori = []

@@ -39,6 +59,16 @@ async def make_pipe_sep_matrix(cfg: object, id: int, pool) -> list:
     return matrice_valori


 async def make_ain_din_matrix(cfg: object, id: int, pool) -> list:
+    """
+    Processes analog and digital input data from a CSV record into a structured matrix.
+
+    Args:
+        cfg (object): Configuration object.
+        id (int): The ID of the CSV record.
+        pool: The database connection pool.
+
+    Returns:
+        list: A list of lists, where each inner list represents a row in the matrix.
+    """
     UnitName, ToolNameID, ToolData = await get_data(cfg, id, pool)
     node_channels, node_types, node_ains, node_dins = get_nodes_type(cfg, ToolNameID, UnitName)
     righe = ToolData.splitlines()

@@ -63,6 +93,16 @@ async def make_ain_din_matrix(cfg: object, id: int, pool) -> list:
     return matrice_valori


 async def make_channels_matrix(cfg: object, id: int, pool) -> list:
+    """
+    Processes channel-based data from a CSV record into a structured matrix.
+
+    Args:
+        cfg (object): Configuration object.
+        id (int): The ID of the CSV record.
+        pool: The database connection pool.
+
+    Returns:
+        list: A list of lists, where each inner list represents a row in the matrix.
+    """
     UnitName, ToolNameID, ToolData = await get_data(cfg, id, pool)
     node_channels, node_types, node_ains, node_dins = get_nodes_type(cfg, ToolNameID, UnitName)
     righe = ToolData.splitlines()

@@ -81,6 +121,16 @@ async def make_channels_matrix(cfg: object, id: int, pool) -> list:
     return matrice_valori


 async def make_musa_matrix(cfg: object, id: int, pool) -> list:
+    """
+    Processes 'Musa' specific data from a CSV record into a structured matrix.
+
+    Args:
+        cfg (object): Configuration object.
+        id (int): The ID of the CSV record.
+        pool: The database connection pool.
+
+    Returns:
+        list: A list of lists, where each inner list represents a row in the matrix.
+    """
     UnitName, ToolNameID, ToolData = await get_data(cfg, id, pool)
     node_channels, node_types, node_ains, node_dins = get_nodes_type(cfg, ToolNameID, UnitName)
     righe = ToolData.splitlines()

@@ -104,6 +154,16 @@ async def make_musa_matrix(cfg: object, id: int, pool) -> list:


 async def make_tlp_matrix(cfg: object, id: int, pool) -> list:
+    """
+    Processes 'TLP' specific data from a CSV record into a structured matrix.
+
+    Args:
+        cfg (object): Configuration object.
+        id (int): The ID of the CSV record.
+        pool: The database connection pool.
+
+    Returns:
+        list: A list of lists, where each inner list represents a row in the matrix.
+    """
     UnitName, ToolNameID, ToolData = await get_data(cfg, id, pool)
     righe = ToolData.splitlines()
     valori_x_nodo = 2

@@ -121,6 +181,16 @@ async def make_tlp_matrix(cfg: object, id: int, pool) -> list:


 async def make_gd_matrix(cfg: object, id: int, pool) -> list:
+    """
+    Processes 'GD' specific data from a CSV record into a structured matrix.
+
+    Args:
+        cfg (object): Configuration object.
+        id (int): The ID of the CSV record.
+        pool: The database connection pool.
+
+    Returns:
+        list: A list of lists, where each inner list represents a row in the matrix.
+    """
     UnitName, ToolNameID, ToolData = await get_data(cfg, id, pool)
     righe = ToolData.splitlines()
     matrice_valori = []
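
The matrix-builder bodies themselves are elided from this diff; based on the function names and the `ToolData.splitlines()` context lines, a purely illustrative sketch of what the pipe-separated case plausibly produces (the sample values are invented):

    # Illustrative only - the real make_pipe_sep_matrix body is not shown in the diff
    tool_data = "2024-01-01 00:00|1.21|3.47\n2024-01-01 00:10|1.25|3.51"
    righe = tool_data.splitlines()
    matrice_valori = [riga.split("|") for riga in righe]
    # [['2024-01-01 00:00', '1.21', '3.47'], ['2024-01-01 00:10', '1.25', '3.51']]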
@@ -7,6 +7,15 @@ import logging
 logger = logging.getLogger(__name__)


 async def main_loader(cfg: object, id: int, pool, action: str) -> None:
+    """
+    Main loader function to process CSV data based on the specified action.
+
+    Args:
+        cfg (object): Configuration object.
+        id (int): The ID of the CSV record to process.
+        pool: The database connection pool.
+        action (str): The type of data processing to perform (e.g., "pipe_separator", "analogic_digital").
+    """
     type_matrix_mapping = {
         "pipe_separator": make_pipe_sep_matrix,
         "analogic_digital": make_ain_din_matrix,

@@ -27,3 +36,39 @@ async def main_loader(cfg: object, id: int, pool, action: str) -> None:
         await unlock(cfg, id, pool)
     else:
         logger.warning(f"Action '{action}' non riconosciuta.")
+
+
+async def get_next_csv_atomic(pool, table_name, status):
+    """Atomically fetch the next CSV record to process."""
+    async with pool.acquire() as conn:
+        # IMPORTANT: disable autocommit for this transaction
+        await conn.begin()
+
+        try:
+            async with conn.cursor() as cur:
+                # Use SELECT ... FOR UPDATE for an atomic lock
+                await cur.execute(f"""
+                    SELECT id, unit_type, tool_type, unit_name, tool_name
+                    FROM {table_name}
+                    WHERE locked = 0 AND status = %s
+                    ORDER BY id
+                    LIMIT 1
+                    FOR UPDATE SKIP LOCKED
+                """, (status,))
+
+                result = await cur.fetchone()
+                if result:
+                    await cur.execute(f"""
+                        UPDATE {table_name}
+                        SET locked = 1
+                        WHERE id = %s
+                    """, (result[0],))
+
+            # Explicit commit to release the lock
+            await conn.commit()
+            return result
+
+        except Exception as e:
+            # Rollback on error
+            await conn.rollback()
+            raise e
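
`FOR UPDATE SKIP LOCKED` (MySQL 8.0+) is what makes this fetch safe under concurrency: a row selected inside one worker's open transaction is skipped by the competing SELECTs instead of blocking them, so each worker claims a distinct record. Typical call site, matching the workers above:

    # Each concurrent worker claims a different row; None means the queue is empty
    record = await get_next_csv_atomic(pool, cfg.dbrectable, CSV_RECEIVED)
    if record is None:
        await asyncio.sleep(NO_RECORD_SLEEP)

Note that `locked = 1` survives the commit, so a worker that dies mid-elaboration leaves its row claimed until something calls `unlock` (or otherwise resets the flag) for it.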
utils/database/matlab_query.py (new file, 27 lines)
@@ -0,0 +1,27 @@
+from utils.database.connection import connetti_db
+import logging
+
+logger = logging.getLogger(__name__)
+
+
+def get_matlab_command(cfg: object, tool: str, unit: str) -> tuple:
+
+    with connetti_db(cfg) as conn:
+        cur = conn.cursor(dictionary=True)
+        query = f"""
+            SELECT m.matcall, t.ftp_send, t.unit_id, s.`desc` as statustools, t.api_send, u.inoltro_api, u.inoltro_api_url, u.inoltro_api_bearer_token, IFNULL(u.duedate, "") as duedate from matfuncs as m
+            INNER JOIN tools as t on t.matfunc = m.id
+            INNER JOIN units as u on u.id = t.unit_id
+            INNER JOIN statustools as s on t.statustool_id = s.id
+            where t.name = '{tool}' AND u.name = '{unit}';
+        """
+        cur.execute(query)
+        result = cur.fetchone()
+        cur.close()
+        conn.close()
+
+    if not result:
+        logger.error(f"{unit} - {tool}: Matlab command not found.")
+        return None
+    else:
+        return result
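
Two review notes on this new module: the worker calls it as `await get_matlab_command(...)` even though the function is synchronous (awaiting its return value raises `TypeError`), and interpolating `tool` and `unit` into the SQL f-string is an injection risk. mysql.connector supports `%s` placeholders, so a hedged fix for the latter would be:

    query = """
        SELECT m.matcall, t.ftp_send, t.unit_id, s.`desc` AS statustools,
               t.api_send, u.inoltro_api, u.inoltro_api_url,
               u.inoltro_api_bearer_token, IFNULL(u.duedate, "") AS duedate
        FROM matfuncs AS m
        INNER JOIN tools AS t ON t.matfunc = m.id
        INNER JOIN units AS u ON u.id = t.unit_id
        INNER JOIN statustools AS s ON t.statustool_id = s.id
        WHERE t.name = %s AND u.name = %s
    """
    cur.execute(query, (tool, unit))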
@@ -1,2 +1,35 @@
+import subprocess
+import tempfile
+import os
+
+from utils.database.loader_action import DATA_LOADED, update_status, unlock
+from utils.csv.data_preparation import get_data
+
+import logging
+
+logger = logging.getLogger(__name__)
+
+
 async def main_loader(cfg: object, id: int, pool) -> None:
-    pass
+    UnitName, ToolNameID, ToolData = await get_data(cfg, id, pool)
+
+    # Create a temporary file
+    with tempfile.NamedTemporaryFile(mode='w', suffix='.csv', delete=False) as temp_file:
+        temp_file.write(ToolData)
+        temp_filename = temp_file.name
+
+    try:
+        # Run the program against the temporary file
+        result = await subprocess.run(['python3', 'old_script/TS_PiniScript.py', temp_filename], capture_output=True, text=True)
+        print(result.stdout)
+        print(result.stderr)
+    finally:
+        # Clean up the temporary file
+        os.unlink(temp_filename)
+
+    if result.returncode != 0:
+        logger.error(f"Errore nell'esecuzione del programma TS_PiniScript.py: {result.stderr}")
+        raise Exception(f"Errore nel programma: {result.stderr}")
+    else:
+        logger.info(f"Programma TS_PiniScript.py eseguito con successo: {result.stdout}")
+        await update_status(cfg, id, DATA_LOADED, pool)
+        await unlock(cfg, id, pool)
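
This hunk repeats the `await subprocess.run(...)` problem from the first file: the call raises `TypeError` before the script ever runs, so the record is never marked DATA_LOADED or unlocked. A sketch that keeps the blocking call off the event loop via a thread (an assumption about intent; requires Python 3.9+):

    import asyncio
    import subprocess

    result = await asyncio.to_thread(
        subprocess.run,
        ['python3', 'old_script/TS_PiniScript.py', temp_filename],
        capture_output=True,
        text=True,
    )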